language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
quarkusio__quarkus
|
integration-tests/gradle/src/test/java/io/quarkus/gradle/QuarkusGradleDevToolsTestBase.java
|
{
"start": 257,
"end": 1170
}
|
class ____ extends QuarkusGradleWrapperTestBase {
private static Properties devToolsProps = new Properties();
@BeforeAll
static void enableDevToolsTestConfig() {
RegistryClientTestHelper.enableRegistryClientTestConfig(Paths.get("").normalize().toAbsolutePath().resolve("build"),
devToolsProps);
for (Map.Entry<?, ?> prop : devToolsProps.entrySet()) {
System.setProperty(prop.getKey().toString(), prop.getValue().toString());
}
}
@AfterAll
static void disableDevToolsTestConfig() {
RegistryClientTestHelper.disableRegistryClientTestConfig();
}
@Override
protected void setupTestCommand() {
gradleNoWatchFs(false);
for (Map.Entry<?, ?> prop : devToolsProps.entrySet()) {
setSystemProperty(prop.getKey().toString(), prop.getValue().toString());
}
}
}
|
QuarkusGradleDevToolsTestBase
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/query/results/internal/complete/CompleteResultBuilderCollection.java
|
{
"start": 191,
"end": 267
}
|
interface ____ extends CompleteResultBuilder {
}
|
CompleteResultBuilderCollection
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/MulDoublesEvaluator.java
|
{
"start": 5193,
"end": 5944
}
|
class ____ implements EvalOperator.ExpressionEvaluator.Factory {
private final Source source;
private final EvalOperator.ExpressionEvaluator.Factory lhs;
private final EvalOperator.ExpressionEvaluator.Factory rhs;
public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory lhs,
EvalOperator.ExpressionEvaluator.Factory rhs) {
this.source = source;
this.lhs = lhs;
this.rhs = rhs;
}
@Override
public MulDoublesEvaluator get(DriverContext context) {
return new MulDoublesEvaluator(source, lhs.get(context), rhs.get(context), context);
}
@Override
public String toString() {
return "MulDoublesEvaluator[" + "lhs=" + lhs + ", rhs=" + rhs + "]";
}
}
}
|
Factory
|
java
|
bumptech__glide
|
integration/sqljournaldiskcache/src/test/java/com/bumptech/glide/integration/sqljournaldiskcache/DiskCacheUtils.java
|
{
"start": 1673,
"end": 2186
}
|
class ____ extends ExternalResource {
private File cacheDir;
@Override
protected void before() throws Throwable {
cacheDir =
new File(ApplicationProvider.getApplicationContext().getCacheDir(), "test_sql_cache");
super.before();
}
@Override
protected void after() {
super.after();
deleteRecursively(cacheDir);
}
void cleanup() {
deleteRecursively(cacheDir);
}
File diskCacheDir() {
return cacheDir;
}
}
}
|
DiskCacheDirRule
|
java
|
spring-projects__spring-boot
|
build-plugin/spring-boot-maven-plugin/src/main/java/org/springframework/boot/maven/ExcludeFilter.java
|
{
"start": 931,
"end": 1334
}
|
class ____ extends DependencyFilter {
public ExcludeFilter(Exclude... excludes) {
this(Arrays.asList(excludes));
}
public ExcludeFilter(List<Exclude> excludes) {
super(excludes);
}
@Override
protected boolean filter(Artifact artifact) {
for (FilterableDependency dependency : getFilters()) {
if (equals(artifact, dependency)) {
return true;
}
}
return false;
}
}
|
ExcludeFilter
|
java
|
elastic__elasticsearch
|
modules/lang-painless/src/main/java/org/elasticsearch/painless/ir/InvokeCallNode.java
|
{
"start": 673,
"end": 1661
}
|
class ____ extends ArgumentsNode {
/* ---- begin node data ---- */
private PainlessMethod method;
private Class<?> box;
public void setMethod(PainlessMethod method) {
this.method = method;
}
public PainlessMethod getMethod() {
return method;
}
public void setBox(Class<?> box) {
this.box = box;
}
public Class<?> getBox() {
return box;
}
/* ---- end node data, begin visitor ---- */
@Override
public <Scope> void visit(IRTreeVisitor<Scope> irTreeVisitor, Scope scope) {
irTreeVisitor.visitInvokeCall(this, scope);
}
@Override
public <Scope> void visitChildren(IRTreeVisitor<Scope> irTreeVisitor, Scope scope) {
for (ExpressionNode argumentNode : getArgumentNodes()) {
argumentNode.visit(irTreeVisitor, scope);
}
}
/* ---- end visitor ---- */
public InvokeCallNode(Location location) {
super(location);
}
}
|
InvokeCallNode
|
java
|
apache__logging-log4j2
|
log4j-core-test/src/test/java/org/apache/logging/log4j/core/impl/ThreadContextDataInjectorTest.java
|
{
"start": 3306,
"end": 5484
}
|
class ____",
threadContextMap.getClass().getName(),
is(equalTo(threadContextMapClassName)));
final ContextDataInjector contextDataInjector = createInjector();
final StringMap stringMap = contextDataInjector.injectContextData(null, new SortedArrayStringMap());
assertThat("thread context map", ThreadContext.getContext(), allOf(hasEntry("foo", "bar"), not(hasKey("baz"))));
assertThat("context map", stringMap.toMap(), allOf(hasEntry("foo", "bar"), not(hasKey("baz"))));
if (!stringMap.isFrozen()) {
stringMap.clear();
assertThat(
"thread context map",
ThreadContext.getContext(),
allOf(hasEntry("foo", "bar"), not(hasKey("baz"))));
assertThat("context map", stringMap.toMap().entrySet(), is(empty()));
}
ThreadContext.put("foo", "bum");
ThreadContext.put("baz", "bam");
assertThat(
"thread context map",
ThreadContext.getContext(),
allOf(hasEntry("foo", "bum"), hasEntry("baz", "bam")));
if (stringMap.isFrozen()) {
assertThat("context map", stringMap.toMap(), allOf(hasEntry("foo", "bar"), not(hasKey("baz"))));
} else {
assertThat("context map", stringMap.toMap().entrySet(), is(empty()));
}
}
private void prepareThreadContext(final boolean isThreadContextMapInheritable) {
System.setProperty("log4j2.isThreadContextMapInheritable", Boolean.toString(isThreadContextMapInheritable));
resetThreadContextMap();
ThreadContext.clearMap();
ThreadContext.put("foo", "bar");
}
@Test
public void testThreadContextImmutability() {
prepareThreadContext(false);
testContextDataInjector();
}
@Test
public void testInheritableThreadContextImmutability() throws Throwable {
prepareThreadContext(true);
try {
newSingleThreadExecutor().submit(this::testContextDataInjector).get();
} catch (final ExecutionException ee) {
throw ee.getCause();
}
}
}
|
name
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/internal/files/Files_assertHasParent_Test.java
|
{
"start": 1557,
"end": 4622
}
|
class ____ extends FilesBaseTest {
private File actual = new File("./some/test");
private File expectedParent = new File("./some");
@Test
void should_throw_error_if_actual_is_null() {
// GIVEN
File actual = null;
// WHEN
var error = expectAssertionError(() -> underTest.assertHasParent(INFO, actual, expectedParent));
// THEN
then(error).hasMessage(actualIsNull());
}
@Test
void should_throw_npe_if_expected_is_null() {
// GIVEN
File expected = null;
// WHEN
NullPointerException npe = catchNullPointerException(() -> underTest.assertHasParent(INFO, actual, expected));
// THEN
then(npe).hasMessage("The expected parent file should not be null.");
}
@Test
void should_fail_if_actual_has_no_parent() {
// GIVEN
File withoutParent = new File("without-parent");
// WHEN
expectAssertionError(() -> underTest.assertHasParent(INFO, withoutParent, expectedParent));
// THEN
verify(failures).failure(INFO, shouldHaveParent(withoutParent, expectedParent));
}
@Test
void should_fail_if_actual_does_not_have_the_expected_parent() {
// GIVEN
File expectedParent = new File("./expected-parent");
// WHEN
expectAssertionError(() -> underTest.assertHasParent(INFO, actual, expectedParent));
// THEN
verify(failures).failure(INFO, shouldHaveParent(actual, expectedParent));
}
@Test
void should_pass_if_actual_has_expected_parent() {
underTest.assertHasParent(INFO, actual, expectedParent);
}
@Test
void should_pass_if_actual_has_expected_parent_when_actual_form_is_absolute() {
underTest.assertHasParent(INFO, actual.getAbsoluteFile(), expectedParent);
}
@Test
void should_pass_if_actual_has_expected_parent_when_actual_form_is_canonical() throws Exception {
underTest.assertHasParent(INFO, actual.getCanonicalFile(), expectedParent);
}
@Test
void should_throw_exception_when_canonical_form_representation_fail() throws Exception {
// GIVEN
File actual = mock(File.class);
File actualParent = mock(File.class);
when(actual.getParentFile()).thenReturn(actualParent);
when(actualParent.getCanonicalFile()).thenThrow(new IOException());
// WHEN
UncheckedIOException uioe = catchThrowableOfType(UncheckedIOException.class,
() -> underTest.assertHasParent(INFO, actual, actualParent));
// THEN
then(uioe).hasMessageStartingWith("Unable to get canonical form of");
}
@Test
void should_throw_exception_when_canonical_form_representation_fail_for_expected_parent() throws Exception {
File expectedParent = mock(File.class);
when(expectedParent.getCanonicalFile()).thenThrow(new IOException());
// WHEN
UncheckedIOException uioe = catchThrowableOfType(UncheckedIOException.class,
() -> underTest.assertHasParent(INFO, actual, expectedParent));
// THEN
then(uioe).hasMessageStartingWith("Unable to get canonical form of");
}
}
|
Files_assertHasParent_Test
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/OperatorCoordinatorCheckpoints.java
|
{
"start": 1520,
"end": 1863
}
|
class ____ a simplified error handling logic. If one of the several coordinator
* checkpoints fail, no cleanup is triggered for the other concurrent ones. That is okay, since they
* all produce just byte[] as the result. We have to change that once we allow then to create
* external resources that actually need to be cleaned up.
*/
final
|
has
|
java
|
mybatis__mybatis-3
|
src/main/java/org/apache/ibatis/executor/parameter/ParameterHandler.java
|
{
"start": 883,
"end": 1012
}
|
interface ____ {
Object getParameterObject();
void setParameters(PreparedStatement ps) throws SQLException;
}
|
ParameterHandler
|
java
|
spring-projects__spring-framework
|
spring-core-test/src/main/java/org/springframework/core/test/tools/DynamicClassFileObject.java
|
{
"start": 1027,
"end": 1088
}
|
class ____.
*
* @author Phillip Webb
* @since 6.0
*/
|
bytecode
|
java
|
spring-projects__spring-framework
|
spring-webmvc/src/test/java/org/springframework/web/servlet/support/FlashMapManagerTests.java
|
{
"start": 1558,
"end": 12729
}
|
class ____ {
private final TestFlashMapManager flashMapManager = new TestFlashMapManager();
private final MockHttpServletRequest request = new MockHttpServletRequest();
private final MockHttpServletResponse response = new MockHttpServletResponse();
@Test
void retrieveAndUpdateMatchByPath() {
FlashMap flashMap = new FlashMap();
flashMap.put("key", "value");
flashMap.setTargetRequestPath("/path");
this.flashMapManager.setFlashMaps(List.of(flashMap));
this.request.setRequestURI("/path");
FlashMap inputFlashMap = this.flashMapManager.retrieveAndUpdate(this.request, this.response);
assertThatFlashMap(inputFlashMap).isEqualTo(flashMap);
}
@Test // SPR-8779
public void retrieveAndUpdateMatchByOriginatingPath() {
FlashMap flashMap = new FlashMap();
flashMap.put("key", "value");
flashMap.setTargetRequestPath("/accounts");
this.flashMapManager.setFlashMaps(List.of(flashMap));
this.request.setAttribute(WebUtils.FORWARD_REQUEST_URI_ATTRIBUTE, "/accounts");
this.request.setRequestURI("/mvc/accounts");
FlashMap inputFlashMap = this.flashMapManager.retrieveAndUpdate(this.request, this.response);
assertThatFlashMap(inputFlashMap).isEqualTo(flashMap);
assertThat(this.flashMapManager.getFlashMaps()).as("Input FlashMap should have been removed").isEmpty();
}
@Test
void retrieveAndUpdateMatchWithTrailingSlash() {
FlashMap flashMap = new FlashMap();
flashMap.put("key", "value");
flashMap.setTargetRequestPath("/path");
this.flashMapManager.setFlashMaps(List.of(flashMap));
this.request.setRequestURI("/path/");
FlashMap inputFlashMap = this.flashMapManager.retrieveAndUpdate(this.request, this.response);
assertThatFlashMap(inputFlashMap).isEqualTo(flashMap);
assertThat(this.flashMapManager.getFlashMaps()).as("Input FlashMap should have been removed").isEmpty();
}
@Test
void retrieveAndUpdateMatchByParams() {
FlashMap flashMap = new FlashMap();
flashMap.put("key", "value");
flashMap.addTargetRequestParam("number", "one");
this.flashMapManager.setFlashMaps(List.of(flashMap));
this.request.setQueryString("number=");
FlashMap inputFlashMap = this.flashMapManager.retrieveAndUpdate(this.request, this.response);
assertThatFlashMap(inputFlashMap).isNull();
assertThat(this.flashMapManager.getFlashMaps()).as("FlashMap should not have been removed").hasSize(1);
this.request.setQueryString("number=two");
inputFlashMap = this.flashMapManager.retrieveAndUpdate(this.request, this.response);
assertThatFlashMap(inputFlashMap).isNull();
assertThat(this.flashMapManager.getFlashMaps()).as("FlashMap should not have been removed").hasSize(1);
this.request.setQueryString("number=one");
inputFlashMap = this.flashMapManager.retrieveAndUpdate(this.request, this.response);
assertThatFlashMap(inputFlashMap).isEqualTo(flashMap);
assertThat(this.flashMapManager.getFlashMaps()).as("Input FlashMap should have been removed").isEmpty();
}
@Test // SPR-8798
public void retrieveAndUpdateMatchWithMultiValueParam() {
FlashMap flashMap = new FlashMap();
flashMap.put("name", "value");
flashMap.addTargetRequestParam("id", "1");
flashMap.addTargetRequestParam("id", "2");
this.flashMapManager.setFlashMaps(List.of(flashMap));
this.request.setQueryString("id=1");
FlashMap inputFlashMap = this.flashMapManager.retrieveAndUpdate(this.request, this.response);
assertThatFlashMap(inputFlashMap).isNull();
assertThat(this.flashMapManager.getFlashMaps()).as("FlashMap should not have been removed").hasSize(1);
this.request.setQueryString("id=1&id=2");
inputFlashMap = this.flashMapManager.retrieveAndUpdate(this.request, this.response);
assertThatFlashMap(inputFlashMap).isEqualTo(flashMap);
assertThat(this.flashMapManager.getFlashMaps()).as("Input FlashMap should have been removed").isEmpty();
}
@Test
void retrieveAndUpdateSortMultipleMatches() {
FlashMap emptyFlashMap = new FlashMap();
FlashMap flashMapOne = new FlashMap();
flashMapOne.put("key1", "value1");
flashMapOne.setTargetRequestPath("/one");
FlashMap flashMapTwo = new FlashMap();
flashMapTwo.put("key1", "value1");
flashMapTwo.put("key2", "value2");
flashMapTwo.setTargetRequestPath("/one/two");
this.flashMapManager.setFlashMaps(Arrays.asList(emptyFlashMap, flashMapOne, flashMapTwo));
this.request.setRequestURI("/one/two");
FlashMap inputFlashMap = this.flashMapManager.retrieveAndUpdate(this.request, this.response);
assertThatFlashMap(inputFlashMap).isEqualTo(flashMapTwo);
assertThat(this.flashMapManager.getFlashMaps()).as("Input FlashMap should have been removed").hasSize(2);
}
@Test
void retrieveAndUpdateRemoveExpired() {
List<FlashMap> flashMaps = new ArrayList<>();
for (int i = 0; i < 5; i++) {
FlashMap expiredFlashMap = new FlashMap();
expiredFlashMap.startExpirationPeriod(-1);
flashMaps.add(expiredFlashMap);
}
this.flashMapManager.setFlashMaps(flashMaps);
this.flashMapManager.retrieveAndUpdate(this.request, this.response);
assertThat(this.flashMapManager.getFlashMaps()).as("Expired instances should be removed even if the saved FlashMap is empty")
.isEmpty();
}
@Test
void saveOutputFlashMapEmpty() {
FlashMap flashMap = new FlashMap();
this.flashMapManager.saveOutputFlashMap(flashMap, this.request, this.response);
List<FlashMap> allMaps = this.flashMapManager.getFlashMaps();
assertThat(allMaps).isNull();
}
@Test
void saveOutputFlashMap() {
FlashMap flashMap = new FlashMap();
flashMap.put("name", "value");
this.flashMapManager.setFlashMapTimeout(-1); // expire immediately so we can check expiration started
this.flashMapManager.saveOutputFlashMap(flashMap, this.request, this.response);
List<FlashMap> allMaps = this.flashMapManager.getFlashMaps();
assertThat(allMaps).isNotNull();
assertThatFlashMap(allMaps.get(0)).isSameAs(flashMap);
assertThat(flashMap.isExpired()).isTrue();
}
@Test
void saveOutputFlashMapDecodeTargetPath() {
FlashMap flashMap = new FlashMap();
flashMap.put("key", "value");
flashMap.setTargetRequestPath("/once%20upon%20a%20time");
this.flashMapManager.saveOutputFlashMap(flashMap, this.request, this.response);
assertThat(flashMap.getTargetRequestPath()).isEqualTo("/once upon a time");
}
@Test
void saveOutputFlashMapNormalizeTargetPath() {
FlashMap flashMap = new FlashMap();
flashMap.put("key", "value");
flashMap.setTargetRequestPath(".");
this.request.setRequestURI("/once/upon/a/time");
this.flashMapManager.saveOutputFlashMap(flashMap, this.request, this.response);
assertThat(flashMap.getTargetRequestPath()).isEqualTo("/once/upon/a");
flashMap.setTargetRequestPath("./");
this.request.setRequestURI("/once/upon/a/time");
this.flashMapManager.saveOutputFlashMap(flashMap, this.request, this.response);
assertThat(flashMap.getTargetRequestPath()).isEqualTo("/once/upon/a/");
flashMap.setTargetRequestPath("..");
this.request.setRequestURI("/once/upon/a/time");
this.flashMapManager.saveOutputFlashMap(flashMap, this.request, this.response);
assertThat(flashMap.getTargetRequestPath()).isEqualTo("/once/upon");
flashMap.setTargetRequestPath("../");
this.request.setRequestURI("/once/upon/a/time");
this.flashMapManager.saveOutputFlashMap(flashMap, this.request, this.response);
assertThat(flashMap.getTargetRequestPath()).isEqualTo("/once/upon/");
flashMap.setTargetRequestPath("../../only");
this.request.setRequestURI("/once/upon/a/time");
this.flashMapManager.saveOutputFlashMap(flashMap, this.request, this.response);
assertThat(flashMap.getTargetRequestPath()).isEqualTo("/once/only");
}
@Test // gh-23240
public void saveOutputFlashMapAndNormalizeEmptyTargetPath() {
FlashMap flashMap = new FlashMap();
flashMap.put("key", "value");
flashMap.setTargetRequestPath("");
this.flashMapManager.saveOutputFlashMap(flashMap, this.request, this.response);
assertThat(flashMap.getTargetRequestPath()).isEmpty();
}
@Test // SPR-9657, SPR-11504
public void saveOutputFlashMapDecodeParameters() {
FlashMap flashMap = new FlashMap();
flashMap.put("key", "value");
flashMap.setTargetRequestPath("/path");
flashMap.addTargetRequestParam("param", "%D0%90%D0%90");
flashMap.addTargetRequestParam("param", "%D0%91%D0%91");
flashMap.addTargetRequestParam("param", "%D0%92%D0%92");
flashMap.addTargetRequestParam("%3A%2F%3F%23%5B%5D%40", "value");
this.request.setCharacterEncoding("UTF-8");
this.flashMapManager.saveOutputFlashMap(flashMap, this.request, this.response);
MockHttpServletRequest requestAfterRedirect = new MockHttpServletRequest("GET", "/path");
requestAfterRedirect.setQueryString(
"param=%D0%90%D0%90¶m=%D0%91%D0%91¶m=%D0%92%D0%92&%3A%2F%3F%23%5B%5D%40=value");
requestAfterRedirect.addParameter("param", "\u0410\u0410");
requestAfterRedirect.addParameter("param", "\u0411\u0411");
requestAfterRedirect.addParameter("param", "\u0412\u0412");
requestAfterRedirect.addParameter(":/?#[]@", "value");
flashMap = this.flashMapManager.retrieveAndUpdate(requestAfterRedirect, new MockHttpServletResponse());
assertThatFlashMap(flashMap).isNotNull();
assertThat(flashMap.size()).isEqualTo(1);
assertThat(flashMap.get("key")).isEqualTo("value");
}
@Test // SPR-12569
public void flashAttributesWithQueryParamsWithSpace() {
String encodedValue = URLEncoder.encode("1 2", StandardCharsets.UTF_8);
FlashMap flashMap = new FlashMap();
flashMap.put("key", "value");
flashMap.setTargetRequestPath("/path");
flashMap.addTargetRequestParam("param", encodedValue);
this.request.setCharacterEncoding("UTF-8");
this.flashMapManager.saveOutputFlashMap(flashMap, this.request, this.response);
MockHttpServletRequest requestAfterRedirect = new MockHttpServletRequest("GET", "/path");
requestAfterRedirect.setQueryString("param=" + encodedValue);
requestAfterRedirect.addParameter("param", "1 2");
flashMap = this.flashMapManager.retrieveAndUpdate(requestAfterRedirect, new MockHttpServletResponse());
assertThatFlashMap(flashMap).isNotNull();
assertThat(flashMap.size()).isEqualTo(1);
assertThat(flashMap.get("key")).isEqualTo("value");
}
@Test // SPR-15505
public void retrieveAndUpdateMatchByOriginatingPathAndQueryString() {
FlashMap flashMap = new FlashMap();
flashMap.put("key", "value");
flashMap.setTargetRequestPath("/accounts");
flashMap.addTargetRequestParam("a", "b");
this.flashMapManager.setFlashMaps(Collections.singletonList(flashMap));
this.request.setAttribute(WebUtils.FORWARD_REQUEST_URI_ATTRIBUTE, "/accounts");
this.request.setAttribute(WebUtils.FORWARD_QUERY_STRING_ATTRIBUTE, "a=b");
this.request.setRequestURI("/mvc/accounts");
this.request.setQueryString("x=y");
FlashMap inputFlashMap = this.flashMapManager.retrieveAndUpdate(this.request, this.response);
assertThatFlashMap(inputFlashMap).isEqualTo(flashMap);
assertThat(this.flashMapManager.getFlashMaps()).as("Input FlashMap should have been removed").isEmpty();
}
private static ObjectAssert<Object> assertThatFlashMap(@Nullable FlashMap flashMap) {
return assertThat((Object) flashMap);
}
private static
|
FlashMapManagerTests
|
java
|
apache__camel
|
components/camel-stream/src/test/java/org/apache/camel/component/stream/StreamGroupLinesLastStrategyTest.java
|
{
"start": 1051,
"end": 2462
}
|
class ____ extends StreamGroupLinesStrategyTest {
@Override
@Test
public void testGroupLines() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMessageCount(2);
mock.setAssertPeriod(1000);
mock.message(0).header(StreamConstants.STREAM_INDEX).isEqualTo(0);
mock.message(0).header(StreamConstants.STREAM_COMPLETE).isEqualTo(false);
mock.message(1).header(StreamConstants.STREAM_INDEX).isEqualTo(1);
mock.message(1).header(StreamConstants.STREAM_COMPLETE).isEqualTo(true);
MockEndpoint.assertIsSatisfied(context);
Object result = mock.getExchanges().get(0).getIn().getBody();
assertEquals("A" + LS + "B" + LS + "C" + LS + "D" + LS, result, "Get a wrong result.");
// we did not have 4 lines but since its the last it was triggered anyway
Object result2 = mock.getExchanges().get(1).getIn().getBody();
assertEquals("E" + LS + "F" + LS, result2, "Get a wrong result.");
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("stream:file?fileName=target/stream/streamfile.txt&groupLines=4&groupStrategy=#myGroupStrategy")
.to("mock:result");
}
};
}
}
|
StreamGroupLinesLastStrategyTest
|
java
|
apache__maven
|
its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng4788InstallationToCustomLocalRepoTest.java
|
{
"start": 1040,
"end": 1924
}
|
class ____ extends AbstractMavenIntegrationTestCase {
/**
* Verify that plugins can install artifacts to a custom local repo (i.e. custom base dir and custom layout).
*
* @throws Exception in case of failure
*/
@Test
public void testit() throws Exception {
File testDir = extractResources("/mng-4788");
Verifier verifier = newVerifier(testDir.getAbsolutePath());
verifier.setAutoclean(false);
verifier.deleteDirectory("target");
verifier.addCliArgument("validate");
verifier.execute();
verifier.verifyErrorFreeLog();
verifier.verifyFilePresent("target/local-repo/test-0.1-SNAPSHOT.jar");
verifier.verifyFileNotPresent(
"target/local-repo/org/apache/maven/its/mng4788/test/0.1-SNAPSHOT/test-0.1-SNAPSHOT.jar");
}
}
|
MavenITmng4788InstallationToCustomLocalRepoTest
|
java
|
spring-projects__spring-security
|
test/src/test/java/org/springframework/security/test/web/servlet/request/SecurityMockMvcRequestPostProcessorsAuthenticationStatelessTests.java
|
{
"start": 3371,
"end": 3786
}
|
class ____ {
@Bean
SecurityFilterChain filterChain(HttpSecurity http) throws Exception {
// @formatter:off
http
.sessionManagement((management) -> management
.sessionCreationPolicy(SessionCreationPolicy.STATELESS));
return http.build();
// @formatter:on
}
@Bean
UserDetailsService userDetailsService() {
return new InMemoryUserDetailsManager();
}
@RestController
static
|
Config
|
java
|
mapstruct__mapstruct
|
core/src/main/java/org/mapstruct/EnumMapping.java
|
{
"start": 488,
"end": 554
}
|
enum ____ {
* BRIE,
* ROQUEFORT
* }
*
* public
|
CheeseType
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/FloatingPointLiteralPrecisionTest.java
|
{
"start": 1892,
"end": 2869
}
|
class ____ {
double d2 = 1.0;
double d3 = 1;
double d4 = 1e6;
double d5 = 1e-3;
double d6 = 1d;
double d7 = 1_000.0;
double d8 = 0x1.0p63d;
float f2 = 1.0f;
float f3 = 1f;
float f4 = 0.88f;
float f5 = 1_000.0f;
float f6 = 0x1.0p63f;
}
""")
.doTest();
}
@Test
public void replacementTooLong() {
// In JDK versions before 19, String.valueOf(1e23) was 9.999999999999999E22, and the logic we're
// testing here was introduced to avoid introducing strings like that in rewrites. JDK 19 fixes
// https://bugs.openjdk.org/browse/JDK-4511638 (over 20 years after it was filed) so
// we don't need the logic or its test there.
String string1e23 = String.valueOf(1e23);
assumeTrue(string1e23.length() > "1e23".length() * 3);
String[] input = {
"
|
Test
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/ComponentValidationTest.java
|
{
"start": 7784,
"end": 8787
}
|
interface ____ {",
" Object object();",
"}");
CompilerTests.daggerCompiler(module, component)
.compile(
subject -> {
subject.hasErrorCount(2);
subject.hasErrorContaining("test.BadModule has errors")
.onSource(component)
.onLine(5);
subject.hasErrorContaining(
"@Binds methods must have exactly one parameter, whose type is assignable to "
+ "the return type")
.onSource(module)
.onLine(8);
});
}
@Test
public void attemptToInjectWildcardGenerics() {
Source testComponent =
CompilerTests.javaSource(
"test.TestComponent",
"package test;",
"",
"import dagger.Component;",
"import dagger.Lazy;",
"import javax.inject.Provider;",
"",
"@Component",
"
|
BadComponent
|
java
|
spring-projects__spring-boot
|
module/spring-boot-graphql/src/main/java/org/springframework/boot/graphql/autoconfigure/GraphQlCorsProperties.java
|
{
"start": 1342,
"end": 5132
}
|
class ____ {
/**
* List of origins to allow with '*' allowing all origins. When allow-credentials is
* enabled, '*' cannot be used, and setting origin patterns should be considered
* instead. When neither allowed origins nor allowed origin patterns are set,
* cross-origin requests are effectively disabled.
*/
private List<String> allowedOrigins = new ArrayList<>();
/**
* List of origin patterns to allow. Unlike allowed origins which only support '*',
* origin patterns are more flexible, e.g. 'https://*.example.com', and can be used
* with allow-credentials. When neither allowed origins nor allowed origin patterns
* are set, cross-origin requests are effectively disabled.
*/
private List<String> allowedOriginPatterns = new ArrayList<>();
/**
* List of HTTP methods to allow. '*' allows all methods. When not set, defaults to
* GET.
*/
private List<String> allowedMethods = new ArrayList<>();
/**
* List of HTTP headers to allow in a request. '*' allows all headers.
*/
private List<String> allowedHeaders = new ArrayList<>();
/**
* List of headers to include in a response.
*/
private List<String> exposedHeaders = new ArrayList<>();
/**
* Whether credentials are supported. When not set, credentials are not supported.
*/
private @Nullable Boolean allowCredentials;
/**
* How long the response from a pre-flight request can be cached by clients. If a
* duration suffix is not specified, seconds will be used.
*/
@DurationUnit(ChronoUnit.SECONDS)
private Duration maxAge = Duration.ofSeconds(1800);
public List<String> getAllowedOrigins() {
return this.allowedOrigins;
}
public void setAllowedOrigins(List<String> allowedOrigins) {
this.allowedOrigins = allowedOrigins;
}
public List<String> getAllowedOriginPatterns() {
return this.allowedOriginPatterns;
}
public void setAllowedOriginPatterns(List<String> allowedOriginPatterns) {
this.allowedOriginPatterns = allowedOriginPatterns;
}
public List<String> getAllowedMethods() {
return this.allowedMethods;
}
public void setAllowedMethods(List<String> allowedMethods) {
this.allowedMethods = allowedMethods;
}
public List<String> getAllowedHeaders() {
return this.allowedHeaders;
}
public void setAllowedHeaders(List<String> allowedHeaders) {
this.allowedHeaders = allowedHeaders;
}
public List<String> getExposedHeaders() {
return this.exposedHeaders;
}
public void setExposedHeaders(List<String> exposedHeaders) {
this.exposedHeaders = exposedHeaders;
}
public @Nullable Boolean getAllowCredentials() {
return this.allowCredentials;
}
public void setAllowCredentials(@Nullable Boolean allowCredentials) {
this.allowCredentials = allowCredentials;
}
public Duration getMaxAge() {
return this.maxAge;
}
public void setMaxAge(Duration maxAge) {
this.maxAge = maxAge;
}
public @Nullable CorsConfiguration toCorsConfiguration() {
if (CollectionUtils.isEmpty(this.allowedOrigins) && CollectionUtils.isEmpty(this.allowedOriginPatterns)) {
return null;
}
PropertyMapper map = PropertyMapper.get();
CorsConfiguration config = new CorsConfiguration();
map.from(this::getAllowedOrigins).to(config::setAllowedOrigins);
map.from(this::getAllowedOriginPatterns).to(config::setAllowedOriginPatterns);
map.from(this::getAllowedHeaders).whenNot(CollectionUtils::isEmpty).to(config::setAllowedHeaders);
map.from(this::getAllowedMethods).whenNot(CollectionUtils::isEmpty).to(config::setAllowedMethods);
map.from(this::getExposedHeaders).whenNot(CollectionUtils::isEmpty).to(config::setExposedHeaders);
map.from(this::getMaxAge).as(Duration::getSeconds).to(config::setMaxAge);
map.from(this::getAllowCredentials).to(config::setAllowCredentials);
return config;
}
}
|
GraphQlCorsProperties
|
java
|
spring-projects__spring-data-jpa
|
spring-data-jpa/src/test/java/org/springframework/data/jpa/repository/cdi/SamplePersonRepositoryImpl.java
|
{
"start": 708,
"end": 838
}
|
class ____ implements SamplePersonRepositoryCustom {
@Override
public int returnOne() {
return 1;
}
}
|
SamplePersonRepositoryImpl
|
java
|
spring-projects__spring-framework
|
spring-core/src/main/java/org/springframework/core/annotation/AnnotatedElementUtils.java
|
{
"start": 14486,
"end": 24923
}
|
class ____ of the annotation type to find
* @param classValuesAsString whether to convert Class references into Strings or to
* preserve them as Class references
* @param nestedAnnotationsAsMap whether to convert nested Annotation instances
* into {@code AnnotationAttributes} maps or to preserve them as Annotation instances
* @return the merged {@code AnnotationAttributes}, or {@code null} if not found
* @since 4.2
* @see #findMergedAnnotation(AnnotatedElement, Class)
* @see #findMergedAnnotationAttributes(AnnotatedElement, String, boolean, boolean)
* @see #getAllAnnotationAttributes(AnnotatedElement, String, boolean, boolean)
*/
public static @Nullable AnnotationAttributes getMergedAnnotationAttributes(AnnotatedElement element,
String annotationName, boolean classValuesAsString, boolean nestedAnnotationsAsMap) {
MergedAnnotation<?> mergedAnnotation = getAnnotations(element)
.get(annotationName, null, MergedAnnotationSelectors.firstDirectlyDeclared());
return getAnnotationAttributes(mergedAnnotation, classValuesAsString, nestedAnnotationsAsMap);
}
/**
* Get the first annotation of the specified {@code annotationType} within
* the annotation hierarchy <em>above</em> the supplied {@code element},
* merge that annotation's attributes with <em>matching</em> attributes from
* annotations in lower levels of the annotation hierarchy, and synthesize
* the result back into an annotation of the specified {@code annotationType}.
* <p>{@link AliasFor @AliasFor} semantics are fully supported, both within
* a single annotation and within the annotation hierarchy.
* @param element the annotated element
* @param annotationType the annotation type to find
* @return the merged, synthesized {@code Annotation}, or {@code null} if not found
* @since 4.2
* @see #findMergedAnnotation(AnnotatedElement, Class)
*/
public static <A extends Annotation> @Nullable A getMergedAnnotation(AnnotatedElement element, Class<A> annotationType) {
// Shortcut: directly present on the element, with no merging needed?
if (AnnotationFilter.PLAIN.matches(annotationType) ||
AnnotationsScanner.hasPlainJavaAnnotationsOnly(element)) {
return element.getDeclaredAnnotation(annotationType);
}
// Exhaustive retrieval of merged annotations...
return getAnnotations(element)
.get(annotationType, null, MergedAnnotationSelectors.firstDirectlyDeclared())
.synthesize(MergedAnnotation::isPresent).orElse(null);
}
/**
* Get <strong>all</strong> annotations of the specified {@code annotationType}
* within the annotation hierarchy <em>above</em> the supplied {@code element};
* and for each annotation found, merge that annotation's attributes with
* <em>matching</em> attributes from annotations in lower levels of the annotation
* hierarchy and synthesize the results back into an annotation of the specified
* {@code annotationType}.
* <p>{@link AliasFor @AliasFor} semantics are fully supported, both within
* a single annotation and within the annotation hierarchy.
* <p>This method follows <em>get semantics</em> as described in the
* {@linkplain AnnotatedElementUtils class-level javadoc}.
* @param element the annotated element (never {@code null})
* @param annotationType the annotation type to find (never {@code null})
* @return the set of all merged, synthesized {@code Annotations} found,
* or an empty set if none were found
* @since 4.3
* @see #getMergedAnnotation(AnnotatedElement, Class)
* @see #getAllAnnotationAttributes(AnnotatedElement, String)
* @see #findAllMergedAnnotations(AnnotatedElement, Class)
*/
public static <A extends Annotation> Set<A> getAllMergedAnnotations(
AnnotatedElement element, Class<A> annotationType) {
return getAnnotations(element).stream(annotationType)
.collect(MergedAnnotationCollectors.toAnnotationSet());
}
/**
* Get <strong>all</strong> annotations of the specified {@code annotationTypes}
* within the annotation hierarchy <em>above</em> the supplied {@code element};
* and for each annotation found, merge that annotation's attributes with
* <em>matching</em> attributes from annotations in lower levels of the
* annotation hierarchy and synthesize the results back into an annotation
* of the corresponding {@code annotationType}.
* <p>{@link AliasFor @AliasFor} semantics are fully supported, both within
* a single annotation and within the annotation hierarchy.
* <p>This method follows <em>get semantics</em> as described in the
* {@linkplain AnnotatedElementUtils class-level javadoc}.
* @param element the annotated element (never {@code null})
* @param annotationTypes the annotation types to find
* @return the set of all merged, synthesized {@code Annotations} found,
* or an empty set if none were found
* @since 5.1
* @see #getAllMergedAnnotations(AnnotatedElement, Class)
*/
public static Set<Annotation> getAllMergedAnnotations(AnnotatedElement element,
Set<Class<? extends Annotation>> annotationTypes) {
return getAnnotations(element).stream()
.filter(MergedAnnotationPredicates.typeIn(annotationTypes))
.collect(MergedAnnotationCollectors.toAnnotationSet());
}
/**
* Get all <em>repeatable annotations</em> of the specified {@code annotationType}
* within the annotation hierarchy <em>above</em> the supplied {@code element};
* and for each annotation found, merge that annotation's attributes with
* <em>matching</em> attributes from annotations in lower levels of the annotation
* hierarchy and synthesize the results back into an annotation of the specified
* {@code annotationType}.
* <p>The container type that holds the repeatable annotations will be looked up
* via {@link java.lang.annotation.Repeatable @Repeatable}.
* <p>{@link AliasFor @AliasFor} semantics are fully supported, both within
* a single annotation and within the annotation hierarchy.
* <p>This method follows <em>get semantics</em> as described in the
* {@linkplain AnnotatedElementUtils class-level javadoc}.
* @param element the annotated element (never {@code null})
* @param annotationType the annotation type to find (never {@code null})
* @return the set of all merged repeatable {@code Annotations} found,
* or an empty set if none were found
* @throws IllegalArgumentException if the {@code element} or {@code annotationType}
* is {@code null}, or if the container type cannot be resolved
* @since 4.3
* @see #getMergedAnnotation(AnnotatedElement, Class)
* @see #getAllMergedAnnotations(AnnotatedElement, Class)
* @see #getMergedRepeatableAnnotations(AnnotatedElement, Class, Class)
*/
public static <A extends Annotation> Set<A> getMergedRepeatableAnnotations(
AnnotatedElement element, Class<A> annotationType) {
return getMergedRepeatableAnnotations(element, annotationType, null);
}
/**
* Get all <em>repeatable annotations</em> of the specified {@code annotationType}
* within the annotation hierarchy <em>above</em> the supplied {@code element};
* and for each annotation found, merge that annotation's attributes with
* <em>matching</em> attributes from annotations in lower levels of the annotation
* hierarchy and synthesize the results back into an annotation of the specified
* {@code annotationType}.
* <p>{@link AliasFor @AliasFor} semantics are fully supported, both within
* a single annotation and within the annotation hierarchy.
* <p>This method follows <em>get semantics</em> as described in the
* {@linkplain AnnotatedElementUtils class-level javadoc}.
* <p><strong>WARNING</strong>: if the supplied {@code containerType} is not
* {@code null}, the search will be restricted to supporting only repeatable
* annotations whose container is the supplied {@code containerType}. This
* prevents the search from finding repeatable annotations declared as
* meta-annotations on other types of repeatable annotations. If you need to
* support such a use case, favor {@link #getMergedRepeatableAnnotations(AnnotatedElement, Class)}
* over this method or alternatively use the {@link MergedAnnotations} API
* directly in conjunction with {@link RepeatableContainers} that are
* {@linkplain RepeatableContainers#plus(Class, Class) composed} to support
* multiple repeatable annotation types — for example:
* <pre class="code">
* RepeatableContainers.standardRepeatables()
* .plus(MyRepeatable1.class, MyContainer1.class)
* .plus(MyRepeatable2.class, MyContainer2.class);</pre>
* @param element the annotated element (never {@code null})
* @param annotationType the repeatable annotation type to find (never {@code null})
* @param containerType the type of the container that holds the repeatable
* annotations; may be {@code null} if the container type should be looked up
* via {@link java.lang.annotation.Repeatable @Repeatable}
* @return the set of all merged repeatable {@code Annotations} found,
* or an empty set if none were found
* @throws IllegalArgumentException if the {@code element} or {@code annotationType}
* is {@code null}, or if the container type cannot be resolved
* @throws AnnotationConfigurationException if the supplied {@code containerType}
* is not a valid container annotation for the supplied {@code annotationType}
* @since 4.3
* @see #getMergedAnnotation(AnnotatedElement, Class)
* @see #getAllMergedAnnotations(AnnotatedElement, Class)
*/
public static <A extends Annotation> Set<A> getMergedRepeatableAnnotations(
AnnotatedElement element, Class<A> annotationType,
@Nullable Class<? extends Annotation> containerType) {
return getRepeatableAnnotations(element, annotationType, containerType)
.stream(annotationType)
.collect(MergedAnnotationCollectors.toAnnotationSet());
}
/**
* Get the annotation attributes of <strong>all</strong> annotations of the specified
* {@code annotationName} in the annotation hierarchy above the supplied
* {@link AnnotatedElement} and store the results in a {@link MultiValueMap}.
* <p>Note: in contrast to {@link #getMergedAnnotationAttributes(AnnotatedElement, String)},
* this method does <em>not</em> support attribute overrides.
* <p>This method follows <em>get semantics</em> as described in the
* {@linkplain AnnotatedElementUtils class-level javadoc}.
* @param element the annotated element
* @param annotationName the fully qualified
|
name
|
java
|
square__retrofit
|
samples/src/main/java/com/example/retrofit/AnnotatedConverters.java
|
{
"start": 3736,
"end": 5881
}
|
interface ____ {
@GET("/")
@Moshi
Call<Library> exampleMoshi();
@GET("/")
@Gson
Call<Library> exampleGson();
@GET("/")
@SimpleXml
Call<Library> exampleSimpleXml();
@GET("/")
Call<Library> exampleDefault();
}
public static void main(String... args) throws IOException {
MockWebServer server = new MockWebServer();
server.start();
server.enqueue(new MockResponse().setBody("{\"name\": \"Moshi\"}"));
server.enqueue(new MockResponse().setBody("{\"name\": \"Gson\"}"));
server.enqueue(new MockResponse().setBody("<user name=\"SimpleXML\"/>"));
server.enqueue(new MockResponse().setBody("{\"name\": \"Gson\"}"));
com.squareup.moshi.Moshi moshi = new com.squareup.moshi.Moshi.Builder().build();
com.google.gson.Gson gson = new GsonBuilder().create();
MoshiConverterFactory moshiConverterFactory = MoshiConverterFactory.create(moshi);
GsonConverterFactory gsonConverterFactory = GsonConverterFactory.create(gson);
SimpleXmlConverterFactory simpleXmlConverterFactory = SimpleXmlConverterFactory.create();
Retrofit retrofit =
new Retrofit.Builder()
.baseUrl(server.url("/"))
.addConverterFactory(
new AnnotatedConverterFactory.Builder()
.add(Moshi.class, moshiConverterFactory)
.add(Gson.class, gsonConverterFactory)
.add(SimpleXml.class, simpleXmlConverterFactory)
.build())
.addConverterFactory(gsonConverterFactory)
.build();
Service service = retrofit.create(Service.class);
Library library1 = service.exampleMoshi().execute().body();
System.out.println("Library 1: " + library1.name);
Library library2 = service.exampleGson().execute().body();
System.out.println("Library 2: " + library2.name);
Library library3 = service.exampleSimpleXml().execute().body();
System.out.println("Library 3: " + library3.name);
Library library4 = service.exampleDefault().execute().body();
System.out.println("Library 4: " + library4.name);
server.shutdown();
}
}
|
Service
|
java
|
spring-projects__spring-framework
|
spring-core/src/test/java/org/springframework/core/convert/support/StreamConverterTests.java
|
{
"start": 6284,
"end": 6513
}
|
class ____ {
public List<String> listOfStrings;
public Long[] arrayOfLongs;
public Stream<Integer> streamOfIntegers;
public Stream<Boolean> streamOfBooleans;
public Stream rawStream;
public List rawList;
}
}
|
Types
|
java
|
quarkusio__quarkus
|
extensions/spring-boot-properties/deployment/src/main/java/io/quarkus/spring/boot/properties/deployment/YamlListObjectHandler.java
|
{
"start": 2684,
"end": 4207
}
|
class ____ has a List field and getters and setter which have the proper generic type
// this way SnakeYaml can properly populate the field
MethodDescriptor getterDesc;
try (ClassCreator cc = ClassCreator.builder().classOutput(classOutput)
.className(wrapperClassName)
.build()) {
FieldDescriptor fieldDesc = cc.getFieldCreator(configName, List.class).setModifiers(Modifier.PRIVATE)
.getFieldDescriptor();
MethodCreator getter = cc.getMethodCreator(JavaBeanUtil.getGetterName(configName, classInfo.name()), List.class);
getter.setSignature(String.format("()Ljava/util/List<L%s;>;", forSignature(classInfo)));
getterDesc = getter.getMethodDescriptor();
getter.returnValue(getter.readInstanceField(fieldDesc, getter.getThis()));
MethodCreator setter = cc.getMethodCreator(JavaBeanUtil.getSetterName(configName), void.class, List.class);
setter.setSignature(String.format("(Ljava/util/List<L%s;>;)V", forSignature(classInfo)));
setter.writeInstanceField(fieldDesc, setter.getThis(), setter.getMethodParam(0));
setter.returnValue(null);
}
// we always generate getters and setters, so reflection is only needed on the methods
reflectiveClasses.produce(ReflectiveClassBuildItem.builder(wrapperClassName).methods().build());
// generate an MP-Config converter which looks something like this
// public
|
that
|
java
|
google__auto
|
factory/src/main/java/com/google/auto/factory/processor/Elements2.java
|
{
"start": 1391,
"end": 2705
}
|
class ____ {
private Elements2() {}
static ImmutableSet<ExecutableElement> getConstructors(TypeElement type) {
checkNotNull(type);
checkArgument(type.getKind() == CLASS);
return ImmutableSet.copyOf(ElementFilter.constructorsIn(type.getEnclosedElements()));
}
static boolean isValidSupertypeForClass(TypeElement type) {
if (!type.getKind().equals(CLASS)) {
return false;
}
if (type.getModifiers().contains(FINAL)) {
return false;
}
if (!type.getEnclosingElement().getKind().equals(PACKAGE)
&& !type.getModifiers().contains(STATIC)) {
return false;
}
if (type.getSimpleName().length() == 0) {
return false;
}
return true;
}
/**
* Given an executable element in a supertype, returns its ExecutableType when it is viewed as a
* member of a subtype.
*/
static ExecutableType getExecutableElementAsMemberOf(
Types types, ExecutableElement executableElement, TypeElement subTypeElement) {
checkNotNull(types);
checkNotNull(executableElement);
checkNotNull(subTypeElement);
TypeMirror subTypeMirror = subTypeElement.asType();
if (!subTypeMirror.getKind().equals(TypeKind.DECLARED)) {
throw new IllegalStateException(
"Expected subTypeElement.asType() to return a class/
|
Elements2
|
java
|
alibaba__druid
|
core/src/main/java/com/alibaba/druid/sql/ast/expr/SQLAggregateExpr.java
|
{
"start": 934,
"end": 10102
}
|
class ____ extends SQLMethodInvokeExpr implements Serializable, SQLReplaceable {
private static final long serialVersionUID = 1L;
protected SQLAggregateOption option;
protected SQLKeep keep;
protected SQLExpr filter;
protected SQLOver over;
protected SQLName overRef;
protected SQLOrderBy orderBy;
protected SQLExpr limit;
protected boolean withinGroup;
// For BigQuery scenario.
protected boolean ignoreNulls;
// For BigQuery scenario.
protected boolean respectNulls;
public SQLAggregateExpr(String methodName) {
this.methodName = methodName;
this.ignoreNulls = false;
this.respectNulls = false;
}
public SQLAggregateExpr(String methodName, SQLAggregateOption option) {
this.methodName = methodName;
this.option = option;
this.ignoreNulls = false;
this.respectNulls = false;
}
public SQLAggregateExpr(String methodName, SQLAggregateOption option, SQLExpr... arguments) {
this.methodName = methodName;
this.option = option;
this.ignoreNulls = false;
this.respectNulls = false;
if (arguments != null) {
for (SQLExpr argument : arguments) {
if (argument != null) {
addArgument(argument);
}
}
}
}
public SQLOrderBy getOrderBy() {
return orderBy;
}
public void setOrderBy(SQLOrderBy x) {
if (x != null) {
x.setParent(this);
}
this.orderBy = x;
}
public SQLExpr getLimit() {
return limit;
}
public void setLimit(SQLExpr x) {
if (x != null) {
x.setParent(this);
}
this.limit = x;
}
public SQLAggregateOption getOption() {
return this.option;
}
public void setOption(SQLAggregateOption option) {
this.option = option;
}
public boolean isDistinct() {
return option == SQLAggregateOption.DISTINCT;
}
public SQLOver getOver() {
return over;
}
public void setOver(SQLOver x) {
if (x != null) {
x.setParent(this);
}
this.over = x;
}
public SQLName getOverRef() {
return overRef;
}
public void setOverRef(SQLName x) {
if (x != null) {
x.setParent(this);
}
this.overRef = x;
}
public SQLKeep getKeep() {
return keep;
}
public void setKeep(SQLKeep keep) {
if (keep != null) {
keep.setParent(this);
}
this.keep = keep;
}
public boolean isWithinGroup() {
return withinGroup;
}
public void setWithinGroup(boolean withinGroup) {
this.withinGroup = withinGroup;
}
//为了兼容之前的逻辑
@Deprecated
public SQLOrderBy getWithinGroup() {
return orderBy;
}
public boolean isIgnoreNulls() {
return this.ignoreNulls;
}
public boolean getIgnoreNulls() {
return this.ignoreNulls;
}
public boolean isRespectNulls() {
return this.respectNulls;
}
public void setIgnoreNulls(boolean ignoreNulls) {
this.ignoreNulls = ignoreNulls;
}
public void setRespectNulls(boolean respectNulls) {
this.respectNulls = respectNulls;
}
public String toString() {
return SQLUtils.toSQLString(this);
}
@Override
protected void accept0(SQLASTVisitor v) {
if (v.visit(this)) {
if (this.owner != null) {
this.owner.accept(v);
}
for (SQLExpr arg : this.arguments) {
if (arg != null) {
arg.accept(v);
}
}
if (this.keep != null) {
this.keep.accept(v);
}
if (this.filter != null) {
this.filter.accept(v);
}
if (this.over != null) {
this.over.accept(v);
}
if (this.overRef != null) {
this.overRef.accept(v);
}
acceptChild(v, orderBy);
acceptChild(v, limit);
}
v.endVisit(this);
}
@Override
public List getChildren() {
List<SQLObject> children = new ArrayList<SQLObject>();
children.addAll(this.arguments);
if (keep != null) {
children.add(this.keep);
}
if (over != null) {
children.add(over);
}
if (orderBy != null) {
children.add(orderBy);
}
return children;
}
public SQLExpr getFilter() {
return filter;
}
public void setFilter(SQLExpr x) {
if (x != null) {
x.setParent(this);
}
this.filter = x;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
if (!super.equals(o)) {
return false;
}
SQLAggregateExpr that = (SQLAggregateExpr) o;
if (option != that.option) {
return false;
}
if (keep != null ? !keep.equals(that.keep) : that.keep != null) {
return false;
}
if (filter != null ? !filter.equals(that.filter) : that.filter != null) {
return false;
}
if (over != null ? !over.equals(that.over) : that.over != null) {
return false;
}
if (overRef != null ? !overRef.equals(that.overRef) : that.overRef != null) {
return false;
}
if (orderBy != null ? !orderBy.equals(that.orderBy) : that.orderBy != null) {
return false;
}
return ignoreNulls == that.ignoreNulls && respectNulls == that.respectNulls;
}
@Override
public int hashCode() {
int result = super.hashCode();
result = 31 * result + (option != null ? option.hashCode() : 0);
result = 31 * result + (keep != null ? keep.hashCode() : 0);
result = 31 * result + (filter != null ? filter.hashCode() : 0);
result = 31 * result + (over != null ? over.hashCode() : 0);
result = 31 * result + (overRef != null ? overRef.hashCode() : 0);
result = 31 * result + (orderBy != null ? orderBy.hashCode() : 0);
return result;
}
public SQLAggregateExpr clone() {
SQLAggregateExpr x = new SQLAggregateExpr(methodName);
x.option = option;
for (SQLExpr arg : arguments) {
x.addArgument(arg.clone());
}
if (keep != null) {
x.setKeep(keep.clone());
}
if (over != null) {
x.setOver(over.clone());
}
if (overRef != null) {
x.setOverRef(overRef.clone());
}
if (orderBy != null) {
x.setOrderBy(orderBy.clone());
}
// 修复listagg wthin group语句转换错误的问题
// https://github.com/alibaba/druid/issues/5930
x.setWithinGroup(withinGroup);
x.ignoreNulls = ignoreNulls;
if (attributes != null) {
for (Map.Entry<String, Object> entry : attributes.entrySet()) {
String key = entry.getKey();
Object value = entry.getValue();
if (value instanceof SQLObject) {
value = ((SQLObject) value).clone();
}
x.putAttribute(key, value);
}
}
return x;
}
public SQLDataType computeDataType() {
if (resolvedReturnDataType != null) {
return resolvedReturnDataType;
}
long hash = methodNameHashCode64();
if (hash == FnvHash.Constants.COUNT
|| hash == FnvHash.Constants.ROW_NUMBER) {
return SQLIntegerExpr.DATA_TYPE;
}
if (arguments.size() > 0) {
SQLDataType dataType = arguments.get(0)
.computeDataType();
if (dataType != null
&& (dataType.nameHashCode64() != FnvHash.Constants.BOOLEAN)) {
return dataType;
}
}
if (hash == FnvHash.Constants.SUM) {
return SQLNumberExpr.DATA_TYPE_DOUBLE;
}
if (hash == FnvHash.Constants.WM_CONCAT
|| hash == FnvHash.Constants.GROUP_CONCAT) {
return SQLCharExpr.DATA_TYPE;
}
return null;
}
public boolean replace(SQLExpr expr, SQLExpr target) {
if (target == null) {
return false;
}
for (int i = 0; i < arguments.size(); ++i) {
if (arguments.get(i) == expr) {
arguments.set(i, target);
target.setParent(this);
return true;
}
}
if (overRef == expr) {
setOverRef((SQLName) target);
return true;
}
if (filter != null) {
filter = target;
target.setParent(this);
}
return false;
}
}
|
SQLAggregateExpr
|
java
|
quarkusio__quarkus
|
independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/cdi/bcextensions/SyntheticObserverOfParameterizedTypeTest.java
|
{
"start": 1219,
"end": 1740
}
|
class ____ {
@RegisterExtension
public ArcTestContainer container = ArcTestContainer.builder()
.beanClasses(MyService.class)
.buildCompatibleExtensions(new MyExtension())
.build();
@Test
public void test() {
MyService myService = Arc.container().select(MyService.class).get();
myService.fireEvent();
assertIterableEquals(List.of("Hello World", "Hello again"), MyObserver.observed);
}
public static
|
SyntheticObserverOfParameterizedTypeTest
|
java
|
apache__camel
|
components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/spring/xml/ConfiguredRouteBuilder.java
|
{
"start": 938,
"end": 1528
}
|
class ____ extends RouteBuilder {
private String fromUri;
private String toUri;
@Override
public void configure() throws Exception {
ObjectHelper.notNull(fromUri, "fromUri");
ObjectHelper.notNull(toUri, "toUri");
from(fromUri).to(toUri);
}
public String getFromUri() {
return fromUri;
}
public void setFromUri(String fromUri) {
this.fromUri = fromUri;
}
public String getToUri() {
return toUri;
}
public void setToUri(String toUri) {
this.toUri = toUri;
}
}
|
ConfiguredRouteBuilder
|
java
|
assertj__assertj-core
|
assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/api/recursive/comparison/fields/RecursiveComparisonAssert_isEqualTo_withComparatorsForFieldMatchingRegexes_Test.java
|
{
"start": 1867,
"end": 10382
}
|
class ____
extends WithComparingFieldsIntrospectionStrategyBaseTest {
@ParameterizedTest(name = "{4}: actual={0} / expected={1} - fieldRegexes {3}")
@MethodSource("recursivelyEqualObjectsWhenUsingFieldComparators")
void should_pass_with_registered_BiPredicates_by_fields_matching_regexes(Object actual, Object expected,
BiPredicate<Object, Object> equals,
String[] fieldRegexes, String testDescription) {
assertThat(actual).usingRecursiveComparison(recursiveComparisonConfiguration)
.withEqualsForFieldsMatchingRegexes(equals, fieldRegexes)
.isEqualTo(expected);
}
private static Stream<Arguments> recursivelyEqualObjectsWhenUsingFieldComparators() {
Person person1 = new Person("John", "Doe");
person1.home.address.number = 1;
Person person2 = new Person("JoHN", "DoE");
person2.home.address.number = 1;
Person person3 = new Person("John", "Doe");
person3.home.address.number = 1;
Person person4 = new Person("Jack", "Doe");
person4.home.address.number = 2;
Person person5 = new Person("John", "Doe");
person5.home.address.number = 1;
person5.dateOfBirth = new Date(123);
person5.neighbour = new Person("John", "Doe");
person5.neighbour.home.address.number = 123;
Person person6 = new Person("John", "Doe");
person6.home.address.number = 1;
person6.dateOfBirth = new Date(123);
person6.neighbour = new Person("Jim", "Doe");
person6.neighbour.home.address.number = 456;
Person person7 = new Person("John", "Doe");
person7.title = "Sir";
Person person8 = new Person("Jack", "Dough");
person8.title = "Mr";
return Stream.of(arguments(person1, person2, STRING_EQUALS, array("name.*name"), "same data except case for strings"),
arguments(person3, person4, ALWAYS_EQUALS, array(".*first..me", "home.address.number"),
"same data except for address number and first name"),
// any neighbour differences should be ignored as we compare persons with ALWAYS_EQUALS
arguments(person5, person6, ALWAYS_EQUALS, array("neigh.*"), "same data except for neighbour"),
arguments(person7, person8, ALWAYS_EQUALS, array(".*stname", "t.tle"), "same data except for strings"));
}
@Test
void should_fail_when_actual_differs_from_expected_when_using_BiPredicates_by_fields_matching_regexes() {
// GIVEN
Person actual = new Person("John", "Doe");
actual.home.address.number = 1;
actual.dateOfBirth = new Date(123);
actual.neighbour = new Person("Jack", "Doe");
actual.neighbour.home.address.number = 123;
// actually a clone of actual
Person expected = new Person("John", "Doe");
expected.home.address.number = 1;
expected.dateOfBirth = new Date(123);
expected.neighbour = new Person("Jack", "Doe");
expected.neighbour.home.address.number = 123;
// register BiPredicates for some fields that will fail the comparison
recursiveComparisonConfiguration.registerEqualsForFieldsMatchingRegexes(ALWAYS_DIFFERENT, "dateOf.*", "neighbour.ho.*");
// WHEN/THEN
ComparisonDifference dateOfBirthDiff = diff("dateOfBirth", actual.dateOfBirth, expected.dateOfBirth);
ComparisonDifference neighbourAddressDiff = diff("neighbour.home", actual.neighbour.home, expected.neighbour.home);
compareRecursivelyFailsWithDifferences(actual, expected, dateOfBirthDiff, neighbourAddressDiff);
}
@Test
void should_be_able_to_compare_objects_recursively_using_some_precision_for_numerical_fields() {
// GIVEN
Giant goliath = new Giant("Goliath", 3.0);
Giant goliathTwin = new Giant("Goliath", 3.1);
// THEN
then(goliath).usingRecursiveComparison(recursiveComparisonConfiguration)
.withEqualsForFieldsMatchingRegexes((Double d1, Double d2) -> Math.abs(d1 - d2) <= 0.2, "hei...")
.isEqualTo(goliathTwin);
}
@Test
void should_be_able_to_compare_objects_recursively_using_given_BiPredicate_for_specified_nested_field() {
// GIVEN
Giant goliath = new Giant("Goliath", 3.0);
goliath.home.address.number = 1;
Giant goliathTwin = new Giant("Goliath", 3.1);
goliathTwin.home.address.number = 5;
// THEN
then(goliath).usingRecursiveComparison(recursiveComparisonConfiguration)
.withEqualsForFieldsMatchingRegexes((Double d1, Double d2) -> Math.abs(d1 - d2) <= 0.2, "height")
.withEqualsForFieldsMatchingRegexes((Integer d1, Integer d2) -> d1 - d2 <= 10, "home.address.number")
.isEqualTo(goliathTwin);
}
@Test
void should_handle_null_field_with_BiPredicates_by_fields_matching_regexes() {
// GIVEN
Patient actual = new Patient(null);
Patient expected = new Patient(new Timestamp(3L));
// THEN
then(actual).usingRecursiveComparison(recursiveComparisonConfiguration)
.withEqualsForFieldsMatchingRegexes(ALWAYS_EQUALS, "dateOfBirth")
.isEqualTo(expected);
}
@Test
void field_BiPredicate_should_take_precedence_over_type_comparator_whatever_their_order_of_registration() {
// GIVEN
Patient actual = new Patient(new Timestamp(1L));
Patient expected = new Patient(new Timestamp(3L));
// THEN
then(actual).usingRecursiveComparison(recursiveComparisonConfiguration)
.withComparatorForType(NEVER_EQUALS, Timestamp.class)
.withEqualsForFieldsMatchingRegexes(ALWAYS_EQUALS, "dateOfBirth")
.isEqualTo(expected);
then(actual).usingRecursiveComparison(recursiveComparisonConfiguration)
.withEqualsForFieldsMatchingRegexes(ALWAYS_EQUALS, "dateOfBirth")
.withComparatorForType(NEVER_EQUALS, Timestamp.class)
.isEqualTo(expected);
}
@Test
void exact_field_location_comparators_should_take_precedence_over_regexes_BiPredicates_matching_field_location_whatever_their_order_of_registration() {
// GIVEN
Patient actual = new Patient(new Timestamp(1L));
Patient expected = new Patient(new Timestamp(3L));
// THEN
then(actual).usingRecursiveComparison(recursiveComparisonConfiguration)
.withEqualsForFields(ALWAYS_EQUALS, "dateOfBirth")
.withEqualsForFieldsMatchingRegexes(ALWAYS_DIFFERENT, "dateOfB.*")
.isEqualTo(expected);
then(actual).usingRecursiveComparison(recursiveComparisonConfiguration)
.withEqualsForFieldsMatchingRegexes(ALWAYS_DIFFERENT, "dateOfBi.*")
.withEqualsForFields(ALWAYS_EQUALS, "dateOfBirth")
.isEqualTo(expected);
}
@Test
void biPredicates_matching_field_location_take_precedence_over_overridden_equals() {
// GIVEN
Person actual = new Person("Fred", "Flint");
actual.neighbour = new AlwaysEqualPerson();
actual.neighbour.name = new Name("Omar", "Sy");
Person expected = new Person("Fred", "Flint");
expected.neighbour = new AlwaysEqualPerson();
expected.neighbour.name = new Name("Omar2", "Sy");
// THEN
then(actual).usingRecursiveComparison(recursiveComparisonConfiguration)
.withEqualsForFieldsMatchingRegexes(ALWAYS_EQUALS, "neighbour") // fails if commented
.usingOverriddenEquals()
.isEqualTo(expected);
}
@Test
void should_use_custom_equal_over_reference_comparison() {
// GIVEN
Foo actual = new Foo(1);
Foo expected = new Foo(1);
BiPredicate<Integer, Integer> greaterThan = (i1, i2) -> Objects.equals(i1, i2 + 1);
// WHEN
var assertionError = expectAssertionError(() -> assertThat(actual).usingRecursiveComparison(recursiveComparisonConfiguration)
.withEqualsForFieldsMatchingRegexes(greaterThan,
"b..")
.isEqualTo(expected));
// THEN
then(assertionError).hasMessageContainingAll("- the fields matching these regexes were compared with the following comparators",
" - [b..] -> ");
}
private static
|
RecursiveComparisonAssert_isEqualTo_withComparatorsForFieldMatchingRegexes_Test
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/version/UserVersionTest.java
|
{
"start": 1227,
"end": 1876
}
|
class ____ {
@AfterEach
void dropTestData(SessionFactoryScope factoryScope) {
factoryScope.dropData();
}
@Test
public void testIt(SessionFactoryScope factoryScope) {
factoryScope.inTransaction(session -> {
session.persist( new TestEntity( 1 ) );
} );
factoryScope.inTransaction( session -> {
TestEntity testEntity = session.find( TestEntity.class, 1 );
assertThat( testEntity.getVersion().getRev() ).isEqualTo( CustomVersionUserVersionType.versionValue );
} );
}
@Entity(name = "TestEntity")
@TypeRegistration(basicClass = CustomVersion.class, userType = CustomVersionUserVersionType.class)
public static
|
UserVersionTest
|
java
|
apache__flink
|
flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/plan/nodes/exec/stream/DeletesByKeyPrograms.java
|
{
"start": 1287,
"end": 19342
}
|
class ____ {
/**
* Tests a simple INSERT INTO SELECT scenario where ChangelogNormalize can be eliminated since
* we don't need UPDATE_BEFORE, and we have key information for all changes.
*/
public static final TableTestProgram INSERT_SELECT_DELETE_BY_KEY_DELETE_BY_KEY =
TableTestProgram.of(
"select-delete-on-key-to-delete-on-key",
"No ChangelogNormalize: validates results when querying source with deletes by key"
+ " only, writing to sink supporting deletes by key only, which"
+ " is a case where ChangelogNormalize can be eliminated")
.setupTableSource(
SourceTestStep.newBuilder("source_t")
.addSchema(
"id INT PRIMARY KEY NOT ENFORCED",
"name STRING",
"`value` INT")
.addOption("changelog-mode", "I,UA,D")
.addOption("source.produces-delete-by-key", "true")
.producedValues(
Row.ofKind(RowKind.INSERT, 1, "Alice", 10),
Row.ofKind(RowKind.INSERT, 2, "Bob", 20),
// Delete by key
Row.ofKind(RowKind.DELETE, 1, null, null),
// Update after only
Row.ofKind(RowKind.UPDATE_AFTER, 2, "Bob", 30))
.build())
.setupTableSink(
SinkTestStep.newBuilder("sink_t")
.addSchema(
"id INT PRIMARY KEY NOT ENFORCED",
"name STRING",
"`value` INT")
.addOption(
"changelog-mode",
"I,UA,D") // Insert, UpdateAfter, Delete
.addOption("sink.supports-delete-by-key", "true")
.consumedValues(
"+I[1, Alice, 10]",
"+I[2, Bob, 20]",
"-D[1, null, null]",
"+U[2, Bob, 30]")
.build())
.runSql("INSERT INTO sink_t SELECT id, name, `value` FROM source_t")
.build();
public static final TableTestProgram INSERT_SELECT_DELETE_BY_KEY_DELETE_BY_KEY_WITH_PROJECTION =
TableTestProgram.of(
"select-delete-on-key-to-delete-on-key-with-projection",
"No ChangelogNormalize: validates results when querying source with deletes by key"
+ " only, writing to sink supporting deletes by key only with a"
+ "projection, which is a case where ChangelogNormalize can be"
+ " eliminated")
.setupTableSource(
SourceTestStep.newBuilder("source_t")
.addSchema(
"id INT PRIMARY KEY NOT ENFORCED",
"name STRING NOT NULL",
"`value` INT NOT NULL")
.addOption("changelog-mode", "I,UA,D")
.addOption("source.produces-delete-by-key", "true")
.producedValues(
Row.ofKind(RowKind.INSERT, 1, "Alice", 10),
Row.ofKind(RowKind.INSERT, 2, "Bob", 20),
// Delete by key
Row.ofKind(RowKind.DELETE, 1, null, null),
// Update after only
Row.ofKind(RowKind.UPDATE_AFTER, 2, "Bob", 30))
.build())
.setupTableSink(
SinkTestStep.newBuilder("sink_t")
.addSchema(
"id INT PRIMARY KEY NOT ENFORCED",
"name STRING",
"`value` INT")
.addOption(
"changelog-mode",
"I,UA,D") // Insert, UpdateAfter, Delete
.addOption("sink.supports-delete-by-key", "true")
.consumedValues(
"+I[1, Alice, 12]",
"+I[2, Bob, 22]",
"-D[1, , -1]",
"+U[2, Bob, 32]")
.build())
.runSql("INSERT INTO sink_t SELECT id, name, `value` + 2 FROM source_t")
.build();
public static final TableTestProgram INSERT_SELECT_DELETE_BY_KEY_FULL_DELETE =
TableTestProgram.of(
"select-delete-on-key-to-full-delete",
"ChangelogNormalize: validates results when querying source with deletes by key"
+ " only, writing to sink supporting requiring full deletes, "
+ "which is a case where ChangelogNormalize stays")
.setupTableSource(
SourceTestStep.newBuilder("source_t")
.addSchema(
"id INT PRIMARY KEY NOT ENFORCED",
"name STRING",
"`value` INT")
.addOption("changelog-mode", "I,UA,D")
.addOption("source.produces-delete-by-key", "true")
.producedValues(
Row.ofKind(RowKind.INSERT, 1, "Alice", 10),
Row.ofKind(RowKind.INSERT, 2, "Bob", 20),
// Delete by key
Row.ofKind(RowKind.DELETE, 1, null, null),
// Update after only
Row.ofKind(RowKind.UPDATE_AFTER, 2, "Bob", 30))
.build())
.setupTableSink(
SinkTestStep.newBuilder("sink_t")
.addSchema(
"id INT PRIMARY KEY NOT ENFORCED",
"name STRING",
"`value` INT")
.addOption("changelog-mode", "I,UA,D")
.addOption("sink.supports-delete-by-key", "false")
.consumedValues(
"+I[1, Alice, 10]",
"+I[2, Bob, 20]",
"-D[1, Alice, 10]",
"+U[2, Bob, 30]")
.build())
.runSql("INSERT INTO sink_t SELECT id, name, `value` FROM source_t")
.build();
public static final TableTestProgram INSERT_SELECT_FULL_DELETE_FULL_DELETE =
TableTestProgram.of(
"select-full-delete-to-full-delete",
"No ChangelogNormalize: validates results when querying source with full deletes, "
+ "writing to sink requiring full deletes, which is a case"
+ " where ChangelogNormalize can be eliminated")
.setupTableSource(
SourceTestStep.newBuilder("source_t")
.addSchema(
"id INT PRIMARY KEY NOT ENFORCED",
"name STRING",
"`value` INT")
.addOption("changelog-mode", "I,UA,D")
.addOption("source.produces-delete-by-key", "false")
.producedValues(
Row.ofKind(RowKind.INSERT, 1, "Alice", 10),
Row.ofKind(RowKind.INSERT, 2, "Bob", 20),
// Delete by key
Row.ofKind(RowKind.DELETE, 1, "Alice", 10),
// Update after only
Row.ofKind(RowKind.UPDATE_AFTER, 2, "Bob", 30))
.build())
.setupTableSink(
SinkTestStep.newBuilder("sink_t")
.addSchema(
"id INT PRIMARY KEY NOT ENFORCED",
"name STRING",
"`value` INT")
.addOption("changelog-mode", "I,UA,D")
.addOption("sink.supports-delete-by-key", "false")
.consumedValues(
"+I[1, Alice, 10]",
"+I[2, Bob, 20]",
"-D[1, Alice, 10]",
"+U[2, Bob, 30]")
.build())
.runSql("INSERT INTO sink_t SELECT id, name, `value` FROM source_t")
.build();
public static final TableTestProgram JOIN_INTO_FULL_DELETES =
TableTestProgram.of(
"join-to-full-delete",
"ChangelogNormalize: validates results when joining sources with deletes by key"
+ " only, writing to sink requiring full deletes, which"
+ " is a case where ChangelogNormalize stays")
.setupTableSource(
SourceTestStep.newBuilder("left_t")
.addSchema("id INT PRIMARY KEY NOT ENFORCED", "`value` INT")
.addOption("changelog-mode", "I,UA,D")
.addOption("source.produces-delete-by-key", "true")
.producedValues(
Row.ofKind(RowKind.INSERT, 1, 10),
Row.ofKind(RowKind.INSERT, 2, 20),
Row.ofKind(RowKind.INSERT, 3, 30),
// Delete by key
Row.ofKind(RowKind.DELETE, 1, null),
// Update after only
Row.ofKind(RowKind.UPDATE_AFTER, 3, 40))
.build())
.setupTableSource(
SourceTestStep.newBuilder("right_t")
.addSchema("id INT PRIMARY KEY NOT ENFORCED", "name STRING")
.addOption("changelog-mode", "I,UA,D")
.addOption("source.produces-delete-by-key", "true")
.producedValues(
Row.ofKind(RowKind.INSERT, 1, "Alice"),
Row.ofKind(RowKind.INSERT, 2, "Bob"),
Row.ofKind(RowKind.INSERT, 3, "Emily"),
// Delete by key
Row.ofKind(RowKind.DELETE, 1, null),
// Update after only
Row.ofKind(RowKind.UPDATE_AFTER, 2, "BOB"))
.build())
.setupTableSink(
SinkTestStep.newBuilder("sink_t")
.addSchema(
"id INT PRIMARY KEY NOT ENFORCED",
"name STRING",
"`value` INT")
.addOption("changelog-mode", "I,UA,D")
.addOption("sink.supports-delete-by-key", "false")
.consumedValues(
"+I[1, Alice, 10]",
"+I[2, Bob, 20]",
"+I[3, Emily, 30]",
"-D[1, Alice, 10]",
"+U[3, Emily, 40]",
"+U[2, BOB, 20]")
.build())
.runSql(
"INSERT INTO sink_t SELECT l.id, r.name, l.`value` FROM left_t l JOIN right_t r ON l.id = r.id")
.build();
public static final TableTestProgram JOIN_INTO_DELETES_BY_KEY =
TableTestProgram.of(
"join-to-delete-on-key",
"No ChangelogNormalize: validates results when joining sources with deletes by key"
+ " only, writing to sink supporting deletes by key, which"
+ " is a case where ChangelogNormalize can be removed")
.setupTableSource(
SourceTestStep.newBuilder("left_t")
.addSchema("id INT PRIMARY KEY NOT ENFORCED", "`value` INT")
.addOption("changelog-mode", "I,UA,D")
.addOption("source.produces-delete-by-key", "true")
.producedValues(
Row.ofKind(RowKind.INSERT, 1, 10),
Row.ofKind(RowKind.INSERT, 2, 20),
Row.ofKind(RowKind.INSERT, 3, 30),
// Delete by key
Row.ofKind(RowKind.DELETE, 1, null),
// Update after only
Row.ofKind(RowKind.UPDATE_AFTER, 3, 40))
.build())
.setupTableSource(
SourceTestStep.newBuilder("right_t")
.addSchema("id INT PRIMARY KEY NOT ENFORCED", "name STRING")
.addOption("changelog-mode", "I,UA,D")
.addOption("source.produces-delete-by-key", "true")
.producedValues(
Row.ofKind(RowKind.INSERT, 1, "Alice"),
Row.ofKind(RowKind.INSERT, 2, "Bob"),
Row.ofKind(RowKind.INSERT, 3, "Emily"),
// Delete by key
Row.ofKind(RowKind.DELETE, 1, null),
// Update after only
Row.ofKind(RowKind.UPDATE_AFTER, 2, "BOB"))
.build())
.setupTableSink(
SinkTestStep.newBuilder("sink_t")
.addSchema(
"id INT PRIMARY KEY NOT ENFORCED",
"name STRING",
"`value` INT")
.addOption("changelog-mode", "I,UA,D")
.addOption("sink.supports-delete-by-key", "true")
.consumedValues(
"+I[1, Alice, 10]",
"+I[2, Bob, 20]",
"+I[3, Emily, 30]",
"-D[1, Alice, null]",
"+U[3, Emily, 40]",
"+U[2, BOB, 20]")
.build())
.runSql(
"INSERT INTO sink_t SELECT l.id, r.name, l.`value` FROM left_t l JOIN right_t r ON l.id = r.id")
.build();
private DeletesByKeyPrograms() {}
}
|
DeletesByKeyPrograms
|
java
|
apache__camel
|
components/camel-snmp/src/main/java/org/apache/camel/component/snmp/SnmpPrivacyProtocolType.java
|
{
"start": 852,
"end": 942
}
|
enum ____ {
DES,
TRIDES,
AES128,
AES192,
AES256;
}
|
SnmpPrivacyProtocolType
|
java
|
spring-projects__spring-security
|
config/src/main/java/org/springframework/security/config/Customizer.java
|
{
"start": 688,
"end": 888
}
|
interface ____ accepts a single input argument and returns no result.
*
* @param <T> the type of the input to the operation
* @author Eleftheria Stein
* @since 5.2
*/
@FunctionalInterface
public
|
that
|
java
|
apache__flink
|
flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/HadoopReduceFunctionITCase.java
|
{
"start": 6891,
"end": 7783
}
|
class ____
implements Reducer<IntWritable, Text, IntWritable, IntWritable> {
@Override
public void reduce(
IntWritable k,
Iterator<Text> vs,
OutputCollector<IntWritable, IntWritable> out,
Reporter r)
throws IOException {
int commentCnt = 0;
while (vs.hasNext()) {
String v = vs.next().toString();
if (v.startsWith("Comment")) {
commentCnt++;
}
}
out.collect(new IntWritable(42), new IntWritable(commentCnt));
}
@Override
public void configure(final JobConf arg0) {}
@Override
public void close() throws IOException {}
}
/** A {@link Reducer} to sum counts for a specific prefix. */
public static
|
AllCommentCntReducer
|
java
|
apache__spark
|
sql/core/src/main/java/org/apache/spark/sql/execution/UnsafeKVExternalSorter.java
|
{
"start": 11117,
"end": 12652
}
|
class ____ extends KVIterator<UnsafeRow, UnsafeRow> {
private UnsafeRow key = new UnsafeRow(keySchema.size());
private UnsafeRow value = new UnsafeRow(valueSchema.size());
private final UnsafeSorterIterator underlying;
private KVSorterIterator(UnsafeSorterIterator underlying) {
this.underlying = underlying;
}
@Override
public boolean next() throws IOException {
try {
if (underlying.hasNext()) {
underlying.loadNext();
Object baseObj = underlying.getBaseObject();
long recordOffset = underlying.getBaseOffset();
int recordLen = underlying.getRecordLength();
// Note that recordLen = keyLen + valueLen + uaoSize (for the keyLen itself)
int uaoSize = UnsafeAlignedOffset.getUaoSize();
int keyLen = Platform.getInt(baseObj, recordOffset);
int valueLen = recordLen - keyLen - uaoSize;
key.pointTo(baseObj, recordOffset + uaoSize, keyLen);
value.pointTo(baseObj, recordOffset + uaoSize + keyLen, valueLen);
return true;
} else {
key = null;
value = null;
cleanupResources();
return false;
}
} catch (IOException e) {
cleanupResources();
throw e;
}
}
@Override
public UnsafeRow getKey() {
return key;
}
@Override
public UnsafeRow getValue() {
return value;
}
@Override
public void close() {
cleanupResources();
}
}
}
|
KVSorterIterator
|
java
|
netty__netty
|
codec-classes-quic/src/main/java/io/netty/handler/codec/quic/QuicException.java
|
{
"start": 786,
"end": 1852
}
|
class ____ extends Exception {
private final QuicTransportError error;
QuicException(String message) {
super(message);
this.error = null;
}
public QuicException(QuicTransportError error) {
super(error.name());
this.error = error;
}
public QuicException(String message, QuicTransportError error) {
super(message);
this.error = error;
}
public QuicException(Throwable cause, QuicTransportError error) {
super(cause);
this.error = error;
}
public QuicException(String message, Throwable cause, QuicTransportError error) {
super(message, cause);
this.error = error;
}
/**
* Returns the {@link QuicTransportError} which was the cause of the {@link QuicException}.
*
* @return the {@link QuicTransportError} that caused this {@link QuicException} or {@code null} if
* it was caused by something different.
*/
@Nullable
public QuicTransportError error() {
return error;
}
}
|
QuicException
|
java
|
apache__flink
|
flink-tests/src/test/java/org/apache/flink/test/streaming/api/datastream/GetOperatorUniqueIDTest.java
|
{
"start": 1499,
"end": 2245
}
|
class ____ extends TestLogger {
/**
* If expected values ever change double check that the change is not braking the contract of
* {@link StreamingRuntimeContext#getOperatorUniqueID()} being stable between job submissions.
*/
@Test
public void testGetOperatorUniqueID() throws Exception {
StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment();
env.fromData(1, 2, 3)
.map(new VerifyOperatorIDMapFunction("6c4f323f22da8fb6e34f80c61be7a689"))
.uid("42")
.map(new VerifyOperatorIDMapFunction("3e129e83691e7737fbf876b47452acbc"))
.uid("44");
env.execute();
}
private static
|
GetOperatorUniqueIDTest
|
java
|
mockito__mockito
|
mockito-core/src/main/java/org/mockito/Mockito.java
|
{
"start": 72961,
"end": 75260
}
|
interface ____ not extensible (see {@link NotExtensible}).
* The change should be seamless to our users.
* </li>
* <li>{@link NotExtensible} -
* Public annotation that indicates to the user that she should not provide custom implementations of given type.
* Helps framework integrators and our users understand how to use Mockito API safely.
* </li>
* </ul>
* Do you have feedback? Please leave comment in <a href="https://github.com/mockito/mockito/issues/1110">issue 1110</a>.
*
* <h3 id="42">42. <a class="meaningful_link" href="#verifiation_started_listener" name="verifiation_started_listener">
* New API for integrations: listening on verification start events (Since 2.11.+)</a></h3>
*
* Framework integrations such as <a href="https://projects.spring.io/spring-boot">Spring Boot</a> needs public API to tackle double-proxy use case
* (<a href="https://github.com/mockito/mockito/issues/1191">issue 1191</a>).
* We added:
* <ul>
* <li>New {@link VerificationStartedListener} and {@link VerificationStartedEvent}
* enable framework integrators to replace the mock object for verification.
* The main driving use case is <a href="https://projects.spring.io/spring-boot/">Spring Boot</a> integration.
* For details see Javadoc for {@link VerificationStartedListener}.
* </li>
* <li>New public method {@link MockSettings#verificationStartedListeners(VerificationStartedListener...)}
* allows to supply verification started listeners at mock creation time.
* </li>
* <li>New handy method {@link MockingDetails#getMock()} was added to make the {@code MockingDetails} API more complete.
* We found this method useful during the implementation.
* </li>
* </ul>
*
* <h3 id="43">43. <a class="meaningful_link" href="#mockito_session_testing_frameworks" name="mockito_session_testing_frameworks">
* New API for integrations: <code>MockitoSession</code> is usable by testing frameworks (Since 2.15.+)</a></h3>
*
* <p>{@link MockitoSessionBuilder} and {@link MockitoSession} were enhanced to enable reuse by testing framework
* integrations (e.g. {@link MockitoRule} for JUnit):</p>
* <ul>
* <li>{@link MockitoSessionBuilder#initMocks(Object...)} allows to pass in multiple test
|
is
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/collection/forged/Source.java
|
{
"start": 250,
"end": 1221
}
|
class ____ {
//CHECKSTYLE:OFF
public Set<String> publicFooSet;
//CHECKSTYLE:ON
private Set<String> fooSet;
private Set<String> fooSet2;
//CHECKSTYLE:OFF
public Map<String, Long> publicBarMap;
//CHECKSTYLE:ON
private Map<String, Long> barMap;
private Map<String, Long> barMap2;
public Set<String> getFooSet() {
return fooSet;
}
public void setFooSet(Set<String> fooSet) {
this.fooSet = fooSet;
}
public Map<String, Long> getBarMap() {
return barMap;
}
public void setBarMap(Map<String, Long> barMap) {
this.barMap = barMap;
}
public Set<String> getFooSet2() {
return fooSet2;
}
public void setFooSet2( Set<String> fooSet2 ) {
this.fooSet2 = fooSet2;
}
public Map<String, Long> getBarMap2() {
return barMap2;
}
public void setBarMap2( Map<String, Long> barMap2 ) {
this.barMap2 = barMap2;
}
}
|
Source
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/operators/coordination/OperatorCoordinatorHolder.java
|
{
"start": 25444,
"end": 30154
}
|
class ____
implements OperatorCoordinator.Context {
private static final Logger LOG =
LoggerFactory.getLogger(LazyInitializedCoordinatorContext.class);
private final JobID jobID;
private final OperatorID operatorId;
private final String operatorName;
private final ClassLoader userCodeClassLoader;
private final CoordinatorStore coordinatorStore;
private final boolean supportsConcurrentExecutionAttempts;
private final OperatorCoordinatorMetricGroup metricGroup;
private GlobalFailureHandler globalFailureHandler;
private Executor schedulerExecutor;
@Nullable private CheckpointCoordinator checkpointCoordinator;
private int operatorParallelism;
private volatile boolean failed;
public LazyInitializedCoordinatorContext(
JobID jobID,
final OperatorID operatorId,
final String operatorName,
final ClassLoader userCodeClassLoader,
final int operatorParallelism,
final CoordinatorStore coordinatorStore,
final boolean supportsConcurrentExecutionAttempts,
final OperatorCoordinatorMetricGroup metricGroup) {
this.jobID = jobID;
this.operatorId = checkNotNull(operatorId);
this.operatorName = checkNotNull(operatorName);
this.userCodeClassLoader = checkNotNull(userCodeClassLoader);
this.operatorParallelism = operatorParallelism;
this.coordinatorStore = checkNotNull(coordinatorStore);
this.supportsConcurrentExecutionAttempts = supportsConcurrentExecutionAttempts;
this.metricGroup = checkNotNull(metricGroup);
}
void lazyInitialize(
GlobalFailureHandler globalFailureHandler,
Executor schedulerExecutor,
@Nullable CheckpointCoordinator checkpointCoordinator,
final int operatorParallelism) {
this.globalFailureHandler = checkNotNull(globalFailureHandler);
this.schedulerExecutor = checkNotNull(schedulerExecutor);
this.checkpointCoordinator = checkpointCoordinator;
this.operatorParallelism = operatorParallelism;
}
void unInitialize() {
this.globalFailureHandler = null;
this.schedulerExecutor = null;
this.checkpointCoordinator = null;
}
boolean isInitialized() {
return schedulerExecutor != null;
}
private void checkInitialized() {
checkState(isInitialized(), "Context was not yet initialized");
}
void resetFailed() {
failed = false;
}
@Override
public JobID getJobID() {
return jobID;
}
@Override
public OperatorID getOperatorId() {
return operatorId;
}
@Override
public OperatorCoordinatorMetricGroup metricGroup() {
return metricGroup;
}
@Override
public void failJob(final Throwable cause) {
checkInitialized();
final FlinkException e =
new FlinkException(
"Global failure triggered by OperatorCoordinator for '"
+ operatorName
+ "' (operator "
+ operatorId
+ ").",
cause);
if (failed) {
LOG.debug(
"Ignoring the request to fail job because the job is already failing. "
+ "The ignored failure cause is",
e);
return;
}
failed = true;
schedulerExecutor.execute(() -> globalFailureHandler.handleGlobalFailure(e));
}
@Override
public int currentParallelism() {
return operatorParallelism;
}
@Override
public ClassLoader getUserCodeClassloader() {
return userCodeClassLoader;
}
@Override
public CoordinatorStore getCoordinatorStore() {
return coordinatorStore;
}
@Override
public boolean isConcurrentExecutionAttemptsSupported() {
return supportsConcurrentExecutionAttempts;
}
@Override
@Nullable
public CheckpointCoordinator getCheckpointCoordinator() {
return checkpointCoordinator;
}
}
}
|
LazyInitializedCoordinatorContext
|
java
|
google__guice
|
extensions/assistedinject/test/com/google/inject/assistedinject/subpkg/SubpackageTest.java
|
{
"start": 1281,
"end": 2351
}
|
class ____ {
private static final double JAVA_VERSION =
Double.parseDouble(StandardSystemProperty.JAVA_SPECIFICATION_VERSION.value());
private static final MethodHandles.Lookup LOOKUPS = MethodHandles.lookup();
private final Logger loggerToWatch = Logger.getLogger(AssistedInject.class.getName());
private final List<LogRecord> logRecords = Lists.newArrayList();
private final Handler fakeHandler =
new Handler() {
@Override
public void publish(LogRecord logRecord) {
logRecords.add(logRecord);
}
@Override
public void flush() {}
@Override
public void close() {}
};
@Before
public void setUp() throws Exception {
loggerToWatch.addHandler(fakeHandler);
setAllowPrivateLookupFallback(true);
setAllowMethodHandleWorkaround(true);
}
@After
public void tearDown() throws Exception {
loggerToWatch.removeHandler(fakeHandler);
setAllowPrivateLookupFallback(true);
setAllowMethodHandleWorkaround(true);
}
public abstract static
|
SubpackageTest
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestTrafficController.java
|
{
"start": 3080,
"end": 3217
}
|
class ____ dev eth0 parent 42:0 classid 42:1 htb rate 100mbit ceil 100mbit";
private static final String ADD_DEFAULT_CLASS_CMD =
"
|
add
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/JUnit4SetUpNotRunTest.java
|
{
"start": 7121,
"end": 7245
}
|
class ____ {
int setUp() {
return 42;
}
}
/** setUp() has parameters */
@RunWith(JUnit4.class)
|
J4SetUpNonVoidReturnType
|
java
|
junit-team__junit5
|
jupiter-tests/src/test/java/org/junit/jupiter/engine/discovery/predicates/IsTestFactoryMethodTests.java
|
{
"start": 1465,
"end": 4024
}
|
class ____ {
final List<DiscoveryIssue> discoveryIssues = new ArrayList<>();
final Predicate<Method> isTestFactoryMethod = new IsTestFactoryMethod(
DiscoveryIssueReporter.collecting(discoveryIssues));
@ParameterizedTest
@ValueSource(strings = { "dynamicTestsFactoryFromCollection", "dynamicTestsFactoryFromStreamWithExtendsWildcard",
"dynamicTestsFactoryFromNode", "dynamicTestsFactoryFromTest", "dynamicTestsFactoryFromContainer",
"dynamicTestsFactoryFromNodeArray", "dynamicTestsFactoryFromTestArray",
"dynamicTestsFactoryFromContainerArray" })
void validFactoryMethods(String methodName) {
assertThat(isTestFactoryMethod).accepts(method(methodName));
assertThat(discoveryIssues).isEmpty();
}
@ParameterizedTest
@ValueSource(strings = { "bogusVoidFactory", "bogusStringsFactory", "bogusStringArrayFactory",
"dynamicTestsFactoryFromStreamWithSuperWildcard" })
void invalidFactoryMethods(String methodName) {
var method = method(methodName);
assertThat(isTestFactoryMethod).rejects(method);
var issue = getOnlyElement(discoveryIssues);
assertThat(issue.severity()).isEqualTo(DiscoveryIssue.Severity.WARNING);
assertThat(issue.message()).isEqualTo(
"@TestFactory method '%s' must return a single org.junit.jupiter.api.DynamicNode or a "
+ "Stream, Collection, Iterable, Iterator, Iterator provider, or array of org.junit.jupiter.api.DynamicNode. "
+ "It will not be executed.",
method.toGenericString());
assertThat(issue.source()).contains(MethodSource.from(method));
}
@ParameterizedTest
@ValueSource(strings = { "objectFactory", "objectArrayFactory", "rawCollectionFactory", "unboundStreamFactory" })
void suspiciousFactoryMethods(String methodName) {
var method = method(methodName);
assertThat(isTestFactoryMethod).accepts(method);
var issue = getOnlyElement(discoveryIssues);
assertThat(issue.severity()).isEqualTo(DiscoveryIssue.Severity.INFO);
assertThat(issue.message()).isEqualTo(
"The declared return type of @TestFactory method '%s' does not support static validation. "
+ "It must return a single org.junit.jupiter.api.DynamicNode or a "
+ "Stream, Collection, Iterable, Iterator, Iterator provider, or array of org.junit.jupiter.api.DynamicNode.",
method.toGenericString());
assertThat(issue.source()).contains(MethodSource.from(method));
}
private static Method method(String name) {
return ReflectionSupport.findMethod(ClassWithTestFactoryMethods.class, name).orElseThrow();
}
@SuppressWarnings("unused")
private static
|
IsTestFactoryMethodTests
|
java
|
apache__camel
|
components/camel-box/camel-box-component/src/generated/java/org/apache/camel/component/box/BoxFoldersManagerEndpointConfigurationConfigurer.java
|
{
"start": 734,
"end": 14060
}
|
class ____ extends org.apache.camel.support.component.PropertyConfigurerSupport implements GeneratedPropertyConfigurer, ExtendedPropertyConfigurerGetter {
private static final Map<String, Object> ALL_OPTIONS;
static {
Map<String, Object> map = new CaseInsensitiveMap();
map.put("Access", com.box.sdk.BoxSharedLink.Access.class);
map.put("AccessTokenCache", com.box.sdk.IAccessTokenCache.class);
map.put("ApiName", org.apache.camel.component.box.internal.BoxApiName.class);
map.put("AuthenticationType", java.lang.String.class);
map.put("ClientId", java.lang.String.class);
map.put("ClientSecret", java.lang.String.class);
map.put("DestinationFolderId", java.lang.String.class);
map.put("EncryptionAlgorithm", com.box.sdk.EncryptionAlgorithm.class);
map.put("EnterpriseId", java.lang.String.class);
map.put("Fields", java.lang.String[].class);
map.put("FolderId", java.lang.String.class);
map.put("FolderName", java.lang.String.class);
map.put("HttpParams", java.util.Map.class);
map.put("Info", com.box.sdk.BoxFolder.Info.class);
map.put("Limit", java.lang.Long.class);
map.put("MaxCacheEntries", int.class);
map.put("MethodName", java.lang.String.class);
map.put("NewFolderName", java.lang.String.class);
map.put("NewName", java.lang.String.class);
map.put("Offset", java.lang.Long.class);
map.put("ParentFolderId", java.lang.String.class);
map.put("Path", java.lang.String[].class);
map.put("Permissions", com.box.sdk.BoxSharedLink.Permissions.class);
map.put("PrivateKeyFile", java.lang.String.class);
map.put("PrivateKeyPassword", java.lang.String.class);
map.put("PublicKeyId", java.lang.String.class);
map.put("SslContextParameters", org.apache.camel.support.jsse.SSLContextParameters.class);
map.put("UnshareDate", java.util.Date.class);
map.put("UserId", java.lang.String.class);
map.put("UserName", java.lang.String.class);
map.put("UserPassword", java.lang.String.class);
ALL_OPTIONS = map;
}
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
org.apache.camel.component.box.BoxFoldersManagerEndpointConfiguration target = (org.apache.camel.component.box.BoxFoldersManagerEndpointConfiguration) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "access": target.setAccess(property(camelContext, com.box.sdk.BoxSharedLink.Access.class, value)); return true;
case "accesstokencache":
case "accessTokenCache": target.setAccessTokenCache(property(camelContext, com.box.sdk.IAccessTokenCache.class, value)); return true;
case "apiname":
case "apiName": target.setApiName(property(camelContext, org.apache.camel.component.box.internal.BoxApiName.class, value)); return true;
case "authenticationtype":
case "authenticationType": target.setAuthenticationType(property(camelContext, java.lang.String.class, value)); return true;
case "clientid":
case "clientId": target.setClientId(property(camelContext, java.lang.String.class, value)); return true;
case "clientsecret":
case "clientSecret": target.setClientSecret(property(camelContext, java.lang.String.class, value)); return true;
case "destinationfolderid":
case "destinationFolderId": target.setDestinationFolderId(property(camelContext, java.lang.String.class, value)); return true;
case "encryptionalgorithm":
case "encryptionAlgorithm": target.setEncryptionAlgorithm(property(camelContext, com.box.sdk.EncryptionAlgorithm.class, value)); return true;
case "enterpriseid":
case "enterpriseId": target.setEnterpriseId(property(camelContext, java.lang.String.class, value)); return true;
case "fields": target.setFields(property(camelContext, java.lang.String[].class, value)); return true;
case "folderid":
case "folderId": target.setFolderId(property(camelContext, java.lang.String.class, value)); return true;
case "foldername":
case "folderName": target.setFolderName(property(camelContext, java.lang.String.class, value)); return true;
case "httpparams":
case "httpParams": target.setHttpParams(property(camelContext, java.util.Map.class, value)); return true;
case "info": target.setInfo(property(camelContext, com.box.sdk.BoxFolder.Info.class, value)); return true;
case "limit": target.setLimit(property(camelContext, java.lang.Long.class, value)); return true;
case "maxcacheentries":
case "maxCacheEntries": target.setMaxCacheEntries(property(camelContext, int.class, value)); return true;
case "methodname":
case "methodName": target.setMethodName(property(camelContext, java.lang.String.class, value)); return true;
case "newfoldername":
case "newFolderName": target.setNewFolderName(property(camelContext, java.lang.String.class, value)); return true;
case "newname":
case "newName": target.setNewName(property(camelContext, java.lang.String.class, value)); return true;
case "offset": target.setOffset(property(camelContext, java.lang.Long.class, value)); return true;
case "parentfolderid":
case "parentFolderId": target.setParentFolderId(property(camelContext, java.lang.String.class, value)); return true;
case "path": target.setPath(property(camelContext, java.lang.String[].class, value)); return true;
case "permissions": target.setPermissions(property(camelContext, com.box.sdk.BoxSharedLink.Permissions.class, value)); return true;
case "privatekeyfile":
case "privateKeyFile": target.setPrivateKeyFile(property(camelContext, java.lang.String.class, value)); return true;
case "privatekeypassword":
case "privateKeyPassword": target.setPrivateKeyPassword(property(camelContext, java.lang.String.class, value)); return true;
case "publickeyid":
case "publicKeyId": target.setPublicKeyId(property(camelContext, java.lang.String.class, value)); return true;
case "sslcontextparameters":
case "sslContextParameters": target.setSslContextParameters(property(camelContext, org.apache.camel.support.jsse.SSLContextParameters.class, value)); return true;
case "unsharedate":
case "unshareDate": target.setUnshareDate(property(camelContext, java.util.Date.class, value)); return true;
case "userid":
case "userId": target.setUserId(property(camelContext, java.lang.String.class, value)); return true;
case "username":
case "userName": target.setUserName(property(camelContext, java.lang.String.class, value)); return true;
case "userpassword":
case "userPassword": target.setUserPassword(property(camelContext, java.lang.String.class, value)); return true;
default: return false;
}
}
@Override
public Map<String, Object> getAllOptions(Object target) {
return ALL_OPTIONS;
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "access": return com.box.sdk.BoxSharedLink.Access.class;
case "accesstokencache":
case "accessTokenCache": return com.box.sdk.IAccessTokenCache.class;
case "apiname":
case "apiName": return org.apache.camel.component.box.internal.BoxApiName.class;
case "authenticationtype":
case "authenticationType": return java.lang.String.class;
case "clientid":
case "clientId": return java.lang.String.class;
case "clientsecret":
case "clientSecret": return java.lang.String.class;
case "destinationfolderid":
case "destinationFolderId": return java.lang.String.class;
case "encryptionalgorithm":
case "encryptionAlgorithm": return com.box.sdk.EncryptionAlgorithm.class;
case "enterpriseid":
case "enterpriseId": return java.lang.String.class;
case "fields": return java.lang.String[].class;
case "folderid":
case "folderId": return java.lang.String.class;
case "foldername":
case "folderName": return java.lang.String.class;
case "httpparams":
case "httpParams": return java.util.Map.class;
case "info": return com.box.sdk.BoxFolder.Info.class;
case "limit": return java.lang.Long.class;
case "maxcacheentries":
case "maxCacheEntries": return int.class;
case "methodname":
case "methodName": return java.lang.String.class;
case "newfoldername":
case "newFolderName": return java.lang.String.class;
case "newname":
case "newName": return java.lang.String.class;
case "offset": return java.lang.Long.class;
case "parentfolderid":
case "parentFolderId": return java.lang.String.class;
case "path": return java.lang.String[].class;
case "permissions": return com.box.sdk.BoxSharedLink.Permissions.class;
case "privatekeyfile":
case "privateKeyFile": return java.lang.String.class;
case "privatekeypassword":
case "privateKeyPassword": return java.lang.String.class;
case "publickeyid":
case "publicKeyId": return java.lang.String.class;
case "sslcontextparameters":
case "sslContextParameters": return org.apache.camel.support.jsse.SSLContextParameters.class;
case "unsharedate":
case "unshareDate": return java.util.Date.class;
case "userid":
case "userId": return java.lang.String.class;
case "username":
case "userName": return java.lang.String.class;
case "userpassword":
case "userPassword": return java.lang.String.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
org.apache.camel.component.box.BoxFoldersManagerEndpointConfiguration target = (org.apache.camel.component.box.BoxFoldersManagerEndpointConfiguration) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "access": return target.getAccess();
case "accesstokencache":
case "accessTokenCache": return target.getAccessTokenCache();
case "apiname":
case "apiName": return target.getApiName();
case "authenticationtype":
case "authenticationType": return target.getAuthenticationType();
case "clientid":
case "clientId": return target.getClientId();
case "clientsecret":
case "clientSecret": return target.getClientSecret();
case "destinationfolderid":
case "destinationFolderId": return target.getDestinationFolderId();
case "encryptionalgorithm":
case "encryptionAlgorithm": return target.getEncryptionAlgorithm();
case "enterpriseid":
case "enterpriseId": return target.getEnterpriseId();
case "fields": return target.getFields();
case "folderid":
case "folderId": return target.getFolderId();
case "foldername":
case "folderName": return target.getFolderName();
case "httpparams":
case "httpParams": return target.getHttpParams();
case "info": return target.getInfo();
case "limit": return target.getLimit();
case "maxcacheentries":
case "maxCacheEntries": return target.getMaxCacheEntries();
case "methodname":
case "methodName": return target.getMethodName();
case "newfoldername":
case "newFolderName": return target.getNewFolderName();
case "newname":
case "newName": return target.getNewName();
case "offset": return target.getOffset();
case "parentfolderid":
case "parentFolderId": return target.getParentFolderId();
case "path": return target.getPath();
case "permissions": return target.getPermissions();
case "privatekeyfile":
case "privateKeyFile": return target.getPrivateKeyFile();
case "privatekeypassword":
case "privateKeyPassword": return target.getPrivateKeyPassword();
case "publickeyid":
case "publicKeyId": return target.getPublicKeyId();
case "sslcontextparameters":
case "sslContextParameters": return target.getSslContextParameters();
case "unsharedate":
case "unshareDate": return target.getUnshareDate();
case "userid":
case "userId": return target.getUserId();
case "username":
case "userName": return target.getUserName();
case "userpassword":
case "userPassword": return target.getUserPassword();
default: return null;
}
}
@Override
public Object getCollectionValueType(Object target, String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "httpparams":
case "httpParams": return java.lang.Object.class;
default: return null;
}
}
}
|
BoxFoldersManagerEndpointConfigurationConfigurer
|
java
|
elastic__elasticsearch
|
x-pack/plugin/deprecation/qa/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/DeprecationHttpIT.java
|
{
"start": 2983,
"end": 34116
}
|
class ____ extends ESRestTestCase {
@ClassRule
public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
.module("deprecation-plugin")
.module("x-pack-deprecation")
.module("x-pack-stack")
.module("x-pack-ilm")
.module("ingest-common")
.module("constant-keyword")
.setting("cluster.deprecation_indexing.enabled", "true")
.setting("cluster.deprecation_indexing.flush_interval", "100ms")
.setting("xpack.security.enabled", "false")
.setting("xpack.license.self_generated.type", "trial")
.build();
@Override
protected String getTestRestCluster() {
return cluster.getHttpAddresses();
}
@Rule
public TestName testName = new TestName();
private String xOpaqueId() {
String name = testName.getMethodName();
int pos = name.indexOf(" "); // additional suffix in case of repeated runs
return pos == -1 ? name : name.substring(0, pos) + "-" + name.hashCode();
}
@Override
protected boolean preserveClusterUponCompletion() {
return true; // isolation is based on xOpaqueId
}
@Before
public void assertIndexingIsEnabled() throws Exception {
// make sure the deprecation logs indexing is enabled
Response response = performScopedRequest(new Request("GET", "/_cluster/settings?include_defaults=true&flat_settings=true"));
ObjectMapper mapper = new ObjectMapper();
final JsonNode jsonNode = mapper.readTree(response.getEntity().getContent());
final boolean defaultValue = jsonNode.at("/defaults/cluster.deprecation_indexing.enabled").asBoolean();
assertTrue(defaultValue);
}
/**
* Check that configuring deprecation settings causes a warning to be added to the
* response headers.
*/
public void testDeprecatedSettingsReturnWarnings() throws Exception {
try {
XContentBuilder builder = JsonXContent.contentBuilder()
.startObject()
.startObject("persistent")
.field(TEST_DEPRECATED_SETTING_TRUE1.getKey(), TEST_DEPRECATED_SETTING_TRUE1.getDefault(Settings.EMPTY) == false)
.field(TEST_DEPRECATED_SETTING_TRUE2.getKey(), TEST_DEPRECATED_SETTING_TRUE2.getDefault(Settings.EMPTY) == false)
// There should be no warning for this field
.field(TEST_NOT_DEPRECATED_SETTING.getKey(), TEST_NOT_DEPRECATED_SETTING.getDefault(Settings.EMPTY) == false)
.endObject()
.endObject();
final Request request = new Request("PUT", "_cluster/settings");
request.setJsonEntity(Strings.toString(builder));
final Response response = performScopedRequest(request);
final List<String> deprecatedWarnings = getWarningHeaders(response.getHeaders());
assertThat(deprecatedWarnings, everyItem(matchesRegex(HeaderWarning.WARNING_HEADER_PATTERN)));
assertThat(
extractWarningValuesFromWarningHeaders(deprecatedWarnings),
containsInAnyOrder(
matchDeprecationWarning(TEST_DEPRECATED_SETTING_TRUE1),
matchDeprecationWarning(TEST_DEPRECATED_SETTING_TRUE2)
)
);
assertBusy(() -> {
List<Map<String, Object>> documents = DeprecationTestUtils.getIndexedDeprecations(client(), xOpaqueId());
logger.warn(documents);
assertThat(documents, hasSize(2));
}, 45, TimeUnit.SECONDS);
} finally {
Response response = cleanupSettings();
List<String> warningHeaders = getWarningHeaders(response.getHeaders());
logger.warn("Warning headers on cleanup: {}", warningHeaders);
}
}
private Matcher<String> matchDeprecationWarning(Setting<?> setting) {
var format = "[%s] setting was deprecated in Elasticsearch and will be removed in a future release. "
+ "See the deprecation documentation for the next major version.";
return equalTo(Strings.format(format, setting.getKey()));
}
private Response cleanupSettings() throws IOException {
XContentBuilder builder = JsonXContent.contentBuilder()
.startObject()
.startObject("persistent")
.field(TEST_DEPRECATED_SETTING_TRUE1.getKey(), (Boolean) null)
.field(TEST_DEPRECATED_SETTING_TRUE2.getKey(), (Boolean) null)
// There should be no warning for this field
.field(TEST_NOT_DEPRECATED_SETTING.getKey(), (Boolean) null)
.endObject()
.endObject();
final Request request = new Request("PUT", "_cluster/settings");
request.setJsonEntity(Strings.toString(builder));
return performScopedRequest(request, xOpaqueId() + "-cleanup");
}
public void testDeprecationWarningsAppearInHeaders() throws Exception {
doTestDeprecationWarningsAppearInHeaders(xOpaqueId());
}
public void testDeprecationHeadersDoNotGetStuck() throws Exception {
for (int i = 0; i < 3; i++) {
doTestDeprecationWarningsAppearInHeaders(xOpaqueId() + "-" + i);
}
}
/**
* Run a request that receives a predictably randomized number of deprecation warnings.
* <p>
* Re-running this back-to-back helps to ensure that warnings are not being maintained across requests.
*/
private void doTestDeprecationWarningsAppearInHeaders(String xOpaqueId) throws Exception {
final boolean useDeprecatedField = randomBoolean();
final boolean useNonDeprecatedSetting = randomBoolean();
// deprecated settings should also trigger a deprecation warning
final List<Setting<Boolean>> settings = new ArrayList<>(3);
settings.add(TEST_DEPRECATED_SETTING_TRUE1);
if (randomBoolean()) {
settings.add(TEST_DEPRECATED_SETTING_TRUE2);
}
if (useNonDeprecatedSetting) {
settings.add(TEST_NOT_DEPRECATED_SETTING);
}
Collections.shuffle(settings, random());
// trigger all deprecations
Request request = new Request("GET", "/_test_cluster/deprecated_settings");
request.setEntity(buildSettingsRequest(settings, useDeprecatedField ? "deprecated_settings" : "settings"));
Response response = performScopedRequest(request, xOpaqueId);
final List<String> deprecatedWarnings = getWarningHeaders(response.getHeaders());
final List<Matcher<? super String>> headerMatchers = new ArrayList<>(4);
headerMatchers.add(equalTo(DEPRECATED_ENDPOINT));
if (useDeprecatedField) {
headerMatchers.add(equalTo(DEPRECATED_USAGE));
}
assertThat(deprecatedWarnings, everyItem(matchesRegex(HeaderWarning.WARNING_HEADER_PATTERN)));
assertThat(extractWarningValuesFromWarningHeaders(deprecatedWarnings), containsInAnyOrder(headerMatchers));
// expect to index same number of new deprecations as the number of header warnings in the response
assertBusy(() -> {
var documents = DeprecationTestUtils.getIndexedDeprecations(client(), xOpaqueId);
logger.warn(documents);
assertThat(documents, hasSize(headerMatchers.size()));
}, 45, TimeUnit.SECONDS);
}
public void testDeprecationRouteThrottling() throws Exception {
performScopedRequest(deprecatedRequest("GET"));
performScopedRequest(deprecatedRequest("GET"));
performScopedRequest(deprecatedRequest("POST"));
assertBusy(() -> {
List<Map<String, Object>> documents = DeprecationTestUtils.getIndexedDeprecations(client(), xOpaqueId());
logger.warn(documents);
assertThat(
documents,
containsInAnyOrder(
allOf(
hasEntry(KEY_FIELD_NAME, "deprecated_route_POST_/_test_cluster/deprecated_settings"),
hasEntry("message", "[/_test_cluster/deprecated_settings] exists for deprecated tests")
),
allOf(
hasEntry(KEY_FIELD_NAME, "deprecated_route_GET_/_test_cluster/deprecated_settings"),
hasEntry("message", "[/_test_cluster/deprecated_settings] exists for deprecated tests")
),
allOf(
hasEntry(KEY_FIELD_NAME, "deprecated_settings"),
hasEntry("message", "[deprecated_settings] usage is deprecated. use [settings] instead")
)
)
);
}, 45, TimeUnit.SECONDS);
}
public void testDisableDeprecationLogIndexing() throws Exception {
performScopedRequest(deprecatedRequest("GET"));
configureWriteDeprecationLogsToIndex(false);
try {
performScopedRequest(deprecatedRequest("POST"));
assertBusy(() -> {
List<Map<String, Object>> documents = DeprecationTestUtils.getIndexedDeprecations(client(), xOpaqueId());
logger.warn(documents);
assertThat(
documents,
containsInAnyOrder(
allOf(
hasEntry(KEY_FIELD_NAME, "deprecated_route_GET_/_test_cluster/deprecated_settings"),
hasEntry("message", "[/_test_cluster/deprecated_settings] exists for deprecated tests")
),
allOf(
hasEntry(KEY_FIELD_NAME, "deprecated_settings"),
hasEntry("message", "[deprecated_settings] usage is deprecated. use [settings] instead")
)
)
);
}, 45, TimeUnit.SECONDS);
} finally {
configureWriteDeprecationLogsToIndex(null);
}
}
// triggers two deprecations - endpoint and setting
private Request deprecatedRequest(String method) throws IOException {
final Request getRequest = new Request(method, "/_test_cluster/deprecated_settings");
getRequest.setEntity(buildSettingsRequest(Collections.singletonList(TEST_DEPRECATED_SETTING_TRUE1), "deprecated_settings"));
return getRequest;
}
/**
* Check that deprecation messages can be recorded to an index
*/
public void testDeprecationMessagesCanBeIndexed() throws Exception {
performScopedRequest(deprecatedRequest("GET"));
assertBusy(() -> {
List<Map<String, Object>> documents = DeprecationTestUtils.getIndexedDeprecations(client(), xOpaqueId());
logger.warn(documents);
assertThat(
documents,
containsInAnyOrder(
allOf(
hasKey("@timestamp"),
hasKey("elasticsearch.cluster.name"),
hasKey("elasticsearch.cluster.uuid"),
hasEntry(X_OPAQUE_ID_FIELD_NAME, xOpaqueId()),
hasEntry("elasticsearch.event.category", "settings"),
hasKey("elasticsearch.node.id"),
hasKey("elasticsearch.node.name"),
hasEntry("data_stream.dataset", "elasticsearch.deprecation"),
hasEntry("data_stream.namespace", "default"),
hasEntry("data_stream.type", "logs"),
hasKey("ecs.version"),
hasEntry(KEY_FIELD_NAME, "deprecated_settings"),
hasEntry("event.dataset", "elasticsearch.deprecation"),
hasEntry("log.level", "WARN"),
hasKey("log.logger"),
hasEntry("message", "[deprecated_settings] usage is deprecated. use [settings] instead")
),
allOf(
hasKey("@timestamp"),
hasKey("elasticsearch.cluster.name"),
hasKey("elasticsearch.cluster.uuid"),
hasEntry(X_OPAQUE_ID_FIELD_NAME, xOpaqueId()),
hasEntry("elasticsearch.event.category", "api"),
hasKey("elasticsearch.node.id"),
hasKey("elasticsearch.node.name"),
hasEntry("data_stream.dataset", "elasticsearch.deprecation"),
hasEntry("data_stream.namespace", "default"),
hasEntry("data_stream.type", "logs"),
hasKey("ecs.version"),
hasEntry(KEY_FIELD_NAME, "deprecated_route_GET_/_test_cluster/deprecated_settings"),
hasEntry("event.dataset", "elasticsearch.deprecation"),
hasEntry("log.level", "WARN"),
hasKey("log.logger"),
hasEntry("message", "[/_test_cluster/deprecated_settings] exists for deprecated tests")
)
)
);
}, 45, TimeUnit.SECONDS);
}
/**
* Check that a deprecation message with CRITICAL level can be recorded to an index
*/
public void testDeprecationCriticalWarnMessagesCanBeIndexed() throws Exception {
final Request request = new Request("GET", "/_test_cluster/only_deprecated_setting");
request.setEntity(buildSettingsRequest(Collections.singletonList(TEST_DEPRECATED_SETTING_TRUE3), "deprecation_critical"));
performScopedRequest(request);
assertBusy(() -> {
List<Map<String, Object>> documents = DeprecationTestUtils.getIndexedDeprecations(client(), xOpaqueId());
logger.warn(documents);
assertThat(
documents,
containsInAnyOrder(
allOf(
hasKey("@timestamp"),
hasKey("elasticsearch.cluster.name"),
hasKey("elasticsearch.cluster.uuid"),
hasEntry(X_OPAQUE_ID_FIELD_NAME, xOpaqueId()),
hasEntry("elasticsearch.event.category", "settings"),
hasKey("elasticsearch.node.id"),
hasKey("elasticsearch.node.name"),
hasEntry("data_stream.dataset", "elasticsearch.deprecation"),
hasEntry("data_stream.namespace", "default"),
hasEntry("data_stream.type", "logs"),
hasKey("ecs.version"),
hasEntry(KEY_FIELD_NAME, "deprecated_critical_settings"),
hasEntry("event.dataset", "elasticsearch.deprecation"),
hasEntry("log.level", "CRITICAL"),
hasKey("log.logger"),
hasEntry("message", "[deprecated_settings] usage is deprecated. use [settings] instead")
)
)
);
}, 45, TimeUnit.SECONDS);
}
/**
* Check that deprecation messages with WARN level can be recorded to an index
*/
public void testDeprecationWarnMessagesCanBeIndexed() throws Exception {
final Request request = new Request("GET", "/_test_cluster/deprecated_settings");
request.setEntity(buildSettingsRequest(Collections.singletonList(TEST_DEPRECATED_SETTING_TRUE1), "deprecation_warning"));
performScopedRequest(request);
assertBusy(() -> {
List<Map<String, Object>> documents = DeprecationTestUtils.getIndexedDeprecations(client(), xOpaqueId());
logger.warn(documents);
assertThat(
documents,
containsInAnyOrder(
allOf(
hasKey("@timestamp"),
hasKey("elasticsearch.cluster.name"),
hasKey("elasticsearch.cluster.uuid"),
hasEntry(X_OPAQUE_ID_FIELD_NAME, xOpaqueId()),
hasEntry("elasticsearch.event.category", "settings"),
hasKey("elasticsearch.node.id"),
hasKey("elasticsearch.node.name"),
hasEntry("data_stream.dataset", "elasticsearch.deprecation"),
hasEntry("data_stream.namespace", "default"),
hasEntry("data_stream.type", "logs"),
hasKey("ecs.version"),
hasEntry(KEY_FIELD_NAME, "deprecated_warn_settings"),
hasEntry("event.dataset", "elasticsearch.deprecation"),
hasEntry("log.level", "WARN"),
hasKey("log.logger"),
hasEntry("message", "[deprecated_warn_settings] usage is deprecated but won't be breaking in next version")
),
allOf(
hasKey("@timestamp"),
hasKey("elasticsearch.cluster.name"),
hasKey("elasticsearch.cluster.uuid"),
hasEntry(X_OPAQUE_ID_FIELD_NAME, xOpaqueId()),
hasEntry("elasticsearch.event.category", "api"),
hasKey("elasticsearch.node.id"),
hasKey("elasticsearch.node.name"),
hasEntry("data_stream.dataset", "elasticsearch.deprecation"),
hasEntry("data_stream.namespace", "default"),
hasEntry("data_stream.type", "logs"),
hasKey("ecs.version"),
hasEntry(KEY_FIELD_NAME, "deprecated_route_GET_/_test_cluster/deprecated_settings"),
hasEntry("event.dataset", "elasticsearch.deprecation"),
hasEntry("log.level", "WARN"),
hasKey("log.logger"),
hasEntry("message", "[/_test_cluster/deprecated_settings] exists for deprecated tests")
)
)
);
}, 45, TimeUnit.SECONDS);
}
public void testDeprecateAndKeep() throws Exception {
final Request request = new Request("GET", "/_test_cluster/deprecated_but_dont_remove");
request.setEntity(buildSettingsRequest(Collections.singletonList(TEST_NOT_DEPRECATED_SETTING), "settings"));
Response response = performScopedRequest(request);
final List<String> deprecatedWarnings = getWarningHeaders(response.getHeaders());
assertThat(
extractWarningValuesFromWarningHeaders(deprecatedWarnings),
containsInAnyOrder("[/_test_cluster/deprecated_but_dont_remove] is deprecated, but no plans to remove quite yet")
);
assertBusy(() -> {
List<Map<String, Object>> documents = DeprecationTestUtils.getIndexedDeprecations(client(), xOpaqueId());
logger.warn(documents);
// only assert the relevant fields: level, message, and category
assertThat(
documents,
containsInAnyOrder(
allOf(
hasEntry("elasticsearch.event.category", "api"),
hasEntry("log.level", "WARN"),
hasEntry("message", "[/_test_cluster/deprecated_but_dont_remove] is deprecated, but no plans to remove quite yet")
)
)
);
}, 45, TimeUnit.SECONDS);
}
public void testReplacesInCurrentVersion() throws Exception {
final Request request = new Request("GET", "/_test_cluster/old_name1"); // deprecated in current version
request.setEntity(buildSettingsRequest(Collections.singletonList(TEST_NOT_DEPRECATED_SETTING), "settings"));
Response response = performScopedRequest(request);
final List<String> deprecatedWarnings = getWarningHeaders(response.getHeaders());
assertThat(
extractWarningValuesFromWarningHeaders(deprecatedWarnings),
containsInAnyOrder("[GET /_test_cluster/old_name1] is deprecated! Use [GET /_test_cluster/new_name1] instead.")
);
assertBusy(() -> {
List<Map<String, Object>> documents = DeprecationTestUtils.getIndexedDeprecations(client(), xOpaqueId());
logger.warn(documents);
// only assert the relevant fields: level, message, and category
assertThat(
documents,
containsInAnyOrder(
allOf(
hasEntry("elasticsearch.event.category", "api"),
hasEntry("log.level", "WARN"),
hasEntry("message", "[GET /_test_cluster/old_name1] is deprecated! Use [GET /_test_cluster/new_name1] instead.")
)
)
);
}, 45, TimeUnit.SECONDS);
}
public void testReplacesInCompatibleVersion() throws Exception {
final Request request = new Request("GET", "/_test_cluster/old_name2"); // deprecated in minimum supported version
request.setEntity(buildSettingsRequest(Collections.singletonList(TEST_DEPRECATED_SETTING_TRUE1), "deprecated_settings"));
final RequestOptions compatibleOptions = request.getOptions()
.toBuilder()
.addHeader("Accept", "application/vnd.elasticsearch+json;compatible-with=" + RestApiVersion.minimumSupported().major)
.addHeader("Content-Type", "application/vnd.elasticsearch+json;compatible-with=" + RestApiVersion.minimumSupported().major)
.build();
request.setOptions(compatibleOptions);
Response response = performScopedRequest(request);
final List<String> deprecatedWarnings = getWarningHeaders(response.getHeaders());
assertThat(
extractWarningValuesFromWarningHeaders(deprecatedWarnings),
containsInAnyOrder(
"[GET /_test_cluster/old_name2] is deprecated! Use [GET /_test_cluster/new_name2] instead.",
"You are using a compatible API for this request"
)
);
assertBusy(() -> {
List<Map<String, Object>> documents = DeprecationTestUtils.getIndexedDeprecations(client(), xOpaqueId());
logger.warn(documents);
// only assert the relevant fields: level, message, and category
assertThat(
documents,
containsInAnyOrder(
allOf(
hasEntry("elasticsearch.event.category", "compatible_api"),
hasEntry("log.level", "CRITICAL"),
hasEntry("message", "[GET /_test_cluster/old_name2] is deprecated! Use [GET /_test_cluster/new_name2] instead.")
),
allOf(
hasEntry("elasticsearch.event.category", "compatible_api"),
hasEntry("log.level", "CRITICAL"),
// this message comes from the test, not production code. this is the message for setting the deprecated setting
hasEntry("message", "You are using a compatible API for this request")
)
)
);
}, 45, TimeUnit.SECONDS);
}
/**
* Check that log messages about REST API compatibility are recorded to an index
*/
public void testCompatibleMessagesCanBeIndexed() throws Exception {
final Request compatibleRequest = new Request("GET", "/_test_cluster/compat_only");
final RequestOptions compatibleOptions = compatibleRequest.getOptions()
.toBuilder()
.addHeader("Accept", "application/vnd.elasticsearch+json;compatible-with=" + RestApiVersion.minimumSupported().major)
.addHeader("Content-Type", "application/vnd.elasticsearch+json;compatible-with=" + RestApiVersion.minimumSupported().major)
.build();
compatibleRequest.setOptions(compatibleOptions);
compatibleRequest.setEntity(buildSettingsRequest(Collections.singletonList(TEST_DEPRECATED_SETTING_TRUE1), "deprecated_settings"));
Response deprecatedApiResponse = performScopedRequest(compatibleRequest);
final List<String> deprecatedWarnings = getWarningHeaders(deprecatedApiResponse.getHeaders());
assertThat(
extractWarningValuesFromWarningHeaders(deprecatedWarnings),
containsInAnyOrder(DEPRECATED_ENDPOINT, COMPATIBLE_API_USAGE)
);
assertBusy(() -> {
List<Map<String, Object>> documents = DeprecationTestUtils.getIndexedDeprecations(client(), xOpaqueId());
logger.warn(documents);
assertThat(
documents,
containsInAnyOrder(
allOf(
hasKey("@timestamp"),
hasKey("elasticsearch.cluster.name"),
hasKey("elasticsearch.cluster.uuid"),
hasEntry(X_OPAQUE_ID_FIELD_NAME, xOpaqueId()),
hasEntry("elasticsearch.event.category", "compatible_api"),
hasKey("elasticsearch.node.id"),
hasKey("elasticsearch.node.name"),
hasEntry("data_stream.dataset", "elasticsearch.deprecation"),
hasEntry("data_stream.namespace", "default"),
hasEntry("data_stream.type", "logs"),
hasKey("ecs.version"),
hasEntry(KEY_FIELD_NAME, "compatible_key"),
hasEntry("event.dataset", "elasticsearch.deprecation"),
hasEntry("log.level", "CRITICAL"),
hasKey("log.logger"),
hasEntry("message", "You are using a compatible API for this request")
),
allOf(
hasKey("@timestamp"),
hasKey("elasticsearch.cluster.name"),
hasKey("elasticsearch.cluster.uuid"),
hasEntry(X_OPAQUE_ID_FIELD_NAME, xOpaqueId()),
hasEntry("elasticsearch.event.category", "compatible_api"),
hasKey("elasticsearch.node.id"),
hasKey("elasticsearch.node.name"),
hasEntry("data_stream.dataset", "elasticsearch.deprecation"),
hasEntry("data_stream.namespace", "default"),
hasEntry("data_stream.type", "logs"),
hasKey("ecs.version"),
hasEntry(KEY_FIELD_NAME, "deprecated_route_GET_/_test_cluster/compat_only"),
hasEntry("event.dataset", "elasticsearch.deprecation"),
hasEntry("log.level", "CRITICAL"),
hasKey("log.logger"),
hasEntry("message", "[/_test_cluster/deprecated_settings] exists for deprecated tests")
)
)
);
}, 45, TimeUnit.SECONDS);
}
/**
* Check that deprecation messages can be recorded to an index
*/
public void testDeprecationIndexingCacheReset() throws Exception {
performScopedRequest(deprecatedRequest("GET"));
performScopedRequest(new Request("DELETE", "/_logging/deprecation_cache"));
performScopedRequest(deprecatedRequest("GET"));
assertBusy(() -> {
List<Map<String, Object>> documents = DeprecationTestUtils.getIndexedDeprecations(client(), xOpaqueId());
logger.warn(documents);
assertThat(
documents,
containsInAnyOrder(
allOf(
hasEntry(KEY_FIELD_NAME, "deprecated_route_GET_/_test_cluster/deprecated_settings"),
hasEntry("message", "[/_test_cluster/deprecated_settings] exists for deprecated tests")
),
allOf(
hasEntry(KEY_FIELD_NAME, "deprecated_route_GET_/_test_cluster/deprecated_settings"),
hasEntry("message", "[/_test_cluster/deprecated_settings] exists for deprecated tests")
),
allOf(
hasEntry(KEY_FIELD_NAME, "deprecated_settings"),
hasEntry("message", "[deprecated_settings] usage is deprecated. use [settings] instead")
),
allOf(
hasEntry(KEY_FIELD_NAME, "deprecated_settings"),
hasEntry("message", "[deprecated_settings] usage is deprecated. use [settings] instead")
)
)
);
}, 45, TimeUnit.SECONDS);
}
private void configureWriteDeprecationLogsToIndex(Boolean value) throws IOException {
final Request request = new Request("PUT", "_cluster/settings");
request.setJsonEntity("{ \"persistent\": { \"cluster.deprecation_indexing.enabled\": " + value + " } }");
performScopedRequest(request);
}
private List<String> getWarningHeaders(Header[] headers) {
return Arrays.stream(headers).filter(h -> h.getName().equals("Warning")).map(Header::getValue).toList();
}
private List<String> extractWarningValuesFromWarningHeaders(List<String> deprecatedWarnings) {
return deprecatedWarnings.stream()
.map(s -> HeaderWarning.extractWarningValueFromWarningHeader(s, true))
.collect(Collectors.toList());
}
private HttpEntity buildSettingsRequest(List<Setting<Boolean>> settings, String settingName) throws IOException {
XContentBuilder builder = JsonXContent.contentBuilder();
builder.startObject().startArray(settingName);
for (Setting<Boolean> setting : settings) {
builder.value(setting.getKey());
}
builder.endArray().endObject();
return new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON);
}
private Response performScopedRequest(Request req) throws IOException {
return performScopedRequest(req, xOpaqueId());
}
private Response performScopedRequest(Request req, String xOpaqueId) throws IOException {
req.setOptions(req.getOptions().toBuilder().addHeader("X-Opaque-Id", xOpaqueId).build());
Response response = client().performRequest(req);
assertOK(response);
return response;
}
/**
* Builds a REST client that will tolerate warnings in the response headers. The default
* is to throw an exception.
*/
@Override
protected RestClient buildClient(Settings settings, HttpHost[] hosts) throws IOException {
RestClientBuilder builder = RestClient.builder(hosts);
configureClient(builder, settings);
builder.setStrictDeprecationMode(false);
return builder.build();
}
}
|
DeprecationHttpIT
|
java
|
apache__kafka
|
generator/src/main/java/org/apache/kafka/message/ApiMessageTypeGenerator.java
|
{
"start": 16687,
"end": 18069
}
|
enum ____ {%n");
buffer.incrementIndent();
Iterator<RequestListenerType> listenerIter = Arrays.stream(RequestListenerType.values()).iterator();
while (listenerIter.hasNext()) {
RequestListenerType scope = listenerIter.next();
buffer.printf("%s%s%n", scope.name(), listenerIter.hasNext() ? "," : ";");
}
buffer.decrementIndent();
buffer.printf("}%n");
}
private void generateHighestSupportedVersion() {
buffer.printf("public short highestSupportedVersion(boolean enableUnstableLastVersion) {%n");
buffer.incrementIndent();
buffer.printf("if (!this.latestVersionUnstable || enableUnstableLastVersion) {%n");
buffer.incrementIndent();
buffer.printf("return this.highestSupportedVersion;%n");
buffer.decrementIndent();
buffer.printf("} else {%n");
buffer.incrementIndent();
buffer.printf("// A negative value means that the API has no enabled versions.%n");
buffer.printf("return (short) (this.highestSupportedVersion - 1);%n");
buffer.decrementIndent();
buffer.printf("}%n");
buffer.decrementIndent();
buffer.printf("}%n");
}
private void write(BufferedWriter writer) throws IOException {
headerGenerator.buffer().write(writer);
buffer.write(writer);
}
}
|
ListenerType
|
java
|
spring-cloud__spring-cloud-gateway
|
spring-cloud-gateway-server-webflux/src/main/java/org/springframework/cloud/gateway/route/builder/RouteLocatorBuilder.java
|
{
"start": 1071,
"end": 1403
}
|
class ____ {
private ConfigurableApplicationContext context;
public RouteLocatorBuilder(ConfigurableApplicationContext context) {
this.context = context;
}
/**
* Creates a new {@link Builder}.
* @return a new {@link Builder}.
*/
public Builder routes() {
return new Builder(context);
}
/**
* A
|
RouteLocatorBuilder
|
java
|
elastic__elasticsearch
|
libs/entitlement/tools/common/src/main/java/org/elasticsearch/entitlement/tools/Utils.java
|
{
"start": 932,
"end": 3891
}
|
class ____ {
private static final FileSystem JRT_FS = FileSystems.getFileSystem(URI.create("jrt:/"));
// TODO Currently ServerProcessBuilder is using --add-modules=ALL-MODULE-PATH, should this rather
// reflect below excludes (except for java.desktop which requires a special handling)?
// internal and incubator modules are also excluded
private static final Set<String> EXCLUDED_MODULES = Set.of(
"java.desktop",
"jdk.jartool",
"jdk.jdi",
"java.security.jgss",
"jdk.jshell",
"jdk.jcmd",
"jdk.hotspot.agent",
"jdk.jfr",
"jdk.javadoc",
// "jdk.jpackage", // Do we want to include this?
// "jdk.jlink", // Do we want to include this?
"jdk.localedata" // noise, change here are not interesting
);
public static final Predicate<String> DEFAULT_MODULE_PREDICATE = m -> EXCLUDED_MODULES.contains(m) == false
&& m.contains(".internal.") == false
&& m.contains(".incubator.") == false;
public static final Predicate<String> modulePredicate(boolean includeIncubator) {
return includeIncubator == false ? DEFAULT_MODULE_PREDICATE : DEFAULT_MODULE_PREDICATE.or(m -> m.contains(".incubator."));
}
public static Map<String, Set<String>> loadExportsByModule() throws IOException {
var modulesExports = new HashMap<String, Set<String>>();
try (var stream = Files.walk(JRT_FS.getPath("modules"))) {
stream.filter(p -> p.getFileName().toString().equals("module-info.class")).forEach(x -> {
try (var is = Files.newInputStream(x)) {
var md = ModuleDescriptor.read(is);
modulesExports.put(
md.name(),
md.exports()
.stream()
.filter(e -> e.isQualified() == false)
.map(ModuleDescriptor.Exports::source)
.collect(Collectors.toSet())
);
} catch (IOException e) {
throw new RuntimeException(e);
}
});
}
return Collections.unmodifiableMap(modulesExports);
}
public static Map<String, String> loadClassToModuleMapping() throws IOException {
Map<String, String> moduleNameByClass = new HashMap<>();
Utils.walkJdkModules(m -> true, Collections.emptyMap(), (moduleName, moduleClasses, moduleExports) -> {
for (var classFile : moduleClasses) {
String prev = moduleNameByClass.put(internalClassName(classFile, moduleName), moduleName);
if (prev != null) {
throw new IllegalStateException("Class " + classFile + " is in both modules " + prev + " and " + moduleName);
}
}
});
return Collections.unmodifiableMap(moduleNameByClass);
}
public
|
Utils
|
java
|
apache__flink
|
flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/AsyncTriggerConverterTest.java
|
{
"start": 1462,
"end": 2295
}
|
class ____ extends Trigger<Object, TimeWindow>
implements AsyncTriggerConverter {
@Override
public TriggerResult onElement(
Object element, long timestamp, TimeWindow window, TriggerContext ctx)
throws Exception {
return null;
}
@Override
public TriggerResult onProcessingTime(long time, TimeWindow window, TriggerContext ctx)
throws Exception {
return null;
}
@Override
public TriggerResult onEventTime(long time, TimeWindow window, TriggerContext ctx)
throws Exception {
return null;
}
@Override
public void clear(TimeWindow window, TriggerContext ctx) throws Exception {}
}
private static
|
DummyTriggerWithoutAsyncConverter
|
java
|
apache__camel
|
components/camel-ftp/src/generated/java/org/apache/camel/component/file/remote/FtpsEndpointUriFactory.java
|
{
"start": 521,
"end": 7436
}
|
class ____ extends org.apache.camel.support.component.EndpointUriFactorySupport implements EndpointUriFactory {
private static final String BASE = ":host:port/directoryName";
private static final Set<String> PROPERTY_NAMES;
private static final Set<String> SECRET_PROPERTY_NAMES;
private static final Map<String, String> MULTI_VALUE_PREFIXES;
static {
Set<String> props = new HashSet<>(124);
props.add("account");
props.add("activePortRange");
props.add("allowNullBody");
props.add("antExclude");
props.add("antFilterCaseSensitive");
props.add("antInclude");
props.add("autoCreate");
props.add("backoffErrorThreshold");
props.add("backoffIdleThreshold");
props.add("backoffMultiplier");
props.add("binary");
props.add("bridgeErrorHandler");
props.add("browseLimit");
props.add("bufferSize");
props.add("charset");
props.add("checksumFileAlgorithm");
props.add("chmod");
props.add("connectTimeout");
props.add("delay");
props.add("delete");
props.add("directoryName");
props.add("disableSecureDataChannelDefaults");
props.add("disconnect");
props.add("disconnectOnBatchComplete");
props.add("doneFileName");
props.add("download");
props.add("eagerDeleteTargetFile");
props.add("eagerMaxMessagesPerPoll");
props.add("exceptionHandler");
props.add("exchangePattern");
props.add("exclude");
props.add("excludeExt");
props.add("exclusiveReadLockStrategy");
props.add("execPbsz");
props.add("execProt");
props.add("fastExistsCheck");
props.add("fileExist");
props.add("fileName");
props.add("filter");
props.add("filterDirectory");
props.add("filterFile");
props.add("flatten");
props.add("ftpClient");
props.add("ftpClientConfig");
props.add("ftpClientConfigParameters");
props.add("ftpClientKeyStoreParameters");
props.add("ftpClientParameters");
props.add("ftpClientTrustStoreParameters");
props.add("greedy");
props.add("handleDirectoryParserAbsoluteResult");
props.add("host");
props.add("idempotent");
props.add("idempotentEager");
props.add("idempotentKey");
props.add("idempotentRepository");
props.add("ignoreFileNotFoundOrPermissionError");
props.add("implicit");
props.add("inProgressRepository");
props.add("include");
props.add("includeExt");
props.add("initialDelay");
props.add("jailStartingDirectory");
props.add("keepLastModified");
props.add("lazyStartProducer");
props.add("localWorkDirectory");
props.add("maxDepth");
props.add("maxMessagesPerPoll");
props.add("maximumReconnectAttempts");
props.add("minDepth");
props.add("move");
props.add("moveExisting");
props.add("moveExistingFileStrategy");
props.add("moveFailed");
props.add("noop");
props.add("onCompletionExceptionHandler");
props.add("passiveMode");
props.add("password");
props.add("pollStrategy");
props.add("port");
props.add("preMove");
props.add("preSort");
props.add("processStrategy");
props.add("readLock");
props.add("readLockCheckInterval");
props.add("readLockDeleteOrphanLockFiles");
props.add("readLockLoggingLevel");
props.add("readLockMarkerFile");
props.add("readLockMinAge");
props.add("readLockMinLength");
props.add("readLockRemoveOnCommit");
props.add("readLockRemoveOnRollback");
props.add("readLockTimeout");
props.add("reconnectDelay");
props.add("recursive");
props.add("repeatCount");
props.add("resumeDownload");
props.add("runLoggingLevel");
props.add("scheduledExecutorService");
props.add("scheduler");
props.add("schedulerProperties");
props.add("securityProtocol");
props.add("sendEmptyMessageWhenIdle");
props.add("sendNoop");
props.add("separator");
props.add("shuffle");
props.add("siteCommand");
props.add("soTimeout");
props.add("sortBy");
props.add("sorter");
props.add("sslContextParameters");
props.add("startScheduler");
props.add("stepwise");
props.add("streamDownload");
props.add("tempFileName");
props.add("tempPrefix");
props.add("throwExceptionOnConnectFailed");
props.add("timeUnit");
props.add("timeout");
props.add("transferLoggingIntervalSeconds");
props.add("transferLoggingLevel");
props.add("transferLoggingVerbose");
props.add("useFixedDelay");
props.add("useList");
props.add("username");
PROPERTY_NAMES = Collections.unmodifiableSet(props);
Set<String> secretProps = new HashSet<>(3);
secretProps.add("account");
secretProps.add("password");
secretProps.add("username");
SECRET_PROPERTY_NAMES = Collections.unmodifiableSet(secretProps);
Map<String, String> prefixes = new HashMap<>(5);
prefixes.put("ftpClientConfigParameters", "ftpClientConfig.");
prefixes.put("ftpClientKeyStoreParameters", "ftpClient.keyStore.");
prefixes.put("ftpClientParameters", "ftpClient.");
prefixes.put("ftpClientTrustStoreParameters", "ftpClient.trustStore.");
prefixes.put("schedulerProperties", "scheduler.");
MULTI_VALUE_PREFIXES = Collections.unmodifiableMap(prefixes);
}
@Override
public boolean isEnabled(String scheme) {
return "ftps".equals(scheme);
}
@Override
public String buildUri(String scheme, Map<String, Object> properties, boolean encode) throws URISyntaxException {
String syntax = scheme + BASE;
String uri = syntax;
Map<String, Object> copy = new HashMap<>(properties);
uri = buildPathParameter(syntax, uri, "host", null, true, copy);
uri = buildPathParameter(syntax, uri, "port", null, false, copy);
uri = buildPathParameter(syntax, uri, "directoryName", null, false, copy);
uri = buildQueryParameters(uri, copy, encode);
return uri;
}
@Override
public Set<String> propertyNames() {
return PROPERTY_NAMES;
}
@Override
public Set<String> secretPropertyNames() {
return SECRET_PROPERTY_NAMES;
}
@Override
public Map<String, String> multiValuePrefixes() {
return MULTI_VALUE_PREFIXES;
}
@Override
public boolean isLenientProperties() {
return false;
}
}
|
FtpsEndpointUriFactory
|
java
|
spring-projects__spring-framework
|
spring-core/src/main/java/org/springframework/aot/generate/GeneratedClasses.java
|
{
"start": 3399,
"end": 4360
}
|
class ____ be generated.
* @param featureName the name of the feature to associate with the
* generated class
* @param targetComponent the target component
* @param type a {@link Consumer} used to build the type
* @return an existing or newly generated class
*/
public GeneratedClass getOrAddForFeatureComponent(String featureName,
ClassName targetComponent, Consumer<TypeSpec.Builder> type) {
Assert.hasLength(featureName, "'featureName' must not be empty");
Assert.notNull(targetComponent, "'targetComponent' must not be null");
Assert.notNull(type, "'type' must not be null");
Owner owner = new Owner(this.classNameGenerator.getFeatureNamePrefix(), featureName, targetComponent);
GeneratedClass generatedClass = this.classesByOwner.computeIfAbsent(owner, key ->
createAndAddGeneratedClass(featureName, targetComponent, type));
generatedClass.assertSameType(type);
return generatedClass;
}
/**
* Get or add a generated
|
will
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/LastLongByTimestampGroupingAggregatorFunctionTests.java
|
{
"start": 2958,
"end": 4326
}
|
class ____ {
private final Set<Object> expected = new HashSet<>();
private final boolean first;
private long expectedTimestamp = 0;
ExpectedWork(boolean first) {
this.first = first;
}
void add(long timestamp, Object value) {
if (expected.isEmpty()) {
expectedTimestamp = timestamp;
expected.add(value);
} else if (first ? timestamp < expectedTimestamp : timestamp > expectedTimestamp) {
expectedTimestamp = timestamp;
expected.clear();
expected.add(value);
} else if (timestamp == expectedTimestamp) {
expected.add(value);
}
}
void check(Object v) {
if (expected.isEmpty()) {
if (v != null) {
throw new AssertionError("expected null but was " + v);
}
} else {
if (expected.contains(v) == false) {
String expectedMessage = expected.size() == 1
? "expected " + expected.iterator().next()
: "expected one of " + expected.stream().sorted().toList();
throw new AssertionError(expectedMessage + " but was " + v);
}
}
}
}
}
|
ExpectedWork
|
java
|
apache__logging-log4j2
|
log4j-core/src/main/java/org/apache/logging/log4j/core/async/AsyncLogger.java
|
{
"start": 3864,
"end": 3978
}
|
class ____ its signature is
// package-private, without any public implementation.
@BaselineIgnore("2.24.3")
public
|
in
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/deser/CustomDeserializersTest.java
|
{
"start": 3457,
"end": 3623
}
|
class ____ {
private final int id;
public CustomKey(int id) {this.id = id;}
public int getId() { return id; }
}
public static
|
CustomKey
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/batch/BatchEntityWithDisabledProxyTest.java
|
{
"start": 1563,
"end": 3469
}
|
class ____ {
@BeforeAll
public void setupData(SessionFactoryScope scope) {
Product cheese1 = new Product( 1l, "Cheese 1" );
Product cheese2 = new Product( 2l, "Cheese 2" );
Product cheese3 = new Product( 3l, "Cheese 3" );
Order order = new Order( 1l, "Hibernate" );
order.addProduct( cheese1 );
order.addProduct( cheese2 );
cheese1.setBestCheese( cheese1 );
cheese2.setBestCheese( cheese1 );
cheese3.setBestCheese( cheese1 );
cheese1.setReplacement( cheese2 );
cheese2.setReplacement( cheese1 );
cheese3.setReplacement( cheese1 );
scope.inTransaction( s -> {
s.persist( cheese1 );
s.persist( cheese2 );
s.persist( cheese3 );
s.persist( order );
} );
}
@Test
public void testGetOrder(SessionFactoryScope scope) {
scope.inSession( s -> {
s.getSessionFactory().getCache().evictAllRegions();
Order o = s.get( Order.class, 1 );
assertEquals( 2, o.getProducts().size() );
} );
}
@Test
public void testCriteriaQuery(SessionFactoryScope scope) {
scope.inSession( s -> {
s.getSessionFactory().getCache().evictAllRegions();
CriteriaBuilder cb = s.getCriteriaBuilder();
CriteriaQuery<Product> cr = cb.createQuery( Product.class );
Root<Product> root = cr.from( Product.class );
CriteriaQuery<Product> query = cr.select( root );
List<Product> products = s.createQuery( query ).getResultList();
assertEquals( 3, products.size() );
} );
}
@Test
@JiraKey("HHH-16966")
public void testGetReference(SessionFactoryScope scope) {
scope.inSession( s -> {
s.getSessionFactory().getCache().evictAllRegions();
Product product = s.getReference( Product.class, 3 );
assertFalse( Hibernate.isInitialized( product) );
Hibernate.initialize( product );
assertTrue( Hibernate.isInitialized( product) );
} );
}
@Entity(name = "Order")
@Table(name = "ORDER_TABLE")
public static
|
BatchEntityWithDisabledProxyTest
|
java
|
assertj__assertj-core
|
assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/api/abstract_/AbstractAssert_extracting_with_Function_and_AssertFactory_Test.java
|
{
"start": 5028,
"end": 5550
}
|
class ____ extends AbstractAssertWithComparator<TestAssert, Employee> {
TestAssert(Employee actual) {
super(actual, TestAssert.class);
}
// re-declare to allow test access
@Override
protected <T, ASSERT extends AbstractAssert<?, ?>> ASSERT extracting(Function<? super Employee, ? extends T> extractor,
AssertFactory<T, ASSERT> assertFactory) {
return super.extracting(extractor, assertFactory);
}
}
}
|
TestAssert
|
java
|
quarkusio__quarkus
|
extensions/mongodb-client/deployment/src/main/java/io/quarkus/mongodb/deployment/DevServicesMongoProcessor.java
|
{
"start": 14324,
"end": 17022
}
|
class ____ extends MongoDBContainer {
private final Integer fixedExposedPort;
private final boolean useSharedNetwork;
private final String hostName;
private static final int MONGODB_INTERNAL_PORT = 27017;
@SuppressWarnings("deprecation")
private QuarkusMongoDBContainer(Integer fixedExposedPort, String defaultNetworkId, boolean useSharedNetwork,
LaunchMode launchMode, String serviceName) {
this.fixedExposedPort = fixedExposedPort;
this.useSharedNetwork = useSharedNetwork;
this.hostName = ConfigureUtil.configureNetwork(this, defaultNetworkId, useSharedNetwork, "mongo");
this.withLabel(Labels.QUARKUS_DEV_SERVICE, launchMode == LaunchMode.DEVELOPMENT ? serviceName : null);
}
private QuarkusMongoDBContainer(DockerImageName dockerImageName, Integer fixedExposedPort,
String defaultNetworkId, boolean useSharedNetwork,
LaunchMode launchMode, String serviceName) {
super(dockerImageName);
this.fixedExposedPort = fixedExposedPort;
this.useSharedNetwork = useSharedNetwork;
this.hostName = ConfigureUtil.configureNetwork(this, defaultNetworkId, useSharedNetwork, "mongo");
this.withLabel(Labels.QUARKUS_DEV_SERVICE, launchMode == LaunchMode.DEVELOPMENT ? serviceName : null);
}
@Override
public void configure() {
super.configure();
if (useSharedNetwork) {
return;
}
if (fixedExposedPort != null) {
addFixedExposedPort(fixedExposedPort, MONGODB_INTERNAL_PORT);
} else {
addExposedPort(MONGODB_INTERNAL_PORT);
}
}
@Override
public String getReplicaSetUrl(String databaseName) {
if (useSharedNetwork) {
if (!isRunning()) { // done by the super method
throw new IllegalStateException("MongoDBContainer should be started first");
}
return String.format(
"mongodb://%s:%d/%s",
hostName,
MONGODB_INTERNAL_PORT,
databaseName);
} else {
return super.getReplicaSetUrl(databaseName);
}
}
public String getEffectiveHost() {
return useSharedNetwork ? hostName : super.getHost();
}
public Integer getEffectivePort() {
return useSharedNetwork ? MONGODB_INTERNAL_PORT : getMappedPort(MONGO_EXPOSED_PORT);
}
}
}
|
QuarkusMongoDBContainer
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/EqualsGetClassTest.java
|
{
"start": 4792,
"end": 5154
}
|
class ____ {
private int a;
@Override
public boolean equals(Object o) {
if (o == null) return false;
else return o.getClass() == getClass() && ((Test) o).a == a;
}
}
""")
.addOutputLines(
"Test.java",
"""
|
Test
|
java
|
micronaut-projects__micronaut-core
|
http-server-netty/src/test/java/io/micronaut/http/server/exceptions/response/DefaultHtmlErrorResponseBodyProviderTest.java
|
{
"start": 6293,
"end": 6598
}
|
class ____ {
@Singleton
MessageSource createMessageSource() {
return new ResourceBundleMessageSource("i18n.messages");
}
}
@Introspected
record Book(@NotBlank String title, @NotBlank String author, @Max(4032) int pages) {
}
static
|
MessageSourceFactory
|
java
|
apache__camel
|
components/camel-aws/camel-aws-secrets-manager/src/test/java/org/apache/camel/component/aws/secretsmanager/integration/SecretsManagerPropertiesSourceTestLocalstackIT.java
|
{
"start": 1301,
"end": 6852
}
|
class ____ extends AwsSecretsManagerBaseTest {
private static String secretVersion;
@BeforeAll
public static void setup() {
// Base secret
CreateSecretRequest.Builder builder = CreateSecretRequest.builder();
builder.name("test");
builder.secretString("hello");
getSecretManagerClient().createSecret(builder.build());
// Json multifield Secret
builder = CreateSecretRequest.builder();
builder.name("testJson");
builder.secretString("{\n" +
" \"username\": \"admin\",\n" +
" \"password\": \"password\",\n" +
" \"host\": \"myhost.com\"\n" +
"}");
getSecretManagerClient().createSecret(builder.build());
// Json multifield Secret
builder = CreateSecretRequest.builder();
builder.name("testJsonVersioned");
builder.secretString("{\n" +
" \"username\": \"admin\",\n" +
" \"password\": \"password\",\n" +
" \"host\": \"myhost.com\"\n" +
"}");
getSecretManagerClient().createSecret(builder.build());
// Json versioned multifield Secret
PutSecretValueRequest.Builder builderPutSecValue = PutSecretValueRequest.builder();
builderPutSecValue.secretId("testJsonVersioned");
builderPutSecValue.secretString("{\n" +
" \"username\": \"admin\",\n" +
" \"password\": \"admin123\",\n" +
" \"host\": \"myhost.com\"\n" +
"}");
PutSecretValueResponse resp = getSecretManagerClient().putSecretValue(builderPutSecValue.build());
secretVersion = resp.versionId();
}
@Test
public void testFunction() throws Exception {
context.getVaultConfiguration().aws().setAccessKey(getAccessKey());
context.getVaultConfiguration().aws().setSecretKey(getSecretKey());
context.getVaultConfiguration().aws().setRegion(getRegion());
context.getVaultConfiguration().aws().setOverrideEndpoint(true);
context.getVaultConfiguration().aws().setUriEndpointOverride(getUrlOverride());
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
from("direct:start").setBody(simple("{{aws:test}}")).to("mock:result");
}
});
context.start();
getMockEndpoint("mock:result").expectedBodiesReceived("hello");
template.sendBody("direct:start", "Hello World");
MockEndpoint.assertIsSatisfied(context);
}
@Test
public void testFunctionJson() throws Exception {
context.getVaultConfiguration().aws().setAccessKey(getAccessKey());
context.getVaultConfiguration().aws().setSecretKey(getSecretKey());
context.getVaultConfiguration().aws().setRegion(getRegion());
context.getVaultConfiguration().aws().setOverrideEndpoint(true);
context.getVaultConfiguration().aws().setUriEndpointOverride(getUrlOverride());
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
from("direct:username").setBody(simple("{{aws:testJson#username}}")).to("mock:bar");
from("direct:password").setBody(simple("{{aws:testJson#password}}")).to("mock:bar");
from("direct:host").setBody(simple("{{aws:testJson#host}}")).to("mock:bar");
}
});
context.start();
getMockEndpoint("mock:bar").expectedBodiesReceived("admin", "password", "myhost.com");
template.sendBody("direct:username", "Hello World");
template.sendBody("direct:password", "Hello World");
template.sendBody("direct:host", "Hello World");
MockEndpoint.assertIsSatisfied(context);
}
@Test
public void testFunctionJsonWithVersion() throws Exception {
context.getVaultConfiguration().aws().setAccessKey(getAccessKey());
context.getVaultConfiguration().aws().setSecretKey(getSecretKey());
context.getVaultConfiguration().aws().setRegion(getRegion());
context.getVaultConfiguration().aws().setOverrideEndpoint(true);
context.getVaultConfiguration().aws().setUriEndpointOverride(getUrlOverride());
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
from("direct:usernameVersioned").setBody(simple("{{aws:testJsonVersioned#username@" + secretVersion + "}}"))
.to("mock:put");
from("direct:passwordVersioned").setBody(simple("{{aws:testJsonVersioned#password@" + secretVersion + "}}"))
.to("mock:put");
from("direct:hostVersioned").setBody(simple("{{aws:testJsonVersioned#host@" + secretVersion + "}}"))
.to("mock:put");
}
});
context.start();
getMockEndpoint("mock:put").expectedBodiesReceived("admin", "admin123", "myhost.com");
template.sendBody("direct:usernameVersioned", "Hello World");
template.sendBody("direct:passwordVersioned", "Hello World");
template.sendBody("direct:hostVersioned", "Hello World");
MockEndpoint.assertIsSatisfied(context);
}
}
|
SecretsManagerPropertiesSourceTestLocalstackIT
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/cascade/CascadeMergeToProxyEntityCopyAllowedTest.java
|
{
"start": 8202,
"end": 8346
}
|
class ____ extends AbstractEntity {
}
@Entity(name = "Speaker")
@Inheritance(strategy = InheritanceType.TABLE_PER_CLASS)
public static
|
Project
|
java
|
google__auto
|
value/src/test/java/com/google/auto/value/processor/AutoValueCompilationTest.java
|
{
"start": 87604,
"end": 88711
}
|
interface ____ {",
" Builder setTitle(String title);",
" Builder setHasThumbnail(boolean t);",
" Item build();",
" }",
"}");
Compilation compilation =
javac()
.withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor())
.compile(javaFileObject);
assertThat(compilation)
.hadErrorContaining(
"Method setTitle does not correspond to a property method of foo.bar.Item")
.inFile(javaFileObject)
.onLineContaining("Builder setTitle(String title)");
assertThat(compilation)
.hadNoteContaining("hasThumbnail")
.inFile(javaFileObject)
.onLineContaining("Builder setTitle(String title)");
}
@Test
public void autoValueBuilderExtraSetter() {
JavaFileObject javaFileObject =
JavaFileObjects.forSourceLines(
"foo.bar.Baz",
"package foo.bar;",
"",
"import com.google.auto.value.AutoValue;",
"",
"@AutoValue",
"public abstract
|
Builder
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestResourceUsageEmulators.java
|
{
"start": 11185,
"end": 11468
}
|
class
____.setClass(ResourceUsageMatcher.RESOURCE_USAGE_EMULATION_PLUGINS,
TestResourceUsageEmulatorPlugin.class,
ResourceUsageEmulatorPlugin.class);
long currentTime = System.currentTimeMillis();
// initialize the matcher
|
conf
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/SpringRabbitMQEndpointBuilderFactory.java
|
{
"start": 87289,
"end": 95191
}
|
class ____ {
/**
* The internal instance of the builder used to access to all the
* methods representing the name of headers.
*/
private static final SpringRabbitMQHeaderNameBuilder INSTANCE = new SpringRabbitMQHeaderNameBuilder();
/**
* To override the endpoint configuration's routing key.
*
* The option is a: {@code String} type.
*
* Group: producer
*
* @return the name of the header {@code
* SpringRabbitmqRoutingOverrideKey}.
*/
public String springRabbitmqRoutingOverrideKey() {
return "CamelSpringRabbitmqRoutingOverrideKey";
}
/**
* To override the endpoint configuration's exchange name.
*
* The option is a: {@code String} type.
*
* Group: producer
*
* @return the name of the header {@code
* SpringRabbitmqExchangeOverrideName}.
*/
public String springRabbitmqExchangeOverrideName() {
return "CamelSpringRabbitmqExchangeOverrideName";
}
/**
* Whether the message was previously delivered and requeued.
*
* The option is a: {@code Boolean} type.
*
* Group: consumer
*
* @return the name of the header {@code SpringRabbitmqRedelivered}.
*/
public String springRabbitmqRedelivered() {
return "CamelSpringRabbitmqRedelivered";
}
/**
* Delivery tag for manual acknowledge mode.
*
* The option is a: {@code long} type.
*
* Group: consumer
*
* @return the name of the header {@code SpringRabbitmqDeliveryTag}.
*/
public String springRabbitmqDeliveryTag() {
return "CamelSpringRabbitmqDeliveryTag";
}
/**
* The exchange name that was used when publishing the message.
*
* The option is a: {@code String} type.
*
* Group: consumer
*
* @return the name of the header {@code SpringRabbitmqExchangeName}.
*/
public String springRabbitmqExchangeName() {
return "CamelSpringRabbitmqExchangeName";
}
/**
* The routing key that was used when publishing the message.
*
* The option is a: {@code String} type.
*
* Group: consumer
*
* @return the name of the header {@code SpringRabbitmqRoutingKey}.
*/
public String springRabbitmqRoutingKey() {
return "CamelSpringRabbitmqRoutingKey";
}
/**
* The message delivery mode.
*
* The option is a: {@code MessageDeliveryMode} type.
*
* Group: common
*
* @return the name of the header {@code SpringRabbitmqDeliveryMode}.
*/
public String springRabbitmqDeliveryMode() {
return "CamelSpringRabbitmqDeliveryMode";
}
/**
* Application-specific message type.
*
* The option is a: {@code String} type.
*
* Group: common
*
* @return the name of the header {@code SpringRabbitmqType}.
*/
public String springRabbitmqType() {
return "CamelSpringRabbitmqType";
}
/**
* The message content type.
*
* The option is a: {@code String} type.
*
* Group: common
*
* @return the name of the header {@code SpringRabbitmqContentType}.
*/
public String springRabbitmqContentType() {
return "CamelSpringRabbitmqContentType";
}
/**
* The message content length.
*
* The option is a: {@code long} type.
*
* Group: common
*
* @return the name of the header {@code SpringRabbitmqContentLength}.
*/
public String springRabbitmqContentLength() {
return "CamelSpringRabbitmqContentLength";
}
/**
* Content encoding used by applications.
*
* The option is a: {@code String} type.
*
* Group: common
*
* @return the name of the header {@code SpringRabbitmqContentEncoding}.
*/
public String springRabbitmqContentEncoding() {
return "CamelSpringRabbitmqContentEncoding";
}
/**
* Arbitrary message id.
*
* The option is a: {@code String} type.
*
* Group: common
*
* @return the name of the header {@code SpringRabbitmqMessageId}.
*/
public String springRabbitmqMessageId() {
return "CamelSpringRabbitmqMessageId";
}
/**
* Identifier to correlate RPC responses with requests.
*
* The option is a: {@code String} type.
*
* Group: common
*
* @return the name of the header {@code SpringRabbitmqCorrelationId}.
*/
public String springRabbitmqCorrelationId() {
return "CamelSpringRabbitmqCorrelationId";
}
/**
* Commonly used to name a callback queue.
*
* The option is a: {@code String} type.
*
* Group: common
*
* @return the name of the header {@code SpringRabbitmqReplyTo}.
*/
public String springRabbitmqReplyTo() {
return "CamelSpringRabbitmqReplyTo";
}
/**
* Per-message TTL.
*
* The option is a: {@code String} type.
*
* Group: common
*
* @return the name of the header {@code SpringRabbitmqExpiration}.
*/
public String springRabbitmqExpiration() {
return "CamelSpringRabbitmqExpiration";
}
/**
* Application-provided timestamp.
*
* The option is a: {@code Date} type.
*
* Group: common
*
* @return the name of the header {@code SpringRabbitmqTimestamp}.
*/
public String springRabbitmqTimestamp() {
return "CamelSpringRabbitmqTimestamp";
}
/**
* Validated user id.
*
* The option is a: {@code String} type.
*
* Group: common
*
* @return the name of the header {@code SpringRabbitmqUserId}.
*/
public String springRabbitmqUserId() {
return "CamelSpringRabbitmqUserId";
}
/**
* The application name.
*
* The option is a: {@code String} type.
*
* Group: common
*
* @return the name of the header {@code SpringRabbitmqAppId}.
*/
public String springRabbitmqAppId() {
return "CamelSpringRabbitmqAppId";
}
/**
* The message priority.
*
* The option is a: {@code Integer} type.
*
* Group: common
*
* @return the name of the header {@code SpringRabbitmqPriority}.
*/
public String springRabbitmqPriority() {
return "CamelSpringRabbitmqPriority";
}
/**
* The cluster id.
*
* The option is a: {@code String} type.
*
* Group: common
*
* @return the name of the header {@code SpringRabbitmqClusterId}.
*/
public String springRabbitmqClusterId() {
return "CamelSpringRabbitmqClusterId";
}
}
static SpringRabbitMQEndpointBuilder endpointBuilder(String componentName, String path) {
|
SpringRabbitMQHeaderNameBuilder
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/stream/NestedCollectionFetchStreamTest.java
|
{
"start": 3168,
"end": 3279
}
|
class ____ {
@Id
@GeneratedValue
private Long id;
}
@Entity( name = "EntityA" )
public static
|
BasicEntity
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialIntersectsGeoPointDocValuesAndConstantGridEvaluator.java
|
{
"start": 1183,
"end": 3904
}
|
class ____ implements EvalOperator.ExpressionEvaluator {
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(SpatialIntersectsGeoPointDocValuesAndConstantGridEvaluator.class);
private final Source source;
private final EvalOperator.ExpressionEvaluator encodedPoints;
private final long gridId;
private final DataType gridType;
private final DriverContext driverContext;
private Warnings warnings;
public SpatialIntersectsGeoPointDocValuesAndConstantGridEvaluator(Source source,
EvalOperator.ExpressionEvaluator encodedPoints, long gridId, DataType gridType,
DriverContext driverContext) {
this.source = source;
this.encodedPoints = encodedPoints;
this.gridId = gridId;
this.gridType = gridType;
this.driverContext = driverContext;
}
@Override
public Block eval(Page page) {
try (LongBlock encodedPointsBlock = (LongBlock) encodedPoints.eval(page)) {
return eval(page.getPositionCount(), encodedPointsBlock);
}
}
@Override
public long baseRamBytesUsed() {
long baseRamBytesUsed = BASE_RAM_BYTES_USED;
baseRamBytesUsed += encodedPoints.baseRamBytesUsed();
return baseRamBytesUsed;
}
public BooleanBlock eval(int positionCount, LongBlock encodedPointsBlock) {
try(BooleanBlock.Builder result = driverContext.blockFactory().newBooleanBlockBuilder(positionCount)) {
position: for (int p = 0; p < positionCount; p++) {
boolean allBlocksAreNulls = true;
if (!encodedPointsBlock.isNull(p)) {
allBlocksAreNulls = false;
}
if (allBlocksAreNulls) {
result.appendNull();
continue position;
}
try {
SpatialIntersects.processGeoPointDocValuesAndConstantGrid(result, p, encodedPointsBlock, this.gridId, this.gridType);
} catch (IllegalArgumentException | IOException e) {
warnings().registerException(e);
result.appendNull();
}
}
return result.build();
}
}
@Override
public String toString() {
return "SpatialIntersectsGeoPointDocValuesAndConstantGridEvaluator[" + "encodedPoints=" + encodedPoints + ", gridId=" + gridId + ", gridType=" + gridType + "]";
}
@Override
public void close() {
Releasables.closeExpectNoException(encodedPoints);
}
private Warnings warnings() {
if (warnings == null) {
this.warnings = Warnings.createWarnings(
driverContext.warningsMode(),
source.source().getLineNumber(),
source.source().getColumnNumber(),
source.text()
);
}
return warnings;
}
static
|
SpatialIntersectsGeoPointDocValuesAndConstantGridEvaluator
|
java
|
spring-projects__spring-framework
|
spring-core/src/main/java/org/springframework/cglib/reflect/ConstructorDelegate.java
|
{
"start": 1452,
"end": 1628
}
|
class ____ {
private static final ConstructorKey KEY_FACTORY =
(ConstructorKey)KeyFactory.create(ConstructorKey.class, KeyFactory.CLASS_BY_NAME);
|
ConstructorDelegate
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/throwable/ThrowableClassifierTest.java
|
{
"start": 4519,
"end": 4649
}
|
class ____
extends TestPartitionDataMissingErrorException {}
private static
|
TestPartitionDataMissingErrorSubException
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/internal/iterables/Iterables_assertAreNot_Test.java
|
{
"start": 1495,
"end": 2603
}
|
class ____ extends IterablesWithConditionsBaseTest {
@Test
void should_pass_if_each_element_not_satisfies_condition() {
actual = newArrayList("Solo", "Leia");
iterables.assertAreNot(someInfo(), actual, jedi);
verify(conditions).assertIsNotNull(jedi);
}
@Test
void should_throw_error_if_condition_is_null() {
assertThatNullPointerException().isThrownBy(() -> {
actual = newArrayList("Solo", "Leia");
iterables.assertAreNot(someInfo(), actual, null);
}).withMessage("The condition to evaluate should not be null");
verify(conditions).assertIsNotNull(null);
}
@Test
void should_fail_if_condition_is_met() {
testCondition.shouldMatch(false);
AssertionInfo info = someInfo();
actual = newArrayList("Solo", "Leia", "Yoda");
Throwable error = catchThrowable(() -> iterables.assertAreNot(someInfo(), actual, jedi));
assertThat(error).isInstanceOf(AssertionError.class);
verify(conditions).assertIsNotNull(jedi);
verify(failures).failure(info, elementsShouldNotBe(actual, newArrayList("Yoda"), jedi));
}
}
|
Iterables_assertAreNot_Test
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/action/datastreams/UpdateDataStreamMappingsAction.java
|
{
"start": 2070,
"end": 4752
}
|
class ____ extends AcknowledgedRequest<Request> implements IndicesRequest.Replaceable {
private final CompressedXContent mappings;
private final boolean dryRun;
private String[] dataStreamNames = Strings.EMPTY_ARRAY;
public Request(CompressedXContent mappings, boolean dryRun, TimeValue masterNodeTimeout, TimeValue ackTimeout) {
super(masterNodeTimeout, ackTimeout);
this.mappings = mappings;
this.dryRun = dryRun;
}
@Override
public Request indices(String... dataStreamNames) {
this.dataStreamNames = dataStreamNames;
return this;
}
public CompressedXContent getMappings() {
return mappings;
}
@Override
public boolean includeDataStreams() {
return true;
}
public boolean isDryRun() {
return dryRun;
}
public Request(StreamInput in) throws IOException {
super(in);
this.dataStreamNames = in.readStringArray();
this.mappings = CompressedXContent.readCompressedString(in);
this.dryRun = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeStringArray(dataStreamNames);
mappings.writeTo(out);
out.writeBoolean(dryRun);
}
@Override
public String[] indices() {
return dataStreamNames;
}
@Override
public IndicesOptions indicesOptions() {
return IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED;
}
@Override
public Task createTask(long id, String type, String action, TaskId parentTaskId, Map<String, String> headers) {
return new CancellableTask(id, type, action, "", parentTaskId, headers);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Request request = (Request) o;
return Arrays.equals(dataStreamNames, request.dataStreamNames)
&& mappings.equals(request.mappings)
&& dryRun == request.dryRun
&& Objects.equals(masterNodeTimeout(), request.masterNodeTimeout())
&& Objects.equals(ackTimeout(), request.ackTimeout());
}
@Override
public int hashCode() {
return Objects.hash(Arrays.hashCode(dataStreamNames), mappings, dryRun, masterNodeTimeout(), ackTimeout());
}
}
public static
|
Request
|
java
|
apache__flink
|
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/FencedRpcGateway.java
|
{
"start": 1032,
"end": 1230
}
|
interface ____<F extends Serializable> extends RpcGateway {
/**
* Get the current fencing token.
*
* @return current fencing token
*/
F getFencingToken();
}
|
FencedRpcGateway
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/cdi/testsupport/CdiContainerExtension.java
|
{
"start": 568,
"end": 3070
}
|
class ____ implements TestInstancePostProcessor, BeforeEachCallback {
private static final String CONTEXT_KEY = CdiContainerScope.class.getName();
/**
* Intended for use from external consumers. Will never create a scope, just
* attempt to consume an already created and stored one
*/
public static CdiContainerScope findCdiContainerScope(Object testInstance, ExtensionContext context) {
final ExtensionContext.Store store = locateExtensionStore( testInstance, context );
final CdiContainerScope existing = (CdiContainerScope) store.get( CONTEXT_KEY );
if ( existing != null ) {
return existing;
}
throw new RuntimeException( "Could not locate CdiContainerScope : " + context.getDisplayName() );
}
@Override
public void postProcessTestInstance(Object testInstance, ExtensionContext context) {
final Optional<CdiContainer> annRef = AnnotationSupport.findAnnotation(
context.getRequiredTestClass(),
CdiContainer.class
);
if( annRef.isPresent() ) {
final ExtensionContext.Store extensionStore = locateExtensionStore( testInstance, context );
var scope = new CdiContainerScope( () -> {
final SeContainerInitializer initializer = SeContainerInitializer.newInstance();
if ( !annRef.get().enableDiscovery() ) {
initializer.disableDiscovery();
}
initializer.addBeanClasses( annRef.get().beanClasses() );
return initializer.initialize();
} );
extensionStore.put( CONTEXT_KEY, scope );
}
}
@Override
public void beforeEach(ExtensionContext context) throws Exception {
final Optional<CdiContainer> annRef = AnnotationSupport.findAnnotation(
context.getRequiredTestMethod(),
CdiContainer.class
);
if ( annRef.isEmpty() ) {
// assume the annotations are defined on the class-level...
return;
}
final ExtensionContext.Store extensionStore = locateExtensionStore( context.getRequiredTestInstance(), context );
var scope = new CdiContainerScope( () -> {
final SeContainerInitializer initializer = SeContainerInitializer.newInstance();
if ( !annRef.get().enableDiscovery() ) {
initializer.disableDiscovery();
}
initializer.addBeanClasses( annRef.get().beanClasses() );
return initializer.initialize();
} );
extensionStore.put( CONTEXT_KEY, scope );
}
private static ExtensionContext.Store locateExtensionStore(Object testInstance, ExtensionContext context) {
return JUnitHelper.locateExtensionStore( CdiContainerExtension.class, context, testInstance );
}
}
|
CdiContainerExtension
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/AlreadyCheckedTest.java
|
{
"start": 7358,
"end": 7718
}
|
class ____ {
public void test(boolean a) {
a = true;
if (a) {
} else if (a) {
}
}
}
""")
.doTest();
}
@Test
public void ternaryWithinIf() {
helper
.addSourceLines(
"Test.java",
"""
|
Test
|
java
|
quarkusio__quarkus
|
integration-tests/virtual-threads/virtual-threads-disabled/src/main/java/io/quarkus/virtual/disabled/FilteredResource.java
|
{
"start": 357,
"end": 1003
}
|
class ____ {
@Inject
Counter counter;
@GET
@RunOnVirtualThread
public Response filtered() {
VirtualThreadsAssertions.assertWorkerOrEventLoopThread();
// Request scope
assert counter.increment() == 2;
// DC
assert Vertx.currentContext().getLocal("filter").equals("test");
Vertx.currentContext().putLocal("test", "test test");
// MDC
assert MDC.get("mdc").equals("test");
MDC.put("mdc", "test test");
return Response.ok()
.header("X-filter", "true")
.entity("ok")
.build();
}
}
|
FilteredResource
|
java
|
apache__logging-log4j2
|
log4j-core-test/src/test/java/org/apache/logging/log4j/core/jackson/LevelMixInYamlTest.java
|
{
"start": 911,
"end": 1075
}
|
class ____ extends LevelMixInTest {
@Override
protected ObjectMapper newObjectMapper() {
return new Log4jYamlObjectMapper();
}
}
|
LevelMixInYamlTest
|
java
|
apache__flink
|
flink-table/flink-table-planner/src/test/java/org/apache/flink/connector/source/DynamicFilteringValuesSourceReader.java
|
{
"start": 1898,
"end": 7312
}
|
class ____
implements SourceReader<RowData, ValuesSourcePartitionSplit> {
private static final Logger LOG =
LoggerFactory.getLogger(DynamicFilteringValuesSourceReader.class);
/** The context for this reader, to communicate with the enumerator. */
private final SourceReaderContext context;
/** The availability future. This reader is available as soon as a split is assigned. */
private CompletableFuture<Void> availability;
private final TypeSerializer<RowData> serializer;
private final Map<Map<String, String>, byte[]> serializedElements;
private final Map<Map<String, String>, Integer> counts;
private final Queue<ValuesSourcePartitionSplit> remainingSplits;
private transient ValuesSourcePartitionSplit currentSplit;
private transient Iterator<RowData> iterator;
private transient boolean noMoreSplits;
private transient boolean reachedInfiniteEnd;
public DynamicFilteringValuesSourceReader(
Map<Map<String, String>, byte[]> serializedElements,
Map<Map<String, String>, Integer> counts,
TypeSerializer<RowData> serializer,
SourceReaderContext context) {
this.serializedElements = checkNotNull(serializedElements);
this.counts = checkNotNull(counts);
this.serializer = serializer;
this.context = checkNotNull(context);
this.availability = new CompletableFuture<>();
this.remainingSplits = new ArrayDeque<>();
}
@Override
public void start() {
// request a split if we don't have one
if (remainingSplits.isEmpty()) {
context.sendSplitRequest();
}
}
@Override
public InputStatus pollNext(ReaderOutput<RowData> output) {
if (reachedInfiniteEnd) {
return InputStatus.NOTHING_AVAILABLE;
}
if (iterator != null) {
if (iterator.hasNext()) {
output.collect(iterator.next());
return InputStatus.MORE_AVAILABLE;
} else {
finishSplit();
}
}
return tryMoveToNextSplit();
}
private void finishSplit() {
iterator = null;
currentSplit = null;
// request another split if no other is left
// we do this only here in the finishSplit part to avoid requesting a split
// whenever the reader is polled and doesn't currently have a split
if (remainingSplits.isEmpty() && !noMoreSplits) {
context.sendSplitRequest();
}
}
private InputStatus tryMoveToNextSplit() {
currentSplit = remainingSplits.poll();
if (currentSplit != null) {
if (currentSplit.isInfinite()) {
this.reachedInfiniteEnd = true;
resetAvailability();
return InputStatus.NOTHING_AVAILABLE;
} else {
Map<String, String> partition = currentSplit.getPartition();
List<RowData> list =
deserialize(serializedElements.get(partition), counts.get(partition));
iterator = list.iterator();
return InputStatus.MORE_AVAILABLE;
}
} else if (noMoreSplits) {
return InputStatus.END_OF_INPUT;
} else {
resetAvailability();
return InputStatus.NOTHING_AVAILABLE;
}
}
private void resetAvailability() {
// ensure we are not called in a loop by resetting the availability future
if (availability.isDone()) {
availability = new CompletableFuture<>();
}
}
private List<RowData> deserialize(byte[] data, int count) {
List<RowData> list = new ArrayList<>();
try (ByteArrayInputStream bais = new ByteArrayInputStream(data)) {
final DataInputView input = new DataInputViewStreamWrapper(bais);
for (int i = 0; i < count; ++i) {
RowData element = serializer.deserialize(input);
list.add(element);
}
} catch (Exception e) {
throw new TableException(
"Failed to deserialize an element from the source. "
+ "If you are using user-defined serialization (Value and Writable types), check the "
+ "serialization functions.\nSerializer is "
+ serializer,
e);
}
return list;
}
@Override
public List<ValuesSourcePartitionSplit> snapshotState(long checkpointId) {
return Collections.emptyList();
}
@Override
public CompletableFuture<Void> isAvailable() {
return availability;
}
@Override
public void addSplits(List<ValuesSourcePartitionSplit> splits) {
remainingSplits.addAll(splits);
// set availability so that pollNext is actually called
availability.complete(null);
}
@Override
public void notifyNoMoreSplits() {
noMoreSplits = true;
// set availability so that pollNext is actually called
availability.complete(null);
}
@Override
public void close() throws Exception {}
@Override
public void notifyCheckpointComplete(long checkpointId) throws Exception {
LOG.info("checkpoint {} finished.", checkpointId);
}
}
|
DynamicFilteringValuesSourceReader
|
java
|
apache__kafka
|
clients/src/test/java/org/apache/kafka/common/utils/Serializer.java
|
{
"start": 1047,
"end": 2220
}
|
class ____ {
public static byte[] serialize(Object toSerialize) throws IOException {
ByteArrayOutputStream arrayOutputStream = new ByteArrayOutputStream();
try (ObjectOutputStream ooStream = new ObjectOutputStream(arrayOutputStream)) {
ooStream.writeObject(toSerialize);
return arrayOutputStream.toByteArray();
}
}
public static Object deserialize(InputStream inputStream) throws IOException, ClassNotFoundException {
try (ObjectInputStream objectInputStream = new ObjectInputStream(inputStream)) {
return objectInputStream.readObject();
}
}
public static Object deserialize(byte[] byteArray) throws IOException, ClassNotFoundException {
ByteArrayInputStream arrayInputStream = new ByteArrayInputStream(byteArray);
return deserialize(arrayInputStream);
}
public static Object deserialize(String fileName) throws IOException, ClassNotFoundException {
ClassLoader classLoader = Serializer.class.getClassLoader();
InputStream fileStream = classLoader.getResourceAsStream(fileName);
return deserialize(fileStream);
}
}
|
Serializer
|
java
|
netty__netty
|
handler/src/test/java/io/netty/handler/ssl/SslContextBuilderTest.java
|
{
"start": 22959,
"end": 23984
}
|
class ____ extends SecureRandom {
private int count;
@Override
public int nextInt() {
count++;
return super.nextInt();
}
@Override
public int nextInt(int bound) {
count++;
return super.nextInt(bound);
}
@Override
public long nextLong() {
count++;
return super.nextLong();
}
@Override
public boolean nextBoolean() {
count++;
return super.nextBoolean();
}
@Override
public float nextFloat() {
count++;
return super.nextFloat();
}
@Override
public double nextDouble() {
count++;
return super.nextDouble();
}
@Override
public double nextGaussian() {
count++;
return super.nextGaussian();
}
public int getCount() {
return count;
}
}
}
|
SpySecureRandom
|
java
|
micronaut-projects__micronaut-core
|
aop/src/main/java/io/micronaut/aop/runtime/RuntimeProxy.java
|
{
"start": 1395,
"end": 2129
}
|
class ____ call super.foo(..) to invoke the original method since
* this is more efficient and allows proxied methods to work for calls from within the class.</p>
*
* <p>However certain cases it may be useful to be able to to instead proxy all public methods of the original implementation.
* By setting the value here to <code>true</code> the {@link io.micronaut.aop.Interceptor} can specify that it requires proxying of the class</p>
*
* <p>Generated subclasses will implement {@link io.micronaut.aop.InterceptedProxy} if this attribute is set to true</p>
*
* @return True if the original implementation should be proxied. Defaults to false.
*/
boolean proxyTarget() default false;
}
|
and
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/slot/FileSlotAllocationSnapshotPersistenceService.java
|
{
"start": 1375,
"end": 5465
}
|
class ____
implements SlotAllocationSnapshotPersistenceService {
private static final Logger LOG =
LoggerFactory.getLogger(FileSlotAllocationSnapshotPersistenceService.class);
private static final String SUFFIX = ".bin";
private final File slotAllocationSnapshotDirectory;
public FileSlotAllocationSnapshotPersistenceService(File slotAllocationSnapshotDirectory) {
this.slotAllocationSnapshotDirectory = slotAllocationSnapshotDirectory;
if (!slotAllocationSnapshotDirectory.exists()
&& !slotAllocationSnapshotDirectory.mkdirs()) {
throw new IllegalStateException(
String.format(
"Cannot create the slot allocation snapshot directory %s.",
slotAllocationSnapshotDirectory));
}
}
@Override
public void persistAllocationSnapshot(SlotAllocationSnapshot slotAllocationSnapshot)
throws IOException {
// Let's try to write the slot allocations on file
final File slotAllocationSnapshotFile =
slotAllocationFile(slotAllocationSnapshot.getSlotID().getSlotNumber());
try (ObjectOutputStream oos =
new ObjectOutputStream(new FileOutputStream(slotAllocationSnapshotFile))) {
oos.writeObject(slotAllocationSnapshot);
LOG.debug(
"Successfully written allocation state metadata file {} for job {} and allocation {}.",
slotAllocationSnapshotFile.toPath(),
slotAllocationSnapshot.getJobId(),
slotAllocationSnapshot.getAllocationId());
}
}
private File slotAllocationFile(int slotIndex) {
return new File(
slotAllocationSnapshotDirectory.getAbsolutePath(), slotIndexToFilename(slotIndex));
}
private static String slotIndexToFilename(int slotIndex) {
return slotIndex + SUFFIX;
}
private static int filenameToSlotIndex(String filename) {
return Integer.parseInt(filename.substring(0, filename.length() - SUFFIX.length()));
}
@Override
public void deleteAllocationSnapshot(int slotIndex) {
// Let's try to write the slot allocations on file
final File slotAllocationSnapshotFile = slotAllocationFile(slotIndex);
try {
FileUtils.deleteFileOrDirectory(slotAllocationSnapshotFile);
LOG.debug(
"Successfully deleted allocation state metadata file {}.",
slotAllocationSnapshotFile.toPath());
} catch (IOException ioe) {
LOG.warn(
"Cannot delete the local allocations state file {}.",
slotAllocationSnapshotFile.toPath(),
ioe);
}
}
@Override
public Collection<SlotAllocationSnapshot> loadAllocationSnapshots() {
// Let's try to populate the slot allocation from local file
final File[] slotAllocationFiles = slotAllocationSnapshotDirectory.listFiles();
if (slotAllocationFiles == null) {
LOG.debug("No allocation files to load.");
return Collections.emptyList();
}
Collection<SlotAllocationSnapshot> slotAllocationSnapshots =
new ArrayList<>(slotAllocationFiles.length);
for (File allocationFile : slotAllocationFiles) {
try (ObjectInputStream ois =
new ObjectInputStream(new FileInputStream(allocationFile))) {
slotAllocationSnapshots.add((SlotAllocationSnapshot) ois.readObject());
} catch (IOException | ClassNotFoundException e) {
LOG.debug(
"Cannot read the local allocations state file {}. Deleting it now.",
allocationFile.toPath(),
e);
deleteAllocationSnapshot(filenameToSlotIndex(allocationFile.getName()));
}
}
return slotAllocationSnapshots;
}
}
|
FileSlotAllocationSnapshotPersistenceService
|
java
|
playframework__playframework
|
web/play-java-forms/src/main/java/play/data/FormFactory.java
|
{
"start": 1819,
"end": 2199
}
|
class ____ map to a form.
* @param groups the classes of groups.
* @return a new form that wraps the specified class.
*/
public <T> Form<T> form(String name, Class<T> clazz, Class<?>... groups) {
return new Form<>(name, clazz, groups, messagesApi, formatters, validatorFactory, config);
}
/**
* @param <T> the type of value in the form.
* @param clazz the
|
to
|
java
|
redisson__redisson
|
redisson/src/main/java/org/redisson/client/protocol/decoder/SlotsDecoder.java
|
{
"start": 1073,
"end": 2317
}
|
class ____ implements MultiDecoder<Object> {
@Override
public Decoder<Object> getDecoder(Codec codec, int paramNum, State state, long size) {
return StringCodec.INSTANCE.getValueDecoder();
}
@Override
public Object decode(List<Object> parts, State state) {
if (!parts.isEmpty() && parts.get(0) instanceof List) {
Map<ClusterSlotRange, Set<String>> result = new HashMap<>();
List<List<Object>> rows = (List<List<Object>>) (Object) parts;
for (List<Object> row : rows) {
Iterator<Object> iterator = row.iterator();
Long startSlot = (Long) iterator.next();
Long endSlot = (Long) iterator.next();
ClusterSlotRange range = new ClusterSlotRange(startSlot.intValue(), endSlot.intValue());
Set<String> addresses = new HashSet<>();
while (iterator.hasNext()) {
List<Object> addressParts = (List<Object>) iterator.next();
addresses.add(addressParts.get(0) + ":" + addressParts.get(1));
}
result.put(range, addresses);
}
return result;
}
return parts;
}
}
|
SlotsDecoder
|
java
|
elastic__elasticsearch
|
test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java
|
{
"start": 3966,
"end": 5091
}
|
enum ____ {
;
public static SearchResponseBuilder response() {
return new SearchResponseBuilder();
}
public static SearchResponseBuilder response(SearchHits hits) {
return new SearchResponseBuilder().searchHits(hits).numReducePhases(1).shards(1, 1, 0).tookInMillis(100);
}
public static SearchResponse successfulResponse(SearchHits hits) {
return response(hits).build();
}
public static SearchResponse emptyWithTotalHits(
String scrollId,
int totalShards,
int successfulShards,
int skippedShards,
long tookInMillis,
ShardSearchFailure[] shardFailures,
SearchResponse.Clusters clusters
) {
return new SearchResponse(
SearchHits.EMPTY_WITH_TOTAL_HITS,
null,
null,
false,
null,
null,
1,
scrollId,
totalShards,
successfulShards,
skippedShards,
tookInMillis,
shardFailures,
clusters
);
}
public static
|
SearchResponseUtils
|
java
|
apache__camel
|
components/camel-netty-http/src/test/java/org/apache/camel/component/netty/http/NettyMixedCaseHttpPathTest.java
|
{
"start": 1089,
"end": 1914
}
|
class ____ extends BaseNettyTest {
@Test
public void testMixedCase() throws Exception {
getMockEndpoint("mock:input").expectedHeaderReceived(Exchange.HTTP_PATH, "/HelloWorld");
String out = template.requestBody("netty-http:http://localhost:{{port}}/SHoppING/HelloWorld", "Camel", String.class);
assertEquals("Bye Camel", out);
MockEndpoint.assertIsSatisfied(context);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("netty-http:http://0.0.0.0:{{port}}/Shopping?matchOnUriPrefix=true")
.to("mock:input")
.transform(body().prepend("Bye "));
}
};
}
}
|
NettyMixedCaseHttpPathTest
|
java
|
spring-projects__spring-boot
|
module/spring-boot-jdbc/src/test/java/org/springframework/boot/jdbc/DataSourceBuilderRuntimeHintsTests.java
|
{
"start": 1105,
"end": 2387
}
|
class ____ {
@Test
void shouldRegisterDataSourceConstructors() {
ReflectionHints hints = registerHints();
Stream
.of(com.mchange.v2.c3p0.ComboPooledDataSource.class, com.zaxxer.hikari.HikariDataSource.class,
oracle.jdbc.datasource.OracleDataSource.class, oracle.ucp.jdbc.PoolDataSource.class,
org.apache.commons.dbcp2.BasicDataSource.class, org.apache.tomcat.jdbc.pool.DataSource.class,
org.h2.jdbcx.JdbcDataSource.class, org.postgresql.ds.PGSimpleDataSource.class,
org.springframework.jdbc.datasource.SimpleDriverDataSource.class,
org.vibur.dbcp.ViburDBCPDataSource.class)
.forEach((dataSourceType) -> {
TypeHint typeHint = hints.getTypeHint(dataSourceType);
assertThat(typeHint).withFailMessage(() -> "No hints found for data source type " + dataSourceType)
.isNotNull();
assertThat(typeHint).isNotNull();
Set<MemberCategory> memberCategories = typeHint.getMemberCategories();
assertThat(memberCategories).containsExactly(MemberCategory.INVOKE_PUBLIC_CONSTRUCTORS);
});
}
private ReflectionHints registerHints() {
RuntimeHints hints = new RuntimeHints();
new DataSourceBuilderRuntimeHints().registerHints(hints, getClass().getClassLoader());
return hints.reflection();
}
}
|
DataSourceBuilderRuntimeHintsTests
|
java
|
apache__camel
|
components/camel-spring-parent/camel-spring-ai/camel-spring-ai-chat/src/test/java/org/apache/camel/component/springai/chat/SpringAiChatVectorStoreIT.java
|
{
"start": 1694,
"end": 5142
}
|
class ____ extends OllamaTestSupport {
private VectorStore vectorStore;
@Override
protected void setupResources() throws Exception {
super.setupResources();
// Create embedding model for vector store
OllamaApi ollamaApi = OllamaApi.builder()
.baseUrl(OLLAMA.baseUrl())
.build();
EmbeddingModel embeddingModel = OllamaEmbeddingModel.builder()
.ollamaApi(ollamaApi)
.defaultOptions(OllamaEmbeddingOptions.builder()
.model("embeddinggemma:300m")
.build())
.build();
// Create and populate vector store
vectorStore = SimpleVectorStore.builder(embeddingModel)
.build();
List<Document> documents = List.of(
new Document(
"Apache Camel is an integration framework created in 2007.",
Map.of("source", "camel-docs")),
new Document(
"Camel supports Enterprise Integration Patterns.",
Map.of("source", "camel-docs")),
new Document(
"Spring AI provides AI integration for Spring applications.",
Map.of("source", "spring-ai-docs")),
new Document(
"Vector stores are used for similarity search in RAG applications.",
Map.of("source", "ai-docs")));
vectorStore.add(documents);
}
@Test
public void testVectorStoreAutoRetrieval() {
String response = template().requestBody("direct:vector-rag",
"When was Apache Camel created? Answer with just the year.", String.class);
assertThat(response).isNotNull();
assertThat(response).contains("2007");
}
@Test
public void testVectorStoreWithCustomTopK() {
String response = template().requestBody("direct:vector-rag-top-k",
"What patterns does Camel support? Answer in 3 words.", String.class);
assertThat(response).isNotNull();
assertThat(response.toLowerCase()).containsAnyOf("integration", "enterprise", "pattern");
}
@Test
public void testVectorStoreDoesNotRetrieveIrrelevantContext() {
// Query about something not in the vector store
String response = template().requestBody("direct:vector-rag",
"What is quantum computing? If you don't know, say 'I don't know'.", String.class);
assertThat(response).isNotNull();
// The response might still try to answer, but should not contain specific details from our docs
}
@Override
protected RouteBuilder createRouteBuilder() throws Exception {
return new RouteBuilder() {
@Override
public void configure() throws Exception {
bindChatModel(this.getCamelContext());
this.getCamelContext().getRegistry().bind("vectorStore", vectorStore);
from("direct:vector-rag")
.to("spring-ai-chat:vectorstore?chatModel=#chatModel&vectorStore=#vectorStore&similarityThreshold=0.4");
from("direct:vector-rag-top-k")
.to("spring-ai-chat:vectorstore?chatModel=#chatModel&vectorStore=#vectorStore&topK=2&similarityThreshold=0.4");
}
};
}
}
|
SpringAiChatVectorStoreIT
|
java
|
spring-projects__spring-framework
|
spring-core/src/main/java/org/springframework/util/MimeTypeUtils.java
|
{
"start": 1347,
"end": 12662
}
|
class ____ {
private static final byte[] BOUNDARY_CHARS =
new byte[] {'-', '_', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', 'a', 'b', 'c', 'd', 'e', 'f', 'g',
'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A',
'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U',
'V', 'W', 'X', 'Y', 'Z'};
/**
* Public constant mime type that includes all media ranges (i.e. "*/*").
*/
public static final MimeType ALL;
/**
* A String equivalent of {@link MimeTypeUtils#ALL}.
*/
public static final String ALL_VALUE = "*/*";
/**
* Public constant mime type for {@code application/graphql+json}.
* @since 5.3.19
* @see <a href="https://github.com/graphql/graphql-over-http">GraphQL over HTTP spec</a>
*/
public static final MimeType APPLICATION_GRAPHQL;
/**
* A String equivalent of {@link MimeTypeUtils#APPLICATION_GRAPHQL}.
* @since 5.3.19
*/
public static final String APPLICATION_GRAPHQL_VALUE = "application/graphql+json";
/**
* Public constant mime type for {@code application/json}.
*/
public static final MimeType APPLICATION_JSON;
/**
* A String equivalent of {@link MimeTypeUtils#APPLICATION_JSON}.
*/
public static final String APPLICATION_JSON_VALUE = "application/json";
/**
* Public constant mime type for {@code application/octet-stream}.
* */
public static final MimeType APPLICATION_OCTET_STREAM;
/**
* A String equivalent of {@link MimeTypeUtils#APPLICATION_OCTET_STREAM}.
*/
public static final String APPLICATION_OCTET_STREAM_VALUE = "application/octet-stream";
/**
* Public constant mime type for {@code application/xml}.
*/
public static final MimeType APPLICATION_XML;
/**
* A String equivalent of {@link MimeTypeUtils#APPLICATION_XML}.
*/
public static final String APPLICATION_XML_VALUE = "application/xml";
/**
* Public constant mime type for {@code image/gif}.
*/
public static final MimeType IMAGE_GIF;
/**
* A String equivalent of {@link MimeTypeUtils#IMAGE_GIF}.
*/
public static final String IMAGE_GIF_VALUE = "image/gif";
/**
* Public constant mime type for {@code image/jpeg}.
*/
public static final MimeType IMAGE_JPEG;
/**
* A String equivalent of {@link MimeTypeUtils#IMAGE_JPEG}.
*/
public static final String IMAGE_JPEG_VALUE = "image/jpeg";
/**
* Public constant mime type for {@code image/png}.
*/
public static final MimeType IMAGE_PNG;
/**
* A String equivalent of {@link MimeTypeUtils#IMAGE_PNG}.
*/
public static final String IMAGE_PNG_VALUE = "image/png";
/**
* Public constant mime type for {@code text/html}.
* */
public static final MimeType TEXT_HTML;
/**
* A String equivalent of {@link MimeTypeUtils#TEXT_HTML}.
*/
public static final String TEXT_HTML_VALUE = "text/html";
/**
* Public constant mime type for {@code text/plain}.
* */
public static final MimeType TEXT_PLAIN;
/**
* A String equivalent of {@link MimeTypeUtils#TEXT_PLAIN}.
*/
public static final String TEXT_PLAIN_VALUE = "text/plain";
/**
* Public constant mime type for {@code text/xml}.
* */
public static final MimeType TEXT_XML;
/**
* A String equivalent of {@link MimeTypeUtils#TEXT_XML}.
*/
public static final String TEXT_XML_VALUE = "text/xml";
private static final ConcurrentLruCache<String, MimeType> cachedMimeTypes =
new ConcurrentLruCache<>(64, MimeTypeUtils::parseMimeTypeInternal);
private static volatile @Nullable Random random;
static {
// Not using "parseMimeType" to avoid static init cost
ALL = new MimeType(MimeType.WILDCARD_TYPE, MimeType.WILDCARD_TYPE);
APPLICATION_GRAPHQL = new MimeType("application", "graphql+json");
APPLICATION_JSON = new MimeType("application", "json");
APPLICATION_OCTET_STREAM = new MimeType("application", "octet-stream");
APPLICATION_XML = new MimeType("application", "xml");
IMAGE_GIF = new MimeType("image", "gif");
IMAGE_JPEG = new MimeType("image", "jpeg");
IMAGE_PNG = new MimeType("image", "png");
TEXT_HTML = new MimeType("text", "html");
TEXT_PLAIN = new MimeType("text", "plain");
TEXT_XML = new MimeType("text", "xml");
}
/**
* Parse the given String into a single {@code MimeType}.
* Recently parsed {@code MimeType} are cached for further retrieval.
* @param mimeType the string to parse
* @return the mime type
* @throws InvalidMimeTypeException if the string cannot be parsed
*/
public static MimeType parseMimeType(String mimeType) {
if (!StringUtils.hasLength(mimeType)) {
throw new InvalidMimeTypeException(mimeType, "'mimeType' must not be empty");
}
// do not cache multipart mime types with random boundaries
if (mimeType.startsWith("multipart")) {
return parseMimeTypeInternal(mimeType);
}
return cachedMimeTypes.get(mimeType);
}
private static MimeType parseMimeTypeInternal(String mimeType) {
int index = mimeType.indexOf(';');
String fullType = (index >= 0 ? mimeType.substring(0, index) : mimeType).trim();
if (fullType.isEmpty()) {
throw new InvalidMimeTypeException(mimeType, "'mimeType' must not be empty");
}
// java.net.HttpURLConnection returns a *; q=.2 Accept header
if (MimeType.WILDCARD_TYPE.equals(fullType)) {
fullType = "*/*";
}
int subIndex = fullType.indexOf('/');
if (subIndex == -1) {
throw new InvalidMimeTypeException(mimeType, "does not contain '/'");
}
if (subIndex == fullType.length() - 1) {
throw new InvalidMimeTypeException(mimeType, "does not contain subtype after '/'");
}
String type = fullType.substring(0, subIndex);
String subtype = fullType.substring(subIndex + 1);
if (MimeType.WILDCARD_TYPE.equals(type) && !MimeType.WILDCARD_TYPE.equals(subtype)) {
throw new InvalidMimeTypeException(mimeType, "wildcard type is legal only in '*/*' (all mime types)");
}
Map<String, String> parameters = null;
do {
int nextIndex = index + 1;
boolean quoted = false;
while (nextIndex < mimeType.length()) {
char ch = mimeType.charAt(nextIndex);
if (ch == ';') {
if (!quoted) {
break;
}
}
else if (ch == '"') {
quoted = !quoted;
}
nextIndex++;
}
String parameter = mimeType.substring(index + 1, nextIndex).trim();
if (parameter.length() > 0) {
if (parameters == null) {
parameters = new LinkedHashMap<>(4);
}
int eqIndex = parameter.indexOf('=');
if (eqIndex >= 0) {
String attribute = parameter.substring(0, eqIndex).trim();
String value = parameter.substring(eqIndex + 1).trim();
parameters.put(attribute, value);
}
}
index = nextIndex;
}
while (index < mimeType.length());
try {
return new MimeType(type, subtype, parameters);
}
catch (UnsupportedCharsetException ex) {
throw new InvalidMimeTypeException(mimeType, "unsupported charset '" + ex.getCharsetName() + "'");
}
catch (IllegalArgumentException ex) {
throw new InvalidMimeTypeException(mimeType, ex.getMessage());
}
}
/**
* Parse the comma-separated string into a mutable list of {@code MimeType} objects.
* @param mimeTypes the string to parse
* @return the list of mime types
* @throws InvalidMimeTypeException if the string cannot be parsed
*/
public static List<MimeType> parseMimeTypes(String mimeTypes) {
if (!StringUtils.hasLength(mimeTypes)) {
return Collections.emptyList();
}
return tokenize(mimeTypes).stream()
.filter(StringUtils::hasText)
.map(MimeTypeUtils::parseMimeType)
.collect(Collectors.toList());
}
/**
* Tokenize the given comma-separated string of {@code MimeType} objects
* into a {@code List<String>}. Unlike simple tokenization by ",", this
* method takes into account quoted parameters.
* @param mimeTypes the string to tokenize
* @return the list of tokens
* @since 5.1.3
*/
public static List<String> tokenize(String mimeTypes) {
if (!StringUtils.hasLength(mimeTypes)) {
return Collections.emptyList();
}
List<String> tokens = new ArrayList<>();
boolean inQuotes = false;
int startIndex = 0;
int i = 0;
while (i < mimeTypes.length()) {
switch (mimeTypes.charAt(i)) {
case '"' -> inQuotes = !inQuotes;
case ',' -> {
if (!inQuotes) {
tokens.add(mimeTypes.substring(startIndex, i));
startIndex = i + 1;
}
}
case '\\' -> i++;
}
i++;
}
tokens.add(mimeTypes.substring(startIndex));
return tokens;
}
/**
* Generate a string representation of the given collection of {@link MimeType}
* objects.
* @param mimeTypes the {@code MimeType} objects
* @return a string representation of the {@code MimeType} objects
*/
public static String toString(Collection<? extends MimeType> mimeTypes) {
StringBuilder builder = new StringBuilder();
for (Iterator<? extends MimeType> iterator = mimeTypes.iterator(); iterator.hasNext();) {
MimeType mimeType = iterator.next();
mimeType.appendTo(builder);
if (iterator.hasNext()) {
builder.append(", ");
}
}
return builder.toString();
}
/**
 * Sort the given list of {@code MimeType} objects by
 * {@linkplain MimeType#isMoreSpecific(MimeType) specificity}.
 * <p>Because of the computational cost, this method throws an exception if
 * the given list contains too many elements.
 * @param mimeTypes the list of mime types to be sorted
 * @throws InvalidMimeTypeException if {@code mimeTypes} contains more than 50 elements
 * @see <a href="https://tools.ietf.org/html/rfc7231#section-5.3.2">HTTP 1.1: Semantics
 * and Content, section 5.3.2</a>
 * @see MimeType#isMoreSpecific(MimeType)
 */
public static <T extends MimeType> void sortBySpecificity(List<T> mimeTypes) {
    Assert.notNull(mimeTypes, "'mimeTypes' must not be null");
    // Bubble sort is O(n^2); refuse pathologically large inputs up front.
    if (mimeTypes.size() > 50) {
        throw new InvalidMimeTypeException(mimeTypes.toString(), "Too many elements");
    }
    bubbleSort(mimeTypes, (left, right) -> left.isLessSpecific(right));
}
/**
 * In-place bubble sort driven by a swap predicate: adjacent elements
 * {@code (a, b)} are exchanged whenever {@code swap.test(a, b)} is true,
 * so after completion the predicate is false for every adjacent pair.
 * @param list the list to sort in place
 * @param swap returns true when the first argument should come after the second
 */
static <T> void bubbleSort(List<T> list, BiPredicate<? super T, ? super T> swap) {
    int size = list.size();
    // Each pass bubbles the "largest" remaining element to position end - 1,
    // so the inner bound shrinks by one per pass.
    for (int end = size; end > 0; end--) {
        for (int i = 1; i < end; i++) {
            T left = list.get(i - 1);
            T right = list.get(i);
            if (swap.test(left, right)) {
                list.set(i, left);
                list.set(i - 1, right);
            }
        }
    }
}
/**
 * Generate a random MIME boundary as bytes, often used in multipart mime types.
 * <p>The boundary length is random in the range [30, 40] and every byte is
 * drawn from the allowed boundary character set.
 */
public static byte[] generateMultipartBoundary() {
    Random rnd = initRandom();
    byte[] result = new byte[rnd.nextInt(11) + 30];
    for (int index = 0; index < result.length; index++) {
        result[index] = BOUNDARY_CHARS[rnd.nextInt(BOUNDARY_CHARS.length)];
    }
    return result;
}
/**
 * Lazily initialize the {@link SecureRandom} for {@link #generateMultipartBoundary()}.
 */
private static Random initRandom() {
    // Double-checked locking: read the field into a local once so the fast
    // path (already initialized) avoids a second field read.
    // NOTE(review): this idiom is only safe if the 'random' field is declared
    // volatile -- confirm at the field declaration (not visible in this chunk).
    Random randomToUse = random;
    if (randomToUse == null) {
        synchronized (MimeTypeUtils.class) {
            // Re-check under the lock in case another thread initialized it first.
            randomToUse = random;
            if (randomToUse == null) {
                randomToUse = new SecureRandom();
                random = randomToUse;
            }
        }
    }
    return randomToUse;
}
/**
 * Generate a random MIME boundary as String, often used in multipart mime types.
 * <p>Boundary bytes are always US-ASCII, so the conversion is lossless.
 */
public static String generateMultipartBoundaryString() {
    byte[] boundary = generateMultipartBoundary();
    return new String(boundary, StandardCharsets.US_ASCII);
}
}
|
MimeTypeUtils
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/sql/oracle/createTable/OracleCreateTableTest52.java
|
{
"start": 1026,
"end": 6100
}
|
/**
 * Round-trip test for an Oracle CREATE TABLE statement with quoted
 * identifiers, a USING INDEX primary-key constraint and full physical
 * storage attributes (PCTFREE/STORAGE/TABLESPACE): the parser must
 * re-emit a canonically formatted statement and the schema visitor must
 * report the table and its columns.
 */
class ____ extends OracleTest {
    public void test_types() throws Exception {
        // Input DDL in the layout typically produced by DBMS_METADATA.
        String sql = //
                " CREATE TABLE \"SC_001\".\"TB_001\" \n" +
                " ( \"ID\" NUMBER NOT NULL ENABLE, \n" +
                " \"GMT_CREATE\" DATE NOT NULL ENABLE, \n" +
                " \"GMT_MODIFIED\" DATE NOT NULL ENABLE, \n" +
                " \"ADMIN_SEQ\" NUMBER, \n" +
                " \"COMPANY_ID\" NUMBER, \n" +
                " \"NOW_LEVEL\" NUMBER, \n" +
                " \"GMV_30DAY\" NUMBER, \n" +
                " \"REFUND_30DAY\" NUMBER, \n" +
                " \"REPUTATION_30DAY\" NUMBER, \n" +
                " \"REBUY_AMOUNT\" NUMBER, \n" +
                " \"STANDARD_CNT\" NUMBER, \n" +
                " CONSTRAINT \"WS_SELLER_LEVEL_PK\" PRIMARY KEY (\"ID\")\n" +
                " USING INDEX PCTFREE 10 INITRANS 2 MAXTRANS 255 COMPUTE STATISTICS \n" +
                " STORAGE(INITIAL 65536 NEXT 1048576 MINEXTENTS 1 MAXEXTENTS 2147483645\n" +
                " PCTINCREASE 0 FREELISTS 1 FREELIST GROUPS 1 BUFFER_POOL DEFAULT)\n" +
                " TABLESPACE \"APPDATA1M\" ENABLE\n" +
                " ) PCTFREE 10 PCTUSED 40 INITRANS 1 MAXTRANS 255 NOCOMPRESS LOGGING\n" +
                " STORAGE(INITIAL 65536 NEXT 1048576 MINEXTENTS 1 MAXEXTENTS 2147483645\n" +
                " PCTINCREASE 0 FREELISTS 1 FREELIST GROUPS 1 BUFFER_POOL DEFAULT)\n" +
                " TABLESPACE \"APPDATA1M\" ENABLE ROW MOVEMENT ";
        OracleStatementParser parser = new OracleStatementParser(sql);
        List<SQLStatement> statementList = parser.parseStatementList();
        SQLStatement stmt = statementList.get(0);
        print(statementList);
        assertEquals(1, statementList.size());
        // Expected canonical output: one clause per line, tab-indented.
        assertEquals("CREATE TABLE \"SC_001\".\"TB_001\" (\n" +
                "\t\"ID\" NUMBER NOT NULL ENABLE,\n" +
                "\t\"GMT_CREATE\" DATE NOT NULL ENABLE,\n" +
                "\t\"GMT_MODIFIED\" DATE NOT NULL ENABLE,\n" +
                "\t\"ADMIN_SEQ\" NUMBER,\n" +
                "\t\"COMPANY_ID\" NUMBER,\n" +
                "\t\"NOW_LEVEL\" NUMBER,\n" +
                "\t\"GMV_30DAY\" NUMBER,\n" +
                "\t\"REFUND_30DAY\" NUMBER,\n" +
                "\t\"REPUTATION_30DAY\" NUMBER,\n" +
                "\t\"REBUY_AMOUNT\" NUMBER,\n" +
                "\t\"STANDARD_CNT\" NUMBER,\n" +
                "\tCONSTRAINT \"WS_SELLER_LEVEL_PK\" PRIMARY KEY (\"ID\")\n" +
                "\t\tUSING INDEX\n" +
                "\t\tPCTFREE 10\n" +
                "\t\tINITRANS 2\n" +
                "\t\tMAXTRANS 255\n" +
                "\t\tTABLESPACE \"APPDATA1M\"\n" +
                "\t\tSTORAGE (\n" +
                "\t\t\tINITIAL 65536\n" +
                "\t\t\tNEXT 1048576\n" +
                "\t\t\tMINEXTENTS 1\n" +
                "\t\t\tMAXEXTENTS 2147483645\n" +
                "\t\t\tPCTINCREASE 0\n" +
                "\t\t\tFREELISTS 1\n" +
                "\t\t\tFREELIST GROUPS 1\n" +
                "\t\t\tBUFFER_POOL DEFAULT\n" +
                "\t\t)\n" +
                "\t\tCOMPUTE STATISTICS\n" +
                "\t\tENABLE\n" +
                ")\n" +
                "PCTFREE 10\n" +
                "PCTUSED 40\n" +
                "INITRANS 1\n" +
                "MAXTRANS 255\n" +
                "NOCOMPRESS\n" +
                "LOGGING\n" +
                "TABLESPACE \"APPDATA1M\"\n" +
                "STORAGE (\n" +
                "\tINITIAL 65536\n" +
                "\tNEXT 1048576\n" +
                "\tMINEXTENTS 1\n" +
                "\tMAXEXTENTS 2147483645\n" +
                "\tPCTINCREASE 0\n" +
                "\tFREELISTS 1\n" +
                "\tFREELIST GROUPS 1\n" +
                "\tBUFFER_POOL DEFAULT\n" +
                ")",
                SQLUtils.toSQLString(stmt, JdbcConstants.ORACLE));
        // Walk the AST and verify the schema statistics the visitor collects.
        OracleSchemaStatVisitor visitor = new OracleSchemaStatVisitor();
        stmt.accept(visitor);
        System.out.println("Tables : " + visitor.getTables());
        System.out.println("fields : " + visitor.getColumns());
        System.out.println("coditions : " + visitor.getConditions());
        System.out.println("relationships : " + visitor.getRelationships());
        System.out.println("orderBy : " + visitor.getOrderByColumns());
        assertEquals(1, visitor.getTables().size());
        // 11 data columns; the PK constraint does not add an extra column entry.
        assertEquals(11, visitor.getColumns().size());
        assertTrue(visitor.containsColumn("SC_001.TB_001", "ID"));
    }
}
|
OracleCreateTableTest52
|
java
|
spring-projects__spring-framework
|
spring-aop/src/main/java/org/springframework/aop/aspectj/AspectJWeaverMessageHandler.java
|
{
"start": 1484,
"end": 3035
}
|
class ____ implements IMessageHandler {
private static final String AJ_ID = "[AspectJ] ";
private static final Log logger = LogFactory.getLog("AspectJ Weaver");
@Override
public boolean handleMessage(IMessage message) throws AbortException {
Kind messageKind = message.getKind();
if (messageKind == IMessage.DEBUG) {
if (logger.isDebugEnabled()) {
logger.debug(makeMessageFor(message));
return true;
}
}
else if (messageKind == IMessage.INFO || messageKind == IMessage.WEAVEINFO) {
if (logger.isInfoEnabled()) {
logger.info(makeMessageFor(message));
return true;
}
}
else if (messageKind == IMessage.WARNING) {
if (logger.isWarnEnabled()) {
logger.warn(makeMessageFor(message));
return true;
}
}
else if (messageKind == IMessage.ERROR) {
if (logger.isErrorEnabled()) {
logger.error(makeMessageFor(message));
return true;
}
}
else if (messageKind == IMessage.ABORT) {
if (logger.isFatalEnabled()) {
logger.fatal(makeMessageFor(message));
return true;
}
}
return false;
}
private String makeMessageFor(IMessage aMessage) {
return AJ_ID + aMessage.getMessage();
}
@Override
public boolean isIgnoring(Kind messageKind) {
// We want to see everything, and allow configuration of log levels dynamically.
return false;
}
@Override
public void dontIgnore(Kind messageKind) {
// We weren't ignoring anything anyway...
}
@Override
public void ignore(Kind kind) {
// We weren't ignoring anything anyway...
}
}
|
AspectJWeaverMessageHandler
|
java
|
elastic__elasticsearch
|
x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java
|
{
"start": 3509,
"end": 96036
}
|
class ____ extends ESTestCase {
// Converts a page count into a byte size using the shared cache page size.
private static long size(long numPages) {
    return SharedBytes.PAGE_SIZE * numPages;
}
// Adapts a CheckedRunnable to an ActionListener<Void> completion: runs the
// runnable and delegates success/failure handling to ActionListener.completeWith.
private static <E extends Exception> void completeWith(ActionListener<Void> listener, CheckedRunnable<E> runnable) {
    ActionListener.completeWith(listener, () -> {
        runnable.run();
        return null;
    });
}
/**
 * Manual eviction on a 5-region cache: regions report the expected lengths,
 * tryEvict fails while a region is referenced by an in-flight read but
 * succeeds once idle, and each eviction is reflected both in the free-region
 * count and the evicted-regions telemetry counter.
 */
public void testBasicEviction() throws IOException {
    Settings settings = Settings.builder()
        .put(NODE_NAME_SETTING.getKey(), "node")
        .put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(size(500)).getStringRep())
        .put(SharedBlobCacheService.SHARED_CACHE_REGION_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(size(100)).getStringRep())
        .put("path.home", createTempDir())
        .build();
    final DeterministicTaskQueue taskQueue = new DeterministicTaskQueue();
    RecordingMeterRegistry recordingMeterRegistry = new RecordingMeterRegistry();
    BlobCacheMetrics metrics = new BlobCacheMetrics(recordingMeterRegistry);
    try (
        NodeEnvironment environment = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));
        var cacheService = new SharedBlobCacheService<>(
            environment,
            settings,
            taskQueue.getThreadPool(),
            taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC),
            metrics
        )
    ) {
        final var cacheKey = generateCacheKey();
        assertEquals(5, cacheService.freeRegionCount());
        final var region0 = cacheService.get(cacheKey, size(250), 0);
        assertEquals(size(100), region0.tracker.getLength());
        assertEquals(4, cacheService.freeRegionCount());
        final var region1 = cacheService.get(cacheKey, size(250), 1);
        assertEquals(size(100), region1.tracker.getLength());
        assertEquals(3, cacheService.freeRegionCount());
        // Last region of a 250-page file only covers the remaining 50 pages.
        final var region2 = cacheService.get(cacheKey, size(250), 2);
        assertEquals(size(50), region2.tracker.getLength());
        assertEquals(2, cacheService.freeRegionCount());
        synchronized (cacheService) {
            assertTrue(tryEvict(region1));
        }
        assertEquals(3, cacheService.freeRegionCount());
        // one eviction should be reflected in the telemetry for total count of evicted regions
        assertThat(
            recordingMeterRegistry.getRecorder()
                .getMeasurements(InstrumentType.LONG_COUNTER, BLOB_CACHE_COUNT_OF_EVICTED_REGIONS_TOTAL)
                .size(),
            is(1)
        );
        // Evicting an already-evicted region is a no-op.
        synchronized (cacheService) {
            assertFalse(tryEvict(region1));
        }
        assertEquals(3, cacheService.freeRegionCount());
        final var bytesReadFuture = new PlainActionFuture<Integer>();
        region0.populateAndRead(
            ByteRange.of(0L, 1L),
            ByteRange.of(0L, 1L),
            (channel, channelPos, relativePos, length) -> 1,
            (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith(
                completionListener,
                () -> progressUpdater.accept(length)
            ),
            taskQueue.getThreadPool().generic(),
            bytesReadFuture
        );
        // region0 is pinned by the pending read, so eviction must fail ...
        synchronized (cacheService) {
            assertFalse(tryEvict(region0));
        }
        assertEquals(3, cacheService.freeRegionCount());
        assertFalse(bytesReadFuture.isDone());
        // ... until the async read completes.
        taskQueue.runAllRunnableTasks();
        synchronized (cacheService) {
            assertTrue(tryEvict(region0));
        }
        assertEquals(4, cacheService.freeRegionCount());
        synchronized (cacheService) {
            assertTrue(tryEvict(region2));
        }
        assertEquals(5, cacheService.freeRegionCount());
        // another 2 evictions should bump our total evictions telemetry at 3
        assertThat(
            recordingMeterRegistry.getRecorder()
                .getMeasurements(InstrumentType.LONG_COUNTER, BLOB_CACHE_COUNT_OF_EVICTED_REGIONS_TOTAL)
                .size(),
            is(3)
        );
        assertTrue(bytesReadFuture.isDone());
        assertEquals(Integer.valueOf(1), bytesReadFuture.actionGet());
    }
}
/**
 * Verifies that a populateAndRead cache miss records a miss-telemetry
 * measurement tagged with the file extension taken from the resource
 * description ("other" then "cfs" here).
 */
public void testCacheMissOnPopulate() throws Exception {
    Settings settings = Settings.builder()
        .put(NODE_NAME_SETTING.getKey(), "node")
        .put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(size(50)).getStringRep())
        .put(SharedBlobCacheService.SHARED_CACHE_REGION_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(size(10)).getStringRep())
        .put("path.home", createTempDir())
        .build();
    final DeterministicTaskQueue taskQueue = new DeterministicTaskQueue();
    RecordingMeterRegistry recordingMeterRegistry = new RecordingMeterRegistry();
    BlobCacheMetrics metrics = new BlobCacheMetrics(recordingMeterRegistry);
    ExecutorService ioExecutor = Executors.newCachedThreadPool();
    try (
        NodeEnvironment environment = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));
        var cacheService = new SharedBlobCacheService<>(environment, settings, taskQueue.getThreadPool(), ioExecutor, metrics)
    ) {
        ByteRange rangeRead = ByteRange.of(0L, 1L);
        ByteRange rangeWrite = ByteRange.of(0L, 1L);
        Path tempFile = createTempFile("test", "other");
        String resourceDescription = tempFile.toAbsolutePath().toString();
        final var cacheKey = generateCacheKey();
        SharedBlobCacheService<Object>.CacheFile cacheFile = cacheService.getCacheFile(cacheKey, 1L);
        ByteBuffer writeBuffer = ByteBuffer.allocate(1);
        // First read is a miss: the writer copies from the temp file into the cache.
        final int bytesRead = cacheFile.populateAndRead(
            rangeRead,
            rangeWrite,
            (channel, pos, relativePos, len) -> len,
            (channel, channelPos, streamFactory, relativePos, len, progressUpdater, completionListener) -> {
                try (var in = Files.newInputStream(tempFile)) {
                    SharedBytes.copyToCacheFileAligned(channel, in, channelPos, progressUpdater, writeBuffer.clear());
                }
                ActionListener.completeWith(completionListener, () -> null);
            },
            resourceDescription
        );
        assertThat(bytesRead, is(1));
        List<Measurement> measurements = recordingMeterRegistry.getRecorder()
            .getMeasurements(InstrumentType.LONG_COUNTER, "es.blob_cache.miss_that_triggered_read.total");
        Measurement first = measurements.getFirst();
        assertThat(first.attributes().get("file_extension"), is("other"));
        assertThat(first.value(), is(1L));
        // A second miss for a different key/extension produces a second measurement.
        Path tempFile2 = createTempFile("test", "cfs");
        resourceDescription = tempFile2.toAbsolutePath().toString();
        cacheFile = cacheService.getCacheFile(generateCacheKey(), 1L);
        ByteBuffer writeBuffer2 = ByteBuffer.allocate(1);
        final int bytesRead2 = cacheFile.populateAndRead(
            rangeRead,
            rangeWrite,
            (channel, pos, relativePos, len) -> len,
            (channel, channelPos, streamFactory, relativePos, len, progressUpdater, completionListener) -> {
                try (var in = Files.newInputStream(tempFile2)) {
                    SharedBytes.copyToCacheFileAligned(channel, in, channelPos, progressUpdater, writeBuffer2.clear());
                }
                ActionListener.completeWith(completionListener, () -> null);
            },
            resourceDescription
        );
        assertThat(bytesRead2, is(1));
        measurements = recordingMeterRegistry.getRecorder()
            .getMeasurements(InstrumentType.LONG_COUNTER, "es.blob_cache.miss_that_triggered_read.total");
        Measurement measurement = measurements.get(1);
        assertThat(measurement.attributes().get("file_extension"), is("cfs"));
        assertThat(measurement.value(), is(1L));
    }
    ioExecutor.shutdown();
}
// Randomly exercises both eviction entry points; with tryEvictNoDecRef the
// caller must release the reference itself after a successful eviction.
private static boolean tryEvict(SharedBlobCacheService.CacheFileRegion<Object> region1) {
    if (randomBoolean()) {
        return region1.tryEvict();
    }
    boolean evicted = region1.tryEvictNoDecRef();
    if (evicted) {
        region1.decRef();
    }
    return evicted;
}
/**
 * With only two regions of capacity, acquiring a third region automatically
 * evicts region 0 (the oldest), as asserted below; explicit eviction then
 * frees capacity.
 */
public void testAutoEviction() throws IOException {
    Settings settings = Settings.builder()
        .put(NODE_NAME_SETTING.getKey(), "node")
        .put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(size(200)).getStringRep())
        .put(SharedBlobCacheService.SHARED_CACHE_REGION_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(size(100)).getStringRep())
        .put("path.home", createTempDir())
        .build();
    final DeterministicTaskQueue taskQueue = new DeterministicTaskQueue();
    try (
        NodeEnvironment environment = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));
        var cacheService = new SharedBlobCacheService<>(
            environment,
            settings,
            taskQueue.getThreadPool(),
            taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC),
            BlobCacheMetrics.NOOP
        )
    ) {
        final var cacheKey = generateCacheKey();
        assertEquals(2, cacheService.freeRegionCount());
        final var region0 = cacheService.get(cacheKey, size(250), 0);
        assertEquals(size(100), region0.tracker.getLength());
        assertEquals(1, cacheService.freeRegionCount());
        final var region1 = cacheService.get(cacheKey, size(250), 1);
        assertEquals(size(100), region1.tracker.getLength());
        assertEquals(0, cacheService.freeRegionCount());
        assertFalse(region0.isEvicted());
        assertFalse(region1.isEvicted());
        // acquire region 2, which should evict region 0 (oldest)
        final var region2 = cacheService.get(cacheKey, size(250), 2);
        assertEquals(size(50), region2.tracker.getLength());
        assertEquals(0, cacheService.freeRegionCount());
        assertTrue(region0.isEvicted());
        assertFalse(region1.isEvicted());
        // explicitly evict region 1
        synchronized (cacheService) {
            assertTrue(tryEvict(region1));
        }
        assertEquals(1, cacheService.freeRegionCount());
    }
}
/**
 * removeFromCache(key) evicts only the regions belonging to that key, leaving
 * regions of other keys untouched and returning the freed region to the pool.
 */
public void testForceEviction() throws IOException {
    Settings settings = Settings.builder()
        .put(NODE_NAME_SETTING.getKey(), "node")
        .put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(size(500)).getStringRep())
        .put(SharedBlobCacheService.SHARED_CACHE_REGION_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(size(100)).getStringRep())
        .put("path.home", createTempDir())
        .build();
    final DeterministicTaskQueue taskQueue = new DeterministicTaskQueue();
    try (
        NodeEnvironment environment = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));
        var cacheService = new SharedBlobCacheService<>(
            environment,
            settings,
            taskQueue.getThreadPool(),
            taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC),
            BlobCacheMetrics.NOOP
        )
    ) {
        final var cacheKey1 = generateCacheKey();
        final var cacheKey2 = generateCacheKey();
        assertEquals(5, cacheService.freeRegionCount());
        final var region0 = cacheService.get(cacheKey1, size(250), 0);
        assertEquals(4, cacheService.freeRegionCount());
        final var region1 = cacheService.get(cacheKey2, size(250), 1);
        assertEquals(3, cacheService.freeRegionCount());
        assertFalse(region0.isEvicted());
        assertFalse(region1.isEvicted());
        // Only cacheKey1's region must be evicted.
        cacheService.removeFromCache(cacheKey1);
        assertTrue(region0.isEvicted());
        assertFalse(region1.isEvicted());
        assertEquals(4, cacheService.freeRegionCount());
    }
}
/**
 * forceEvict(predicate) returns the number of regions it evicted: first the
 * single region matching cacheKey1, then the single remaining region for the
 * match-all predicate.
 */
public void testForceEvictResponse() throws IOException {
    Settings settings = Settings.builder()
        .put(NODE_NAME_SETTING.getKey(), "node")
        .put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(size(500)).getStringRep())
        .put(SharedBlobCacheService.SHARED_CACHE_REGION_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(size(100)).getStringRep())
        .put("path.home", createTempDir())
        .build();
    final DeterministicTaskQueue taskQueue = new DeterministicTaskQueue();
    try (
        NodeEnvironment environment = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));
        var cacheService = new SharedBlobCacheService<>(
            environment,
            settings,
            taskQueue.getThreadPool(),
            taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC),
            BlobCacheMetrics.NOOP
        )
    ) {
        final var cacheKey1 = generateCacheKey();
        final var cacheKey2 = generateCacheKey();
        assertEquals(5, cacheService.freeRegionCount());
        final var region0 = cacheService.get(cacheKey1, size(250), 0);
        assertEquals(4, cacheService.freeRegionCount());
        final var region1 = cacheService.get(cacheKey2, size(250), 1);
        assertEquals(3, cacheService.freeRegionCount());
        assertFalse(region0.isEvicted());
        assertFalse(region1.isEvicted());
        assertEquals(1, cacheService.forceEvict(cK -> cK == cacheKey1));
        assertEquals(1, cacheService.forceEvict(e -> true));
    }
}
/**
 * forceEvictAsync only schedules the eviction: nothing is evicted until the
 * deterministic task queue runs the async task, after which only the
 * matching key's region is evicted.
 */
public void testAsynchronousEviction() throws Exception {
    Settings settings = Settings.builder()
        .put(NODE_NAME_SETTING.getKey(), "node")
        .put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(size(500)).getStringRep())
        .put(SharedBlobCacheService.SHARED_CACHE_REGION_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(size(100)).getStringRep())
        .put("path.home", createTempDir())
        .build();
    final DeterministicTaskQueue taskQueue = new DeterministicTaskQueue();
    try (
        NodeEnvironment environment = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));
        var cacheService = new SharedBlobCacheService<>(
            environment,
            settings,
            taskQueue.getThreadPool(),
            taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC),
            BlobCacheMetrics.NOOP
        )
    ) {
        final var cacheKey1 = generateCacheKey();
        final var cacheKey2 = generateCacheKey();
        assertEquals(5, cacheService.freeRegionCount());
        final var region0 = cacheService.get(cacheKey1, size(250), 0);
        assertEquals(4, cacheService.freeRegionCount());
        final var region1 = cacheService.get(cacheKey2, size(250), 1);
        assertEquals(3, cacheService.freeRegionCount());
        assertFalse(region0.isEvicted());
        assertFalse(region1.isEvicted());
        cacheService.forceEvictAsync(ck -> ck == cacheKey1);
        // Eviction has only been scheduled, not executed yet.
        assertFalse(region0.isEvicted());
        assertFalse(region1.isEvicted());
        // run the async task
        taskQueue.runAllRunnableTasks();
        assertTrue(region0.isEvicted());
        assertFalse(region1.isEvicted());
        assertEquals(4, cacheService.freeRegionCount());
    }
}
/**
 * Drives frequency decay: gets raise a region's frequency, every triggered
 * decay (provoked here by acquiring a fresh key while the cache is full)
 * lowers all frequencies by one, and the epoch telemetry counter tracks the
 * number of decays that ran.
 */
public void testDecay() throws IOException {
    RecordingMeterRegistry recordingMeterRegistry = new RecordingMeterRegistry();
    BlobCacheMetrics metrics = new BlobCacheMetrics(recordingMeterRegistry);
    // we have 8 regions
    Settings settings = Settings.builder()
        .put(NODE_NAME_SETTING.getKey(), "node")
        .put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(size(400)).getStringRep())
        .put(SharedBlobCacheService.SHARED_CACHE_REGION_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(size(100)).getStringRep())
        .put("path.home", createTempDir())
        .build();
    final DeterministicTaskQueue taskQueue = new DeterministicTaskQueue();
    try (
        NodeEnvironment environment = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));
        var cacheService = new SharedBlobCacheService<>(
            environment,
            settings,
            taskQueue.getThreadPool(),
            taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC),
            metrics
        )
    ) {
        assertEquals(4, cacheService.freeRegionCount());
        final var cacheKey1 = generateCacheKey();
        final var cacheKey2 = generateCacheKey();
        final var cacheKey3 = generateCacheKey();
        // add a region that we can evict when provoking first decay
        cacheService.get("evictkey", size(250), 0);
        assertEquals(3, cacheService.freeRegionCount());
        final var region0 = cacheService.get(cacheKey1, size(250), 0);
        assertEquals(2, cacheService.freeRegionCount());
        final var region1 = cacheService.get(cacheKey2, size(250), 1);
        assertEquals(1, cacheService.freeRegionCount());
        final var region2 = cacheService.get(cacheKey3, size(250), 1);
        assertEquals(0, cacheService.freeRegionCount());
        assertEquals(1, cacheService.getFreq(region0));
        assertEquals(1, cacheService.getFreq(region1));
        assertEquals(1, cacheService.getFreq(region2));
        AtomicLong expectedEpoch = new AtomicLong();
        // Acquiring a region for a brand-new key while the cache is full
        // schedules a decay task; run it and verify the epoch advanced and
        // the telemetry counter matches.
        Runnable triggerDecay = () -> {
            assertThat(taskQueue.hasRunnableTasks(), is(false));
            cacheService.get(expectedEpoch.toString(), size(250), 0);
            assertThat(taskQueue.hasRunnableTasks(), is(true));
            taskQueue.runAllRunnableTasks();
            assertThat(cacheService.epoch(), equalTo(expectedEpoch.incrementAndGet()));
            long epochs = recordedEpochs(recordingMeterRegistry);
            assertEquals(cacheService.epoch(), epochs);
        };
        triggerDecay.run();
        cacheService.get(cacheKey1, size(250), 0);
        cacheService.get(cacheKey2, size(250), 1);
        cacheService.get(cacheKey3, size(250), 1);
        triggerDecay.run();
        final var region0Again = cacheService.get(cacheKey1, size(250), 0);
        assertSame(region0Again, region0);
        assertEquals(3, cacheService.getFreq(region0));
        assertEquals(1, cacheService.getFreq(region1));
        assertEquals(1, cacheService.getFreq(region2));
        triggerDecay.run();
        cacheService.get(cacheKey1, size(250), 0);
        assertEquals(4, cacheService.getFreq(region0));
        cacheService.get(cacheKey1, size(250), 0);
        assertEquals(4, cacheService.getFreq(region0));
        assertEquals(0, cacheService.getFreq(region1));
        assertEquals(0, cacheService.getFreq(region2));
        // ensure no freq=0 entries
        cacheService.get(cacheKey2, size(250), 1);
        cacheService.get(cacheKey3, size(250), 1);
        assertEquals(2, cacheService.getFreq(region1));
        assertEquals(2, cacheService.getFreq(region2));
        triggerDecay.run();
        assertEquals(3, cacheService.getFreq(region0));
        assertEquals(1, cacheService.getFreq(region1));
        assertEquals(1, cacheService.getFreq(region2));
        triggerDecay.run();
        assertEquals(2, cacheService.getFreq(region0));
        assertEquals(0, cacheService.getFreq(region1));
        assertEquals(0, cacheService.getFreq(region2));
        // ensure no freq=0 entries
        cacheService.get(cacheKey2, size(250), 1);
        cacheService.get(cacheKey3, size(250), 1);
        assertEquals(2, cacheService.getFreq(region1));
        assertEquals(2, cacheService.getFreq(region2));
        triggerDecay.run();
        assertEquals(1, cacheService.getFreq(region0));
        assertEquals(1, cacheService.getFreq(region1));
        assertEquals(1, cacheService.getFreq(region2));
        triggerDecay.run();
        assertEquals(0, cacheService.getFreq(region0));
        assertEquals(0, cacheService.getFreq(region1));
        assertEquals(0, cacheService.getFreq(region2));
    }
}
// Sums all increments recorded for the blob-cache epoch counter metric.
private static long recordedEpochs(RecordingMeterRegistry recordingMeterRegistry) {
    return recordingMeterRegistry.getRecorder()
        .getMeasurements(InstrumentType.LONG_COUNTER, "es.blob_cache.epoch.total")
        .stream()
        .mapToLong(Measurement::getLong)
        .sum();
}
/**
 * Test when many objects need to decay, in particular useful to measure how long the decay task takes.
 * For 1M objects (with no assertions) it took 26ms locally.
 * <p>Fills every region, then repeatedly shifts the accessed window by one
 * and triggers a decay per round; afterwards all surviving regions should
 * have converged to the same frequency.
 */
public void testMassiveDecay() throws IOException {
    RecordingMeterRegistry recordingMeterRegistry = new RecordingMeterRegistry();
    BlobCacheMetrics metrics = new BlobCacheMetrics(recordingMeterRegistry);
    int regions = 1024; // to measure decay time, increase to 1024*1024 and disable assertions.
    Settings settings = Settings.builder()
        .put(NODE_NAME_SETTING.getKey(), "node")
        .put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(size(regions)).getStringRep())
        .put(SharedBlobCacheService.SHARED_CACHE_REGION_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(size(1)).getStringRep())
        .put("path.home", createTempDir())
        .build();
    final DeterministicTaskQueue taskQueue = new DeterministicTaskQueue();
    try (
        NodeEnvironment environment = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));
        var cacheService = new SharedBlobCacheService<>(
            environment,
            settings,
            taskQueue.getThreadPool(),
            taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC),
            metrics
        )
    ) {
        // Runs (and times) the scheduled decay task.
        Runnable decay = () -> {
            assertThat(taskQueue.hasRunnableTasks(), is(true));
            long before = System.currentTimeMillis();
            taskQueue.runAllRunnableTasks();
            long after = System.currentTimeMillis();
            logger.debug("took {} ms", (after - before));
        };
        long fileLength = size(regions + 100);
        Object cacheKey = new Object();
        for (int i = 0; i < regions; ++i) {
            cacheService.get(cacheKey, fileLength, i);
            if (Integer.bitCount(i) == 1) {
                logger.debug("did {} gets", i);
            }
        }
        assertThat(taskQueue.hasRunnableTasks(), is(false));
        // One get past capacity forces an eviction and schedules the first decay.
        cacheService.get(cacheKey, fileLength, regions);
        decay.run();
        int maxRounds = 5;
        for (int round = 2; round <= maxRounds; ++round) {
            for (int i = round; i < regions + round; ++i) {
                cacheService.get(cacheKey, fileLength, i);
                if (Integer.bitCount(i) == 1) {
                    logger.debug("did {} gets", i);
                }
            }
            decay.run();
        }
        Map<Integer, Integer> freqs = new HashMap<>();
        for (int i = maxRounds; i < regions + maxRounds; ++i) {
            int freq = cacheService.getFreq(cacheService.get(cacheKey, fileLength, i)) - 2;
            freqs.compute(freq, (k, v) -> v == null ? 1 : v + 1);
            if (Integer.bitCount(i) == 1) {
                logger.debug("did {} gets", i);
            }
        }
        assertThat(freqs.get(4), equalTo(regions - maxRounds + 1));
        long epochs = recordedEpochs(recordingMeterRegistry);
        assertEquals(cacheService.epoch(), epochs);
    }
}
/**
 * Exercise SharedBlobCacheService#get in multiple threads to trigger any assertion errors.
 * @throws IOException if the node environment cannot be created
 */
public void testGetMultiThreaded() throws IOException {
    final int threads = between(2, 10);
    final int regionCount = between(1, 20);
    final boolean incRef = randomBoolean();
    // if we have enough regions, a get should always have a result (except for explicit evict interference)
    // if we incRef, we risk the eviction racing against that, leading to no available region, so allow
    // the already closed exception in that case.
    final boolean allowAlreadyClosed = regionCount < threads || incRef;
    logger.info("{} {} {}", threads, regionCount, allowAlreadyClosed);
    Settings settings = Settings.builder()
        .put(NODE_NAME_SETTING.getKey(), "node")
        .put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(size(regionCount * 100L)).getStringRep())
        .put(SharedBlobCacheService.SHARED_CACHE_REGION_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(size(100)).getStringRep())
        .put(SharedBlobCacheService.SHARED_CACHE_MIN_TIME_DELTA_SETTING.getKey(), randomFrom("0", "1ms", "10s"))
        .put("path.home", createTempDir())
        .build();
    long fileLength = size(500);
    ThreadPool threadPool = new TestThreadPool("testGetMultiThreaded");
    Set<String> files = randomSet(1, 10, () -> randomAlphaOfLength(5));
    try (
        NodeEnvironment environment = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));
        var cacheService = new SharedBlobCacheService<String>(
            environment,
            settings,
            threadPool,
            threadPool.executor(ThreadPool.Names.GENERIC),
            BlobCacheMetrics.NOOP
        )
    ) {
        CyclicBarrier ready = new CyclicBarrier(threads);
        // Pre-compute each thread's random decisions so no randomness is
        // drawn concurrently inside the worker threads.
        List<Thread> threadList = IntStream.range(0, threads).mapToObj(no -> {
            int iterations = between(100, 500);
            String[] cacheKeys = IntStream.range(0, iterations).mapToObj(ignore -> randomFrom(files)).toArray(String[]::new);
            int[] regions = IntStream.range(0, iterations).map(ignore -> between(0, 4)).toArray();
            int[] yield = IntStream.range(0, iterations).map(ignore -> between(0, 9)).toArray();
            int[] evict = IntStream.range(0, iterations).map(ignore -> between(0, 99)).toArray();
            return new Thread(() -> {
                try {
                    ready.await();
                    for (int i = 0; i < iterations; ++i) {
                        try {
                            SharedBlobCacheService.CacheFileRegion<String> cacheFileRegion;
                            try {
                                cacheFileRegion = cacheService.get(cacheKeys[i], fileLength, regions[i]);
                            } catch (AlreadyClosedException e) {
                                assert allowAlreadyClosed || e.getMessage().equals("evicted during free region allocation") : e;
                                throw e;
                            }
                            assertTrue(cacheFileRegion.testOnlyNonVolatileIO() != null || cacheFileRegion.isEvicted());
                            if (incRef && cacheFileRegion.tryIncRef()) {
                                if (yield[i] == 0) {
                                    Thread.yield();
                                }
                                cacheFileRegion.decRef();
                            }
                            if (evict[i] == 0) {
                                cacheService.forceEvict(x -> true);
                            }
                        } catch (AlreadyClosedException e) {
                            // ignore
                        }
                    }
                } catch (InterruptedException | BrokenBarrierException e) {
                    assert false;
                    throw new RuntimeException(e);
                }
            });
        }).toList();
        threadList.forEach(Thread::start);
        threadList.forEach(thread -> {
            try {
                thread.join();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new RuntimeException(e);
            }
        });
    } finally {
        threadPool.shutdownNow();
    }
}
/**
 * A positive shared-cache size setting (absolute or ratio) must be rejected
 * on a node that has none of the frozen/search/indexing roles.
 */
public void testCacheSizeRejectedOnNonFrozenNodes() {
    String cacheSize = randomBoolean()
        ? ByteSizeValue.ofBytes(size(500)).getStringRep()
        : (new RatioValue(between(1, 100))).formatNoTrailingZerosPercent();
    final Settings settings = Settings.builder()
        .put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), cacheSize)
        .put(SharedBlobCacheService.SHARED_CACHE_REGION_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(size(100)).getStringRep())
        .putList(NodeRoleSettings.NODE_ROLES_SETTING.getKey(), DiscoveryNodeRole.DATA_HOT_NODE_ROLE.roleName())
        .build();
    final IllegalArgumentException e = expectThrows(
        IllegalArgumentException.class,
        () -> SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.get(settings)
    );
    // The role-validation failure is wrapped as the cause of the setting error.
    assertThat(e.getCause(), notNullValue());
    assertThat(e.getCause(), instanceOf(SettingsException.class));
    assertThat(
        e.getCause().getMessage(),
        is(
            "Setting ["
                + SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey()
                + "] to be positive ["
                + cacheSize
                + "] is only permitted on nodes with the data_frozen, search, or indexing role. Roles are [data_hot]"
        )
    );
}
/**
 * A non-zero shared cache is not allowed when the node is configured with
 * multiple data paths; the setting getter must fail with a SettingsException
 * cause naming the offending paths.
 */
public void testMultipleDataPathsRejectedOnFrozenNodes() {
    final Settings settings = Settings.builder()
        .put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(size(500)).getStringRep())
        .putList(NodeRoleSettings.NODE_ROLES_SETTING.getKey(), DiscoveryNodeRole.DATA_FROZEN_NODE_ROLE.roleName())
        .putList(Environment.PATH_DATA_SETTING.getKey(), List.of("a", "b"))
        .build();
    final IllegalArgumentException e = expectThrows(
        IllegalArgumentException.class,
        () -> SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.get(settings)
    );
    assertThat(e.getCause(), notNullValue());
    assertThat(e.getCause(), instanceOf(SettingsException.class));
    assertThat(
        e.getCause().getMessage(),
        is(
            "setting ["
                + SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey()
                + "="
                + ByteSizeValue.ofBytes(size(500)).getStringRep()
                + "] is not permitted on nodes with multiple data paths [a,b]"
        )
    );
}
/**
 * On a dedicated frozen node the cache size defaults to a relative (90%)
 * value with a 100 GB max headroom.
 */
public void testDedicateFrozenCacheSizeDefaults() {
    final Settings settings = Settings.builder()
        .putList(NodeRoleSettings.NODE_ROLES_SETTING.getKey(), DiscoveryNodeRole.DATA_FROZEN_NODE_ROLE.roleName())
        .build();
    RelativeByteSizeValue relativeCacheSize = SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.get(settings);
    assertThat(relativeCacheSize.isAbsolute(), is(false));
    assertThat(relativeCacheSize.isNonZeroSize(), is(true));
    // 90% of total: 10000 bytes -> 9000 bytes.
    assertThat(relativeCacheSize.calculateValue(ByteSizeValue.ofBytes(10000), null), equalTo(ByteSizeValue.ofBytes(9000)));
    assertThat(SharedBlobCacheService.SHARED_CACHE_SIZE_MAX_HEADROOM_SETTING.get(settings), equalTo(ByteSizeValue.ofGb(100)))
;
}
public void testNotDedicatedFrozenCacheSizeDefaults() {
    // A node holding a non-frozen data tier role is not a dedicated frozen node, so the
    // default cache size is an absolute zero and the headroom default is disabled (-1).
    final DiscoveryNodeRole dataTierRole = randomFrom(
        DiscoveryNodeRole.DATA_HOT_NODE_ROLE,
        DiscoveryNodeRole.DATA_COLD_NODE_ROLE,
        DiscoveryNodeRole.DATA_WARM_NODE_ROLE,
        DiscoveryNodeRole.DATA_CONTENT_NODE_ROLE
    );
    // Optionally mix in frozen/ingest/master roles: the node still is not *dedicated* frozen.
    final Set<DiscoveryNodeRole> extraRoles = new HashSet<>(
        randomSubsetOf(
            between(0, 3),
            DiscoveryNodeRole.DATA_FROZEN_NODE_ROLE,
            DiscoveryNodeRole.INGEST_ROLE,
            DiscoveryNodeRole.MASTER_ROLE
        )
    );
    final List<String> roleNames = Sets.union(Set.of(dataTierRole), extraRoles)
        .stream()
        .map(DiscoveryNodeRole::roleName)
        .collect(Collectors.toList());
    final Settings nodeSettings = Settings.builder().putList(NodeRoleSettings.NODE_ROLES_SETTING.getKey(), roleNames).build();
    final RelativeByteSizeValue cacheSize = SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.get(nodeSettings);
    assertThat(cacheSize.isNonZeroSize(), is(false));
    assertThat(cacheSize.isAbsolute(), is(true));
    assertThat(cacheSize.getAbsolute(), equalTo(ByteSizeValue.ZERO));
    assertThat(SharedBlobCacheService.SHARED_CACHE_SIZE_MAX_HEADROOM_SETTING.get(nodeSettings), equalTo(ByteSizeValue.ofBytes(-1)));
}
public void testSearchOrIndexNodeCacheSizeDefaults() {
    // Search and index roles get the same relative cache size default as dedicated frozen nodes.
    final DiscoveryNodeRole role = randomFrom(DiscoveryNodeRole.SEARCH_ROLE, DiscoveryNodeRole.INDEX_ROLE);
    final Settings nodeSettings = Settings.builder()
        .putList(NodeRoleSettings.NODE_ROLES_SETTING.getKey(), role.roleName())
        .build();
    final RelativeByteSizeValue cacheSize = SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.get(nodeSettings);
    assertThat(cacheSize.isAbsolute(), is(false));
    assertThat(cacheSize.isNonZeroSize(), is(true));
    assertThat(cacheSize.calculateValue(ByteSizeValue.ofBytes(10000), null), equalTo(ByteSizeValue.ofBytes(9000)));
    assertThat(SharedBlobCacheService.SHARED_CACHE_SIZE_MAX_HEADROOM_SETTING.get(nodeSettings), equalTo(ByteSizeValue.ofGb(100)));
}
public void testMaxHeadroomRejectedForAbsoluteCacheSize() {
    // The max-headroom setting only applies to a relative cache size; combining it with an
    // absolute size must be rejected.
    final String absoluteCacheSize = ByteSizeValue.ofBytes(size(500)).getStringRep();
    final Settings nodeSettings = Settings.builder()
        .put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), absoluteCacheSize)
        .put(SharedBlobCacheService.SHARED_CACHE_SIZE_MAX_HEADROOM_SETTING.getKey(), ByteSizeValue.ofBytes(size(100)).getStringRep())
        .putList(NodeRoleSettings.NODE_ROLES_SETTING.getKey(), DiscoveryNodeRole.DATA_FROZEN_NODE_ROLE.roleName())
        .build();
    final IllegalArgumentException exception = expectThrows(
        IllegalArgumentException.class,
        () -> SharedBlobCacheService.SHARED_CACHE_SIZE_MAX_HEADROOM_SETTING.get(nodeSettings)
    );
    final Throwable cause = exception.getCause();
    assertThat(cause, notNullValue());
    assertThat(cause, instanceOf(SettingsException.class));
    final String expectedMessage = "setting ["
        + SharedBlobCacheService.SHARED_CACHE_SIZE_MAX_HEADROOM_SETTING.getKey()
        + "] cannot be specified for absolute ["
        + SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey()
        + "="
        + absoluteCacheSize
        + "]";
    assertThat(cause.getMessage(), is(expectedMessage));
}
public void testCalculateCacheSize() {
    final long smallTotal = 10000;
    final long largeTotal = ByteSizeValue.ofTb(10).getBytes();
    // Without a frozen role the computed cache size is zero.
    assertThat(SharedBlobCacheService.calculateCacheSize(Settings.EMPTY, smallTotal), equalTo(0L));
    final Settings frozenSettings = Settings.builder()
        .putList(NodeRoleSettings.NODE_ROLES_SETTING.getKey(), DiscoveryNodeRole.DATA_FROZEN_NODE_ROLE.roleName())
        .build();
    // 90% of a small disk; on a large disk the 100 GB headroom cap applies instead.
    assertThat(SharedBlobCacheService.calculateCacheSize(frozenSettings, smallTotal), equalTo(9000L));
    assertThat(
        SharedBlobCacheService.calculateCacheSize(frozenSettings, largeTotal),
        equalTo(largeTotal - ByteSizeValue.ofGb(100).getBytes())
    );
}
/** Returns a fresh, unique cache key; identity-based equality is all these tests need. */
private static Object generateCacheKey() {
    final Object key = new Object();
    return key;
}
/**
 * Builds the cache service with one configured size, closes it, then rebuilds it with a new size:
 * the stats reported by each instance must reflect the size it was configured with.
 */
public void testCacheSizeChanges() throws IOException {
    final DeterministicTaskQueue taskQueue = new DeterministicTaskQueue();
    final ByteSizeValue initialSize = ByteSizeValue.of(randomIntBetween(1, 5), ByteSizeUnit.MB);
    Settings settings = Settings.builder()
        .put(NODE_NAME_SETTING.getKey(), "node")
        .put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), initialSize.getStringRep())
        .put(SharedBlobCacheService.SHARED_CACHE_REGION_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(size(100)).getStringRep())
        .put("path.home", createTempDir())
        .build();
    assertCacheStatsSize(settings, taskQueue, initialSize);

    final ByteSizeValue updatedSize = ByteSizeValue.of(randomIntBetween(1, 5), ByteSizeUnit.MB);
    settings = Settings.builder()
        .put(settings)
        .put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), updatedSize.getStringRep())
        .build();
    assertCacheStatsSize(settings, taskQueue, updatedSize);
}

/** Opens a cache service for {@code settings} and verifies its reported size equals {@code expectedSize}. */
private static void assertCacheStatsSize(Settings settings, DeterministicTaskQueue taskQueue, ByteSizeValue expectedSize)
    throws IOException {
    try (
        NodeEnvironment environment = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));
        SharedBlobCacheService<?> cacheService = new SharedBlobCacheService<>(
            environment,
            settings,
            taskQueue.getThreadPool(),
            taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC),
            BlobCacheMetrics.NOOP
        )
    ) {
        assertEquals(expectedSize.getBytes(), cacheService.getStats().size());
    }
}
public void testMaybeEvictLeastUsed() throws Exception {
final int numRegions = 10;
final long regionSize = size(1L);
Settings settings = Settings.builder()
.put(NODE_NAME_SETTING.getKey(), "node")
.put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(size(numRegions)).getStringRep())
.put(SharedBlobCacheService.SHARED_CACHE_REGION_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(regionSize).getStringRep())
.put("path.home", createTempDir())
.build();
final DeterministicTaskQueue taskQueue = new DeterministicTaskQueue();
try (
NodeEnvironment environment = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));
var cacheService = new SharedBlobCacheService<Object>(
environment,
settings,
taskQueue.getThreadPool(),
taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC),
BlobCacheMetrics.NOOP
)
) {
final Map<Object, SharedBlobCacheService.CacheFileRegion<Object>> cacheEntries = new HashMap<>();
assertThat("All regions are free", cacheService.freeRegionCount(), equalTo(numRegions));
assertThat("Cache has no entries", cacheService.maybeEvictLeastUsed(), is(false));
// use all regions in cache
for (int i = 0; i < numRegions; i++) {
final var cacheKey = generateCacheKey();
var entry = cacheService.get(cacheKey, regionSize, 0);
entry.populate(
ByteRange.of(0L, regionSize),
(channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith(
completionListener,
() -> progressUpdater.accept(length)
),
taskQueue.getThreadPool().generic(),
ActionListener.noop()
);
assertThat(cacheService.getFreq(entry), equalTo(1));
cacheEntries.put(cacheKey, entry);
}
assertThat("All regions are used", cacheService.freeRegionCount(), equalTo(0));
assertThat("Cache entries are not old enough to be evicted", cacheService.maybeEvictLeastUsed(), is(false));
taskQueue.runAllRunnableTasks();
assertThat("All regions are used", cacheService.freeRegionCount(), equalTo(0));
assertThat("Cache entries are not old enough to be evicted", cacheService.maybeEvictLeastUsed(), is(false));
cacheService.maybeScheduleDecayAndNewEpoch();
taskQueue.runAllRunnableTasks();
cacheEntries.keySet().forEach(key -> cacheService.get(key, regionSize, 0));
cacheService.maybeScheduleDecayAndNewEpoch();
taskQueue.runAllRunnableTasks();
// touch some random cache entries
var usedCacheKeys = Set.copyOf(randomSubsetOf(cacheEntries.keySet()));
usedCacheKeys.forEach(key -> cacheService.get(key, regionSize, 0));
cacheEntries.forEach(
(key, entry) -> assertThat(cacheService.getFreq(entry), usedCacheKeys.contains(key) ? equalTo(3) : equalTo(1))
);
assertThat("All regions are used", cacheService.freeRegionCount(), equalTo(0));
assertThat("Cache entries are not old enough to be evicted", cacheService.maybeEvictLeastUsed(), is(false));
cacheService.maybeScheduleDecayAndNewEpoch();
taskQueue.runAllRunnableTasks();
assertThat("All regions are used", cacheService.freeRegionCount(), equalTo(0));
cacheEntries.forEach(
(key, entry) -> assertThat(cacheService.getFreq(entry), usedCacheKeys.contains(key) ? equalTo(2) : equalTo(0))
);
var zeroFrequencyCacheEntries = cacheEntries.size() - usedCacheKeys.size();
for (int i = 0; i < zeroFrequencyCacheEntries; i++) {
assertThat(cacheService.freeRegionCount(), equalTo(i));
assertThat("Cache entry is old enough to be evicted", cacheService.maybeEvictLeastUsed(), is(true));
assertThat(cacheService.freeRegionCount(), equalTo(i + 1));
}
assertThat("No more cache entries old enough to be evicted", cacheService.maybeEvictLeastUsed(), is(false));
assertThat(cacheService.freeRegionCount(), equalTo(zeroFrequencyCacheEntries));
}
}
public void testMaybeFetchRegion() throws Exception {
final long cacheSize = size(500L);
final long regionSize = size(100L);
Settings settings = Settings.builder()
.put(NODE_NAME_SETTING.getKey(), "node")
.put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(cacheSize).getStringRep())
.put(SharedBlobCacheService.SHARED_CACHE_REGION_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(regionSize).getStringRep())
.put("path.home", createTempDir())
.build();
final var bulkTaskCount = new AtomicInteger(0);
final var threadPool = new TestThreadPool("test");
final var bulkExecutor = new StoppableExecutorServiceWrapper(threadPool.generic()) {
@Override
public void execute(Runnable command) {
super.execute(command);
bulkTaskCount.incrementAndGet();
}
};
try (
NodeEnvironment environment = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));
var cacheService = new SharedBlobCacheService<>(
environment,
settings,
threadPool,
threadPool.executor(ThreadPool.Names.GENERIC),
BlobCacheMetrics.NOOP
)
) {
{
// fetch a single region
final var cacheKey = generateCacheKey();
assertEquals(5, cacheService.freeRegionCount());
final long blobLength = size(250); // 3 regions
AtomicLong bytesRead = new AtomicLong(0L);
final PlainActionFuture<Boolean> future = new PlainActionFuture<>();
cacheService.maybeFetchRegion(
cacheKey,
0,
blobLength,
(channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith(
completionListener,
() -> {
assert streamFactory == null : streamFactory;
bytesRead.addAndGet(length);
progressUpdater.accept(length);
}
),
bulkExecutor,
future
);
var fetched = future.get(10, TimeUnit.SECONDS);
assertThat("Region has been fetched", fetched, is(true));
assertEquals(regionSize, bytesRead.get());
assertEquals(4, cacheService.freeRegionCount());
assertEquals(1, bulkTaskCount.get());
}
{
// fetch multiple regions to used all the cache
final int remainingFreeRegions = cacheService.freeRegionCount();
assertEquals(4, cacheService.freeRegionCount());
final var cacheKey = generateCacheKey();
final long blobLength = regionSize * remainingFreeRegions;
AtomicLong bytesRead = new AtomicLong(0L);
final PlainActionFuture<Collection<Boolean>> future = new PlainActionFuture<>();
final var listener = new GroupedActionListener<>(remainingFreeRegions, future);
for (int region = 0; region < remainingFreeRegions; region++) {
cacheService.maybeFetchRegion(
cacheKey,
region,
blobLength,
(channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith(
completionListener,
() -> {
assert streamFactory == null : streamFactory;
bytesRead.addAndGet(length);
progressUpdater.accept(length);
}
),
bulkExecutor,
listener
);
}
var results = future.get(10, TimeUnit.SECONDS);
assertThat(results.stream().allMatch(result -> result), is(true));
assertEquals(blobLength, bytesRead.get());
assertEquals(0, cacheService.freeRegionCount());
assertEquals(1 + remainingFreeRegions, bulkTaskCount.get());
}
{
// cache fully used, no entry old enough to be evicted
assertEquals(0, cacheService.freeRegionCount());
final var cacheKey = generateCacheKey();
final PlainActionFuture<Boolean> future = new PlainActionFuture<>();
cacheService.maybeFetchRegion(
cacheKey,
randomIntBetween(0, 10),
randomLongBetween(1L, regionSize),
(channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith(
completionListener,
() -> {
throw new AssertionError("should not be executed");
}
),
bulkExecutor,
future
);
assertThat("Listener is immediately completed", future.isDone(), is(true));
assertThat("Region already exists in cache", future.get(), is(false));
}
{
cacheService.computeDecay();
// fetch one more region should evict an old cache entry
final var cacheKey = generateCacheKey();
assertEquals(0, cacheService.freeRegionCount());
long blobLength = randomLongBetween(1L, regionSize);
AtomicLong bytesRead = new AtomicLong(0L);
final PlainActionFuture<Boolean> future = new PlainActionFuture<>();
cacheService.maybeFetchRegion(
cacheKey,
0,
blobLength,
(channel, channelPos, ignore, relativePos, length, progressUpdater, completionListener) -> completeWith(
completionListener,
() -> {
assert ignore == null : ignore;
bytesRead.addAndGet(length);
progressUpdater.accept(length);
}
),
bulkExecutor,
future
);
var fetched = future.get(10, TimeUnit.SECONDS);
assertThat("Region has been fetched", fetched, is(true));
assertEquals(blobLength, bytesRead.get());
assertEquals(0, cacheService.freeRegionCount());
}
}
threadPool.shutdown();
}
public void testFetchRegion() throws Exception {
final long cacheSize = size(500L);
final long regionSize = size(100L);
Settings settings = Settings.builder()
.put(NODE_NAME_SETTING.getKey(), "node")
.put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(cacheSize).getStringRep())
.put(SharedBlobCacheService.SHARED_CACHE_REGION_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(regionSize).getStringRep())
.put("path.home", createTempDir())
.build();
final var bulkTaskCount = new AtomicInteger(0);
final var threadPool = new TestThreadPool("test");
final var bulkExecutor = new StoppableExecutorServiceWrapper(threadPool.generic()) {
@Override
public void execute(Runnable command) {
super.execute(command);
bulkTaskCount.incrementAndGet();
}
};
try (
NodeEnvironment environment = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));
var cacheService = new SharedBlobCacheService<>(
environment,
settings,
threadPool,
threadPool.executor(ThreadPool.Names.GENERIC),
BlobCacheMetrics.NOOP
)
) {
{
// fetch a single region
final var cacheKey = generateCacheKey();
assertEquals(5, cacheService.freeRegionCount());
final long blobLength = size(250); // 3 regions
AtomicLong bytesRead = new AtomicLong(0L);
final PlainActionFuture<Boolean> future = new PlainActionFuture<>();
cacheService.fetchRegion(
cacheKey,
0,
blobLength,
(channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith(
completionListener,
() -> {
assert streamFactory == null : streamFactory;
bytesRead.addAndGet(length);
progressUpdater.accept(length);
}
),
bulkExecutor,
true,
future
);
var fetched = future.get(10, TimeUnit.SECONDS);
assertThat("Region has been fetched", fetched, is(true));
assertEquals(regionSize, bytesRead.get());
assertEquals(4, cacheService.freeRegionCount());
assertEquals(1, bulkTaskCount.get());
}
{
// fetch multiple regions to used all the cache
final int remainingFreeRegions = cacheService.freeRegionCount();
assertEquals(4, cacheService.freeRegionCount());
final var cacheKey = generateCacheKey();
final long blobLength = regionSize * remainingFreeRegions;
AtomicLong bytesRead = new AtomicLong(0L);
final PlainActionFuture<Collection<Boolean>> future = new PlainActionFuture<>();
final var listener = new GroupedActionListener<>(remainingFreeRegions, future);
for (int region = 0; region < remainingFreeRegions; region++) {
cacheService.fetchRegion(
cacheKey,
region,
blobLength,
(channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith(
completionListener,
() -> {
assert streamFactory == null : streamFactory;
bytesRead.addAndGet(length);
progressUpdater.accept(length);
}
),
bulkExecutor,
true,
listener
);
}
var results = future.get(10, TimeUnit.SECONDS);
assertThat(results.stream().allMatch(result -> result), is(true));
assertEquals(blobLength, bytesRead.get());
assertEquals(0, cacheService.freeRegionCount());
assertEquals(1 + remainingFreeRegions, bulkTaskCount.get());
}
{
// cache fully used, no entry old enough to be evicted and force=false should not evict entries
assertEquals(0, cacheService.freeRegionCount());
final var cacheKey = generateCacheKey();
final PlainActionFuture<Boolean> future = new PlainActionFuture<>();
cacheService.fetchRegion(
cacheKey,
0,
regionSize,
(channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith(
completionListener,
() -> {
throw new AssertionError("should not be executed");
}
),
bulkExecutor,
false,
future
);
assertThat("Listener is immediately completed", future.isDone(), is(true));
assertThat("Region already exists in cache", future.get(), is(false));
}
{
// cache fully used, but force=true, so the cache should evict regions to make space for the requested regions
assertEquals(0, cacheService.freeRegionCount());
AtomicLong bytesRead = new AtomicLong(0L);
final var cacheKey = generateCacheKey();
final PlainActionFuture<Collection<Boolean>> future = new PlainActionFuture<>();
var regionsToFetch = randomIntBetween(1, (int) (cacheSize / regionSize));
final var listener = new GroupedActionListener<>(regionsToFetch, future);
long blobLength = regionsToFetch * regionSize;
for (int region = 0; region < regionsToFetch; region++) {
cacheService.fetchRegion(
cacheKey,
region,
blobLength,
(channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith(
completionListener,
() -> {
assert streamFactory == null : streamFactory;
bytesRead.addAndGet(length);
progressUpdater.accept(length);
}
),
bulkExecutor,
true,
listener
);
}
var results = future.get(10, TimeUnit.SECONDS);
assertThat(results.stream().allMatch(result -> result), is(true));
assertEquals(blobLength, bytesRead.get());
assertEquals(0, cacheService.freeRegionCount());
assertEquals(regionsToFetch + 5, bulkTaskCount.get());
}
{
cacheService.computeDecay();
// We explicitly called computeDecay, meaning that some regions must have been demoted to level 0,
// therefore there should be enough room to fetch the requested range regardless of the force flag.
final var cacheKey = generateCacheKey();
assertEquals(0, cacheService.freeRegionCount());
long blobLength = randomLongBetween(1L, regionSize);
AtomicLong bytesRead = new AtomicLong(0L);
final PlainActionFuture<Boolean> future = new PlainActionFuture<>();
cacheService.fetchRegion(
cacheKey,
0,
blobLength,
(channel, channelPos, ignore, relativePos, length, progressUpdater, completionListener) -> completeWith(
completionListener,
() -> {
assert ignore == null : ignore;
bytesRead.addAndGet(length);
progressUpdater.accept(length);
}
),
bulkExecutor,
randomBoolean(),
future
);
var fetched = future.get(10, TimeUnit.SECONDS);
assertThat("Region has been fetched", fetched, is(true));
assertEquals(blobLength, bytesRead.get());
assertEquals(0, cacheService.freeRegionCount());
}
} finally {
TestThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS);
}
}
public void testMaybeFetchRange() throws Exception {
final long cacheSize = size(500L);
final long regionSize = size(100L);
Settings settings = Settings.builder()
.put(NODE_NAME_SETTING.getKey(), "node")
.put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(cacheSize).getStringRep())
.put(SharedBlobCacheService.SHARED_CACHE_REGION_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(regionSize).getStringRep())
.put("path.home", createTempDir())
.build();
final var bulkTaskCount = new AtomicInteger(0);
final var threadPool = new TestThreadPool("test");
final var bulkExecutor = new StoppableExecutorServiceWrapper(threadPool.generic()) {
@Override
public void execute(Runnable command) {
super.execute(command);
bulkTaskCount.incrementAndGet();
}
};
try (
NodeEnvironment environment = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));
var cacheService = new SharedBlobCacheService<>(
environment,
settings,
threadPool,
threadPool.executor(ThreadPool.Names.GENERIC),
BlobCacheMetrics.NOOP
)
) {
{
// fetch a random range in a random region of the blob
final var cacheKey = generateCacheKey();
assertEquals(5, cacheService.freeRegionCount());
// blobLength is 1024000 bytes and requires 3 regions
final long blobLength = size(250);
final var regions = List.of(
// region 0: 0-409600
ByteRange.of(cacheService.getRegionStart(0), cacheService.getRegionEnd(0)),
// region 1: 409600-819200
ByteRange.of(cacheService.getRegionStart(1), cacheService.getRegionEnd(1)),
// region 2: 819200-1228800
ByteRange.of(cacheService.getRegionStart(2), cacheService.getRegionEnd(2))
);
long pos = randomLongBetween(0, blobLength - 1L);
long len = randomLongBetween(1, blobLength - pos);
var range = ByteRange.of(pos, pos + len);
var region = between(0, regions.size() - 1);
var regionRange = cacheService.mapSubRangeToRegion(range, region);
var bytesCopied = new AtomicLong(0L);
var future = new PlainActionFuture<Boolean>();
cacheService.maybeFetchRange(
cacheKey,
region,
range,
blobLength,
(channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith(
completionListener,
() -> {
assertThat(range.start() + relativePos, equalTo(cacheService.getRegionStart(region) + regionRange.start()));
assertThat(channelPos, equalTo(Math.toIntExact(regionRange.start())));
assertThat(length, equalTo(Math.toIntExact(regionRange.length())));
bytesCopied.addAndGet(length);
}
),
bulkExecutor,
future
);
var fetched = future.get(10, TimeUnit.SECONDS);
assertThat(regionRange.length(), equalTo(bytesCopied.get()));
if (regionRange.isEmpty()) {
assertThat(fetched, is(false));
assertEquals(5, cacheService.freeRegionCount());
assertEquals(0, bulkTaskCount.get());
} else {
assertThat(fetched, is(true));
assertEquals(4, cacheService.freeRegionCount());
assertEquals(1, bulkTaskCount.get());
}
}
{
// fetch multiple ranges to use all the cache
final int remainingFreeRegions = cacheService.freeRegionCount();
assertThat(remainingFreeRegions, greaterThanOrEqualTo(4));
bulkTaskCount.set(0);
final var cacheKey = generateCacheKey();
final long blobLength = regionSize * remainingFreeRegions;
AtomicLong bytesCopied = new AtomicLong(0L);
final PlainActionFuture<Collection<Boolean>> future = new PlainActionFuture<>();
final var listener = new GroupedActionListener<>(remainingFreeRegions, future);
for (int region = 0; region < remainingFreeRegions; region++) {
cacheService.maybeFetchRange(
cacheKey,
region,
ByteRange.of(0L, blobLength),
blobLength,
(channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith(
completionListener,
() -> bytesCopied.addAndGet(length)
),
bulkExecutor,
listener
);
}
var results = future.get(10, TimeUnit.SECONDS);
assertThat(results.stream().allMatch(result -> result), is(true));
assertEquals(blobLength, bytesCopied.get());
assertEquals(0, cacheService.freeRegionCount());
assertEquals(remainingFreeRegions, bulkTaskCount.get());
}
{
// cache fully used, no entry old enough to be evicted
assertEquals(0, cacheService.freeRegionCount());
final var cacheKey = generateCacheKey();
final var blobLength = randomLongBetween(1L, regionSize);
final PlainActionFuture<Boolean> future = new PlainActionFuture<>();
cacheService.maybeFetchRange(
cacheKey,
randomIntBetween(0, 10),
ByteRange.of(0L, blobLength),
blobLength,
(channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith(
completionListener,
() -> {
throw new AssertionError("should not be executed");
}
),
bulkExecutor,
future
);
assertThat("Listener is immediately completed", future.isDone(), is(true));
assertThat("Region already exists in cache", future.get(), is(false));
}
{
cacheService.computeDecay();
// fetch one more range should evict an old cache entry
final var cacheKey = generateCacheKey();
assertEquals(0, cacheService.freeRegionCount());
long blobLength = randomLongBetween(1L, regionSize);
AtomicLong bytesCopied = new AtomicLong(0L);
final PlainActionFuture<Boolean> future = new PlainActionFuture<>();
cacheService.maybeFetchRange(
cacheKey,
0,
ByteRange.of(0L, blobLength),
blobLength,
(channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith(
completionListener,
() -> bytesCopied.addAndGet(length)
),
bulkExecutor,
future
);
var fetched = future.get(10, TimeUnit.SECONDS);
assertThat("Region has been fetched", fetched, is(true));
assertEquals(blobLength, bytesCopied.get());
assertEquals(0, cacheService.freeRegionCount());
}
}
threadPool.shutdown();
}
public void testFetchRange() throws Exception {
final long cacheSize = size(500L);
final long regionSize = size(100L);
Settings settings = Settings.builder()
.put(NODE_NAME_SETTING.getKey(), "node")
.put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(cacheSize).getStringRep())
.put(SharedBlobCacheService.SHARED_CACHE_REGION_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(regionSize).getStringRep())
.put("path.home", createTempDir())
.build();
final var bulkTaskCount = new AtomicInteger(0);
final var threadPool = new TestThreadPool("test");
final var bulkExecutor = new StoppableExecutorServiceWrapper(threadPool.generic()) {
@Override
public void execute(Runnable command) {
super.execute(command);
bulkTaskCount.incrementAndGet();
}
};
try (
NodeEnvironment environment = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));
var cacheService = new SharedBlobCacheService<>(
environment,
settings,
threadPool,
threadPool.executor(ThreadPool.Names.GENERIC),
BlobCacheMetrics.NOOP
)
) {
{
// fetch a random range in a random region of the blob
final var cacheKey = generateCacheKey();
assertEquals(5, cacheService.freeRegionCount());
// blobLength is 1024000 bytes and requires 3 regions
final long blobLength = size(250);
final var regions = List.of(
// region 0: 0-409600
ByteRange.of(cacheService.getRegionStart(0), cacheService.getRegionEnd(0)),
// region 1: 409600-819200
ByteRange.of(cacheService.getRegionStart(1), cacheService.getRegionEnd(1)),
// region 2: 819200-1228800
ByteRange.of(cacheService.getRegionStart(2), cacheService.getRegionEnd(2))
);
long pos = randomLongBetween(0, blobLength - 1L);
long len = randomLongBetween(1, blobLength - pos);
var range = ByteRange.of(pos, pos + len);
var region = between(0, regions.size() - 1);
var regionRange = cacheService.mapSubRangeToRegion(range, region);
var bytesCopied = new AtomicLong(0L);
var future = new PlainActionFuture<Boolean>();
cacheService.maybeFetchRange(
cacheKey,
region,
range,
blobLength,
(channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith(
completionListener,
() -> {
assertThat(range.start() + relativePos, equalTo(cacheService.getRegionStart(region) + regionRange.start()));
assertThat(channelPos, equalTo(Math.toIntExact(regionRange.start())));
assertThat(length, equalTo(Math.toIntExact(regionRange.length())));
bytesCopied.addAndGet(length);
}
),
bulkExecutor,
future
);
var fetched = future.get(10, TimeUnit.SECONDS);
assertThat(regionRange.length(), equalTo(bytesCopied.get()));
if (regionRange.isEmpty()) {
assertThat(fetched, is(false));
assertEquals(5, cacheService.freeRegionCount());
assertEquals(0, bulkTaskCount.get());
} else {
assertThat(fetched, is(true));
assertEquals(4, cacheService.freeRegionCount());
assertEquals(1, bulkTaskCount.get());
}
}
{
// fetch multiple ranges to use all the cache
final int remainingFreeRegions = cacheService.freeRegionCount();
assertThat(remainingFreeRegions, greaterThanOrEqualTo(4));
bulkTaskCount.set(0);
final var cacheKey = generateCacheKey();
final long blobLength = regionSize * remainingFreeRegions;
AtomicLong bytesCopied = new AtomicLong(0L);
final PlainActionFuture<Collection<Boolean>> future = new PlainActionFuture<>();
final var listener = new GroupedActionListener<>(remainingFreeRegions, future);
for (int region = 0; region < remainingFreeRegions; region++) {
cacheService.fetchRange(
cacheKey,
region,
ByteRange.of(0L, blobLength),
blobLength,
(channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith(
completionListener,
() -> bytesCopied.addAndGet(length)
),
bulkExecutor,
true,
listener
);
}
var results = future.get(10, TimeUnit.SECONDS);
assertThat(results.stream().allMatch(result -> result), is(true));
assertEquals(blobLength, bytesCopied.get());
assertEquals(0, cacheService.freeRegionCount());
assertEquals(remainingFreeRegions, bulkTaskCount.get());
}
{
// cache fully used, no entry old enough to be evicted and force=false
assertEquals(0, cacheService.freeRegionCount());
final var cacheKey = generateCacheKey();
final var blobLength = randomLongBetween(1L, regionSize);
final PlainActionFuture<Boolean> future = new PlainActionFuture<>();
cacheService.fetchRange(
cacheKey,
randomIntBetween(0, 10),
ByteRange.of(0L, blobLength),
blobLength,
(channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith(
completionListener,
() -> {
throw new AssertionError("should not be executed");
}
),
bulkExecutor,
false,
future
);
assertThat("Listener is immediately completed", future.isDone(), is(true));
assertThat("Region already exists in cache", future.get(), is(false));
}
{
// cache fully used, since force=true the range should be populated
final var cacheKey = generateCacheKey();
assertEquals(0, cacheService.freeRegionCount());
long blobLength = randomLongBetween(1L, regionSize);
AtomicLong bytesCopied = new AtomicLong(0L);
final PlainActionFuture<Boolean> future = new PlainActionFuture<>();
cacheService.fetchRange(
cacheKey,
0,
ByteRange.of(0L, blobLength),
blobLength,
(channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith(
completionListener,
() -> bytesCopied.addAndGet(length)
),
bulkExecutor,
true,
future
);
var fetched = future.get(10, TimeUnit.SECONDS);
assertThat("Region has been fetched", fetched, is(true));
assertEquals(blobLength, bytesCopied.get());
assertEquals(0, cacheService.freeRegionCount());
}
{
cacheService.computeDecay();
// We explicitly called computeDecay, meaning that some regions must have been demoted to level 0,
// therefore there should be enough room to fetch the requested range regardless of the force flag.
final var cacheKey = generateCacheKey();
assertEquals(0, cacheService.freeRegionCount());
long blobLength = randomLongBetween(1L, regionSize);
AtomicLong bytesCopied = new AtomicLong(0L);
final PlainActionFuture<Boolean> future = new PlainActionFuture<>();
cacheService.fetchRange(
cacheKey,
0,
ByteRange.of(0L, blobLength),
blobLength,
(channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith(
completionListener,
() -> bytesCopied.addAndGet(length)
),
bulkExecutor,
randomBoolean(),
future
);
var fetched = future.get(10, TimeUnit.SECONDS);
assertThat("Region has been fetched", fetched, is(true));
assertEquals(blobLength, bytesCopied.get());
assertEquals(0, cacheService.freeRegionCount());
}
} finally {
TestThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS);
}
}
public void testPopulate() throws Exception {
final long regionSize = size(1L);
Settings settings = Settings.builder()
.put(NODE_NAME_SETTING.getKey(), "node")
.put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(size(100)).getStringRep())
.put(SharedBlobCacheService.SHARED_CACHE_REGION_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(regionSize).getStringRep())
.put("path.home", createTempDir())
.build();
final DeterministicTaskQueue taskQueue = new DeterministicTaskQueue();
try (
NodeEnvironment environment = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));
var cacheService = new SharedBlobCacheService<>(
environment,
settings,
taskQueue.getThreadPool(),
taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC),
BlobCacheMetrics.NOOP
)
) {
final var cacheKey = generateCacheKey();
final var blobLength = size(12L);
// start populating the first region
var entry = cacheService.get(cacheKey, blobLength, 0);
AtomicLong bytesWritten = new AtomicLong(0L);
final PlainActionFuture<Boolean> future1 = new PlainActionFuture<>();
entry.populate(
ByteRange.of(0, regionSize - 1),
(channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith(
completionListener,
() -> {
bytesWritten.addAndGet(length);
progressUpdater.accept(length);
}
),
taskQueue.getThreadPool().generic(),
future1
);
assertThat(future1.isDone(), is(false));
assertThat(taskQueue.hasRunnableTasks(), is(true));
// start populating the second region
entry = cacheService.get(cacheKey, blobLength, 1);
final PlainActionFuture<Boolean> future2 = new PlainActionFuture<>();
entry.populate(
ByteRange.of(0, regionSize - 1),
(channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith(
completionListener,
() -> {
bytesWritten.addAndGet(length);
progressUpdater.accept(length);
}
),
taskQueue.getThreadPool().generic(),
future2
);
// start populating again the first region, listener should be called immediately
entry = cacheService.get(cacheKey, blobLength, 0);
final PlainActionFuture<Boolean> future3 = new PlainActionFuture<>();
entry.populate(
ByteRange.of(0, regionSize - 1),
(channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith(
completionListener,
() -> {
bytesWritten.addAndGet(length);
progressUpdater.accept(length);
}
),
taskQueue.getThreadPool().generic(),
future3
);
assertThat(future3.isDone(), is(true));
var written = future3.get(10L, TimeUnit.SECONDS);
assertThat(written, is(false));
taskQueue.runAllRunnableTasks();
written = future1.get(10L, TimeUnit.SECONDS);
assertThat(future1.isDone(), is(true));
assertThat(written, is(true));
written = future2.get(10L, TimeUnit.SECONDS);
assertThat(future2.isDone(), is(true));
assertThat(written, is(true));
}
}
private void assertThatNonPositiveRecoveryRangeSizeRejected(Setting<ByteSizeValue> setting) {
final String value = randomFrom(ByteSizeValue.MINUS_ONE, ByteSizeValue.ZERO).getStringRep();
final Settings settings = Settings.builder()
.put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(size(100)).getStringRep())
.putList(NodeRoleSettings.NODE_ROLES_SETTING.getKey(), DiscoveryNodeRole.DATA_FROZEN_NODE_ROLE.roleName())
.put(setting.getKey(), value)
.build();
final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> setting.get(settings));
assertThat(e.getCause(), notNullValue());
assertThat(e.getCause(), instanceOf(SettingsException.class));
assertThat(e.getCause().getMessage(), is("setting [" + setting.getKey() + "] must be greater than zero"));
}
public void testNonPositiveRegionSizeRejected() {
assertThatNonPositiveRecoveryRangeSizeRejected(SharedBlobCacheService.SHARED_CACHE_REGION_SIZE_SETTING);
}
public void testNonPositiveRangeSizeRejected() {
assertThatNonPositiveRecoveryRangeSizeRejected(SharedBlobCacheService.SHARED_CACHE_RANGE_SIZE_SETTING);
}
public void testNonPositiveRecoveryRangeSizeRejected() {
assertThatNonPositiveRecoveryRangeSizeRejected(SharedBlobCacheService.SHARED_CACHE_RECOVERY_RANGE_SIZE_SETTING);
}
public void testUseFullRegionSize() throws IOException {
final long regionSize = size(randomIntBetween(1, 100));
final long cacheSize = regionSize * randomIntBetween(1, 10);
Settings settings = Settings.builder()
.put(NODE_NAME_SETTING.getKey(), "node")
.put(SharedBlobCacheService.SHARED_CACHE_REGION_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(regionSize).getStringRep())
.put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(cacheSize).getStringRep())
.put("path.home", createTempDir())
.build();
final DeterministicTaskQueue taskQueue = new DeterministicTaskQueue();
try (
NodeEnvironment environment = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));
var cacheService = new SharedBlobCacheService<>(
environment,
settings,
taskQueue.getThreadPool(),
taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC),
BlobCacheMetrics.NOOP
) {
@Override
protected int computeCacheFileRegionSize(long fileLength, int region) {
// use full region
return super.getRegionSize();
}
}
) {
final var cacheKey = generateCacheKey();
final var blobLength = randomLongBetween(1L, cacheSize);
int regions = Math.toIntExact(blobLength / regionSize);
regions += (blobLength % regionSize == 0 ? 0 : 1);
assertThat(
cacheService.computeCacheFileRegionSize(blobLength, randomFrom(regions)),
equalTo(BlobCacheUtils.toIntBytes(regionSize))
);
for (int region = 0; region < regions; region++) {
var cacheFileRegion = cacheService.get(cacheKey, blobLength, region);
assertThat(cacheFileRegion.tracker.getLength(), equalTo(regionSize));
}
}
}
public void testUsageSharedSourceInputStreamFactoryInCachePopulation() throws Exception {
final long regionSizeInBytes = size(100);
final Settings settings = Settings.builder()
.put(NODE_NAME_SETTING.getKey(), "node")
.put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(size(200)).getStringRep())
.put(SharedBlobCacheService.SHARED_CACHE_REGION_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(regionSizeInBytes).getStringRep())
.put("path.home", createTempDir())
.build();
final ThreadPool threadPool = new TestThreadPool("test");
try (
NodeEnvironment environment = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));
var cacheService = new SharedBlobCacheService<>(
environment,
settings,
threadPool,
threadPool.executor(ThreadPool.Names.GENERIC),
BlobCacheMetrics.NOOP
)
) {
final var cacheKey = generateCacheKey();
assertEquals(2, cacheService.freeRegionCount());
final var region = cacheService.get(cacheKey, size(250), 0);
assertEquals(regionSizeInBytes, region.tracker.getLength());
// Read disjoint ranges to create holes in the region
final long interval = regionSizeInBytes / between(5, 20);
for (var start = interval; start < regionSizeInBytes - 2 * SharedBytes.PAGE_SIZE; start += interval) {
final var range = ByteRange.of(start, start + SharedBytes.PAGE_SIZE);
final PlainActionFuture<Integer> future = new PlainActionFuture<>();
region.populateAndRead(
range,
range,
(channel, channelPos, relativePos, length) -> length,
(channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith(
completionListener,
() -> progressUpdater.accept(length)
),
EsExecutors.DIRECT_EXECUTOR_SERVICE,
future
);
safeGet(future);
}
// Read the entire region with a shared source input stream and we want to ensure the following behaviours
// 1. fillCacheRange is invoked as many times as the number of holes/gaps
// 2. fillCacheRange is invoked single threaded with the gap order
// 3. The shared streamFactory is passed to each invocation
// 4. The factory is closed at the end
final int numberGaps = region.tracker.getCompletedRanges().size() + 1;
final var invocationCounter = new AtomicInteger();
final var factoryClosed = new AtomicBoolean(false);
final var dummyStreamFactory = new SourceInputStreamFactory() {
@Override
public void create(int relativePos, ActionListener<InputStream> listener) {
listener.onResponse(null);
}
@Override
public void close() {
factoryClosed.set(true);
}
};
final var rangeMissingHandler = new RangeMissingHandler() {
final AtomicReference<Thread> invocationThread = new AtomicReference<>();
final AtomicInteger position = new AtomicInteger(-1);
@Override
public SourceInputStreamFactory sharedInputStreamFactory(List<SparseFileTracker.Gap> gaps) {
return dummyStreamFactory;
}
@Override
public void fillCacheRange(
SharedBytes.IO channel,
int channelPos,
SourceInputStreamFactory streamFactory,
int relativePos,
int length,
IntConsumer progressUpdater,
ActionListener<Void> completion
) throws IOException {
completeWith(completion, () -> {
if (invocationCounter.incrementAndGet() == 1) {
final Thread witness = invocationThread.compareAndExchange(null, Thread.currentThread());
assertThat(witness, nullValue());
} else {
assertThat(invocationThread.get(), sameInstance(Thread.currentThread()));
}
assertThat(streamFactory, sameInstance(dummyStreamFactory));
assertThat(position.getAndSet(relativePos), lessThan(relativePos));
progressUpdater.accept(length);
});
}
};
final var range = ByteRange.of(0, regionSizeInBytes);
if (randomBoolean()) {
final PlainActionFuture<Integer> future = new PlainActionFuture<>();
region.populateAndRead(
range,
range,
(channel, channelPos, relativePos, length) -> length,
rangeMissingHandler,
threadPool.generic(),
future
);
assertThat(safeGet(future).longValue(), equalTo(regionSizeInBytes));
} else {
final PlainActionFuture<Boolean> future = new PlainActionFuture<>();
region.populate(range, rangeMissingHandler, threadPool.generic(), future);
assertThat(safeGet(future), equalTo(true));
}
assertThat(invocationCounter.get(), equalTo(numberGaps));
assertThat(region.tracker.checkAvailable(regionSizeInBytes), is(true));
assertBusy(() -> assertThat(factoryClosed.get(), is(true)));
} finally {
threadPool.shutdown();
}
}
}
|
SharedBlobCacheServiceTests
|
java
|
apache__camel
|
core/camel-api/src/main/java/org/apache/camel/ProducerTemplate.java
|
{
"start": 34163,
"end": 34940
}
|
class ____)
* @throws CamelExecutionException if the processing of the exchange failed
*/
<T> T requestBody(Endpoint endpoint, Object body, Class<T> type) throws CamelExecutionException;
/**
* Send the body to an endpoint returning any result output body. Uses an {@link ExchangePattern#InOut} message
* exchange pattern. <br/>
* <br/>
* <p/>
* <b>Notice:</b> that if the processing of the exchange failed with an Exception it is thrown from this method as a
* {@link org.apache.camel.CamelExecutionException} with the caused exception wrapped.
*
* @param endpointUri the endpoint URI to send to
* @param body the payload
* @return the result (see
|
javadoc
|
java
|
apache__camel
|
components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/component/properties/SpringPropertiesComponentEIPConvertBodyToTest.java
|
{
"start": 989,
"end": 1335
}
|
class ____ extends PropertiesComponentEIPConvertBodyToTest {
@Override
protected CamelContext createCamelContext() throws Exception {
return createSpringCamelContext(this,
"org/apache/camel/component/properties/SpringPropertiesComponentEIPConvertBodyToTest.xml");
}
}
|
SpringPropertiesComponentEIPConvertBodyToTest
|
java
|
quarkusio__quarkus
|
extensions/websockets-next/deployment/src/test/java/io/quarkus/websockets/next/test/telemetry/endpoints/ontextmessage/MultiTextReceived_NoResponse_Endpoint.java
|
{
"start": 267,
"end": 394
}
|
class ____ {
@OnTextMessage
public void onMessage(Multi<String> message) {
}
}
|
MultiTextReceived_NoResponse_Endpoint
|
java
|
apache__camel
|
components/camel-ftp/src/test/java/org/apache/camel/component/file/remote/integration/FtpConsumerThrowExceptionOnLoginFailedIT.java
|
{
"start": 1720,
"end": 2971
}
|
class ____ extends FtpServerTestSupport {
private final CountDownLatch latch = new CountDownLatch(1);
@BindToRegistry("myPoll")
private final MyPoll poll = new MyPoll();
private String getFtpUrl() {
return "ftp://dummy@localhost:{{ftp.server.port}}/badlogin?password=cantremember"
+ "&throwExceptionOnConnectFailed=true&maximumReconnectAttempts=0&pollStrategy=#myPoll&autoCreate=false";
}
@Test
public void testBadLogin() throws Exception {
getMockEndpoint("mock:result").expectedMessageCount(0);
assertTrue(latch.await(5, TimeUnit.SECONDS));
MockEndpoint.assertIsSatisfied(context);
// consumer should be stopped
Consumer consumer = context.getRoute("foo").getConsumer();
await().atMost(1, TimeUnit.SECONDS)
.untilAsserted(() -> assertTrue(((ServiceSupport) consumer).isStopped(), "Consumer should be stopped"));
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from(getFtpUrl()).routeId("foo").to("mock:result");
}
};
}
private
|
FtpConsumerThrowExceptionOnLoginFailedIT
|
java
|
spring-projects__spring-security
|
config/src/test/java/org/springframework/security/config/annotation/web/builders/HttpSecurityDeferAddFilterTests.java
|
{
"start": 8448,
"end": 8763
}
|
class ____ {
@Bean
SecurityFilterChain securityFilterChain(HttpSecurity http) throws Exception {
// @formatter:off
http
.addFilterAfter(new MyOtherFilter(), MyFilter.class);
// @formatter:on
return http.build();
}
}
@EnableWebSecurity
static
|
MyOtherFilterAfterMyFilterNotRegisteredYetConfig
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/processor/InterceptPropertiesTest.java
|
{
"start": 1018,
"end": 4608
}
|
class ____ extends ContextTestSupport {
@Test
public void testInterceptProperties() throws Exception {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() throws Exception {
intercept()
.to("mock:intercept");
from("direct:start")
.routeId("intercept-test")
.process(exchange -> {
})
.setBody(constant("Test"))
.to("log:body");
}
});
getMockEndpoint("mock:intercept").expectedMessageCount(3);
getMockEndpoint("mock:intercept")
.expectedPropertyReceived(ExchangePropertyKey.INTERCEPTED_ROUTE_ID.getName(), "intercept-test");
getMockEndpoint("mock:intercept")
.expectedPropertyReceived(ExchangePropertyKey.INTERCEPTED_ROUTE_ENDPOINT_URI.getName(), "direct://start");
// Node IDs are not always the same
// getMockEndpoint("mock:intercept")
// .expectedPropertyValuesReceivedInAnyOrder(ExchangePropertyKey.INTERCEPTED_NODE_ID.getName(), "to2", "process1",
// "setBody1");
template.sendBody("direct:start", "");
assertMockEndpointsSatisfied();
}
@Test
public void testInterceptFromProperties() throws Exception {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() throws Exception {
interceptFrom("direct:startInterceptFrom")
.to("mock:interceptFrom");
from("direct:startInterceptFrom")
.routeId("intercept-from-test")
.setBody(constant("Test"))
.to("log:test");
}
});
getMockEndpoint("mock:interceptFrom").expectedMessageCount(1);
getMockEndpoint("mock:interceptFrom")
.expectedPropertyReceived(ExchangePropertyKey.INTERCEPTED_ROUTE_ID.getName(), "intercept-from-test");
getMockEndpoint("mock:interceptFrom")
.expectedPropertyReceived(ExchangePropertyKey.INTERCEPTED_ROUTE_ENDPOINT_URI.getName(),
"direct://startInterceptFrom");
template.sendBody("direct:startInterceptFrom", "");
assertMockEndpointsSatisfied();
}
@Test
public void testInterceptSendToEndpointProperties() throws Exception {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() throws Exception {
interceptSendToEndpoint("log:body")
.to("mock:interceptSendToEndpoint");
from("direct:start")
.routeId("intercept-test")
.process(exchange -> {
})
.setBody(constant("Test"))
.to("log:body");
}
});
getMockEndpoint("mock:interceptSendToEndpoint").expectedMessageCount(1);
getMockEndpoint("mock:interceptSendToEndpoint")
.expectedPropertyReceived(ExchangePropertyKey.INTERCEPTED_ROUTE_ID.getName(), "intercept-test");
getMockEndpoint("mock:interceptSendToEndpoint")
.expectedPropertyReceived(ExchangePropertyKey.INTERCEPTED_ROUTE_ENDPOINT_URI.getName(), "direct://start");
template.sendBody("direct:start", "");
assertMockEndpointsSatisfied();
}
}
|
InterceptPropertiesTest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/boot/models/xml/attr/ManyToOneTests.java
|
{
"start": 4473,
"end": 4581
}
|
class ____ {
@Id
private Integer id;
private String name;
private SimpleEntity parent;
}
}
|
SimpleEntity
|
java
|
apache__kafka
|
storage/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteLogManager.java
|
{
"start": 45302,
"end": 62337
}
|
class ____ extends RLMTask {
private final int customMetadataSizeLimit;
private final Logger logger;
// The copied and log-start offset is empty initially for a new RLMCopyTask, and needs to be fetched inside
// the task's run() method.
private volatile Optional<OffsetAndEpoch> copiedOffsetOption = Optional.empty();
private volatile boolean isLogStartOffsetUpdated = false;
private volatile Optional<String> logDirectory = Optional.empty();
public RLMCopyTask(TopicIdPartition topicIdPartition, int customMetadataSizeLimit) {
super(topicIdPartition);
this.customMetadataSizeLimit = customMetadataSizeLimit;
this.logger = getLogContext().logger(RLMCopyTask.class);
}
@Override
protected void execute(UnifiedLog log) throws InterruptedException {
// In the first run after completing altering logDir within broker, we should make sure the state is reset. (KAFKA-16711)
if (!log.parentDir().equals(logDirectory.orElse(null))) {
copiedOffsetOption = Optional.empty();
isLogStartOffsetUpdated = false;
logDirectory = Optional.of(log.parentDir());
}
copyLogSegmentsToRemote(log);
}
private void maybeUpdateLogStartOffsetOnBecomingLeader(UnifiedLog log) throws RemoteStorageException {
if (!isLogStartOffsetUpdated) {
long logStartOffset = findLogStartOffset(topicIdPartition, log);
updateRemoteLogStartOffset.accept(topicIdPartition.topicPartition(), logStartOffset);
isLogStartOffsetUpdated = true;
logger.info("Found the logStartOffset: {} for partition: {} after becoming leader",
logStartOffset, topicIdPartition);
}
}
private void maybeUpdateCopiedOffset(UnifiedLog log) throws RemoteStorageException {
if (copiedOffsetOption.isEmpty()) {
// This is found by traversing from the latest leader epoch from leader epoch history and find the highest offset
// of a segment with that epoch copied into remote storage. If it can not find an entry then it checks for the
// previous leader epoch till it finds an entry, If there are no entries till the earliest leader epoch in leader
// epoch cache then it starts copying the segments from the earliest epoch entry's offset.
copiedOffsetOption = Optional.of(findHighestRemoteOffset(topicIdPartition, log));
logger.info("Found the highest copiedRemoteOffset: {} for partition: {} after becoming leader", copiedOffsetOption, topicIdPartition);
copiedOffsetOption.ifPresent(offsetAndEpoch -> log.updateHighestOffsetInRemoteStorage(offsetAndEpoch.offset()));
}
}
/**
* Segments which match the following criteria are eligible for copying to remote storage:
* 1) Segment is not the active segment and
* 2) Segment end-offset is less than the last-stable-offset as remote storage should contain only
* committed/acked messages
* @param log The log from which the segments are to be copied
* @param fromOffset The offset from which the segments are to be copied
* @param lastStableOffset The last stable offset of the log
* @return candidate log segments to be copied to remote storage
*/
List<EnrichedLogSegment> candidateLogSegments(UnifiedLog log, Long fromOffset, Long lastStableOffset) {
List<EnrichedLogSegment> candidateLogSegments = new ArrayList<>();
List<LogSegment> segments = log.logSegments(fromOffset, Long.MAX_VALUE);
if (!segments.isEmpty()) {
for (int idx = 1; idx < segments.size(); idx++) {
LogSegment previousSeg = segments.get(idx - 1);
LogSegment currentSeg = segments.get(idx);
if (currentSeg.baseOffset() <= lastStableOffset) {
candidateLogSegments.add(new EnrichedLogSegment(previousSeg, currentSeg.baseOffset()));
}
}
// Discard the last active segment
}
return candidateLogSegments;
}
public void copyLogSegmentsToRemote(UnifiedLog log) throws InterruptedException {
if (isCancelled())
return;
try {
maybeUpdateLogStartOffsetOnBecomingLeader(log);
maybeUpdateCopiedOffset(log);
long copiedOffset = copiedOffsetOption.get().offset();
// LSO indicates the offset below are ready to be consumed (high-watermark or committed)
long lso = log.lastStableOffset();
if (lso < 0) {
logger.warn("lastStableOffset for partition {} is {}, which should not be negative.", topicIdPartition, lso);
} else if (lso > 0 && copiedOffset < lso) {
// log-start-offset can be ahead of the copied-offset, when:
// 1) log-start-offset gets incremented via delete-records API (or)
// 2) enabling the remote log for the first time
long fromOffset = Math.max(copiedOffset + 1, log.logStartOffset());
List<EnrichedLogSegment> candidateLogSegments = candidateLogSegments(log, fromOffset, lso);
logger.debug("Candidate log segments, logStartOffset: {}, copiedOffset: {}, fromOffset: {}, lso: {} " +
"and candidateLogSegments: {}", log.logStartOffset(), copiedOffset, fromOffset, lso, candidateLogSegments);
if (candidateLogSegments.isEmpty()) {
logger.debug("No segments found to be copied for partition {} with copiedOffset: {} and active segment's base-offset: {}",
topicIdPartition, copiedOffset, log.activeSegment().baseOffset());
} else {
for (EnrichedLogSegment candidateLogSegment : candidateLogSegments) {
if (isCancelled()) {
logger.info("Skipping copying log segments as the current task state is changed, cancelled: {}",
isCancelled());
return;
}
copyQuotaManagerLock.lock();
try {
long throttleTimeMs = rlmCopyQuotaManager.getThrottleTimeMs();
while (throttleTimeMs > 0) {
copyQuotaMetrics.sensor().record(throttleTimeMs, time.milliseconds());
logger.debug("Quota exceeded for copying log segments, waiting for the quota to be available.");
// If the thread gets interrupted while waiting, the InterruptedException is thrown
// back to the caller. It's important to note that the task being executed is already
// cancelled before the executing thread is interrupted. The caller is responsible
// for handling the exception gracefully by checking if the task is already cancelled.
@SuppressWarnings("UnusedLocalVariable")
boolean ignored = copyQuotaManagerLockCondition.await(quotaTimeout().toMillis(), TimeUnit.MILLISECONDS);
throttleTimeMs = rlmCopyQuotaManager.getThrottleTimeMs();
}
rlmCopyQuotaManager.record(candidateLogSegment.logSegment.log().sizeInBytes());
// Signal waiting threads to check the quota again
copyQuotaManagerLockCondition.signalAll();
} finally {
copyQuotaManagerLock.unlock();
}
RemoteLogSegmentId segmentId = RemoteLogSegmentId.generateNew(topicIdPartition);
segmentIdsBeingCopied.add(segmentId);
try {
copyLogSegment(log, candidateLogSegment.logSegment, segmentId, candidateLogSegment.nextSegmentOffset);
} catch (Exception e) {
recordLagStats(log);
throw e;
} finally {
segmentIdsBeingCopied.remove(segmentId);
}
}
}
} else {
logger.debug("Skipping copying segments, current read-offset:{}, and LSO:{}", copiedOffset, lso);
}
} catch (CustomMetadataSizeLimitExceededException e) {
// Only stop this task. Logging is done where the exception is thrown.
brokerTopicStats.topicStats(log.topicPartition().topic()).failedRemoteCopyRequestRate().mark();
brokerTopicStats.allTopicsStats().failedRemoteCopyRequestRate().mark();
this.cancel();
} catch (InterruptedException | RetriableException ex) {
throw ex;
} catch (Exception ex) {
if (!isCancelled()) {
brokerTopicStats.topicStats(log.topicPartition().topic()).failedRemoteCopyRequestRate().mark();
brokerTopicStats.allTopicsStats().failedRemoteCopyRequestRate().mark();
logger.error("Error occurred while copying log segments of partition: {}", topicIdPartition, ex);
}
}
}
private void copyLogSegment(UnifiedLog log, LogSegment segment, RemoteLogSegmentId segmentId, long nextSegmentBaseOffset)
throws InterruptedException, ExecutionException, RemoteStorageException, IOException,
CustomMetadataSizeLimitExceededException {
File logFile = segment.log().file();
String logFileName = logFile.getName();
logger.info("Copying {} to remote storage.", logFileName);
long endOffset = nextSegmentBaseOffset - 1;
File producerStateSnapshotFile = log.producerStateManager().fetchSnapshot(nextSegmentBaseOffset).orElse(null);
List<EpochEntry> epochEntries = getLeaderEpochEntries(log, segment.baseOffset(), nextSegmentBaseOffset);
Map<Integer, Long> segmentLeaderEpochs = new HashMap<>(epochEntries.size());
epochEntries.forEach(entry -> segmentLeaderEpochs.put(entry.epoch(), entry.startOffset()));
boolean isTxnIdxEmpty = segment.txnIndex().isEmpty();
RemoteLogSegmentMetadata copySegmentStartedRlsm = new RemoteLogSegmentMetadata(segmentId, segment.baseOffset(), endOffset,
segment.largestTimestamp(), brokerId, time.milliseconds(), segment.log().sizeInBytes(),
segmentLeaderEpochs, isTxnIdxEmpty);
remoteLogMetadataManagerPlugin.get().addRemoteLogSegmentMetadata(copySegmentStartedRlsm).get();
ByteBuffer leaderEpochsIndex = epochEntriesAsByteBuffer(getLeaderEpochEntries(log, -1, nextSegmentBaseOffset));
LogSegmentData segmentData = new LogSegmentData(logFile.toPath(), toPathIfExists(segment.offsetIndex().file()),
toPathIfExists(segment.timeIndex().file()), Optional.ofNullable(toPathIfExists(segment.txnIndex().file())),
producerStateSnapshotFile.toPath(), leaderEpochsIndex);
brokerTopicStats.topicStats(log.topicPartition().topic()).remoteCopyRequestRate().mark();
brokerTopicStats.allTopicsStats().remoteCopyRequestRate().mark();
Optional<CustomMetadata> customMetadata;
try {
customMetadata = remoteStorageManagerPlugin.get().copyLogSegmentData(copySegmentStartedRlsm, segmentData);
} catch (RemoteStorageException e) {
logger.info("Copy failed, cleaning segment {}", copySegmentStartedRlsm.remoteLogSegmentId());
try {
deleteRemoteLogSegment(copySegmentStartedRlsm, ignored -> !isCancelled());
LOGGER.info("Cleanup completed for segment {}", copySegmentStartedRlsm.remoteLogSegmentId());
} catch (RemoteStorageException e1) {
LOGGER.info("Cleanup failed, will retry later with segment {}: {}", copySegmentStartedRlsm.remoteLogSegmentId(), e1.getMessage());
}
throw e;
}
RemoteLogSegmentMetadataUpdate copySegmentFinishedRlsm = new RemoteLogSegmentMetadataUpdate(segmentId, time.milliseconds(),
customMetadata, RemoteLogSegmentState.COPY_SEGMENT_FINISHED, brokerId);
if (customMetadata.isPresent()) {
long customMetadataSize = customMetadata.get().value().length;
if (customMetadataSize > this.customMetadataSizeLimit) {
CustomMetadataSizeLimitExceededException e = new CustomMetadataSizeLimitExceededException();
logger.info("Custom metadata size {} exceeds configured limit {}." +
" Copying will be stopped and copied segment will be attempted to clean." +
" Original metadata: {}",
customMetadataSize, this.customMetadataSizeLimit, copySegmentStartedRlsm, e);
// For deletion, we provide back the custom metadata by creating a new metadata object from the update.
// However, the update itself will not be stored in this case.
RemoteLogSegmentMetadata newMetadata = copySegmentStartedRlsm.createWithUpdates(copySegmentFinishedRlsm);
try {
deleteRemoteLogSegment(newMetadata, ignored -> !isCancelled());
LOGGER.info("Cleanup completed for segment {}", newMetadata.remoteLogSegmentId());
} catch (RemoteStorageException e1) {
LOGGER.info("Cleanup failed, will retry later with segment {}: {}", newMetadata.remoteLogSegmentId(), e1.getMessage());
}
throw e;
}
}
remoteLogMetadataManagerPlugin.get().updateRemoteLogSegmentMetadata(copySegmentFinishedRlsm).get();
brokerTopicStats.topicStats(log.topicPartition().topic())
.remoteCopyBytesRate().mark(copySegmentStartedRlsm.segmentSizeInBytes());
brokerTopicStats.allTopicsStats().remoteCopyBytesRate().mark(copySegmentStartedRlsm.segmentSizeInBytes());
// `epochEntries` cannot be empty, there is a pre-condition validation in RemoteLogSegmentMetadata
// constructor
int lastEpochInSegment = epochEntries.get(epochEntries.size() - 1).epoch();
copiedOffsetOption = Optional.of(new OffsetAndEpoch(endOffset, lastEpochInSegment));
// Update the highest offset in remote storage for this partition's log so that the local log segments
// are not deleted before they are copied to remote storage.
log.updateHighestOffsetInRemoteStorage(endOffset);
logger.info("Copied {} to remote storage with segment-id: {}",
logFileName, copySegmentFinishedRlsm.remoteLogSegmentId());
recordLagStats(log);
}
private void recordLagStats(UnifiedLog log) {
long bytesLag = log.onlyLocalLogSegmentsSize() - log.activeSegment().size();
long segmentsLag = log.onlyLocalLogSegmentsCount() - 1;
recordLagStats(bytesLag, segmentsLag);
}
// VisibleForTesting
void recordLagStats(long bytesLag, long segmentsLag) {
if (!isCancelled()) {
String topic = topicIdPartition.topic();
int partition = topicIdPartition.partition();
brokerTopicStats.recordRemoteCopyLagBytes(topic, partition, bytesLag);
brokerTopicStats.recordRemoteCopyLagSegments(topic, partition, segmentsLag);
}
}
void resetLagStats() {
String topic = topicIdPartition.topic();
int partition = topicIdPartition.partition();
brokerTopicStats.recordRemoteCopyLagBytes(topic, partition, 0);
brokerTopicStats.recordRemoteCopyLagSegments(topic, partition, 0);
}
private Path toPathIfExists(File file) {
return file.exists() ? file.toPath() : null;
}
}
|
RLMCopyTask
|
java
|
elastic__elasticsearch
|
test/framework/src/main/java/org/elasticsearch/telemetry/RecordingMeterRegistry.java
|
{
"start": 1681,
"end": 10150
}
|
class ____ implements MeterRegistry {
protected final MetricRecorder<Instrument> recorder = new MetricRecorder<>();
public MetricRecorder<Instrument> getRecorder() {
return recorder;
}
@Override
public DoubleCounter registerDoubleCounter(String name, String description, String unit) {
DoubleCounter instrument = buildDoubleCounter(name, description, unit);
recorder.register(instrument, InstrumentType.fromInstrument(instrument), name, description, unit);
return instrument;
}
@Override
public DoubleCounter getDoubleCounter(String name) {
return (DoubleCounter) recorder.getInstrument(InstrumentType.DOUBLE_COUNTER, name);
}
protected DoubleCounter buildDoubleCounter(String name, String description, String unit) {
return new RecordingInstruments.RecordingDoubleCounter(name, recorder);
}
@Override
public DoubleUpDownCounter registerDoubleUpDownCounter(String name, String description, String unit) {
DoubleUpDownCounter instrument = buildDoubleUpDownCounter(name, description, unit);
recorder.register(instrument, InstrumentType.fromInstrument(instrument), name, description, unit);
return instrument;
}
@Override
public DoubleUpDownCounter getDoubleUpDownCounter(String name) {
return (DoubleUpDownCounter) recorder.getInstrument(InstrumentType.DOUBLE_UP_DOWN_COUNTER, name);
}
protected DoubleUpDownCounter buildDoubleUpDownCounter(String name, String description, String unit) {
return new RecordingInstruments.RecordingDoubleUpDownCounter(name, recorder);
}
@Override
public DoubleGauge registerDoubleGauge(String name, String description, String unit, Supplier<DoubleWithAttributes> observer) {
return registerDoublesGauge(name, description, unit, () -> Collections.singleton(observer.get()));
}
@Override
public DoubleGauge registerDoublesGauge(
String name,
String description,
String unit,
Supplier<Collection<DoubleWithAttributes>> observer
) {
DoubleGauge instrument = buildDoubleGauge(name, description, unit, observer);
recorder.register(instrument, InstrumentType.fromInstrument(instrument), name, description, unit);
return instrument;
}
@Override
public DoubleGauge getDoubleGauge(String name) {
return (DoubleGauge) recorder.getInstrument(InstrumentType.DOUBLE_GAUGE, name);
}
protected DoubleGauge buildDoubleGauge(
String name,
String description,
String unit,
Supplier<Collection<DoubleWithAttributes>> observer
) {
return new RecordingInstruments.RecordingDoubleGauge(name, observer, recorder);
}
@Override
public DoubleHistogram registerDoubleHistogram(String name, String description, String unit) {
DoubleHistogram instrument = buildDoubleHistogram(name, description, unit);
recorder.register(instrument, InstrumentType.fromInstrument(instrument), name, description, unit);
return instrument;
}
@Override
public DoubleHistogram getDoubleHistogram(String name) {
return (DoubleHistogram) recorder.getInstrument(InstrumentType.DOUBLE_HISTOGRAM, name);
}
protected DoubleHistogram buildDoubleHistogram(String name, String description, String unit) {
return new RecordingInstruments.RecordingDoubleHistogram(name, recorder);
}
@Override
public LongCounter registerLongCounter(String name, String description, String unit) {
    // Build via the overridable hook, then record the registration before returning.
    LongCounter counter = buildLongCounter(name, description, unit);
    recorder.register(counter, InstrumentType.fromInstrument(counter), name, description, unit);
    return counter;
}
@Override
public LongAsyncCounter registerLongAsyncCounter(String name, String description, String unit, Supplier<LongWithAttributes> observer) {
    // Adapt the single-value observer to the collection-based variant and delegate.
    Supplier<Collection<LongWithAttributes>> adapted = () -> Collections.singleton(observer.get());
    return registerLongsAsyncCounter(name, description, unit, adapted);
}
@Override
public LongAsyncCounter registerLongsAsyncCounter(
    String name,
    String description,
    String unit,
    Supplier<Collection<LongWithAttributes>> observer
) {
    // Delegate construction to the protected build hook (consistent with the other
    // register*/build* pairs in this class), then record the registration.
    LongAsyncCounter instrument = buildLongAsyncCounter(name, observer);
    recorder.register(instrument, InstrumentType.fromInstrument(instrument), name, description, unit);
    return instrument;
}

/**
 * Creates the recording-backed async long counter. Extracted as a protected hook so
 * subclasses can substitute their own implementation, mirroring the other build* methods.
 */
protected LongAsyncCounter buildLongAsyncCounter(String name, Supplier<Collection<LongWithAttributes>> observer) {
    return new RecordingInstruments.RecordingAsyncLongCounter(name, observer, recorder);
}
@Override
public LongAsyncCounter getLongAsyncCounter(String name) {
    // Look up by name and cast to the concrete async-counter type.
    LongAsyncCounter counter = (LongAsyncCounter) recorder.getInstrument(InstrumentType.LONG_ASYNC_COUNTER, name);
    return counter;
}
@Override
public DoubleAsyncCounter registerDoubleAsyncCounter(
    String name,
    String description,
    String unit,
    Supplier<DoubleWithAttributes> observer
) {
    // Adapt the single-value observer to the collection-based variant and delegate.
    Supplier<Collection<DoubleWithAttributes>> adapted = () -> Collections.singleton(observer.get());
    return registerDoublesAsyncCounter(name, description, unit, adapted);
}
@Override
public DoubleAsyncCounter registerDoublesAsyncCounter(
    String name,
    String description,
    String unit,
    Supplier<Collection<DoubleWithAttributes>> observer
) {
    // Delegate construction to the protected build hook (consistent with the other
    // register*/build* pairs in this class), then record the registration.
    DoubleAsyncCounter instrument = buildDoubleAsyncCounter(name, observer);
    recorder.register(instrument, InstrumentType.fromInstrument(instrument), name, description, unit);
    return instrument;
}

/**
 * Creates the recording-backed async double counter. Extracted as a protected hook so
 * subclasses can substitute their own implementation, mirroring the other build* methods.
 */
protected DoubleAsyncCounter buildDoubleAsyncCounter(String name, Supplier<Collection<DoubleWithAttributes>> observer) {
    return new RecordingInstruments.RecordingAsyncDoubleCounter(name, observer, recorder);
}
@Override
public DoubleAsyncCounter getDoubleAsyncCounter(String name) {
    // Look up by name and cast to the concrete async-counter type.
    DoubleAsyncCounter counter = (DoubleAsyncCounter) recorder.getInstrument(InstrumentType.DOUBLE_ASYNC_COUNTER, name);
    return counter;
}
@Override
public LongCounter getLongCounter(String name) {
    // Look up by name and cast to the concrete counter type.
    LongCounter counter = (LongCounter) recorder.getInstrument(InstrumentType.LONG_COUNTER, name);
    return counter;
}
/**
 * Creates the recording-backed long counter. Protected so subclasses can substitute
 * their own implementation; description and unit are unused by the recording variant.
 */
protected LongCounter buildLongCounter(String name, String description, String unit) {
    LongCounter counter = new RecordingInstruments.RecordingLongCounter(name, recorder);
    return counter;
}
@Override
public LongUpDownCounter registerLongUpDownCounter(String name, String description, String unit) {
    // Build via the overridable hook, then record the registration before returning.
    LongUpDownCounter counter = buildLongUpDownCounter(name, description, unit);
    recorder.register(counter, InstrumentType.fromInstrument(counter), name, description, unit);
    return counter;
}
@Override
public LongUpDownCounter getLongUpDownCounter(String name) {
    // Look up by name and cast to the concrete up-down counter type.
    LongUpDownCounter counter = (LongUpDownCounter) recorder.getInstrument(InstrumentType.LONG_UP_DOWN_COUNTER, name);
    return counter;
}
/**
 * Creates the recording-backed long up-down counter. Protected so subclasses can substitute
 * their own implementation; description and unit are unused by the recording variant.
 */
protected LongUpDownCounter buildLongUpDownCounter(String name, String description, String unit) {
    LongUpDownCounter counter = new RecordingInstruments.RecordingLongUpDownCounter(name, recorder);
    return counter;
}
@Override
public LongGauge registerLongGauge(String name, String description, String unit, Supplier<LongWithAttributes> observer) {
    // Adapt the single-value observer to the collection-based variant and delegate.
    Supplier<Collection<LongWithAttributes>> adapted = () -> Collections.singleton(observer.get());
    return registerLongsGauge(name, description, unit, adapted);
}
@Override
public LongGauge registerLongsGauge(String name, String description, String unit, Supplier<Collection<LongWithAttributes>> observer) {
    // Build via the overridable hook, then record the registration before returning.
    LongGauge gauge = buildLongGauge(name, description, unit, observer);
    recorder.register(gauge, InstrumentType.fromInstrument(gauge), name, description, unit);
    return gauge;
}
@Override
public LongGauge getLongGauge(String name) {
    // Look up by name and cast to the concrete gauge type.
    LongGauge gauge = (LongGauge) recorder.getInstrument(InstrumentType.LONG_GAUGE, name);
    return gauge;
}
/**
 * Creates the recording-backed long gauge from the observer callback. Protected so
 * subclasses can substitute their own implementation; description and unit are unused here.
 */
protected LongGauge buildLongGauge(String name, String description, String unit, Supplier<Collection<LongWithAttributes>> observer) {
    LongGauge gauge = new RecordingInstruments.RecordingLongGauge(name, observer, recorder);
    return gauge;
}
@Override
public LongHistogram registerLongHistogram(String name, String description, String unit) {
    // Build via the overridable hook, then record the registration before returning.
    LongHistogram histogram = buildLongHistogram(name, description, unit);
    recorder.register(histogram, InstrumentType.fromInstrument(histogram), name, description, unit);
    return histogram;
}
@Override
public LongHistogram getLongHistogram(String name) {
    // Look up by name and cast to the concrete histogram type.
    LongHistogram histogram = (LongHistogram) recorder.getInstrument(InstrumentType.LONG_HISTOGRAM, name);
    return histogram;
}
/**
 * Creates the recording-backed long histogram. Protected so subclasses can substitute
 * their own implementation; description and unit are unused by the recording variant.
 */
protected LongHistogram buildLongHistogram(String name, String description, String unit) {
    LongHistogram histogram = new RecordingInstruments.RecordingLongHistogram(name, recorder);
    return histogram;
}
}
|
RecordingMeterRegistry
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.