language
stringclasses
1 value
repo
stringclasses
60 values
path
stringlengths
22
294
class_span
dict
source
stringlengths
13
1.16M
target
stringlengths
1
113
java
google__error-prone
core/src/test/java/com/google/errorprone/bugpatterns/threadsafety/ThreadSafeCheckerTest.java
{ "start": 33567, "end": 34518 }
class ____ { // BUG: Diagnostic contains: final X badX = new X(ImmutableList.of(new ArrayList<String>())); } """) .doTest(); } @Rule public final TemporaryFolder tempFolder = new TemporaryFolder(); static void addClassToJar(JarOutputStream jos, Class<?> clazz) throws IOException { String entryPath = clazz.getName().replace('.', '/') + ".class"; try (InputStream is = clazz.getClassLoader().getResourceAsStream(entryPath)) { jos.putNextEntry(new JarEntry(entryPath)); ByteStreams.copy(is, jos); } } @Test public void knownThreadSafeFlag() { CompilationTestHelper.newInstance(ThreadSafeChecker.class, getClass()) .setArgs(ImmutableList.of("-XepOpt:ThreadSafe:KnownThreadSafe=threadsafety.SomeImmutable")) .addSourceLines( "threadsafety/SomeImmutable.java", """ package threadsafety;
Test
java
elastic__elasticsearch
x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/LicenseVerificationTool.java
{ "start": 1187, "end": 3955 }
class ____ extends Command { private final OptionSpec<String> publicKeyPathOption; private final OptionSpec<String> licenseOption; private final OptionSpec<String> licenseFileOption; public LicenseVerificationTool() { super("Generates signed elasticsearch license(s) for a given license spec(s)"); publicKeyPathOption = parser.accepts("publicKeyPath", "path to public key file").withRequiredArg().required(); // TODO: with jopt-simple 5.0, we can make these requiredUnless each other // which is effectively "one must be present" licenseOption = parser.accepts("license", "license json spec").withRequiredArg(); licenseFileOption = parser.accepts("licenseFile", "license json spec file").withRequiredArg(); } @Override protected void execute(Terminal terminal, OptionSet options, ProcessInfo processInfo) throws Exception { Path publicKeyPath = parsePath(publicKeyPathOption.value(options)); if (Files.exists(publicKeyPath) == false) { throw new UserException(ExitCodes.USAGE, publicKeyPath + " does not exist"); } final License licenseSpec; if (options.has(licenseOption)) { final BytesArray bytes = new BytesArray(licenseOption.value(options).getBytes(StandardCharsets.UTF_8)); licenseSpec = License.fromSource(bytes, XContentType.JSON); } else if (options.has(licenseFileOption)) { Path licenseSpecPath = parsePath(licenseFileOption.value(options)); if (Files.exists(licenseSpecPath) == false) { throw new UserException(ExitCodes.USAGE, licenseSpecPath + " does not exist"); } final BytesArray bytes = new BytesArray(Files.readAllBytes(licenseSpecPath)); licenseSpec = License.fromSource(bytes, XContentType.JSON); } else { throw new UserException(ExitCodes.USAGE, "Must specify either --license or --licenseFile"); } // verify if (LicenseVerifier.verifyLicense(licenseSpec, CryptUtils.readPublicKey(Files.readAllBytes(publicKeyPath))) == false) { throw new UserException(ExitCodes.DATA_ERROR, "Invalid License!"); } XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); 
builder.startObject(); builder.startObject("license"); licenseSpec.toInnerXContent(builder, ToXContent.EMPTY_PARAMS); builder.endObject(); builder.endObject(); builder.flush(); terminal.println(Strings.toString(builder)); } @SuppressForbidden(reason = "Parsing command line path") private static Path parsePath(String path) { return PathUtils.get(path); } }
LicenseVerificationTool
java
FasterXML__jackson-databind
src/test/java/tools/jackson/databind/deser/enums/EnumDeserializationTest.java
{ "start": 12285, "end": 14605 }
enum ____ is EnumWithJsonValue e = MAPPER.readValue(q("foo"), EnumWithJsonValue.class); assertSame(EnumWithJsonValue.A, e); e = MAPPER.readValue(q("bar"), EnumWithJsonValue.class); assertSame(EnumWithJsonValue.B, e); // then in EnumSet EnumSet<EnumWithJsonValue> set = MAPPER.readValue("[\"bar\"]", new TypeReference<EnumSet<EnumWithJsonValue>>() { }); assertNotNull(set); assertEquals(1, set.size()); assertTrue(set.contains(EnumWithJsonValue.B)); assertFalse(set.contains(EnumWithJsonValue.A)); // and finally EnumMap EnumMap<EnumWithJsonValue,Integer> map = MAPPER.readValue("{\"foo\":13}", new TypeReference<EnumMap<EnumWithJsonValue, Integer>>() { }); assertNotNull(map); assertEquals(1, map.size()); assertEquals(Integer.valueOf(13), map.get(EnumWithJsonValue.A)); } // Ability to ignore unknown Enum values: @Test public void testAllowUnknownEnumValuesReadAsNull() throws Exception { // cannot use shared mapper when changing configs... ObjectReader reader = MAPPER.reader(EnumFeature.READ_UNKNOWN_ENUM_VALUES_AS_NULL); assertNull(reader.forType(TestEnum.class).readValue("\"NO-SUCH-VALUE\"")); assertNull(reader.forType(TestEnum.class).readValue(" 4343 ")); } // Ability to ignore unknown Enum values as null: // [databind#1642] @Test public void testAllowUnknownEnumValuesReadAsNullWithCreatorMethod() throws Exception { // cannot use shared mapper when changing configs... ObjectReader reader = MAPPER.reader(EnumFeature.READ_UNKNOWN_ENUM_VALUES_AS_NULL); assertNull(reader.forType(StrictEnumCreator.class).readValue("\"NO-SUCH-VALUE\"")); assertNull(reader.forType(StrictEnumCreator.class).readValue(" 4343 ")); } @Test public void testAllowUnknownEnumValuesForEnumSets() throws Exception { // 05-Nov-2025, tatu: As per [databind#5203], no longer quietly skippped try { MAPPER.reader(EnumFeature.READ_UNKNOWN_ENUM_VALUES_AS_NULL) .forType(new TypeReference<EnumSet<TestEnum>>() { }) .readValue("[\"NO-SUCH-VALUE\"]"); fail("Expected an exception for bogus
as
java
alibaba__druid
core/src/test/java/com/alibaba/druid/bvt/sql/mysql/issues/Issue4454.java
{ "start": 472, "end": 1317 }
class ____ { protected final DbType dbType = DbType.mysql; @Test public void test_idle2() throws Exception { String sql = "CREATE TABLE `test_trigger` (\n" + " `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'trigger id',\n" + " `test` int(11) signed NOT NULL DEFAULT '0' COMMENT 'trigger test',\n" + " PRIMARY KEY (`id`)\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;"; SQLStatementParser parser = SQLParserUtils.createSQLStatementParser(sql, dbType); SQLStatement stmt = parser.parseStatement(); SchemaStatVisitor visitor = SQLUtils.createSchemaStatVisitor(dbType); stmt.accept(visitor); Map<TableStat.Name, TableStat> tableMap = visitor.getTables(); assertFalse(tableMap.isEmpty()); } }
Issue4454
java
hibernate__hibernate-orm
hibernate-core/src/main/java/org/hibernate/query/sqm/tree/domain/SqmTreatedPluralPartJoin.java
{ "start": 633, "end": 3294 }
class ____ extends SqmPluralPartJoin implements SqmTreatedJoin { private final SqmPluralPartJoin wrappedPath; private final SqmEntityDomainType treatTarget; public SqmTreatedPluralPartJoin( SqmPluralPartJoin wrappedPath, SqmEntityDomainType treatTarget, @Nullable String alias) { //noinspection unchecked super( wrappedPath.getLhs(), wrappedPath.getNavigablePath() .treatAs( treatTarget.getHibernateEntityName(), alias ), wrappedPath.getReferencedPathSource(), alias, wrappedPath.getSqmJoinType(), wrappedPath.nodeBuilder() ); this.treatTarget = treatTarget; this.wrappedPath = wrappedPath; } private SqmTreatedPluralPartJoin( NavigablePath navigablePath, SqmPluralPartJoin wrappedPath, SqmEntityDomainType treatTarget, @Nullable String alias) { //noinspection unchecked super( wrappedPath.getLhs(), navigablePath, wrappedPath.getReferencedPathSource(), alias, wrappedPath.getSqmJoinType(), wrappedPath.nodeBuilder() ); this.treatTarget = treatTarget; this.wrappedPath = wrappedPath; } @Override public SqmTreatedPluralPartJoin copy(SqmCopyContext context) { final SqmTreatedPluralPartJoin existing = context.getCopy( this ); if ( existing != null ) { return existing; } final SqmTreatedPluralPartJoin path = context.registerCopy( this, new SqmTreatedPluralPartJoin( getNavigablePath(), wrappedPath.copy( context ), treatTarget, getExplicitAlias() ) ); copyTo( path, context ); return path; } @Override public SqmPluralPartJoin getWrappedPath() { return wrappedPath; } @Override public EntityDomainType getTreatTarget() { return treatTarget; } @Override public @NonNull SqmBindableType getNodeType() { return treatTarget; } @Override public SqmPathSource getReferencedPathSource() { return treatTarget; } @Override public SqmPathSource<?> getResolvedModel() { return treatTarget; } @Override public SqmTreatedPluralPartJoin treatAs(Class treatJavaType, @Nullable String alias, boolean fetch) { //noinspection unchecked return wrappedPath.treatAs( treatJavaType, alias, fetch ); } @Override public 
SqmTreatedPluralPartJoin treatAs(EntityDomainType treatTarget, @Nullable String alias, boolean fetch) { //noinspection unchecked return wrappedPath.treatAs( treatTarget, alias, fetch ); } @Override public void appendHqlString(StringBuilder hql, SqmRenderContext context) { hql.append( "treat(" ); wrappedPath.appendHqlString( hql, context ); hql.append( " as " ); hql.append( treatTarget.getName() ); hql.append( ')' ); } }
SqmTreatedPluralPartJoin
java
apache__hadoop
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
{ "start": 13873, "end": 16235 }
class ____ for * the user jar as well as the HADOOP_CLASSPATH. Otherwise, it creates a * classloader that simply adds the user jar to the classpath. */ private ClassLoader createClassLoader(File file, final File workDir) throws MalformedURLException { ClassLoader loader; // see if the client classloader is enabled if (useClientClassLoader()) { StringBuilder sb = new StringBuilder(); sb.append(workDir).append("/"). append(File.pathSeparator).append(file). append(File.pathSeparator).append(workDir).append("/classes/"). append(File.pathSeparator).append(workDir).append("/lib/*"); // HADOOP_CLASSPATH is added to the client classpath String hadoopClasspath = getHadoopClasspath(); if (hadoopClasspath != null && !hadoopClasspath.isEmpty()) { sb.append(File.pathSeparator).append(hadoopClasspath); } String clientClasspath = sb.toString(); // get the system classes String systemClasses = getSystemClasses(); List<String> systemClassesList = systemClasses == null ? null : Arrays.asList(StringUtils.getTrimmedStrings(systemClasses)); // create an application classloader that isolates the user classes loader = new ApplicationClassLoader(clientClasspath, getClass().getClassLoader(), systemClassesList); } else { List<URL> classPath = new ArrayList<>(); classPath.add(new File(workDir + "/").toURI().toURL()); classPath.add(file.toURI().toURL()); classPath.add(new File(workDir, "classes/").toURI().toURL()); File[] libs = new File(workDir, "lib").listFiles(); if (libs != null) { for (File lib : libs) { classPath.add(lib.toURI().toURL()); } } // create a normal parent-delegating classloader loader = new URLClassLoader(classPath.toArray(new URL[classPath.size()])); } return loader; } boolean useClientClassLoader() { return Boolean.parseBoolean(System.getenv(HADOOP_USE_CLIENT_CLASSLOADER)); } boolean skipUnjar() { return Boolean.parseBoolean(System.getenv(HADOOP_CLIENT_SKIP_UNJAR)); } String getHadoopClasspath() { return System.getenv(HADOOP_CLASSPATH); } String getSystemClasses() { 
return System.getenv(HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES); } }
space
java
apache__camel
dsl/camel-yaml-dsl/camel-yaml-dsl-deserializers/src/generated/java/org/apache/camel/dsl/yaml/deserializers/ModelDeserializers.java
{ "start": 480570, "end": 483781 }
class ____ extends YamlDeserializerBase<JavaExpression> { public JavaExpressionDeserializer() { super(JavaExpression.class); } @Override protected JavaExpression newInstance() { return new JavaExpression(); } @Override protected JavaExpression newInstance(String value) { return new JavaExpression(value); } @Override protected boolean setProperty(JavaExpression target, String propertyKey, String propertyName, Node node) { propertyKey = org.apache.camel.util.StringHelper.dashToCamelCase(propertyKey); switch(propertyKey) { case "expression": { String val = asText(node); target.setExpression(val); break; } case "id": { String val = asText(node); target.setId(val); break; } case "preCompile": { String val = asText(node); target.setPreCompile(val); break; } case "resultType": { String val = asText(node); target.setResultTypeName(val); break; } case "singleQuotes": { String val = asText(node); target.setSingleQuotes(val); break; } case "trim": { String val = asText(node); target.setTrim(val); break; } default: { ExpressionDefinition ed = target.getExpressionType(); if (ed != null) { throw new org.apache.camel.dsl.yaml.common.exception.DuplicateFieldException(node, propertyName, "as an expression"); } ed = ExpressionDeserializers.constructExpressionType(propertyKey, node); if (ed != null) { target.setExpressionType(ed); } else { return false; } } } return true; } } @YamlType( nodes = "js", inline = true, types = org.apache.camel.model.language.JavaScriptExpression.class, order = org.apache.camel.dsl.yaml.common.YamlDeserializerResolver.ORDER_LOWEST - 1, displayName = "JavaScript", description = "Evaluates a JavaScript expression.", deprecated = false, properties = { @YamlProperty(name = "expression", type = "string", required = true, description = "The expression value in your chosen language syntax", displayName = "Expression"), @YamlProperty(name = "id", type = "string", description = "Sets the id of this node", displayName = "Id"), @YamlProperty(name = "resultType", 
type = "string", description = "Sets the
JavaExpressionDeserializer
java
resilience4j__resilience4j
resilience4j-metrics/src/test/java/io/github/resilience4j/metrics/RetryMetricsTest.java
{ "start": 839, "end": 8786 }
class ____ extends AbstractRetryMetricsTest { @Override protected Retry givenMetricRegistry(String prefix, MetricRegistry metricRegistry) { RetryRegistry retryRegistry = RetryRegistry .of(RetryConfig.custom().waitDuration(Duration.ofMillis(150)).build()); Retry retry = retryRegistry.retry("testName"); metricRegistry.registerAll(RetryMetrics.ofRetryRegistry(prefix, retryRegistry)); return retry; } @Override protected Retry givenMetricRegistry(MetricRegistry metricRegistry) { RetryRegistry retryRegistry = RetryRegistry .of(RetryConfig.custom().waitDuration(Duration.ofMillis(150)).build()); Retry retry = retryRegistry.retry("testName"); metricRegistry.registerAll(RetryMetrics.ofRetryRegistry(retryRegistry)); return retry; } // returns only success @Test public void shouldReturnTotalNumberOfRequestsAs1ForSuccess() { HelloWorldService helloWorldService = mock(HelloWorldService.class); Retry retry = Retry.of("metrics", RetryConfig.<String>custom() .retryOnResult(String::isEmpty) .maxAttempts(5) .build()); given(helloWorldService.returnHelloWorld()).willReturn("Success"); String result = Retry.decorateSupplier(retry, helloWorldService::returnHelloWorld).get(); assertThat(retry.getMetrics().getNumberOfTotalCalls()).isEqualTo(1); assertThat(result).isEqualTo("Success"); } @Test public void shouldReturnTotalNumberOfRequestsAs1ForSuccessVoid() { HelloWorldService helloWorldService = mock(HelloWorldService.class); Retry retry = Retry.of("metrics", RetryConfig.custom() .maxAttempts(5) .build()); retry.executeRunnable(helloWorldService::sayHelloWorld); assertThat(retry.getMetrics().getNumberOfTotalCalls()).isEqualTo(1); } // returns fail twice and then success @Test public void shouldReturnTotalNumberOfRequestsAs3ForFail() { HelloWorldService helloWorldService = mock(HelloWorldService.class); Retry retry = Retry.of("metrics", RetryConfig.<String>custom() .retryExceptions(Exception.class) .maxAttempts(5) .build()); given(helloWorldService.returnHelloWorld()) .willThrow(new 
HelloWorldException()) .willThrow(new HelloWorldException()) .willReturn("Success"); String result = Retry.decorateSupplier(retry, helloWorldService::returnHelloWorld).get(); assertThat(retry.getMetrics().getNumberOfTotalCalls()).isEqualTo(3); assertThat(result).isEqualTo("Success"); } // throws only exception finally @Test public void shouldReturnTotalNumberOfRequestsAs5OnlyFails() { HelloWorldService helloWorldService = mock(HelloWorldService.class); Retry retry = Retry.of("metrics", RetryConfig.<String>custom() .retryExceptions(Exception.class) .maxAttempts(5) .failAfterMaxAttempts(true) .build()); given(helloWorldService.returnHelloWorld()) .willThrow(new HelloWorldException()); Try<String> supplier = Try.ofSupplier(Retry.decorateSupplier(retry, helloWorldService::returnHelloWorld)); assertThat(retry.getMetrics().getNumberOfTotalCalls()).isEqualTo(5); assertThat(supplier.isFailure()).isTrue(); } // throws only checked exception finally @Test public void shouldReturnTotalNumberOfRequestsAs5OnlyFailsChecked() throws IOException { HelloWorldService helloWorldService = mock(HelloWorldService.class); Retry retry = Retry.of("metrics", RetryConfig.<String>custom() .retryExceptions(Exception.class) .maxAttempts(5) .failAfterMaxAttempts(true) .build()); willThrow(new HelloWorldException()).given(helloWorldService).returnHelloWorldWithException(); Callable<String> retryableCallable = Retry.decorateCallable(retry, helloWorldService::returnHelloWorldWithException); Try<Void> run = Try.run(retryableCallable::call); assertThat(retry.getMetrics().getNumberOfTotalCalls()).isEqualTo(5); assertThat(run.isFailure()).isTrue(); } // returns async success @Test public void shouldReturnTotalNumberOfRequestsAs1ForSuccessAsync() { AsyncHelloWorldService helloWorldService = mock(AsyncHelloWorldService.class); ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor(); given(helloWorldService.returnHelloWorld()) .willReturn(completedFuture("Success")); Retry retry 
= Retry.of("metrics", RetryConfig.<String>custom() .retryExceptions(Exception.class) .maxAttempts(5) .build()); Supplier<CompletionStage<String>> supplier = Retry.decorateCompletionStage(retry, scheduler, helloWorldService::returnHelloWorld); String result = awaitResult(supplier.get(), 5); assertThat(retry.getMetrics().getNumberOfTotalCalls()).isEqualTo(1); assertThat(result).isEqualTo("Success"); } // returns 1 failed and then 1 success async @Test public void shouldReturnTotalNumberOfRequestsAs3ForFailAsync() { AsyncHelloWorldService helloWorldService = mock(AsyncHelloWorldService.class); ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor(); CompletableFuture<String> failedFuture = new CompletableFuture<>(); failedFuture.completeExceptionally(new HelloWorldException()); given(helloWorldService.returnHelloWorld()) .willReturn(failedFuture) .willReturn(completedFuture("Success")); Retry retry = Retry.of("metrics", RetryConfig.<String>custom() .retryExceptions(Exception.class) .maxAttempts(5) .failAfterMaxAttempts(true) .build()); Supplier<CompletionStage<String>> supplier = Retry.decorateCompletionStage(retry, scheduler, helloWorldService::returnHelloWorld); String result = awaitResult(supplier.get(), 5); assertThat(retry.getMetrics().getNumberOfTotalCalls()).isEqualTo(2); assertThat(result).isEqualTo("Success"); } // throws only exception async @Test public void shouldReturnTotalNumberOfRequestsAs5ForFailAsync() { AsyncHelloWorldService helloWorldService = mock(AsyncHelloWorldService.class); ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor(); CompletableFuture<String> failedFuture = new CompletableFuture<>(); failedFuture.completeExceptionally(new HelloWorldException()); given(helloWorldService.returnHelloWorld()).willReturn(failedFuture); Retry retry = Retry.of("metrics", RetryConfig.<String>custom() .retryExceptions(Exception.class) .maxAttempts(5) .failAfterMaxAttempts(true) .build()); 
Supplier<CompletionStage<String>> supplier = Retry.decorateCompletionStage(retry, scheduler, helloWorldService::returnHelloWorld); assertThat(supplier.get()) .failsWithin(5, TimeUnit.SECONDS) .withThrowableOfType(ExecutionException.class) .havingCause(); assertThat(retry.getMetrics().getNumberOfTotalCalls()).isEqualTo(5); } public static <T> T awaitResult(CompletionStage<T> completionStage, long timeoutSeconds) { try { return completionStage.toCompletableFuture().get(timeoutSeconds, TimeUnit.SECONDS); } catch (InterruptedException | TimeoutException e) { throw new AssertionError(e); } catch (ExecutionException e) { throw new RuntimeExecutionException(e.getCause()); } } private static
RetryMetricsTest
java
apache__hadoop
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
{ "start": 53166, "end": 54176 }
enum ____ { UNMANAGED("User launched the Application Master, since it's unmanaged. "), INACTIVATED("Application is added to the scheduler and is not yet activated. "), ACTIVATED("Application is Activated, waiting for resources to be assigned for AM. "), ASSIGNED("Scheduler has assigned a container for AM, waiting for AM " + "container to be launched"), LAUNCHED("AM container is launched, waiting for AM container to Register " + "with RM") ; private String diagnosticMessage; AMState(String diagnosticMessage) { this.diagnosticMessage = diagnosticMessage; } public String getDiagnosticMessage() { return diagnosticMessage; } } public Map<String, String> getApplicationSchedulingEnvs() { return this.applicationSchedulingEnvs; } @Override public String getPartition() { return nodeLabelExpression == null ? "" : nodeLabelExpression; } @Override public long getStartTime() { return startTime; } }
AMState
java
apache__camel
core/camel-core/src/test/java/org/apache/camel/component/file/strategy/FileChangedReadLockMinAgeTest.java
{ "start": 1274, "end": 3117 }
class ____ extends ContextTestSupport { private static final Logger LOG = LoggerFactory.getLogger(FileChangedReadLockMinAgeTest.class); @Test public void testChangedReadLockMinAge() throws Exception { MockEndpoint mock = getMockEndpoint("mock:result"); mock.expectedMessageCount(1); mock.expectedFileExists(testFile("out/slowfile.dat")); mock.expectedMessagesMatches( exchangeProperty(Exchange.RECEIVED_TIMESTAMP).convertTo(long.class).isGreaterThan(new Date().getTime() + 500)); writeSlowFile(); assertMockEndpointsSatisfied(); String content = new String(Files.readAllBytes(testFile("out/slowfile.dat"))); String[] lines = content.split(LS); assertEquals(20, lines.length, "There should be 20 lines in the file"); for (int i = 0; i < 20; i++) { assertEquals("Line " + i, lines[i]); } } private void writeSlowFile() throws Exception { LOG.debug("Writing slow file..."); try (OutputStream fos = Files.newOutputStream(testFile("in/slowfile.dat"))) { for (int i = 0; i < 20; i++) { fos.write(("Line " + i + LS).getBytes()); LOG.debug("Writing line {}", i); Thread.sleep(50); } fos.flush(); } LOG.debug("Writing slow file DONE..."); } @Override protected RouteBuilder createRouteBuilder() { return new RouteBuilder() { @Override public void configure() { from(fileUri( "in?initialDelay=0&delay=10&readLock=changed&readLockCheckInterval=100&readLockMinAge=1000&readLockTimeout=1500")) .to(fileUri("out"), "mock:result"); } }; } }
FileChangedReadLockMinAgeTest
java
square__retrofit
retrofit/src/main/java/retrofit2/Invocation.java
{ "start": 868, "end": 1144 }
class ____ both the method * that was called and the arguments to the method. * * <p>Retrofit automatically adds an invocation to each OkHttp request as a tag. You can retrieve * the invocation in an OkHttp interceptor for metrics and monitoring. * * <pre><code> *
captures
java
apache__flink
flink-core/src/main/java/org/apache/flink/core/failure/FailureEnricherFactory.java
{ "start": 1006, "end": 1074 }
class ____ creating {@link FailureEnricher}. */ @Experimental public
for
java
elastic__elasticsearch
server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsFormat.java
{ "start": 25063, "end": 27939 }
class ____ extends BlockTermState { /** file pointer to the start of the doc ids enumeration, in {@link #DOC_EXTENSION} file */ public long docStartFP; /** file pointer to the start of the positions enumeration, in {@link #POS_EXTENSION} file */ public long posStartFP; /** file pointer to the start of the payloads enumeration, in {@link #PAY_EXTENSION} file */ public long payStartFP; /** * file offset for the start of the skip list, relative to docStartFP, if there are more than * {@link ForUtil#BLOCK_SIZE} docs; otherwise -1 */ public long skipOffset; /** * file offset for the last position in the last block, if there are more than {@link * ForUtil#BLOCK_SIZE} positions; otherwise -1 * * <p>One might think to use total term frequency to track how many positions are left to read * as we decode the blocks, and decode the last block differently when num_left_positions &lt; * BLOCK_SIZE. Unfortunately this won't work since the tracking will be messed up when we skip * blocks as the skipper will only tell us new position offset (start of block) and number of * positions to skip for that block, without telling us how many positions it has skipped. */ public long lastPosBlockOffset; /** * docid when there is a single pulsed posting, otherwise -1. freq is always implicitly * totalTermFreq in this case. */ public int singletonDocID; /** Sole constructor. 
*/ public IntBlockTermState() { skipOffset = -1; lastPosBlockOffset = -1; singletonDocID = -1; } @Override public IntBlockTermState clone() { IntBlockTermState other = new IntBlockTermState(); other.copyFrom(this); return other; } @Override public void copyFrom(TermState _other) { super.copyFrom(_other); IntBlockTermState other = (IntBlockTermState) _other; docStartFP = other.docStartFP; posStartFP = other.posStartFP; payStartFP = other.payStartFP; lastPosBlockOffset = other.lastPosBlockOffset; skipOffset = other.skipOffset; singletonDocID = other.singletonDocID; } @Override public String toString() { return super.toString() + " docStartFP=" + docStartFP + " posStartFP=" + posStartFP + " payStartFP=" + payStartFP + " lastPosBlockOffset=" + lastPosBlockOffset + " singletonDocID=" + singletonDocID; } } }
IntBlockTermState
java
elastic__elasticsearch
server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java
{ "start": 25017, "end": 27423 }
class ____<Response extends TransportResponse> extends ActionListenerResponseHandler<Response> { private final String nodeId; ConnectionCountingHandler( final ActionListener<? super Response> listener, final Writeable.Reader<Response> responseReader, final Transport.Connection connection ) { super(listener, responseReader, TransportResponseHandler.TRANSPORT_WORKER); this.nodeId = connection.getNode().getId(); // Increment the number of connections for this node by one clientConnections.compute(nodeId, (id, conns) -> conns == null ? 1 : conns + 1); } @Override public void handleResponse(Response response) { super.handleResponse(response); decConnectionCount(); } @Override public void handleException(TransportException e) { super.handleException(e); decConnectionCount(); } // Decrement the number of connections or remove it entirely if there are no more connections // We need to remove the entry here so we don't leak when nodes go away forever private void decConnectionCount() { assert assertNodePresent(); clientConnections.computeIfPresent(nodeId, (id, conns) -> conns == 1 ? null : conns - 1); } private boolean assertNodePresent() { var conns = clientConnections.get(nodeId); assert conns != null : "number of connections for " + nodeId + " is null, but should be an integer"; assert conns >= 1 : "number of connections for " + nodeId + " should be >= 1 but was " + conns; // Always return true, there is additional asserting here, the boolean is just so this // can be skipped when assertions are not enabled return true; } } public void cancelSearchTask(SearchTask task, String reason) { CancelTasksRequest req = new CancelTasksRequest().setTargetTaskId(new TaskId(client.getLocalNodeId(), task.getId())) .setReason("Fatal failure during search: " + reason); // force the origin to execute the cancellation as a system user new OriginSettingClient(client, TransportGetTaskAction.TASKS_ORIGIN).admin().cluster().cancelTasks(req, ActionListener.noop()); } }
ConnectionCountingHandler
java
apache__flink
flink-clients/src/main/java/org/apache/flink/client/deployment/application/ApplicationConfiguration.java
{ "start": 1330, "end": 3227 }
class ____ { public static final ConfigOption<List<String>> APPLICATION_ARGS = ConfigOptions.key("$internal.application.program-args") .stringType() .asList() .noDefaultValue(); public static final ConfigOption<String> APPLICATION_MAIN_CLASS = ConfigOptions.key("$internal.application.main").stringType().noDefaultValue(); private final String[] programArguments; @Nullable private final String applicationClassName; public ApplicationConfiguration( final String[] programArguments, @Nullable final String applicationClassName) { this.programArguments = checkNotNull(programArguments); this.applicationClassName = applicationClassName; } public String[] getProgramArguments() { return programArguments; } @Nullable public String getApplicationClassName() { return applicationClassName; } public void applyToConfiguration(final Configuration configuration) { checkNotNull(configuration); ConfigUtils.encodeArrayToConfig( configuration, APPLICATION_ARGS, programArguments, Objects::toString); if (applicationClassName != null) { configuration.set(APPLICATION_MAIN_CLASS, applicationClassName); } } public static ApplicationConfiguration fromConfiguration(final Configuration configuration) { checkNotNull(configuration); final List<String> programArgsList = ConfigUtils.decodeListFromConfig(configuration, APPLICATION_ARGS, String::new); final String[] programArgs = programArgsList.toArray(new String[0]); final String applicationClassName = configuration.get(APPLICATION_MAIN_CLASS); return new ApplicationConfiguration(programArgs, applicationClassName); } }
ApplicationConfiguration
java
apache__hadoop
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatusWithUpgradeDomain.java
{ "start": 1177, "end": 3913 }
class ____ implements BlockPlacementStatus { private final BlockPlacementStatus parentBlockPlacementStatus; private final Set<String> upgradeDomains; private final int numberOfReplicas; private final int upgradeDomainFactor; /** * @param parentBlockPlacementStatus the parent class' status * @param upgradeDomains the set of upgrade domains of the replicas * @param numberOfReplicas the number of replicas of the block * @param upgradeDomainFactor the configured upgrade domain factor */ public BlockPlacementStatusWithUpgradeDomain( BlockPlacementStatus parentBlockPlacementStatus, Set<String> upgradeDomains, int numberOfReplicas, int upgradeDomainFactor){ this.parentBlockPlacementStatus = parentBlockPlacementStatus; this.upgradeDomains = upgradeDomains; this.numberOfReplicas = numberOfReplicas; this.upgradeDomainFactor = upgradeDomainFactor; } @Override public boolean isPlacementPolicySatisfied() { return parentBlockPlacementStatus.isPlacementPolicySatisfied() && isUpgradeDomainPolicySatisfied(); } private boolean isUpgradeDomainPolicySatisfied() { if (numberOfReplicas <= upgradeDomainFactor) { return (numberOfReplicas <= upgradeDomains.size()); } else { return upgradeDomains.size() >= upgradeDomainFactor; } } @Override public String getErrorDescription() { if (isPlacementPolicySatisfied()) { return null; } StringBuilder errorDescription = new StringBuilder(); if (!parentBlockPlacementStatus.isPlacementPolicySatisfied()) { errorDescription.append(parentBlockPlacementStatus.getErrorDescription()); } if (!isUpgradeDomainPolicySatisfied()) { if (errorDescription.length() != 0) { errorDescription.append(" "); } errorDescription.append("The block has " + numberOfReplicas + " replicas. 
But it only has " + upgradeDomains.size() + " upgrade domains " + upgradeDomains +"."); } return errorDescription.toString(); } @Override public int getAdditionalReplicasRequired() { if (isPlacementPolicySatisfied()) { return 0; } else { // It is possible for a block to have the correct number of upgrade // domains, but only a single rack, or be on multiple racks, but only in // one upgrade domain. int parent = parentBlockPlacementStatus.getAdditionalReplicasRequired(); int child; if (numberOfReplicas <= upgradeDomainFactor) { child = numberOfReplicas - upgradeDomains.size(); } else { child = upgradeDomainFactor - upgradeDomains.size(); } return Math.max(parent, child); } } }
BlockPlacementStatusWithUpgradeDomain
java
elastic__elasticsearch
x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopFloatFloatAggregator.java
{ "start": 1193, "end": 1426 }
class ____ generated. Edit `X-TopAggregator.java.st` to edit this file. * </p> */ @Aggregator({ @IntermediateState(name = "top", type = "FLOAT_BLOCK"), @IntermediateState(name = "output", type = "FLOAT_BLOCK") }) @GroupingAggregator
is
java
apache__hadoop
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsManifestCommitProtocol.java
{ "start": 1390, "end": 2140 }
class ____ extends TestManifestCommitProtocol { private final ABFSContractTestBinding binding; public ITestAbfsManifestCommitProtocol() throws Exception { binding = new ABFSContractTestBinding(); } @BeforeEach @Override public void setup() throws Exception { binding.setup(); super.setup(); } @Override protected Configuration createConfiguration() { return enableManifestCommitter(prepareTestConfiguration(binding)); } @Override protected AbstractFSContract createContract(final Configuration conf) { return new AbfsFileSystemContract(conf, binding.isSecureMode()); } @Override protected String suitename() { return "ITestAbfsManifestCommitProtocol"; } }
ITestAbfsManifestCommitProtocol
java
elastic__elasticsearch
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/PromqlBaseParser.java
{ "start": 13497, "end": 25910 }
class ____ extends ExpressionContext { public Token operator; public ExpressionContext expression() { return getRuleContext(ExpressionContext.class,0); } public TerminalNode PLUS() { return getToken(PromqlBaseParser.PLUS, 0); } public TerminalNode MINUS() { return getToken(PromqlBaseParser.MINUS, 0); } @SuppressWarnings("this-escape") public ArithmeticUnaryContext(ExpressionContext ctx) { copyFrom(ctx); } @Override public void enterRule(ParseTreeListener listener) { if ( listener instanceof PromqlBaseParserListener ) ((PromqlBaseParserListener)listener).enterArithmeticUnary(this); } @Override public void exitRule(ParseTreeListener listener) { if ( listener instanceof PromqlBaseParserListener ) ((PromqlBaseParserListener)listener).exitArithmeticUnary(this); } @Override public <T> T accept(ParseTreeVisitor<? extends T> visitor) { if ( visitor instanceof PromqlBaseParserVisitor ) return ((PromqlBaseParserVisitor<? extends T>)visitor).visitArithmeticUnary(this); else return visitor.visitChildren(this); } } public final ExpressionContext expression() throws RecognitionException { return expression(0); } private ExpressionContext expression(int _p) throws RecognitionException { ParserRuleContext _parentctx = _ctx; int _parentState = getState(); ExpressionContext _localctx = new ExpressionContext(_ctx, _parentState); ExpressionContext _prevctx = _localctx; int _startState = 2; enterRecursionRule(_localctx, 2, RULE_expression, _p); int _la; try { int _alt; enterOuterAlt(_localctx, 1); { setState(59); _errHandler.sync(this); switch (_input.LA(1)) { case PLUS: case MINUS: { _localctx = new ArithmeticUnaryContext(_localctx); _ctx = _localctx; _prevctx = _localctx; setState(52); ((ArithmeticUnaryContext)_localctx).operator = _input.LT(1); _la = _input.LA(1); if ( !(_la==PLUS || _la==MINUS) ) { ((ArithmeticUnaryContext)_localctx).operator = (Token)_errHandler.recoverInline(this); } else { if ( _input.LA(1)==Token.EOF ) matchedEOF = true; _errHandler.reportMatch(this); 
consume(); } setState(53); expression(9); } break; case AND: case OR: case UNLESS: case BY: case WITHOUT: case ON: case IGNORING: case GROUP_LEFT: case GROUP_RIGHT: case BOOL: case OFFSET: case LCB: case STRING: case INTEGER_VALUE: case DECIMAL_VALUE: case HEXADECIMAL: case TIME_VALUE_WITH_COLON: case TIME_VALUE: case IDENTIFIER: { _localctx = new ValueExpressionContext(_localctx); _ctx = _localctx; _prevctx = _localctx; setState(54); value(); } break; case LP: { _localctx = new ParenthesizedContext(_localctx); _ctx = _localctx; _prevctx = _localctx; setState(55); match(LP); setState(56); expression(0); setState(57); match(RP); } break; default: throw new NoViableAltException(this); } _ctx.stop = _input.LT(-1); setState(110); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,10,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { if ( _parseListeners!=null ) triggerExitRuleEvent(); _prevctx = _localctx; { setState(108); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,9,_ctx) ) { case 1: { _localctx = new ArithmeticBinaryContext(new ExpressionContext(_parentctx, _parentState)); ((ArithmeticBinaryContext)_localctx).left = _prevctx; pushNewRecursionContext(_localctx, _startState, RULE_expression); setState(61); if (!(precpred(_ctx, 10))) throw new FailedPredicateException(this, "precpred(_ctx, 10)"); setState(62); ((ArithmeticBinaryContext)_localctx).op = match(CARET); setState(64); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,1,_ctx) ) { case 1: { setState(63); modifier(); } break; } setState(66); ((ArithmeticBinaryContext)_localctx).right = expression(10); } break; case 2: { _localctx = new ArithmeticBinaryContext(new ExpressionContext(_parentctx, _parentState)); ((ArithmeticBinaryContext)_localctx).left = _prevctx; pushNewRecursionContext(_localctx, _startState, RULE_expression); setState(67); if (!(precpred(_ctx, 8))) throw new 
FailedPredicateException(this, "precpred(_ctx, 8)"); setState(68); ((ArithmeticBinaryContext)_localctx).op = _input.LT(1); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & 56L) != 0)) ) { ((ArithmeticBinaryContext)_localctx).op = (Token)_errHandler.recoverInline(this); } else { if ( _input.LA(1)==Token.EOF ) matchedEOF = true; _errHandler.reportMatch(this); consume(); } setState(70); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,2,_ctx) ) { case 1: { setState(69); modifier(); } break; } setState(72); ((ArithmeticBinaryContext)_localctx).right = expression(9); } break; case 3: { _localctx = new ArithmeticBinaryContext(new ExpressionContext(_parentctx, _parentState)); ((ArithmeticBinaryContext)_localctx).left = _prevctx; pushNewRecursionContext(_localctx, _startState, RULE_expression); setState(73); if (!(precpred(_ctx, 7))) throw new FailedPredicateException(this, "precpred(_ctx, 7)"); setState(74); ((ArithmeticBinaryContext)_localctx).op = _input.LT(1); _la = _input.LA(1); if ( !(_la==PLUS || _la==MINUS) ) { ((ArithmeticBinaryContext)_localctx).op = (Token)_errHandler.recoverInline(this); } else { if ( _input.LA(1)==Token.EOF ) matchedEOF = true; _errHandler.reportMatch(this); consume(); } setState(76); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,3,_ctx) ) { case 1: { setState(75); modifier(); } break; } setState(78); ((ArithmeticBinaryContext)_localctx).right = expression(8); } break; case 4: { _localctx = new ArithmeticBinaryContext(new ExpressionContext(_parentctx, _parentState)); ((ArithmeticBinaryContext)_localctx).left = _prevctx; pushNewRecursionContext(_localctx, _startState, RULE_expression); setState(79); if (!(precpred(_ctx, 6))) throw new FailedPredicateException(this, "precpred(_ctx, 6)"); setState(80); ((ArithmeticBinaryContext)_localctx).op = _input.LT(1); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & 8064L) != 0)) ) { 
((ArithmeticBinaryContext)_localctx).op = (Token)_errHandler.recoverInline(this); } else { if ( _input.LA(1)==Token.EOF ) matchedEOF = true; _errHandler.reportMatch(this); consume(); } setState(82); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,4,_ctx) ) { case 1: { setState(81); match(BOOL); } break; } setState(85); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,5,_ctx) ) { case 1: { setState(84); modifier(); } break; } setState(87); ((ArithmeticBinaryContext)_localctx).right = expression(7); } break; case 5: { _localctx = new ArithmeticBinaryContext(new ExpressionContext(_parentctx, _parentState)); ((ArithmeticBinaryContext)_localctx).left = _prevctx; pushNewRecursionContext(_localctx, _startState, RULE_expression); setState(88); if (!(precpred(_ctx, 5))) throw new FailedPredicateException(this, "precpred(_ctx, 5)"); setState(89); ((ArithmeticBinaryContext)_localctx).op = _input.LT(1); _la = _input.LA(1); if ( !(_la==AND || _la==UNLESS) ) { ((ArithmeticBinaryContext)_localctx).op = (Token)_errHandler.recoverInline(this); } else { if ( _input.LA(1)==Token.EOF ) matchedEOF = true; _errHandler.reportMatch(this); consume(); } setState(91); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,6,_ctx) ) { case 1: { setState(90); modifier(); } break; } setState(93); ((ArithmeticBinaryContext)_localctx).right = expression(6); } break; case 6: { _localctx = new ArithmeticBinaryContext(new ExpressionContext(_parentctx, _parentState)); ((ArithmeticBinaryContext)_localctx).left = _prevctx; pushNewRecursionContext(_localctx, _startState, RULE_expression); setState(94); if (!(precpred(_ctx, 4))) throw new FailedPredicateException(this, "precpred(_ctx, 4)"); setState(95); ((ArithmeticBinaryContext)_localctx).op = match(OR); setState(97); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,7,_ctx) ) { case 1: { setState(96); modifier(); } break; } setState(99); 
((ArithmeticBinaryContext)_localctx).right = expression(5); } break; case 7: { _localctx = new SubqueryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); setState(100); if (!(precpred(_ctx, 1))) throw new FailedPredicateException(this, "precpred(_ctx, 1)"); setState(101); match(LSB); setState(102); ((SubqueryContext)_localctx).range = duration(); setState(103); subqueryResolution(); setState(104); match(RSB); setState(106); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,8,_ctx) ) { case 1: { setState(105); evaluation(); } break; } } break; } } } setState(112); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,10,_ctx); } } } catch (RecognitionException re) { _localctx.exception = re; _errHandler.reportError(this, re); _errHandler.recover(this, re); } finally { unrollRecursionContexts(_parentctx); } return _localctx; } @SuppressWarnings("CheckReturnValue") public static
ArithmeticUnaryContext
java
mapstruct__mapstruct
processor/src/test/java/org/mapstruct/ap/test/bugs/_2663/Nullable.java
{ "start": 274, "end": 1034 }
class ____<T> { @SuppressWarnings("rawtypes") private static final Nullable UNDEFINED = new Nullable<>( null, false ); private final T value; private final boolean present; private Nullable(T value, boolean present) { this.value = value; this.present = present; } public T get() { if (!present) { throw new NoSuchElementException("Value is undefined"); } return value; } public boolean isPresent() { return present; } public static <T> Nullable<T> of(T value) { return new Nullable<>( value, true ); } @SuppressWarnings("unchecked") public static <T> Nullable<T> undefined() { return (Nullable<T>) UNDEFINED; } }
Nullable
java
alibaba__druid
core/src/test/java/com/alibaba/druid/bvt/sql/mysql/create/MySqlCreateIndexTest_1.java
{ "start": 966, "end": 1501 }
class ____ extends MysqlTest { @Test public void test_one() throws Exception { String sql = "CREATE SPATIAL INDEX g ON geom (g);"; List<SQLStatement> stmtList = SQLUtils.toStatementList(sql, JdbcConstants.MYSQL); SQLStatement stmt = stmtList.get(0); MySqlSchemaStatVisitor visitor = new MySqlSchemaStatVisitor(); stmt.accept(visitor); String output = SQLUtils.toMySqlString(stmt); assertEquals("CREATE SPATIAL INDEX g ON geom (g);", output); } }
MySqlCreateIndexTest_1
java
ReactiveX__RxJava
src/main/java/io/reactivex/rxjava3/internal/operators/observable/ObservableSequenceEqual.java
{ "start": 1056, "end": 1909 }
class ____<T> extends Observable<Boolean> { final ObservableSource<? extends T> first; final ObservableSource<? extends T> second; final BiPredicate<? super T, ? super T> comparer; final int bufferSize; public ObservableSequenceEqual(ObservableSource<? extends T> first, ObservableSource<? extends T> second, BiPredicate<? super T, ? super T> comparer, int bufferSize) { this.first = first; this.second = second; this.comparer = comparer; this.bufferSize = bufferSize; } @Override public void subscribeActual(Observer<? super Boolean> observer) { EqualCoordinator<T> ec = new EqualCoordinator<>(observer, bufferSize, first, second, comparer); observer.onSubscribe(ec); ec.subscribe(); } static final
ObservableSequenceEqual
java
apache__dubbo
dubbo-metadata/dubbo-metadata-definition-protobuf/src/test/java/org/apache/dubbo/metadata/definition/protobuf/model/ServiceInterface.java
{ "start": 871, "end": 976 }
interface ____ { GooglePB.PBResponseType sayHello(GooglePB.PBRequestType requestType); }
ServiceInterface
java
mapstruct__mapstruct
integrationtest/src/test/resources/sealedSubclassTest/src/main/java/org/mapstruct/itest/sealedsubclass/VehicleDto.java
{ "start": 216, "end": 588 }
class ____ permits CarDto, BikeDto, MotorDto { private String name; private String maker; public String getName() { return name; } public void setName(String name) { this.name = name; } public String getMaker() { return maker; } public void setMaker(String maker) { this.maker = maker; } }
VehicleDto
java
quarkusio__quarkus
integration-tests/hibernate-orm-panache/src/main/java/io/quarkus/it/panache/defaultpu/ObjectWithCompositeId.java
{ "start": 324, "end": 489 }
class ____ extends PanacheEntityBase { @Id public String part1; @Id public String part2; public String description; static
ObjectWithCompositeId
java
google__gson
gson/src/main/java/com/google/gson/stream/JsonWriter.java
{ "start": 3337, "end": 5635 }
class ____, and can be adjusted with the various {@link * com.google.gson.GsonBuilder} methods. * * <h2>Example</h2> * * Suppose we'd like to encode a stream of messages such as the following: * * <pre>{@code * [ * { * "id": 912345678901, * "text": "How do I stream JSON in Java?", * "geo": null, * "user": { * "name": "json_newb", * "followers_count": 41 * } * }, * { * "id": 912345678902, * "text": "@json_newb just use JsonWriter!", * "geo": [50.454722, -104.606667], * "user": { * "name": "jesse", * "followers_count": 2 * } * } * ] * }</pre> * * This code encodes the above structure: * * <pre>{@code * public void writeJsonStream(OutputStream out, List<Message> messages) throws IOException { * JsonWriter writer = new JsonWriter(new OutputStreamWriter(out, "UTF-8")); * writer.setIndent(" "); * writeMessagesArray(writer, messages); * writer.close(); * } * * public void writeMessagesArray(JsonWriter writer, List<Message> messages) throws IOException { * writer.beginArray(); * for (Message message : messages) { * writeMessage(writer, message); * } * writer.endArray(); * } * * public void writeMessage(JsonWriter writer, Message message) throws IOException { * writer.beginObject(); * writer.name("id").value(message.getId()); * writer.name("text").value(message.getText()); * if (message.getGeo() != null) { * writer.name("geo"); * writeDoublesArray(writer, message.getGeo()); * } else { * writer.name("geo").nullValue(); * } * writer.name("user"); * writeUser(writer, message.getUser()); * writer.endObject(); * } * * public void writeUser(JsonWriter writer, User user) throws IOException { * writer.beginObject(); * writer.name("name").value(user.getName()); * writer.name("followers_count").value(user.getFollowersCount()); * writer.endObject(); * } * * public void writeDoublesArray(JsonWriter writer, List<Double> doubles) throws IOException { * writer.beginArray(); * for (Double value : doubles) { * writer.value(value); * } * writer.endArray(); * } * }</pre> * * <p>Each {@code 
JsonWriter} may be used to write a single JSON stream. Instances of this
differs
java
FasterXML__jackson-databind
src/test/java/tools/jackson/databind/util/BufferRecyclersDatabindTest.java
{ "start": 4179, "end": 5150 }
class ____ implements RecyclerPool<BufferRecycler> { private static final long serialVersionUID = 1L; private static final Predicate<Thread> isVirtual = VirtualPredicate.findIsVirtualPredicate(); private final RecyclerPool<BufferRecycler> nativePool = JsonRecyclerPools.threadLocalPool(); private final RecyclerPool<BufferRecycler> virtualPool = JsonRecyclerPools.newConcurrentDequePool(); @Override public BufferRecycler acquirePooled() { return isVirtual.test(Thread.currentThread()) ? virtualPool.acquirePooled() : nativePool.acquirePooled(); } @Override public void releasePooled(BufferRecycler pooled) { if (isVirtual.test(Thread.currentThread())) { virtualPool.releasePooled(pooled); } else { nativePool.releasePooled(pooled); } } static
HybridTestPool
java
alibaba__fastjson
src/test/java/com/alibaba/json/bvt/ref/RefTest6.java
{ "start": 941, "end": 1265 }
class ____ { private C c; private A a; public C getC() { return c; } public void setC(C c) { this.c = c; } public A getA() { return a; } public void setA(A a) { this.a = a; } } private
B
java
apache__hadoop
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
{ "start": 65433, "end": 65633 }
class ____ extends Options.PathOption implements Option { private FileOption(Path value) { super(value); } } private static
FileOption
java
spring-projects__spring-framework
spring-test/src/test/java/org/springframework/test/context/support/ActiveProfilesUtilsTests.java
{ "start": 7246, "end": 7432 }
class ____ extends LocationsBar { } @ActiveProfiles(profiles = { "dog", "cat" }, inheritProfiles = false) @Retention(RetentionPolicy.RUNTIME) @Target(ElementType.TYPE) private @
Animals
java
elastic__elasticsearch
test/framework/src/main/java/org/elasticsearch/test/CheckedFunctionUtils.java
{ "start": 753, "end": 1974 }
class ____ { /** * Returns a Mockito matcher for any argument that is an {@link CheckedFunction}. * @param <T> the function input type that the caller expects. Do not specify this, it will be inferred * @param <R> the function output type that the caller expects. Do not specify this, it will be inferred * @param <E> the function exception type that the caller expects. Do not specify this, it will be inferred * @return a checked function matcher */ @SuppressWarnings("unchecked") public static <T, R, E extends Exception> CheckedFunction<T, R, E> anyCheckedFunction() { return any(CheckedFunction.class); } /** * Returns a Mockito matcher for any argument that is an {@link CheckedSupplier}. * @param <R> the supplier output type that the caller expects. Do not specify this, it will be inferred * @param <E> the supplier exception type that the caller expects. Do not specify this, it will be inferred * @return a checked supplier matcher */ @SuppressWarnings("unchecked") public static <R, E extends Exception> CheckedSupplier<R, E> anyCheckedSupplier() { return any(CheckedSupplier.class); } }
CheckedFunctionUtils
java
reactor__reactor-core
reactor-core/src/main/java/reactor/core/publisher/FluxWindowWhen.java
{ "start": 9295, "end": 10759 }
class ____<T, U> implements Disposable, Subscriber<U> { @SuppressWarnings("NotNullFieldNotInitialized") // initialized in onSubscribe volatile Subscription subscription; // https://github.com/uber/NullAway/issues/1157 @SuppressWarnings({"rawtypes", "DataFlowIssue"}) static final AtomicReferenceFieldUpdater<WindowWhenOpenSubscriber, @Nullable Subscription> SUBSCRIPTION = AtomicReferenceFieldUpdater.newUpdater(WindowWhenOpenSubscriber.class, Subscription.class, "subscription"); final WindowWhenMainSubscriber<T, U, ?> parent; boolean done; WindowWhenOpenSubscriber(WindowWhenMainSubscriber<T, U, ?> parent) { this.parent = parent; } @Override public void onSubscribe(Subscription s) { if (Operators.setOnce(SUBSCRIPTION, this, s)) { subscription.request(Long.MAX_VALUE); } } @Override public void dispose() { Operators.terminate(SUBSCRIPTION, this); } @Override public boolean isDisposed() { return subscription == Operators.cancelledSubscription(); } @Override public void onNext(U t) { if (done) { return; } parent.open(t); } @Override public void onError(Throwable t) { if (done) { Operators.onErrorDropped(t, parent.actual.currentContext()); return; } done = true; parent.error(t); } @Override public void onComplete() { if (done) { return; } done = true; parent.onComplete(); } } static final
WindowWhenOpenSubscriber
java
mybatis__mybatis-3
src/main/java/org/apache/ibatis/reflection/Reflector.java
{ "start": 1811, "end": 1961 }
class ____ information that allows for easy mapping between property * names and getter/setter methods. * * @author Clinton Begin */ public
definition
java
hibernate__hibernate-orm
hibernate-envers/src/main/java/org/hibernate/envers/internal/revisioninfo/RevisionTimestampValueResolver.java
{ "start": 572, "end": 3112 }
class ____ { private final RevisionTimestampData timestampData; private final Setter revisionTimestampSetter; public RevisionTimestampValueResolver(Class<?> revisionInfoClass, RevisionTimestampData timestampData, ServiceRegistry serviceRegistry) { this.timestampData = timestampData; this.revisionTimestampSetter = ReflectionTools.getSetter( revisionInfoClass, timestampData, serviceRegistry ); } public String getName() { return timestampData.getName(); } public void resolveNow(Object object) { if ( timestampData.isTimestampDate() ) { revisionTimestampSetter.set( object, new Date() ); } else if ( timestampData.isTimestampLocalDateTime() ) { revisionTimestampSetter.set( object, LocalDateTime.now() ); } else if ( timestampData.isInstant() ) { // HHH-17139 truncated to milliseconds to allow Date-based AuditReader functions to // continue to work with the same precision level. revisionTimestampSetter.set( object, Instant.now().truncatedTo( ChronoUnit.MILLIS ) ); } else { revisionTimestampSetter.set( object, System.currentTimeMillis() ); } } public Object resolveByValue(Date date) { if ( date != null ) { if ( timestampData.isTimestampDate() ) { return date; } else if ( timestampData.isTimestampLocalDateTime() ) { return LocalDateTime.ofInstant( date.toInstant(), ZoneId.systemDefault() ); } else if ( timestampData.isInstant() ) { return date.toInstant(); } else { return date.getTime(); } } return null; } public Object resolveByValue(LocalDateTime localDateTime) { if ( localDateTime != null ) { if ( timestampData.isTimestampDate() ) { return Date.from( localDateTime.atZone( ZoneId.systemDefault() ).toInstant() ); } else if ( timestampData.isTimestampLocalDateTime() ) { return localDateTime; } else if ( timestampData.isInstant() ) { return localDateTime.atZone( ZoneId.systemDefault() ).toInstant(); } else { return localDateTime.atZone( ZoneId.systemDefault() ).toInstant().toEpochMilli(); } } return null; } public Object resolveByValue(Instant instant) { if ( instant != null ) 
{ if ( timestampData.isTimestampDate() ) { return Date.from( instant ); } else if ( timestampData.isTimestampLocalDateTime() ) { return LocalDateTime.ofInstant( instant, ZoneId.systemDefault() ); } else if ( timestampData.isInstant() ) { return instant; } else { return instant.toEpochMilli(); } } return null; } }
RevisionTimestampValueResolver
java
alibaba__fastjson
src/test/java/com/alibaba/json/bvt/parser/deser/asm/TestASM_primitive.java
{ "start": 333, "end": 843 }
class ____ extends TestCase { public void test_asm() throws Exception { ASMDeserializerFactory factory = new ASMDeserializerFactory(new ASMClassLoader()); Exception error = null; try { JavaBeanInfo beanInfo = JavaBeanInfo.build(int.class, int.class, null); factory.createJavaBeanDeserializer(ParserConfig.getGlobalInstance(), beanInfo); } catch (Exception ex) { error = ex; } assertNotNull(error); } }
TestASM_primitive
java
elastic__elasticsearch
test/framework/src/main/java/org/elasticsearch/geo/GeometryTestUtils.java
{ "start": 1407, "end": 13322 }
class ____ { public static double randomLat() { return GeoTestUtil.nextLatitude(); } public static double randomLon() { return GeoTestUtil.nextLongitude(); } public static double randomAlt() { return ESTestCase.randomDouble(); } public static Circle randomCircle(boolean hasAlt) { org.apache.lucene.geo.Circle luceneCircle = GeoTestUtil.nextCircle(); if (hasAlt) { return new Circle(luceneCircle.getLon(), luceneCircle.getLat(), ESTestCase.randomDouble(), luceneCircle.getRadius()); } else { return new Circle(luceneCircle.getLon(), luceneCircle.getLat(), luceneCircle.getRadius()); } } public static Line randomLine(boolean hasAlts) { // we use nextPolygon because it guarantees no duplicate points org.apache.lucene.geo.Polygon lucenePolygon = GeoTestUtil.nextPolygon(); int size = lucenePolygon.numPoints() - 1; double[] lats = new double[size]; double[] lons = new double[size]; double[] alts = hasAlts ? new double[size] : null; for (int i = 0; i < size; i++) { lats[i] = lucenePolygon.getPolyLat(i); lons[i] = lucenePolygon.getPolyLon(i); if (hasAlts) { alts[i] = randomAlt(); } } if (hasAlts) { return new Line(lons, lats, alts); } return new Line(lons, lats); } public static Point randomPoint() { return randomPoint(ESTestCase.randomBoolean()); } public static Point randomPoint(boolean hasAlt) { if (hasAlt) { return new Point(randomLon(), randomLat(), randomAlt()); } else { return new Point(randomLon(), randomLat()); } } public static Polygon randomPolygon(boolean hasAlt) { org.apache.lucene.geo.Polygon lucenePolygon = randomValueOtherThanMany(p -> area(p) == 0, GeoTestUtil::nextPolygon); if (lucenePolygon.numHoles() > 0) { org.apache.lucene.geo.Polygon[] luceneHoles = lucenePolygon.getHoles(); List<LinearRing> holes = new ArrayList<>(); for (int i = 0; i < lucenePolygon.numHoles(); i++) { org.apache.lucene.geo.Polygon poly = luceneHoles[i]; holes.add(linearRing(poly.getPolyLons(), poly.getPolyLats(), hasAlt)); } return new Polygon(linearRing(lucenePolygon.getPolyLons(), 
lucenePolygon.getPolyLats(), hasAlt), holes); } return new Polygon(linearRing(lucenePolygon.getPolyLons(), lucenePolygon.getPolyLats(), hasAlt)); } private static double area(org.apache.lucene.geo.Polygon lucenePolygon) { double windingSum = 0; final int numPts = lucenePolygon.numPoints() - 1; for (int i = 0; i < numPts; i++) { // compute signed area windingSum += lucenePolygon.getPolyLon(i) * lucenePolygon.getPolyLat(i + 1) - lucenePolygon.getPolyLat(i) * lucenePolygon .getPolyLon(i + 1); } return Math.abs(windingSum / 2); } private static double[] randomAltRing(int size) { double[] alts = new double[size]; for (int i = 0; i < size - 1; i++) { alts[i] = randomAlt(); } alts[size - 1] = alts[0]; return alts; } public static LinearRing linearRing(double[] lons, double[] lats, boolean generateAlts) { if (generateAlts) { return new LinearRing(lons, lats, randomAltRing(lats.length)); } return new LinearRing(lons, lats); } public static Rectangle randomRectangle() { org.apache.lucene.geo.Rectangle rectangle = GeoTestUtil.nextBox(); return new Rectangle(rectangle.minLon, rectangle.maxLon, rectangle.maxLat, rectangle.minLat); } public static MultiPoint randomMultiPoint(boolean hasAlt) { int size = ESTestCase.randomIntBetween(3, 10); List<Point> points = new ArrayList<>(); for (int i = 0; i < size; i++) { points.add(randomPoint(hasAlt)); } return new MultiPoint(points); } public static MultiLine randomMultiLine(boolean hasAlt) { int size = ESTestCase.randomIntBetween(3, 10); List<Line> lines = new ArrayList<>(); for (int i = 0; i < size; i++) { lines.add(randomLine(hasAlt)); } return new MultiLine(lines); } public static MultiPolygon randomMultiPolygon(boolean hasAlt) { int size = ESTestCase.randomIntBetween(3, 10); List<Polygon> polygons = new ArrayList<>(); for (int i = 0; i < size; i++) { polygons.add(randomPolygon(hasAlt)); } return new MultiPolygon(polygons); } public static GeometryCollection<Geometry> randomGeometryCollection(boolean hasAlt) { return 
randomGeometryCollection(0, hasAlt); } public static GeometryCollection<Geometry> randomGeometryCollectionWithoutCircle(boolean hasAlt) { return randomGeometryCollectionWithoutCircle(0, hasAlt); } private static GeometryCollection<Geometry> randomGeometryCollection(int level, boolean hasAlt) { int size = ESTestCase.randomIntBetween(1, 10); List<Geometry> shapes = new ArrayList<>(); for (int i = 0; i < size; i++) { shapes.add(randomGeometry(level, hasAlt)); } return new GeometryCollection<>(shapes); } private static GeometryCollection<Geometry> randomGeometryCollectionWithoutCircle(int level, boolean hasAlt) { int size = ESTestCase.randomIntBetween(1, 10); List<Geometry> shapes = new ArrayList<>(); for (int i = 0; i < size; i++) { shapes.add(randomGeometryWithoutCircle(level, hasAlt)); } return new GeometryCollection<>(shapes); } public static Geometry randomGeometry(ShapeType type, boolean hasAlt) { return switch (type) { case GEOMETRYCOLLECTION -> randomGeometryCollection(0, hasAlt); case MULTILINESTRING -> randomMultiLine(hasAlt); case ENVELOPE -> randomRectangle(); case LINESTRING -> randomLine(hasAlt); case POLYGON -> randomPolygon(hasAlt); case MULTIPOLYGON -> randomMultiPolygon(hasAlt); case CIRCLE -> randomCircle(hasAlt); case MULTIPOINT -> randomMultiPoint(hasAlt); case POINT -> randomPoint(hasAlt); default -> throw new IllegalArgumentException("Unsupported shape type [" + type + "]"); }; } public static Geometry randomGeometry(boolean hasAlt) { return randomGeometry(0, hasAlt); } public static Geometry randomGeometry(boolean hasAlt, int maxPoints) { var pointCounter = new GeometryPointCountVisitor(); return randomValueOtherThanMany(g -> g.visit(pointCounter) > maxPoints, () -> randomGeometry(0, hasAlt)); } protected static Geometry randomGeometry(int level, boolean hasAlt) { @SuppressWarnings("unchecked") Function<Boolean, Geometry> geometry = ESTestCase.randomFrom( GeometryTestUtils::randomCircle, GeometryTestUtils::randomLine, 
GeometryTestUtils::randomPoint, GeometryTestUtils::randomPolygon, GeometryTestUtils::randomMultiLine, GeometryTestUtils::randomMultiPoint, GeometryTestUtils::randomMultiPolygon, hasAlt ? GeometryTestUtils::randomPoint : (b) -> randomRectangle(), level < 3 ? (b) -> randomGeometryCollection(level + 1, b) : GeometryTestUtils::randomPoint // don't build too deep ); return geometry.apply(hasAlt); } public static Geometry randomGeometryWithoutCircle(int level, boolean hasAlt) { @SuppressWarnings("unchecked") Function<Boolean, Geometry> geometry = ESTestCase.randomFrom( GeometryTestUtils::randomPoint, GeometryTestUtils::randomMultiPoint, GeometryTestUtils::randomLine, GeometryTestUtils::randomMultiLine, GeometryTestUtils::randomPolygon, GeometryTestUtils::randomMultiPolygon, hasAlt ? GeometryTestUtils::randomPoint : (b) -> randomRectangle(), level < 3 ? (b) -> randomGeometryWithoutCircleCollection(level + 1, hasAlt) : GeometryTestUtils::randomPoint // don't build too // deep ); return geometry.apply(hasAlt); } private static Geometry randomGeometryWithoutCircleCollection(int level, boolean hasAlt) { int size = ESTestCase.randomIntBetween(1, 10); List<Geometry> shapes = new ArrayList<>(); for (int i = 0; i < size; i++) { shapes.add(randomGeometryWithoutCircle(level, hasAlt)); } return new GeometryCollection<>(shapes); } /** * Extracts all vertices of the supplied geometry */ public static MultiPoint toMultiPoint(Geometry geometry) { return geometry.visit(new GeometryVisitor<>() { @Override public MultiPoint visit(Circle circle) throws RuntimeException { throw new UnsupportedOperationException("not supporting circles yet"); } @Override public MultiPoint visit(GeometryCollection<?> collection) throws RuntimeException { List<Point> points = new ArrayList<>(); collection.forEach(geometry -> toMultiPoint(geometry).forEach(points::add)); return new MultiPoint(points); } @Override public MultiPoint visit(Line line) throws RuntimeException { List<Point> points = new ArrayList<>(); 
for (int i = 0; i < line.length(); i++) { points.add(new Point(line.getX(i), line.getY(i), line.getZ(i))); } return new MultiPoint(points); } @Override public MultiPoint visit(LinearRing ring) throws RuntimeException { return visit((Line) ring); } @Override public MultiPoint visit(MultiLine multiLine) throws RuntimeException { return visit((GeometryCollection<?>) multiLine); } @Override public MultiPoint visit(MultiPoint multiPoint) throws RuntimeException { return multiPoint; } @Override public MultiPoint visit(MultiPolygon multiPolygon) throws RuntimeException { return visit((GeometryCollection<?>) multiPolygon); } @Override public MultiPoint visit(Point point) throws RuntimeException { return new MultiPoint(Collections.singletonList(point)); } @Override public MultiPoint visit(Polygon polygon) throws RuntimeException { List<Geometry> multiPoints = new ArrayList<>(); multiPoints.add(toMultiPoint(polygon.getPolygon())); for (int i = 0; i < polygon.getNumberOfHoles(); i++) { multiPoints.add(toMultiPoint(polygon.getHole(i))); } return toMultiPoint(new GeometryCollection<>(multiPoints)); } @Override public MultiPoint visit(Rectangle rectangle) throws RuntimeException { return new MultiPoint( Arrays.asList( new Point(rectangle.getMinX(), rectangle.getMinY(), rectangle.getMinZ()), new Point(rectangle.getMaxX(), rectangle.getMaxY(), rectangle.getMaxZ()) ) ); } }); } }
GeometryTestUtils
java
apache__camel
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/ZooKeeperEndpointBuilderFactory.java
{ "start": 21801, "end": 24190 }
interface ____ { /** * ZooKeeper (camel-zookeeper) * Manage ZooKeeper clusters. * * Category: clustering,management,bigdata * Since: 2.9 * Maven coordinates: org.apache.camel:camel-zookeeper * * @return the dsl builder for the headers' name. */ default ZooKeeperHeaderNameBuilder zookeeper() { return ZooKeeperHeaderNameBuilder.INSTANCE; } /** * ZooKeeper (camel-zookeeper) * Manage ZooKeeper clusters. * * Category: clustering,management,bigdata * Since: 2.9 * Maven coordinates: org.apache.camel:camel-zookeeper * * Syntax: <code>zookeeper:serverUrls/path</code> * * Path parameter: serverUrls (required) * The zookeeper server hosts (multiple servers can be separated by * comma) * * Path parameter: path (required) * The node in the ZooKeeper server (aka znode) * * @param path serverUrls/path * @return the dsl builder */ default ZooKeeperEndpointBuilder zookeeper(String path) { return ZooKeeperEndpointBuilderFactory.endpointBuilder("zookeeper", path); } /** * ZooKeeper (camel-zookeeper) * Manage ZooKeeper clusters. * * Category: clustering,management,bigdata * Since: 2.9 * Maven coordinates: org.apache.camel:camel-zookeeper * * Syntax: <code>zookeeper:serverUrls/path</code> * * Path parameter: serverUrls (required) * The zookeeper server hosts (multiple servers can be separated by * comma) * * Path parameter: path (required) * The node in the ZooKeeper server (aka znode) * * @param componentName to use a custom component name for the endpoint * instead of the default name * @param path serverUrls/path * @return the dsl builder */ default ZooKeeperEndpointBuilder zookeeper(String componentName, String path) { return ZooKeeperEndpointBuilderFactory.endpointBuilder(componentName, path); } } /** * The builder of headers' name for the ZooKeeper component. */ public static
ZooKeeperBuilders
java
spring-projects__spring-security
access/src/test/java/org/springframework/security/access/intercept/RunAsUserTokenTests.java
{ "start": 1078, "end": 2886 }
class ____ { @Test public void testAuthenticationSetting() { RunAsUserToken token = new RunAsUserToken("my_password", "Test", "Password", AuthorityUtils.createAuthorityList("ROLE_ONE", "ROLE_TWO"), UsernamePasswordAuthenticationToken.class); assertThat(token.isAuthenticated()).isTrue(); token.setAuthenticated(false); assertThat(!token.isAuthenticated()).isTrue(); } @Test public void testGetters() { RunAsUserToken token = new RunAsUserToken("my_password", "Test", "Password", AuthorityUtils.createAuthorityList("ROLE_ONE", "ROLE_TWO"), UsernamePasswordAuthenticationToken.class); assertThat("Test").isEqualTo(token.getPrincipal()); assertThat("Password").isEqualTo(token.getCredentials()); assertThat("my_password".hashCode()).isEqualTo(token.getKeyHash()); assertThat(UsernamePasswordAuthenticationToken.class).isEqualTo(token.getOriginalAuthentication()); } @Test public void testNoArgConstructorDoesntExist() { assertThatExceptionOfType(NoSuchMethodException.class) .isThrownBy(() -> RunAsUserToken.class.getDeclaredConstructor((Class[]) null)); } @Test public void testToString() { RunAsUserToken token = new RunAsUserToken("my_password", "Test", "Password", AuthorityUtils.createAuthorityList("ROLE_ONE", "ROLE_TWO"), UsernamePasswordAuthenticationToken.class); assertThat(token.toString() .lastIndexOf("Original Class: " + UsernamePasswordAuthenticationToken.class.getName().toString()) != -1) .isTrue(); } // SEC-1792 @Test public void testToStringNullOriginalAuthentication() { RunAsUserToken token = new RunAsUserToken("my_password", "Test", "Password", AuthorityUtils.createAuthorityList("ROLE_ONE", "ROLE_TWO"), null); assertThat(token.toString().lastIndexOf("Original Class: null") != -1).isTrue(); } }
RunAsUserTokenTests
java
hibernate__hibernate-orm
hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhancement/access/HierarchyPropertyAccessTest.java
{ "start": 3719, "end": 3869 }
class ____ { protected Integer superProperty; } @Entity(name = "ParentEntity") @DiscriminatorColumn(name = "entity_type") static
AbstractSuperclass
java
mapstruct__mapstruct
processor/src/test/java/org/mapstruct/ap/test/erroneous/ambiguousfactorymethod/a/BarFactory.java
{ "start": 323, "end": 414 }
class ____ { public Bar createBar() { return new Bar( "BAR" ); } }
BarFactory
java
spring-projects__spring-framework
integration-tests/src/test/java/org/springframework/transaction/annotation/EnableTransactionManagementIntegrationTests.java
{ "start": 7555, "end": 7781 }
class ____ { @Bean PlatformTransactionManager txManager(DataSource dataSource) { return new DataSourceTransactionManager(dataSource); } } @Configuration @EnableTransactionManagement static
CustomTxManagerNameConfig
java
elastic__elasticsearch
x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java
{ "start": 5682, "end": 37983 }
class ____ extends RollupIndexer { final Function<SearchRequest, SearchResponse> searchFunction; final Function<BulkRequest, BulkResponse> bulkFunction; final Consumer<Exception> failureConsumer; final BiConsumer<IndexerState, Map<String, Object>> saveStateCheck; private CountDownLatch latch; NonEmptyRollupIndexer( ThreadPool threadPool, RollupJob job, AtomicReference<IndexerState> initialState, Map<String, Object> initialPosition, Function<SearchRequest, SearchResponse> searchFunction, Function<BulkRequest, BulkResponse> bulkFunction, Consumer<Exception> failureConsumer, BiConsumer<IndexerState, Map<String, Object>> saveStateCheck ) { super(threadPool, job, initialState, initialPosition); this.searchFunction = searchFunction; this.bulkFunction = bulkFunction; this.failureConsumer = failureConsumer; this.saveStateCheck = saveStateCheck; } private CountDownLatch newLatch(int count) { return latch = new CountDownLatch(count); } @Override protected void doNextSearch(long waitTimeInNanos, ActionListener<SearchResponse> nextPhase) { assert latch != null; try { latch.await(); } catch (InterruptedException e) { throw new IllegalStateException(e); } try { ActionListener.respondAndRelease(nextPhase, searchFunction.apply(buildSearchRequest())); } catch (Exception e) { nextPhase.onFailure(e); } } @Override protected void doNextBulk(BulkRequest request, ActionListener<BulkResponse> nextPhase) { assert latch != null; try { latch.await(); } catch (InterruptedException e) { throw new IllegalStateException(e); } nextPhase.onResponse(bulkFunction.apply(request)); } @Override protected void doSaveState(IndexerState state, Map<String, Object> position, Runnable next) { assert state == IndexerState.STARTED || state == IndexerState.INDEXING || state == IndexerState.STOPPED; saveStateCheck.accept(state, position); next.run(); } @Override protected void onAbort() { assert false : "onAbort should not be called"; } @Override protected void onFailure(Exception exc) { 
failureConsumer.accept(exc); } @Override protected void onFinish(ActionListener<Void> listener) { listener.onResponse(null); } } public void testStarted() throws Exception { RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap()); AtomicReference<IndexerState> state = new AtomicReference<>(IndexerState.STOPPED); final ThreadPool threadPool = new TestThreadPool(getTestName()); try { RollupIndexer indexer = new EmptyRollupIndexer(threadPool, job, state, null); indexer.start(); assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); assertBusy(() -> assertThat(indexer.getState(), equalTo(IndexerState.STARTED))); assertThat(indexer.getStats().getNumInvocations(), equalTo(1L)); assertThat(indexer.getStats().getNumPages(), equalTo(1L)); assertThat(indexer.getStats().getIndexFailures(), equalTo(0L)); assertThat(indexer.getStats().getSearchFailures(), equalTo(0L)); assertThat(indexer.getStats().getSearchTotal(), equalTo(1L)); assertThat(indexer.getStats().getIndexTotal(), equalTo(0L)); assertTrue(indexer.abort()); } finally { ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); } } public void testIndexing() throws Exception { RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap()); AtomicReference<IndexerState> state = new AtomicReference<>(IndexerState.STOPPED); final ThreadPool threadPool = new TestThreadPool(getTestName()); try { AtomicBoolean isFinished = new AtomicBoolean(false); DelayedEmptyRollupIndexer indexer = new DelayedEmptyRollupIndexer(threadPool, job, state, null) { @Override protected void onFinish(ActionListener<Void> listener) { super.onFinish(ActionListener.wrap(r -> { listener.onResponse(r); }, listener::onFailure)); } @Override protected void doSaveState(IndexerState state, Map<String, Object> position, Runnable next) { super.doSaveState(state, position, () -> { if (state == 
IndexerState.STARTED) { isFinished.set(true); } next.run(); }); } }; final CountDownLatch latch = indexer.newLatch(); indexer.start(); assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); latch.countDown(); assertBusy(() -> assertTrue(isFinished.get())); assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); assertThat(indexer.getStats().getNumInvocations(), equalTo(1L)); assertThat(indexer.getStats().getNumPages(), equalTo(1L)); assertThat(indexer.getStats().getIndexFailures(), equalTo(0L)); assertThat(indexer.getStats().getSearchFailures(), equalTo(0L)); assertThat(indexer.getStats().getSearchTotal(), equalTo(1L)); assertTrue(indexer.abort()); } finally { ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); } } public void testStateChangeMidTrigger() { AtomicReference<IndexerState> state = new AtomicReference<>(IndexerState.STOPPED); RollupIndexerJobStats stats = new RollupIndexerJobStats(); RollupIndexerJobStats spyStats = spy(stats); RollupJobConfig config = ConfigTestHelpers.randomRollupJobConfig(random()); // We call stats before a final state check, so this allows us to flip the state // and make sure the appropriate error is thrown Answer<?> forwardAndChangeState = invocation -> { invocation.callRealMethod(); state.set(IndexerState.STOPPED); return null; }; doAnswer(forwardAndChangeState).when(spyStats).incrementNumInvocations(1L); RollupJob job = new RollupJob(config, Collections.emptyMap()); final ThreadPool threadPool = new TestThreadPool(getTestName()); try { AtomicBoolean isFinished = new AtomicBoolean(false); DelayedEmptyRollupIndexer indexer = new DelayedEmptyRollupIndexer(threadPool, job, state, null, spyStats) { @Override protected void onFinish(ActionListener<Void> listener) { super.onFinish(ActionListener.wrap(r -> { listener.onResponse(r); isFinished.set(true); }, listener::onFailure)); } }; 
final CountDownLatch latch = indexer.newLatch(); indexer.start(); assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); assertFalse(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); assertThat(indexer.getState(), equalTo(IndexerState.STOPPED)); latch.countDown(); assertThat(indexer.getState(), equalTo(IndexerState.STOPPED)); assertThat(indexer.getStats().getNumInvocations(), equalTo(1L)); assertThat(indexer.getStats().getNumPages(), equalTo(0L)); assertTrue(indexer.abort()); } finally { ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); } } public void testAbortDuringSearch() throws Exception { final AtomicBoolean aborted = new AtomicBoolean(false); RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap()); AtomicReference<IndexerState> state = new AtomicReference<>(IndexerState.STOPPED); final ThreadPool threadPool = new TestThreadPool(getTestName()); final CountDownLatch latch = new CountDownLatch(1); try { EmptyRollupIndexer indexer = new EmptyRollupIndexer(threadPool, job, state, null) { @Override protected void onFinish(ActionListener<Void> listener) { fail("Should not have called onFinish"); } @Override protected void doNextSearch(long waitTimeInNanos, ActionListener<SearchResponse> nextPhase) { try { latch.await(); } catch (InterruptedException e) { throw new IllegalStateException(e); } state.set(IndexerState.ABORTING); // <-- Set to aborting right before we return the (empty) search response super.doNextSearch(waitTimeInNanos, nextPhase); } @Override protected void onAbort() { aborted.set(true); } }; indexer.start(); assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); latch.countDown(); assertBusy(() -> assertTrue(aborted.get())); assertThat(indexer.getState(), equalTo(IndexerState.ABORTING)); assertThat(indexer.getStats().getNumInvocations(), 
equalTo(1L)); assertThat(indexer.getStats().getNumPages(), equalTo(0L)); assertThat(indexer.getStats().getSearchFailures(), equalTo(0L)); } finally { ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); } } public void testAbortAfterCompletion() throws Exception { final AtomicBoolean aborted = new AtomicBoolean(false); RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap()); AtomicReference<IndexerState> state = new AtomicReference<>(IndexerState.STOPPED); final ThreadPool threadPool = new TestThreadPool(getTestName()); // Don't use the indexer's latch because we completely change doNextSearch() final CountDownLatch doNextSearchLatch = new CountDownLatch(1); try { DelayedEmptyRollupIndexer indexer = new DelayedEmptyRollupIndexer(threadPool, job, state, null) { @Override protected void onAbort() { aborted.set(true); } @Override protected void doNextSearch(long waitTimeInNanos, ActionListener<SearchResponse> nextPhase) { try { doNextSearchLatch.await(); } catch (InterruptedException e) { throw new IllegalStateException(e); } InternalComposite composite = mock(InternalComposite.class); when(composite.getBuckets()).thenAnswer(invocation -> { // Abort immediately before we are attempting to finish the job because the response // was empty state.set(IndexerState.ABORTING); return List.of(); }); when(composite.getName()).thenReturn(AGGREGATION_NAME); ActionListener.respondAndRelease( nextPhase, SearchResponseUtils.response(SearchHits.EMPTY_WITH_TOTAL_HITS) .aggregations(InternalAggregations.from(composite)) .build() ); } @Override protected void doSaveState(IndexerState state, Map<String, Object> position, Runnable next) { assertTrue(state.equals(IndexerState.ABORTING)); next.run(); } }; indexer.start(); assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); 
doNextSearchLatch.countDown(); assertBusy(() -> assertTrue(aborted.get())); assertThat(indexer.getState(), equalTo(IndexerState.ABORTING)); assertThat(indexer.getStats().getNumInvocations(), equalTo(1L)); assertThat(indexer.getStats().getNumPages(), equalTo(1L)); } finally { ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); } } public void testStopIndexing() throws Exception { RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap()); AtomicReference<IndexerState> state = new AtomicReference<>(IndexerState.STOPPED); final ThreadPool threadPool = new TestThreadPool(getTestName()); try { DelayedEmptyRollupIndexer indexer = new DelayedEmptyRollupIndexer(threadPool, job, state, null); final CountDownLatch latch = indexer.newLatch(); assertFalse(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); assertThat(indexer.getState(), equalTo(IndexerState.STOPPED)); assertThat(indexer.stop(), equalTo(IndexerState.STOPPED)); assertThat(indexer.start(), equalTo(IndexerState.STARTED)); assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); assertThat(indexer.stop(), equalTo(IndexerState.STOPPING)); latch.countDown(); assertBusy(() -> assertThat(indexer.getState(), equalTo(IndexerState.STOPPED))); assertTrue(indexer.abort()); } finally { ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); } } public void testAbortIndexing() throws Exception { RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap()); AtomicReference<IndexerState> state = new AtomicReference<>(IndexerState.STOPPED); final ThreadPool threadPool = new TestThreadPool(getTestName()); try { final AtomicBoolean isAborted = new AtomicBoolean(false); DelayedEmptyRollupIndexer indexer = new DelayedEmptyRollupIndexer(threadPool, job, state, null) { @Override protected void onAbort() { isAborted.set(true); } }; final CountDownLatch latch = 
indexer.newLatch(); indexer.start(); assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); assertFalse(indexer.abort()); assertThat(indexer.getState(), equalTo(IndexerState.ABORTING)); latch.countDown(); assertBusy(() -> assertTrue(isAborted.get())); assertFalse(indexer.abort()); } finally { ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); } } public void testAbortStarted() { RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap()); AtomicReference<IndexerState> state = new AtomicReference<>(IndexerState.STOPPED); final ThreadPool threadPool = new TestThreadPool(getTestName()); try { final AtomicBoolean isAborted = new AtomicBoolean(false); DelayedEmptyRollupIndexer indexer = new DelayedEmptyRollupIndexer(threadPool, job, state, null) { @Override protected void onAbort() { isAborted.set(true); } }; indexer.newLatch(); indexer.start(); assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); assertTrue(indexer.abort()); assertThat(indexer.getState(), equalTo(IndexerState.ABORTING)); assertFalse(isAborted.get()); assertThat(indexer.getStats().getNumInvocations(), equalTo(0L)); assertThat(indexer.getStats().getNumPages(), equalTo(0L)); assertFalse(indexer.abort()); } finally { ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); } } public void testMultipleJobTriggering() throws Exception { RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap()); AtomicReference<IndexerState> state = new AtomicReference<>(IndexerState.STOPPED); final ThreadPool threadPool = new TestThreadPool(getTestName()); try { DelayedEmptyRollupIndexer indexer = new DelayedEmptyRollupIndexer(threadPool, job, state, null); indexer.start(); for (int i = 0; i < 5; i++) { final CountDownLatch latch = indexer.newLatch(); assertThat(indexer.getState(), 
equalTo(IndexerState.STARTED)); // This may take more than one attempt due to a cleanup/transition phase // that happens after state change to STARTED (`isJobFinishing`). assertBusy(() -> assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis()))); assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); assertFalse(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); latch.countDown(); assertBusy(() -> assertThat(indexer.getState(), equalTo(IndexerState.STARTED))); assertThat(indexer.getStats().getNumPages(), equalTo((long) i + 1)); } final CountDownLatch latch = indexer.newLatch(); assertBusy(() -> assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis()))); assertThat(indexer.stop(), equalTo(IndexerState.STOPPING)); assertThat(indexer.getState(), Matchers.either(Matchers.is(IndexerState.STOPPING)).or(Matchers.is(IndexerState.STOPPED))); latch.countDown(); assertBusy(() -> assertThat(indexer.getState(), equalTo(IndexerState.STOPPED))); assertTrue(indexer.abort()); assertThat(indexer.getStats().getNumInvocations(), greaterThanOrEqualTo(6L)); } finally { ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); } } // Tests how we handle unknown keys that come back from composite agg, e.g. 
if we add support for new types but don't // deal with it everyhwere public void testUnknownKey() throws Exception { AtomicBoolean isFinished = new AtomicBoolean(false); RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap()); AtomicReference<IndexerState> state = new AtomicReference<>(IndexerState.STOPPED); Function<SearchRequest, SearchResponse> searchFunction = searchRequest -> { InternalComposite.InternalBucket bucket = mock(InternalComposite.InternalBucket.class); when(bucket.getKey()).thenReturn(Map.of("foo", "bar")); when(bucket.getDocCount()).thenReturn(1L); when(bucket.getAggregations()).thenReturn(InternalAggregations.EMPTY); InternalComposite composite = mock(InternalComposite.class); when(composite.getBuckets()).thenReturn(List.of(bucket)); when(composite.getName()).thenReturn(RollupField.NAME); return SearchResponseUtils.response(SearchHits.EMPTY_WITH_TOTAL_HITS) .aggregations(InternalAggregations.from(composite)) .build(); }; Function<BulkRequest, BulkResponse> bulkFunction = bulkRequest -> new BulkResponse(new BulkItemResponse[0], 100); Consumer<Exception> failureConsumer = e -> { assertThat(e.getMessage(), equalTo("Could not identify key in agg [foo]")); }; BiConsumer<IndexerState, Map<String, Object>> stateCheck = (i, p) -> { if (i == IndexerState.STARTED) { isFinished.set(true); } }; final ThreadPool threadPool = new TestThreadPool(getTestName()); try { NonEmptyRollupIndexer indexer = new NonEmptyRollupIndexer( threadPool, job, state, null, searchFunction, bulkFunction, failureConsumer, stateCheck ); final CountDownLatch latch = indexer.newLatch(1); indexer.start(); assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); latch.countDown(); assertBusy(() -> assertTrue(isFinished.get())); // Despite failure in bulk, we should move back to STARTED and wait to try 
again on next trigger assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); assertThat(indexer.getStats().getNumInvocations(), equalTo(1L)); assertThat(indexer.getStats().getNumPages(), equalTo(1L)); // There should be one recorded failure assertThat(indexer.getStats().getSearchFailures(), equalTo(1L)); // Note: no docs were indexed assertThat(indexer.getStats().getOutputDocuments(), equalTo(0L)); assertTrue(indexer.abort()); } finally { ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); } } // Tests to make sure that errors in search do not interfere with shutdown procedure public void testFailureWhileStopping() throws Exception { AtomicBoolean isFinished = new AtomicBoolean(false); RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap()); AtomicReference<IndexerState> state = new AtomicReference<>(IndexerState.STOPPED); Function<SearchRequest, SearchResponse> searchFunction = searchRequest -> { InternalComposite.InternalBucket bucket = mock(InternalComposite.InternalBucket.class); when(bucket.getKey()).thenAnswer(invocation -> { state.set(IndexerState.STOPPING); // <- Force a stop so we can see how error + non-INDEXING state is handled return Collections.singletonMap("foo", "bar"); // This will throw an exception }); when(bucket.getDocCount()).thenReturn(1L); when(bucket.getAggregations()).thenReturn(InternalAggregations.EMPTY); InternalComposite composite = mock(InternalComposite.class); when(composite.getBuckets()).thenReturn(List.of(bucket)); when(composite.getName()).thenReturn(RollupField.NAME); return SearchResponseUtils.response(SearchHits.EMPTY_WITH_TOTAL_HITS) .aggregations(InternalAggregations.from(composite)) .build(); }; Function<BulkRequest, BulkResponse> bulkFunction = bulkRequest -> new BulkResponse(new BulkItemResponse[0], 100); Consumer<Exception> failureConsumer = e -> { assertThat(e.getMessage(), equalTo("Could not identify key in agg [foo]")); }; BiConsumer<IndexerState, Map<String, 
Object>> doSaveStateCheck = (indexerState, position) -> { isFinished.set(true); }; final ThreadPool threadPool = new TestThreadPool(getTestName()); try { NonEmptyRollupIndexer indexer = new NonEmptyRollupIndexer( threadPool, job, state, null, searchFunction, bulkFunction, failureConsumer, doSaveStateCheck ); final CountDownLatch latch = indexer.newLatch(1); indexer.start(); assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); latch.countDown(); assertBusy(() -> assertTrue(isFinished.get())); // Despite failure in processing keys, we should continue moving to STOPPED assertThat(indexer.getState(), equalTo(IndexerState.STOPPED)); assertThat(indexer.getStats().getNumInvocations(), equalTo(1L)); assertThat(indexer.getStats().getNumPages(), equalTo(1L)); // There should be one recorded failure assertThat(indexer.getStats().getSearchFailures(), equalTo(1L)); // Note: no docs were indexed assertThat(indexer.getStats().getOutputDocuments(), equalTo(0L)); assertTrue(indexer.abort()); } finally { ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); } } public void testSearchShardFailure() throws Exception { AtomicBoolean isFinished = new AtomicBoolean(false); RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap()); AtomicReference<IndexerState> state = new AtomicReference<>(IndexerState.STOPPED); Function<SearchRequest, SearchResponse> searchFunction = searchRequest -> { throw new SearchPhaseExecutionException( "query", "Partial shards failure", new ShardSearchFailure[] { new ShardSearchFailure(new RuntimeException("failed")) } ); }; Function<BulkRequest, BulkResponse> bulkFunction = bulkRequest -> new BulkResponse(new BulkItemResponse[0], 100); Consumer<Exception> failureConsumer = e -> { assertThat(e.getMessage(), startsWith("Partial shards failure")); }; BiConsumer<IndexerState, 
Map<String, Object>> stateCheck = (i, p) -> { if (i == IndexerState.STARTED) { isFinished.set(true); } }; final ThreadPool threadPool = new TestThreadPool(getTestName()); try { NonEmptyRollupIndexer indexer = new NonEmptyRollupIndexer( threadPool, job, state, null, searchFunction, bulkFunction, failureConsumer, stateCheck ); final CountDownLatch latch = indexer.newLatch(1); indexer.start(); assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); latch.countDown(); assertBusy(() -> assertTrue(isFinished.get())); // Despite failure in bulk, we should move back to STARTED and wait to try again on next trigger assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); assertThat(indexer.getStats().getNumInvocations(), equalTo(1L)); // There should be one recorded failure assertThat(indexer.getStats().getSearchFailures(), equalTo(1L)); // Note: no pages processed, no docs were indexed assertThat(indexer.getStats().getNumPages(), equalTo(0L)); assertThat(indexer.getStats().getOutputDocuments(), equalTo(0L)); assertTrue(indexer.abort()); } finally { ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); } } public void testBulkFailure() throws Exception { AtomicBoolean isFinished = new AtomicBoolean(false); RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap()); AtomicReference<IndexerState> state = new AtomicReference<>(IndexerState.STOPPED); Function<SearchRequest, SearchResponse> searchFunction = searchRequest -> { InternalComposite.InternalBucket bucket = mock(InternalComposite.InternalBucket.class); when(bucket.getKey()).thenReturn(Map.of("foo.terms", "bar")); when(bucket.getDocCount()).thenReturn(1L); when(bucket.getAggregations()).thenReturn(InternalAggregations.EMPTY); InternalComposite composite = mock(InternalComposite.class); 
when(composite.getName()).thenReturn(RollupField.NAME); when(composite.getBuckets()).thenReturn(List.of(bucket)); return SearchResponseUtils.response(SearchHits.EMPTY_WITH_TOTAL_HITS) .aggregations(InternalAggregations.from(composite)) .build(); }; Function<BulkRequest, BulkResponse> bulkFunction = bulkRequest -> { fail("Should not have reached bulk function"); return null; }; Consumer<Exception> failureConsumer = e -> { assertThat(e.getMessage(), equalTo("failed")); }; BiConsumer<IndexerState, Map<String, Object>> stateCheck = (i, p) -> { if (i == IndexerState.STARTED) { isFinished.set(true); } }; final ThreadPool threadPool = new TestThreadPool(getTestName()); try { NonEmptyRollupIndexer indexer = new NonEmptyRollupIndexer( threadPool, job, state, null, searchFunction, bulkFunction, failureConsumer, stateCheck ) { @Override protected void doNextBulk(BulkRequest request, ActionListener<BulkResponse> nextPhase) { nextPhase.onFailure(new RuntimeException("failed")); } }; final CountDownLatch latch = indexer.newLatch(1); indexer.start(); assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); latch.countDown(); assertBusy(() -> assertTrue(isFinished.get())); // Despite failure in bulk, we should move back to STARTED and wait to try again on next trigger assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); assertThat(indexer.getStats().getNumInvocations(), equalTo(1L)); assertThat(indexer.getStats().getNumPages(), equalTo(1L)); // There should be one recorded failure assertThat(indexer.getStats().getIndexFailures(), equalTo(1L)); // Note: no docs were indexed assertThat(indexer.getStats().getOutputDocuments(), equalTo(0L)); assertTrue(indexer.abort()); } finally { ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); } } }
NonEmptyRollupIndexer
java
apache__camel
core/camel-util/src/main/java/org/apache/camel/util/ReflectionHelper.java
{ "start": 2515, "end": 3122 }
class ____) */ public static void doWithClasses(Class<?> clazz, ClassCallback cc) throws IllegalArgumentException { // and then nested classes Class<?>[] classes = clazz.getDeclaredClasses(); for (Class<?> aClazz : classes) { try { cc.doWith(aClazz); } catch (IllegalAccessException ex) { throw new IllegalStateException("Shouldn't be illegal to access class '" + aClazz.getName() + "': " + ex); } } } /** * Invoke the given callback on all fields in the target class, going up the
itself
java
eclipse-vertx__vert.x
vertx-core/src/test/java/io/vertx/tests/net/NetTest.java
{ "start": 92912, "end": 164088 }
class ____ extends AbstractVerticle { Context ctx; @Override public void start() { ctx = context; if (worker) { assertTrue(ctx.isWorkerContext()); } else { assertTrue(ctx.isEventLoopContext()); } Thread thr = Thread.currentThread(); server = vertx.createNetServer(); server.connectHandler(sock -> { sock.handler(buff -> { sock.write(buff); }); assertSame(ctx, context); if (!worker) { assertSame(thr, Thread.currentThread()); } }); server.listen(testAddress).onComplete(onSuccess(ar -> { assertSame(ctx, context); if (!worker) { assertSame(thr, Thread.currentThread()); } client = vertx.createNetClient(new NetClientOptions()); client.connect(testAddress).onComplete(onSuccess(sock -> { assertSame(ctx, context); if (!worker) { assertSame(thr, Thread.currentThread()); } Buffer buff = TestUtils.randomBuffer(10000); sock.write(buff); Buffer brec = Buffer.buffer(); sock.handler(rec -> { assertSame(ctx, context); if (!worker) { assertSame(thr, Thread.currentThread()); } brec.appendBuffer(rec); if (brec.length() == buff.length()) { testComplete(); } }); })); })); } } MyVerticle verticle = new MyVerticle(); vertx.deployVerticle(verticle, new DeploymentOptions().setThreadingModel(worker ? 
ThreadingModel.WORKER : ThreadingModel.EVENT_LOOP)); await(); } @Test public void testContexts() throws Exception { int numConnections = 10; CountDownLatch serverLatch = new CountDownLatch(numConnections); AtomicReference<Context> serverConnectContext = new AtomicReference<>(); server.connectHandler(sock -> { // Server connect handler should always be called with same context Context serverContext = Vertx.currentContext(); if (serverConnectContext.get() != null) { assertSame(serverConnectContext.get(), serverContext); } else { serverConnectContext.set(serverContext); } serverLatch.countDown(); }); CountDownLatch listenLatch = new CountDownLatch(1); AtomicReference<Context> listenContext = new AtomicReference<>(); server.listen(testAddress).onComplete(onSuccess(v -> { listenContext.set(Vertx.currentContext()); listenLatch.countDown(); })); awaitLatch(listenLatch); Set<Context> contexts = ConcurrentHashMap.newKeySet(); AtomicInteger connectCount = new AtomicInteger(); CountDownLatch clientLatch = new CountDownLatch(1); // Each connect should be in its own context for (int i = 0; i < numConnections; i++) { Context context = ((VertxInternal)vertx).createEventLoopContext(); context.runOnContext(v -> { client.connect(testAddress).onComplete(conn -> { contexts.add(Vertx.currentContext()); if (connectCount.incrementAndGet() == numConnections) { assertEquals(numConnections, contexts.size()); clientLatch.countDown(); } }); }); } awaitLatch(clientLatch); awaitLatch(serverLatch); // Close should be in own context server.close().onComplete(onSuccess(ar -> { // Context closeContext = Vertx.currentContext(); // assertFalse(contexts.contains(closeContext)); assertFalse(contexts.contains(listenContext.get())); assertSame(serverConnectContext.get(), listenContext.get()); testComplete(); })); await(); } @Test public void testMultipleServerClose() { this.server = vertx.createNetServer(); // We assume the endHandler and the close completion handler are invoked in the same context task 
ThreadLocal stack = new ThreadLocal(); stack.set(true); server.close().onComplete(ar1 -> { // assertNull(stack.get()); // assertTrue(Vertx.currentContext().isEventLoopContext()); server.close().onComplete(ar2 -> { server.close().onComplete(ar3 -> { testComplete(); }); }); }); await(); } @Test public void testInWorker() { waitFor(2); vertx.deployVerticle(new AbstractVerticle() { @Override public void start() throws Exception { assertTrue(Vertx.currentContext().isWorkerContext()); assertTrue(Context.isOnWorkerThread()); final Context context = Vertx.currentContext(); NetServer server1 = vertx.createNetServer(); server1.connectHandler(conn -> { assertTrue(Vertx.currentContext().isWorkerContext()); assertTrue(Context.isOnWorkerThread()); assertSame(context, Vertx.currentContext()); conn.handler(buff -> { assertTrue(Vertx.currentContext().isWorkerContext()); assertTrue(Context.isOnWorkerThread()); assertSame(context, Vertx.currentContext()); conn.write(buff); }); conn.closeHandler(v -> { assertTrue(Vertx.currentContext().isWorkerContext()); assertTrue(Context.isOnWorkerThread()); assertSame(context, Vertx.currentContext()); complete(); }); conn.endHandler(v -> { assertTrue(Vertx.currentContext().isWorkerContext()); assertTrue(Context.isOnWorkerThread()); assertSame(context, Vertx.currentContext()); complete(); }); }).listen(testAddress).onComplete(onSuccess(s -> { assertTrue(Vertx.currentContext().isWorkerContext()); assertTrue(Context.isOnWorkerThread()); assertSame(context, Vertx.currentContext()); NetClient client = vertx.createNetClient(); client.connect(testAddress).onComplete(onSuccess(res -> { assertTrue(Vertx.currentContext().isWorkerContext()); assertTrue(Context.isOnWorkerThread()); assertSame(context, Vertx.currentContext()); res.write("foo"); res.handler(buff -> { assertTrue(Vertx.currentContext().isWorkerContext()); assertTrue(Context.isOnWorkerThread()); assertSame(context, Vertx.currentContext()); res.close(); }); })); })); } }, new 
DeploymentOptions().setThreadingModel(ThreadingModel.WORKER)); await(); } @Test public void testAsyncWriteIsFlushed() throws Exception { // Test that if we do a concurrent write (by another thread) during a channel read operation // the channel will be flished after the concurrent write int num = 128; Buffer expected = TestUtils.randomBuffer(1024); ExecutorService exec = Executors.newFixedThreadPool(1); try { server.connectHandler(so -> { so.handler(buff -> { assertEquals(256, buff.length()); CountDownLatch latch = new CountDownLatch(1); exec.execute(() -> { latch.countDown(); so.write(expected); }); try { awaitLatch(latch); } catch (InterruptedException e) { fail(e); } }); }); startServer(); AtomicInteger done = new AtomicInteger(); for (int i = 0;i < num;i++) { client.connect(testAddress).onComplete(onSuccess(so -> { so.handler(buff -> { assertEquals(expected, buff); so.close(); int val = done.incrementAndGet(); if (val == num) { testComplete(); } }); so.write(TestUtils.randomBuffer(256)); })); } await(); } finally { exec.shutdown(); } } private File setupFile(String testDir, String fileName, String content) throws Exception { File file = new File(testDir, fileName); if (file.exists()) { file.delete(); } BufferedWriter out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(file), "UTF-8")); out.write(content); out.close(); return file; } @Test public void testServerWorkerMissBufferWhenBufferArriveBeforeConnectCallback() throws Exception { int size = getOptions().getWorkerPoolSize(); List<Context> workers = createWorkers(size + 1); CountDownLatch latch1 = new CountDownLatch(workers.size() - 1); workers.get(0).runOnContext(v -> { NetServer server = vertx.createNetServer(); server.connectHandler(so -> { so.handler(buf -> { assertEquals("hello", buf.toString()); testComplete(); }); }); server.listen(testAddress).onComplete(onSuccess(v2 -> { // Create a one second worker starvation for (int i = 1; i < workers.size(); i++) { workers.get(i).runOnContext(v3 
-> { latch1.countDown(); try { Thread.sleep(1000); } catch (InterruptedException ignore) { } }); } })); }); awaitLatch(latch1); NetClient client = vertx.createNetClient(); client.connect(testAddress).onComplete(onSuccess(so -> { so.write(Buffer.buffer("hello")); })); await(); } @Test public void testClientWorkerMissBufferWhenBufferArriveBeforeConnectCallback() throws Exception { int size = getOptions().getWorkerPoolSize(); List<Context> workers = createWorkers(size + 1); CountDownLatch latch1 = new CountDownLatch(1); CountDownLatch latch2 = new CountDownLatch(size); NetServer server = vertx.createNetServer(); server.connectHandler(so -> { try { awaitLatch(latch2); } catch (InterruptedException e) { fail(e.getMessage()); return; } so.write(Buffer.buffer("hello")); }); server.listen(testAddress).onComplete(onSuccess(v -> { latch1.countDown(); })); awaitLatch(latch1); workers.get(0).runOnContext(v -> { NetClient client = vertx.createNetClient(); client.connect(testAddress).onComplete(onSuccess(so -> { so.handler(buf -> { assertEquals("hello", buf.toString()); testComplete(); }); })); // Create a one second worker starvation for (int i = 1; i < workers.size(); i++) { workers.get(i).runOnContext(v2 -> { latch2.countDown(); try { Thread.sleep(1000); } catch (InterruptedException ignore) { } }); } }); await(); } @Test public void testHostVerificationHttpsNotMatching() { server.close(); NetServerOptions options = new NetServerOptions() .setPort(1234) .setHost("localhost") .setSsl(true) .setKeyCertOptions(new JksOptions().setPath("tls/mim-server-keystore.jks").setPassword("wibble")); NetServer server = vertx.createNetServer(options); NetClientOptions clientOptions = new NetClientOptions() .setSsl(true) .setTrustAll(true) .setHostnameVerificationAlgorithm("HTTPS"); NetClient client = vertx.createNetClient(clientOptions); server.connectHandler(sock -> { }); server .listen() .compose(v -> client.connect(1234, "localhost")) .onComplete(onFailure(err -> { //Should not be able to 
connect testComplete(); })); await(); } // this test sets HostnameVerification but also trustAll, it fails if hostname is // incorrect but does not verify the certificate validity @Test public void testHostVerificationHttpsMatching() { server.close(); NetServerOptions options = new NetServerOptions() .setPort(1234) .setHost("localhost") .setSsl(true) .setKeyCertOptions(new JksOptions().setPath("tls/server-keystore.jks").setPassword("wibble")); NetServer server = vertx.createNetServer(options); NetClientOptions clientOptions = new NetClientOptions() .setSsl(true) .setTrustAll(true) .setHostnameVerificationAlgorithm("HTTPS"); NetClient client = vertx.createNetClient(clientOptions); server.connectHandler(sock -> { }); server.listen().onComplete(onSuccess(v -> { client.connect(1234, "localhost").onComplete(onSuccess(so -> { //Should be able to connect testComplete(); })); })); await(); } @Test public void testClientMissingHostnameVerificationAlgorithm1() { testClientMissingHostnameVerificationAlgorithm(client -> client.connect(1234, "localhost")); } @Test public void testClientMissingHostnameVerificationAlgorithm2() { testClientMissingHostnameVerificationAlgorithm(client -> client.connect(new ConnectOptions() .setHost("localhost") .setPort(1234) .setSsl(true))); } @Test public void testClientMissingHostnameVerificationAlgorithm3() { testClientMissingHostnameVerificationAlgorithm(client -> client.connect(new ConnectOptions() .setHost("localhost") .setPort(1234)).compose(so -> so.upgradeToSsl(new ClientSSLOptions()))); } private void testClientMissingHostnameVerificationAlgorithm(Function<NetClient, Future<?>> consumer) { server.close(); NetServerOptions options = new NetServerOptions() .setPort(1234) .setHost("localhost") .setSsl(true) .setKeyCertOptions(new JksOptions().setPath("tls/server-keystore.jks").setPassword("wibble")); NetServer server = vertx.createNetServer(options); NetClientOptions clientOptions = new NetClientOptions() .setSsl(true) .setTrustAll(true); 
NetClient client = vertx.createNetClient(clientOptions); server.connectHandler(sock -> { }); server.listen().onComplete(onSuccess(v -> { consumer.apply(client).onComplete(onFailure(err -> { assertTrue(err.getMessage().contains("Missing hostname verification algorithm")); testComplete(); })); })); await(); } @Test public void testMissingClientSSLOptions() throws Exception { server = vertx.createNetServer(new NetServerOptions() .setSsl(true) .setKeyCertOptions(Cert.SERVER_JKS.get())) .connectHandler(conn -> { fail(); }); startServer(testAddress); client.connect(new ConnectOptions().setPort(8443).setHost("localhost").setSsl(true)).onComplete(onFailure(err -> { assertTrue(err.getMessage().contains("ClientSSLOptions")); testComplete(); })); await(); } @Test public void testReuseDefaultClientSSLOptions() throws Exception { waitFor(2); server = vertx.createNetServer(new NetServerOptions() .setSsl(true) .setKeyCertOptions(Cert.SERVER_JKS.get())) .connectHandler(conn -> { complete(); }); startServer(testAddress); client = vertx.createNetClient(new NetClientOptions().setTrustAll(true).setHostnameVerificationAlgorithm("")); client.connect(new ConnectOptions().setRemoteAddress(testAddress).setSsl(true)).onComplete(onSuccess(so -> { complete(); })); await(); } @Test public void testNoLogging() throws Exception { TestLoggerFactory factory = testLogging(); assertFalse(factory.hasName("io.netty.handler.logging.LoggingHandler")); } @Test public void testServerLogging() throws Exception { server.close(); server = vertx.createNetServer(new NetServerOptions().setLogActivity(true)); TestLoggerFactory factory = testLogging(); assertTrue(factory.hasName("io.netty.handler.logging.LoggingHandler")); } @Test public void testClientLogging() throws Exception { client.close(); client = vertx.createNetClient(new NetClientOptions().setLogActivity(true)); TestLoggerFactory factory = testLogging(); assertTrue(factory.hasName("io.netty.handler.logging.LoggingHandler")); } private TestLoggerFactory 
testLogging() throws Exception { return TestUtils.testLogging(() -> { server.connectHandler(so -> { so.end(Buffer.buffer("fizzbuzz")); }); server.listen(testAddress).onComplete(onSuccess(v1 -> { client.connect(testAddress).onComplete(onSuccess(so -> { so.closeHandler(v2 -> testComplete()); })); })); await(); }); } /** * test socks5 proxy for accessing arbitrary server port. */ @Test public void testWithSocks5Proxy() throws Exception { NetClientOptions clientOptions = new NetClientOptions() .setProxyOptions(new ProxyOptions().setType(ProxyType.SOCKS5).setPort(11080)); NetClient client = vertx.createNetClient(clientOptions); server.connectHandler(sock -> { }); proxy = new SocksProxy(); proxy.start(vertx); server.listen(1234, "localhost") .onComplete(onSuccess(v -> { client.connect(1234, "localhost").onComplete(onSuccess(so -> { // make sure we have gone through the proxy assertEquals("localhost:1234", proxy.getLastUri()); testComplete(); })); })); await(); } /** * test socks5 proxy for accessing arbitrary server port with authentication. */ @Test public void testWithSocks5ProxyAuth() throws Exception { NetClientOptions clientOptions = new NetClientOptions() .setProxyOptions(new ProxyOptions().setType(ProxyType.SOCKS5).setPort(11080) .setUsername("username").setPassword("username")); NetClient client = vertx.createNetClient(clientOptions); server.connectHandler(sock -> { }); proxy = new SocksProxy().username("username"); proxy.start(vertx); server.listen(1234, "localhost").onComplete(onSuccess(c -> { client.connect(1234, "localhost").onComplete(onSuccess(so -> { testComplete(); })); })); await(); } /** * test socks5 proxy when accessing ssl server port with correct cert. 
*/ @Test public void testConnectSSLWithSocks5Proxy() throws Exception { server.close(); NetServerOptions options = new NetServerOptions() .setPort(1234) .setHost("localhost") .setSsl(true) .setKeyCertOptions(Cert.SERVER_JKS_ROOT_CA.get()); server = vertx.createNetServer(options); NetClientOptions clientOptions = new NetClientOptions() .setHostnameVerificationAlgorithm("HTTPS") .setSsl(true) .setProxyOptions(new ProxyOptions().setType(ProxyType.SOCKS5).setHost("127.0.0.1").setPort(11080)) .setTrustOptions(Trust.SERVER_JKS_ROOT_CA.get()); NetClient client = vertx.createNetClient(clientOptions); server.connectHandler(sock -> { }); proxy = new SocksProxy(); proxy.start(vertx); server.listen().onComplete(onSuccess(v -> { client.connect(1234, "localhost").onComplete(onSuccess(so -> { testComplete(); })); })); await(); } /** * test socks5 proxy for accessing ssl server port with upgradeToSsl. * https://github.com/eclipse/vert.x/issues/1602 */ @Test public void testUpgradeSSLWithSocks5Proxy() throws Exception { server.close(); NetServerOptions options = new NetServerOptions() .setPort(1234) .setHost("localhost") .setSsl(true) .setKeyCertOptions(Cert.SERVER_JKS_ROOT_CA.get()); server = vertx.createNetServer(options); NetClientOptions clientOptions = new NetClientOptions() .setHostnameVerificationAlgorithm("HTTPS") .setProxyOptions(new ProxyOptions().setType(ProxyType.SOCKS5).setHost("127.0.0.1").setPort(11080)) .setTrustOptions(Trust.SERVER_JKS_ROOT_CA.get()); NetClient client = vertx.createNetClient(clientOptions); server.connectHandler(sock -> { }); proxy = new SocksProxy(); proxy.start(vertx); server.listen().onComplete(onSuccess(v -> { client.connect(1234, "localhost").onComplete(onSuccess(ns -> { ns.upgradeToSsl().onComplete(onSuccess(v2 -> { testComplete(); })); })); })); await(); } /** * test http connect proxy for accessing a arbitrary server port * note that this may not work with a "real" proxy since there are usually access rules defined * that limit the target 
host and ports (e.g. connecting to localhost or to port 25 may not be allowed) */ @Test public void testWithHttpConnectProxy() throws Exception { NetClientOptions clientOptions = new NetClientOptions() .setProxyOptions(new ProxyOptions().setType(ProxyType.HTTP).setPort(13128)); NetClient client = vertx.createNetClient(clientOptions); server.connectHandler(sock -> { }); proxy = new HttpProxy(); proxy.start(vertx); server.listen(1234, "localhost").onComplete(onSuccess(ar -> { client.connect(1234, "localhost").onComplete(onSuccess(so -> { // make sure we have gone through the proxy assertEquals("localhost:1234", proxy.getLastUri()); testComplete(); })); })); await(); } /** * test socks4a proxy for accessing arbitrary server port. */ @Test public void testWithSocks4aProxy() throws Exception { NetClientOptions clientOptions = new NetClientOptions() .setProxyOptions(new ProxyOptions().setType(ProxyType.SOCKS4).setPort(11080)); NetClient client = vertx.createNetClient(clientOptions); server.connectHandler(sock -> { }); proxy = new Socks4Proxy(); proxy.start(vertx); server.listen(1234, "localhost").onComplete(onSuccess(v -> { client.connect(1234, "localhost").onComplete(onSuccess(so -> { // make sure we have gone through the proxy assertEquals("localhost:1234", proxy.getLastUri()); testComplete(); })); })); await(); } /** * test socks4a proxy for accessing arbitrary server port using username auth. 
*/ @Test public void testWithSocks4aProxyAuth() throws Exception { NetClientOptions clientOptions = new NetClientOptions() .setProxyOptions(new ProxyOptions().setType(ProxyType.SOCKS4).setPort(11080) .setUsername("username")); NetClient client = vertx.createNetClient(clientOptions); server.connectHandler(sock -> { }); proxy = new Socks4Proxy().username("username"); proxy.start(vertx); server.listen(1234, "localhost").onComplete(onSuccess(v -> { client.connect(1234, "localhost").onComplete(onSuccess(so -> { // make sure we have gone through the proxy assertEquals("localhost:1234", proxy.getLastUri()); testComplete(); })); })); await(); } /** * test socks4a proxy for accessing arbitrary server port using an already resolved address. */ @Test public void testWithSocks4LocalResolver() throws Exception { NetClientOptions clientOptions = new NetClientOptions() .setProxyOptions(new ProxyOptions().setType(ProxyType.SOCKS4).setPort(11080)); NetClient client = vertx.createNetClient(clientOptions); server.connectHandler(sock -> { }); proxy = new Socks4Proxy().start(vertx); server.listen(1234, "localhost").onComplete(onSuccess(v -> { client.connect(1234, "127.0.0.1").onComplete(onSuccess(so -> { // make sure we have gone through the proxy assertEquals("127.0.0.1:1234", proxy.getLastUri()); testComplete(); })); })); await(); } @Test public void testNonProxyHosts() throws Exception { NetClientOptions clientOptions = new NetClientOptions() .addNonProxyHost("example.com") .setProxyOptions(new ProxyOptions().setType(ProxyType.HTTP).setPort(13128)); NetClient client = vertx.createNetClient(clientOptions); server.connectHandler(sock -> { }); proxy = new HttpProxy(); proxy.start(vertx); server.listen(1234, "localhost").onComplete(onSuccess(s -> { client.connect(1234, "example.com").onComplete(onSuccess(so -> { assertNull(proxy.getLastUri()); testComplete(); })); })); await(); } @Test public void testTLSHostnameCertCheckCorrect() { NetClientOptions options = new NetClientOptions() 
// Tail of the preceding hostname-verification test (its method header is outside this
// chunk): trust the root CA, verify the host name ("HTTPS" algorithm) and expect the
// plain->TLS upgrade to succeed against a cert issued for DEFAULT_HTTPS_HOST.
.setHostnameVerificationAlgorithm("HTTPS")
      .setTrustOptions(Trust.SERVER_JKS_ROOT_CA.get());
    client.close();
    client = vertx.createNetClient(options);
    server.close();
    server = vertx.createNetServer(new NetServerOptions().setSsl(true).setPort(DEFAULT_HTTPS_PORT)
      .setKeyCertOptions(Cert.SERVER_JKS_ROOT_CA.get()));
    server.connectHandler(netSocket -> netSocket.close()).listen().onComplete(onSuccess(v -> {
      client.connect(DEFAULT_HTTPS_PORT, DEFAULT_HTTPS_HOST).onComplete(onSuccess(ns -> {
        ns.upgradeToSsl().onComplete(onSuccess(v2 -> {
          testComplete();
        }));
      }));
    }));
    await();
  }

  // Same setup but the client connects by raw IP ("127.0.0.1"), so "HTTPS" hostname
  // verification cannot match the certificate and the upgrade must fail.
  @Test
  public void testTLSHostnameCertCheckIncorrect() {
    server.close();
    server = vertx.createNetServer(new NetServerOptions().setSsl(true).setPort(DEFAULT_HTTPS_PORT)
      .setKeyCertOptions(Cert.SERVER_JKS_ROOT_CA.get()));
    server.connectHandler(netSocket -> netSocket.close()).listen().onComplete(onSuccess(v -> {
      NetClientOptions options = new NetClientOptions()
        .setHostnameVerificationAlgorithm("HTTPS")
        .setTrustOptions(Trust.SERVER_JKS_ROOT_CA.get());
      NetClient client = vertx.createNetClient(options);
      client.connect(DEFAULT_HTTPS_PORT, "127.0.0.1").onComplete(onSuccess(ns -> {
        ns.upgradeToSsl().onComplete(onFailure(err -> {
          testComplete();
        }));
      }));
    }));
    await();
  }

  /**
   * Test that NetSocket.upgradeToSsl() should fail the handler if no TLS configuration was set.
   * Variant using the (port, host) connect overload.
   */
  @Test
  public void testUpgradeToSSLIncorrectClientOptions1() {
    NetClient client = vertx.createNetClient();
    try {
      testUpgradeToSSLIncorrectClientOptions(() -> client.connect(DEFAULT_HTTPS_PORT, "127.0.0.1"));
    } finally {
      client.close();
    }
  }

  /**
   * Test that NetSocket.upgradeToSsl() should fail the handler if no TLS configuration was set.
   * Variant using the ConnectOptions connect overload.
   */
  @Test
  public void testUpgradeToSSLIncorrectClientOptions2() {
    NetClient client = vertx.createNetClient();
    try {
      testUpgradeToSSLIncorrectClientOptions(() -> client.connect(new ConnectOptions().setPort(DEFAULT_HTTPS_PORT).setHost("127.0.0.1")));
    } finally {
      client.close();
    }
  }

  /**
   * Test that NetSocket.upgradeToSsl() should fail the handler if no TLS configuration was set.
   *
   * @param connect supplies the (un-configured) client connection to attempt the upgrade on
   */
  private void testUpgradeToSSLIncorrectClientOptions(Supplier<Future<NetSocket>> connect) {
    server.close();
    server = vertx.createNetServer(new NetServerOptions().setSsl(true).setPort(DEFAULT_HTTPS_PORT)
      .setKeyCertOptions(Cert.SERVER_JKS_ROOT_CA.get()));
    server.connectHandler(ns -> {}).listen().onComplete(onSuccess(v -> {
      connect.get().onComplete(onSuccess(ns -> {
        ns.upgradeToSsl().onComplete(onFailure(err -> {
          // Must fail with a descriptive error rather than hang or throw elsewhere
          assertTrue(err.getMessage().contains("Missing SSL options"));
          testComplete();
        }));
      }));
    }));
    await();
  }

  // Per-connection ClientSSLOptions must override the client-level TLS configuration
  // (which here trusts the wrong cert and therefore fails): both the upgradeToSsl(sslOptions)
  // overload and ConnectOptions.setSslOptions(...) paths are exercised.
  @Test
  public void testOverrideClientSSLOptions() {
    waitFor(4);
    server.close();
    client.close();
    client = vertx.createNetClient(new NetClientOptions()
      .setTrustOptions(Trust.CLIENT_JKS.get()));
    server = vertx.createNetServer(new NetServerOptions().setSsl(true).setPort(DEFAULT_HTTPS_PORT)
      .setKeyCertOptions(Cert.SERVER_JKS.get()));
    server.connectHandler(ns -> {
      complete();
    }).listen().onComplete(onSuccess(v -> {
      client.connect(DEFAULT_HTTPS_PORT, DEFAULT_HTTPS_HOST).onComplete(onSuccess(ns -> {
        // First upgrade uses the client-level (wrong) trust and must fail ...
        ns.upgradeToSsl().onComplete(onFailure(err -> {
          ClientSSLOptions sslOptions = new ClientSSLOptions().setHostnameVerificationAlgorithm("").setTrustOptions(Trust.SERVER_JKS.get());
          // ... then the same options passed per-connection must succeed
          client.connect(DEFAULT_HTTPS_PORT, DEFAULT_HTTPS_HOST).onComplete(onSuccess(ns2 -> {
            ns2.upgradeToSsl(sslOptions).onComplete(onSuccess(v2 -> {
              complete();
            }));
          }));
          client.connect(new ConnectOptions().setPort(DEFAULT_HTTPS_PORT).setHost(DEFAULT_HTTPS_HOST).setSslOptions(sslOptions)).onComplete(onSuccess(ns2 -> {
            ns2.upgradeToSsl().onComplete(onSuccess(v2 -> {
              complete();
            }));
          }));
        }));
      }));
    }));
    await();
  }

  // A client bound to a specific local address must be observed by the server as the
  // remote peer's host.
  @Test
  public void testClientLocalAddress() {
    String expectedAddress = TestUtils.loopbackAddress();
    NetClientOptions clientOptions = new NetClientOptions().setLocalAddress(expectedAddress);
    client.close();
    client = vertx.createNetClient(clientOptions);
    server.connectHandler(sock -> {
      assertEquals(expectedAddress, sock.remoteAddress().host());
      sock.close();
    });
    server.listen(1234, "localhost").onComplete(onSuccess(v -> {
      client.connect(1234, "localhost").onComplete(onSuccess(socket -> {
        socket.closeHandler(v2 -> {
          testComplete();
        });
      }));
    }));
    await();
  }

  // Client deployed in a WORKER verticle: all socket callbacks must run on a worker thread.
  @Test
  public void testWorkerClient() throws Exception {
    String expected = TestUtils.randomAlphaString(2000);
    server.connectHandler(so -> {
      so.write(expected);
      so.close();
    });
    startServer();
    vertx.deployVerticle(new AbstractVerticle() {
      @Override
      public void start() throws Exception {
        NetClient client = vertx.createNetClient();
        client.connect(testAddress).onComplete(onSuccess(so -> {
          assertTrue(Context.isOnWorkerThread());
          Buffer received = Buffer.buffer();
          so.handler(buff -> {
            assertTrue(Context.isOnWorkerThread());
            received.appendBuffer(buff);
          });
          so.closeHandler(v -> {
            assertEquals(expected, received.toString());
            testComplete();
          });
          // NOTE(review): presumably blocks the worker deliberately so the data/close
          // events are delivered only after this handler returns — confirm intent
          try {
            Thread.sleep(500);
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
          }
        }));
      }
    }, new DeploymentOptions().setThreadingModel(ThreadingModel.WORKER));
    await();
  }

  // Server deployed in a WORKER verticle: all socket callbacks must run on a worker thread.
  @Test
  public void testWorkerServer() {
    String expected = TestUtils.randomAlphaString(2000);
    vertx.deployVerticle(new AbstractVerticle() {
      @Override
      public void start(Promise<Void> startPromise) throws Exception {
        NetServer server = vertx.createNetServer();
        server.connectHandler(so -> {
          assertTrue(Context.isOnWorkerThread());
          Buffer received = Buffer.buffer();
          so.handler(buffer -> {
            assertTrue(Context.isOnWorkerThread());
            received.appendBuffer(buffer);
          });
          so.closeHandler(v -> {
            assertTrue(Context.isOnWorkerThread());
            assertEquals(expected, received.toString());
            testComplete();
          });
          // NOTE(review): same deliberate worker-thread block as in testWorkerClient — confirm
          try {
            Thread.sleep(500);
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
          }
        });
        server.listen(testAddress).<Void>mapEmpty().onComplete(startPromise);
      }
    }, new DeploymentOptions().setThreadingModel(ThreadingModel.WORKER)).onComplete(onSuccess(v -> {
      client.connect(testAddress).onComplete(onSuccess(so -> {
        so.write(expected);
        so.close();
      }));
    }));
    await();
  }

  @Test
  public void testNetServerInternal() throws Exception {
    testNetServerInternal_(new HttpClientOptions(), false);
  }

  @Test
  public void testNetServerInternalTLS() throws Exception {
    server.close();
    server = vertx.createNetServer(new NetServerOptions()
      .setPort(1234)
      .setHost("localhost")
      .setSsl(true)
      .setKeyCertOptions(Cert.SERVER_JKS.get()));
    testNetServerInternal_(new HttpClientOptions()
      .setSsl(true)
      .setTrustOptions(Trust.SERVER_JKS.get()), true);
  }

  // Drops an HttpServerCodec into the server socket's Netty pipeline (before the Vert.x
  // "handler") and answers an HTTP request by writing a raw Netty message through
  // NetSocketInternal; an ordinary Vert.x HttpClient then validates the response body.
  private void testNetServerInternal_(HttpClientOptions clientOptions, boolean expectSSL) throws Exception {
    waitFor(2);
    server.connectHandler(so -> {
      NetSocketInternal internal = (NetSocketInternal) so;
      assertEquals(expectSSL, internal.isSsl());
      ChannelHandlerContext chctx = internal.channelHandlerContext();
      ChannelPipeline pipeline = chctx.pipeline();
      pipeline.addBefore("handler", "http", new HttpServerCodec());
      // With the codec installed, only decoded messages should arrive — never raw buffers
      internal.handler(buff -> fail());
      AtomicBoolean last = new AtomicBoolean();
      internal.messageHandler(obj -> {
        last.set(obj instanceof LastHttpContent);
        ReferenceCountUtil.release(obj);
      });
      internal.readCompletionHandler(v1 -> {
        assertTrue(last.get());
        DefaultFullHttpResponse response = new DefaultFullHttpResponse(
          HttpVersion.HTTP_1_1,
          HttpResponseStatus.OK,
          Unpooled.copiedBuffer("Hello World", StandardCharsets.UTF_8));
        response.headers().set(HttpHeaderNames.CONTENT_LENGTH, "11");
        internal.writeMessage(response).onComplete(onSuccess(v2 -> complete()));
      });
    });
    startServer(SocketAddress.inetSocketAddress(1234, "localhost"));
    HttpClient client = vertx.createHttpClient(clientOptions);
    client.request(io.vertx.core.http.HttpMethod.GET, 1234, "localhost", "/somepath")
    .compose(req -> req
        .send()
        .expecting(HttpResponseExpectation.SC_OK)
        .compose(HttpClientResponse::body)).onComplete(onSuccess(body -> {
      assertEquals("Hello World", body.toString());
      complete();
    }));
    await();
  }

  @Test
  public void testNetClientInternal() throws Exception {
    testNetClientInternal_(new HttpServerOptions().setHost("localhost").setPort(1234), false);
  }

  @Test
  public void testNetClientInternalTLS() throws Exception {
    client.close();
    client = vertx.createNetClient(new NetClientOptions()
      .setSsl(true)
      .setHostnameVerificationAlgorithm("")
      .setTrustOptions(Trust.SERVER_JKS.get()));
    testNetClientInternal_(new HttpServerOptions()
      .setHost("localhost")
      .setPort(1234)
      .setSsl(true)
      .setKeyCertOptions(Cert.SERVER_JKS.get()), true);
  }

  // This test is here to cover a WildFly use case for passing in an SSLContext for which there are no
  // configuration options.
  // This is currently done by casting to NetClientImpl and calling setSuppliedSSLContext().
  @Test
  public void testNetClientInternalTLSWithSuppliedSSLContext() throws Exception {
    client.close();
    // Build a JDK SSLContext by hand from the test trust store ...
    Buffer trust = vertx.fileSystem().readFileBlocking(Trust.SERVER_JKS.get().getPath());
    TrustManagerFactory tmFactory;
    try (InputStream trustStoreStream = new ByteArrayInputStream(trust.getBytes())) {
      KeyStore trustStore = KeyStore.getInstance("jks");
      trustStore.load(trustStoreStream, Trust.SERVER_JKS.get().getPassword().toCharArray());
      tmFactory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
      tmFactory.init(trustStore);
    }
    SSLContext sslContext = SSLContext.getInstance("TLS");
    sslContext.init(null, tmFactory.getTrustManagers(), null);
    // ... and supply it wholesale through a custom SslContextFactory
    client = vertx.createNetClient(new NetClientOptions().setSsl(true).setHostnameVerificationAlgorithm("")
      .setSslEngineOptions(new JdkSSLEngineOptions() {
        @Override
        public SslContextFactory sslContextFactory() {
          return () -> new JdkSslContext(
            sslContext,
            true,
            null,
            IdentityCipherSuiteFilter.INSTANCE,
            ApplicationProtocolConfig.DISABLED,
            io.netty.handler.ssl.ClientAuth.NONE,
            null,
            false);
        }
      }));
    testNetClientInternal_(new HttpServerOptions()
      .setHost("localhost")
      .setPort(1234)
      .setSsl(true)
      .setKeyCertOptions(Cert.SERVER_JKS.get()), true);
  }

  // Mirror of testNetServerInternal_: an HttpClientCodec is inserted into the client
  // socket's pipeline and a raw Netty HTTP request is written; the decoded HttpResponse
  // and LastHttpContent are validated message by message.
  private void testNetClientInternal_(HttpServerOptions options, boolean expectSSL) throws Exception {
    waitFor(2);
    HttpServer server = vertx.createHttpServer(options);
    server.requestHandler(req -> {
      req.response().end("Hello World");
    });
    CountDownLatch latch = new CountDownLatch(1);
    server.listen().onComplete(onSuccess(v -> {
      latch.countDown();
    }));
    awaitLatch(latch);
    client.connect(1234, "localhost").onComplete(onSuccess(so -> {
      NetSocketInternal soInt = (NetSocketInternal) so;
      assertEquals(expectSSL, soInt.isSsl());
      ChannelHandlerContext chctx = soInt.channelHandlerContext();
      ChannelPipeline pipeline = chctx.pipeline();
      pipeline.addBefore("handler", "http", new HttpClientCodec());
      AtomicInteger status = new AtomicInteger();
      soInt.handler(buff -> fail());
      soInt.messageHandler(obj -> {
        switch (status.getAndIncrement()) {
          case 0:
            assertTrue(obj instanceof HttpResponse);
            HttpResponse resp = (HttpResponse) obj;
            assertEquals(200, resp.status().code());
            break;
          case 1:
            assertTrue(obj instanceof LastHttpContent);
            ByteBuf content = ((LastHttpContent) obj).content();
            assertTrue(content.isDirect());
            assertEquals(1, content.refCnt());
            String val = content.toString(StandardCharsets.UTF_8);
            assertTrue(content.release());
            assertEquals("Hello World", val);
            complete();
            break;
          default:
            fail();
        }
      });
      soInt.writeMessage(new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/somepath")).onComplete(onSuccess(v -> complete()));
    }));
    await();
  }

  // Buffers handed to the Vert.x handler are heap-based copies: releasing must be a no-op
  // (refCnt stays 1) on both server and client sides of an echo round-trip.
  @Test
  public void testNetSocketInternalBuffer() throws Exception {
    server.connectHandler(so -> {
      NetSocketInternal soi = (NetSocketInternal) so;
      soi.handler(msg -> {
        ByteBuf byteBuf = ((BufferInternal) msg).getByteBuf();
        assertFalse(byteBuf.isDirect());
        assertEquals(1, byteBuf.refCnt());
        assertFalse(byteBuf.release());
        assertEquals(1, byteBuf.refCnt());
        soi.write(msg); // echo back
      });
    });
    startServer();
    client.connect(testAddress).onComplete(onSuccess(so -> {
      NetSocketInternal soi = (NetSocketInternal) so;
      soi.write(Buffer.buffer("Hello World"));
      soi.handler(msg -> {
        ByteBuf byteBuf = ((BufferInternal) msg).getByteBuf();
        assertFalse(byteBuf.isDirect());
        assertEquals(1, byteBuf.refCnt());
        assertFalse(byteBuf.release());
        assertEquals(1, byteBuf.refCnt());
        assertEquals("Hello World", msg.toString());
        testComplete();
      });
    }));
    await();
  }

  // With a messageHandler installed, raw direct ByteBufs are delivered and the receiver
  // owns the reference: writeMessage releases it (refCnt drops to 0), and the client must
  // release what it reads.
  @Test
  public void testNetSocketInternalDirectBuffer() throws Exception {
    waitFor(2);
    server.connectHandler(so -> {
      NetSocketInternal soi = (NetSocketInternal) so;
      soi.messageHandler(msg -> {
        ByteBuf byteBuf = (ByteBuf) msg;
        assertTrue(byteBuf.isDirect());
        assertEquals(1, byteBuf.refCnt());
        soi.writeMessage(msg).onSuccess(v -> {
          assertEquals(0, byteBuf.refCnt());
          complete();
        });
      });
    });
    startServer();
    client.connect(testAddress).onComplete(onSuccess(so -> {
      NetSocketInternal soi = (NetSocketInternal) so;
      soi.write(Buffer.buffer("Hello World"));
      // soi.messageHandler(msg -> fail("Unexpected"));
      soi.messageHandler(msg -> {
        ByteBuf byteBuf = (ByteBuf) msg;
        assertTrue(byteBuf.isDirect());
        assertEquals(1, byteBuf.refCnt());
        assertEquals("Hello World", byteBuf.toString(StandardCharsets.UTF_8));
        assertTrue(byteBuf.release());
        assertEquals(0, byteBuf.refCnt());
        complete();
      });
    }));
    await();
  }

  // Removing the VertxHandler from the pipeline must unregister the event-bus write
  // handler (the request fails) while the raw channel can still be closed via the context.
  @Test
  public void testNetSocketInternalRemoveVertxHandler() throws Exception {
    client.close();
    client = vertx.createNetClient(new NetClientOptions().setConnectTimeout(1000).setRegisterWriteHandler(true));
    server.connectHandler(so -> {
      so.closeHandler(v -> testComplete());
    });
    startServer();
    client.connect(testAddress).onComplete(onSuccess(so -> {
      NetSocketInternal soi = (NetSocketInternal) so;
      String id = soi.writeHandlerID();
      ChannelHandlerContext ctx = soi.channelHandlerContext();
      ChannelPipeline pipeline = ctx.pipeline();
      pipeline.remove(VertxHandler.class);
      vertx.eventBus().request(id, "test").onComplete(onFailure(what -> {
        ctx.close();
      }));
    }));
    await();
  }

  // listen() fails (invalid key path) — close() afterwards must still succeed.
  @Test
  public void testCloseCompletionHandlerNotCalledWhenActualServerFailed() {
    server.close();
    server = vertx.createNetServer(
        new NetServerOptions()
          .setSsl(true)
          .setKeyCertOptions(new PemKeyCertOptions().setKeyPath("invalid")))
      .connectHandler(c -> {
      });
    server.listen(10000).onComplete(onFailure(err -> {
      server.close().onComplete(onSuccess(v -> {
        testComplete();
      }));
    }));
    await();
  }

  // We only do it for server, as client uses the same NetSocket implementation
  @Test
  public void testServerNetSocketShouldBeClosedWhenTheClosedHandlerIsCalled() throws Exception {
    waitFor(2);
    server.connectHandler(so -> {
      // CheckingSender keeps writing; once close/end fire, further writes must be rejected
      CheckingSender sender = new CheckingSender(vertx.getOrCreateContext(), 2, so);
      sender.send();
      so.closeHandler(v -> {
        Throwable failure = sender.close();
        if (failure != null) {
          fail(failure);
        } else {
          complete();
        }
      });
      so.endHandler(v -> {
        Throwable failure = sender.close();
        if (failure != null) {
          fail(failure);
        } else {
          complete();
        }
      });
    });
    startServer();
    client.connect(testAddress).onComplete(onSuccess(so -> {
      vertx.setTimer(1000, id -> {
        so.close();
      });
    }));
    await();
  }

  // A user event fired through the Netty pipeline must reach NetSocketInternal.eventHandler.
  @Test
  public void testNetSocketInternalEvent() throws Exception {
    server.connectHandler(so -> {
      NetSocketInternal soi = (NetSocketInternal) so;
      Object expectedEvent = new Object();
      soi.eventHandler(event -> {
        assertSame(expectedEvent, event);
        soi.close();
      });
      ChannelPipeline pipeline = soi.channelHandlerContext().pipeline();
      pipeline.addFirst(new ChannelHandlerAdapter() {
        @Override
        public void handlerAdded(ChannelHandlerContext ctx) throws Exception {
          super.handlerAdded(ctx);
          ctx.executor().schedule(() -> {
            ctx.fireUserEventTriggered(expectedEvent);
          }, 10, TimeUnit.MILLISECONDS);
        }
      });
    });
    startServer();
    client.connect(testAddress).onComplete(onSuccess(so -> {
      so.closeHandler(v -> testComplete());
    }));
    await();
  }

  @Test
  public void testServerWithIdleTimeoutSendChunkedFile() throws Exception {
    testIdleTimeoutSendChunkedFile(true);
  }

  @Test
  public void testClientWithIdleTimeoutSendChunkedFile() throws Exception {
    testIdleTimeoutSendChunkedFile(false);
  }

  // sendFile on a connection with a 200ms idle timeout: the receiver throttles with
  // pause/resume so the transfer outlives the timeout; data keeps flowing, so the idle
  // timeout must not fire as long as bytes move.
  private void testIdleTimeoutSendChunkedFile(boolean idleOnServer) throws Exception {
    Assume.assumeFalse(TRANSPORT == Transport.IO_URING);
    int expected = 16 * 1024 * 1024; // We estimate this will take more than 200ms to transfer with a 1ms pause in chunks
    File sent = TestUtils.tmpFile(".dat", expected);
    server.close();
    AtomicReference<AsyncResult<Void>> sendResult = new AtomicReference<>();
    AtomicReference<Integer> remaining = new AtomicReference<>();
    AtomicLong now = new AtomicLong();
    // Runs once both sides have reported; tolerates a partially-failed send as long as
    // bytes were actually dropped (remaining > 0)
    Runnable testChecker = () -> {
      if (sendResult.get() != null && remaining.get() != null) {
        if (remaining.get() > 0) {
          // It might fail sometimes
          assertTrue(sendResult.get().failed());
        } else {
          assertTrue(sendResult.get().succeeded());
          assertTrue(System.currentTimeMillis() - now.get() > 200);
        }
        testComplete();
      }
    };
    Consumer<NetSocket> sender = so -> {
      so.sendFile(sent.getAbsolutePath()).onComplete(ar -> {
        sendResult.set(ar);
        testChecker.run();
      });
    };
    Consumer<NetSocket> receiver = so -> {
      now.set(System.currentTimeMillis());
      int[] len = { 0 };
      so.handler(buff -> {
        len[0] += buff.length();
        // Throttle: 1ms pause per chunk stretches the transfer past the idle timeout
        so.pause();
        vertx.setTimer(1, id -> {
          so.resume();
        });
      });
      so.exceptionHandler(this::fail);
      so.endHandler(v -> {
        remaining.set(expected - len[0]);
        testChecker.run();
      });
    };
    server = vertx
      .createNetServer(new NetServerOptions().setIdleTimeout(200).setIdleTimeoutUnit(TimeUnit.MILLISECONDS))
      .connectHandler((idleOnServer ? sender : receiver)::accept);
    startServer();
    client.close();
    client = vertx.createNetClient(new NetClientOptions().setIdleTimeout(200).setIdleTimeoutUnit(TimeUnit.MILLISECONDS));
    client.connect(testAddress).onComplete(onSuccess(idleOnServer ?
      receiver : sender));
    await();
  }

  // Server half-closes after writing 8 chunks; a paused client must see closeHandler first
  // (nothing delivered yet), and only after resume receive all buffered data followed by
  // endHandler.
  @Test
  public void testHalfCloseCallsEndHandlerAfterBuffersAreDelivered() throws Exception {
    // Synchronized on purpose
    StringBuffer expected = new StringBuffer();
    server.connectHandler(so -> {
      Context ctx = vertx.getOrCreateContext();
      for (int i = 0; i < 8; i++) {
        int val = i;
        ctx.runOnContext(v -> {
          String chunk = "chunk-" + val + "\r\n";
          so.write(chunk);
          expected.append(chunk);
        });
      }
      ctx.runOnContext(v -> {
        // This will half close the connection
        so.close();
      });
    });
    startServer();
    vertx.runOnContext(v1 -> {
      client.connect(testAddress, "localhost").onComplete(onSuccess(so -> {
        so.pause();
        AtomicBoolean closed = new AtomicBoolean();
        AtomicBoolean ended = new AtomicBoolean();
        Buffer received = Buffer.buffer();
        so.handler(received::appendBuffer);
        so.closeHandler(v2 -> {
          assertFalse(ended.get());
          assertEquals(Buffer.buffer(), received); // nothing delivered while paused
          closed.set(true);
          so.resume();
        });
        so.endHandler(v -> {
          assertEquals(expected.toString(), received.toString());
          assertTrue(closed.get());
          ended.set(true);
          testComplete();
        });
      }));
    });
    await();
  }

  @Test
  public void testSslHandshakeTimeoutHappenedOnServer() throws Exception {
    testSslHandshakeTimeoutHappened(false, false);
  }

  @Test
  public void testSslHandshakeTimeoutHappenedOnSniServer() throws Exception {
    testSslHandshakeTimeoutHappened(false, true);
  }

  // Exactly one side enables SSL, so the handshake can never complete; the SSL side must
  // fail with the 200ms handshake-timeout error (reported via the server exception handler
  // or the client connect failure, depending on onClient).
  public void testSslHandshakeTimeoutHappened(boolean onClient, boolean sni) throws Exception {
    server.close();
    client.close();
    // set up a normal server to force the SSL handshake time out in client
    NetServerOptions serverOptions = new NetServerOptions()
      .setSsl(!onClient)
      .setSslHandshakeTimeout(200)
      .setKeyCertOptions(Cert.SERVER_JKS.get())
      .setSni(sni)
      .setSslHandshakeTimeoutUnit(TimeUnit.MILLISECONDS);
    server = vertx.createNetServer(serverOptions);
    NetClientOptions clientOptions = new NetClientOptions()
      .setSsl(onClient)
      .setTrustAll(true)
      .setSslHandshakeTimeout(200)
      .setSslHandshakeTimeoutUnit(TimeUnit.MILLISECONDS);
    client =
      vertx.createNetClient(clientOptions);
    Consumer<Throwable> checker = err -> {
      assertTrue(err instanceof SSLException);
      assertEquals("handshake timed out after 200ms", err.getMessage());
      testComplete();
    };
    if (!onClient) {
      server.exceptionHandler(checker::accept);
    }
    server.connectHandler(s -> {
    }).listen(testAddress).onComplete(onSuccess(s -> {
      client.connect(testAddress).onComplete(ar -> {
        if (onClient) {
          assertTrue(ar.failed());
          checker.accept(ar.cause());
        }
      });
    }));
    await();
  }

  // Both sides SSL with matching trust: a tight 100ms handshake timeout must still be
  // enough for the handshake to complete.
  @Test
  public void testSslHandshakeTimeoutNotHappened() throws Exception {
    server.close();
    client.close();
    NetServerOptions serverOptions = new NetServerOptions()
      .setSsl(true)
      .setKeyCertOptions(Cert.SERVER_JKS.get())
      // set 100ms to let the connection established
      .setSslHandshakeTimeout(100)
      .setSslHandshakeTimeoutUnit(TimeUnit.MILLISECONDS);
    server = vertx.createNetServer(serverOptions);
    NetClientOptions clientOptions = new NetClientOptions()
      .setSsl(true)
      .setTrustAll(true)
      .setHostnameVerificationAlgorithm("");
    client = vertx.createNetClient(clientOptions);
    server.connectHandler(s -> {
    }).listen(testAddress).onComplete(onSuccess(v -> {
      client.connect(testAddress).onComplete(onSuccess(so -> {
        testComplete();
      }));
    }));
    await();
  }

  // upgradeToSsl against a plain (non-SSL) server: the client-side handshake must fail
  // with the 200ms timeout error.
  @Test
  public void testSslHandshakeTimeoutHappenedWhenUpgradeSsl() {
    server.close();
    client.close();
    // set up a normal server to force the SSL handshake time out in client
    NetServerOptions serverOptions = new NetServerOptions()
      .setSsl(false);
    server = vertx.createNetServer(serverOptions);
    NetClientOptions clientOptions = new NetClientOptions()
      .setSsl(false)
      .setTrustAll(true)
      .setHostnameVerificationAlgorithm("")
      .setSslHandshakeTimeout(200)
      .setSslHandshakeTimeoutUnit(TimeUnit.MILLISECONDS);
    client = vertx.createNetClient(clientOptions);
    server.connectHandler(s -> {
    }).listen(testAddress).onComplete(onSuccess(v -> {
      client.connect(testAddress).onComplete(onSuccess(socket -> {
        assertFalse(socket.isSsl());
        socket.upgradeToSsl().onComplete(onFailure(err -> {
          assertTrue(err instanceof SSLException);
          assertEquals("handshake timed out after 200ms", err.getMessage());
          testComplete();
        }));
      }));
    }));
    await();
  }

  // startServer overloads: all delegate to the 3-arg variant, which listens on the given
  // context and blocks until the listen completes.
  protected void startServer(SocketAddress remoteAddress) throws Exception {
    startServer(remoteAddress, vertx.getOrCreateContext());
  }

  protected void startServer(SocketAddress remoteAddress, NetServer server) throws Exception {
    startServer(remoteAddress, vertx.getOrCreateContext(), server);
  }

  protected void startServer(SocketAddress remoteAddress, Context context) throws Exception {
    startServer(remoteAddress, context, server);
  }

  protected void startServer(SocketAddress remoteAddress, Context context, NetServer server) throws Exception {
    CountDownLatch latch = new CountDownLatch(1);
    context.runOnContext(v -> {
      server.listen(remoteAddress).onComplete(onSuccess(s -> latch.countDown()));
    });
    awaitLatch(latch);
  }

  // endHandler must not fire while the socket is paused; it fires only after closeHandler
  // resumed the socket.
  @Test
  public void testPausedDuringLastChunk() throws Exception {
    server.connectHandler(so -> {
      AtomicBoolean paused = new AtomicBoolean();
      paused.set(true);
      so.pause();
      so.closeHandler(v -> {
        paused.set(false);
        so.resume();
      });
      so.endHandler(v -> {
        assertFalse(paused.get());
        testComplete();
      });
    });
    startServer();
    client.connect(testAddress, "localhost").onComplete(onSuccess(so -> {
      so.close();
    }));
    await();
  }

  protected void startServer() throws Exception {
    startServer(testAddress, vertx.getOrCreateContext());
  }

  protected void startServer(NetServer server) throws Exception {
    startServer(testAddress, vertx.getOrCreateContext(), server);
  }

  protected void startServer(Context context) throws Exception {
    startServer(testAddress, context, server);
  }

  protected void startServer(Context context, NetServer server) throws Exception {
    startServer(testAddress, context, server);
  }

  // The transport must carry port and host of an unresolved InetSocketAddress through
  // the conversion to a Vert.x SocketAddress.
  @Test
  public void testUnresolvedSocketAddress() {
    InetSocketAddress a = InetSocketAddress.createUnresolved("localhost", 8080);
    SocketAddress converted = ((VertxInternal) vertx).transport().convert(a);
    assertEquals(8080, converted.port());
    assertEquals("localhost", converted.host());
  }

  // Exceptions thrown from data/end/close handlers must be routed to the context
  // exception handler, in the order the handlers fired.
  @Test
  public void testNetSocketHandlerFailureReportedToContextExceptionHandler() throws Exception {
    server.connectHandler(so -> {
      Context ctx = Vertx.currentContext();
      List<Throwable> reported = new ArrayList<>();
      ctx.exceptionHandler(reported::add);
      NullPointerException err1 = new NullPointerException();
      so.handler(buff -> {
        throw err1;
      });
      NullPointerException err2 = new NullPointerException();
      so.endHandler(v -> {
        throw err2;
      });
      NullPointerException err3 = new NullPointerException();
      so.closeHandler(v1 -> {
        // Schedule the assertion so it runs after err3 has been reported
        ctx.runOnContext(v2 -> {
          assertEquals(Arrays.asList(err1, err2, err3), reported);
          testComplete();
        });
        throw err3;
      });
    });
    startServer(testAddress);
    client.connect(testAddress).onComplete(onSuccess(so -> {
      so.write("ping");
      so.close();
    }));
    await();
  }

  // Proxy sends no protocol header at all: the 2s proxy-protocol timeout must close the
  // connection without ever invoking the connect handler.
  @Test
  public void testHAProxyProtocolIdleTimeout() throws Exception {
    HAProxy proxy = new HAProxy(testAddress, Buffer.buffer());
    proxy.start(vertx);
    server.close();
    server = vertx.createNetServer(new NetServerOptions()
        .setProxyProtocolTimeout(2)
        .setUseProxyProtocol(true))
      .connectHandler(u -> fail("Should not be called"));
    startServer();
    client.connect(proxy.getPort(), proxy.getHost())
      .onSuccess(so -> {
        so.closeHandler(event -> testComplete());
      })
      .onFailure(this::fail);
    try {
      await();
    } finally {
      proxy.stop();
    }
  }

  // Header arrives within the 100ms proxy-protocol timeout: the connection must be accepted.
  @Test
  public void testHAProxyProtocolIdleTimeoutNotHappened() throws Exception {
    waitFor(2);
    SocketAddress remote = SocketAddress.inetSocketAddress(56324, "192.168.0.1");
    SocketAddress local = SocketAddress.inetSocketAddress(443, "192.168.0.11");
    Buffer header = HAProxy.createVersion1TCP4ProtocolHeader(remote, local);
    HAProxy proxy = new HAProxy(testAddress, header);
    proxy.start(vertx);
    server.close();
    server = vertx.createNetServer(new NetServerOptions()
        .setProxyProtocolTimeout(100)
        .setProxyProtocolTimeoutUnit(TimeUnit.MILLISECONDS)
        .setUseProxyProtocol(true))
      .connectHandler(u -> complete());
    startServer();
    client.connect(proxy.getPort(),
        proxy.getHost())
      .onSuccess(so -> {
        so.close();
        complete();
      })
      .onFailure(this::fail);
    try {
      await();
    } finally {
      proxy.stop();
    }
  }

  // PROXY protocol combined with TLS: the server must expose the header's addresses via
  // remoteAddress()/localAddress(), while the `real` flag (true) still yields the actual
  // TCP peer addresses.
  @Test
  public void testHAProxyProtocolConnectSSL() throws Exception {
    assumeTrue(testAddress.isInetSocket());
    waitFor(2);
    SocketAddress remote = SocketAddress.inetSocketAddress(56324, "192.168.0.1");
    SocketAddress local = SocketAddress.inetSocketAddress(443, "192.168.0.11");
    Buffer header = HAProxy.createVersion1TCP4ProtocolHeader(remote, local);
    HAProxy proxy = new HAProxy(testAddress, header);
    proxy.start(vertx);
    server.close();
    NetServerOptions options = new NetServerOptions()
      .setSsl(true)
      .setKeyCertOptions(Cert.SERVER_JKS_ROOT_CA.get())
      .setUseProxyProtocol(true);
    server = vertx.createNetServer(options)
      .connectHandler(event -> {
        assertAddresses(remote, event.remoteAddress());
        assertAddresses(remote, event.remoteAddress(false));
        assertAddresses(proxy.getConnectionLocalAddress(), event.remoteAddress(true));
        assertAddresses(local, event.localAddress());
        assertAddresses(local, event.localAddress(false));
        assertAddresses(USE_DOMAIN_SOCKETS ?
          null : SocketAddress.inetSocketAddress(server.actualPort(), "127.0.0.1"), event.localAddress(true));
        complete();
      });
    startServer();
    NetClientOptions clientOptions = new NetClientOptions()
      .setHostnameVerificationAlgorithm("HTTPS")
      .setSsl(true)
      .setTrustOptions(Trust.SERVER_JKS_ROOT_CA.get());
    vertx.createNetClient(clientOptions)
      .connect(proxy.getPort(), proxy.getHost())
      .onSuccess(so -> {
        so.close();
        complete();
      })
      .onFailure(this::fail);
    try {
      await();
    } finally {
      proxy.stop();
    }
  }

  @Test
  public void testHAProxyProtocolVersion1TCP4() throws Exception {
    SocketAddress remote = SocketAddress.inetSocketAddress(56324, "192.168.0.1");
    SocketAddress local = SocketAddress.inetSocketAddress(443, "192.168.0.11");
    Buffer header = HAProxy.createVersion1TCP4ProtocolHeader(remote, local);
    testHAProxyProtocolAccepted(header, remote, local);
  }

  @Test
  public void testHAProxyProtocolVersion1TCP6() throws Exception {
    SocketAddress remote = SocketAddress.inetSocketAddress(56324, "2001:db8:85a3:0:0:8a2e:370:7334");
    SocketAddress local = SocketAddress.inetSocketAddress(443, "2001:db8:85a3:0:0:8a2e:370:7333");
    Buffer header = HAProxy.createVersion1TCP6ProtocolHeader(remote, local);
    testHAProxyProtocolAccepted(header, remote, local);
  }

  @Test
  public void testHAProxyProtocolVersion1Unknown() throws Exception {
    assumeTrue(testAddress.isInetSocket());
    Buffer header = HAProxy.createVersion1UnknownProtocolHeader();
    testHAProxyProtocolAccepted(header, null, null);
  }

  @Test
  public void testHAProxyProtocolVersion2TCP4() throws Exception {
    SocketAddress remote = SocketAddress.inetSocketAddress(56324, "192.168.0.1");
    SocketAddress local = SocketAddress.inetSocketAddress(443, "192.168.0.11");
    Buffer header = HAProxy.createVersion2TCP4ProtocolHeader(remote, local);
    testHAProxyProtocolAccepted(header, remote, local);
  }

  @Test
  public void testHAProxyProtocolVersion2TCP6() throws Exception {
    SocketAddress remote = SocketAddress.inetSocketAddress(56324, "2001:db8:85a3:0:0:8a2e:370:7334");
    SocketAddress
      local = SocketAddress.inetSocketAddress(443, "2001:db8:85a3:0:0:8a2e:370:7333");
    Buffer header = HAProxy.createVersion2TCP6ProtocolHeader(remote, local);
    testHAProxyProtocolAccepted(header, remote, local);
  }

  @Test
  public void testHAProxyProtocolVersion2UnixSocket() throws Exception {
    SocketAddress remote = SocketAddress.domainSocketAddress("/tmp/remoteSocket");
    SocketAddress local = SocketAddress.domainSocketAddress("/tmp/localSocket");
    Buffer header = HAProxy.createVersion2UnixStreamProtocolHeader(remote, local);
    testHAProxyProtocolAccepted(header, remote, local);
  }

  @Test
  public void testHAProxyProtocolVersion2Unknown() throws Exception {
    assumeTrue(testAddress.isInetSocket());
    Buffer header = HAProxy.createVersion2UnknownProtocolHeader();
    testHAProxyProtocolAccepted(header, null, null);
  }

  private void testHAProxyProtocolAccepted(Buffer header, SocketAddress remote, SocketAddress local) throws Exception {
    /*
     * In case remote / local is null then we will use the connected remote / local address from the proxy. This is needed
     * in order to test unknown protocol since we will use the actual connected addresses and ports.
     * This is only valid when testAddress is an InetSocketAddress. If testAddress is a DomainSocketAddress then
     * remoteAddress and localAddress are null.
     *
     * Have in mind that the proxy's connectionRemoteAddress is the server request local address and the proxy's
     * connectionLocalAddress is the server request remote address.
     */
    waitFor(2);
    HAProxy proxy = new HAProxy(testAddress, header);
    proxy.start(vertx);
    server.close();
    server = vertx.createNetServer(new NetServerOptions()
        .setUseProxyProtocol(true))
      .connectHandler(so -> {
        assertAddresses(remote == null && testAddress.isInetSocket() ?
          proxy.getConnectionLocalAddress() : remote, so.remoteAddress());
        assertAddresses(local == null && testAddress.isInetSocket() ?
          proxy.getConnectionRemoteAddress() : local, so.localAddress());
        complete();
      });
    startServer();
    client.connect(proxy.getPort(), proxy.getHost())
      .onSuccess(so -> {
        so.close();
        complete();
      })
      .onFailure(this::fail);
    try {
      await();
    } finally {
      proxy.stop();
    }
  }

  // Datagram-oriented PROXY protocol v2 headers (UDP4/UDP6/unix datagram) are not
  // supported for TCP servers and must be rejected.
  @Test
  public void testHAProxyProtocolVersion2UDP4() throws Exception {
    SocketAddress remote = SocketAddress.inetSocketAddress(56324, "192.168.0.1");
    SocketAddress local = SocketAddress.inetSocketAddress(443, "192.168.0.11");
    Buffer header = HAProxy.createVersion2UDP4ProtocolHeader(remote, local);
    testHAProxyProtocolRejected(header);
  }

  @Test
  public void testHAProxyProtocolVersion2UDP6() throws Exception {
    SocketAddress remote = SocketAddress.inetSocketAddress(56324, "2001:db8:85a3:0:0:8a2e:370:7334");
    SocketAddress local = SocketAddress.inetSocketAddress(443, "2001:db8:85a3:0:0:8a2e:370:7333");
    Buffer header = HAProxy.createVersion2UDP6ProtocolHeader(remote, local);
    testHAProxyProtocolRejected(header);
  }

  @Test
  public void testHAProxyProtocolVersion2UnixDataGram() throws Exception {
    SocketAddress remote = SocketAddress.domainSocketAddress("/tmp/remoteSocket");
    SocketAddress local = SocketAddress.domainSocketAddress("/tmp/localSocket");
    Buffer header = HAProxy.createVersion2UnixDatagramProtocolHeader(remote, local);
    testHAProxyProtocolRejected(header);
  }

  // A well-formed but unsupported header must surface UNSUPPORTED_PROTOCOL_EXCEPTION via
  // the server exception handler; the connect handler must never run.
  private void testHAProxyProtocolRejected(Buffer header) throws Exception {
    waitFor(2);
    HAProxy proxy = new HAProxy(testAddress, header);
    proxy.start(vertx);
    server.close();
    server = vertx.createNetServer(new NetServerOptions()
        .setUseProxyProtocol(true))
      .exceptionHandler(ex -> {
        if (ex.equals(HAProxyMessageCompletionHandler.UNSUPPORTED_PROTOCOL_EXCEPTION))
          complete();
      })
      .connectHandler(so -> fail());
    startServer();
    client.connect(proxy.getPort(), proxy.getHost())
      .onSuccess(so -> {
        so.close();
        complete();
      })
      .onFailure(this::fail);
    try {
      await();
    } finally {
      proxy.stop();
    }
  }

  @Test
  public void testHAProxyProtocolIllegalHeader1() throws Exception {
    testHAProxyProtocolIllegal(Buffer.buffer("This is an illegal HA PROXY protocol header\r\n"));
  }

  @Test
  public void testHAProxyProtocolIllegalHeader2() throws Exception {
    // IPv4 remote, IPv6 local — inconsistent address families make the v1 header illegal
    SocketAddress remote = SocketAddress.inetSocketAddress(56324, "192.168.0.1");
    SocketAddress local = SocketAddress.inetSocketAddress(443, "2001:db8:85a3:0:0:8a2e:370:7333");
    Buffer header = HAProxy.createVersion1TCP4ProtocolHeader(remote, local);
    testHAProxyProtocolIllegal(header);
  }

  // A malformed header must surface Netty's HAProxyProtocolException via the server
  // exception handler; the connect handler must never run.
  private void testHAProxyProtocolIllegal(Buffer header) throws Exception {
    waitFor(2);
    HAProxy proxy = new HAProxy(testAddress, header);
    proxy.start(vertx);
    server.close();
    server = vertx.createNetServer(new NetServerOptions().setUseProxyProtocol(true))
      .connectHandler(u -> fail("Should not be called")).exceptionHandler(exception -> {
        if (exception instanceof io.netty.handler.codec.haproxy.HAProxyProtocolException)
          complete();
      });
    startServer();
    client.connect(proxy.getPort(), proxy.getHost())
      .onSuccess(so -> {
        so.close();
        complete();
      })
      .onFailure(this::fail);
    try {
      await();
    } finally {
      proxy.stop();
    }
  }

  // Compares by host address + port when both sides are non-null; otherwise both must be null.
  private void assertAddresses(SocketAddress address1, SocketAddress address2) {
    if (address1 == null || address2 == null)
      assertEquals(address1, address2);
    else {
      assertEquals(address1.hostAddress(), address2.hostAddress());
      assertEquals(address1.port(), address2.port());
    }
  }

  // 1ms connect timeout against a non-routable host must fail with ConnectTimeoutException.
  @Test
  public void testConnectTimeout() {
    client.close();
    client = vertx.createNetClient(new NetClientOptions().setConnectTimeout(1));
    client.connect(1234, TestUtils.NON_ROUTABLE_HOST)
      .onComplete(onFailure(err -> {
        assertTrue(err instanceof ConnectTimeoutException);
        testComplete();
      }));
    await();
  }

  // Same, but the timeout comes from ConnectOptions rather than the client options.
  @Test
  public void testConnectTimeoutOverride() {
    client.close();
    client = vertx.createNetClient();
    client.connect(new ConnectOptions()
        .setPort(1234)
        .setHost(TestUtils.NON_ROUTABLE_HOST)
        .setTimeout(1))
      .onComplete(onFailure(err -> {
        assertTrue(err instanceof ConnectTimeoutException);
        testComplete();
      }));
    await();
  }

  // listen(65536) is out of the valid port range and must throw synchronously.
  @Test
  public void testInvalidPort() {
    try {
      server.connectHandler(so -> {
      }).listen(65536);
      fail();
    } catch (IllegalArgumentException ignore) {
    }
  }

  @Test
  public void testClientShutdownHandlerTimeout() throws Exception {
    testClientShutdown(false, true, now -> System.currentTimeMillis() - now >= 2000);
  }

  @Test
  public void testClientShutdownHandlerOverride() throws Exception {
    testClientShutdown(true, true, now -> System.currentTimeMillis() - now <= 2000);
  }

  @Test
  public void testClientShutdown() throws Exception {
    testClientShutdown(true, false, now -> System.currentTimeMillis() - now <= 2000);
  }

  /**
   * Graceful client shutdown with a 2s grace period.
   *
   * @param override   when true the shutdown handler closes the socket immediately,
   *                   so shutdown completes before the grace period elapses
   * @param useHandler whether a shutdownHandler is registered (it must fire exactly once)
   * @param checker    asserts on the elapsed time when close/shutdown complete
   */
  private void testClientShutdown(boolean override, boolean useHandler, LongPredicate checker) throws Exception {
    waitFor(2);
    server.connectHandler(so -> {
    });
    startServer();
    NetClientInternal client = ((CleanableNetClient) vertx.createNetClient()).unwrap();
    CountDownLatch latch = new CountDownLatch(1);
    long now = System.currentTimeMillis();
    client.connect(testAddress)
      .onComplete(onSuccess(so -> {
        AtomicInteger eventCount = new AtomicInteger();
        if (useHandler) {
          so.shutdownHandler(v -> {
            assertEquals(1, eventCount.incrementAndGet());
            if (override) {
              so.close();
            }
          });
        }
        so.closeHandler(v -> {
          assertEquals(useHandler ?
            1 : 0, eventCount.get());
          assertTrue(checker.test(now));
          complete();
        });
        latch.countDown();
      }));
    awaitLatch(latch);
    Future<Void> fut = client.shutdown(2, TimeUnit.SECONDS);
    fut.onComplete(onSuccess(v -> {
      assertTrue(checker.test(now));
      complete();
    }));
    await();
  }

  @Test
  public void testServerShutdownHandlerTimeout() throws Exception {
    testServerShutdown(false, true, now -> System.currentTimeMillis() - now >= 2000);
  }

  @Test
  public void testServerShutdownHandlerOverride() throws Exception {
    testServerShutdown(true, true, now -> System.currentTimeMillis() - now <= 2000);
  }

  @Test
  public void testServerShutdown() throws Exception {
    testServerShutdown(false, false, now -> System.currentTimeMillis() - now <= 2000);
  }

  // Server-side counterpart of testClientShutdown; method continues past this chunk.
  public void testServerShutdown(boolean override, boolean useHandler, LongPredicate checker) throws Exception {
    waitFor(2);
    long now = System.currentTimeMillis();
    server.connectHandler(so -> {
      AtomicInteger eventCount = new AtomicInteger();
      if (useHandler) {
        so.shutdownHandler(v -> {
          eventCount.incrementAndGet();
          if (override) {
            so.close();
          }
        });
      }
      so.closeHandler(v -> {
        assertEquals(useHandler ?
1 : 0, eventCount.getAndIncrement()); assertTrue(checker.test(now)); complete(); }); so.write("ping"); }); startServer(); CountDownLatch latch = new CountDownLatch(1); client.connect(testAddress).onComplete(onSuccess(so -> { so.handler(buff -> { latch.countDown(); }); })); awaitLatch(latch); Future<Void> fut = server.shutdown(2, TimeUnit.SECONDS); fut.onComplete(onSuccess(v -> { assertTrue(checker.test(now)); complete(); })); await(); } @Test public void testConnectToServerShutdown() throws Exception { AtomicBoolean shutdown = new AtomicBoolean(); server.connectHandler(so -> { if (!shutdown.get()) { so.shutdownHandler(v -> { shutdown.set(true); }); so.handler(buff -> { if (buff.toString().equals("close")) { so.close(); } else { so.write(buff); } }); } else { so.close(); } }); startServer(); NetSocket so = client.connect(testAddress).await(); CountDownLatch latch = new CountDownLatch(1); so.handler(buff -> { latch.countDown(); }); so.write("hello"); awaitLatch(latch); Future<Void> fut = server.shutdown(20, TimeUnit.SECONDS); assertWaitUntil(shutdown::get); boolean refused = false; for (int i = 0;i < 10;i++) { try { client.connect(testAddress).await(); } catch (Exception e) { // Connection refused refused = true; break; } Thread.sleep(100); } assertTrue(refused); so.handler(buff -> { so.write("close"); }); AtomicBoolean closed = new AtomicBoolean(); so.closeHandler(v -> closed.set(true)); // Verify the socket still works so.write("ping"); assertWaitUntil(closed::get); fut.await(); } /** * Test that for NetServer, the peer host and port info is available in the SSLEngine * when the X509ExtendedKeyManager.chooseEngineServerAlias is called. * * @throws Exception if an error occurs */ @Test public void testTLSServerSSLEnginePeerHost() throws Exception { testTLSServerSSLEnginePeerHostImpl(false); } /** * Test that for NetServer with start TLS, the peer host and port info is available * in the SSLEngine when the X509ExtendedKeyManager.chooseEngineServerAlias is called. 
* * @throws Exception if an error occurs */ @Test public void testStartTLSServerSSLEnginePeerHost() throws Exception { testTLSServerSSLEnginePeerHostImpl(true); } private void testTLSServerSSLEnginePeerHostImpl(boolean startTLS) throws Exception { AtomicBoolean called = new AtomicBoolean(false); testTLS(Cert.NONE, Trust.SERVER_JKS, testPeerHostServerCert(Cert.SERVER_JKS, called), Trust.NONE, false, false, true, startTLS); assertTrue("X509ExtendedKeyManager.chooseEngineServerAlias is not called", called.get()); } /** * Test that for NetServer with SNI, the peer host and port info is available * in the SSLEngine when the X509ExtendedKeyManager.chooseEngineServerAlias is called. * * @throws Exception if an error occurs */ @Test public void testSNIServerSSLEnginePeerHost() throws Exception { AtomicBoolean called = new AtomicBoolean(false); TLSTest test = new TLSTest() .clientTrust(Trust.SNI_JKS_HOST2) .address(SocketAddress.inetSocketAddress(DEFAULT_HTTPS_PORT, "host2.com")) .serverCert(testPeerHostServerCert(Cert.SNI_JKS, called)) .sni(true); test.run(true); await(); assertEquals("host2.com", cnOf(test.clientPeerCert())); assertEquals("host2.com", test.indicatedServerName); assertTrue("X509ExtendedKeyManager.chooseEngineServerAlias is not called", called.get()); } }
MyVerticle
java
apache__flink
flink-runtime/src/test/java/org/apache/flink/runtime/operators/CoGroupTaskTest.java
{ "start": 14366, "end": 15008 }
class ____ extends RichCoGroupFunction<Record, Record, Record> { private static final long serialVersionUID = 1L; private final OneShotLatch delayCoGroupProcessingLatch; public MockDelayingCoGroupStub(OneShotLatch delayCoGroupProcessingLatch) { this.delayCoGroupProcessingLatch = delayCoGroupProcessingLatch; } @Override public void coGroup( Iterable<Record> records1, Iterable<Record> records2, Collector<Record> out) throws InterruptedException { delayCoGroupProcessingLatch.await(); } } }
MockDelayingCoGroupStub
java
quarkusio__quarkus
extensions/elytron-security-oauth2/runtime/src/main/java/io/quarkus/elytron/security/oauth2/runtime/OAuth2Recorder.java
{ "start": 1163, "end": 4884 }
class ____ { private final RuntimeValue<OAuth2RuntimeConfig> runtimeConfig; public OAuth2Recorder(final RuntimeValue<OAuth2RuntimeConfig> runtimeConfig) { this.runtimeConfig = runtimeConfig; } public RuntimeValue<SecurityRealm> createRealm() throws IOException, NoSuchAlgorithmException, CertificateException, KeyStoreException, KeyManagementException { OAuth2RuntimeConfig runtimeConfig = this.runtimeConfig.getValue(); if (!runtimeConfig.clientId().isPresent() || !runtimeConfig.clientSecret().isPresent() || !runtimeConfig.introspectionUrl().isPresent()) { throw new ConfigurationException( "client-id, client-secret and introspection-url must be configured when the oauth2 extension is enabled"); } OAuth2IntrospectValidator.Builder validatorBuilder = OAuth2IntrospectValidator.builder() .clientId(runtimeConfig.clientId().get()) .clientSecret(runtimeConfig.clientSecret().get()) .tokenIntrospectionUrl(URI.create(runtimeConfig.introspectionUrl().get()).toURL()); if (runtimeConfig.caCertFile().isPresent()) { validatorBuilder.useSslContext(createSSLContext(runtimeConfig)); } else { validatorBuilder.useSslContext(SSLContext.getDefault()); } if (runtimeConfig.connectionTimeout().isPresent()) { validatorBuilder.connectionTimeout((int) runtimeConfig.connectionTimeout().get().toMillis()); } if (runtimeConfig.readTimeout().isPresent()) { validatorBuilder.readTimeout((int) runtimeConfig.readTimeout().get().toMillis()); } OAuth2IntrospectValidator validator = validatorBuilder.build(); TokenSecurityRealm tokenRealm = TokenSecurityRealm.builder() .validator(validator) .claimToPrincipal(claims -> new ElytronOAuth2CallerPrincipal(attributesToMap(claims))) .build(); return new RuntimeValue<>(tokenRealm); } private Map<String, Object> attributesToMap(Attributes claims) { Map<String, Object> attributeMap = new HashMap<>(); for (Attributes.Entry entry : claims.entries()) { if (entry.size() > 1) { attributeMap.put(entry.getKey(), entry.subList(0, entry.size())); } else { 
attributeMap.put(entry.getKey(), entry.get(0)); } } return attributeMap; } private SSLContext createSSLContext(OAuth2RuntimeConfig runtimeConfig) throws IOException, CertificateException, NoSuchAlgorithmException, KeyStoreException, KeyManagementException { try (InputStream is = new FileInputStream(runtimeConfig.caCertFile().get())) { CertificateFactory cf = CertificateFactory.getInstance("X.509"); X509Certificate caCert = (X509Certificate) cf.generateCertificate(is); TrustManagerFactory tmf = TrustManagerFactory .getInstance(TrustManagerFactory.getDefaultAlgorithm()); KeyStore ks = KeyStore.getInstance(KeyStore.getDefaultType()); ks.load(null); // You don't need the KeyStore instance to come from a file. ks.setCertificateEntry("caCert", caCert); tmf.init(ks); SSLContext sslContext = SSLContext.getInstance("TLS"); sslContext.init(null, tmf.getTrustManagers(), null); return sslContext; } } public RuntimeValue<OAuth2Augmentor> augmentor(OAuth2BuildTimeConfig buildTimeConfig) { return new RuntimeValue<>(new OAuth2Augmentor(buildTimeConfig.roleClaim())); } }
OAuth2Recorder
java
quarkusio__quarkus
core/deployment/src/main/java/io/quarkus/deployment/dev/testing/LogCapturingOutputFilter.java
{ "start": 373, "end": 3321 }
class ____ implements BiPredicate<String, Boolean> { private static final Logger log = Logger.getLogger(LogCapturingOutputFilter.class); private final CuratedApplication application; private final List<String> logOutput = new ArrayList<>(); private final List<String> errorOutput = new ArrayList<>(); private final boolean mergeErrorStream; private final boolean convertToHtml; private final Supplier<Boolean> finalPredicate; public LogCapturingOutputFilter(CuratedApplication application, boolean mergeErrorStream, boolean convertToHtml, Supplier<Boolean> finalPredicate) { this.application = application; this.mergeErrorStream = mergeErrorStream; this.convertToHtml = convertToHtml; this.finalPredicate = finalPredicate; } public List<String> captureOutput() { List<String> ret = new ArrayList<>(logOutput); logOutput.clear(); return ret; } public List<String> captureErrorOutput() { List<String> ret = new ArrayList<>(errorOutput); errorOutput.clear(); return ret; } @Override public boolean test(String logRecord, Boolean errorStream) { Thread thread = Thread.currentThread(); ClassLoader cl = thread.getContextClassLoader(); if (cl == null) { return true; } while (cl.getParent() != null) { if (cl == application.getAugmentClassLoader() || cl == application.getBaseRuntimeClassLoader()) { //TODO: for convenience we save the log records as HTML rather than ANSI here synchronized (logOutput) { if (convertToHtml) { ByteArrayOutputStream out = new ByteArrayOutputStream(); try (HtmlAnsiOutputStream outputStream = new HtmlAnsiOutputStream(out)) { outputStream.write(logRecord.getBytes(StandardCharsets.UTF_8)); if (mergeErrorStream || !errorStream) { logOutput.add(out.toString(StandardCharsets.UTF_8)); } else { errorOutput.add(out.toString(StandardCharsets.UTF_8)); } } catch (IOException e) { log.error("Failed to capture log record", e); logOutput.add(logRecord); } } else { if (mergeErrorStream || !errorStream) { logOutput.add(logRecord); } else { errorOutput.add(logRecord); } } } return 
finalPredicate.get(); } cl = cl.getParent(); } return true; } }
LogCapturingOutputFilter
java
mapstruct__mapstruct
processor/src/test/java/org/mapstruct/ap/test/bugs/_1148/Entity.java
{ "start": 489, "end": 843 }
class ____ { //CHECKSTYLE:OFF public long recipientId; public long senderId; public NestedDto nestedDto; public NestedDto nestedDto2; public ClientDto sameLevel; public ClientDto sameLevel2; public ClientDto level; public ClientDto level2; //CHECKSTYLE:ON } static
Dto
java
hibernate__hibernate-orm
hibernate-core/src/main/java/org/hibernate/type/StandardBasicTypeTemplate.java
{ "start": 467, "end": 1106 }
class ____<J> extends AbstractSingleColumnStandardBasicType<J> { private final String name; private final String[] registrationKeys; public StandardBasicTypeTemplate( JdbcType jdbcType, JavaType<J> javaType, String... registrationKeys) { super( jdbcType, javaType ); this.registrationKeys = registrationKeys; this.name = javaType.getJavaType() == null ? "(map-mode)" : javaType.getTypeName() + " -> " + jdbcType.getDefaultSqlTypeCode(); } @Override public String getName() { return name; } @Override public String[] getRegistrationKeys() { return registrationKeys; } }
StandardBasicTypeTemplate
java
apache__hadoop
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java
{ "start": 11306, "end": 11572 }
enum ____ { DIAGNOSTIC } /** * Defines the required action of app activities: * REFRESH means to turn on activities recording for the required app, * GET means the required app activities should be involved in response. */ public
ActivitiesGroupBy
java
apache__hadoop
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRollingAverages.java
{ "start": 3127, "end": 3203 }
class ____ sub-sum and sub-total of SampleStat. */ private static
maintains
java
spring-projects__spring-framework
spring-websocket/src/main/java/org/springframework/web/socket/sockjs/transport/session/StreamingSockJsSession.java
{ "start": 1336, "end": 2941 }
class ____ extends AbstractHttpSockJsSession { private int byteCount; public StreamingSockJsSession(String sessionId, SockJsServiceConfig config, WebSocketHandler wsHandler, Map<String, Object> attributes) { super(sessionId, config, wsHandler, attributes); } /** * Get the prelude to write to the response before any other data. * @since 4.2 */ protected abstract byte[] getPrelude(ServerHttpRequest request); @Override protected void handleRequestInternal(ServerHttpRequest request, ServerHttpResponse response, boolean initialRequest) throws IOException { byte[] prelude = getPrelude(request); response.getBody().write(prelude); response.flush(); if (initialRequest) { writeFrame(SockJsFrame.openFrame()); } flushCache(); } @Override protected void flushCache() throws SockJsTransportFailureException { while (!getMessageCache().isEmpty()) { String message = getMessageCache().poll(); SockJsMessageCodec messageCodec = getSockJsServiceConfig().getMessageCodec(); SockJsFrame frame = SockJsFrame.messageFrame(messageCodec, message); writeFrame(frame); this.byteCount += (frame.getContentBytes().length + 1); if (logger.isTraceEnabled()) { logger.trace(this.byteCount + " bytes written so far, " + getMessageCache().size() + " more messages not flushed"); } if (this.byteCount >= getSockJsServiceConfig().getStreamBytesLimit()) { logger.trace("Streamed bytes limit reached, recycling current request"); resetRequest(); this.byteCount = 0; break; } } scheduleHeartbeat(); } }
StreamingSockJsSession
java
elastic__elasticsearch
x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundDoubleEvaluator.java
{ "start": 1185, "end": 5095 }
class ____ implements EvalOperator.ExpressionEvaluator { private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(RoundDoubleEvaluator.class); private final Source source; private final EvalOperator.ExpressionEvaluator val; private final EvalOperator.ExpressionEvaluator decimals; private final DriverContext driverContext; private Warnings warnings; public RoundDoubleEvaluator(Source source, EvalOperator.ExpressionEvaluator val, EvalOperator.ExpressionEvaluator decimals, DriverContext driverContext) { this.source = source; this.val = val; this.decimals = decimals; this.driverContext = driverContext; } @Override public Block eval(Page page) { try (DoubleBlock valBlock = (DoubleBlock) val.eval(page)) { try (LongBlock decimalsBlock = (LongBlock) decimals.eval(page)) { DoubleVector valVector = valBlock.asVector(); if (valVector == null) { return eval(page.getPositionCount(), valBlock, decimalsBlock); } LongVector decimalsVector = decimalsBlock.asVector(); if (decimalsVector == null) { return eval(page.getPositionCount(), valBlock, decimalsBlock); } return eval(page.getPositionCount(), valVector, decimalsVector).asBlock(); } } } @Override public long baseRamBytesUsed() { long baseRamBytesUsed = BASE_RAM_BYTES_USED; baseRamBytesUsed += val.baseRamBytesUsed(); baseRamBytesUsed += decimals.baseRamBytesUsed(); return baseRamBytesUsed; } public DoubleBlock eval(int positionCount, DoubleBlock valBlock, LongBlock decimalsBlock) { try(DoubleBlock.Builder result = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { position: for (int p = 0; p < positionCount; p++) { switch (valBlock.getValueCount(p)) { case 0: result.appendNull(); continue position; case 1: break; default: warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value")); result.appendNull(); continue position; } switch (decimalsBlock.getValueCount(p)) { case 0: result.appendNull(); continue position; case 1: break; default: 
warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value")); result.appendNull(); continue position; } double val = valBlock.getDouble(valBlock.getFirstValueIndex(p)); long decimals = decimalsBlock.getLong(decimalsBlock.getFirstValueIndex(p)); result.appendDouble(Round.process(val, decimals)); } return result.build(); } } public DoubleVector eval(int positionCount, DoubleVector valVector, LongVector decimalsVector) { try(DoubleVector.FixedBuilder result = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) { position: for (int p = 0; p < positionCount; p++) { double val = valVector.getDouble(p); long decimals = decimalsVector.getLong(p); result.appendDouble(p, Round.process(val, decimals)); } return result.build(); } } @Override public String toString() { return "RoundDoubleEvaluator[" + "val=" + val + ", decimals=" + decimals + "]"; } @Override public void close() { Releasables.closeExpectNoException(val, decimals); } private Warnings warnings() { if (warnings == null) { this.warnings = Warnings.createWarnings( driverContext.warningsMode(), source.source().getLineNumber(), source.source().getColumnNumber(), source.text() ); } return warnings; } static
RoundDoubleEvaluator
java
quarkusio__quarkus
extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/rest/client/reactive/InvalidURITest.java
{ "start": 414, "end": 1858 }
class ____ { @RegisterExtension static final QuarkusUnitTest TEST = new QuarkusUnitTest(); @Test void shouldFailOnMissingSchema() { Client client = clientWithUri("localhost:8080"); Assertions.assertThatThrownBy(client::get).isInstanceOf(IllegalArgumentException.class); } @Test void shouldFailOnMissingColon() { Client client = clientWithUri("http//localhost:8080"); Assertions.assertThatThrownBy(client::get).isInstanceOf(IllegalArgumentException.class); } @Test void shouldFailOnStorkUrlWithoutColonAfterScheme() { Client client = clientWithUri("stork//localhost:8080"); Assertions.assertThatThrownBy(client::get).isInstanceOf(IllegalArgumentException.class); } @Test void shouldFailOnStorkUrlWithoutSlashes() { Client client = clientWithUri("stork:somethingwrong"); Assertions.assertThatThrownBy(client::get).isInstanceOf(IllegalArgumentException.class); } @Test void shouldWork() { Client client = clientWithUri( "http://localhost:" + ConfigProvider.getConfig().getValue("quarkus.http.test-port", Integer.class)); assertThat(client.get()).isEqualTo("bar-of-chocolate"); } private Client clientWithUri(String uri) { return QuarkusRestClientBuilder.newBuilder().baseUri(URI.create(uri)).build(Client.class); } @Path("/foo") public
InvalidURITest
java
spring-projects__spring-framework
spring-core/src/main/java/org/springframework/util/ClassUtils.java
{ "start": 20589, "end": 24883 }
class ____ check * @return the original class, or a primitive wrapper for the original primitive type */ @SuppressWarnings("NullAway") // Dataflow analysis limitation public static Class<?> resolvePrimitiveIfNecessary(Class<?> clazz) { Assert.notNull(clazz, "Class must not be null"); return (clazz.isPrimitive() && clazz != void.class ? primitiveTypeToWrapperMap.get(clazz) : clazz); } /** * Determine if the given type represents either {@code Void} or {@code void}. * @param type the type to check * @return {@code true} if the type represents {@code Void} or {@code void} * @since 6.1.4 * @see Void * @see Void#TYPE */ @Contract("null -> false") public static boolean isVoidType(@Nullable Class<?> type) { return (type == void.class || type == Void.class); } /** * Delegate for {@link org.springframework.beans.BeanUtils#isSimpleValueType}. * Also used by {@link ObjectUtils#nullSafeConciseToString}. * <p>Check if the given type represents a common "simple" value type: * primitive or primitive wrapper, {@link Enum}, {@link String} or other * {@link CharSequence}, {@link Number}, {@link Date}, {@link Temporal}, * {@link ZoneId}, {@link TimeZone}, {@link File}, {@link Path}, {@link URI}, * {@link URL}, {@link InetAddress}, {@link Charset}, {@link Currency}, * {@link Locale}, {@link UUID}, {@link Pattern}, or {@link Class}. * <p>{@code Void} and {@code void} are not considered simple value types. 
* @param type the type to check * @return whether the given type represents a "simple" value type, * suggesting value-based data binding and {@code toString} output * @since 6.1 */ public static boolean isSimpleValueType(Class<?> type) { return (!isVoidType(type) && (isPrimitiveOrWrapper(type) || Enum.class.isAssignableFrom(type) || CharSequence.class.isAssignableFrom(type) || Number.class.isAssignableFrom(type) || Date.class.isAssignableFrom(type) || Temporal.class.isAssignableFrom(type) || ZoneId.class.isAssignableFrom(type) || TimeZone.class.isAssignableFrom(type) || File.class.isAssignableFrom(type) || Path.class.isAssignableFrom(type) || Charset.class.isAssignableFrom(type) || Currency.class.isAssignableFrom(type) || InetAddress.class.isAssignableFrom(type) || URI.class == type || URL.class == type || UUID.class == type || Locale.class == type || Pattern.class == type || Class.class == type)); } /** * Check if the right-hand side type may be assigned to the left-hand side * type, assuming setting by reflection. Considers primitive wrapper * classes as assignable to the corresponding primitive types. 
* @param lhsType the target type (left-hand side (LHS) type) * @param rhsType the value type (right-hand side (RHS) type) that should * be assigned to the target type * @return {@code true} if {@code rhsType} is assignable to {@code lhsType} * @see TypeUtils#isAssignable(java.lang.reflect.Type, java.lang.reflect.Type) */ public static boolean isAssignable(Class<?> lhsType, Class<?> rhsType) { Assert.notNull(lhsType, "Left-hand side type must not be null"); Assert.notNull(rhsType, "Right-hand side type must not be null"); if (lhsType.isAssignableFrom(rhsType)) { return true; } if (lhsType.isPrimitive()) { Class<?> resolvedPrimitive = primitiveWrapperTypeMap.get(rhsType); return (lhsType == resolvedPrimitive); } else if (rhsType.isPrimitive()) { Class<?> resolvedWrapper = primitiveTypeToWrapperMap.get(rhsType); return (resolvedWrapper != null && lhsType.isAssignableFrom(resolvedWrapper)); } return false; } /** * Determine if the given type is assignable from the given value, * assuming setting by reflection. Considers primitive wrapper classes * as assignable to the corresponding primitive types. * @param type the target type * @param value the value that should be assigned to the type * @return if the type is assignable from the value */ public static boolean isAssignableValue(Class<?> type, @Nullable Object value) { Assert.notNull(type, "Type must not be null"); return (value != null ? isAssignable(type, value.getClass()) : !type.isPrimitive()); } /** * Convert a "/"-based resource path to a "."-based fully qualified
to
java
apache__flink
flink-formats/flink-compress/src/main/java/org/apache/flink/formats/compress/extractor/Extractor.java
{ "start": 1056, "end": 1136 }
interface ____<T> extends Serializable { byte[] extract(T element); }
Extractor
java
spring-projects__spring-boot
core/spring-boot-autoconfigure/src/test/java/org/springframework/boot/autoconfigure/diagnostics/analyzer/NoSuchBeanDefinitionFailureAnalyzerTests.java
{ "start": 12029, "end": 12216 }
class ____ { } @Configuration(proxyBeanMethods = false) @ImportAutoConfiguration(TestNullBeanConfiguration.class) @Import(StringHandler.class) static
StringMissingBeanNameConfiguration
java
FasterXML__jackson-databind
src/test/java/tools/jackson/databind/mixins/TestMixinSerWithViews.java
{ "start": 953, "end": 2301 }
class ____ { String nameNull = null; String nameComplex = "complexValue"; String nameComplexHidden = "nameComplexHiddenValue"; SimpleTestData testData = new SimpleTestData( ); SimpleTestData[] testDataArray = new SimpleTestData[] { new SimpleTestData( ), null }; public String getNameNull() { return nameNull; } public void setNameNull( String nameNull ) { this.nameNull = nameNull; } public String getNameComplex() { return nameComplex; } public void setNameComplex( String nameComplex ) { this.nameComplex = nameComplex; } public String getNameComplexHidden() { return nameComplexHidden; } public void setNameComplexHidden( String nameComplexHidden ) { this.nameComplexHidden = nameComplexHidden; } public SimpleTestData getTestData() { return testData; } public void setTestData( SimpleTestData testData ) { this.testData = testData; } public SimpleTestData[] getTestDataArray() { return testDataArray; } public void setTestDataArray( SimpleTestData[] testDataArray ) { this.testDataArray = testDataArray; } } public
ComplexTestData
java
resilience4j__resilience4j
resilience4j-spring-boot3/src/main/java/io/github/resilience4j/springboot3/nativeimage/configuration/NativeHintsConfiguration.java
{ "start": 434, "end": 1606 }
class ____ implements RuntimeHintsRegistrar { @Override public void registerHints(RuntimeHints hints, ClassLoader classLoader) { hints.reflection().registerType(io.github.resilience4j.spring6.bulkhead.configure.BulkheadAspect.class, builder -> builder.withMembers(MemberCategory.INVOKE_DECLARED_METHODS)); hints.reflection().registerType(io.github.resilience4j.spring6.circuitbreaker.configure.CircuitBreakerAspect.class, builder -> builder.withMembers(MemberCategory.INVOKE_DECLARED_METHODS)); hints.reflection().registerType(io.github.resilience4j.spring6.ratelimiter.configure.RateLimiterAspect.class, builder -> builder.withMembers(MemberCategory.INVOKE_DECLARED_METHODS)); hints.reflection().registerType(io.github.resilience4j.spring6.retry.configure.RetryAspect.class, builder -> builder.withMembers(MemberCategory.INVOKE_DECLARED_METHODS)); hints.reflection().registerType(io.github.resilience4j.spring6.timelimiter.configure.TimeLimiterAspect.class, builder -> builder.withMembers(MemberCategory.INVOKE_DECLARED_METHODS)); } }
NativeHintsConfiguration
java
redisson__redisson
redisson-hibernate/redisson-hibernate-4/src/test/java/org/redisson/hibernate/ItemTransactional.java
{ "start": 905, "end": 1994 }
class ____ { @Id @GeneratedValue(generator = "increment") @GenericGenerator(name = "increment", strategy = "increment") private Long id; private String name; @NaturalId private String nid; @ElementCollection @JoinTable(name = "Entries", joinColumns = @JoinColumn(name="Item_id")) @Column(name = "entry", nullable = false) @Cache(usage = CacheConcurrencyStrategy.TRANSACTIONAL, region = "item_entries") private List<String> entries = new ArrayList<String>(); public ItemTransactional() { } public ItemTransactional(String name) { this.name = name; } public String getNid() { return nid; } public void setNid(String nid) { this.nid = nid; } public Long getId() { return id; } public void setId(Long id) { this.id = id; } public String getName() { return name; } public void setName(String name) { this.name = name; } public List<String> getEntries() { return entries; } }
ItemTransactional
java
apache__flink
flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/test/util/AbstractTestBaseJUnit4.java
{ "start": 2228, "end": 4713 }
class ____ extends TestLogger { private static final Logger LOG = LoggerFactory.getLogger(AbstractTestBaseJUnit4.class); private static final int DEFAULT_PARALLELISM = 4; @ClassRule public static final MiniClusterWithClientResource MINI_CLUSTER_RESOURCE = new MiniClusterWithClientResource( new MiniClusterResourceConfiguration.Builder() .setNumberTaskManagers(1) .setNumberSlotsPerTaskManager(DEFAULT_PARALLELISM) .build()); @ClassRule public static final TemporaryFolder TEMPORARY_FOLDER = new TemporaryFolder(); @After public final void cleanupRunningJobs() throws Exception { if (!MINI_CLUSTER_RESOURCE.getMiniCluster().isRunning()) { // do nothing if the MiniCluster is not running LOG.warn("Mini cluster is not running after the test!"); return; } for (JobStatusMessage path : MINI_CLUSTER_RESOURCE.getClusterClient().listJobs().get()) { if (!path.getJobState().isTerminalState()) { try { MINI_CLUSTER_RESOURCE.getClusterClient().cancel(path.getJobId()).get(); } catch (Exception ignored) { // ignore exceptions when cancelling dangling jobs } } } } // -------------------------------------------------------------------------------------------- // Temporary File Utilities // -------------------------------------------------------------------------------------------- public String getTempDirPath(String dirName) throws IOException { File f = createAndRegisterTempFile(dirName); return f.toURI().toString(); } public String getTempFilePath(String fileName) throws IOException { File f = createAndRegisterTempFile(fileName); return f.toURI().toString(); } public String createTempFile(String fileName, String contents) throws IOException { File f = createAndRegisterTempFile(fileName); if (!f.getParentFile().exists()) { f.getParentFile().mkdirs(); } f.createNewFile(); FileUtils.writeFileUtf8(f, contents); return f.toURI().toString(); } public File createAndRegisterTempFile(String fileName) throws IOException { return new File(TEMPORARY_FOLDER.newFolder(), fileName); } }
AbstractTestBaseJUnit4
java
apache__camel
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/TwitterDirectMessageEndpointBuilderFactory.java
{ "start": 46739, "end": 52174 }
interface ____ extends EndpointProducerBuilder { default AdvancedTwitterDirectMessageEndpointProducerBuilder advanced() { return (AdvancedTwitterDirectMessageEndpointProducerBuilder) this; } /** * The http proxy host which can be used for the camel-twitter. Can also * be configured on the TwitterComponent level instead. * * The option is a: <code>java.lang.String</code> type. * * Group: proxy * * @param httpProxyHost the value to set * @return the dsl builder */ default TwitterDirectMessageEndpointProducerBuilder httpProxyHost(String httpProxyHost) { doSetProperty("httpProxyHost", httpProxyHost); return this; } /** * The http proxy password which can be used for the camel-twitter. Can * also be configured on the TwitterComponent level instead. * * The option is a: <code>java.lang.String</code> type. * * Group: proxy * * @param httpProxyPassword the value to set * @return the dsl builder */ default TwitterDirectMessageEndpointProducerBuilder httpProxyPassword(String httpProxyPassword) { doSetProperty("httpProxyPassword", httpProxyPassword); return this; } /** * The http proxy port which can be used for the camel-twitter. Can also * be configured on the TwitterComponent level instead. * * The option is a: <code>java.lang.Integer</code> type. * * Group: proxy * * @param httpProxyPort the value to set * @return the dsl builder */ default TwitterDirectMessageEndpointProducerBuilder httpProxyPort(Integer httpProxyPort) { doSetProperty("httpProxyPort", httpProxyPort); return this; } /** * The http proxy port which can be used for the camel-twitter. Can also * be configured on the TwitterComponent level instead. * * The option will be converted to a <code>java.lang.Integer</code> * type. 
* * Group: proxy * * @param httpProxyPort the value to set * @return the dsl builder */ default TwitterDirectMessageEndpointProducerBuilder httpProxyPort(String httpProxyPort) { doSetProperty("httpProxyPort", httpProxyPort); return this; } /** * The http proxy user which can be used for the camel-twitter. Can also * be configured on the TwitterComponent level instead. * * The option is a: <code>java.lang.String</code> type. * * Group: proxy * * @param httpProxyUser the value to set * @return the dsl builder */ default TwitterDirectMessageEndpointProducerBuilder httpProxyUser(String httpProxyUser) { doSetProperty("httpProxyUser", httpProxyUser); return this; } /** * The access token. Can also be configured on the TwitterComponent * level instead. * * The option is a: <code>java.lang.String</code> type. * * Group: security * * @param accessToken the value to set * @return the dsl builder */ default TwitterDirectMessageEndpointProducerBuilder accessToken(String accessToken) { doSetProperty("accessToken", accessToken); return this; } /** * The access secret. Can also be configured on the TwitterComponent * level instead. * * The option is a: <code>java.lang.String</code> type. * * Group: security * * @param accessTokenSecret the value to set * @return the dsl builder */ default TwitterDirectMessageEndpointProducerBuilder accessTokenSecret(String accessTokenSecret) { doSetProperty("accessTokenSecret", accessTokenSecret); return this; } /** * The consumer key. Can also be configured on the TwitterComponent * level instead. * * The option is a: <code>java.lang.String</code> type. * * Group: security * * @param consumerKey the value to set * @return the dsl builder */ default TwitterDirectMessageEndpointProducerBuilder consumerKey(String consumerKey) { doSetProperty("consumerKey", consumerKey); return this; } /** * The consumer secret. Can also be configured on the TwitterComponent * level instead. * * The option is a: <code>java.lang.String</code> type. 
* * Group: security * * @param consumerSecret the value to set * @return the dsl builder */ default TwitterDirectMessageEndpointProducerBuilder consumerSecret(String consumerSecret) { doSetProperty("consumerSecret", consumerSecret); return this; } } /** * Advanced builder for endpoint producers for the Twitter Direct Message component. */ public
TwitterDirectMessageEndpointProducerBuilder
java
quarkusio__quarkus
integration-tests/jpa/src/test/java/io/quarkus/it/jpa/nonquarkus/NonQuarkusApiTest.java
{ "start": 615, "end": 873 }
class ____ { @Test public void test() { given().queryParam("expectedSchema", "SCHEMA1") .when().get("/jpa-test/non-quarkus/test").then() .body(is("OK")) .statusCode(200); } }
NonQuarkusApiTest
java
grpc__grpc-java
core/src/test/java/io/grpc/internal/MessageFramerTest.java
{ "start": 15390, "end": 15860 }
class ____ implements WritableBufferAllocator { public int minSize; public int maxSize; public int allocCount = 0; BytesWritableBufferAllocator(int minSize, int maxSize) { this.minSize = minSize; this.maxSize = maxSize; } @Override public WritableBuffer allocate(int capacityHint) { allocCount++; return new ByteWritableBuffer(Math.min(maxSize, Math.max(capacityHint, minSize))); } } }
BytesWritableBufferAllocator
java
apache__hadoop
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/utils/TestAsyncUtil.java
{ "start": 2881, "end": 3113 }
class ____ { private static final Logger LOG = LoggerFactory.getLogger(TestAsyncUtil.class); private static final long TIME_CONSUMING = 100; private BaseClass baseClass; private boolean enableAsync; public
TestAsyncUtil
java
google__guice
core/test/com/google/inject/util/ProvidersTest.java
{ "start": 1978, "end": 2477 }
class ____ implements jakarta.inject.Provider<Integer> { private final int value; public JakartaProvider(int value) { this.value = value; } @Override public Integer get() { return value; } @Override public int hashCode() { return Objects.hashCode(value); } @Override public boolean equals(Object obj) { return (obj instanceof JakartaProvider) && (value == ((JakartaProvider) obj).value); } } private static
JakartaProvider
java
spring-projects__spring-framework
spring-context-indexer/src/main/java/org/springframework/context/index/processor/SortedProperties.java
{ "start": 1559, "end": 5211 }
class ____ extends Properties { static final String EOL = System.lineSeparator(); private static final Comparator<Object> keyComparator = Comparator.comparing(String::valueOf); private static final Comparator<Entry<Object, Object>> entryComparator = Entry.comparingByKey(keyComparator); private final boolean omitComments; /** * Construct a new {@code SortedProperties} instance that honors the supplied * {@code omitComments} flag. * @param omitComments {@code true} if comments should be omitted when * storing properties in a file */ SortedProperties(boolean omitComments) { this.omitComments = omitComments; } /** * Construct a new {@code SortedProperties} instance with properties populated * from the supplied {@link Properties} object and honoring the supplied * {@code omitComments} flag. * <p>Default properties from the supplied {@code Properties} object will * not be copied. * @param properties the {@code Properties} object from which to copy the * initial properties * @param omitComments {@code true} if comments should be omitted when * storing properties in a file */ SortedProperties(Properties properties, boolean omitComments) { this(omitComments); putAll(properties); } @Override public void store(OutputStream out, String comments) throws IOException { ByteArrayOutputStream baos = new ByteArrayOutputStream(); super.store(baos, (this.omitComments ? null : comments)); String contents = baos.toString(StandardCharsets.ISO_8859_1); for (String line : contents.split(EOL)) { if (!(this.omitComments && line.startsWith("#"))) { out.write((line + EOL).getBytes(StandardCharsets.ISO_8859_1)); } } } @Override public void store(Writer writer, String comments) throws IOException { StringWriter stringWriter = new StringWriter(); super.store(stringWriter, (this.omitComments ? 
null : comments)); String contents = stringWriter.toString(); for (String line : contents.split(EOL)) { if (!(this.omitComments && line.startsWith("#"))) { writer.write(line + EOL); } } } @Override public void storeToXML(OutputStream out, String comments) throws IOException { super.storeToXML(out, (this.omitComments ? null : comments)); } @Override public void storeToXML(OutputStream out, String comments, String encoding) throws IOException { super.storeToXML(out, (this.omitComments ? null : comments), encoding); } /** * Return a sorted enumeration of the keys in this {@link Properties} object. * @see #keySet() */ @Override public synchronized Enumeration<Object> keys() { return Collections.enumeration(keySet()); } /** * Return a sorted set of the keys in this {@link Properties} object. * <p>The keys will be converted to strings if necessary using * {@link String#valueOf(Object)} and sorted alphanumerically according to * the natural order of strings. */ @Override public Set<Object> keySet() { Set<Object> sortedKeys = new TreeSet<>(keyComparator); sortedKeys.addAll(super.keySet()); return Collections.synchronizedSet(sortedKeys); } /** * Return a sorted set of the entries in this {@link Properties} object. * <p>The entries will be sorted based on their keys, and the keys will be * converted to strings if necessary using {@link String#valueOf(Object)} * and compared alphanumerically according to the natural order of strings. */ @Override public Set<Entry<Object, Object>> entrySet() { Set<Entry<Object, Object>> sortedEntries = new TreeSet<>(entryComparator); sortedEntries.addAll(super.entrySet()); return Collections.synchronizedSet(sortedEntries); } }
SortedProperties
java
apache__rocketmq
client/src/main/java/org/apache/rocketmq/client/impl/mqclient/MQClientAPIFactory.java
{ "start": 1656, "end": 6318 }
class ____ implements StartAndShutdown { private MQClientAPIExt[] clients; private final String namePrefix; private final int clientNum; private final ClientRemotingProcessor clientRemotingProcessor; private final RPCHook rpcHook; private final ScheduledExecutorService scheduledExecutorService; private final NameserverAccessConfig nameserverAccessConfig; private final ObjectCreator<RemotingClient> remotingClientCreator; public MQClientAPIFactory( NameserverAccessConfig nameserverAccessConfig, String namePrefix, int clientNum, ClientRemotingProcessor clientRemotingProcessor, RPCHook rpcHook, ScheduledExecutorService scheduledExecutorService ) { this(nameserverAccessConfig, namePrefix, clientNum, clientRemotingProcessor, rpcHook, scheduledExecutorService, null); } public MQClientAPIFactory( NameserverAccessConfig nameserverAccessConfig, String namePrefix, int clientNum, ClientRemotingProcessor clientRemotingProcessor, RPCHook rpcHook, ScheduledExecutorService scheduledExecutorService, ObjectCreator<RemotingClient> remotingClientCreator ) { this.nameserverAccessConfig = nameserverAccessConfig; this.namePrefix = namePrefix; this.clientNum = clientNum; this.clientRemotingProcessor = clientRemotingProcessor; this.rpcHook = rpcHook; this.scheduledExecutorService = scheduledExecutorService; this.remotingClientCreator = remotingClientCreator; this.init(); } protected void init() { System.setProperty(ClientConfig.SEND_MESSAGE_WITH_VIP_CHANNEL_PROPERTY, "false"); if (StringUtils.isEmpty(nameserverAccessConfig.getNamesrvDomain())) { if (Strings.isNullOrEmpty(nameserverAccessConfig.getNamesrvAddr())) { throw new RuntimeException("The configuration item NamesrvAddr is not configured"); } System.setProperty(MixAll.NAMESRV_ADDR_PROPERTY, nameserverAccessConfig.getNamesrvAddr()); } else { System.setProperty("rocketmq.namesrv.domain", nameserverAccessConfig.getNamesrvDomain()); System.setProperty("rocketmq.namesrv.domain.subgroup", nameserverAccessConfig.getNamesrvDomainSubgroup()); 
} } public MQClientAPIExt getClient() { if (clients.length == 1) { return this.clients[0]; } int index = ThreadLocalRandom.current().nextInt(this.clients.length); return this.clients[index]; } @Override public void start() throws Exception { this.clients = new MQClientAPIExt[this.clientNum]; for (int i = 0; i < this.clientNum; i++) { clients[i] = createAndStart(this.namePrefix + "N_" + i); } } @Override public void shutdown() throws Exception { AsyncShutdownHelper helper = new AsyncShutdownHelper(); for (int i = 0; i < this.clientNum; i++) { helper.addTarget(clients[i]); } helper.shutdown().await(Integer.MAX_VALUE, TimeUnit.SECONDS); } protected MQClientAPIExt createAndStart(String instanceName) { ClientConfig clientConfig = new ClientConfig(); clientConfig.setInstanceName(instanceName); clientConfig.setDecodeReadBody(true); clientConfig.setDecodeDecompressBody(false); NettyClientConfig nettyClientConfig = new NettyClientConfig(); nettyClientConfig.setDisableCallbackExecutor(true); MQClientAPIExt mqClientAPIExt = new MQClientAPIExt( clientConfig, nettyClientConfig, clientRemotingProcessor, rpcHook, remotingClientCreator ); if (StringUtils.isEmpty(nameserverAccessConfig.getNamesrvDomain())) { mqClientAPIExt.updateNameServerAddressList(nameserverAccessConfig.getNamesrvAddr()); } else { mqClientAPIExt.fetchNameServerAddr(); this.scheduledExecutorService.scheduleAtFixedRate( mqClientAPIExt::fetchNameServerAddr, Duration.ofSeconds(10).toMillis(), Duration.ofMinutes(2).toMillis(), TimeUnit.MILLISECONDS ); } mqClientAPIExt.start(); return mqClientAPIExt; } public void onNameServerAddressChange(String namesrvAddress) { for (MQClientAPIExt client : clients) { client.onNameServerAddressChange(namesrvAddress); } } public MQClientAPIExt[] getClients() { return clients; } }
MQClientAPIFactory
java
apache__camel
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/ValidatorEndpointBuilderFactory.java
{ "start": 4397, "end": 9659 }
interface ____ extends EndpointProducerBuilder { default ValidatorEndpointBuilder basic() { return (ValidatorEndpointBuilder) this; } /** * Whether the producer should be started lazy (on the first message). * By starting lazy you can use this to allow CamelContext and routes to * startup in situations where a producer may otherwise fail during * starting and cause the route to fail being started. By deferring this * startup to be lazy then the startup failure can be handled during * routing messages via Camel's routing error handlers. Beware that when * the first message is processed then creating and starting the * producer may take a little time and prolong the total processing time * of the processing. * * The option is a: <code>boolean</code> type. * * Default: false * Group: producer (advanced) * * @param lazyStartProducer the value to set * @return the dsl builder */ default AdvancedValidatorEndpointBuilder lazyStartProducer(boolean lazyStartProducer) { doSetProperty("lazyStartProducer", lazyStartProducer); return this; } /** * Whether the producer should be started lazy (on the first message). * By starting lazy you can use this to allow CamelContext and routes to * startup in situations where a producer may otherwise fail during * starting and cause the route to fail being started. By deferring this * startup to be lazy then the startup failure can be handled during * routing messages via Camel's routing error handlers. Beware that when * the first message is processed then creating and starting the * producer may take a little time and prolong the total processing time * of the processing. * * The option will be converted to a <code>boolean</code> type. 
* * Default: false * Group: producer (advanced) * * @param lazyStartProducer the value to set * @return the dsl builder */ default AdvancedValidatorEndpointBuilder lazyStartProducer(String lazyStartProducer) { doSetProperty("lazyStartProducer", lazyStartProducer); return this; } /** * To use a custom * org.apache.camel.processor.validation.ValidatorErrorHandler. The * default error handler captures the errors and throws an exception. * * The option is a: * <code>org.apache.camel.support.processor.validation.ValidatorErrorHandler</code> type. * * Group: advanced * * @param errorHandler the value to set * @return the dsl builder */ default AdvancedValidatorEndpointBuilder errorHandler(org.apache.camel.support.processor.validation.ValidatorErrorHandler errorHandler) { doSetProperty("errorHandler", errorHandler); return this; } /** * To use a custom * org.apache.camel.processor.validation.ValidatorErrorHandler. The * default error handler captures the errors and throws an exception. * * The option will be converted to a * <code>org.apache.camel.support.processor.validation.ValidatorErrorHandler</code> type. * * Group: advanced * * @param errorHandler the value to set * @return the dsl builder */ default AdvancedValidatorEndpointBuilder errorHandler(String errorHandler) { doSetProperty("errorHandler", errorHandler); return this; } /** * To use a custom LSResourceResolver. Do not use together with * resourceResolverFactory. * * The option is a: <code>org.w3c.dom.ls.LSResourceResolver</code> type. * * Group: advanced * * @param resourceResolver the value to set * @return the dsl builder */ default AdvancedValidatorEndpointBuilder resourceResolver(org.w3c.dom.ls.LSResourceResolver resourceResolver) { doSetProperty("resourceResolver", resourceResolver); return this; } /** * To use a custom LSResourceResolver. Do not use together with * resourceResolverFactory. * * The option will be converted to a * <code>org.w3c.dom.ls.LSResourceResolver</code> type. 
* * Group: advanced * * @param resourceResolver the value to set * @return the dsl builder */ default AdvancedValidatorEndpointBuilder resourceResolver(String resourceResolver) { doSetProperty("resourceResolver", resourceResolver); return this; } /** * To use a custom LSResourceResolver which depends on a dynamic * endpoint resource URI. The default resource resolver factory returns * a resource resolver which can read files from the
AdvancedValidatorEndpointBuilder
java
square__retrofit
samples/src/main/java/com/example/retrofit/ErrorHandlingAdapter.java
{ "start": 5604, "end": 7088 }
class ____ { String origin; } public static void main(String... args) { Retrofit retrofit = new Retrofit.Builder() .baseUrl("http://httpbin.org") .addCallAdapterFactory(new ErrorHandlingCallAdapterFactory()) .addConverterFactory(GsonConverterFactory.create()) .build(); HttpBinService service = retrofit.create(HttpBinService.class); MyCall<Ip> ip = service.getIp(); ip.enqueue( new MyCallback<Ip>() { @Override public void success(Response<Ip> response) { System.out.println("SUCCESS! " + response.body().origin); } @Override public void unauthenticated(Response<?> response) { System.out.println("UNAUTHENTICATED"); } @Override public void clientError(Response<?> response) { System.out.println("CLIENT ERROR " + response.code() + " " + response.message()); } @Override public void serverError(Response<?> response) { System.out.println("SERVER ERROR " + response.code() + " " + response.message()); } @Override public void networkError(IOException e) { System.err.println("NETWORK ERROR " + e.getMessage()); } @Override public void unexpectedError(Throwable t) { System.err.println("FATAL ERROR " + t.getMessage()); } }); } }
Ip
java
quarkusio__quarkus
extensions/vertx-http/runtime/src/main/java/io/quarkus/vertx/http/runtime/options/TlsCertificateReloader.java
{ "start": 8802, "end": 9936 }
class ____ { private final long it; private final Supplier<CompletionStage<Boolean>> action; ReloadCertificateTask(long it, Supplier<CompletionStage<Boolean>> action) { this.it = it; this.action = action; } public long it() { return it; } public Supplier<CompletionStage<Boolean>> action() { return action; } @Override public boolean equals(Object obj) { if (obj == this) return true; if (obj == null || obj.getClass() != this.getClass()) return false; var that = (ReloadCertificateTask) obj; return this.it == that.it && Objects.equals(this.action, that.action); } @Override public int hashCode() { return Objects.hash(it, action); } @Override public String toString() { return "ReloadCertificateTask[" + "it=" + it + ", " + "action=" + action + ']'; } } }
ReloadCertificateTask
java
grpc__grpc-java
benchmarks/src/generated/main/grpc/io/grpc/benchmarks/proto/WorkerServiceGrpc.java
{ "start": 12793, "end": 13143 }
class ____ implements io.grpc.BindableService, AsyncService { @java.lang.Override public final io.grpc.ServerServiceDefinition bindService() { return WorkerServiceGrpc.bindService(this); } } /** * A stub to allow clients to do asynchronous rpc calls to service WorkerService. */ public static final
WorkerServiceImplBase
java
junit-team__junit5
jupiter-tests/src/test/java/org/junit/jupiter/engine/extension/ParameterResolverTests.java
{ "start": 15777, "end": 16332 }
class ____ { @Test void numberParameterInjection(Number number) { assertEquals(Integer.valueOf(42), number); } @Test void integerParameterInjection(Integer number) { assertEquals(Integer.valueOf(42), number); } /** * This test must fail, since {@link Double} is a {@link Number} but not an {@link Integer}. * @see NumberParameterResolver */ @Test void doubleParameterInjection(Double number) { /* no-op */ } } @SuppressWarnings("JUnitMalformedDeclaration") static
PotentiallyIncompatibleTypeMethodInjectionTestCase
java
apache__hadoop
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/JerseyTestBase.java
{ "start": 1103, "end": 1497 }
class ____ extends JerseyTest { public static final String JERSEY_RANDOM_PORT = "0"; @Override protected Application configure() { return new Application(); } @BeforeEach @Override public void setUp() throws Exception { super.setUp(); } public final WebTarget targetWithJsonObject() { return target().register(new JettisonObjectProvider.App()); } }
JerseyTestBase
java
mapstruct__mapstruct
processor/src/test/java/org/mapstruct/ap/test/bugs/_1801/domain/Item.java
{ "start": 245, "end": 313 }
class ____ { public abstract String getId(); public static
Item
java
apache__camel
components/camel-syslog/src/test/java/org/apache/camel/component/syslog/AutomatedConversionTest.java
{ "start": 1384, "end": 4002 }
class ____ extends CamelTestSupport { private static int serverPort; private final int messageCount = 1; private final String rfc3164Message = "<165>Aug 4 05:34:00 mymachine myproc[10]: %% It's\n time to make the do-nuts. %% Ingredients: Mix=OK, Jelly=OK #\n" + " Devices: Mixer=OK, Jelly_Injector=OK, Frier=OK # Transport:\n" + " Conveyer1=OK, Conveyer2=OK # %%"; private final String rfc5424Message = "<34>1 2003-10-11T22:14:15.003Z mymachine.example.com su - ID47 - BOM'su root' failed for lonvick on /dev/pts/8"; @BeforeAll public static void initPort() { serverPort = AvailablePortFinder.getNextAvailable(); } @Test public void testSendingRawUDP() throws IOException, InterruptedException { MockEndpoint mock = getMockEndpoint("mock:syslogReceiver"); MockEndpoint mock2 = getMockEndpoint("mock:syslogReceiver2"); mock.expectedMessageCount(2); mock2.expectedMessageCount(2); mock2.expectedBodiesReceived(rfc3164Message, rfc5424Message); DatagramSocket socket = new DatagramSocket(); try { InetAddress address = InetAddress.getByName("127.0.0.1"); for (int i = 0; i < messageCount; i++) { byte[] data = rfc3164Message.getBytes(); DatagramPacket packet = new DatagramPacket(data, data.length, address, serverPort); socket.send(packet); Thread.sleep(100); } for (int i = 0; i < messageCount; i++) { byte[] data = rfc5424Message.getBytes(); DatagramPacket packet = new DatagramPacket(data, data.length, address, serverPort); socket.send(packet); Thread.sleep(100); } } finally { socket.close(); } MockEndpoint.assertIsSatisfied(context); } @Override protected RouteBuilder createRouteBuilder() { return new RouteBuilder() { @Override public void configure() { // we setup a Syslog listener on a random port. from("mina:udp://127.0.0.1:" + serverPort).unmarshal().syslog().process(new Processor() { @Override public void process(Exchange ex) { assertTrue(ex.getIn().getBody() instanceof SyslogMessage); } }).to("mock:syslogReceiver").marshal().syslog().to("mock:syslogReceiver2"); } }; } }
AutomatedConversionTest
java
apache__kafka
clients/src/main/java/org/apache/kafka/common/errors/InvalidFetchSessionEpochException.java
{ "start": 848, "end": 1122 }
class ____ extends RetriableException { private static final long serialVersionUID = 1L; public InvalidFetchSessionEpochException() { } public InvalidFetchSessionEpochException(String message) { super(message); } }
InvalidFetchSessionEpochException
java
alibaba__druid
core/src/main/java/com/alibaba/druid/sql/dialect/mysql/ast/statement/MySqlShowPlanCacheStatement.java
{ "start": 806, "end": 1477 }
class ____ extends MySqlStatementImpl implements MySqlShowStatement { private SQLSelect select; public MySqlShowPlanCacheStatement() { } public MySqlShowPlanCacheStatement(SQLSelect select) { setSelect(select); } public void accept0(MySqlASTVisitor v) { if (v.visit(this)) { if (select != null) { select.accept(v); } } v.endVisit(this); } public SQLSelect getSelect() { return select; } public void setSelect(SQLSelect x) { if (x != null) { x.setParent(this); } this.select = x; } }
MySqlShowPlanCacheStatement
java
spring-projects__spring-framework
spring-expression/src/test/java/org/springframework/expression/spel/ParsingTests.java
{ "start": 5247, "end": 5593 }
class ____ { @Test void literalLong() { parseCheck("37L", "37"); } @Test void literalIntegers() { parseCheck("1"); parseCheck("1415"); } @Test void literalReal() { parseCheck("6.0221415E+23", "6.0221415E23"); } @Test void literalHex() { parseCheck("0x7FFFFFFF", "2147483647"); } } @Nested
LiteralNumbers
java
elastic__elasticsearch
server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollResponseBuilder.java
{ "start": 1026, "end": 2320 }
class ____ extends StatusBuilder { private TimeValue took; private BulkByScrollTask.Status status; private List<Failure> bulkFailures = new ArrayList<>(); private List<SearchFailure> searchFailures = new ArrayList<>(); private boolean timedOut; BulkByScrollResponseBuilder() {} public void setTook(long took) { setTook(new TimeValue(took, TimeUnit.MILLISECONDS)); } public void setTook(TimeValue took) { this.took = took; } public void setStatus(BulkByScrollTask.Status status) { this.status = status; } public void setFailures(List<Object> failures) { if (failures != null) { for (Object object : failures) { if (object instanceof Failure) { bulkFailures.add((Failure) object); } else if (object instanceof SearchFailure) { searchFailures.add((SearchFailure) object); } } } } public void setTimedOut(boolean timedOut) { this.timedOut = timedOut; } public BulkByScrollResponse buildResponse() { status = super.buildStatus(); return new BulkByScrollResponse(took, status, bulkFailures, searchFailures, timedOut); } }
BulkByScrollResponseBuilder
java
FasterXML__jackson-databind
src/test/java/tools/jackson/databind/ser/SerializationAnnotationsTest.java
{ "start": 4910, "end": 6508 }
class ____ are used * as expected */ @Test public void testGetterInheritance() throws Exception { Map<String,Object> result = writeAndMap(MAPPER, new SubClassBean()); assertEquals(3, result.size()); assertEquals(Integer.valueOf(1), result.get("x")); assertEquals(Integer.valueOf(2), result.get("y")); assertEquals(Integer.valueOf(3), result.get("z")); } /** * Unit test to verify that {@link JsonSerialize#using} annotation works * when applied to a class */ @Test public void testClassSerializer() throws Exception { StringWriter sw = new StringWriter(); MAPPER.writeValue(sw, new ClassSerializer()); assertEquals("true", sw.toString()); } /** * Unit test to verify that {@code @JsonSerialize} annotation works * when applied to a Method */ @Test public void testActiveMethodSerializer() throws Exception { StringWriter sw = new StringWriter(); MAPPER.writeValue(sw, new ClassMethodSerializer(13)); // Here we will get wrapped as an object, since we have // full object, just override a single property assertEquals("{\"x\":\"X13X\"}", sw.toString()); } @Test public void testInactiveMethodSerializer() throws Exception { String json = MAPPER.writeValueAsString(new InactiveClassMethodSerializer(8)); // Here we will get wrapped as an object, since we have // full object, just override a single property assertEquals("{\"x\":8}", json); } }
getters
java
apache__camel
components/camel-pqc/src/test/java/org/apache/camel/component/pqc/PQCMLKEMGenerateEncapsulationCryptoRoundTripDESedeTest.java
{ "start": 1721, "end": 5228 }
class ____ extends CamelTestSupport { @EndpointInject("mock:encapsulate") protected MockEndpoint resultEncapsulate; @Produce("direct:encapsulate") protected ProducerTemplate templateEncapsulate; @EndpointInject("mock:encrypted") protected MockEndpoint resultEncrypted; @EndpointInject("mock:unencrypted") protected MockEndpoint resultDecrypted; public PQCMLKEMGenerateEncapsulationCryptoRoundTripDESedeTest() throws NoSuchAlgorithmException { } @Override protected RouteBuilder createRouteBuilder() { CryptoDataFormat cryptoFormat = new CryptoDataFormat("DESede", null); return new RouteBuilder() { @Override public void configure() { from("direct:encapsulate") .to("pqc:keyenc?operation=generateSecretKeyEncapsulation&symmetricKeyAlgorithm=DESEDE") .to("mock:encapsulate") .to("pqc:keyenc?operation=extractSecretKeyEncapsulation&symmetricKeyAlgorithm=DESEDE") .to("pqc:keyenc?operation=extractSecretKeyFromEncapsulation&symmetricKeyAlgorithm=DESEDE") .setHeader(CryptoDataFormat.KEY, body()) .setBody(constant("Hello")) .marshal(cryptoFormat) .log("Encrypted ${body}") .to("mock:encrypted") .unmarshal(cryptoFormat) .log("Unencrypted ${body}") .to("mock:unencrypted"); ; } }; } @BeforeAll public static void startup() throws Exception { Security.addProvider(new BouncyCastleProvider()); Security.addProvider(new BouncyCastlePQCProvider()); } @Test void testSignAndVerify() throws Exception { resultEncapsulate.expectedMessageCount(1); resultEncrypted.expectedMessageCount(1); resultDecrypted.expectedMessageCount(1); templateEncapsulate.sendBody("Hello"); resultEncapsulate.assertIsSatisfied(); assertNotNull(resultEncapsulate.getExchanges().get(0).getMessage().getBody(SecretKeyWithEncapsulation.class)); assertEquals("DESEDE", resultEncapsulate.getExchanges().get(0).getMessage().getBody(SecretKeyWithEncapsulation.class).getAlgorithm()); assertNotNull(resultEncrypted.getExchanges().get(0).getMessage().getBody()); assertEquals("Hello", 
resultDecrypted.getExchanges().get(0).getMessage().getBody(String.class)); } @BindToRegistry("Keypair") public KeyPair setKeyPair() throws NoSuchAlgorithmException, NoSuchProviderException, InvalidAlgorithmParameterException { KeyPairGenerator kpg = KeyPairGenerator.getInstance(PQCKeyEncapsulationAlgorithms.MLKEM.getAlgorithm(), PQCKeyEncapsulationAlgorithms.MLKEM.getBcProvider()); kpg.initialize(MLKEMParameterSpec.ml_kem_512, new SecureRandom()); KeyPair kp = kpg.generateKeyPair(); return kp; } @BindToRegistry("KeyGenerator") public KeyGenerator setKeyGenerator() throws NoSuchAlgorithmException, NoSuchProviderException, InvalidAlgorithmParameterException { KeyGenerator kg = KeyGenerator.getInstance(PQCKeyEncapsulationAlgorithms.MLKEM.getAlgorithm(), PQCKeyEncapsulationAlgorithms.MLKEM.getBcProvider()); return kg; } }
PQCMLKEMGenerateEncapsulationCryptoRoundTripDESedeTest
java
apache__hadoop
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFWithSWebhdfsFileSystem.java
{ "start": 1313, "end": 3319 }
class ____ extends TestHttpFSWithHttpFSFileSystem { private static String classpathDir; private static final String BASEDIR = GenericTestUtils.getTempPath(UUID.randomUUID().toString()); private static String keyStoreDir; private static Configuration sslConf; { URL url = Thread.currentThread().getContextClassLoader(). getResource("classutils.txt"); classpathDir = url.toExternalForm(); if (classpathDir.startsWith("file:")) { classpathDir = classpathDir.substring("file:".length()); classpathDir = classpathDir.substring(0, classpathDir.length() - "/classutils.txt".length()); } else { throw new RuntimeException("Cannot find test classes dir"); } File base = new File(BASEDIR); FileUtil.fullyDelete(base); base.mkdirs(); keyStoreDir = new File(BASEDIR).getAbsolutePath(); try { sslConf = new Configuration(); KeyStoreTestUtil.setupSSLConfig(keyStoreDir, classpathDir, sslConf, false); } catch (Exception ex) { throw new RuntimeException(ex); } jettyTestHelper = new TestJettyHelper("jks", keyStoreDir + "/serverKS.jks", "serverP"); } @AfterAll public static void cleanUp() throws Exception { new File(classpathDir, "ssl-client.xml").delete(); new File(classpathDir, "ssl-server.xml").delete(); KeyStoreTestUtil.cleanupSSLConfig(keyStoreDir, classpathDir); } public TestHttpFSFWithSWebhdfsFileSystem() { } @Override protected Class getFileSystemClass() { return SWebHdfsFileSystem.class; } @Override protected String getScheme() { return "swebhdfs"; } @Override protected FileSystem getHttpFSFileSystem() throws Exception { Configuration conf = new Configuration(sslConf); conf.set("fs.swebhdfs.impl", getFileSystemClass().getName()); URI uri = new URI("swebhdfs://" + TestJettyHelper.getJettyURL().toURI().getAuthority()); return FileSystem.get(uri, conf); } }
TestHttpFSFWithSWebhdfsFileSystem
java
apache__camel
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/KubernetesNodesEndpointBuilderFactory.java
{ "start": 51481, "end": 54105 }
class ____ { /** * The internal instance of the builder used to access to all the * methods representing the name of headers. */ private static final KubernetesNodesHeaderNameBuilder INSTANCE = new KubernetesNodesHeaderNameBuilder(); /** * The Producer operation. * * The option is a: {@code String} type. * * Group: producer * * @return the name of the header {@code KubernetesOperation}. */ public String kubernetesOperation() { return "CamelKubernetesOperation"; } /** * The node labels. * * The option is a: {@code Map<String, String>} type. * * Group: producer * * @return the name of the header {@code KubernetesNodesLabels}. */ public String kubernetesNodesLabels() { return "CamelKubernetesNodesLabels"; } /** * The node name. * * The option is a: {@code String} type. * * Group: producer * * @return the name of the header {@code KubernetesNodeName}. */ public String kubernetesNodeName() { return "CamelKubernetesNodeName"; } /** * The spec for a node. * * The option is a: {@code io.fabric8.kubernetes.api.model.NodeSpec} * type. * * Group: producer * * @return the name of the header {@code KubernetesNodeSpec}. */ public String kubernetesNodeSpec() { return "CamelKubernetesNodeSpec"; } /** * Action watched by the consumer. * * The option is a: {@code io.fabric8.kubernetes.client.Watcher.Action} * type. * * Group: consumer * * @return the name of the header {@code KubernetesEventAction}. */ public String kubernetesEventAction() { return "CamelKubernetesEventAction"; } /** * Timestamp of the action watched by the consumer. * * The option is a: {@code long} type. * * Group: consumer * * @return the name of the header {@code KubernetesEventTimestamp}. */ public String kubernetesEventTimestamp() { return "CamelKubernetesEventTimestamp"; } } static KubernetesNodesEndpointBuilder endpointBuilder(String componentName, String path) {
KubernetesNodesHeaderNameBuilder
java
apache__flink
flink-state-backends/flink-statebackend-changelog/src/test/java/org/apache/flink/state/changelog/ChangelogStateBackendLoadingTest.java
{ "start": 12921, "end": 14054 }
class ____ extends AbstractStateBackend implements CheckpointStorage, ConfigurableStateBackend { private boolean configUpdated = false; @Override public <K> AbstractKeyedStateBackend<K> createKeyedStateBackend( KeyedStateBackendParameters<K> parameters) { return null; } @Override public OperatorStateBackend createOperatorStateBackend( OperatorStateBackendParameters parameters) { return null; } @Override public CompletedCheckpointStorageLocation resolveCheckpoint(String externalPointer) { return null; } @Override public CheckpointStorageAccess createCheckpointStorage(JobID jobId) { return null; } @Override public StateBackend configure(ReadableConfig config, ClassLoader classLoader) throws IllegalConfigurationException { configUpdated = true; return this; } boolean isConfigUpdated() { return configUpdated; } } }
MockStateBackend
java
hibernate__hibernate-orm
hibernate-core/src/main/java/org/hibernate/engine/creation/internal/AbstractCommonBuilder.java
{ "start": 779, "end": 4709 }
class ____<T extends CommonBuilder> implements CommonBuilder { protected final SessionFactoryImplementor sessionFactory; protected StatementInspector statementInspector; protected Interceptor interceptor; protected boolean allowInterceptor = true; protected boolean allowSessionInterceptorCreation = true; protected Connection connection; protected PhysicalConnectionHandlingMode connectionHandlingMode; protected Object tenantIdentifier; protected boolean readOnly; protected CacheMode cacheMode; protected TimeZone jdbcTimeZone; public AbstractCommonBuilder(SessionFactoryImplementor factory) { sessionFactory = factory; final var options = factory.getSessionFactoryOptions(); statementInspector = options.getStatementInspector(); cacheMode = options.getInitialSessionCacheMode(); connectionHandlingMode = options.getPhysicalConnectionHandlingMode(); jdbcTimeZone = options.getJdbcTimeZone(); tenantIdentifier = factory.resolveTenantIdentifier(); } Interceptor configuredInterceptor() { // If we were explicitly asked for no interceptor, always return null. if ( !allowInterceptor ) { return null; } // NOTE: DO NOT return EmptyInterceptor.INSTANCE from here as a "default for the Session". // We "filter" that one out here. The interceptor returned here should represent the // explicitly configured Interceptor (if there is one). Return null from here instead; // Session will handle it. 
if ( interceptor != null && interceptor != EmptyInterceptor.INSTANCE ) { return interceptor; } final var options = sessionFactory.getSessionFactoryOptions(); // prefer the SessionFactory-scoped interceptor, prefer that to any Session-scoped interceptor prototype final var optionsInterceptor = options.getInterceptor(); if ( optionsInterceptor != null && optionsInterceptor != EmptyInterceptor.INSTANCE ) { return optionsInterceptor; } if ( allowSessionInterceptorCreation ) { // then check the Session-scoped interceptor prototype final var statelessInterceptorImplementorSupplier = options.getStatelessInterceptorImplementorSupplier(); if ( statelessInterceptorImplementorSupplier != null ) { return statelessInterceptorImplementorSupplier.get(); } } return null; } protected abstract T getThis(); @Override public T connection(Connection connection) { this.connection = connection; return getThis(); } @Override public T connectionHandling(ConnectionAcquisitionMode acquisitionMode, ConnectionReleaseMode releaseMode) { this.connectionHandlingMode = PhysicalConnectionHandlingMode.interpret( acquisitionMode, releaseMode ); return getThis(); } @Override public T interceptor(Interceptor interceptor) { if ( interceptor == null ) { noInterceptor(); } else { this.interceptor = interceptor; this.allowInterceptor = true; } return getThis(); } @Override public T noInterceptor() { this.interceptor = null; this.allowInterceptor = false; return getThis(); } @Override public T noSessionInterceptorCreation() { this.allowSessionInterceptorCreation = false; return getThis(); } @Override public T tenantIdentifier(Object tenantIdentifier) { this.tenantIdentifier = tenantIdentifier; return getThis(); } @Override public T readOnly(boolean readOnly) { this.readOnly = readOnly; return getThis(); } @Override public T initialCacheMode(CacheMode cacheMode) { this.cacheMode = cacheMode; return getThis(); } @Override public T statementInspector(UnaryOperator<String> operator) { if ( operator == null ) { 
noStatementInspector(); } else { this.statementInspector = operator::apply; } return getThis(); } @Override public T noStatementInspector() { this.statementInspector = null; return getThis(); } @Override public T jdbcTimeZone(TimeZone timeZone) { jdbcTimeZone = timeZone; return getThis(); } }
AbstractCommonBuilder
java
apache__camel
components/camel-jcache/src/test/java/org/apache/camel/component/jcache/JCacheProducerRemoveTest.java
{ "start": 1185, "end": 6035 }
class ____ extends JCacheComponentTestSupport { @Test public void testRemove() throws Exception { final Map<String, Object> headers = new HashMap<>(); final Cache<Object, Object> cache = getCacheFromEndpoint("jcache://test-cache"); final String key = randomString(); final String val = randomString(); cache.put(key, val); headers.clear(); headers.put(JCacheConstants.ACTION, "REMOVE"); headers.put(JCacheConstants.KEY, key); sendBody("direct:remove", null, headers); MockEndpoint mock = getMockEndpoint("mock:remove"); mock.expectedMinimumMessageCount(1); mock.expectedHeaderReceived(JCacheConstants.KEY, key); mock.expectedHeaderReceived(JCacheConstants.RESULT, true); mock.assertIsSatisfied(); assertFalse(cache.containsKey(key)); } @Test public void testRemoveIf() throws Exception { final Map<String, Object> headers = new HashMap<>(); final Cache<Object, Object> cache = getCacheFromEndpoint("jcache://test-cache"); final String key = randomString(); final String val = randomString(); cache.put(key, val); headers.clear(); headers.put(JCacheConstants.ACTION, "REMOVE"); headers.put(JCacheConstants.KEY, key); headers.put(JCacheConstants.OLD_VALUE, val); sendBody("direct:remove-if", null, headers); MockEndpoint mock = getMockEndpoint("mock:remove-if"); mock.expectedMinimumMessageCount(1); mock.expectedHeaderReceived(JCacheConstants.KEY, key); mock.expectedHeaderReceived(JCacheConstants.RESULT, true); mock.assertIsSatisfied(); assertFalse(cache.containsKey(key)); } @Test public void testRemoveIfFailure() throws Exception { final Map<String, Object> headers = new HashMap<>(); final Cache<Object, Object> cache = getCacheFromEndpoint("jcache://test-cache"); final String key = randomString(); final String val = randomString(); cache.put(key, val); headers.clear(); headers.put(JCacheConstants.ACTION, "REMOVE"); headers.put(JCacheConstants.KEY, key); headers.put(JCacheConstants.OLD_VALUE, "x"); sendBody("direct:remove-if-failure", null, headers); MockEndpoint mock = 
getMockEndpoint("mock:remove-if-failure"); mock.expectedMinimumMessageCount(1); mock.expectedHeaderReceived(JCacheConstants.KEY, key); mock.expectedHeaderReceived(JCacheConstants.RESULT, false); mock.assertIsSatisfied(); assertTrue(cache.containsKey(key)); } @Test public void testRemoveAll() throws Exception { final Map<String, Object> headers = new HashMap<>(); final Cache<Object, Object> cache = getCacheFromEndpoint("jcache://test-cache"); Map<Object, Object> values = generateRandomMap(2); cache.putAll(values); headers.clear(); headers.put(JCacheConstants.ACTION, "REMOVEALL"); sendBody("direct:remove-all", null, headers); for (Object key : values.keySet()) { assertFalse(cache.containsKey(key)); } } @Test public void testRemoveSubset() throws Exception { final Map<String, Object> headers = new HashMap<>(); final Cache<Object, Object> cache = getCacheFromEndpoint("jcache://test-cache"); Map<Object, Object> values1 = generateRandomMap(4); Map<Object, Object> values2 = generateRandomMap(2); cache.putAll(values1); cache.putAll(values2); headers.clear(); headers.put(JCacheConstants.ACTION, "REMOVEALL"); headers.put(JCacheConstants.KEYS, values2.keySet()); sendBody("direct:remove-subset", null, headers); for (Object key : values1.keySet()) { assertTrue(cache.containsKey(key)); } for (Object key : values2.keySet()) { assertFalse(cache.containsKey(key)); } } @Override protected RouteBuilder createRouteBuilder() { return new RouteBuilder() { public void configure() { from("direct:remove") .to("jcache://test-cache") .to("mock:remove"); from("direct:remove-if") .to("jcache://test-cache") .to("mock:remove-if"); from("direct:remove-if-failure") .to("jcache://test-cache") .to("mock:remove-if-failure"); from("direct:remove-all") .to("jcache://test-cache"); from("direct:remove-subset") .to("jcache://test-cache"); } }; } }
JCacheProducerRemoveTest
java
quarkusio__quarkus
independent-projects/qute/core/src/main/java/io/quarkus/qute/ReflectionValueResolver.java
{ "start": 3941, "end": 7998 }
interface ____ first... List<Class<?>> classes = new ArrayList<>(); Collections.addAll(classes, clazz.getInterfaces()); Class<?> superClass = clazz.getSuperclass(); while (superClass != null) { Collections.addAll(classes, superClass.getInterfaces()); superClass = superClass.getSuperclass(); } classes.add(clazz); for (Class<?> clazzToTest : classes) { for (Method method : clazzToTest.getMethods()) { if (!isMethodProperty(method)) { continue; } if (name.equals(method.getName())) { foundMatch = method; } else if (matchesPrefix(name, method.getName(), GET_PREFIX)) { foundGetterMatch = method; } else if (isBoolean(method.getReturnType()) && (matchesPrefix(name, method.getName(), IS_PREFIX) || matchesPrefix(name, method.getName(), HAS_PREFIX))) { foundBooleanMatch = method; } } if (foundMatch == null) { foundMatch = (foundGetterMatch != null ? foundGetterMatch : foundBooleanMatch); } if (foundMatch != null) { break; } } return foundMatch; } private Field findField(Class<?> clazz, String name) { Field found = null; for (Field field : clazz.getFields()) { if (!Modifier.isStatic(field.getModifiers()) && field.getName().equals(name)) { found = field; } } return found; } private static List<Method> findMethods(Class<?> clazz, String name, int numberOfParams) { List<Method> foundMatch = new ArrayList<>(); List<Class<?>> hierarchy = new ArrayList<>(); Collections.addAll(hierarchy, clazz.getInterfaces()); Class<?> superClass = clazz.getSuperclass(); while (superClass != null) { Collections.addAll(hierarchy, superClass.getInterfaces()); superClass = superClass.getSuperclass(); } hierarchy.add(clazz); for (Class<?> clazzToTest : hierarchy) { for (Method method : clazzToTest.getMethods()) { if (isMethodCandidate(method) && name.equals(method.getName())) { foundMatch.add(method); method.trySetAccessible(); } } } return foundMatch.size() == 1 ? 
List.of(foundMatch.get(0)) : foundMatch; } private static boolean isMethodCandidate(Method method) { return method != null && Modifier.isPublic(method.getModifiers()) && !Modifier.isStatic(method.getModifiers()) && !method.getReturnType().equals(Void.TYPE) && !method.isBridge() && !Object.class.equals(method.getDeclaringClass()); } private static boolean isMethodProperty(Method method) { return isMethodCandidate(method) && method.getParameterCount() == 0; } private static boolean matchesPrefix(String name, String methodName, String prefix) { return methodName.startsWith(prefix) && decapitalize(methodName.substring(prefix.length(), methodName.length())).equals(name); } private static boolean isBoolean(Class<?> type) { return type.equals(Boolean.class) || type.equals(boolean.class); } static String decapitalize(String name) { if (name == null || name.length() == 0) { return name; } if (name.length() > 1 && Character.isUpperCase(name.charAt(1)) && Character.isUpperCase(name.charAt(0))) { return name; } char chars[] = name.toCharArray(); chars[0] = Character.toLowerCase(chars[0]); return new String(chars); } static
methods
java
apache__camel
test-infra/camel-test-infra-openai-mock/src/main/java/org/apache/camel/test/infra/openai/mock/OpenAIMockServerHandler.java
{ "start": 1169, "end": 2099 }
class ____ implements HttpHandler { private final RequestHandler requestHandler; public OpenAIMockServerHandler(List<MockExpectation> expectations, ObjectMapper objectMapper) { this.requestHandler = new RequestHandler(expectations, objectMapper); } @Override public void handle(HttpExchange exchange) throws IOException { if ("POST".equalsIgnoreCase(exchange.getRequestMethod())) { try { byte[] response = requestHandler.handleRequest(exchange).getBytes(); if (exchange.getResponseCode() == -1) { exchange.sendResponseHeaders(200, response.length); } try (OutputStream os = exchange.getResponseBody()) { os.write(response); } } catch (Exception e) { throw new RuntimeException(e); } } } }
OpenAIMockServerHandler
java
quarkusio__quarkus
independent-projects/resteasy-reactive/server/runtime/src/main/java/org/jboss/resteasy/reactive/server/providers/serialisers/jsonp/ServerJsonObjectHandler.java
{ "start": 755, "end": 2000 }
class ____ extends JsonObjectHandler implements ServerMessageBodyWriter<JsonObject>, ServerMessageBodyReader<JsonObject> { @Override public boolean isWriteable(Class<?> type, Type genericType, ResteasyReactiveResourceInfo target, MediaType mediaType) { return JsonObject.class.isAssignableFrom(type); } @Override public void writeResponse(JsonObject o, Type genericType, ServerRequestContext context) throws WebApplicationException { ByteArrayOutputStream out = new ByteArrayOutputStream(); try (JsonWriter writer = JsonpUtil.writer(out, context.getResponseMediaType())) { writer.writeObject(o); } context.serverResponse().end(out.toByteArray()); } @Override public boolean isReadable(Class<?> type, Type genericType, ResteasyReactiveResourceInfo lazyMethod, MediaType mediaType) { return JsonObject.class.isAssignableFrom(type); } @Override public JsonObject readFrom(Class<JsonObject> type, Type genericType, MediaType mediaType, ServerRequestContext context) throws WebApplicationException { return JsonpUtil.reader(context.getInputStream(), mediaType).readObject(); } }
ServerJsonObjectHandler
java
spring-projects__spring-boot
module/spring-boot-data-commons/src/main/java/org/springframework/boot/data/autoconfigure/web/DataWebProperties.java
{ "start": 3792, "end": 4063 }
class ____ { /** * Sort parameter name. */ private String sortParameter = "sort"; public String getSortParameter() { return this.sortParameter; } public void setSortParameter(String sortParameter) { this.sortParameter = sortParameter; } } }
Sort
java
elastic__elasticsearch
client/sniffer/src/main/java/org/elasticsearch/client/sniff/SniffOnFailureListener.java
{ "start": 1386, "end": 2337 }
class ____ extends RestClient.FailureListener { private volatile Sniffer sniffer; private final AtomicBoolean set; public SniffOnFailureListener() { this.set = new AtomicBoolean(false); } /** * Sets the {@link Sniffer} instance used to perform sniffing * @throws IllegalStateException if the sniffer was already set, as it can only be set once */ public void setSniffer(Sniffer sniffer) { Objects.requireNonNull(sniffer, "sniffer must not be null"); if (set.compareAndSet(false, true)) { this.sniffer = sniffer; } else { throw new IllegalStateException("sniffer can only be set once"); } } @Override public void onFailure(Node node) { if (sniffer == null) { throw new IllegalStateException("sniffer was not set, unable to sniff on failure"); } sniffer.sniffOnFailure(); } }
SniffOnFailureListener
java
apache__dubbo
dubbo-common/src/main/java/org/apache/dubbo/common/status/Status.java
{ "start": 850, "end": 1546 }
class ____ { private final Level level; private final String message; private final String description; public Status(Level level) { this(level, null, null); } public Status(Level level, String message) { this(level, message, null); } public Status(Level level, String message, String description) { this.level = level; this.message = message; this.description = description; } public Level getLevel() { return level; } public String getMessage() { return message; } public String getDescription() { return description; } /** * Level */ public
Status
java
apache__logging-log4j2
log4j-1.2-api/src/main/java/org/apache/log4j/builders/BuilderManager.java
{ "start": 4481, "end": 8113 }
class ____ {} with key {}", className, key); } return (PluginType<T>) pluginType; } private <T extends Builder<U>, U> U newInstance( final PluginType<T> plugin, final Function<T, U> consumer, final U invalidValue) { if (plugin != null) { try { final T builder = LoaderUtil.newInstanceOf(plugin.getPluginClass()); if (builder != null) { final U result = consumer.apply(builder); // returning an empty wrapper is short for "we support this legacy class, but it has validation // errors" return result != null ? result : invalidValue; } } catch (final ReflectiveOperationException ex) { LOGGER.warn("Unable to load plugin: {} due to: {}", plugin.getKey(), ex.getMessage()); } } return null; } public <P extends Parser<T>, T> T parse( final String className, final String prefix, final Properties props, final PropertiesConfiguration config, final T invalidValue) { final P parser = createBuilder(getPlugin(className), prefix, props); if (parser != null) { final T value = parser.parse(config); return value != null ? value : invalidValue; } return null; } public Appender parseAppender( final String className, final Element appenderElement, final XmlConfiguration config) { return newInstance( this.<AppenderBuilder<Appender>>getPlugin(className), b -> b.parseAppender(appenderElement, config), INVALID_APPENDER); } public Appender parseAppender( final String name, final String className, final String prefix, final String layoutPrefix, final String filterPrefix, final Properties props, final PropertiesConfiguration config) { final AppenderBuilder<Appender> builder = createBuilder(getPlugin(className), prefix, props); if (builder != null) { final Appender appender = builder.parseAppender(name, prefix, layoutPrefix, filterPrefix, props, config); return appender != null ? 
appender : INVALID_APPENDER; } return null; } public Filter parseFilter(final String className, final Element filterElement, final XmlConfiguration config) { return newInstance( this.<FilterBuilder>getPlugin(className), b -> b.parse(filterElement, config), INVALID_FILTER); } public Layout parseLayout(final String className, final Element layoutElement, final XmlConfiguration config) { return newInstance( this.<LayoutBuilder>getPlugin(className), b -> b.parse(layoutElement, config), INVALID_LAYOUT); } public RewritePolicy parseRewritePolicy( final String className, final Element rewriteElement, final XmlConfiguration config) { return newInstance( this.<RewritePolicyBuilder>getPlugin(className), b -> b.parse(rewriteElement, config), INVALID_REWRITE_POLICY); } public TriggeringPolicy parseTriggeringPolicy( final String className, final Element policyElement, final XmlConfiguration config) { return newInstance( this.<TriggeringPolicyBuilder>getPlugin(className), b -> b.parse(policyElement, config), (TriggeringPolicy) null); } }
name
java
elastic__elasticsearch
server/src/main/java/org/elasticsearch/action/get/MultiGetShardRequest.java
{ "start": 968, "end": 6222 }
class ____ extends SingleShardRequest<MultiGetShardRequest> { private int shardId; private String preference; private boolean realtime; private boolean refresh; List<Integer> locations; List<MultiGetRequest.Item> items; /** * Should this request force {@link SourceLoader.Synthetic synthetic source}? * Use this to test if the mapping supports synthetic _source and to get a sense * of the worst case performance. Fetches with this enabled will be slower the * enabling synthetic source natively in the index. */ private boolean forceSyntheticSource; MultiGetShardRequest(MultiGetRequest multiGetRequest, String index, int shardId) { super(index); this.shardId = shardId; locations = new ArrayList<>(); items = new ArrayList<>(); preference = multiGetRequest.preference; realtime = multiGetRequest.realtime; refresh = multiGetRequest.refresh; forceSyntheticSource = multiGetRequest.isForceSyntheticSource(); } @Override public boolean equals(Object o) { if (this == o) return true; if (o instanceof MultiGetShardRequest == false) return false; MultiGetShardRequest other = (MultiGetShardRequest) o; return shardId == other.shardId && realtime == other.realtime && refresh == other.refresh && forceSyntheticSource == other.forceSyntheticSource && Objects.equals(preference, other.preference) && Objects.equals(index, other.index) && Objects.equals(locations, other.locations) && Objects.equals(items, other.items); } @Override public int hashCode() { return Objects.hash(shardId, preference, realtime, refresh, index, locations, items, forceSyntheticSource); } MultiGetShardRequest(StreamInput in) throws IOException { super(in); int size = in.readVInt(); locations = new ArrayList<>(size); items = new ArrayList<>(size); for (int i = 0; i < size; i++) { locations.add(in.readVInt()); items.add(new MultiGetRequest.Item(in)); } preference = in.readOptionalString(); refresh = in.readBoolean(); realtime = in.readBoolean(); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { 
forceSyntheticSource = in.readBoolean(); } else { forceSyntheticSource = false; } } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeVInt(locations.size()); for (int i = 0; i < locations.size(); i++) { out.writeVInt(locations.get(i)); items.get(i).writeTo(out); } out.writeOptionalString(preference); out.writeBoolean(refresh); out.writeBoolean(realtime); if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { out.writeBoolean(forceSyntheticSource); } else { if (forceSyntheticSource) { throw new IllegalArgumentException("force_synthetic_source is not supported before 8.4.0"); } } } @Override public ActionRequestValidationException validate() { return super.validateNonNullIndex(); } public int shardId() { return this.shardId; } /** * Sets the preference to execute the search. Defaults to randomize across shards. Can be set to * {@code _local} to prefer local shards or a custom value, which guarantees that the same order * will be used across different requests. */ public MultiGetShardRequest preference(String preference) { this.preference = preference; return this; } public String preference() { return this.preference; } public boolean realtime() { return this.realtime; } public MultiGetShardRequest realtime(boolean realtime) { this.realtime = realtime; return this; } public boolean refresh() { return this.refresh; } public MultiGetShardRequest refresh(boolean refresh) { this.refresh = refresh; return this; } /** * Should this request force {@link SourceLoader.Synthetic synthetic source}? * Use this to test if the mapping supports synthetic _source and to get a sense * of the worst case performance. Fetches with this enabled will be slower the * enabling synthetic source natively in the index. 
*/ public boolean isForceSyntheticSource() { return forceSyntheticSource; } public MultiGetShardRequest setForceSyntheticSource(boolean forceSyntheticSource) { this.forceSyntheticSource = forceSyntheticSource; return this; } void add(int location, MultiGetRequest.Item item) { this.locations.add(location); this.items.add(item); } @Override public String[] indices() { String[] indices = new String[items.size()]; for (int i = 0; i < indices.length; i++) { indices[i] = items.get(i).index(); } return indices; } }
MultiGetShardRequest
java
apache__flink
flink-streaming-java/src/test/java/org/apache/flink/streaming/api/legacy/io/TextInputFormatTest.java
{ "start": 1698, "end": 9440 }
class ____ { @Test void testSimpleRead(@TempDir File tempDir) throws IOException { final String first = "First line"; final String second = "Second line"; File tempFile = File.createTempFile("TextInputFormatTest", "tmp", tempDir); tempFile.setWritable(true); try (PrintStream ps = new PrintStream(tempFile)) { ps.println(first); ps.println(second); } TextInputFormat inputFormat = new TextInputFormat(new Path(tempFile.toURI().toString())); Configuration parameters = new Configuration(); inputFormat.configure(parameters); FileInputSplit[] splits = inputFormat.createInputSplits(1); assertThat(splits).as("expected at least one input split").isNotEmpty(); inputFormat.open(splits[0]); try { assertThat(inputFormat.reachedEnd()).isFalse(); String result = inputFormat.nextRecord(""); assertThat(result).as("Expecting first record here").isNotNull().isEqualTo(first); assertThat(inputFormat.reachedEnd()).isFalse(); result = inputFormat.nextRecord(result); assertThat(result).as("Expecting second record here").isNotNull().isEqualTo(second); assertThat(inputFormat.reachedEnd() || null == inputFormat.nextRecord(result)).isTrue(); } finally { inputFormat.close(); } } @Test void testNestedFileRead(@TempDir File parentDir) throws IOException { String[] dirs = new String[] {"first", "second"}; List<String> expectedFiles = new ArrayList<>(); for (String dir : dirs) { // create input file File tmpDir = new File(parentDir, dir); Files.createDirectories(tmpDir.toPath()); File tempFile = File.createTempFile("TextInputFormatTest", ".tmp", tmpDir); expectedFiles.add( new Path(tempFile.getAbsolutePath()) .makeQualified(FileSystem.getLocalFileSystem()) .toString()); } TextInputFormat inputFormat = new TextInputFormat(new Path(parentDir.toURI())); inputFormat.setNestedFileEnumeration(true); inputFormat.setNumLineSamples(10); // this is to check if the setter overrides the configuration (as expected) Configuration config = new Configuration(); 
config.set(getBooleanConfigOption("recursive.file.enumeration"), false); config.setString("delimited-format.numSamples", "20"); inputFormat.configure(config); assertThat(inputFormat.getNestedFileEnumeration()).isTrue(); assertThat(inputFormat.getNumLineSamples()).isEqualTo(10); FileInputSplit[] splits = inputFormat.createInputSplits(expectedFiles.size()); List<String> paths = new ArrayList<>(); for (FileInputSplit split : splits) { paths.add(split.getPath().toString()); } Collections.sort(expectedFiles); Collections.sort(paths); for (int i = 0; i < expectedFiles.size(); i++) { assertThat(paths.get(i)).isEqualTo(expectedFiles.get(i)); } } /** * This tests cases when line ends with \r\n and \n is used as delimiter, the last \r should be * removed. */ @Test void testRemovingTrailingCR(@TempDir File tmpFile) throws IOException { testRemovingTrailingCR(tmpFile, "\n", "\n"); testRemovingTrailingCR(tmpFile, "\r\n", "\n"); testRemovingTrailingCR(tmpFile, "|", "|"); testRemovingTrailingCR(tmpFile, "|", "\n"); } private void testRemovingTrailingCR(File tmpFile, String lineBreaker, String delimiter) throws IOException { String first = "First line"; String second = "Second line"; String content = first + lineBreaker + second + lineBreaker; // create input file File tempFile = File.createTempFile("TextInputFormatTest", "tmp", tmpFile); tempFile.setWritable(true); try (OutputStreamWriter wrt = new OutputStreamWriter(new FileOutputStream(tempFile))) { wrt.write(content); } TextInputFormat inputFormat = new TextInputFormat(new Path(tempFile.toURI().toString())); inputFormat.setFilePath(tempFile.toURI().toString()); Configuration parameters = new Configuration(); inputFormat.configure(parameters); inputFormat.setDelimiter(delimiter); FileInputSplit[] splits = inputFormat.createInputSplits(1); inputFormat.open(splits[0]); String result; if ((delimiter.equals("\n") && (lineBreaker.equals("\n") || lineBreaker.equals("\r\n"))) || (lineBreaker.equals(delimiter))) { result = 
inputFormat.nextRecord(""); assertThat(result).as("Expecting first record here").isNotNull().isEqualTo(first); result = inputFormat.nextRecord(result); assertThat(result).as("Expecting second record here").isNotNull().isEqualTo(second); result = inputFormat.nextRecord(result); assertThat(result).as("The input file is over").isNull(); } else { result = inputFormat.nextRecord(""); assertThat(result).as("Expecting first record here").isNotNull().isEqualTo(content); } } @Test void testCompressedRead(@TempDir File tempDir) throws IOException { TextInputFormat.registerInflaterInputStreamFactory( "compressed", new InflaterInputStreamFactory<InputStream>() { @Override public InputStream create(InputStream in) { return in; } @Override public Collection<String> getCommonFileExtensions() { return Collections.singletonList("compressed"); } }); final String first = "First line"; final String second = "Second line"; // create input file File tempFile = File.createTempFile("TextInputFormatTest", ".compressed", tempDir); tempFile.setWritable(true); try (PrintStream ps = new PrintStream(tempFile)) { ps.println(first); ps.println(second); } TextInputFormat inputFormat = new TextInputFormat(new Path(tempFile.toURI().toString())); Configuration parameters = new Configuration(); inputFormat.configure(parameters); FileInputSplit[] splits = inputFormat.createInputSplits(1); assertThat(splits).as("expected at least one input split").isNotEmpty(); inputFormat.open(splits[0]); try { assertThat(inputFormat.reachedEnd()).isFalse(); String result = inputFormat.nextRecord(""); assertThat(result).as("Expecting first record here").isNotNull().isEqualTo(first); assertThat(inputFormat.reachedEnd()).isFalse(); Long currentOffset = inputFormat.getCurrentState(); inputFormat.close(); inputFormat = new TextInputFormat(new Path(tempFile.toURI().toString())); inputFormat.configure(parameters); inputFormat.reopen(splits[0], currentOffset); assertThat(inputFormat.reachedEnd()).isFalse(); result = 
inputFormat.nextRecord(result); assertThat(result).as("Expecting second record here").isNotNull().isEqualTo(second); assertThat(inputFormat.reachedEnd() || null == inputFormat.nextRecord(result)).isTrue(); } finally { inputFormat.close(); } } }
TextInputFormatTest
java
hibernate__hibernate-orm
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/lazytoone/mappedby/InverseToOneAllowProxyTests.java
{ "start": 1865, "end": 6412 }
class ____ { @Test public void testOwnerIsProxy(SessionFactoryScope scope) { final EntityPersister supplementalInfoDescriptor = scope.getSessionFactory().getMappingMetamodel().getEntityDescriptor( SupplementalInfo.class ); final BytecodeEnhancementMetadata supplementalInfoEnhancementMetadata = supplementalInfoDescriptor.getBytecodeEnhancementMetadata(); assertThat( supplementalInfoEnhancementMetadata.isEnhancedForLazyLoading(), is( true ) ); final EntityPersister customerDescriptor = scope.getSessionFactory().getMappingMetamodel().getEntityDescriptor( Customer.class ); final BytecodeEnhancementMetadata customerEnhancementMetadata = customerDescriptor.getBytecodeEnhancementMetadata(); assertThat( customerEnhancementMetadata.isEnhancedForLazyLoading(), is( true ) ); SQLStatementInspector statementInspector = scope.getCollectingStatementInspector(); scope.inTransaction( (session) -> { // Get a reference to the SupplementalInfo we created final SupplementalInfo supplementalInfo = session.byId( SupplementalInfo.class ).getReference( 1 ); // 1) we should have just the uninitialized SupplementalInfo enhanced proxy assertThat( statementInspector.getSqlQueries().size(), is( 0 ) ); final BytecodeLazyAttributeInterceptor initialInterceptor = supplementalInfoEnhancementMetadata.extractLazyInterceptor( supplementalInfo ); assertThat( initialInterceptor, instanceOf( EnhancementAsProxyLazinessInterceptor.class ) ); // (2) Access the SupplementalInfo's id value - should trigger no SQL supplementalInfo.getId(); assertThat( statementInspector.getSqlQueries().size(), is( 0 ) ); assertThat( initialInterceptor, sameInstance( supplementalInfoEnhancementMetadata.extractLazyInterceptor( supplementalInfo ) ) ); // 3) Access SupplementalInfo's `something` state // - should trigger loading the "base group" state, which only include `something`. 
// NOTE: `customer` is not part of this lazy group because we do not know the // Customer PK from this side supplementalInfo.getSomething(); assertThat( statementInspector.getSqlQueries().size(), is( 1 ) ); final BytecodeLazyAttributeInterceptor interceptor = supplementalInfoEnhancementMetadata.extractLazyInterceptor( supplementalInfo ); assertThat( initialInterceptor, not( sameInstance( interceptor ) ) ); assertThat( interceptor, instanceOf( LazyAttributeLoadingInterceptor.class ) ); // 4) Access SupplementalInfo's `customer` state // - should trigger load from Customer table, by FK final Customer customer = supplementalInfo.getCustomer(); assertThat( statementInspector.getSqlQueries().size(), is( 2 ) ); // just as above, accessing id should trigger no loads customer.getId(); assertThat( statementInspector.getSqlQueries().size(), is( 2 ) ); customer.getName(); assertThat( statementInspector.getSqlQueries().size(), is( 2 ) ); } ); } @Test @JiraKey("HHH-14659") public void testQueryJoinFetch(SessionFactoryScope scope) { SupplementalInfo info = scope.fromTransaction( (session) -> { final SupplementalInfo result = session.createQuery( "select s from SupplementalInfo s join fetch s.customer", SupplementalInfo.class ) .uniqueResult(); assertThat( scope.getCollectingStatementInspector().getSqlQueries().size(), is( 1 ) ); return result; } ); // The "join fetch" should have already initialized the property, // so that the getter can safely be called outside of a session. assertTrue( Hibernate.isPropertyInitialized( info, "customer" ) ); // The "join fetch" should have already initialized the associated entity. 
Customer customer = info.getCustomer(); assertTrue( Hibernate.isInitialized( customer ) ); assertThat( scope.getCollectingStatementInspector().getSqlQueries().size(), is( 1 ) ); } @BeforeEach public void createTestData(SessionFactoryScope scope) { scope.inTransaction( (session) -> { final Customer customer = new Customer( 1, "Acme Brick" ); session.persist( customer ); final SupplementalInfo supplementalInfo = new SupplementalInfo( 1, customer, "extra details" ); session.persist( supplementalInfo ); } ); scope.getCollectingStatementInspector().clear(); } @AfterEach public void dropTestData(SessionFactoryScope scope) { scope.getSessionFactory().getSchemaManager().truncate(); } @Entity( name = "Customer" ) @Table( name = "customer" ) public static
InverseToOneAllowProxyTests
java
redisson__redisson
redisson/src/main/java/org/redisson/api/mapreduce/RCollectionMapReduce.java
{ "start": 1920, "end": 2833 }
class ____ implements RCollator&lt;String, Integer, Integer&gt; { * * public Integer collate(Map&lt;String, Integer&gt; resultMap) { * int result = 0; * for (Integer count : resultMap.values()) { * result += count; * } * return result; * } * * } * * RList&lt;String&gt; list = redisson.getList(&quot;myWords&quot;); * * Map&lt;String, Integer&gt; wordsCount = list.&lt;String, Integer&gt;mapReduce() * .mapper(new WordMapper()) * .reducer(new WordReducer()) * .execute(); * * Integer totalCount = list.&lt;String, Integer&gt;mapReduce() * .mapper(new WordMapper()) * .reducer(new WordReducer()) * .execute(new WordCollator()); * * </pre> * * @author Nikita Koksharov * * @param <VIn> input value * @param <KOut> output key * @param <VOut> output value */ public
WordCollator
java
apache__avro
lang/java/trevni/avro/src/main/java/org/apache/trevni/avro/AvroColumnReader.java
{ "start": 1662, "end": 2114 }
class ____<D> implements Iterator<D>, Iterable<D>, Closeable { private ColumnFileReader reader; private GenericData model; private Schema fileSchema; private Schema readSchema; private ColumnValues[] values; private int[] arrayWidths; private int column; // current index in values private Map<String, Map<String, Object>> defaults = new HashMap<>(); /** Parameters for reading an Avro column file. */ public static
AvroColumnReader
java
micronaut-projects__micronaut-core
inject-java/src/test/groovy/io/micronaut/inject/factory/composite/SomeRegistryA.java
{ "start": 100, "end": 148 }
class ____ implements SomeRegistry { }
SomeRegistryA