language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | google__dagger | javatests/dagger/functional/generictypes/subpackage/PackagePrivate.java | {
"start": 685,
"end": 740
} | class ____ {
@Inject PackagePrivate() {}
}
| PackagePrivate |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/server/runtime/src/main/java/org/jboss/resteasy/reactive/server/core/ServerResponseBuilderFactory.java | {
"start": 357,
"end": 745
} | class ____ implements ResponseBuilderFactory {
@Override
public Response.ResponseBuilder create() {
return new ResponseBuilderImpl();
}
@Override
public int priority() {
return 100;
}
@Override
public <T> RestResponse.ResponseBuilder<T> createRestResponse() {
return new RestResponseBuilderImpl();
}
}
| ServerResponseBuilderFactory |
java | apache__camel | core/camel-api/src/main/java/org/apache/camel/spi/ModelToXMLDumper.java | {
"start": 1031,
"end": 3113
} | interface ____ {
/**
* Service factory key.
*/
String FACTORY = "modelxml-dumper";
/**
* Dumps the definition as XML
*
* @param context the CamelContext
* @param definition the definition, such as a {@link org.apache.camel.NamedNode}
* @return the output in XML (is formatted)
* @throws Exception is throw if error marshalling to XML
*/
String dumpModelAsXml(CamelContext context, NamedNode definition) throws Exception;
/**
* Dumps the definition as XML
*
* @param context the CamelContext
* @param definition the definition, such as a {@link org.apache.camel.NamedNode}
* @param resolvePlaceholders whether to resolve property placeholders in the dumped XML
* @param generatedIds whether to include auto generated IDs
* @param sourceLocation whether to include source location:line
* @return the output in XML (is formatted)
* @throws Exception is throw if error marshalling to XML
*/
String dumpModelAsXml(
CamelContext context, NamedNode definition, boolean resolvePlaceholders,
boolean generatedIds, boolean sourceLocation)
throws Exception;
/**
* Dumps the beans as XML
*
* @param context the CamelContext
* @param beans list of beans (BeanFactoryDefinition)
* @return the output in XML (is formatted)
* @throws Exception is throw if error marshalling to XML
*/
String dumpBeansAsXml(CamelContext context, List<Object> beans) throws Exception;
/**
* Dumps the global data formats as XML
*
* @param context the CamelContext
* @param dataFormats list of data formats (DataFormatDefinition)
* @return the output in XML (is formatted)
* @throws Exception is throw if error marshalling to XML
*/
String dumpDataFormatsAsXml(CamelContext context, Map<String, Object> dataFormats) throws Exception;
}
| ModelToXMLDumper |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/metamodel/CompositeIdAttributeAccessTests.java | {
"start": 4516,
"end": 4656
} | class ____ {
@Id Long key1;
String name;
}
@Entity(name = "IdClassEntity")
@IdClass( IdClassEntity.PK.class )
public static | BasicEntity |
java | spring-projects__spring-framework | spring-webmvc/src/test/java/org/springframework/web/servlet/mvc/method/annotation/ServletAnnotationControllerHandlerMethodTests.java | {
"start": 131902,
"end": 132336
} | class ____ {
@RequestMapping(value = "/something", method = RequestMethod.PUT)
@ResponseBody
public String handle(@RequestBody String body) throws IOException {
return body;
}
@RequestMapping(value = "/something", method = RequestMethod.PATCH)
@ResponseBody
public String handlePartialUpdate(@RequestBody String content) throws IOException {
return content;
}
}
@Controller
static | RequestResponseBodyController |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/nullness/EqualsMissingNullableTest.java | {
"start": 3422,
"end": 3728
} | class ____ {
public abstract boolean equals(@Nullable Object o);
}
""")
.doTest();
}
@Test
public void negativeAlreadyAnnotatedWithProtobufAnnotation() {
aggressiveHelper
.addSourceLines(
"ProtoMethodAcceptsNullParameter.java", "@ | Foo |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/mysql/createTable/MySqlCreateTableTest18.java | {
"start": 977,
"end": 2924
} | class ____ extends MysqlTest {
public void test_0() throws Exception {
String sql = "CREATE TABLE IF NOT EXISTS `t_awards` (" +
" `id` int(11) NOT NULL AUTO_INCREMENT," +
" `seller_id` int(11) NOT NULL," +
" `shop_id` int(11) DEFAULT NULL," +
" `mode` int(4) NOT NULL," +
" `draw_rate_1` int(11) NOT NULL," +
" `draw_rate_2` int(11) NOT NULL," +
" `amount` int(11) NOT NULL," +
" `position_code` int(11) NOT NULL," +
" `f_denomination` int(11) DEFAULT NULL," +
" `f_description` text," +
" `f_url` text," +
" `f_type` int(4) DEFAULT NULL," +
" PRIMARY KEY (`id`)," +
" UNIQUE KEY `id` (`id`)" +
") ENGINE=InnoDB DEFAULT CHARSET=gbk" +
";";
MySqlStatementParser parser = new MySqlStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement statemen = statementList.get(0);
// print(statementList);
assertEquals(1, statementList.size());
MySqlSchemaStatVisitor visitor = new MySqlSchemaStatVisitor();
statemen.accept(visitor);
// System.out.println("Tables : " + visitor.getTables());
System.out.println("fields : " + visitor.getColumns());
// System.out.println("coditions : " + visitor.getConditions());
// System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(1, visitor.getTables().size());
assertEquals(12, visitor.getColumns().size());
assertEquals(0, visitor.getConditions().size());
assertTrue(visitor.getTables().containsKey(
new TableStat.Name("t_awards")));
assertTrue(visitor.containsColumn("t_awards", "f_type"));
}
}
| MySqlCreateTableTest18 |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/errors/DeserializationExceptionHandler.java | {
"start": 6160,
"end": 6389
} | class ____ a {@link Result},
* indicating whether processing should continue or fail, along with an optional list of
* {@link ProducerRecord} instances to be sent to a dead letter queue.
* </p>
*/
| encapsulates |
java | elastic__elasticsearch | x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/patterntext/PatternTextCompositeValues.java | {
"start": 988,
"end": 1045
} | class ____ a doc value interface.
*/
public final | implements |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/method/configuration/PrePostMethodSecurityConfigurationTests.java | {
"start": 68004,
"end": 68331
} | class ____ {
private final AuthorizationEventPublisher publisher = mock(AuthorizationEventPublisher.class);
@Bean
AuthorizationEventPublisher authorizationEventPublisher() {
return this.publisher;
}
}
@EnableMethodSecurity(mode = AdviceMode.ASPECTJ, securedEnabled = true)
static | AuthorizationEventPublisherConfig |
java | quarkusio__quarkus | extensions/quartz/deployment/src/test/java/io/quarkus/quartz/test/MissingConfigCronExpressionTest.java | {
"start": 631,
"end": 734
} | class ____ {
@Scheduled(cron = "{my.cron}")
void wrong() {
}
}
}
| InvalidBean |
java | apache__dubbo | dubbo-cluster/src/main/java/org/apache/dubbo/rpc/cluster/router/mesh/util/MeshRuleListener.java | {
"start": 911,
"end": 1077
} | interface ____ {
void onRuleChange(String appName, List<Map<String, Object>> rules);
void clearRule(String appName);
String ruleSuffix();
}
| MeshRuleListener |
java | apache__camel | components/camel-smb/src/test/java/org/apache/camel/component/smb/FromSmbRenameReadLockIT.java | {
"start": 1317,
"end": 3393
} | class ____ extends SmbServerTestSupport {
@Override
public void doPostSetup() throws Exception {
prepareSmbServer();
}
protected String getSmbPollingUrl() {
return String.format(
"smb:%s/%s/renamerl?username=%s&password=%s&delete=true&delay=1000&initialDelay=1500&readLock=rename",
service.address(), service.shareName(), service.userName(), service.password());
}
protected String getSmbUrl() {
return String.format(
"smb:%s/%s/renamerl?username=%s&password=%s",
service.address(), service.shareName(), service.userName(), service.password());
}
@Test
public void testFromFileToSmb() throws Exception {
// verify binary file written to smb dir
await().atMost(3, TimeUnit.SECONDS)
.untilAsserted(() -> assertNotNull((copyFileContentFromContainer("/data/rw/renamerl/logo.jpeg"))));
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMessageCount(1);
MockEndpoint.assertIsSatisfied(context);
// verify binary file removed during processing
await().atMost(3, TimeUnit.SECONDS)
.untilAsserted(() -> assertNull((copyFileContentFromContainer("/data/rw/renamerl/logo.jpeg"))));
}
private void prepareSmbServer() throws Exception {
// write binary file to smb dir
Endpoint endpoint = context.getEndpoint(getSmbUrl());
Exchange exchange = endpoint.createExchange();
exchange.getIn().setBody(new File("src/test/data/smbbinarytest/logo.jpeg"));
exchange.getIn().setHeader(Exchange.FILE_NAME, "logo.jpeg");
Producer producer = endpoint.createProducer();
producer.start();
producer.process(exchange);
producer.stop();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from(getSmbPollingUrl()).to("mock:result");
}
};
}
}
| FromSmbRenameReadLockIT |
java | quarkusio__quarkus | devtools/cli/src/main/java/io/quarkus/cli/ProjectExtensionsList.java | {
"start": 1477,
"end": 7453
} | class ____ extends BaseBuildCommand implements Callable<Integer> {
@CommandLine.Mixin
RunModeOption runMode;
@CommandLine.ArgGroup(order = 2, heading = "%nQuarkus version (absolute):%n")
TargetQuarkusPlatformGroup targetQuarkusVersion = new TargetQuarkusPlatformGroup();
@CommandLine.Option(names = { "-i",
"--installable" }, defaultValue = "false", order = 2, description = "List extensions that can be installed (relative)")
boolean installable = false;
@CommandLine.Option(names = { "-s",
"--search" }, defaultValue = "*", paramLabel = "PATTERN", order = 3, description = "Search for matching extensions (simple glob using '*' and '?').")
String searchPattern;
@CommandLine.Option(names = { "-c",
"--category" }, defaultValue = "", paramLabel = "CATEGORY_ID", order = 4, description = "Only list extensions from the specified category.")
String category;
@CommandLine.ArgGroup(heading = "%nOutput format:%n")
ListFormatOptions format = new ListFormatOptions();
@Override
public Integer call() {
try {
output.debug("List extensions with initial parameters: %s", this);
output.throwIfUnmatchedArguments(spec.commandLine());
// Test for an existing project
BuildTool buildTool = QuarkusProjectHelper.detectExistingBuildTool(projectRoot()); // nullable
boolean categorySet = category != null && !category.isBlank();
if (buildTool == null || targetQuarkusVersion.isPlatformSpecified() || targetQuarkusVersion.isStreamSpecified()) {
// do not evaluate installables for list of arbitrary version (project-agnostic)
installable = false;
// check if any format was specified
boolean formatSpecified = format.isSpecified();
if (runMode.isDryRun()) {
return dryRunList(spec.commandLine().getHelp(), null);
}
Integer exitCode = listPlatformExtensions();
printHints(buildTool, !formatSpecified, !categorySet, buildTool != null);
return exitCode;
} else {
BuildSystemRunner runner = getRunner();
if (runMode.isDryRun()) {
return dryRunList(spec.commandLine().getHelp(), runner.getBuildTool());
}
Integer exitCode = runner.listExtensions(runMode, format, installable, searchPattern, category);
printHints(buildTool, !format.isSpecified(), installable && !categorySet, installable);
return exitCode;
}
} catch (Exception e) {
return output.handleCommandException(e,
"Unable to list extensions: " + e.getMessage());
}
}
Integer dryRunList(CommandLine.Help help, BuildTool buildTool) {
Map<String, String> dryRunOutput = new TreeMap<>();
if (buildTool == null) {
output.printText(new String[] {
"\nList extensions for specified platform\n",
"\t" + targetQuarkusVersion.dryRun()
});
} else {
output.printText(new String[] {
"\nList extensions for current project\n",
"\t" + projectRoot().toString()
});
dryRunOutput.put("Build tool", buildTool.name());
}
dryRunOutput.put("Batch (non-interactive mode)", Boolean.toString(runMode.isBatchMode()));
dryRunOutput.put("List format", format.getFormatString());
dryRunOutput.put("List installable extensions", Boolean.toString(installable));
dryRunOutput.put("Search pattern", searchPattern);
dryRunOutput.put("Category", category);
dryRunOutput.put("Registry Client", Boolean.toString(registryClient.enabled()));
output.info(help.createTextTable(dryRunOutput).toString());
return CommandLine.ExitCode.OK;
}
Integer listPlatformExtensions() throws QuarkusCommandException, RegistryResolutionException {
QuarkusProject qp = registryClient.createQuarkusProject(projectRoot(), targetQuarkusVersion,
BuildTool.MAVEN, output);
QuarkusCommandOutcome outcome = new ListExtensions(qp, output)
.fromCli(true)
.all(true)
.format(format.getFormatString())
.search(searchPattern)
.category(category)
.batchMode(runMode.isBatchMode())
.execute();
return outcome.isSuccess() ? CommandLine.ExitCode.OK : CommandLine.ExitCode.SOFTWARE;
}
private void printHints(BuildTool buildTool, boolean formatHint, boolean filterHint, boolean addExtensionHint) {
if (runMode.isBatchMode())
return;
if (formatHint) {
output.info("");
output.info(ListExtensions.MORE_INFO_HINT, "--full");
}
if (filterHint) {
output.info("");
output.info(ListExtensions.FILTER_HINT, "--category \"categoryId\"");
}
if (addExtensionHint) {
output.info("");
if (BuildTool.GRADLE.equals(buildTool) || BuildTool.GRADLE_KOTLIN_DSL.equals(buildTool)) {
output.info(ListExtensions.ADD_EXTENSION_HINT, "build.gradle", "quarkus extension add \"artifactId\"");
} else if (BuildTool.MAVEN.equals(buildTool)) {
output.info(ListExtensions.ADD_EXTENSION_HINT, "pom.xml", "quarkus extension add \"artifactId\"");
}
}
}
@Override
public String toString() {
return "ProjectExtensionList [format=" + format
+ ", installable=" + installable
+ ", searchPattern=" + searchPattern
+ ", output=" + output
+ ", runMode=" + runMode
+ "]";
}
}
| ProjectExtensionsList |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/concurrent/BasicThreadFactory.java | {
"start": 4590,
"end": 4866
} | class ____ methods that correspond to
* the configuration options supported by {@link BasicThreadFactory}. Method
* chaining is supported. Refer to the documentation of {@code
* BasicThreadFactory} for a usage example.
* </p>
*/
public static | provides |
java | redisson__redisson | redisson/src/main/java/org/redisson/reactive/RedissonListReactive.java | {
"start": 1150,
"end": 4593
} | class ____<V> {
private final RList<V> instance;
public RedissonListReactive(RList<V> instance) {
this.instance = instance;
}
public RedissonListReactive(CommandReactiveExecutor commandExecutor, String name) {
this.instance = new RedissonList<V>(commandExecutor, name, null);
}
public RedissonListReactive(Codec codec, CommandReactiveExecutor commandExecutor, String name) {
this.instance = new RedissonList<V>(codec, commandExecutor, name, null);
}
public Publisher<V> descendingIterator() {
return iterator(-1, false);
}
public Publisher<V> iterator() {
return iterator(0, true);
}
public Publisher<V> descendingIterator(int startIndex) {
return iterator(startIndex, false);
}
public Publisher<V> iterator(int startIndex) {
return iterator(startIndex, true);
}
private Publisher<V> iterator(int startIndex, boolean forward) {
return Flux.create(new Consumer<FluxSink<V>>() {
@Override
public void accept(FluxSink<V> emitter) {
emitter.onRequest(new LongConsumer() {
int currentIndex = startIndex;
volatile boolean maxAccepted;
@Override
public void accept(long value) {
if (Long.MAX_VALUE == value) {
maxAccepted = true;
}
if (maxAccepted && value != Long.MAX_VALUE) {
return;
}
onRequest(forward, emitter, value);
}
private void onRequest(boolean forward, FluxSink<V> emitter, long n) {
getAsync(currentIndex).whenComplete((value, e) -> {
if (e != null) {
emitter.error(e);
return;
}
if (value != null) {
emitter.next(value);
if (forward) {
currentIndex++;
} else {
currentIndex--;
}
}
if (value == null) {
emitter.complete();
return;
}
if (n-1 == 0) {
return;
}
onRequest(forward, emitter, n-1);
});
}
});
}
});
}
RFuture<V> getAsync(int currentIndex) {
return instance.getAsync(currentIndex);
}
public Publisher<Boolean> addAll(Publisher<? extends V> c) {
return new PublisherAdder<V>() {
@Override
public RFuture<Boolean> add(Object o) {
return addAsync((V) o);
}
}.addAll(c);
}
RFuture<Boolean> addAsync(V o) {
return instance.addAsync(o);
}
}
| RedissonListReactive |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/HttpsComponentBuilderFactory.java | {
"start": 31718,
"end": 38466
} | class ____
extends AbstractComponentBuilder<HttpComponent>
implements HttpsComponentBuilder {
@Override
protected HttpComponent buildConcreteComponent() {
return new HttpComponent();
}
@Override
protected boolean setPropertyOnComponent(
Component component,
String name,
Object value) {
switch (name) {
case "lazyStartProducer": ((HttpComponent) component).setLazyStartProducer((boolean) value); return true;
case "logHttpActivity": ((HttpComponent) component).setLogHttpActivity((boolean) value); return true;
case "skipControlHeaders": ((HttpComponent) component).setSkipControlHeaders((boolean) value); return true;
case "skipRequestHeaders": ((HttpComponent) component).setSkipRequestHeaders((boolean) value); return true;
case "skipResponseHeaders": ((HttpComponent) component).setSkipResponseHeaders((boolean) value); return true;
case "contentTypeCharsetEnabled": ((HttpComponent) component).setContentTypeCharsetEnabled((boolean) value); return true;
case "cookieStore": ((HttpComponent) component).setCookieStore((org.apache.hc.client5.http.cookie.CookieStore) value); return true;
case "copyHeaders": ((HttpComponent) component).setCopyHeaders((boolean) value); return true;
case "followRedirects": ((HttpComponent) component).setFollowRedirects((boolean) value); return true;
case "httpActivityListener": ((HttpComponent) component).setHttpActivityListener((org.apache.camel.component.http.HttpActivityListener) value); return true;
case "responsePayloadStreamingThreshold": ((HttpComponent) component).setResponsePayloadStreamingThreshold((int) value); return true;
case "userAgent": ((HttpComponent) component).setUserAgent((java.lang.String) value); return true;
case "allowJavaSerializedObject": ((HttpComponent) component).setAllowJavaSerializedObject((boolean) value); return true;
case "authCachingDisabled": ((HttpComponent) component).setAuthCachingDisabled((boolean) value); return true;
case "automaticRetriesDisabled": ((HttpComponent) component).setAutomaticRetriesDisabled((boolean) value); return true;
case "autowiredEnabled": ((HttpComponent) component).setAutowiredEnabled((boolean) value); return true;
case "clientConnectionManager": ((HttpComponent) component).setClientConnectionManager((org.apache.hc.client5.http.io.HttpClientConnectionManager) value); return true;
case "connectionsPerRoute": ((HttpComponent) component).setConnectionsPerRoute((int) value); return true;
case "connectionStateDisabled": ((HttpComponent) component).setConnectionStateDisabled((boolean) value); return true;
case "connectionTimeToLive": ((HttpComponent) component).setConnectionTimeToLive((long) value); return true;
case "contentCompressionDisabled": ((HttpComponent) component).setContentCompressionDisabled((boolean) value); return true;
case "cookieManagementDisabled": ((HttpComponent) component).setCookieManagementDisabled((boolean) value); return true;
case "defaultUserAgentDisabled": ((HttpComponent) component).setDefaultUserAgentDisabled((boolean) value); return true;
case "httpBinding": ((HttpComponent) component).setHttpBinding((org.apache.camel.http.common.HttpBinding) value); return true;
case "httpClientConfigurer": ((HttpComponent) component).setHttpClientConfigurer((org.apache.camel.component.http.HttpClientConfigurer) value); return true;
case "httpConfiguration": ((HttpComponent) component).setHttpConfiguration((org.apache.camel.http.common.HttpConfiguration) value); return true;
case "httpContext": ((HttpComponent) component).setHttpContext((org.apache.hc.core5.http.protocol.HttpContext) value); return true;
case "maxTotalConnections": ((HttpComponent) component).setMaxTotalConnections((int) value); return true;
case "redirectHandlingDisabled": ((HttpComponent) component).setRedirectHandlingDisabled((boolean) value); return true;
case "useSystemProperties": ((HttpComponent) component).setUseSystemProperties((boolean) value); return true;
case "headerFilterStrategy": ((HttpComponent) component).setHeaderFilterStrategy((org.apache.camel.spi.HeaderFilterStrategy) value); return true;
case "proxyAuthDomain": ((HttpComponent) component).setProxyAuthDomain((java.lang.String) value); return true;
case "proxyAuthHost": ((HttpComponent) component).setProxyAuthHost((java.lang.String) value); return true;
case "proxyAuthMethod": ((HttpComponent) component).setProxyAuthMethod((java.lang.String) value); return true;
case "proxyAuthNtHost": ((HttpComponent) component).setProxyAuthNtHost((java.lang.String) value); return true;
case "proxyAuthPassword": ((HttpComponent) component).setProxyAuthPassword((java.lang.String) value); return true;
case "proxyAuthPort": ((HttpComponent) component).setProxyAuthPort((java.lang.Integer) value); return true;
case "proxyAuthScheme": ((HttpComponent) component).setProxyAuthScheme((java.lang.String) value); return true;
case "proxyAuthUsername": ((HttpComponent) component).setProxyAuthUsername((java.lang.String) value); return true;
case "proxyHost": ((HttpComponent) component).setProxyHost((java.lang.String) value); return true;
case "proxyPort": ((HttpComponent) component).setProxyPort((java.lang.Integer) value); return true;
case "sslContextParameters": ((HttpComponent) component).setSslContextParameters((org.apache.camel.support.jsse.SSLContextParameters) value); return true;
case "useGlobalSslContextParameters": ((HttpComponent) component).setUseGlobalSslContextParameters((boolean) value); return true;
case "x509HostnameVerifier": ((HttpComponent) component).setX509HostnameVerifier((javax.net.ssl.HostnameVerifier) value); return true;
case "connectionRequestTimeout": ((HttpComponent) component).setConnectionRequestTimeout((long) value); return true;
case "connectTimeout": ((HttpComponent) component).setConnectTimeout((long) value); return true;
case "responseTimeout": ((HttpComponent) component).setResponseTimeout((long) value); return true;
case "soTimeout": ((HttpComponent) component).setSoTimeout((long) value); return true;
default: return false;
}
}
}
} | HttpsComponentBuilderImpl |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/jsontype/SealedTypesWithSubtypesTest.java | {
"start": 12828,
"end": 13902
} | class ____ different names
.registerSubtypes(new NamedType(Sub.class, "sub1"))
.registerSubtypes(new NamedType(Sub.class, "sub2"))
.build();
// fields of a POJO will be deserialized correctly according to their field name
POJOWrapper pojoWrapper = mapper.readValue("{\"sub1\":{\"#type\":\"sub1\",\"a\":10},\"sub2\":{\"#type\":\"sub2\",\"a\":50}}", POJOWrapper.class);
assertEquals(10, pojoWrapper.sub1.a);
assertEquals(50, pojoWrapper.sub2.a);
// Instances of the same object can be deserialized with multiple names
SuperTypeWithoutDefault sub1 = mapper.readValue("{\"#type\":\"sub1\", \"a\":20}", SuperTypeWithoutDefault.class);
assertSame(Sub.class, sub1.getClass());
assertEquals(20, ((Sub) sub1).a);
SuperTypeWithoutDefault sub2 = mapper.readValue("{\"#type\":\"sub2\", \"a\":30}", SuperTypeWithoutDefault.class);
assertSame(Sub.class, sub2.getClass());
assertEquals(30, ((Sub) sub2).a);
}
}
| with |
java | quarkusio__quarkus | independent-projects/tools/codestarts/src/test/java/io/quarkus/devtools/codestarts/core/strategy/CodestartFileStrategyTest.java | {
"start": 190,
"end": 2412
} | class ____ {
@Test
void testFilterStart() {
final CodestartFileStrategy strategy = new CodestartFileStrategy("*.txt", mock(CodestartFileStrategyHandler.class));
assertThat(strategy.test("myfile.txt")).isTrue();
assertThat(strategy.test(null)).isFalse();
assertThat(strategy.test("foo/bar/myfile.txt")).isTrue();
assertThat(strategy.test(".txt")).isTrue();
assertThat(strategy.test("foo/bar/myfile.zip")).isFalse();
assertThat(strategy.test("")).isFalse();
}
@Test
void testFilterEnd() {
final CodestartFileStrategy strategy = new CodestartFileStrategy("/foo/bar/*",
mock(CodestartFileStrategyHandler.class));
assertThat(strategy.test("/foo/bar/myfile.txt")).isTrue();
assertThat(strategy.test("/foo/bar/baz/anoter_file")).isTrue();
assertThat(strategy.test(null)).isFalse();
assertThat(strategy.test("foo/bar/myfile.txt")).isFalse();
assertThat(strategy.test("something")).isFalse();
assertThat(strategy.test("")).isFalse();
}
@Test
void testFilterMiddle() {
final CodestartFileStrategy strategy = new CodestartFileStrategy("/foo/bar/my*.txt",
mock(CodestartFileStrategyHandler.class));
assertThat(strategy.test("/foo/bar/myfile.txt")).isTrue();
assertThat(strategy.test("/foo/bar/baz/anoter_file")).isFalse();
assertThat(strategy.test(null)).isFalse();
assertThat(strategy.test("foo/bar/myfile.txt")).isFalse();
assertThat(strategy.test("something")).isFalse();
assertThat(strategy.test("")).isFalse();
}
@Test
void testFilter() {
final CodestartFileStrategy strategy = new CodestartFileStrategy("/foo/bar/myfile.txt",
mock(CodestartFileStrategyHandler.class));
assertThat(strategy.test("/foo/bar/myfile.txt")).isTrue();
assertThat(strategy.test("/foo/bar/myfile.tx")).isFalse();
assertThat(strategy.test(null)).isFalse();
assertThat(strategy.test("foo/bar/myfile.txt")).isFalse();
assertThat(strategy.test("something")).isFalse();
assertThat(strategy.test("")).isFalse();
}
} | CodestartFileStrategyTest |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/http/server/reactive/AbstractListenerReadPublisher.java | {
"start": 8423,
"end": 9599
} | class ____ implements Subscription {
@Override
public void request(long n) {
if (rsReadLogger.isTraceEnabled()) {
rsReadLogger.trace(getLogPrefix() + "request " + (n != Long.MAX_VALUE ? n : "Long.MAX_VALUE"));
}
state.get().request(AbstractListenerReadPublisher.this, n);
}
@Override
public void cancel() {
State state = AbstractListenerReadPublisher.this.state.get();
if (rsReadLogger.isTraceEnabled()) {
rsReadLogger.trace(getLogPrefix() + "cancel [" + state + "]");
}
state.cancel(AbstractListenerReadPublisher.this);
}
}
/**
* The states that a read {@link Publisher} transitions through.
* <p><pre>
* UNSUBSCRIBED
* |
* v
* SUBSCRIBING
* |
* v
* +---- NO_DEMAND ---------------> DEMAND ---+
* | ^ ^ |
* | | | |
* | +------- READING <--------+ |
* | | |
* | v |
* +--------------> COMPLETED <---------------+
* </pre>
*/
private | ReadSubscription |
java | elastic__elasticsearch | x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/spatial/SpatialPushDownCartesianShapeIT.java | {
"start": 398,
"end": 904
} | class ____ extends SpatialPushDownShapeTestCase {
@Override
protected String fieldType() {
return "shape";
}
@Override
protected Geometry getIndexGeometry() {
return ShapeTestUtils.randomGeometryWithoutCircle(false);
}
@Override
protected Geometry getQueryGeometry() {
return ShapeTestUtils.randomGeometry(false);
}
@Override
protected String castingFunction() {
return "TO_CARTESIANSHAPE";
}
}
| SpatialPushDownCartesianShapeIT |
java | square__retrofit | samples/src/main/java/com/example/retrofit/ErrorHandlingAdapter.java | {
"start": 2889,
"end": 3517
} | class ____<R> implements CallAdapter<R, MyCall<R>> {
private final Type responseType;
private final Executor callbackExecutor;
ErrorHandlingCallAdapter(Type responseType, Executor callbackExecutor) {
this.responseType = responseType;
this.callbackExecutor = callbackExecutor;
}
@Override
public Type responseType() {
return responseType;
}
@Override
public MyCall<R> adapt(Call<R> call) {
return new MyCallAdapter<>(call, callbackExecutor);
}
}
}
/** Adapts a {@link Call} to {@link MyCall}. */
static | ErrorHandlingCallAdapter |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/query/named/FetchMemento.java | {
"start": 433,
"end": 685
} | interface ____ extends ModelPartReferenceMemento {
}
/**
* Resolve the fetch-memento into the result-graph-node builder
*/
FetchBuilder resolve(Parent parent, Consumer<String> querySpaceConsumer, ResultSetMappingResolutionContext context);
}
| Parent |
java | apache__camel | components/camel-sql/src/test/java/org/apache/camel/component/sql/SqlProducerNoopTest.java | {
"start": 1261,
"end": 4220
} | class ____ extends CamelTestSupport {
private EmbeddedDatabase db;
@Override
public void doPreSetup() throws Exception {
db = new EmbeddedDatabaseBuilder()
.setName(getClass().getSimpleName())
.setType(EmbeddedDatabaseType.H2)
.addScript("sql/createAndPopulateDatabase.sql").build();
}
@Override
public void doPostTearDown() throws Exception {
if (db != null) {
db.shutdown();
}
}
@Test
public void testInsertNoop() throws InterruptedException {
MockEndpoint mock = getMockEndpoint("mock:insert");
mock.expectedMessageCount(1);
mock.expectedHeaderReceived(SqlConstants.SQL_UPDATE_COUNT, 1);
mock.message(0).body().isEqualTo("Hi there!");
template.requestBody("direct:insert", "Hi there!");
MockEndpoint.assertIsSatisfied(context);
}
@Test
public void testQueryNoop() throws InterruptedException {
MockEndpoint mock = getMockEndpoint("mock:query");
mock.expectedMessageCount(1);
mock.expectedHeaderReceived(SqlConstants.SQL_ROW_COUNT, 3);
mock.message(0).body().isEqualTo("Hi there!");
template.requestBody("direct:query", "Hi there!");
MockEndpoint.assertIsSatisfied(context);
}
@Test
public void testUpdateNoop() throws InterruptedException {
MockEndpoint mock = getMockEndpoint("mock:update");
mock.expectedMessageCount(1);
mock.expectedHeaderReceived(SqlConstants.SQL_UPDATE_COUNT, 1);
mock.message(0).body().isEqualTo("Hi there!");
template.requestBody("direct:update", "Hi there!");
MockEndpoint.assertIsSatisfied(context);
}
@Test
public void testDeleteNoop() throws InterruptedException {
MockEndpoint mock = getMockEndpoint("mock:delete");
mock.expectedMessageCount(1);
mock.expectedHeaderReceived(SqlConstants.SQL_UPDATE_COUNT, 1);
mock.message(0).body().isEqualTo("Hi there!");
template.requestBody("direct:delete", "Hi there!");
MockEndpoint.assertIsSatisfied(context);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
// required for the sql component
getContext().getComponent("sql", SqlComponent.class).setDataSource(db);
from("direct:query").to("sql:select * from projects?noop=true").to("mock:query");
from("direct:update").to("sql:update projects set license='MIT' where id=3?noop=true").to("mock:update");
from("direct:insert").to("sql:insert into projects values (4, 'Zookeeper', 'ASF')?noop=true").to("mock:insert");
from("direct:delete").to("sql:delete from projects where id=1?noop=true").to("mock:delete");
}
};
}
}
| SqlProducerNoopTest |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderRemote.java | {
"start": 1451,
"end": 3156
} | class ____ {
private BlockReaderTestUtil util;
private byte[] blockData;
private BlockReader reader;
/**
* if override this, make sure return array length is less than
* block size.
*/
byte [] getBlockData() {
int length = 1 << 22;
byte[] data = new byte[length];
for (int i = 0; i < length; i++) {
data[i] = (byte) (i % 133);
}
return data;
}
private BlockReader getBlockReader(LocatedBlock block) throws Exception {
return util.getBlockReader(block, 0, blockData.length);
}
@BeforeEach
public void setup() throws Exception {
util = new BlockReaderTestUtil(1, new HdfsConfiguration());
blockData = getBlockData();
DistributedFileSystem fs = util.getCluster().getFileSystem();
Path testfile = new Path("/testfile");
FSDataOutputStream fout = fs.create(testfile);
fout.write(blockData);
fout.close();
LocatedBlock blk = util.getFileBlocks(testfile, blockData.length).get(0);
reader = getBlockReader(blk);
}
@AfterEach
public void shutdown() throws Exception {
util.shutdown();
}
@Test
@Timeout(value = 60)
public void testSkip() throws IOException {
Random random = new Random();
byte [] buf = new byte[1];
for (int pos = 0; pos < blockData.length;) {
long skip = random.nextInt(100) + 1;
long skipped = reader.skip(skip);
if (pos + skip >= blockData.length) {
assertEquals(blockData.length, pos + skipped);
break;
} else {
assertEquals(skip, skipped);
pos += skipped;
assertEquals(1, reader.read(buf, 0, 1));
assertEquals(blockData[pos], buf[0]);
pos += 1;
}
}
}
}
| TestBlockReaderRemote |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/compliance/EntityManagerFindTest.java | {
"start": 2507,
"end": 2624
} | class ____ {
@Id
@Temporal(TemporalType.DATE)
protected java.util.Date id;
private String name;
}
}
| TestEntity2 |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/sql/dialect/sqlserver/ast/SQLServerObject.java | {
"start": 875,
"end": 1132
} | interface ____ extends TransactSQLObject {
default void accept0(SQLASTVisitor v) {
if (v instanceof SQLServerASTVisitor) {
accept0((SQLServerASTVisitor) v);
}
}
void accept0(SQLServerASTVisitor visitor);
}
| SQLServerObject |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/transformation/AnnotationTransformationTest.java | {
"start": 5468,
"end": 5585
} | interface ____ {
}
@Retention(RetentionPolicy.RUNTIME)
@Target({ ElementType.PARAMETER })
public @ | MyGet |
java | grpc__grpc-java | benchmarks/src/generated/main/grpc/io/grpc/benchmarks/proto/ReportQpsScenarioServiceGrpc.java | {
"start": 8255,
"end": 9288
} | class ____
extends io.grpc.stub.AbstractBlockingStub<ReportQpsScenarioServiceBlockingStub> {
private ReportQpsScenarioServiceBlockingStub(
io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
super(channel, callOptions);
}
@java.lang.Override
protected ReportQpsScenarioServiceBlockingStub build(
io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
return new ReportQpsScenarioServiceBlockingStub(channel, callOptions);
}
/**
* <pre>
* Report results of a QPS test benchmark scenario.
* </pre>
*/
public io.grpc.benchmarks.proto.Control.Void reportScenario(io.grpc.benchmarks.proto.Control.ScenarioResult request) {
return io.grpc.stub.ClientCalls.blockingUnaryCall(
getChannel(), getReportScenarioMethod(), getCallOptions(), request);
}
}
/**
* A stub to allow clients to do ListenableFuture-style rpc calls to service ReportQpsScenarioService.
*/
public static final | ReportQpsScenarioServiceBlockingStub |
java | quarkusio__quarkus | extensions/resteasy-classic/resteasy/runtime/src/main/java/io/quarkus/resteasy/runtime/SecurityContextFilter.java | {
"start": 1025,
"end": 3562
} | class ____ implements ContainerRequestFilter {
@Inject
SecurityIdentity old;
@Inject
CurrentIdentityAssociation currentIdentityAssociation;
@Inject
RoutingContext routingContext;
@Override
public void filter(ContainerRequestContext requestContext) throws IOException {
SecurityContext modified = requestContext.getSecurityContext();
if (modified instanceof ServletSecurityContext || modified instanceof QuarkusResteasySecurityContext) {
//an original security context, it has not been modified
return;
}
Set<Credential> oldCredentials = old.getCredentials();
Map<String, Object> oldAttributes = old.getAttributes();
SecurityIdentity newIdentity = new SecurityIdentity() {
@Override
public Principal getPrincipal() {
return modified.getUserPrincipal();
}
@Override
public boolean isAnonymous() {
return modified.getUserPrincipal() == null;
}
@Override
public Set<String> getRoles() {
throw new UnsupportedOperationException(
"retrieving all roles not supported when JAX-RS security context has been replaced");
}
@Override
public boolean hasRole(String role) {
return modified.isUserInRole(role);
}
@Override
public <T extends Credential> T getCredential(Class<T> credentialType) {
for (Credential cred : getCredentials()) {
if (credentialType.isAssignableFrom(cred.getClass())) {
return (T) cred;
}
}
return null;
}
@Override
public Set<Credential> getCredentials() {
return oldCredentials;
}
@Override
public <T> T getAttribute(String name) {
return (T) oldAttributes.get(name);
}
@Override
public Map<String, Object> getAttributes() {
return oldAttributes;
}
@Override
public Uni<Boolean> checkPermission(Permission permission) {
return Uni.createFrom().nullItem();
}
};
routingContext.setUser(new QuarkusHttpUser(newIdentity));
currentIdentityAssociation.setIdentity(newIdentity);
}
}
| SecurityContextFilter |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/rest/action/RestCancellableNodeClientTests.java | {
"start": 12298,
"end": 14839
} | class ____ implements HttpChannel {
private final AtomicBoolean open = new AtomicBoolean(true);
private final SubscribableListener<ActionListener<Void>> closeListener = new SubscribableListener<>();
private final CountDownLatch closeLatch = new CountDownLatch(1);
@Override
public void sendResponse(HttpResponse response, ActionListener<Void> listener) {}
@Override
public InetSocketAddress getLocalAddress() {
return null;
}
@Override
public InetSocketAddress getRemoteAddress() {
return null;
}
@Override
public void close() {
assertTrue("HttpChannel is already closed", open.compareAndSet(true, false));
closeListener.andThenAccept(listener -> {
boolean failure = randomBoolean();
threadPool.generic().submit(() -> {
if (failure) {
listener.onFailure(new IllegalStateException());
} else {
listener.onResponse(null);
}
closeLatch.countDown();
});
});
}
private void awaitClose() throws InterruptedException {
close();
closeLatch.await();
}
@Override
public boolean isOpen() {
return open.get();
}
@Override
public void addCloseListener(ActionListener<Void> listener) {
// if the channel is already closed, the listener gets notified immediately, from the same thread.
if (open.get() == false) {
listener.onResponse(null);
// Ensure closeLatch is pulled by completing the closeListener with a noop that is ignored if it is already completed.
// Note that when the channel is closed we may see multiple addCloseListener() calls, so we do not assert on isDone() here,
// and since closeListener may already be completed we cannot rely on it to complete the current listener, so we first
// complete it directly and then pass a noop to closeListener.
closeListener.onResponse(ActionListener.assertOnce(ActionListener.noop()));
} else {
assertFalse("close listener already set, only one is allowed!", closeListener.isDone());
closeListener.onResponse(ActionListener.assertOnce(listener));
}
}
}
}
| TestHttpChannel |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/api/recursive/comparison/dualvalue/DualValue_hasPotentialCyclingValues_Test.java | {
"start": 1385,
"end": 2372
} | class ____ {
private static final List<String> PATH = list("foo", "bar");
@ParameterizedTest(name = "actual {0} / expected {1}")
@MethodSource("values")
void should_return_false_when_actual_or_expected_is_a_container_type_and_true_otherwise(Object actual, Object expected,
boolean expectedResult) {
// GIVEN
DualValue dualValue = new DualValue(PATH, actual, expected);
// WHEN
boolean hasPotentialCyclingValuess = dualValue.hasPotentialCyclingValues();
// THEN
then(hasPotentialCyclingValuess).isEqualTo(expectedResult);
}
static Stream<Arguments> values() {
FriendlyPerson person1 = new FriendlyPerson();
FriendlyPerson person2 = new FriendlyPerson();
person1.otherFriends.add(person1);
person1.otherFriends.add(person2);
person2.otherFriends.add(person2);
person2.otherFriends.add(person1);
| DualValue_hasPotentialCyclingValues_Test |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/ClassUtils.java | {
"start": 8718,
"end": 9421
} | class ____, {@code null} if null input.
* @throws ClassCastException if classNames contains a non String entry.
*/
public static List<Class<?>> convertClassNamesToClasses(final List<String> classNames) {
if (classNames == null) {
return null;
}
final List<Class<?>> classes = new ArrayList<>(classNames.size());
classNames.forEach(className -> {
try {
classes.add(Class.forName(className));
} catch (final Exception ex) {
classes.add(null);
}
});
return classes;
}
/**
* Gets the abbreviated name of a {@link Class}.
*
* @param cls the | names |
java | apache__commons-lang | src/test/java/org/apache/commons/lang3/SerializationUtilsTest.java | {
"start": 2013,
"end": 2317
} | class ____ implements Serializable {
private static final long serialVersionUID = 1L;
private void readObject(final ObjectInputStream in) throws ClassNotFoundException {
throw new ClassNotFoundException(SerializationUtilsTest.CLASS_NOT_FOUND_MESSAGE);
}
}
| ClassNotFoundSerialization |
java | junit-team__junit5 | junit-platform-engine/src/main/java/org/junit/platform/engine/discovery/DiscoverySelectorIdentifierParsers.java | {
"start": 2254,
"end": 3184
} | enum ____ {
INSTANCE;
private final Map<String, DiscoverySelectorIdentifierParser> parsersByPrefix;
Singleton() {
Map<String, DiscoverySelectorIdentifierParser> parsersByPrefix = new HashMap<>();
Iterable<DiscoverySelectorIdentifierParser> loadedParsers = ServiceLoader.load(
DiscoverySelectorIdentifierParser.class, ClassLoaderUtils.getDefaultClassLoader());
for (DiscoverySelectorIdentifierParser parser : loadedParsers) {
DiscoverySelectorIdentifierParser previous = parsersByPrefix.put(parser.getPrefix(), parser);
Preconditions.condition(previous == null,
() -> "Duplicate parser for prefix: [%s]; candidate a: [%s]; candidate b: [%s]".formatted(
parser.getPrefix(), requireNonNull(previous).getClass().getName(),
parser.getClass().getName()));
}
this.parsersByPrefix = unmodifiableMap(parsersByPrefix);
}
}
private DiscoverySelectorIdentifierParsers() {
}
}
| Singleton |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/rest/handler/async/AbstractAsynchronousOperationHandlersTest.java | {
"start": 10212,
"end": 10542
} | class ____ {
@Nullable private final Throwable throwable;
@Nullable private final Acknowledge value;
OperationResult(@Nullable Acknowledge value, @Nullable Throwable throwable) {
this.value = value;
this.throwable = throwable;
}
}
private static final | OperationResult |
java | spring-projects__spring-framework | spring-tx/src/main/java/org/springframework/transaction/InvalidIsolationLevelException.java | {
"start": 929,
"end": 1172
} | class ____ extends TransactionUsageException {
/**
* Constructor for InvalidIsolationLevelException.
* @param msg the detail message
*/
public InvalidIsolationLevelException(String msg) {
super(msg);
}
}
| InvalidIsolationLevelException |
java | quarkusio__quarkus | extensions/security/deployment/src/test/java/io/quarkus/security/test/cdi/events/AsyncAuthZFailureEventObserver.java | {
"start": 286,
"end": 647
} | class ____ {
private final List<AuthorizationFailureEvent> observerEvents = new CopyOnWriteArrayList<>();
void logAuthZEvents(@ObservesAsync AuthorizationFailureEvent authZEvent) {
observerEvents.add(authZEvent);
}
List<AuthorizationFailureEvent> getObserverEvents() {
return observerEvents;
}
}
| AsyncAuthZFailureEventObserver |
java | mapstruct__mapstruct | processor/src/main/java/org/mapstruct/ap/internal/conversion/AbstractJodaTypeToStringConversion.java | {
"start": 683,
"end": 789
} | class ____ conversions between Joda-Time types and String.
*
* @author Timo Eckhardt
*/
public abstract | for |
java | alibaba__nacos | console/src/test/java/com/alibaba/nacos/console/filter/XssFilterTest.java | {
"start": 1090,
"end": 1773
} | class ____ {
private static final String CONTENT_SECURITY_POLICY_HEADER = "Content-Security-Policy";
private static final String CONTENT_SECURITY_POLICY = "script-src 'self'";
@Mock
private HttpServletRequest request;
@Mock
private HttpServletResponse response;
@Mock
private FilterChain filterChain;
@Test
void testSetResponseHeader() throws ServletException, IOException {
XssFilter xssFilter = new XssFilter();
xssFilter.doFilterInternal(request, response, filterChain);
Mockito.verify(response).setHeader(CONTENT_SECURITY_POLICY_HEADER, CONTENT_SECURITY_POLICY);
}
}
| XssFilterTest |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/type/EnumTypeHandlerTest.java | {
"start": 1022,
"end": 2927
} | enum ____ {
ONE, TWO
}
private static final TypeHandler<MyEnum> TYPE_HANDLER = new EnumTypeHandler<>(MyEnum.class);
@Override
@Test
public void shouldSetParameter() throws Exception {
TYPE_HANDLER.setParameter(ps, 1, MyEnum.ONE, null);
verify(ps).setString(1, "ONE");
}
@Test
void shouldSetNullParameter() throws Exception {
TYPE_HANDLER.setParameter(ps, 1, null, JdbcType.VARCHAR);
verify(ps).setNull(1, JdbcType.VARCHAR.TYPE_CODE);
}
@Override
@Test
public void shouldGetResultFromResultSetByName() throws Exception {
when(rs.getString("column")).thenReturn("ONE");
assertEquals(MyEnum.ONE, TYPE_HANDLER.getResult(rs, "column"));
verify(rs, never()).wasNull();
}
@Override
@Test
public void shouldGetResultNullFromResultSetByName() throws Exception {
when(rs.getString("column")).thenReturn(null);
assertNull(TYPE_HANDLER.getResult(rs, "column"));
verify(rs, never()).wasNull();
}
@Override
@Test
public void shouldGetResultFromResultSetByPosition() throws Exception {
when(rs.getString(1)).thenReturn("ONE");
assertEquals(MyEnum.ONE, TYPE_HANDLER.getResult(rs, 1));
verify(rs, never()).wasNull();
}
@Override
@Test
public void shouldGetResultNullFromResultSetByPosition() throws Exception {
when(rs.getString(1)).thenReturn(null);
assertNull(TYPE_HANDLER.getResult(rs, 1));
verify(rs, never()).wasNull();
}
@Override
@Test
public void shouldGetResultFromCallableStatement() throws Exception {
when(cs.getString(1)).thenReturn("ONE");
assertEquals(MyEnum.ONE, TYPE_HANDLER.getResult(cs, 1));
verify(cs, never()).wasNull();
}
@Override
@Test
public void shouldGetResultNullFromCallableStatement() throws Exception {
when(cs.getString(1)).thenReturn(null);
assertNull(TYPE_HANDLER.getResult(cs, 1));
verify(cs, never()).wasNull();
}
}
| MyEnum |
java | spring-projects__spring-boot | module/spring-boot-jackson2/src/main/java/org/springframework/boot/jackson2/JsonComponent.java | {
"start": 1585,
"end": 1700
} | class ____ extends JsonSerializer<Customer> {
*
* // ...
*
* }
*
* public static | Serializer |
java | quarkusio__quarkus | integration-tests/jackson/src/main/java/io/quarkus/it/jackson/model/ModelWithJsonTypeIdResolver.java | {
"start": 339,
"end": 499
} | class ____ {
public ModelWithJsonTypeIdResolver() {
}
@JsonIgnore
public abstract String getType();
public static | ModelWithJsonTypeIdResolver |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java | {
"start": 3485,
"end": 15597
} | class ____ extends ESTestCase {
private final IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("foo", Settings.EMPTY);
public void testNodeLock() throws IOException {
final Settings settings = buildEnvSettings(Settings.EMPTY);
NodeEnvironment env = newNodeEnvironment(settings);
List<String> dataPaths = Environment.PATH_DATA_SETTING.get(settings);
// Reuse the same location and attempt to lock again
IllegalStateException ex = expectThrows(
IllegalStateException.class,
() -> new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings))
);
assertThat(ex.getMessage(), containsString("failed to obtain node lock"));
// Close the environment that holds the lock and make sure we can get the lock after release
env.close();
env = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));
assertThat(env.nodeDataPaths(), arrayWithSize(dataPaths.size()));
for (int i = 0; i < dataPaths.size(); i++) {
assertTrue(env.nodeDataPaths()[i].startsWith(PathUtils.get(dataPaths.get(i))));
}
env.close();
assertThat(env.lockedShards(), empty());
}
@SuppressForbidden(reason = "System.out.*")
public void testSegmentInfosTracing() {
// Defaults to not hooking up std out
assertNull(SegmentInfos.getInfoStream());
try {
// False means don't hook up std out
NodeEnvironment.applySegmentInfosTrace(
Settings.builder().put(NodeEnvironment.ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING.getKey(), false).build()
);
assertNull(SegmentInfos.getInfoStream());
// But true means hook std out up statically
NodeEnvironment.applySegmentInfosTrace(
Settings.builder().put(NodeEnvironment.ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING.getKey(), true).build()
);
assertEquals(System.out, SegmentInfos.getInfoStream());
} finally {
// Clean up after ourselves
SegmentInfos.setInfoStream(null);
}
}
// using a literal string here because the logger is mentioned in the docs, and therefore must only be changed with care
private static final String NODE_ENVIRONMENT_LOGGER_NAME = "org.elasticsearch.env.NodeEnvironment";
@TestLogging(reason = "test includes assertions about DEBUG logging", value = NODE_ENVIRONMENT_LOGGER_NAME + ":DEBUG")
public void testShardLock() throws Exception {
try (var env = newNodeEnvironment()) {
Index index = new Index("foo", "fooUUID");
try (var mockLog = MockLog.capture(NodeEnvironment.class); var lock = env.shardLock(new ShardId(index, 0), "1")) {
mockLog.addExpectation(
new MockLog.SeenEventExpectation("hot threads logging", NODE_ENVIRONMENT_LOGGER_NAME, Level.DEBUG, """
hot threads while failing to obtain shard lock for [foo][0]: obtaining shard lock for [2] timed out after [*ms]; \
this shard lock is still held by a different instance of the shard and has been in state [1] for [*/*ms]*""")
);
mockLog.addExpectation(
new MockLog.UnseenEventExpectation(
"second attempt should be suppressed due to throttling",
NODE_ENVIRONMENT_LOGGER_NAME,
Level.DEBUG,
"*obtaining shard lock for [3] timed out*"
)
);
assertEquals(new ShardId(index, 0), lock.getShardId());
assertThat(
expectThrows(ShardLockObtainFailedException.class, () -> env.shardLock(new ShardId(index, 0), "2")).getMessage(),
matchesPattern("""
\\[foo]\\[0]: obtaining shard lock for \\[2] timed out after \\[0ms]; \
this shard lock is still held by a different instance of the shard \
and has been in state \\[1] for \\[.*/[0-9]+ms]""")
);
for (Path path : env.indexPaths(index)) {
Files.createDirectories(path.resolve("0"));
Files.createDirectories(path.resolve("1"));
}
expectThrows(
ShardLockObtainFailedException.class,
() -> env.lockAllForIndex(index, idxSettings, "3", randomIntBetween(0, 10))
);
mockLog.assertAllExpectationsMatched();
}
// can lock again?
env.shardLock(new ShardId(index, 0), "4").close();
List<ShardLock> locks = new ArrayList<>();
try {
locks.addAll(env.lockAllForIndex(index, idxSettings, "5", randomIntBetween(0, 10)));
expectThrows(ShardLockObtainFailedException.class, () -> env.shardLock(new ShardId(index, 0), "6"));
} finally {
IOUtils.close(locks);
}
assertTrue("LockedShards: " + env.lockedShards(), env.lockedShards().isEmpty());
}
}
public void testAvailableIndexFolders() throws Exception {
final NodeEnvironment env = newNodeEnvironment();
final int numIndices = randomIntBetween(1, 10);
Set<String> actualPaths = new HashSet<>();
for (int i = 0; i < numIndices; i++) {
Index index = new Index("foo" + i, "fooUUID" + i);
for (Path path : env.indexPaths(index)) {
Files.createDirectories(path.resolve(MetadataStateFormat.STATE_DIR_NAME));
actualPaths.add(path.getFileName().toString());
}
}
assertThat(actualPaths, equalTo(env.availableIndexFolders()));
assertTrue("LockedShards: " + env.lockedShards(), env.lockedShards().isEmpty());
env.close();
}
public void testAvailableIndexFoldersWithExclusions() throws Exception {
final NodeEnvironment env = newNodeEnvironment();
final int numIndices = randomIntBetween(1, 10);
Set<String> excludedPaths = new HashSet<>();
Set<String> actualPaths = new HashSet<>();
for (int i = 0; i < numIndices; i++) {
Index index = new Index("foo" + i, "fooUUID" + i);
for (Path path : env.indexPaths(index)) {
Files.createDirectories(path.resolve(MetadataStateFormat.STATE_DIR_NAME));
actualPaths.add(path.getFileName().toString());
}
if (randomBoolean()) {
excludedPaths.add(env.indexPaths(index)[0].getFileName().toString());
}
}
assertThat(Sets.difference(actualPaths, excludedPaths), equalTo(env.availableIndexFolders(excludedPaths::contains)));
assertTrue("LockedShards: " + env.lockedShards(), env.lockedShards().isEmpty());
env.close();
}
public void testResolveIndexFolders() throws Exception {
final NodeEnvironment env = newNodeEnvironment();
final int numIndices = randomIntBetween(1, 10);
Map<String, List<Path>> actualIndexDataPaths = new HashMap<>();
for (int i = 0; i < numIndices; i++) {
Index index = new Index("foo" + i, "fooUUID" + i);
Path[] indexPaths = env.indexPaths(index);
for (Path path : indexPaths) {
Files.createDirectories(path);
String fileName = path.getFileName().toString();
List<Path> paths = actualIndexDataPaths.get(fileName);
if (paths == null) {
paths = new ArrayList<>();
}
paths.add(path);
actualIndexDataPaths.put(fileName, paths);
}
}
for (Map.Entry<String, List<Path>> actualIndexDataPathEntry : actualIndexDataPaths.entrySet()) {
List<Path> actual = actualIndexDataPathEntry.getValue();
Path[] actualPaths = actual.toArray(new Path[actual.size()]);
assertThat(actualPaths, equalTo(env.resolveIndexFolder(actualIndexDataPathEntry.getKey())));
}
assertTrue("LockedShards: " + env.lockedShards(), env.lockedShards().isEmpty());
env.close();
}
public void testDeleteSafe() throws Exception {
final NodeEnvironment env = newNodeEnvironment();
final Index index = new Index("foo", "fooUUID");
final ShardLock fooLock = env.shardLock(new ShardId(index, 0), "1");
assertEquals(new ShardId(index, 0), fooLock.getShardId());
for (Path path : env.indexPaths(index)) {
Files.createDirectories(path.resolve("0"));
Files.createDirectories(path.resolve("1"));
}
expectThrows(
ShardLockObtainFailedException.class,
() -> env.deleteShardDirectorySafe(new ShardId(index, 0), idxSettings, shardPaths -> {
assert false : "should not be called " + shardPaths;
})
);
for (Path path : env.indexPaths(index)) {
assertTrue(Files.exists(path.resolve("0")));
assertTrue(Files.exists(path.resolve("1")));
}
{
SetOnce<Path[]> listener = new SetOnce<>();
env.deleteShardDirectorySafe(new ShardId(index, 1), idxSettings, listener::set);
Path[] deletedPaths = listener.get();
for (int i = 0; i < env.dataPaths().length; i++) {
assertThat(deletedPaths[i], equalTo(env.dataPaths()[i].resolve(index).resolve("1")));
}
}
for (Path path : env.indexPaths(index)) {
assertTrue(Files.exists(path.resolve("0")));
assertFalse(Files.exists(path.resolve("1")));
}
expectThrows(
ShardLockObtainFailedException.class,
() -> env.deleteIndexDirectorySafe(index, randomIntBetween(0, 10), idxSettings, indexPaths -> {
assert false : "should not be called " + indexPaths;
})
);
fooLock.close();
for (Path path : env.indexPaths(index)) {
assertTrue(Files.exists(path));
}
final AtomicReference<Throwable> threadException = new AtomicReference<>();
final CountDownLatch latch = new CountDownLatch(1);
final CountDownLatch blockLatch = new CountDownLatch(1);
final CountDownLatch start = new CountDownLatch(1);
if (randomBoolean()) {
Thread t = new Thread(new AbstractRunnable() {
@Override
public void onFailure(Exception e) {
logger.error("unexpected error", e);
threadException.set(e);
latch.countDown();
blockLatch.countDown();
}
@Override
protected void doRun() throws Exception {
start.await();
try (ShardLock autoCloses = env.shardLock(new ShardId(index, 0), "2")) {
blockLatch.countDown();
Thread.sleep(randomIntBetween(1, 10));
}
latch.countDown();
}
});
t.start();
} else {
latch.countDown();
blockLatch.countDown();
}
start.countDown();
blockLatch.await();
final SetOnce<Path[]> listener = new SetOnce<>();
env.deleteIndexDirectorySafe(index, 5000, idxSettings, listener::set);
assertArrayEquals(env.indexPaths(index), listener.get());
assertNull(threadException.get());
for (Path path : env.indexPaths(index)) {
assertFalse(Files.exists(path));
}
latch.await();
assertTrue("LockedShards: " + env.lockedShards(), env.lockedShards().isEmpty());
env.close();
}
public void testStressShardLock() throws IOException, InterruptedException {
| NodeEnvironmentTests |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/metamodel/mapping/internal/PluralAttributeMappingImpl.java | {
"start": 4220,
"end": 36564
} | interface ____ {
/**
* Injects the created attribute mapping
*/
void injectAttributeMapping(PluralAttributeMapping attributeMapping);
}
private final CollectionMappingType<?> collectionMappingType;
private final String referencedPropertyName;
private final String mapKeyPropertyName;
private final CollectionPart elementDescriptor;
private final CollectionPart indexDescriptor;
private final CollectionIdentifierDescriptor identifierDescriptor;
private final FetchTiming fetchTiming;
private final FetchStyle fetchStyle;
private final SoftDeleteMapping softDeleteMapping;
private Boolean hasSoftDelete;
private final String bidirectionalAttributeName;
private final CollectionPersister collectionDescriptor;
private final String separateCollectionTable;
private final String sqlAliasStem;
private final IndexMetadata indexMetadata;
private ForeignKeyDescriptor fkDescriptor;
private OrderByFragment orderByFragment;
private OrderByFragment manyToManyOrderByFragment;
public PluralAttributeMappingImpl(
String attributeName,
Collection bootDescriptor,
PropertyAccess propertyAccess,
AttributeMetadata attributeMetadata,
CollectionMappingType<?> collectionMappingType,
int stateArrayPosition,
int fetchableIndex,
CollectionPart elementDescriptor,
CollectionPart indexDescriptor,
CollectionIdentifierDescriptor identifierDescriptor,
FetchTiming fetchTiming,
FetchStyle fetchStyle,
CascadeStyle cascadeStyle,
ManagedMappingType declaringType,
CollectionPersister collectionDescriptor,
MappingModelCreationProcess creationProcess) {
super( attributeName, fetchableIndex, declaringType, attributeMetadata, stateArrayPosition, propertyAccess );
this.collectionMappingType = collectionMappingType;
this.elementDescriptor = elementDescriptor;
this.indexDescriptor = indexDescriptor;
this.identifierDescriptor = identifierDescriptor;
this.fetchTiming = fetchTiming;
this.fetchStyle = fetchStyle;
this.collectionDescriptor = collectionDescriptor;
this.referencedPropertyName = bootDescriptor.getReferencedPropertyName();
this.mapKeyPropertyName = bootDescriptor instanceof Map map ? map.getMapKeyPropertyName() : null;
this.bidirectionalAttributeName = subStringNullIfEmpty( bootDescriptor.getMappedByProperty(), '.');
this.sqlAliasStem = SqlAliasStemHelper.INSTANCE.generateStemFromAttributeName( attributeName );
separateCollectionTable = bootDescriptor.isOneToMany() ? null : collectionDescriptor.getTableName();
final int baseIndex = bootDescriptor instanceof List list ? list.getBaseIndex() : -1;
indexMetadata = new IndexMetadata() {
@Override
public CollectionPart getIndexDescriptor() {
return indexDescriptor;
}
@Override
public int getListIndexBase() {
return baseIndex;
}
@Override
public String getIndexPropertyName() {
return mapKeyPropertyName;
}
};
softDeleteMapping = resolveSoftDeleteMapping( this, bootDescriptor, getSeparateCollectionTable(), creationProcess );
injectAttributeMapping( elementDescriptor, indexDescriptor, collectionDescriptor, this );
}
/**
* For Hibernate Reactive
*/
protected PluralAttributeMappingImpl(PluralAttributeMappingImpl original) {
super( original );
this.collectionMappingType = original.collectionMappingType;
this.elementDescriptor = original.elementDescriptor;
this.indexDescriptor = original.indexDescriptor;
this.identifierDescriptor = original.identifierDescriptor;
this.fetchTiming = original.fetchTiming;
this.fetchStyle = original.fetchStyle;
this.softDeleteMapping = original.softDeleteMapping;
this.hasSoftDelete = original.hasSoftDelete;
this.collectionDescriptor = original.collectionDescriptor;
this.referencedPropertyName = original.referencedPropertyName;
this.mapKeyPropertyName = original.mapKeyPropertyName;
this.bidirectionalAttributeName = original.bidirectionalAttributeName;
this.sqlAliasStem = original.sqlAliasStem;
this.separateCollectionTable = original.separateCollectionTable;
this.indexMetadata = original.indexMetadata;
this.fkDescriptor = original.fkDescriptor;
this.orderByFragment = original.orderByFragment;
this.manyToManyOrderByFragment = original.manyToManyOrderByFragment;
injectAttributeMapping( elementDescriptor, indexDescriptor, collectionDescriptor, this );
}
private static void injectAttributeMapping(
CollectionPart elementDescriptor,
CollectionPart indexDescriptor,
CollectionPersister collectionDescriptor,
PluralAttributeMapping mapping) {
if ( collectionDescriptor instanceof Aware aware ) {
aware.injectAttributeMapping( mapping );
}
if ( elementDescriptor instanceof Aware aware ) {
aware.injectAttributeMapping( mapping );
}
if ( indexDescriptor instanceof Aware aware ) {
aware.injectAttributeMapping( mapping );
}
}
@Override
public boolean isBidirectionalAttributeName(NavigablePath fetchablePath, ToOneAttributeMapping modelPart) {
return bidirectionalAttributeName == null
// If the FK-target of the to-one mapping is the same as the FK-target of this plural mapping,
// then we say this is bidirectional, given that this is only invoked for model parts of the
// collection elements
? fkDescriptor.getTargetPart() == modelPart.getForeignKeyDescriptor().getTargetPart()
: fetchablePath.getLocalName().endsWith( bidirectionalAttributeName );
}
public void finishInitialization(
@SuppressWarnings("unused")
Property bootProperty,
Collection bootDescriptor,
@SuppressWarnings("unused")
MappingModelCreationProcess creationProcess) {
final boolean hasOrder = bootDescriptor.getOrderBy() != null;
final boolean hasManyToManyOrder = bootDescriptor.getManyToManyOrdering() != null;
if ( hasOrder || hasManyToManyOrder ) {
final TranslationContext context = collectionDescriptor::getFactory;
if ( hasOrder ) {
orderByFragment = OrderByFragmentTranslator.translate(
bootDescriptor.getOrderBy(),
this,
context
);
}
if ( hasManyToManyOrder ) {
manyToManyOrderByFragment = OrderByFragmentTranslator.translate(
bootDescriptor.getManyToManyOrdering(),
this,
context
);
}
}
}
// --- Simple state accessors for the plural-attribute mapping model ---
@Override
public NavigableRole getNavigableRole() {
// Delegates to the persister; the role uniquely names this collection in the metamodel.
return getCollectionDescriptor().getNavigableRole();
}
@Override
public CollectionMappingType<?> getMappedType() {
return collectionMappingType;
}
@Override
public ForeignKeyDescriptor getKeyDescriptor() {
// Foreign key joining the owner to the collection (set late via setForeignKeyDescriptor).
return fkDescriptor;
}
@Override
public CollectionPersister getCollectionDescriptor() {
return collectionDescriptor;
}
@Override
public CollectionPart getElementDescriptor() {
return elementDescriptor;
}
@Override
public CollectionPart getIndexDescriptor() {
// Null for unindexed collections (sets, bags).
return indexDescriptor;
}
@Override
public IndexMetadata getIndexMetadata() {
return indexMetadata;
}
@Override
public CollectionIdentifierDescriptor getIdentifierDescriptor() {
// Only non-null for identifier bags.
return identifierDescriptor;
}
@Override
public SoftDeleteMapping getSoftDeleteMapping() {
return softDeleteMapping;
}
@Override
public TableDetails getSoftDeleteTableDetails() {
// The soft-delete column lives on the collection table itself.
return ( (CollectionMutationTarget) getCollectionDescriptor() ).getCollectionTableMapping();
}
@Override
public OrderByFragment getOrderByFragment() {
return orderByFragment;
}
@Override
public OrderByFragment getManyToManyOrderByFragment() {
return manyToManyOrderByFragment;
}
@Override
public String getSeparateCollectionTable() {
return separateCollectionTable;
}
@Override
public boolean containsTableReference(String tableExpression) {
// Only matches the separate collection table, if any; element/index tables are not considered here.
return tableExpression.equals( separateCollectionTable );
}
@Override
public Generator getGenerator() {
// can never be a generated value
return null;
}
@Override
public String getFetchableName() {
return getAttributeName();
}
@Override
public FetchOptions getMappedFetchOptions() {
// This mapping itself implements FetchOptions (see getStyle()/getTiming() below).
return this;
}
@Override
public FetchStyle getStyle() {
return fetchStyle;
}
@Override
public FetchTiming getTiming() {
return fetchTiming;
}
@Override
public boolean hasPartitionedSelectionMapping() {
return false;
}
@Override
public void applySoftDeleteRestrictions(TableGroup tableGroup, PredicateConsumer predicateConsumer) {
// Applies "not deleted" restrictions for both the associated entity (one-to-many /
// many-to-many element) and the collection table itself, when soft delete is in play.
if ( hasSoftDelete() ) {
final var descriptor = getCollectionDescriptor();
if ( descriptor.isOneToMany() || descriptor.isManyToMany() ) {
// see if the associated entity has soft-delete defined
final var elementDescriptor = (EntityCollectionPart) getElementDescriptor();
final var associatedEntityDescriptor = elementDescriptor.getAssociatedEntityMappingType();
final var softDeleteMapping = associatedEntityDescriptor.getSoftDeleteMapping();
if ( softDeleteMapping != null ) {
// Restrict on the entity's primary (soft-delete) table within this table group.
final String primaryTableName =
associatedEntityDescriptor.getSoftDeleteTableDetails().getTableName();
final var primaryTableReference =
tableGroup.resolveTableReference( primaryTableName );
final var softDeleteRestriction =
softDeleteMapping.createNonDeletedRestriction( primaryTableReference );
predicateConsumer.applyPredicate( softDeleteRestriction );
}
}
// apply the collection's soft-delete mapping, if one
final var softDeleteMapping = getSoftDeleteMapping();
if ( softDeleteMapping != null ) {
final var primaryTableReference =
tableGroup.resolveTableReference( getSoftDeleteTableDetails().getTableName() );
final var softDeleteRestriction =
softDeleteMapping.createNonDeletedRestriction( primaryTableReference );
predicateConsumer.applyPredicate( softDeleteRestriction );
}
}
}
@Override
public <T> DomainResult<T> createDomainResult(
NavigablePath navigablePath,
TableGroup tableGroup,
String resultVariable,
DomainResultCreationState creationState) {
// Builds the domain result used to read this collection; assumes the table group
// for the collection's navigable path was already registered by the caller.
final var collectionTableGroup =
creationState.getSqlAstCreationState().getFromClauseAccess()
.getTableGroup( navigablePath );
assert collectionTableGroup != null;
// This is only used for collection initialization where we know the owner is available, so we mark it as visited
// which will cause bidirectional to-one associations to be treated as such and avoid a join
creationState.registerVisitedAssociationKey( fkDescriptor.getAssociationKey() );
//noinspection unchecked
return new CollectionDomainResult( navigablePath, this, resultVariable, tableGroup, creationState );
}
@Override
public Fetch generateFetch(
FetchParent fetchParent,
NavigablePath fetchablePath,
FetchTiming fetchTiming,
boolean selected,
String resultVariable,
DomainResultCreationState creationState) {
// Chooses between three fetch strategies:
//  1. eager + selected  -> join fetch (table group resolved/joined here)
//  2. eager, not joined -> separate select fetch
//  3. delayed           -> lazy fetch (unless the collection type needs a holder,
//     e.g. arrays, which must be select-fetched)
final var sqlAstCreationState = creationState.getSqlAstCreationState();
// Register the FK as visited for the duration of this fetch to break circularity.
final boolean added = creationState.registerVisitedAssociationKey( fkDescriptor.getAssociationKey() );
try {
if ( fetchTiming == FetchTiming.IMMEDIATE ) {
if ( selected ) {
final var collectionTableGroup = resolveCollectionTableGroup(
fetchParent,
fetchablePath,
creationState,
sqlAstCreationState
);
return buildEagerCollectionFetch(
fetchablePath,
this,
collectionTableGroup,
// A key result is only needed for property-ref / mappedBy keys.
referencedPropertyName != null,
fetchParent,
creationState
);
}
else {
return createSelectEagerCollectionFetch(
fetchParent,
fetchablePath,
creationState,
sqlAstCreationState
);
}
}
if ( getCollectionDescriptor().getCollectionType().hasHolder() ) {
return createSelectEagerCollectionFetch(
fetchParent,
fetchablePath,
creationState,
sqlAstCreationState
);
}
return createDelayedCollectionFetch( fetchParent, fetchablePath, creationState, sqlAstCreationState );
}
finally {
// This is only necessary because the association key is too general i.e. also matching FKs that other associations would match
// and on top of this, we are not handling circular fetches for plural attributes yet
if ( added ) {
creationState.removeVisitedAssociationKey( fkDescriptor.getAssociationKey() );
}
}
}
/**
* For Hibernate Reactive.
* Factory for the delayed (lazy) collection fetch; overridable so Reactive can
* substitute its own fetch implementation.
*/
protected Fetch buildDelayedCollectionFetch(
NavigablePath fetchedPath,
PluralAttributeMapping fetchedAttribute,
FetchParent fetchParent,
DomainResult<?> collectionKeyResult,
boolean unfetched) {
return new DelayedCollectionFetch( fetchedPath, fetchedAttribute, fetchParent, collectionKeyResult, unfetched );
}
/**
* For Hibernate Reactive.
* Factory for the eager-by-subsequent-select collection fetch.
*/
protected Fetch buildSelectEagerCollectionFetch(
NavigablePath fetchedPath,
PluralAttributeMapping fetchedAttribute,
DomainResult<?> collectionKeyDomainResult,
FetchParent fetchParent) {
return new SelectEagerCollectionFetch( fetchedPath, fetchedAttribute, collectionKeyDomainResult, fetchParent );
}
/**
* For Hibernate Reactive.
* Factory for the eager join-fetch of the collection.
*/
protected Fetch buildEagerCollectionFetch(
NavigablePath fetchedPath,
PluralAttributeMapping fetchedAttribute,
TableGroup collectionTableGroup,
boolean needsCollectionKeyResult,
FetchParent fetchParent,
DomainResultCreationState creationState) {
return new EagerCollectionFetch(
fetchedPath,
fetchedAttribute,
collectionTableGroup,
needsCollectionKeyResult,
fetchParent,
creationState
);
}
@Override
public Fetch resolveCircularFetch(
		NavigablePath fetchablePath,
		FetchParent fetchParent,
		FetchTiming fetchTiming,
		DomainResultCreationState creationState) {
	// A circular fetch only needs to be broken for an immediate fetch whose
	// association key was already visited on this result-graph path; in that
	// case we fall back to a separate-select eager fetch instead of joining.
	if ( fetchTiming != FetchTiming.IMMEDIATE ) {
		return null;
	}
	if ( !creationState.isAssociationKeyVisited( fkDescriptor.getAssociationKey() ) ) {
		return null;
	}
	return createSelectEagerCollectionFetch(
			fetchParent,
			fetchablePath,
			creationState,
			creationState.getSqlAstCreationState()
	);
}
private Fetch createSelectEagerCollectionFetch(
		FetchParent fetchParent,
		NavigablePath fetchablePath,
		DomainResultCreationState creationState,
		SqlAstCreationState sqlAstCreationState) {
	// A collection-key domain result is only needed when the key targets a
	// property other than the owner's identifier (property-ref style mapping);
	// otherwise the fetch can rely on the owner's id directly.
	final DomainResult<?> keyResult =
			referencedPropertyName == null
					? null
					: getKeyDescriptor().createTargetDomainResult(
							fetchablePath,
							sqlAstCreationState.getFromClauseAccess()
									.getTableGroup( fetchParent.getNavigablePath() ),
							fetchParent,
							creationState
					);
	return buildSelectEagerCollectionFetch( fetchablePath, this, keyResult, fetchParent );
}
// Resolves (or lazily creates and joins) the table group for this collection
// under the given fetchable path, left-joined and marked as fetched.
private TableGroup resolveCollectionTableGroup(
FetchParent fetchParent,
NavigablePath fetchablePath,
DomainResultCreationState creationState,
SqlAstCreationState sqlAstCreationState) {
final var fromClauseAccess = sqlAstCreationState.getFromClauseAccess();
return fromClauseAccess.resolveTableGroup(
fetchablePath,
p -> {
// Not yet registered: create the join off the parent's table group.
final var lhsTableGroup = fromClauseAccess.getTableGroup( fetchParent.getNavigablePath() );
final var tableGroupJoin = createTableGroupJoin(
fetchablePath,
lhsTableGroup,
null,
null,
SqlAstJoinType.LEFT,
true,
false,
creationState.getSqlAstCreationState()
);
lhsTableGroup.addTableGroupJoin( tableGroupJoin );
return tableGroupJoin.getJoinedGroup();
}
);
}
// Builds a lazy collection fetch. Distinguishes bytecode-enhanced lazy
// attributes ("unfetched") from ordinary lazy collections, and only produces a
// key domain result for property-ref style collection keys.
private Fetch createDelayedCollectionFetch(
FetchParent fetchParent,
NavigablePath fetchablePath,
DomainResultCreationState creationState,
SqlAstCreationState sqlAstCreationState) {
final DomainResult<?> collectionKeyDomainResult;
// Lazy property. A null foreign key domain result will lead to
// returning a domain result assembler that returns LazyPropertyInitializer.UNFETCHED_PROPERTY
final var containingEntityMapping = findContainingEntityMapping();
final boolean unfetched;
if ( fetchParent.getReferencedMappingContainer() == containingEntityMapping
&& containingEntityMapping.getEntityPersister().getPropertyLaziness()[getStateArrayPosition()] ) {
// Attribute-level laziness (bytecode enhancement): no key result at all.
collectionKeyDomainResult = null;
unfetched = true;
}
else {
if ( referencedPropertyName != null ) {
collectionKeyDomainResult = getKeyDescriptor().createTargetDomainResult(
fetchablePath,
sqlAstCreationState.getFromClauseAccess().getTableGroup( fetchParent.getNavigablePath() ),
fetchParent,
creationState
);
}
else {
collectionKeyDomainResult = null;
}
unfetched = false;
}
return buildDelayedCollectionFetch(
fetchablePath,
this,
fetchParent,
collectionKeyDomainResult,
unfetched
);
}
@Override
public String getSqlAliasStem() {
return sqlAliasStem;
}
@Override
public SqlAstJoinType getDefaultSqlAstJoinType(TableGroup parentTableGroup) {
// Collections default to LEFT join so owners without elements are still returned.
return SqlAstJoinType.LEFT;
}
@Override
public boolean isSimpleJoinPredicate(Predicate predicate) {
// Delegates to the FK descriptor, which knows the shape of its join predicate.
return fkDescriptor.isSimpleJoinPredicate( predicate );
}
@Override
public TableGroupJoin createTableGroupJoin(
NavigablePath navigablePath,
TableGroup lhs,
@Nullable String explicitSourceAlias,
@Nullable SqlAliasBase explicitSqlAliasBase,
@Nullable SqlAstJoinType requestedJoinType,
boolean fetched,
boolean addsPredicate,
SqlAstCreationState creationState) {
// Creates the join to this collection and routes restriction predicates:
// the FK join predicate goes on the collection join itself, while @Where /
// filter / soft-delete restrictions may need to be applied to the last
// nested join (when there are no nested joins to carry them).
final var collectionPredicateCollector = new PredicateCollector();
final var tableGroup = createRootTableGroupJoin(
navigablePath,
lhs,
explicitSourceAlias,
explicitSqlAliasBase,
requestedJoinType,
fetched,
addsPredicate,
collectionPredicateCollector::applyPredicate,
creationState
);
final var predicateCollector =
tableGroup.getNestedTableGroupJoins().isEmpty()
// No nested table group joins means that the predicate has to be pushed to the last join
? new PredicateCollector()
: collectionPredicateCollector;
getCollectionDescriptor().applyBaseRestrictions(
predicateCollector::applyPredicate,
tableGroup,
true,
creationState.getLoadQueryInfluencers().getEnabledFilters(),
false,
null,
creationState
);
getCollectionDescriptor().applyBaseManyToManyRestrictions(
predicateCollector::applyPredicate,
tableGroup,
true,
creationState.getLoadQueryInfluencers().getEnabledFilters(),
null,
creationState
);
applySoftDeleteRestriction(
predicateCollector::applyPredicate,
tableGroup,
creationState
);
if ( fetched ) {
// Ordering only applies when the collection is actually being fetched.
if ( orderByFragment != null ) {
creationState.applyOrdering( tableGroup, orderByFragment );
}
if ( manyToManyOrderByFragment != null ) {
creationState.applyOrdering( tableGroup, manyToManyOrderByFragment );
}
}
final var tableGroupJoin = new TableGroupJoin(
navigablePath,
determineSqlJoinType( lhs, requestedJoinType, fetched ),
tableGroup,
collectionPredicateCollector.getPredicate()
);
if ( predicateCollector != collectionPredicateCollector ) {
// Restrictions were collected separately: push them onto the join that
// can actually apply them (possibly a nested element/index join).
final var joinForPredicate = TableGroupJoinHelper.determineJoinForPredicateApply( tableGroupJoin );
joinForPredicate.applyPredicate( predicateCollector.getPredicate() );
}
return tableGroupJoin;
}
// Lazily computed and cached: true when either the collection itself or the
// associated entity element is mapped with soft delete.
private boolean hasSoftDelete() {
// NOTE : this needs to be done lazily because the associated entity mapping (if one)
// does not know its SoftDeleteMapping yet when this is created
if ( hasSoftDelete == null ) {
hasSoftDelete =
softDeleteMapping != null
|| getElementDescriptor() instanceof EntityCollectionPart collectionPart
&& collectionPart.getAssociatedEntityMappingType().getSoftDeleteMapping() != null;
}
return hasSoftDelete;
}
// Variant of the soft-delete restriction application used during table group
// join creation; resolves predicates through the SQL expression resolver.
private void applySoftDeleteRestriction(
Consumer<Predicate> predicateConsumer,
TableGroup tableGroup,
SqlAstCreationState creationState) {
if ( hasSoftDelete() ) {
if ( getElementDescriptor() instanceof EntityCollectionPart entityCollectionPart ) {
// Restrict on the associated entity's soft-delete table, if it has one.
final var entityMappingType = entityCollectionPart.getAssociatedEntityMappingType();
final var softDeleteMapping = entityMappingType.getSoftDeleteMapping();
if ( softDeleteMapping != null ) {
final var softDeleteTable = entityMappingType.getSoftDeleteTableDetails();
predicateConsumer.accept( softDeleteMapping.createNonDeletedRestriction(
tableGroup.resolveTableReference( softDeleteTable.getTableName() ),
creationState.getSqlExpressionResolver()
) );
}
}
// Then the collection's own soft-delete restriction, if mapped.
final var softDeleteMapping = getSoftDeleteMapping();
if ( softDeleteMapping != null ) {
final var softDeleteTable = getSoftDeleteTableDetails();
predicateConsumer.accept( softDeleteMapping.createNonDeletedRestriction(
tableGroup.resolveTableReference( softDeleteTable.getTableName() ),
creationState.getSqlExpressionResolver()
) );
}
}
}
public SqlAstJoinType determineSqlJoinType(TableGroup lhs, @Nullable SqlAstJoinType requestedJoinType, boolean fetched) {
	// Soft delete forces an outer join so the "not deleted" restriction can sit
	// in the on-clause without filtering out owner rows.
	if ( hasSoftDelete() ) {
		return SqlAstJoinType.LEFT;
	}
	// An explicitly requested join type always wins.
	if ( requestedJoinType != null ) {
		return requestedJoinType;
	}
	// No explicit request: fetches use the attribute's default, plain joins are inner.
	return fetched ? getDefaultSqlAstJoinType( lhs ) : SqlAstJoinType.INNER;
}
@Override
public TableGroup createRootTableGroupJoin(
NavigablePath navigablePath,
TableGroup lhs,
@Nullable String explicitSourceAlias,
@Nullable SqlAliasBase explicitSqlAliasBase,
@Nullable SqlAstJoinType requestedJoinType,
boolean fetched,
@Nullable Consumer<Predicate> predicateConsumer,
SqlAstCreationState creationState) {
// Convenience overload: delegates to the private variant with addsPredicate=false.
return createRootTableGroupJoin(
navigablePath,
lhs,
explicitSourceAlias,
explicitSqlAliasBase,
requestedJoinType,
fetched,
false,
predicateConsumer,
creationState
);
}
// Creates the collection's table group (one-to-many vs. join-table shape) and,
// when requested, hands the FK join predicate to the given consumer.
private TableGroup createRootTableGroupJoin(
NavigablePath navigablePath,
TableGroup lhs,
String explicitSourceAlias,
SqlAliasBase explicitSqlAliasBase,
SqlAstJoinType requestedJoinType,
boolean fetched,
boolean addsPredicate,
Consumer<Predicate> predicateConsumer,
SqlAstCreationState creationState) {
final CollectionPersister collectionDescriptor = getCollectionDescriptor();
final SqlAstJoinType joinType = determineSqlJoinType( lhs, requestedJoinType, fetched );
// NOTE(review): this alias base appears unused below — both branches derive
// their own via SqlAliasBase.from(explicitSqlAliasBase, ...); confirm.
final SqlAliasBase sqlAliasBase = creationState.getSqlAliasBaseGenerator().createSqlAliasBase( getSqlAliasStem() );
final TableGroup tableGroup;
if ( collectionDescriptor.isOneToMany() ) {
tableGroup = createOneToManyTableGroup(
lhs.canUseInnerJoins() && joinType == SqlAstJoinType.INNER,
joinType,
navigablePath,
fetched,
addsPredicate,
explicitSourceAlias,
sqlAliasBase,
creationState
);
}
else {
tableGroup = createCollectionTableGroup(
lhs.canUseInnerJoins() && joinType == SqlAstJoinType.INNER,
joinType,
navigablePath,
fetched,
addsPredicate,
explicitSourceAlias,
sqlAliasBase,
creationState
);
}
if ( predicateConsumer != null ) {
// FK join predicate between the owner (lhs) and the collection table group.
predicateConsumer.accept( getKeyDescriptor().generateJoinPredicate( lhs, tableGroup, creationState ) );
}
return tableGroup;
}
@Override
public void setForeignKeyDescriptor(ForeignKeyDescriptor fkDescriptor) {
// Late initialization: the FK descriptor is resolved after construction.
this.fkDescriptor = fkDescriptor;
}
// Builds the table group for a one-to-many collection: the element entity's
// own table group wrapped as a OneToManyTableGroup, plus an optional nested
// join for the index part (e.g. map key or list index mapped elsewhere).
private TableGroup createOneToManyTableGroup(
boolean canUseInnerJoins,
SqlAstJoinType joinType,
NavigablePath navigablePath,
boolean fetched,
boolean addsPredicate,
String sourceAlias,
SqlAliasBase explicitSqlAliasBase,
SqlAstCreationState creationState) {
final var sqlAliasBase = SqlAliasBase.from(
explicitSqlAliasBase,
sourceAlias,
this,
creationState.getSqlAliasBaseGenerator()
);
final var oneToManyCollectionPart = (OneToManyCollectionPart) elementDescriptor;
final var elementTableGroup = oneToManyCollectionPart.createAssociatedTableGroup(
canUseInnerJoins,
navigablePath.append( CollectionPart.Nature.ELEMENT.getName() ),
fetched,
sourceAlias,
sqlAliasBase,
creationState
);
final var tableGroup = new OneToManyTableGroup(
this,
elementTableGroup,
creationState.getCreationContext().getSessionFactory()
);
// For inner joins we never need join nesting
final boolean nestedJoin = joinType != SqlAstJoinType.INNER
// For outer joins we need nesting if there might be an on-condition that refers to the element table
&& ( addsPredicate
|| isAffectedByEnabledFilters( creationState.getLoadQueryInfluencers(), creationState.applyOnlyLoadByKeyFilters() )
|| collectionDescriptor.hasWhereRestrictions() );
if ( indexDescriptor instanceof TableGroupJoinProducer tableGroupJoinProducer ) {
final var tableGroupJoin = tableGroupJoinProducer.createTableGroupJoin(
navigablePath.append( CollectionPart.Nature.INDEX.getName() ),
tableGroup,
null,
sqlAliasBase,
joinType,
fetched,
false,
creationState
);
tableGroup.registerIndexTableGroup( tableGroupJoin, nestedJoin );
}
return tableGroup;
}
// Builds the table group for a collection backed by its own table (element
// collection or many-to-many join table), with optional nested joins for the
// element and index parts when those are associations.
private TableGroup createCollectionTableGroup(
boolean canUseInnerJoins,
SqlAstJoinType joinType,
NavigablePath navigablePath,
boolean fetched,
boolean addsPredicate,
String sourceAlias,
SqlAliasBase explicitSqlAliasBase,
SqlAstCreationState creationState) {
assert !getCollectionDescriptor().isOneToMany();
final var sqlAliasBase = SqlAliasBase.from(
explicitSqlAliasBase,
sourceAlias,
this,
creationState.getSqlAliasBaseGenerator()
);
final String collectionTableName = collectionDescriptor.getTableName();
final var collectionTableReference = new NamedTableReference(
collectionTableName,
sqlAliasBase.generateNewAlias(),
true
);
final var tableGroup = new CollectionTableGroup(
canUseInnerJoins,
navigablePath,
this,
fetched,
sourceAlias,
collectionTableReference,
true,
sqlAliasBase,
s -> false,
null,
creationState.getCreationContext().getSessionFactory()
);
// For inner joins we never need join nesting
final boolean nestedJoin = joinType != SqlAstJoinType.INNER
// For outer joins we need nesting if there might be an on-condition that refers to the element table
&& ( addsPredicate
|| isAffectedByEnabledFilters( creationState.getLoadQueryInfluencers(), creationState.applyOnlyLoadByKeyFilters() )
|| collectionDescriptor.hasWhereRestrictions() );
if ( elementDescriptor instanceof TableGroupJoinProducer tableGroupJoinProducer ) {
// Nested joins are forced INNER so the on-condition semantics stay correct.
final var tableGroupJoin = tableGroupJoinProducer.createTableGroupJoin(
navigablePath.append( CollectionPart.Nature.ELEMENT.getName() ),
tableGroup,
null,
sqlAliasBase,
nestedJoin ? SqlAstJoinType.INNER : joinType,
fetched,
false,
creationState
);
tableGroup.registerElementTableGroup( tableGroupJoin, nestedJoin );
}
if ( indexDescriptor instanceof TableGroupJoinProducer tableGroupJoinProducer ) {
final var tableGroupJoin = tableGroupJoinProducer.createTableGroupJoin(
navigablePath.append( CollectionPart.Nature.INDEX.getName() ),
tableGroup,
null,
sqlAliasBase,
nestedJoin ? SqlAstJoinType.INNER : joinType,
fetched,
false,
creationState
);
tableGroup.registerIndexTableGroup( tableGroupJoin, nestedJoin );
}
return tableGroup;
}
@Override
public TableGroup createRootTableGroup(
		boolean canUseInnerJoins,
		NavigablePath navigablePath,
		String explicitSourceAlias,
		SqlAliasBase explicitSqlAliasBase, Supplier<Consumer<Predicate>> additionalPredicateCollectorAccess,
		SqlAstCreationState creationState) {
	// A root table group is never fetched and adds no extra predicate here;
	// simply dispatch on the structural shape of the collection mapping.
	return getCollectionDescriptor().isOneToMany()
			? createOneToManyTableGroup(
					canUseInnerJoins,
					SqlAstJoinType.INNER,
					navigablePath,
					false,
					false,
					explicitSourceAlias,
					explicitSqlAliasBase,
					creationState
			)
			: createCollectionTableGroup(
					canUseInnerJoins,
					SqlAstJoinType.INNER,
					navigablePath,
					false,
					false,
					explicitSourceAlias,
					explicitSqlAliasBase,
					creationState
			);
}
// --- Delegating overrides: batch size, filter/fetch-profile sensitivity, role ---
@Override
public int getBatchSize() {
return getCollectionDescriptor().getBatchSize();
}
@Override
public boolean isAffectedByEnabledFilters(LoadQueryInfluencers influencers, boolean onlyApplyForLoadByKeyFilters) {
return getCollectionDescriptor().isAffectedByEnabledFilters( influencers, onlyApplyForLoadByKeyFilters );
}
@Override
public boolean isAffectedByEntityGraph(LoadQueryInfluencers influencers) {
return getCollectionDescriptor().isAffectedByEntityGraph( influencers );
}
@Override
public void registerAffectingFetchProfile(String fetchProfileName) {
// Only forwarded when the persister participates in fetch profiles.
if ( collectionDescriptor instanceof FetchProfileAffectee affectee ) {
affectee.registerAffectingFetchProfile( fetchProfileName);
}
}
@Override
public boolean isAffectedByEnabledFetchProfiles(LoadQueryInfluencers influencers) {
return getCollectionDescriptor().isAffectedByEnabledFetchProfiles( influencers );
}
@Override
public String getRootPathName() {
return getCollectionDescriptor().getRole();
}
@Override
public ModelPart findSubPart(String name, EntityMappingType treatTargetType) {
// First try to resolve the name against the element (implicit element dereference),
// then fall back to the special part names: {element}, {index}, {id}.
if ( elementDescriptor instanceof ModelPartContainer modelPartContainer ) {
final var subPart = modelPartContainer.findSubPart( name, null );
if ( subPart != null ) {
return subPart;
}
}
final var nature = CollectionPart.Nature.fromName( name );
if ( nature != null ) {
return switch ( nature ) {
case ELEMENT -> elementDescriptor;
case INDEX -> indexDescriptor;
case ID -> identifierDescriptor;
};
}
return null;
}
@Override
public void forEachSubPart(IndexedConsumer<ModelPart> consumer, EntityMappingType treatTarget) {
	// Visit the sub-parts with contiguous positions: the element first, then
	// the optional index and identifier parts.
	consumer.accept( 0, elementDescriptor );
	int position = 1;
	if ( indexDescriptor != null ) {
		consumer.accept( position++, indexDescriptor );
	}
	if ( identifierDescriptor != null ) {
		// Bug fix: was `position + 1`, which skipped a position and passed a
		// non-contiguous index (3 instead of 2, or 2 instead of 1) to the consumer.
		consumer.accept( position, identifierDescriptor );
	}
}
@Override
public void applySqlSelections(
NavigablePath navigablePath, TableGroup tableGroup, DomainResultCreationState creationState) {
// Selections for a collection are the selections of its element.
elementDescriptor.applySqlSelections( navigablePath, tableGroup, creationState );
}
@Override
public void applySqlSelections(
NavigablePath navigablePath,
TableGroup tableGroup,
DomainResultCreationState creationState,
BiConsumer<SqlSelection, JdbcMapping> selectionConsumer) {
elementDescriptor.applySqlSelections( navigablePath, tableGroup, creationState, selectionConsumer );
}
@Override
public <X, Y> int breakDownJdbcValues(
Object domainValue,
int offset,
X x,
Y y,
JdbcValueBiConsumer<X, Y> valueConsumer,
SharedSessionContractImplementor session) {
// A plural attribute has no JDBC value breakdown of its own.
throw new UnsupportedOperationException();
}
@Override
public void visitSubParts(Consumer<ModelPart> consumer, EntityMappingType treatTargetType) {
// NOTE(review): unlike forEachSubPart, the identifier descriptor is not
// visited here — confirm whether that asymmetry is intentional.
consumer.accept( elementDescriptor );
if ( indexDescriptor != null ) {
consumer.accept( indexDescriptor );
}
}
@Override
public String getContainingTableExpression() {
return getKeyDescriptor().getKeyTable();
}
@Override
public int getJdbcTypeCount() {
// The plural attribute itself contributes no JDBC values (its parts do).
return 0;
}
@Override
public JdbcMapping getJdbcMapping(int index) {
// Consistent with getJdbcTypeCount() == 0: any index is out of bounds.
throw new IndexOutOfBoundsException( index );
}
@Override
public SelectableMapping getSelectable(int columnIndex) {
return null;
}
@Override
public int forEachJdbcType(int offset, IndexedConsumer<JdbcMapping> action) {
return 0;
}
@Override
public Object disassemble(Object value, SharedSessionContractImplementor session) {
// Cache-related operations delegate to the element part.
return elementDescriptor.disassemble( value, session );
}
@Override
public void addToCacheKey(MutableCacheKeyBuilder cacheKey, Object value, SharedSessionContractImplementor session) {
elementDescriptor.addToCacheKey( cacheKey, value, session );
}
@Override
public <X, Y> int forEachDisassembledJdbcValue(
Object value,
int offset,
X x,
Y y,
JdbcValuesBiConsumer<X, Y> valuesConsumer,
SharedSessionContractImplementor session) {
return elementDescriptor.forEachDisassembledJdbcValue( value, offset, x, y, valuesConsumer, session );
}
@Override
public String toString() {
return "PluralAttribute(" + getCollectionDescriptor().getRole() + ")";
}
}
| Aware |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/support/monitor/MonitorClient.java | {
"start": 1983,
"end": 14900
} | class ____ {
private static final Log LOG = LogFactory.getLog(MonitorClient.class);
private static final long DEFAULT_TIME_BETWEEN_COLLECT = 60 * 5;
private ScheduledExecutorService scheduler;
private int schedulerThreadSize = 1;
private long timeBetweenSqlCollect = DEFAULT_TIME_BETWEEN_COLLECT;
private long timeBetweenSpringCollect = DEFAULT_TIME_BETWEEN_COLLECT;
private long timeBetweenWebUriCollect = DEFAULT_TIME_BETWEEN_COLLECT;
private TimeUnit timeUnit = TimeUnit.SECONDS;
private boolean collectSqlEnable = true;
private boolean collectSqlWallEnable = true;
private boolean collectSpringMethodEnable = true;
private boolean collectWebAppEnable = true;
private boolean collectWebURIEnable = true;
private MonitorDao dao;
private String domain;
private String app;
private String cluster;
private String host;
private String ip;
private int pid;
public MonitorClient() {
String name = ManagementFactory.getRuntimeMXBean().getName();
String[] items = name.split("@");
pid = Integer.parseInt(items[0]);
host = items[1];
ip = getLocalIPAddress().getHostAddress();
configFromProperty(System.getProperties());
}
public void configFromProperty(Properties properties) {
{
Integer value = getInteger(properties, "druid.monitor.client.schedulerThreadSize");
if (value != null) {
this.setSchedulerThreadSize(value);
}
}
{
Integer value = getInteger(properties, "druid.monitor.client.timeBetweenSqlCollect");
if (value != null) {
this.setTimeBetweenSqlCollect(value);
}
}
{
Integer value = getInteger(properties, "druid.monitor.client.timeBetweenSpringCollect");
if (value != null) {
this.setTimeBetweenSpringCollect(value);
}
}
{
Integer value = getInteger(properties, "druid.monitor.client.timeBetweenWebUriCollect");
if (value != null) {
this.setTimeBetweenWebUriCollect(value);
}
}
{
Boolean value = getBoolean(properties, "druid.monitor.client.collectSqlEnable");
if (value != null) {
this.setCollectSqlEnable(value);
}
}
{
Boolean value = getBoolean(properties, "druid.monitor.client.collectSqlWallEnable");
if (value != null) {
this.setCollectSqlWallEnable(value);
}
}
{
Boolean value = getBoolean(properties, "druid.monitor.client.collectSpringMethodEnable");
if (value != null) {
this.setCollectSpringMethodEnable(value);
}
}
{
Boolean value = getBoolean(properties, "druid.monitor.client.collectWebAppEnable");
if (value != null) {
this.setCollectWebAppEnable(value);
}
}
{
Boolean value = getBoolean(properties, "druid.monitor.client.collectWebURIEnable");
if (value != null) {
this.setCollectWebURIEnable(value);
}
}
{
domain = properties.getProperty("druid.monitor.domain");
if (StringUtils.isEmpty(domain)) {
domain = "default";
}
}
{
app = properties.getProperty("druid.monitor.app");
if (StringUtils.isEmpty(app)) {
app = "default";
}
}
{
cluster = properties.getProperty("druid.monitor.cluster");
if (StringUtils.isEmpty(cluster)) {
cluster = "default";
}
}
}
public void stop() {
}
public void start() {
checkInst();
if (scheduler == null) {
scheduler = new ScheduledThreadPoolExecutor(schedulerThreadSize);
}
scheduler.scheduleAtFixedRate(new Runnable() {
@Override
public void run() {
collectSql();
}
}, timeBetweenSqlCollect, timeBetweenSqlCollect, timeUnit);
scheduler.scheduleAtFixedRate(new Runnable() {
@Override
public void run() {
collectSpringMethod();
}
}, timeBetweenSpringCollect, timeBetweenSpringCollect, timeUnit);
scheduler.scheduleAtFixedRate(new Runnable() {
@Override
public void run() {
collectWebURI();
}
}, timeBetweenWebUriCollect, timeBetweenWebUriCollect, timeUnit);
}
public ScheduledExecutorService getScheduler() {
return scheduler;
}
public void setScheduler(ScheduledExecutorService scheduler) {
this.scheduler = scheduler;
}
public void checkInst() {
try {
dao.insertAppIfNotExits(domain, app);
dao.insertClusterIfNotExits(domain, app, cluster);
dao.insertOrUpdateInstance(domain, app, cluster, host, ip, Utils.getStartTime(), pid);
} catch (Exception ex) {
LOG.error("checkInst error", ex);
}
}
@SuppressWarnings("resource")
public void collectSql() {
if ((!collectSqlEnable) && !collectSqlWallEnable) {
return;
}
Set<Object> dataSources = DruidDataSourceStatManager.getInstances().keySet();
List<DruidDataSourceStatValue> statValueList = new ArrayList<DruidDataSourceStatValue>(dataSources.size());
List<WallProviderStatValue> wallStatValueList = new ArrayList<WallProviderStatValue>();
for (Object item : dataSources) {
if (!(item instanceof DruidDataSource)) {
continue;
}
DruidDataSource dataSource = (DruidDataSource) item;
if (collectSqlEnable) {
DruidDataSourceStatValue statValue = dataSource.getStatValueAndReset();
statValueList.add(statValue);
}
if (collectSqlWallEnable) {
WallProviderStatValue wallStatValue = dataSource.getWallStatValue(true);
if (wallStatValue != null && wallStatValue.getCheckCount() > 0) {
wallStatValueList.add(wallStatValue);
}
}
}
MonitorContext ctx = createContext();
if (statValueList.size() > 0) {
dao.saveSql(ctx, statValueList);
}
if (wallStatValueList.size() > 0) {
dao.saveSqlWall(ctx, wallStatValueList);
}
}
private MonitorContext createContext() {
MonitorContext ctx = new MonitorContext();
ctx.setDomain(domain);
ctx.setApp(app);
ctx.setCluster(cluster);
ctx.setCollectTime(new Date());
ctx.setPID(pid);
ctx.setHost(host);
ctx.setCollectTime(Utils.getStartTime());
return ctx;
}
private void collectSpringMethod() {
if (!collectSpringMethodEnable) {
return;
}
List<SpringMethodStatValue> statValueList = new ArrayList<SpringMethodStatValue>();
Set<Object> stats = SpringStatManager.getInstance().getSpringStatSet();
for (Object item : stats) {
if (!(item instanceof SpringStat)) {
continue;
}
SpringStat sprintStat = (SpringStat) item;
statValueList.addAll(sprintStat.getStatList(true));
}
if (statValueList.size() > 0) {
MonitorContext ctx = createContext();
dao.saveSpringMethod(ctx, statValueList);
}
}
private void collectWebURI() {
if ((!collectWebAppEnable) && !collectWebURIEnable) {
return;
}
List<WebURIStatValue> webURIValueList = new ArrayList<WebURIStatValue>();
List<WebAppStatValue> webAppStatValueList = new ArrayList<WebAppStatValue>();
Set<Object> stats = WebAppStatManager.getInstance().getWebAppStatSet();
for (Object item : stats) {
if (!(item instanceof WebAppStat)) {
continue;
}
WebAppStat webAppStat = (WebAppStat) item;
if (collectWebAppEnable) {
WebAppStatValue webAppStatValue = webAppStat.getStatValue(true);
webAppStatValueList.add(webAppStatValue);
}
if (collectWebURIEnable) {
webURIValueList.addAll(webAppStat.getURIStatValueList(true));
}
}
MonitorContext ctx = createContext();
if (webURIValueList.size() > 0) {
dao.saveWebURI(ctx, webURIValueList);
}
if (webAppStatValueList.size() > 0) {
dao.saveWebApp(ctx, webAppStatValueList);
}
}
public List<JdbcSqlStatValue> loadSqlList(Map<String, Object> filters) {
return dao.loadSqlList(filters);
}
public MonitorDao getDao() {
return dao;
}
public void setDao(MonitorDao dao) {
this.dao = dao;
}
public long getTimeBetweenSqlCollect() {
return timeBetweenSqlCollect;
}
public void setTimeBetweenSqlCollect(long timeBetweenSqlCollect) {
this.timeBetweenSqlCollect = timeBetweenSqlCollect;
}
public long getTimeBetweenSpringCollect() {
return timeBetweenSpringCollect;
}
public void setTimeBetweenSpringCollect(long timeBetweenSpringCollect) {
this.timeBetweenSpringCollect = timeBetweenSpringCollect;
}
public long getTimeBetweenWebUriCollect() {
return timeBetweenWebUriCollect;
}
public void setTimeBetweenWebUriCollect(long timeBetweenWebUriCollect) {
this.timeBetweenWebUriCollect = timeBetweenWebUriCollect;
}
public TimeUnit getTimeUnit() {
return timeUnit;
}
public void setTimeUnit(TimeUnit timeUnit) {
this.timeUnit = timeUnit;
}
public boolean isCollectSqlEnable() {
return collectSqlEnable;
}
public void setCollectSqlEnable(boolean collectSqlEnable) {
this.collectSqlEnable = collectSqlEnable;
}
public boolean isCollectSqlWallEnable() {
return collectSqlWallEnable;
}
public void setCollectSqlWallEnable(boolean collectSqlWallEnable) {
this.collectSqlWallEnable = collectSqlWallEnable;
}
public boolean isCollectSpringMethodEnable() {
return collectSpringMethodEnable;
}
public void setCollectSpringMethodEnable(boolean collectSpringMethodEnable) {
this.collectSpringMethodEnable = collectSpringMethodEnable;
}
public boolean isCollectWebAppEnable() {
return collectWebAppEnable;
}
public void setCollectWebAppEnable(boolean collectWebAppEnable) {
this.collectWebAppEnable = collectWebAppEnable;
}
public boolean isCollectWebURIEnable() {
return collectWebURIEnable;
}
public void setCollectWebURIEnable(boolean collectWebURIEnable) {
this.collectWebURIEnable = collectWebURIEnable;
}
public int getSchedulerThreadSize() {
return schedulerThreadSize;
}
public void setSchedulerThreadSize(int schedulerThreadSize) {
this.schedulerThreadSize = schedulerThreadSize;
}
public String getDomain() {
return domain;
}
public void setDomain(String domain) {
this.domain = domain;
}
public String getApp() {
return app;
}
public void setApp(String app) {
this.app = app;
}
public String getCluster() {
return cluster;
}
public void setCluster(String cluster) {
this.cluster = cluster;
}
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public int getPid() {
return pid;
}
public void setPid(int pid) {
this.pid = pid;
}
public static InetAddress getLocalIPAddress() {
try {
Enumeration<?> netInterfaces = NetworkInterface.getNetworkInterfaces();
InetAddress inetAddress = null;
while (netInterfaces.hasMoreElements()) {
NetworkInterface ni = (NetworkInterface) netInterfaces.nextElement();
Enumeration<?> e2 = ni.getInetAddresses();
while (e2.hasMoreElements()) {
inetAddress = (InetAddress) e2.nextElement();
if (!inetAddress.isLoopbackAddress() && !inetAddress.getHostAddress().contains(":")) {
return inetAddress;
}
}
}
} catch (Exception e) {
LOG.error("getLocalIP error", e);
}
return null;
}
}
| MonitorClient |
java | elastic__elasticsearch | x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/event/parser/field/SearchResultAnalyticsEventFieldTests.java | {
"start": 1252,
"end": 2354
} | class ____ extends AnalyticsEventFieldParserTestCase<Object> {
@Override
public List<String> requiredFields() {
return Collections.emptyList();
}
@Override
protected Map<String, Object> createTestInstance() {
return new HashMap<>(randomEventSearchResultField());
}
@Override
protected ContextParser<AnalyticsEvent.Context, Map<String, Object>> parser() {
return SearchResultAnalyticsEventField::fromXContent;
}
public static Map<String, Object> randomEventSearchResultField() {
List<?> items = randomList(
between(1, 10),
() -> Map.ofEntries(
entry(DOCUMENT_FIELD.getPreferredName(), randomEventDocumentField()),
entry(PAGE_FIELD.getPreferredName(), PageAnalyticsEventFieldTests.randomEventPageField())
)
);
return Map.ofEntries(
entry(SEARCH_RESULTS_TOTAL_FIELD.getPreferredName(), randomNonNegativeInt()),
entry(SEARCH_RESULT_ITEMS_FIELD.getPreferredName(), items)
);
}
}
| SearchResultAnalyticsEventFieldTests |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/query/joinfetch/JoinFetchImplicitOwnerTest.java | {
"start": 4456,
"end": 4957
} | class ____ {
@Id
public Long id;
@ManyToOne( fetch = FetchType.LAZY )
@JoinColumn( name = "lateral_id" )
public LateralEntity lateralEntity;
public ImageLog() {
}
public ImageLog(Long id, LateralEntity lateralEntity) {
this.id = id;
this.lateralEntity = lateralEntity;
}
public Long getId() {
return id;
}
public LateralEntity getLateralEntity() {
return lateralEntity;
}
}
@Entity( name = "LateralEntity" )
@Table( name = "laterals" )
public static | ImageLog |
java | elastic__elasticsearch | x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiUtils.java | {
"start": 397,
"end": 931
} | class ____ {
public static final String HOST = "api.openai.com";
public static final String VERSION_1 = "v1";
public static final String EMBEDDINGS_PATH = "embeddings";
public static final String CHAT_PATH = "chat";
public static final String COMPLETIONS_PATH = "completions";
public static final String ORGANIZATION_HEADER = "OpenAI-Organization";
public static Header createOrgHeader(String org) {
return new BasicHeader(ORGANIZATION_HEADER, org);
}
private OpenAiUtils() {}
}
| OpenAiUtils |
java | apache__flink | flink-formats/flink-parquet/src/test/java/org/apache/flink/formats/parquet/avro/AvroParquetStreamingFileSinkITCase.java | {
"start": 7686,
"end": 8589
} | class ____ extends AbstractCollection<GenericRecord>
implements Serializable {
@Override
public Iterator<GenericRecord> iterator() {
final GenericRecord rec1 = new GenericData.Record(Address.getClassSchema());
rec1.put(0, 1);
rec1.put(1, "a");
rec1.put(2, "b");
rec1.put(3, "c");
rec1.put(4, "12345");
final GenericRecord rec2 = new GenericData.Record(Address.getClassSchema());
rec2.put(0, 2);
rec2.put(1, "x");
rec2.put(2, "y");
rec2.put(3, "z");
rec2.put(4, "98765");
return Arrays.asList(rec1, rec2).iterator();
}
@Override
public int size() {
return 2;
}
}
// ------------------------------------------------------------------------
}
| GenericTestDataCollection |
java | netty__netty | microbench/src/main/java/io/netty/handler/codec/http2/HpackBenchmarkUtil.java | {
"start": 1581,
"end": 3796
} | class ____ {
final HpackHeadersSize size;
final boolean limitToAscii;
HeadersKey(HpackHeadersSize size, boolean limitToAscii) {
this.size = size;
this.limitToAscii = limitToAscii;
}
List<HpackHeader> newHeaders() {
return size.newHeaders(limitToAscii);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
HeadersKey that = (HeadersKey) o;
if (limitToAscii != that.limitToAscii) {
return false;
}
return size == that.size;
}
@Override
public int hashCode() {
int result = size.hashCode();
result = 31 * result + (limitToAscii ? 1 : 0);
return result;
}
}
private static final Map<HeadersKey, List<HpackHeader>> headersMap;
static {
HpackHeadersSize[] sizes = HpackHeadersSize.values();
headersMap = new HashMap<HeadersKey, List<HpackHeader>>(sizes.length * 2);
for (HpackHeadersSize size : sizes) {
HeadersKey key = new HeadersKey(size, true);
headersMap.put(key, key.newHeaders());
key = new HeadersKey(size, false);
headersMap.put(key, key.newHeaders());
}
}
/**
* Gets headers for the given size and whether the key/values should be limited to ASCII.
*/
static List<HpackHeader> headers(HpackHeadersSize size, boolean limitToAscii) {
return headersMap.get(new HeadersKey(size, limitToAscii));
}
static Http2Headers http2Headers(HpackHeadersSize size, boolean limitToAscii) {
List<HpackHeader> hpackHeaders = headersMap.get(new HeadersKey(size, limitToAscii));
Http2Headers http2Headers = new DefaultHttp2Headers(false);
for (int i = 0; i < hpackHeaders.size(); ++i) {
HpackHeader hpackHeader = hpackHeaders.get(i);
http2Headers.add(hpackHeader.name, hpackHeader.value);
}
return http2Headers;
}
}
| HeadersKey |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/function/array/ArrayRemoveTest.java | {
"start": 1605,
"end": 5728
} | class ____ {
@BeforeEach
public void prepareData(SessionFactoryScope scope) {
scope.inTransaction( em -> {
em.persist( new EntityWithArrays( 1L, new String[]{} ) );
em.persist( new EntityWithArrays( 2L, new String[]{ "abc", null, "def" } ) );
em.persist( new EntityWithArrays( 3L, null ) );
} );
}
@AfterEach
public void cleanup(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
}
@Test
public void testRemove(SessionFactoryScope scope) {
scope.inSession( em -> {
//tag::hql-array-remove-example[]
List<Tuple> results = em.createQuery( "select e.id, array_remove(e.theArray, 'abc') from EntityWithArrays e order by e.id", Tuple.class )
.getResultList();
//end::hql-array-remove-example[]
assertEquals( 3, results.size() );
assertEquals( 1L, results.get( 0 ).get( 0 ) );
assertArrayEquals( new String[] {}, results.get( 0 ).get( 1, String[].class ) );
assertEquals( 2L, results.get( 1 ).get( 0 ) );
assertArrayEquals( new String[] { null, "def" }, results.get( 1 ).get( 1, String[].class ) );
assertEquals( 3L, results.get( 2 ).get( 0 ) );
assertNull( results.get( 2 ).get( 1, String[].class ) );
} );
}
@Test
public void testRemoveNullElement(SessionFactoryScope scope) {
scope.inSession( em -> {
List<Tuple> results = em.createQuery( "select e.id, array_remove(e.theArray, null) from EntityWithArrays e order by e.id", Tuple.class )
.getResultList();
assertEquals( 3, results.size() );
assertEquals( 1L, results.get( 0 ).get( 0 ) );
assertArrayEquals( new String[] {}, results.get( 0 ).get( 1, String[].class ) );
assertEquals( 2L, results.get( 1 ).get( 0 ) );
assertArrayEquals( new String[] { "abc", "def" }, results.get( 1 ).get( 1, String[].class ) );
assertEquals( 3L, results.get( 2 ).get( 0 ) );
assertNull( results.get( 2 ).get( 1, String[].class ) );
} );
}
@Test
public void testRemoveNonExisting(SessionFactoryScope scope) {
scope.inSession( em -> {
List<Tuple> results = em.createQuery( "select e.id, array_remove(e.theArray, 'aaa') from EntityWithArrays e order by e.id", Tuple.class )
.getResultList();
assertEquals( 3, results.size() );
assertEquals( 1L, results.get( 0 ).get( 0 ) );
assertArrayEquals( new String[] {}, results.get( 0 ).get( 1, String[].class ) );
assertEquals( 2L, results.get( 1 ).get( 0 ) );
assertArrayEquals( new String[] { "abc", null, "def" }, results.get( 1 ).get( 1, String[].class ) );
assertEquals( 3L, results.get( 2 ).get( 0 ) );
assertNull( results.get( 2 ).get( 1, String[].class ) );
} );
}
@Test
public void testNodeBuilderArray(SessionFactoryScope scope) {
scope.inSession( em -> {
final NodeBuilder cb = (NodeBuilder) em.getCriteriaBuilder();
final JpaCriteriaQuery<Tuple> cq = cb.createTupleQuery();
final JpaRoot<EntityWithArrays> root = cq.from( EntityWithArrays.class );
cq.multiselect(
root.get( "id" ),
cb.arrayRemove( root.<String[]>get( "theArray" ), cb.literal( "xyz" ) ),
cb.arrayRemove( root.get( "theArray" ), "xyz" )
);
em.createQuery( cq ).getResultList();
// Should all fail to compile
// cb.arrayRemove( root.<Integer[]>get( "theArray" ), cb.literal( "xyz" ) );
// cb.arrayRemove( root.<Integer[]>get( "theArray" ), "xyz" );
} );
}
@Test
public void testNodeBuilderCollection(SessionFactoryScope scope) {
scope.inSession( em -> {
final NodeBuilder cb = (NodeBuilder) em.getCriteriaBuilder();
final JpaCriteriaQuery<Tuple> cq = cb.createTupleQuery();
final JpaRoot<EntityWithArrays> root = cq.from( EntityWithArrays.class );
cq.multiselect(
root.get( "id" ),
cb.collectionRemove( root.<Collection<String>>get( "theCollection" ), cb.literal( "xyz" ) ),
cb.collectionRemove( root.get( "theCollection" ), "xyz" )
);
em.createQuery( cq ).getResultList();
// Should all fail to compile
// cb.collectionRemove( root.<Collection<Integer>>get( "theCollection" ), cb.literal( "xyz" ) );
// cb.collectionRemove( root.<Collection<Integer>>get( "theCollection" ), "xyz" );
} );
}
}
| ArrayRemoveTest |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseStorageMonitor.java | {
"start": 1769,
"end": 3843
} | class ____ extends TimelineStorageMonitor {
protected static final TimelineEntityFilters MONITOR_FILTERS =
new TimelineEntityFilters.Builder().entityLimit(1L).build();
protected static final TimelineDataToRetrieve DATA_TO_RETRIEVE =
new TimelineDataToRetrieve(null, null, null, null, null, null);
private Configuration monitorHBaseConf;
private Connection monitorConn;
private TimelineEntityReader reader;
public HBaseStorageMonitor(Configuration conf) throws Exception {
super(conf, Storage.HBase);
this.initialize(conf);
}
private void initialize(Configuration conf) throws Exception {
monitorHBaseConf = HBaseTimelineStorageUtils.
getTimelineServiceHBaseConf(conf);
monitorHBaseConf.setInt("hbase.client.retries.number", 3);
monitorHBaseConf.setLong("hbase.client.pause", 1000);
long monitorInterval = conf.getLong(
YarnConfiguration.TIMELINE_SERVICE_READER_STORAGE_MONITOR_INTERVAL_MS,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_STORAGE_MONITOR_INTERVAL_MS
);
monitorHBaseConf.setLong("hbase.rpc.timeout", monitorInterval);
monitorHBaseConf.setLong("hbase.client.scanner.timeout.period",
monitorInterval);
monitorHBaseConf.setInt("zookeeper.recovery.retry", 1);
monitorConn = ConnectionFactory.createConnection(monitorHBaseConf);
String clusterId = conf.get(YarnConfiguration.RM_CLUSTER_ID,
YarnConfiguration.DEFAULT_RM_CLUSTER_ID);
TimelineReaderContext monitorContext =
new TimelineReaderContext(clusterId, null, null, null, null,
TimelineEntityType.YARN_FLOW_ACTIVITY.toString(), null, null);
reader = TimelineEntityReaderFactory.createMultipleEntitiesReader(
monitorContext, MONITOR_FILTERS, DATA_TO_RETRIEVE);
}
@Override
public void healthCheck() throws Exception {
reader.readEntities(monitorHBaseConf, monitorConn);
}
@Override
public void start() {
super.start();
}
@Override
public void stop() throws Exception {
super.stop();
monitorConn.close();
}
}
| HBaseStorageMonitor |
java | apache__dubbo | dubbo-spring-boot-project/dubbo-spring-boot-autoconfigure/src/main/java/org/apache/dubbo/spring/boot/autoconfigure/observability/DubboObservationAutoConfiguration.java | {
"start": 8655,
"end": 9432
} | class ____ {
@Bean
@ConditionalOnClass(
name = {
"io.micrometer.tracing.handler.TracingAwareMeterObservationHandler",
"io.micrometer.tracing.Tracer",
"io.micrometer.core.instrument.MeterRegistry"
})
TracingAwareMeterObservationHandler<Observation.Context> tracingAwareMeterObservationHandler(
MeterRegistry meterRegistry, Tracer tracer) {
DefaultMeterObservationHandler delegate = new DefaultMeterObservationHandler(meterRegistry);
return new TracingAwareMeterObservationHandler<>(delegate, tracer);
}
}
}
}
| TracingAndMetricsObservationHandlerConfiguration |
java | spring-projects__spring-boot | build-plugin/spring-boot-gradle-plugin/src/test/java/org/springframework/boot/gradle/docs/RunningDocumentationTests.java | {
"start": 1258,
"end": 3461
} | class ____ {
GradleBuild gradleBuild;
@TestTemplate
void bootRunMain() throws IOException {
writeMainClass();
assertThat(this.gradleBuild.script(Examples.DIR + "running/boot-run-main").build("bootRun").getOutput())
.contains("com.example.ExampleApplication");
}
@TestTemplate
void applicationPluginMainClassName() throws IOException {
writeMainClass();
assertThat(this.gradleBuild.script(Examples.DIR + "running/application-plugin-main-class-name")
.build("bootRun")
.getOutput()).contains("com.example.ExampleApplication");
}
@TestTemplate
void springBootDslMainClassName() throws IOException {
writeMainClass();
assertThat(this.gradleBuild.script(Examples.DIR + "running/spring-boot-dsl-main-class-name")
.build("bootRun")
.getOutput()).contains("com.example.ExampleApplication");
}
@TestTemplate
void bootRunSourceResources() {
assertThat(this.gradleBuild.script(Examples.DIR + "running/boot-run-source-resources")
.build("configuredClasspath")
.getOutput()).contains(new File("src/main/resources").getPath());
}
@TestTemplate
void bootRunDisableOptimizedLaunch() {
assertThat(this.gradleBuild.script(Examples.DIR + "running/boot-run-disable-optimized-launch")
.build("optimizedLaunch")
.getOutput()).contains("false");
}
@TestTemplate
void bootRunSystemPropertyDefaultValue() {
assertThat(this.gradleBuild.script(Examples.DIR + "running/boot-run-system-property")
.build("configuredSystemProperties")
.getOutput()).contains("com.example.property = default");
}
@TestTemplate
void bootRunSystemProperty() {
assertThat(this.gradleBuild.script(Examples.DIR + "running/boot-run-system-property")
.build("-Pexample=custom", "configuredSystemProperties")
.getOutput()).contains("com.example.property = custom");
}
private void writeMainClass() throws IOException {
File exampleApplication = new File(this.gradleBuild.getProjectDir(),
"src/main/java/com/example/ExampleApplication.java");
exampleApplication.getParentFile().mkdirs();
try (PrintWriter writer = new PrintWriter(new FileWriter(exampleApplication))) {
writer.println("package com.example;");
writer.println("public | RunningDocumentationTests |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java | {
"start": 1344,
"end": 2638
} | class ____ extends StorageInfo
implements NodeRegistration {
final String rpcAddress; // RPC address of the node
final String httpAddress; // HTTP address of the node
final NamenodeRole role; // node role
public NamenodeRegistration(String address,
String httpAddress,
StorageInfo storageInfo,
NamenodeRole role) {
super(storageInfo);
this.rpcAddress = address;
this.httpAddress = httpAddress;
this.role = role;
}
@Override // NodeRegistration
public String getAddress() {
return rpcAddress;
}
public String getHttpAddress() {
return httpAddress;
}
@Override // NodeRegistration
public String getRegistrationID() {
return Storage.getRegistrationID(this);
}
@Override // NodeRegistration
public int getVersion() {
return super.getLayoutVersion();
}
@Override // NodeRegistration
public String toString() {
return getClass().getSimpleName()
+ "(" + rpcAddress
+ ", role=" + getRole()
+ ")";
}
/**
* Get name-node role.
*/
public NamenodeRole getRole() {
return role;
}
public boolean isRole(NamenodeRole that) {
return role.equals(that);
}
}
| NamenodeRegistration |
java | google__truth | core/src/main/java/com/google/common/truth/StackTraceCleaner.java | {
"start": 8253,
"end": 9811
} | class ____!).
*
* (StandardSubjectBuilder is listed here only for its fail() methods, anyway, so we don't
* have to worry about nested classes like we do with Subject.)
*/
|| isFromClassDirectly(stackTraceElement, STANDARD_SUBJECT_BUILDER_CLASS);
}
private static final ImmutableSet<String> JUNIT_INFRASTRUCTURE_CLASSES =
ImmutableSet.of("org.junit.runner.Runner", "org.junit.runners.model.Statement");
private static boolean isJUnitInfrastructure(StackTraceElement stackTraceElement) {
// It's not clear whether looking at nested classes here is useful, harmful, or neutral.
return isFromClassOrClassNestedInside(stackTraceElement, JUNIT_INFRASTRUCTURE_CLASSES);
}
private static boolean isFromClassOrClassNestedInside(
StackTraceElement stackTraceElement, ImmutableSet<String> recognizedClasses) {
Class<?> stackClass;
try {
stackClass = loadClass(stackTraceElement.getClassName());
} catch (ClassNotFoundException e) {
return false;
}
try {
for (; stackClass != null; stackClass = stackClass.getEnclosingClass()) {
for (String recognizedClass : recognizedClasses) {
if (isSubtypeOf(stackClass, recognizedClass)) {
return true;
}
}
}
} catch (Error e) {
if (e.getClass().getName().equals("com.google.j2objc.ReflectionStrippedError")) {
/*
* We're running under j2objc without reflection. Skip testing the enclosing classes. At
* least we tested the | itself |
java | google__dagger | dagger-producers/main/java/dagger/producers/monitoring/internal/Monitors.java | {
"start": 8988,
"end": 12891
} | class ____ extends ProducerMonitor {
private final ImmutableList<ProducerMonitor> delegates;
DelegatingProducerMonitor(ImmutableList<ProducerMonitor> delegates) {
this.delegates = delegates;
}
@Override
public void requested() {
for (ProducerMonitor delegate : delegates) {
try {
delegate.requested();
} catch (RuntimeException e) {
logProducerMonitorMethodException(e, delegate, "requested");
}
}
}
@Override
public void ready() {
for (ProducerMonitor delegate : delegates) {
try {
delegate.ready();
} catch (RuntimeException e) {
logProducerMonitorMethodException(e, delegate, "ready");
}
}
}
@Override
public void methodStarting() {
for (ProducerMonitor delegate : delegates) {
try {
delegate.methodStarting();
} catch (RuntimeException e) {
logProducerMonitorMethodException(e, delegate, "methodStarting");
}
}
}
@Override
public void methodFinished() {
for (ProducerMonitor delegate : delegates.reverse()) {
try {
delegate.methodFinished();
} catch (RuntimeException e) {
logProducerMonitorMethodException(e, delegate, "methodFinished");
}
}
}
@Override
public void succeeded(Object o) {
for (ProducerMonitor delegate : delegates.reverse()) {
try {
delegate.succeeded(o);
} catch (RuntimeException e) {
logProducerMonitorArgMethodException(e, delegate, "succeeded", o);
}
}
}
@Override
public void failed(Throwable t) {
for (ProducerMonitor delegate : delegates.reverse()) {
try {
delegate.failed(t);
} catch (RuntimeException e) {
logProducerMonitorArgMethodException(e, delegate, "failed", t);
}
}
}
}
/** Returns a provider of a no-op component monitor. */
public static Provider<ProductionComponentMonitor> noOpProductionComponentMonitorProvider() {
return NO_OP_PRODUCTION_COMPONENT_MONITOR_PROVIDER;
}
private static final Provider<ProductionComponentMonitor>
NO_OP_PRODUCTION_COMPONENT_MONITOR_PROVIDER =
new Provider<ProductionComponentMonitor>() {
@Override
public ProductionComponentMonitor get() {
return ProductionComponentMonitor.noOp();
}
};
private static void logCreateException(
RuntimeException e, ProductionComponentMonitor.Factory factory, Object component) {
logger.log(
Level.SEVERE,
"RuntimeException while calling ProductionComponentMonitor.Factory.create on factory "
+ factory
+ " with component "
+ component,
e);
}
private static void logProducerMonitorForException(
RuntimeException e, ProductionComponentMonitor monitor, ProducerToken token) {
logger.log(
Level.SEVERE,
"RuntimeException while calling ProductionComponentMonitor.producerMonitorFor on monitor "
+ monitor
+ " with token "
+ token,
e);
}
private static void logProducerMonitorMethodException(
RuntimeException e, ProducerMonitor monitor, String method) {
logger.log(
Level.SEVERE,
"RuntimeException while calling ProducerMonitor." + method + " on monitor " + monitor,
e);
}
private static void logProducerMonitorArgMethodException(
RuntimeException e, ProducerMonitor monitor, String method, Object arg) {
logger.log(
Level.SEVERE,
"RuntimeException while calling ProducerMonitor."
+ method
+ " on monitor "
+ monitor
+ " with "
+ arg,
e);
}
private Monitors() {}
}
| DelegatingProducerMonitor |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/action/internal/UnresolvedEntityInsertActions.java | {
"start": 11019,
"end": 11686
} | class ____ cannot be loaded.
*/
public static UnresolvedEntityInsertActions deserialize(
ObjectInputStream ois,
EventSource session) throws IOException, ClassNotFoundException {
final var rtn = new UnresolvedEntityInsertActions();
final int queueSize = ois.readInt();
ACTION_LOGGER.deserializingUnresolvedInsertEntries(queueSize);
for ( int i = 0; i < queueSize; i++ ) {
final var unresolvedAction = (AbstractEntityInsertAction) ois.readObject();
unresolvedAction.afterDeserialize( session );
rtn.addUnresolvedEntityInsertAction(
unresolvedAction,
unresolvedAction.findNonNullableTransientEntities()
);
}
return rtn;
}
}
| that |
java | spring-projects__spring-boot | module/spring-boot-resttestclient/src/main/java/org/springframework/boot/resttestclient/autoconfigure/AutoConfigureTestRestTemplate.java | {
"start": 1116,
"end": 1352
} | class ____ enable auto-configuration of a
* {@link TestRestTemplate}.
*
* @author Andy Wilkinson
* @since 4.0.0
*/
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.RUNTIME)
@Documented
@Inherited
@ImportAutoConfiguration
public @ | to |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java | {
"start": 21702,
"end": 24103
} | interface ____ of Zookeeper callback for monitor (exists)
*/
@Override
public synchronized void processResult(int rc, String path, Object ctx,
Stat stat) {
if (isStaleClient(ctx)) return;
monitorLockNodePending = false;
assert wantToBeInElection :
"Got a StatNode result after quitting election";
if (LOG.isDebugEnabled()) {
LOG.debug("StatNode result: " + rc + " for path: " + path
+ " connectionState: " + zkConnectionState + " for " + this);
}
Code code = Code.get(rc);
if (isSuccess(code)) {
// the following owner check completes verification in case the lock znode
// creation was retried
if (stat.getEphemeralOwner() == zkClient.getSessionId()) {
// we own the lock znode. so we are the leader
if (!becomeActive()) {
reJoinElectionAfterFailureToBecomeActive();
}
} else {
// we dont own the lock znode. so we are a standby.
becomeStandby();
}
// the watch set by us will notify about changes
return;
}
if (isNodeDoesNotExist(code)) {
// the lock znode disappeared before we started monitoring it
enterNeutralMode();
joinElectionInternal();
return;
}
String errorMessage = "Received stat error from Zookeeper. code:"
+ code.toString();
LOG.debug(errorMessage);
if (shouldRetry(code)) {
if (statRetryCount < maxRetryNum) {
++statRetryCount;
monitorLockNodeAsync();
return;
}
errorMessage = errorMessage
+ ". Not retrying further znode monitoring connection errors.";
} else if (isSessionExpired(code)) {
// This isn't fatal - the client Watcher will re-join the election
LOG.warn("Lock monitoring failed because session was lost");
return;
}
fatalError(errorMessage);
}
@VisibleForTesting
public boolean getWantToBeInElection() {
return wantToBeInElection;
}
/**
* We failed to become active. Re-join the election, but
* sleep for a few seconds after terminating our existing
* session, so that other nodes have a chance to become active.
* The failure to become active is already logged inside
* becomeActive().
*/
private void reJoinElectionAfterFailureToBecomeActive() {
reJoinElection(SLEEP_AFTER_FAILURE_TO_BECOME_ACTIVE);
}
/**
* | implementation |
java | apache__flink | flink-table/flink-sql-gateway/src/main/java/org/apache/flink/table/gateway/rest/message/session/SessionMessageParameters.java | {
"start": 1260,
"end": 1954
} | class ____ extends MessageParameters {
private final SessionHandleIdPathParameter sessionHandleIdPathParameter =
new SessionHandleIdPathParameter();
public SessionMessageParameters() {
// nothing to resolve
}
public SessionMessageParameters(SessionHandle sessionHandle) {
sessionHandleIdPathParameter.resolve(sessionHandle);
}
@Override
public Collection<MessagePathParameter<?>> getPathParameters() {
return Collections.singletonList(sessionHandleIdPathParameter);
}
@Override
public Collection<MessageQueryParameter<?>> getQueryParameters() {
return Collections.emptyList();
}
}
| SessionMessageParameters |
java | apache__rocketmq | broker/src/main/java/org/apache/rocketmq/broker/controller/ReplicasManager.java | {
"start": 6439,
"end": 41367
} | enum ____ {
INITIAL,
CREATE_TEMP_METADATA_FILE_DONE,
CREATE_METADATA_FILE_DONE,
REGISTERED
}
public void start() {
this.state = State.INITIAL;
updateControllerAddr();
scanAvailableControllerAddresses();
this.scheduledService.scheduleAtFixedRate(this::updateControllerAddr, 2 * 60 * 1000, 2 * 60 * 1000, TimeUnit.MILLISECONDS);
this.scheduledService.scheduleAtFixedRate(this::scanAvailableControllerAddresses, 3 * 1000, 3 * 1000, TimeUnit.MILLISECONDS);
if (!startBasicService()) {
LOGGER.error("Failed to start replicasManager");
this.executorService.submit(() -> {
int retryTimes = 0;
do {
try {
TimeUnit.SECONDS.sleep(RETRY_INTERVAL_SECOND);
} catch (InterruptedException ignored) {
}
retryTimes++;
LOGGER.warn("Failed to start replicasManager, retry times:{}, current state:{}, try it again", retryTimes, this.state);
}
while (!startBasicService());
LOGGER.info("Start replicasManager success, retry times:{}", retryTimes);
});
}
}
private boolean startBasicService() {
if (this.state == State.SHUTDOWN)
return false;
if (this.state == State.INITIAL) {
if (schedulingSyncControllerMetadata()) {
this.state = State.FIRST_TIME_SYNC_CONTROLLER_METADATA_DONE;
LOGGER.info("First time sync controller metadata success, change state to: {}", this.state);
} else {
return false;
}
}
if (this.state == State.FIRST_TIME_SYNC_CONTROLLER_METADATA_DONE) {
for (int retryTimes = 0; retryTimes < 5; retryTimes++) {
if (register()) {
this.state = State.REGISTER_TO_CONTROLLER_DONE;
LOGGER.info("First time register broker success, change state to: {}", this.state);
break;
}
// Try to avoid registration concurrency conflicts in random sleep
try {
Thread.sleep(random.nextInt(1000));
} catch (Exception ignore) {
}
}
// register 5 times but still unsuccessful
if (this.state != State.REGISTER_TO_CONTROLLER_DONE) {
LOGGER.error("Register to broker failed 5 times");
return false;
}
}
if (this.state == State.REGISTER_TO_CONTROLLER_DONE) {
// The scheduled task for heartbeat sending is not starting now, so we should manually send heartbeat request
this.sendHeartbeatToController();
if (this.masterBrokerId != null || brokerElect()) {
LOGGER.info("Master in this broker set is elected, masterBrokerId: {}, masterBrokerAddr: {}", this.masterBrokerId, this.masterAddress);
this.state = State.RUNNING;
setFenced(false);
LOGGER.info("All register process has been done, change state to: {}", this.state);
} else {
return false;
}
}
schedulingSyncBrokerMetadata();
// Register syncStateSet changed listener.
this.haService.registerSyncStateSetChangedListener(this::doReportSyncStateSetChanged);
return true;
}
public void shutdown() {
this.state = State.SHUTDOWN;
this.registerState = RegisterState.INITIAL;
this.executorService.shutdownNow();
this.scheduledService.shutdownNow();
this.scanExecutor.shutdownNow();
}
public synchronized void changeBrokerRole(final Long newMasterBrokerId, final String newMasterAddress,
final Integer newMasterEpoch,
final Integer syncStateSetEpoch, final Set<Long> syncStateSet) throws Exception {
if (newMasterBrokerId != null && newMasterEpoch > this.masterEpoch) {
if (newMasterBrokerId.equals(this.brokerControllerId)) {
changeToMaster(newMasterEpoch, syncStateSetEpoch, syncStateSet);
} else {
changeToSlave(newMasterAddress, newMasterEpoch, newMasterBrokerId);
}
}
}
public void changeToMaster(final int newMasterEpoch, final int syncStateSetEpoch, final Set<Long> syncStateSet) throws Exception {
synchronized (this) {
if (newMasterEpoch > this.masterEpoch) {
LOGGER.info("Begin to change to master, brokerName:{}, replicas:{}, new Epoch:{}", this.brokerConfig.getBrokerName(), this.brokerAddress, newMasterEpoch);
this.masterEpoch = newMasterEpoch;
if (this.masterBrokerId != null && this.masterBrokerId.equals(this.brokerControllerId) && this.brokerController.getBrokerConfig().getBrokerId() == MixAll.MASTER_ID) {
// Change SyncStateSet
final HashSet<Long> newSyncStateSet = new HashSet<>(syncStateSet);
changeSyncStateSet(newSyncStateSet, syncStateSetEpoch);
// if master doesn't change
this.haService.changeToMasterWhenLastRoleIsMaster(newMasterEpoch);
this.brokerController.getTopicConfigManager().getDataVersion().nextVersion(newMasterEpoch);
this.executorService.submit(this::checkSyncStateSetAndDoReport);
registerBrokerWhenRoleChange();
return;
}
// Change SyncStateSet
final HashSet<Long> newSyncStateSet = new HashSet<>(syncStateSet);
changeSyncStateSet(newSyncStateSet, syncStateSetEpoch);
// Handle the slave synchronise
handleSlaveSynchronize(BrokerRole.SYNC_MASTER);
// Notify ha service, change to master
this.haService.changeToMaster(newMasterEpoch);
this.brokerController.getBrokerConfig().setBrokerId(MixAll.MASTER_ID);
this.brokerController.getMessageStoreConfig().setBrokerRole(BrokerRole.SYNC_MASTER);
this.brokerController.changeSpecialServiceStatus(true);
// Change record
this.masterAddress = this.brokerAddress;
this.masterBrokerId = this.brokerControllerId;
schedulingCheckSyncStateSet();
this.brokerController.getTopicConfigManager().getDataVersion().nextVersion(newMasterEpoch);
this.executorService.submit(this::checkSyncStateSetAndDoReport);
registerBrokerWhenRoleChange();
}
}
}
/**
 * Switch this broker to the SLAVE role under the given master.
 * The change is applied only when {@code newMasterEpoch} is strictly greater than the
 * current epoch, so stale/out-of-order notifications are ignored. Synchronized on
 * {@code this} to serialize role transitions with the other change/check methods.
 *
 * @param newMasterEpoch    election epoch of the new master; must exceed the current epoch to take effect
 * @param newMasterBrokerId controller-assigned brokerId of the new master
 */
public void changeToSlave(final String newMasterAddress, final int newMasterEpoch, Long newMasterBrokerId) {
    synchronized (this) {
        if (newMasterEpoch > this.masterEpoch) {
            LOGGER.info("Begin to change to slave, brokerName={}, brokerId={}, newMasterBrokerId={}, newMasterAddress={}, newMasterEpoch={}",
                this.brokerConfig.getBrokerName(), this.brokerControllerId, newMasterBrokerId, newMasterAddress, newMasterEpoch);
            this.masterEpoch = newMasterEpoch;
            if (newMasterBrokerId.equals(this.masterBrokerId)) {
                // if master doesn't change: only the epoch advanced, keep slave machinery running
                this.haService.changeToSlaveWhenMasterNotChange(newMasterAddress, newMasterEpoch);
                this.brokerController.getTopicConfigManager().getDataVersion().nextVersion(newMasterEpoch);
                registerBrokerWhenRoleChange();
                return;
            }
            // Stop checking syncStateSet because only master is able to check
            stopCheckSyncStateSet();
            // Change config(compatibility problem)
            this.brokerController.getMessageStoreConfig().setBrokerRole(BrokerRole.SLAVE);
            this.brokerController.changeSpecialServiceStatus(false);
            // The brokerId in brokerConfig just means its role(master[0] or slave[>=1])
            this.brokerConfig.setBrokerId(brokerControllerId);
            // Change record
            this.masterAddress = newMasterAddress;
            this.masterBrokerId = newMasterBrokerId;
            // Handle the slave synchronise
            handleSlaveSynchronize(BrokerRole.SLAVE);
            // Notify ha service, change to slave
            this.haService.changeToSlave(newMasterAddress, newMasterEpoch, brokerControllerId);
            this.brokerController.getTopicConfigManager().getDataVersion().nextVersion(newMasterEpoch);
            registerBrokerWhenRoleChange();
        }
    }
}
/**
 * Asynchronously re-register this broker with all name servers after a role change,
 * then log the new replication state. Runs on the internal executor so the (blocking)
 * name-server round-trip never stalls the role-transition path.
 */
public void registerBrokerWhenRoleChange() {
    this.executorService.submit(() -> {
        boolean registered = false;
        // Register broker to name-srv so clients and other brokers see the new role.
        try {
            this.brokerController.registerBrokerAll(true, false, this.brokerController.getBrokerConfig().isForceRegister());
            registered = true;
        } catch (final Throwable e) {
            LOGGER.error("Error happen when register broker to name-srv, Failed to change broker to {}", this.brokerController.getMessageStoreConfig().getBrokerRole(), e);
        }
        if (registered) {
            LOGGER.info("Change broker [id:{}][address:{}] to {}, newMasterBrokerId:{}, newMasterAddress:{}, newMasterEpoch:{}, syncStateSetEpoch:{}",
                this.brokerControllerId, this.brokerAddress, this.brokerController.getMessageStoreConfig().getBrokerRole(), this.masterBrokerId, this.masterAddress, this.masterEpoch, this.syncStateSetEpoch);
        }
    });
}
/**
 * Replace the local in-sync replica set if the incoming epoch is newer than the
 * one currently held; older/equal epochs are silently ignored.
 */
private void changeSyncStateSet(final Set<Long> newSyncStateSet, final int newSyncStateSetEpoch) {
    synchronized (this) {
        if (newSyncStateSetEpoch > this.syncStateSetEpoch) {
            LOGGER.info("SyncStateSet changed from {} to {}", this.syncStateSet, newSyncStateSet);
            this.syncStateSetEpoch = newSyncStateSetEpoch;
            // Defensive copy: callers may keep mutating their set afterwards.
            this.syncStateSet = new HashSet<>(newSyncStateSet);
            this.haService.setSyncStateSet(newSyncStateSet);
        }
    }
}
/**
 * Start (role == SLAVE) or stop (any other role) the periodic slave-to-master
 * synchronisation task. Any previously scheduled task is cancelled first so at
 * most one sync task is active at a time.
 *
 * @param role the role this broker is transitioning to
 */
private void handleSlaveSynchronize(final BrokerRole role) {
    if (role == BrokerRole.SLAVE) {
        if (this.slaveSyncFuture != null) {
            // Cancel the previous round before scheduling a fresh one.
            this.slaveSyncFuture.cancel(false);
        }
        this.brokerController.getSlaveSynchronize().setMasterAddr(this.masterAddress);
        // Task fires every 3s; a full syncAll runs at most every 10s (guarded by lastSyncTimeMs).
        // NOTE(review): 3s/10s intervals are hard-coded here — presumably intentional; confirm before tuning.
        slaveSyncFuture = this.brokerController.getScheduledExecutorService().scheduleAtFixedRate(() -> {
            try {
                if (System.currentTimeMillis() - lastSyncTimeMs > 10 * 1000) {
                    brokerController.getSlaveSynchronize().syncAll();
                    lastSyncTimeMs = System.currentTimeMillis();
                }
                //timer checkpoint, latency-sensitive, so sync it more frequently
                brokerController.getSlaveSynchronize().syncTimerCheckPoint();
            } catch (final Throwable e) {
                LOGGER.error("ScheduledTask SlaveSynchronize syncAll error.", e);
            }
        }, 1000 * 3, 1000 * 3, TimeUnit.MILLISECONDS);
    } else {
        // Non-slave role: stop syncing and clear the master address.
        if (this.slaveSyncFuture != null) {
            this.slaveSyncFuture.cancel(false);
        }
        this.brokerController.getSlaveSynchronize().setMasterAddr(null);
    }
}
/**
 * Ask the controller to run a master election for this broker set (this broker is a
 * candidate), then apply the resulting role locally.
 *
 * @return {@code true} if a master now exists and the local role was updated,
 *         {@code false} if there is still no master or the RPC failed
 */
private boolean brokerElect() {
    // Broker try to elect itself as a master in broker set.
    try {
        Pair<ElectMasterResponseHeader, Set<Long>> tryElectResponsePair = this.brokerOuterAPI.brokerElect(this.controllerLeaderAddress, this.brokerConfig.getBrokerClusterName(),
            this.brokerConfig.getBrokerName(), this.brokerControllerId);
        ElectMasterResponseHeader tryElectResponse = tryElectResponsePair.getObject1();
        Set<Long> syncStateSet = tryElectResponsePair.getObject2();
        final String masterAddress = tryElectResponse.getMasterAddress();
        final Long masterBrokerId = tryElectResponse.getMasterBrokerId();
        if (StringUtils.isEmpty(masterAddress) || masterBrokerId == null) {
            LOGGER.warn("Now no master in broker set");
            return false;
        }
        if (masterBrokerId.equals(this.brokerControllerId)) {
            // This broker won the election.
            changeToMaster(tryElectResponse.getMasterEpoch(), tryElectResponse.getSyncStateSetEpoch(), syncStateSet);
        } else {
            changeToSlave(masterAddress, tryElectResponse.getMasterEpoch(), tryElectResponse.getMasterBrokerId());
        }
        return true;
    } catch (Exception e) {
        LOGGER.error("Failed to try elect", e);
        return false;
    }
}
/**
 * Send a heartbeat (role, epoch, replication offsets, election priority) to every
 * currently reachable controller. Individual send failures are handled inside
 * {@code brokerOuterAPI}; this method itself does not throw.
 */
public void sendHeartbeatToController() {
    final List<String> controllerAddresses = this.getAvailableControllerAddresses();
    for (String controllerAddress : controllerAddresses) {
        if (StringUtils.isNotEmpty(controllerAddress)) {
            this.brokerOuterAPI.sendHeartbeatToController(
                controllerAddress,
                this.brokerConfig.getBrokerClusterName(),
                this.brokerAddress,
                this.brokerConfig.getBrokerName(),
                this.brokerControllerId,
                this.brokerConfig.getSendHeartbeatTimeoutMillis(),
                this.brokerConfig.isInBrokerContainer(), this.getLastEpoch(),
                // Replication progress, used by the controller for election decisions.
                this.brokerController.getMessageStore().getMaxPhyOffset(),
                this.brokerController.getMessageStore().getConfirmOffset(),
                this.brokerConfig.getControllerHeartBeatTimeoutMills(),
                this.brokerConfig.getBrokerElectionPriority()
            );
        }
    }
}
/**
 * Register broker to controller, and persist the metadata to file.
 * Implemented as a resumable state machine ({@link RegisterState}): each step is
 * only executed if the previous step's state has been reached, so a broker that
 * crashed mid-registration continues from its persisted state on restart.
 *
 * @return whether registering process succeeded
 */
private boolean register() {
    try {
        // 1. confirm now registering state (resume from persisted metadata, if any)
        confirmNowRegisteringState();
        LOGGER.info("Confirm now register state: {}", this.registerState);
        // 2. check metadata/tempMetadata if valid
        if (!checkMetadataValid()) {
            LOGGER.error("Check and find that metadata/tempMetadata invalid, you can modify the broker config to make them valid");
            return false;
        }
        // 3. get next assigning brokerId, and create temp metadata file
        if (this.registerState == RegisterState.INITIAL) {
            Long nextBrokerId = getNextBrokerId();
            if (nextBrokerId == null || !createTempMetadataFile(nextBrokerId)) {
                LOGGER.error("Failed to create temp metadata file, nextBrokerId: {}", nextBrokerId);
                return false;
            }
            this.registerState = RegisterState.CREATE_TEMP_METADATA_FILE_DONE;
            LOGGER.info("Register state change to {}, temp metadata: {}", this.registerState, this.tempBrokerMetadata);
        }
        // 4. apply brokerId to controller, and create metadata file
        if (this.registerState == RegisterState.CREATE_TEMP_METADATA_FILE_DONE) {
            if (!applyBrokerId()) {
                // apply broker id failed, means that this brokerId has been used
                // delete temp metadata file
                this.tempBrokerMetadata.clear();
                // back to the first step
                this.registerState = RegisterState.INITIAL;
                LOGGER.info("Register state change to: {}", this.registerState);
                return false;
            }
            if (!createMetadataFileAndDeleteTemp()) {
                LOGGER.error("Failed to create metadata file and delete temp metadata file, temp metadata: {}", this.tempBrokerMetadata);
                return false;
            }
            this.registerState = RegisterState.CREATE_METADATA_FILE_DONE;
            LOGGER.info("Register state change to: {}, metadata: {}", this.registerState, this.brokerMetadata);
        }
        // 5. register
        if (this.registerState == RegisterState.CREATE_METADATA_FILE_DONE) {
            if (!registerBrokerToController()) {
                LOGGER.error("Failed to register broker to controller");
                return false;
            }
            this.registerState = RegisterState.REGISTERED;
            LOGGER.info("Register state change to: {}, masterBrokerId: {}, masterBrokerAddr: {}", this.registerState, this.masterBrokerId, this.masterAddress);
        }
        return true;
    } catch (final Exception e) {
        LOGGER.error("Failed to register broker to controller", e);
        return false;
    }
}
/**
 * Query the controller for the next brokerId to be assigned in this broker-set.
 *
 * @return the next brokerId, or {@code null} if the controller could not be reached
 */
private Long getNextBrokerId() {
    try {
        final GetNextBrokerIdResponseHeader responseHeader = this.brokerOuterAPI.getNextBrokerId(
            this.brokerConfig.getBrokerClusterName(),
            this.brokerConfig.getBrokerName(),
            this.controllerLeaderAddress);
        return responseHeader.getNextBrokerId();
    } catch (Exception e) {
        // Null signals the caller (register()) to abort this registration attempt.
        LOGGER.error("fail to get next broker id from controller", e);
        return null;
    }
}
/**
 * Create temp metadata file in local file system, records the brokerId and registerCheckCode.
 * The check code lets the controller verify that a later applyBrokerId request comes
 * from the same broker instance that reserved the id.
 *
 * @param brokerId the brokerId that is expected to be assigned
 * @return whether the temp meta file is created successfully
 */
private boolean createTempMetadataFile(Long brokerId) {
    // generate register check code, format like that: $ipAddress;$timestamp
    String registerCheckCode = this.brokerAddress + ";" + System.currentTimeMillis();
    try {
        this.tempBrokerMetadata.updateAndPersist(brokerConfig.getBrokerClusterName(), brokerConfig.getBrokerName(), brokerId, registerCheckCode);
        return true;
    } catch (Exception e) {
        LOGGER.error("update and persist temp broker metadata file failed", e);
        // Roll back partially written state so the register() state machine stays consistent.
        this.tempBrokerMetadata.clear();
        return false;
    }
}
/**
 * Send applyBrokerId request to controller, asking it to bind the brokerId recorded
 * in the temp metadata file (together with its registerCheckCode) to this broker.
 *
 * @return whether controller has assigned this brokerId for this broker; {@code false}
 *         means the id is already taken (or the RPC failed) and the caller should restart
 *         the registration sequence
 */
private boolean applyBrokerId() {
    try {
        // The response header carries no data we need: a non-exceptional return means the id was granted.
        // (The previously captured local variable was unused and has been removed.)
        this.brokerOuterAPI.applyBrokerId(brokerConfig.getBrokerClusterName(), brokerConfig.getBrokerName(),
            tempBrokerMetadata.getBrokerId(), tempBrokerMetadata.getRegisterCheckCode(), this.controllerLeaderAddress);
        return true;
    } catch (Exception e) {
        LOGGER.error("fail to apply broker id: {}", tempBrokerMetadata.getBrokerId(), e);
        return false;
    }
}
/**
 * Create metadata file and delete temp metadata file, promoting the temp brokerId
 * to the broker's permanent controller id and publishing it to the HA service.
 *
 * @return whether process success
 */
private boolean createMetadataFileAndDeleteTemp() {
    // create metadata file and delete temp metadata file
    try {
        this.brokerMetadata.updateAndPersist(brokerConfig.getBrokerClusterName(), brokerConfig.getBrokerName(), tempBrokerMetadata.getBrokerId());
        this.tempBrokerMetadata.clear();
        this.brokerControllerId = this.brokerMetadata.getBrokerId();
        this.haService.setLocalBrokerId(this.brokerControllerId);
        return true;
    } catch (Exception e) {
        LOGGER.error("fail to create metadata file", e);
        // Roll back so register() can retry this step from CREATE_TEMP_METADATA_FILE_DONE.
        this.brokerMetadata.clear();
        return false;
    }
}
/**
 * Send registerBrokerToController request to inform controller that now broker has been
 * registered successfully and controller should update broker ipAddress if changed.
 * If the controller already knows a master for this broker set, the local role is
 * updated immediately from the response.
 *
 * @return whether request success
 */
private boolean registerBrokerToController() {
    try {
        Pair<RegisterBrokerToControllerResponseHeader, Set<Long>> responsePair = this.brokerOuterAPI.registerBrokerToController(brokerConfig.getBrokerClusterName(), brokerConfig.getBrokerName(), brokerControllerId, brokerAddress, controllerLeaderAddress);
        if (responsePair == null)
            return false;
        RegisterBrokerToControllerResponseHeader response = responsePair.getObject1();
        Set<Long> syncStateSet = responsePair.getObject2();
        final Long masterBrokerId = response.getMasterBrokerId();
        final String masterAddress = response.getMasterAddress();
        if (masterBrokerId == null) {
            // No master elected yet; registration itself still succeeded.
            return true;
        }
        if (this.brokerControllerId.equals(masterBrokerId)) {
            changeToMaster(response.getMasterEpoch(), response.getSyncStateSetEpoch(), syncStateSet);
        } else {
            changeToSlave(masterAddress, response.getMasterEpoch(), masterBrokerId);
        }
        return true;
    } catch (Exception e) {
        LOGGER.error("fail to send registerBrokerToController request to controller", e);
        return false;
    }
}
/**
 * Confirm the registering state now by probing the persisted metadata files, so a
 * restarted broker resumes the registration state machine instead of starting over.
 * Read failures are logged but non-fatal; an unreadable file simply leaves the state
 * at its default.
 */
private void confirmNowRegisteringState() {
    // 1. check if metadata exist
    try {
        this.brokerMetadata.readFromFile();
    } catch (Exception e) {
        LOGGER.error("Read metadata file failed", e);
    }
    if (this.brokerMetadata.isLoaded()) {
        // Permanent metadata found: brokerId is already assigned, skip ahead.
        this.registerState = RegisterState.CREATE_METADATA_FILE_DONE;
        this.brokerControllerId = brokerMetadata.getBrokerId();
        this.haService.setLocalBrokerId(this.brokerControllerId);
        return;
    }
    // 2. check if temp metadata exist
    try {
        this.tempBrokerMetadata.readFromFile();
    } catch (Exception e) {
        LOGGER.error("Read temp metadata file failed", e);
    }
    if (this.tempBrokerMetadata.isLoaded()) {
        this.registerState = RegisterState.CREATE_TEMP_METADATA_FILE_DONE;
    }
}
/**
 * Validate that the metadata (or temp metadata, depending on the current register
 * state) loaded from disk belongs to the cluster/broker configured for this process.
 * A mismatch usually means the data directory was copied from another broker.
 *
 * @return {@code true} if the relevant metadata matches the broker config
 */
private boolean checkMetadataValid() {
    if (this.registerState == RegisterState.CREATE_TEMP_METADATA_FILE_DONE
        && !metadataMatchesConfig(this.tempBrokerMetadata.getClusterName(), this.tempBrokerMetadata.getBrokerName(), "temp metadata")) {
        return false;
    }
    if (this.registerState == RegisterState.CREATE_METADATA_FILE_DONE
        && !metadataMatchesConfig(this.brokerMetadata.getClusterName(), this.brokerMetadata.getBrokerName(), "metadata")) {
        return false;
    }
    return true;
}

/**
 * Compare a persisted clusterName/brokerName pair against the broker config,
 * logging which file ({@code fileDesc}) disagreed. Extracted to remove the
 * duplicated validation logic for the temp and permanent metadata files.
 *
 * @param clusterName persisted cluster name (may be null if the file is corrupt)
 * @param brokerName  persisted broker name (may be null if the file is corrupt)
 * @param fileDesc    short description of the file being checked, used in log messages
 * @return {@code true} when both names are non-null and equal to the config values
 */
private boolean metadataMatchesConfig(String clusterName, String brokerName, String fileDesc) {
    if (clusterName == null || !clusterName.equals(this.brokerConfig.getBrokerClusterName())) {
        LOGGER.error("The clusterName: {} in broker " + fileDesc + " is different from the clusterName: {} in broker config",
            clusterName, this.brokerConfig.getBrokerClusterName());
        return false;
    }
    if (brokerName == null || !brokerName.equals(this.brokerConfig.getBrokerName())) {
        LOGGER.error("The brokerName: {} in broker " + fileDesc + " is different from the brokerName: {} in broker config",
            brokerName, this.brokerConfig.getBrokerName());
        return false;
    }
    return true;
}
/**
 * Scheduling sync broker metadata form controller.
 * Periodically pulls the replica info for this broker set and reconciles the local
 * role/syncStateSet with the controller's view: a newer master epoch triggers a role
 * change (or an election if the controller has no master), an equal epoch only
 * refreshes the syncStateSet on the master.
 */
private void schedulingSyncBrokerMetadata() {
    this.scheduledService.scheduleAtFixedRate(() -> {
        try {
            final Pair<GetReplicaInfoResponseHeader, SyncStateSet> result = this.brokerOuterAPI.getReplicaInfo(this.controllerLeaderAddress, this.brokerConfig.getBrokerName());
            final GetReplicaInfoResponseHeader info = result.getObject1();
            final SyncStateSet syncStateSet = result.getObject2();
            final String newMasterAddress = info.getMasterAddress();
            final int newMasterEpoch = info.getMasterEpoch();
            final Long masterBrokerId = info.getMasterBrokerId();
            // Lock spans the compare-and-act sequence so a concurrent role change cannot interleave.
            synchronized (this) {
                // Check if master changed
                if (newMasterEpoch > this.masterEpoch) {
                    if (StringUtils.isNoneEmpty(newMasterAddress) && masterBrokerId != null) {
                        if (masterBrokerId.equals(this.brokerControllerId)) {
                            // If this broker is now the master
                            changeToMaster(newMasterEpoch, syncStateSet.getSyncStateSetEpoch(), syncStateSet.getSyncStateSet());
                        } else {
                            // If this broker is now the slave, and master has been changed
                            changeToSlave(newMasterAddress, newMasterEpoch, masterBrokerId);
                        }
                    } else {
                        // In this case, the master in controller is null, try elect in controller, this will trigger the electMasterEvent in controller.
                        brokerElect();
                    }
                } else if (newMasterEpoch == this.masterEpoch) {
                    // Check if SyncStateSet changed
                    if (isMasterState()) {
                        changeSyncStateSet(syncStateSet.getSyncStateSet(), syncStateSet.getSyncStateSetEpoch());
                    }
                }
            }
        } catch (final MQBrokerException exception) {
            LOGGER.warn("Error happen when get broker {}'s metadata", this.brokerConfig.getBrokerName(), exception);
            if (exception.getResponseCode() == CONTROLLER_BROKER_METADATA_NOT_EXIST) {
                // Controller lost our registration (e.g. after controller data loss): re-register and back off briefly.
                try {
                    registerBrokerToController();
                    TimeUnit.SECONDS.sleep(2);
                } catch (InterruptedException ignore) {
                }
            }
        } catch (final Exception e) {
            LOGGER.warn("Error happen when get broker {}'s metadata", this.brokerConfig.getBrokerName(), e);
        }
    }, 3 * 1000, this.brokerConfig.getSyncBrokerMetadataPeriod(), TimeUnit.MILLISECONDS);
}
/**
 * Scheduling sync controller metadata.
 * First resolves the controller leader (up to 3 attempts, 1s apart); only once an
 * initial leader is known does it schedule the periodic refresh task.
 *
 * @return {@code true} if a controller leader was found and the periodic task scheduled
 */
private boolean schedulingSyncControllerMetadata() {
    // Get controller metadata first.
    int tryTimes = 0;
    while (tryTimes < 3) {
        boolean flag = updateControllerMetadata();
        if (flag) {
            this.scheduledService.scheduleAtFixedRate(this::updateControllerMetadata, 1000 * 3, this.brokerConfig.getSyncControllerMetadataPeriod(), TimeUnit.MILLISECONDS);
            return true;
        }
        try {
            TimeUnit.SECONDS.sleep(1);
        } catch (InterruptedException ignore) {
        }
        tryTimes++;
    }
    LOGGER.error("Failed to init controller metadata, maybe the controllers in {} is not available", this.controllerAddresses);
    return false;
}
/**
 * Update controller leader address by rpc.
 * Tries every currently reachable controller and stops at the first one that
 * reports a non-empty leader address.
 *
 * @return {@code true} if the leader address was refreshed
 */
private boolean updateControllerMetadata() {
    for (String address : this.availableControllerAddresses.keySet()) {
        try {
            final GetMetaDataResponseHeader responseHeader = this.brokerOuterAPI.getControllerMetaData(address);
            if (responseHeader != null && StringUtils.isNoneEmpty(responseHeader.getControllerLeaderAddress())) {
                this.controllerLeaderAddress = responseHeader.getControllerLeaderAddress();
                LOGGER.info("Update controller leader address to {}", this.controllerLeaderAddress);
                return true;
            }
        } catch (final Exception e) {
            // Keep probing the remaining controllers; only fail if none answers.
            LOGGER.error("Failed to update controller metadata", e);
        }
    }
    return false;
}
/**
 * Scheduling check syncStateSet.
 * (Re)starts the periodic in-sync-replica check; any previously scheduled task is
 * cancelled first. Only the master runs this (see changeToMaster/changeToSlave).
 */
private void schedulingCheckSyncStateSet() {
    if (this.checkSyncStateSetTaskFuture != null) {
        this.checkSyncStateSetTaskFuture.cancel(false);
    }
    this.checkSyncStateSetTaskFuture = this.scheduledService.scheduleAtFixedRate(this::checkSyncStateSetAndDoReport, 3 * 1000,
        this.brokerConfig.getCheckSyncStateSetPeriod(), TimeUnit.MILLISECONDS);
}
/**
 * Recompute the in-sync replica set from the HA service (always including this broker)
 * and report it to the controller, but only when it actually differs from the
 * currently known set.
 */
private void checkSyncStateSetAndDoReport() {
    try {
        final Set<Long> newSyncStateSet = this.haService.maybeShrinkSyncStateSet();
        // The master itself is always part of the in-sync set.
        newSyncStateSet.add(this.brokerControllerId);
        synchronized (this) {
            if (this.syncStateSet != null) {
                // Check if syncStateSet changed
                if (this.syncStateSet.size() == newSyncStateSet.size() && this.syncStateSet.containsAll(newSyncStateSet)) {
                    return;
                }
            }
        }
        // Report outside the lock; changeSyncStateSet re-synchronizes when the result arrives.
        doReportSyncStateSetChanged(newSyncStateSet);
    } catch (Exception e) {
        LOGGER.error("Check syncStateSet error", e);
    }
}
/**
 * Push an alterSyncStateSet request to the controller and, if it accepts, adopt the
 * set/epoch echoed back. Failures are logged with full context and swallowed; the
 * next periodic check will retry.
 */
private void doReportSyncStateSetChanged(Set<Long> newSyncStateSet) {
    try {
        final SyncStateSet result = this.brokerOuterAPI.alterSyncStateSet(this.controllerLeaderAddress, this.brokerConfig.getBrokerName(), this.brokerControllerId, this.masterEpoch, newSyncStateSet, this.syncStateSetEpoch);
        if (result != null) {
            changeSyncStateSet(result.getSyncStateSet(), result.getSyncStateSetEpoch());
        }
    } catch (final Exception e) {
        LOGGER.error("Error happen when change SyncStateSet, broker:{}, masterAddress:{}, masterEpoch:{}, oldSyncStateSet:{}, newSyncStateSet:{}, syncStateSetEpoch:{}",
            this.brokerConfig.getBrokerName(), this.masterAddress, this.masterEpoch, this.syncStateSet, newSyncStateSet, this.syncStateSetEpoch, e);
    }
}
/**
 * Cancel the periodic syncStateSet check (no-op if it was never scheduled);
 * called when this broker steps down from master.
 */
private void stopCheckSyncStateSet() {
    if (this.checkSyncStateSetTaskFuture != null) {
        this.checkSyncStateSetTaskFuture.cancel(false);
    }
}
/**
 * Reconcile the set of reachable controller addresses against the configured list:
 * drop entries no longer configured, then probe each configured address for
 * reachability on the scan executor, adding/removing it accordingly.
 * NOTE(review): iteration-while-removing assumes availableControllerAddresses is a
 * concurrent map — confirm against the field declaration.
 */
private void scanAvailableControllerAddresses() {
    try {
        if (controllerAddresses == null) {
            LOGGER.warn("scanAvailableControllerAddresses addresses of controller is null!");
            return;
        }
        for (String address : availableControllerAddresses.keySet()) {
            if (!controllerAddresses.contains(address)) {
                LOGGER.warn("scanAvailableControllerAddresses remove invalid address {}", address);
                availableControllerAddresses.remove(address);
            }
        }
        for (String address : controllerAddresses) {
            // Reachability probes run asynchronously so a slow controller can't stall the scan.
            scanExecutor.submit(() -> {
                if (brokerOuterAPI.checkAddressReachable(address)) {
                    availableControllerAddresses.putIfAbsent(address, true);
                } else {
                    Boolean value = availableControllerAddresses.remove(address);
                    if (value != null) {
                        LOGGER.warn("scanAvailableControllerAddresses remove unconnected address {}", address);
                    }
                }
            });
        }
    } catch (final Throwable t) {
        LOGGER.error("scanAvailableControllerAddresses unexpected exception", t);
    }
}
/**
 * Refresh {@code controllerAddresses}: either resolve the configured controller domain
 * via DNS lookup, or split the semicolon-separated address list from the broker config.
 */
private void updateControllerAddr() {
    if (brokerConfig.isFetchControllerAddrByDnsLookup()) {
        final List<String> resolvedAddresses = brokerOuterAPI.dnsLookupAddressByDomain(this.brokerConfig.getControllerAddr());
        // Keep the previous list when the lookup returns nothing, rather than wiping it.
        if (CollectionUtils.isNotEmpty(resolvedAddresses)) {
            this.controllerAddresses = resolvedAddresses;
        }
    } else {
        final String configuredAddresses = this.brokerConfig.getControllerAddr();
        final String[] parts = configuredAddresses.split(";");
        assert parts.length > 0;
        this.controllerAddresses = Arrays.asList(parts);
    }
}
// --- Simple accessors / state queries -------------------------------------

/** Last HA election epoch known to the HA service. */
public int getLastEpoch() {
    return this.haService.getLastEpoch();
}

/** Current role as recorded in the message-store config. */
public BrokerRole getBrokerRole() {
    return this.brokerController.getMessageStoreConfig().getBrokerRole();
}

/** Whether this broker currently acts as the (sync) master. */
public boolean isMasterState() {
    return getBrokerRole() == BrokerRole.SYNC_MASTER;
}

/** Snapshot of the current in-sync replica set together with its epoch. */
public SyncStateSet getSyncStateSet() {
    return new SyncStateSet(this.syncStateSet, this.syncStateSetEpoch);
}

public String getBrokerAddress() {
    return brokerAddress;
}

public String getMasterAddress() {
    return masterAddress;
}

public int getMasterEpoch() {
    return masterEpoch;
}

/** All configured controller addresses (not necessarily reachable). */
public List<String> getControllerAddresses() {
    return controllerAddresses;
}

/** Replication epoch entries held by the HA service. */
public List<EpochEntry> getEpochEntries() {
    return this.haService.getEpochEntries();
}

/** Copy of the controller addresses that passed the last reachability scan. */
public List<String> getAvailableControllerAddresses() {
    return new ArrayList<>(availableControllerAddresses.keySet());
}

public Long getBrokerControllerId() {
    return brokerControllerId;
}

public RegisterState getRegisterState() {
    return registerState;
}

public State getState() {
    return state;
}

public BrokerMetadata getBrokerMetadata() {
    return brokerMetadata;
}

public TempBrokerMetadata getTempBrokerMetadata() {
    return tempBrokerMetadata;
}

/**
 * Isolate or un-isolate this broker: a fenced broker stops serving and its
 * message store is flagged accordingly.
 */
public void setFenced(boolean fenced) {
    this.brokerController.setIsolated(fenced);
    this.brokerController.getMessageStore().getRunningFlags().makeFenced(fenced);
}
}
| RegisterState |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableSequenceEqualTest.java | {
"start": 1421,
"end": 20023
} | class ____ extends RxJavaTest {
// --- Basic sequenceEqual behaviour ----------------------------------------
// Each scenario is covered twice: once through toFlowable() (Flowable variant)
// and once through the Single returned by sequenceEqual directly.

@Test
public void flowable1() {
    // Identical sequences compare equal.
    Flowable<Boolean> flowable = Flowable.sequenceEqual(
        Flowable.just("one", "two", "three"),
        Flowable.just("one", "two", "three")).toFlowable();
    verifyResult(flowable, true);
}

@Test
public void flowable2() {
    // Second sequence longer -> not equal.
    Flowable<Boolean> flowable = Flowable.sequenceEqual(
        Flowable.just("one", "two", "three"),
        Flowable.just("one", "two", "three", "four")).toFlowable();
    verifyResult(flowable, false);
}

@Test
public void flowable3() {
    // First sequence longer -> not equal.
    Flowable<Boolean> flowable = Flowable.sequenceEqual(
        Flowable.just("one", "two", "three", "four"),
        Flowable.just("one", "two", "three")).toFlowable();
    verifyResult(flowable, false);
}

@Test
public void withError1Flowable() {
    // An error in the first source propagates.
    Flowable<Boolean> flowable = Flowable.sequenceEqual(
        Flowable.concat(Flowable.just("one"),
            Flowable.<String> error(new TestException())),
        Flowable.just("one", "two", "three")).toFlowable();
    verifyError(flowable);
}

@Test
public void withError2Flowable() {
    // An error in the second source propagates.
    Flowable<Boolean> flowable = Flowable.sequenceEqual(
        Flowable.just("one", "two", "three"),
        Flowable.concat(Flowable.just("one"),
            Flowable.<String> error(new TestException()))).toFlowable();
    verifyError(flowable);
}

@Test
public void withError3Flowable() {
    // Errors in both sources still surface as a single error.
    Flowable<Boolean> flowable = Flowable.sequenceEqual(
        Flowable.concat(Flowable.just("one"),
            Flowable.<String> error(new TestException())),
        Flowable.concat(Flowable.just("one"),
            Flowable.<String> error(new TestException()))).toFlowable();
    verifyError(flowable);
}

@Test
public void withEmpty1Flowable() {
    Flowable<Boolean> flowable = Flowable.sequenceEqual(
        Flowable.<String> empty(),
        Flowable.just("one", "two", "three")).toFlowable();
    verifyResult(flowable, false);
}

@Test
public void withEmpty2Flowable() {
    Flowable<Boolean> flowable = Flowable.sequenceEqual(
        Flowable.just("one", "two", "three"),
        Flowable.<String> empty()).toFlowable();
    verifyResult(flowable, false);
}

@Test
public void withEmpty3Flowable() {
    // Two empty sequences are equal.
    Flowable<Boolean> flowable = Flowable.sequenceEqual(
        Flowable.<String> empty(), Flowable.<String> empty()).toFlowable();
    verifyResult(flowable, true);
}

@Test
public void withEqualityErrorFlowable() {
    // A throwing comparer surfaces as onError.
    Flowable<Boolean> flowable = Flowable.sequenceEqual(
        Flowable.just("one"), Flowable.just("one"),
        new BiPredicate<String, String>() {
            @Override
            public boolean test(String t1, String t2) {
                throw new TestException();
            }
        }).toFlowable();
    verifyError(flowable);
}

@Test
public void one() {
    Single<Boolean> single = Flowable.sequenceEqual(
        Flowable.just("one", "two", "three"),
        Flowable.just("one", "two", "three"));
    verifyResult(single, true);
}

@Test
public void two() {
    Single<Boolean> single = Flowable.sequenceEqual(
        Flowable.just("one", "two", "three"),
        Flowable.just("one", "two", "three", "four"));
    verifyResult(single, false);
}

@Test
public void three() {
    Single<Boolean> single = Flowable.sequenceEqual(
        Flowable.just("one", "two", "three", "four"),
        Flowable.just("one", "two", "three"));
    verifyResult(single, false);
}

@Test
public void withError1() {
    Single<Boolean> single = Flowable.sequenceEqual(
        Flowable.concat(Flowable.just("one"),
            Flowable.<String> error(new TestException())),
        Flowable.just("one", "two", "three"));
    verifyError(single);
}

@Test
public void withError2() {
    Single<Boolean> single = Flowable.sequenceEqual(
        Flowable.just("one", "two", "three"),
        Flowable.concat(Flowable.just("one"),
            Flowable.<String> error(new TestException())));
    verifyError(single);
}

@Test
public void withError3() {
    Single<Boolean> single = Flowable.sequenceEqual(
        Flowable.concat(Flowable.just("one"),
            Flowable.<String> error(new TestException())),
        Flowable.concat(Flowable.just("one"),
            Flowable.<String> error(new TestException())));
    verifyError(single);
}

@Test
public void withEmpty1() {
    Single<Boolean> single = Flowable.sequenceEqual(
        Flowable.<String> empty(),
        Flowable.just("one", "two", "three"));
    verifyResult(single, false);
}

@Test
public void withEmpty2() {
    Single<Boolean> single = Flowable.sequenceEqual(
        Flowable.just("one", "two", "three"),
        Flowable.<String> empty());
    verifyResult(single, false);
}

@Test
public void withEmpty3() {
    Single<Boolean> single = Flowable.sequenceEqual(
        Flowable.<String> empty(), Flowable.<String> empty());
    verifyResult(single, true);
}

@Test
public void withEqualityError() {
    Single<Boolean> single = Flowable.sequenceEqual(
        Flowable.just("one"), Flowable.just("one"),
        new BiPredicate<String, String>() {
            @Override
            public boolean test(String t1, String t2) {
                throw new TestException();
            }
        });
    verifyError(single);
}
// --- Shared verification helpers (mock-based) -----------------------------

/** Assert the Flowable emits exactly {@code result} then completes. */
private void verifyResult(Flowable<Boolean> flowable, boolean result) {
    Subscriber<Boolean> subscriber = TestHelper.mockSubscriber();
    flowable.subscribe(subscriber);
    InOrder inOrder = inOrder(subscriber);
    inOrder.verify(subscriber, times(1)).onNext(result);
    inOrder.verify(subscriber).onComplete();
    inOrder.verifyNoMoreInteractions();
}

/** Assert the Single succeeds with exactly {@code result}. */
private void verifyResult(Single<Boolean> single, boolean result) {
    SingleObserver<Boolean> observer = TestHelper.mockSingleObserver();
    single.subscribe(observer);
    InOrder inOrder = inOrder(observer);
    inOrder.verify(observer, times(1)).onSuccess(result);
    inOrder.verifyNoMoreInteractions();
}

/** Assert the Flowable terminates with a TestException and nothing else. */
private void verifyError(Flowable<Boolean> flowable) {
    Subscriber<Boolean> subscriber = TestHelper.mockSubscriber();
    flowable.subscribe(subscriber);
    InOrder inOrder = inOrder(subscriber);
    inOrder.verify(subscriber, times(1)).onError(isA(TestException.class));
    inOrder.verifyNoMoreInteractions();
}

/** Assert the Single terminates with a TestException and nothing else. */
private void verifyError(Single<Boolean> single) {
    SingleObserver<Boolean> observer = TestHelper.mockSingleObserver();
    single.subscribe(observer);
    InOrder inOrder = inOrder(observer);
    inOrder.verify(observer, times(1)).onError(isA(TestException.class));
    inOrder.verifyNoMoreInteractions();
}
// --- Operator edge cases: prefetch, disposal, races, fusion, backpressure --

@Test
public void prefetch() {
    // Custom prefetch smaller than the sequence length still yields the right answer.
    Flowable.sequenceEqual(Flowable.range(1, 20), Flowable.range(1, 20), 2)
        .test()
        .assertResult(true);
}

@Test
public void disposed() {
    TestHelper.checkDisposed(Flowable.sequenceEqual(Flowable.just(1), Flowable.just(2)));
}

@Test
public void simpleInequal() {
    Flowable.sequenceEqual(Flowable.just(1), Flowable.just(2))
        .test()
        .assertResult(false);
}

@Test
public void simpleInequalObservable() {
    Flowable.sequenceEqual(Flowable.just(1), Flowable.just(2))
        .toFlowable()
        .test()
        .assertResult(false);
}

@Test
public void onNextCancelRace() {
    // Racing dispose against onNext must never leak an emission.
    for (int i = 0; i < TestHelper.RACE_DEFAULT_LOOPS; i++) {
        final PublishProcessor<Integer> pp = PublishProcessor.create();
        final TestObserver<Boolean> to = Flowable.sequenceEqual(Flowable.never(), pp).test();
        Runnable r1 = new Runnable() {
            @Override
            public void run() {
                to.dispose();
            }
        };
        Runnable r2 = new Runnable() {
            @Override
            public void run() {
                pp.onNext(1);
            }
        };
        TestHelper.race(r1, r2);
        to.assertEmpty();
    }
}

@Test
public void onNextCancelRaceObservable() {
    for (int i = 0; i < TestHelper.RACE_DEFAULT_LOOPS; i++) {
        final PublishProcessor<Integer> pp = PublishProcessor.create();
        final TestSubscriber<Boolean> ts = Flowable.sequenceEqual(Flowable.never(), pp).toFlowable().test();
        Runnable r1 = new Runnable() {
            @Override
            public void run() {
                ts.cancel();
            }
        };
        Runnable r2 = new Runnable() {
            @Override
            public void run() {
                pp.onNext(1);
            }
        };
        TestHelper.race(r1, r2);
        ts.assertEmpty();
    }
}

@Test
public void disposedFlowable() {
    TestHelper.checkDisposed(Flowable.sequenceEqual(Flowable.just(1), Flowable.just(2)).toFlowable());
}

@Test
public void prefetchFlowable() {
    Flowable.sequenceEqual(Flowable.range(1, 20), Flowable.range(1, 20), 2)
        .toFlowable()
        .test()
        .assertResult(true);
}

@Test
public void longSequenceEqualsFlowable() {
    // Async comparison across multiple buffer refills.
    Flowable<Integer> source = Flowable.range(1, Flowable.bufferSize() * 4).subscribeOn(Schedulers.computation());
    Flowable.sequenceEqual(source, source)
        .toFlowable()
        .test()
        .awaitDone(5, TimeUnit.SECONDS)
        .assertResult(true);
}

@Test
public void syncFusedCrashFlowable() {
    // A crash inside a sync-fused source is reported as onError on either side.
    Flowable<Integer> source = Flowable.range(1, 10).map(new Function<Integer, Integer>() {
        @Override
        public Integer apply(Integer v) throws Exception { throw new TestException(); }
    });
    Flowable.sequenceEqual(source, Flowable.range(1, 10).hide())
        .toFlowable()
        .test()
        .assertFailure(TestException.class);
    Flowable.sequenceEqual(Flowable.range(1, 10).hide(), source)
        .toFlowable()
        .test()
        .assertFailure(TestException.class);
}

@Test
public void cancelAndDrainRaceFlowable() {
    // A source that never signals onSubscribe, to keep one side permanently pending.
    Flowable<Object> neverNever = new Flowable<Object>() {
        @Override
        protected void subscribeActual(Subscriber<? super Object> s) {
        }
    };
    for (int i = 0; i < TestHelper.RACE_DEFAULT_LOOPS; i++) {
        final TestSubscriber<Boolean> ts = new TestSubscriber<>();
        final PublishProcessor<Integer> pp = PublishProcessor.create();
        // Alternate which side is the live processor to cover both drain paths.
        boolean swap = (i & 1) == 0;
        Flowable.sequenceEqual(swap ? pp : neverNever, swap ? neverNever : pp)
            .toFlowable()
            .subscribe(ts);
        Runnable r1 = new Runnable() {
            @Override
            public void run() {
                pp.onNext(1);
            }
        };
        Runnable r2 = new Runnable() {
            @Override
            public void run() {
                ts.cancel();
            }
        };
        TestHelper.race(r1, r2);
        ts.assertEmpty();
    }
}

@Test
public void sourceOverflowsFlowable() {
    // Emitting more than the prefetch without requests must fail with MissingBackpressureException.
    Flowable.sequenceEqual(Flowable.never(), new Flowable<Object>() {
        @Override
        protected void subscribeActual(Subscriber<? super Object> s) {
            s.onSubscribe(new BooleanSubscription());
            for (int i = 0; i < 10; i++) {
                s.onNext(i);
            }
        }
    }, 8)
    .toFlowable()
    .test()
    .assertFailure(MissingBackpressureException.class);
}

@Test
public void doubleErrorFlowable() {
    // The second onError must be routed to the undeliverable handler, not delivered downstream.
    List<Throwable> errors = TestHelper.trackPluginErrors();
    try {
        Flowable.sequenceEqual(Flowable.never(), new Flowable<Object>() {
            @Override
            protected void subscribeActual(Subscriber<? super Object> s) {
                s.onSubscribe(new BooleanSubscription());
                s.onError(new TestException("First"));
                s.onError(new TestException("Second"));
            }
        }, 8)
        .toFlowable()
        .to(TestHelper.<Boolean>testConsumer())
        .assertFailureAndMessage(TestException.class, "First");
        TestHelper.assertUndeliverable(errors, 0, TestException.class, "Second");
    } finally {
        RxJavaPlugins.reset();
    }
}
// --- Same edge cases via the Single-returning sequenceEqual ---------------

@Test
public void longSequenceEquals() {
    Flowable<Integer> source = Flowable.range(1, Flowable.bufferSize() * 4).subscribeOn(Schedulers.computation());
    Flowable.sequenceEqual(source, source)
        .test()
        .awaitDone(5, TimeUnit.SECONDS)
        .assertResult(true);
}

@Test
public void syncFusedCrash() {
    Flowable<Integer> source = Flowable.range(1, 10).map(new Function<Integer, Integer>() {
        @Override
        public Integer apply(Integer v) throws Exception { throw new TestException(); }
    });
    Flowable.sequenceEqual(source, Flowable.range(1, 10).hide())
        .test()
        .assertFailure(TestException.class);
    Flowable.sequenceEqual(Flowable.range(1, 10).hide(), source)
        .test()
        .assertFailure(TestException.class);
}

@Test
public void cancelAndDrainRace() {
    Flowable<Object> neverNever = new Flowable<Object>() {
        @Override
        protected void subscribeActual(Subscriber<? super Object> s) {
        }
    };
    for (int i = 0; i < TestHelper.RACE_DEFAULT_LOOPS; i++) {
        final TestObserver<Boolean> to = new TestObserver<>();
        final PublishProcessor<Integer> pp = PublishProcessor.create();
        boolean swap = (i & 1) == 0;
        Flowable.sequenceEqual(swap ? pp : neverNever, swap ? neverNever : pp)
            .subscribe(to);
        Runnable r1 = new Runnable() {
            @Override
            public void run() {
                pp.onNext(1);
            }
        };
        Runnable r2 = new Runnable() {
            @Override
            public void run() {
                to.dispose();
            }
        };
        TestHelper.race(r1, r2);
        to.assertEmpty();
    }
}

@Test
public void sourceOverflows() {
    Flowable.sequenceEqual(Flowable.never(), new Flowable<Object>() {
        @Override
        protected void subscribeActual(Subscriber<? super Object> s) {
            s.onSubscribe(new BooleanSubscription());
            for (int i = 0; i < 10; i++) {
                s.onNext(i);
            }
        }
    }, 8)
    .test()
    .assertFailure(MissingBackpressureException.class);
}

@Test
public void doubleError() {
    List<Throwable> errors = TestHelper.trackPluginErrors();
    try {
        Flowable.sequenceEqual(Flowable.never(), new Flowable<Object>() {
            @Override
            protected void subscribeActual(Subscriber<? super Object> s) {
                s.onSubscribe(new BooleanSubscription());
                s.onError(new TestException("First"));
                s.onError(new TestException("Second"));
            }
        }, 8)
        .to(TestHelper.<Boolean>testConsumer())
        .assertFailureAndMessage(TestException.class, "First");
        TestHelper.assertUndeliverable(errors, 0, TestException.class, "Second");
    } finally {
        RxJavaPlugins.reset();
    }
}

// Errors arriving after cancellation must go to the undeliverable handler;
// both sources and both return shapes (Single / Flowable) are covered.

@Test
public void undeliverableUponCancel() {
    TestHelper.checkUndeliverableUponCancel(new FlowableConverter<Integer, Single<Boolean>>() {
        @Override
        public Single<Boolean> apply(Flowable<Integer> upstream) {
            return Flowable.sequenceEqual(Flowable.just(1).hide(), upstream);
        }
    });
}

@Test
public void undeliverableUponCancelAsFlowable() {
    TestHelper.checkUndeliverableUponCancel(new FlowableConverter<Integer, Flowable<Boolean>>() {
        @Override
        public Flowable<Boolean> apply(Flowable<Integer> upstream) {
            return Flowable.sequenceEqual(Flowable.just(1).hide(), upstream).toFlowable();
        }
    });
}

@Test
public void undeliverableUponCancel2() {
    TestHelper.checkUndeliverableUponCancel(new FlowableConverter<Integer, Single<Boolean>>() {
        @Override
        public Single<Boolean> apply(Flowable<Integer> upstream) {
            return Flowable.sequenceEqual(upstream, Flowable.just(1).hide());
        }
    });
}

@Test
public void undeliverableUponCancelAsFlowable2() {
    TestHelper.checkUndeliverableUponCancel(new FlowableConverter<Integer, Flowable<Boolean>>() {
        @Override
        public Flowable<Boolean> apply(Flowable<Integer> upstream) {
            return Flowable.sequenceEqual(upstream, Flowable.just(1).hide()).toFlowable();
        }
    });
}

@Test
public void fusionRejected() {
    // A source that refuses fusion must still be consumed correctly.
    Flowable.sequenceEqual(TestHelper.rejectFlowableFusion(), Flowable.never())
        .test()
        .assertEmpty();
}
@Test
public void fusionRejectedFlowable() {
Flowable.sequenceEqual(TestHelper.rejectFlowableFusion(), Flowable.never())
.toFlowable()
.test()
.assertEmpty();
}
@Test
public void asyncSourceCompare() {
Flowable.sequenceEqual(Flowable.fromCallable(() -> 1), Flowable.just(1))
.test()
.assertResult(true);
}
}
| FlowableSequenceEqualTest |
java | apache__camel | components/camel-atmosphere-websocket/src/test/java/org/apache/camel/component/atmosphere/websocket/WebsocketRoute2WithInitParamTest.java | {
"start": 1308,
"end": 6371
} | class ____ extends WebsocketCamelRouterWithInitParamTestSupport {
private static final String[] EXISTED_USERS = { "Kim", "Pavlo", "Peter" };
private static String[] broadcastMessageTo = {};
private static Map<String, String> connectionKeyUserMap = new HashMap<>();
@Test
void testWebsocketSingleClientBroadcastMultipleClients() throws Exception {
final int awaitTime = 2;
connectionKeyUserMap.clear();
WebsocketTestClient wsclient1 = new WebsocketTestClient("ws://localhost:" + PORT + "/broadcast", 2);
WebsocketTestClient wsclient2 = new WebsocketTestClient("ws://localhost:" + PORT + "/broadcast", 2);
WebsocketTestClient wsclient3 = new WebsocketTestClient("ws://localhost:" + PORT + "/broadcast", 2);
wsclient1.connect();
wsclient1.await(awaitTime);
wsclient2.connect();
wsclient2.await(awaitTime);
wsclient3.connect();
wsclient3.await(awaitTime);
//all connections were registered in external store
assertEquals(EXISTED_USERS.length, connectionKeyUserMap.size());
broadcastMessageTo = new String[] { EXISTED_USERS[0], EXISTED_USERS[1] };
wsclient1.sendTextMessage("Gambas");
wsclient1.await(awaitTime);
List<String> received1 = wsclient1.getReceived(String.class);
assertEquals(1, received1.size());
for (String element : broadcastMessageTo) {
assertTrue(received1.get(0).contains(element));
}
List<String> received2 = wsclient2.getReceived(String.class);
assertEquals(1, received2.size());
for (String element : broadcastMessageTo) {
assertTrue(received2.get(0).contains(element));
}
List<String> received3 = wsclient3.getReceived(String.class);
assertEquals(0, received3.size());
wsclient1.close();
wsclient2.close();
wsclient3.close();
}
// START SNIPPET: payload
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
// route for single client broadcast to multiple clients
from("atmosphere-websocket:///broadcast").to("log:info")
.choice()
.when(header(WebsocketConstants.EVENT_TYPE).isEqualTo(WebsocketConstants.ONOPEN_EVENT_TYPE))
.process(new Processor() {
public void process(final Exchange exchange) {
createExternalConnectionRegister(exchange);
}
})
.when(header(WebsocketConstants.EVENT_TYPE).isEqualTo(WebsocketConstants.ONCLOSE_EVENT_TYPE))
.process(new Processor() {
public void process(final Exchange exchange) {
removeExternalConnectionRegister();
}
})
.when(header(WebsocketConstants.EVENT_TYPE).isEqualTo(WebsocketConstants.ONERROR_EVENT_TYPE))
.process(new Processor() {
public void process(final Exchange exchange) {
removeExternalConnectionRegister();
}
})
.otherwise()
.process(new Processor() {
public void process(final Exchange exchange) {
createBroadcastMultipleClientsResponse(exchange);
}
}).to("atmosphere-websocket:///broadcast");
}
};
}
private static void createExternalConnectionRegister(Exchange exchange) {
Object connectionKey = exchange.getIn().getHeader(WebsocketConstants.CONNECTION_KEY);
String userName = EXISTED_USERS[0];
if (!connectionKeyUserMap.isEmpty()) {
userName = EXISTED_USERS[connectionKeyUserMap.size()];
}
connectionKeyUserMap.put(userName, (String) connectionKey);
}
private static void removeExternalConnectionRegister() {
// remove connectionKey from external store
}
private static void createBroadcastMultipleClientsResponse(Exchange exchange) {
List<String> connectionKeyList = new ArrayList<>();
Object msg = exchange.getIn().getBody();
String additionalMessage = "";
//send the message only to selected connections
for (String element : broadcastMessageTo) {
connectionKeyList.add(connectionKeyUserMap.get(element));
additionalMessage += element + " ";
}
additionalMessage += " Received the message: ";
exchange.getIn().setBody(additionalMessage + msg);
exchange.getIn().setHeader(WebsocketConstants.CONNECTION_KEY_LIST, connectionKeyList);
}
// END SNIPPET: payload
}
| WebsocketRoute2WithInitParamTest |
java | quarkusio__quarkus | extensions/spring-data-jpa/deployment/src/test/java/io/quarkus/spring/data/deployment/Person.java | {
"start": 319,
"end": 2064
} | class ____ {
@Id
@GeneratedValue
private Integer id;
private String name;
private Integer age;
private ZonedDateTime birthDate;
private Boolean active;
private String order;
@ManyToOne(cascade = CascadeType.ALL)
@JoinColumn(name = "address_id", referencedColumnName = "id")
private Address address;
private String addressCountry;
public Person() {
}
public Person(String name, Integer age, ZonedDateTime birthDate, Boolean active, String order) {
this.name = name;
this.age = age;
this.birthDate = birthDate;
this.active = active;
this.order = order;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Integer getAge() {
return age;
}
public void setAge(Integer age) {
this.age = age;
}
public ZonedDateTime getBirthDate() {
return birthDate;
}
public void setBirthDate(ZonedDateTime birthDate) {
this.birthDate = birthDate;
}
public Boolean getActive() {
return active;
}
public void setActive(Boolean active) {
this.active = active;
}
public String getOrder() {
return order;
}
public void setOrder(String order) {
this.order = order;
}
public Address getAddress() {
return address;
}
public void setAddress(Address address) {
this.address = address;
}
public String getAddressCountry() {
return addressCountry;
}
public void setAddressCountry(String addressCountry) {
this.addressCountry = addressCountry;
}
@Entity
public static | Person |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/simple/RawListQueryParamTest.java | {
"start": 1164,
"end": 1477
} | class ____ {
@GET
@SuppressWarnings({ "rawtypes", "unchecked" })
public String hello(@RestQuery("name") List names) {
if (names.isEmpty()) {
return "hello world";
}
return "hello " + String.join(",", names);
}
}
}
| HelloResource |
java | google__dagger | dagger-android/main/java/dagger/android/DispatchingAndroidInjector.java | {
"start": 1768,
"end": 4100
} | class ____<T> implements AndroidInjector<T> {
private static final String NO_SUPERTYPES_BOUND_FORMAT =
"No injector factory bound for Class<%s>";
private static final String SUPERTYPES_BOUND_FORMAT =
"No injector factory bound for Class<%1$s>. Injector factories were bound for supertypes "
+ "of %1$s: %2$s. Did you mean to bind an injector factory for the subtype?";
private final Map<String, Provider<AndroidInjector.Factory<?>>> injectorFactories;
@Inject
DispatchingAndroidInjector(
Map<Class<?>, Provider<AndroidInjector.Factory<?>>> injectorFactoriesWithClassKeys,
Map<String, Provider<AndroidInjector.Factory<?>>> injectorFactoriesWithStringKeys) {
this.injectorFactories = merge(injectorFactoriesWithClassKeys, injectorFactoriesWithStringKeys);
}
/**
* Merges the two maps into one by transforming the values of the {@code classKeyedMap} with
* {@link Class#getName()}.
*
* <p>An SPI plugin verifies the logical uniqueness of the keysets of these two maps so we're
* assured there's no overlap.
*
* <p>Ideally we could achieve this with a generic {@code @Provides} method, but we'd need to have
* <i>N</i> modules that each extend one base module.
*/
private static <C, V> Map<String, Provider<AndroidInjector.Factory<?>>> merge(
Map<Class<? extends C>, V> classKeyedMap, Map<String, V> stringKeyedMap) {
if (classKeyedMap.isEmpty()) {
@SuppressWarnings({"unchecked", "rawtypes"})
Map<String, Provider<AndroidInjector.Factory<?>>> safeCast = (Map) stringKeyedMap;
return safeCast;
}
Map<String, V> merged =
newLinkedHashMapWithExpectedSize(classKeyedMap.size() + stringKeyedMap.size());
merged.putAll(stringKeyedMap);
for (Entry<Class<? extends C>, V> entry : classKeyedMap.entrySet()) {
merged.put(entry.getKey().getName(), entry.getValue());
}
@SuppressWarnings({"unchecked", "rawtypes"})
Map<String, Provider<AndroidInjector.Factory<?>>> safeCast = (Map) merged;
return Collections.unmodifiableMap(safeCast);
}
/**
* Attempts to perform members-injection on {@code instance}, returning {@code true} if
* successful, {@code false} otherwise.
*
* @throws InvalidInjectorBindingException if the injector factory bound for a | DispatchingAndroidInjector |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java | {
"start": 3103,
"end": 10409
} | class ____ extends AbstractService implements
LogHandler {
private static final Logger LOG =
LoggerFactory.getLogger(NonAggregatingLogHandler.class);
private final Dispatcher dispatcher;
private final DeletionService delService;
private final Map<ApplicationId, String> appOwners;
private final LocalDirsHandlerService dirsHandler;
private final NMStateStoreService stateStore;
private long deleteDelaySeconds;
private boolean enableTriggerDeleteBySize;
private long deleteThreshold;
private ScheduledThreadPoolExecutor sched;
public NonAggregatingLogHandler(Dispatcher dispatcher,
DeletionService delService, LocalDirsHandlerService dirsHandler,
NMStateStoreService stateStore) {
super(NonAggregatingLogHandler.class.getName());
this.dispatcher = dispatcher;
this.delService = delService;
this.dirsHandler = dirsHandler;
this.stateStore = stateStore;
this.appOwners = new ConcurrentHashMap<ApplicationId, String>();
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
// Default 3 hours.
this.deleteDelaySeconds =
conf.getLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS,
YarnConfiguration.DEFAULT_NM_LOG_RETAIN_SECONDS);
this.enableTriggerDeleteBySize =
conf.getBoolean(YarnConfiguration.NM_LOG_TRIGGER_DELETE_BY_SIZE_ENABLED,
YarnConfiguration.DEFAULT_NM_LOG_TRIGGER_DELETE_BY_SIZE_ENABLED);
this.deleteThreshold =
conf.getLongBytes(YarnConfiguration.NM_LOG_DELETE_THRESHOLD,
YarnConfiguration.DEFAULT_NM_LOG_DELETE_THRESHOLD);
sched = createScheduledThreadPoolExecutor(conf);
super.serviceInit(conf);
recover();
}
@Override
protected void serviceStop() throws Exception {
if (sched != null) {
sched.shutdown();
boolean isShutdown = false;
try {
isShutdown = sched.awaitTermination(10, TimeUnit.SECONDS);
} catch (InterruptedException e) {
sched.shutdownNow();
isShutdown = true;
}
if (!isShutdown) {
sched.shutdownNow();
}
}
super.serviceStop();
}
FileContext getLocalFileContext(Configuration conf) {
try {
return FileContext.getLocalFSFileContext(conf);
} catch (IOException e) {
throw new YarnRuntimeException("Failed to access local fs");
}
}
private void recover() throws IOException {
if (stateStore.canRecover()) {
RecoveredLogDeleterState state = stateStore.loadLogDeleterState();
long now = System.currentTimeMillis();
for (Map.Entry<ApplicationId, LogDeleterProto> entry :
state.getLogDeleterMap().entrySet()) {
ApplicationId appId = entry.getKey();
LogDeleterProto proto = entry.getValue();
long deleteDelayMsec = proto.getDeletionTime() - now;
LOG.debug("Scheduling deletion of {} logs in {} msec", appId,
deleteDelayMsec);
LogDeleterRunnable logDeleter =
new LogDeleterRunnable(proto.getUser(), appId);
try {
sched.schedule(logDeleter, deleteDelayMsec, TimeUnit.MILLISECONDS);
} catch (RejectedExecutionException e) {
// Handling this event in local thread before starting threads
// or after calling sched.shutdownNow().
logDeleter.run();
}
}
}
}
@SuppressWarnings("unchecked")
@Override
public void handle(LogHandlerEvent event) {
switch (event.getType()) {
case APPLICATION_STARTED:
LogHandlerAppStartedEvent appStartedEvent =
(LogHandlerAppStartedEvent) event;
this.appOwners.put(appStartedEvent.getApplicationId(),
appStartedEvent.getUser());
this.dispatcher.getEventHandler().handle(
new ApplicationEvent(appStartedEvent.getApplicationId(),
ApplicationEventType.APPLICATION_LOG_HANDLING_INITED));
break;
case CONTAINER_FINISHED:
// Ignore
break;
case APPLICATION_FINISHED:
LogHandlerAppFinishedEvent appFinishedEvent =
(LogHandlerAppFinishedEvent) event;
ApplicationId appId = appFinishedEvent.getApplicationId();
String user = appOwners.remove(appId);
if (user == null) {
LOG.error("Unable to locate user for {}", appId);
// send LOG_HANDLING_FAILED out
NonAggregatingLogHandler.this.dispatcher.getEventHandler().handle(
new ApplicationEvent(appId,
ApplicationEventType.APPLICATION_LOG_HANDLING_FAILED));
break;
}
LogDeleterRunnable logDeleter = new LogDeleterRunnable(user, appId);
long deletionTimestamp = System.currentTimeMillis()
+ this.deleteDelaySeconds * 1000;
LogDeleterProto deleterProto = LogDeleterProto.newBuilder()
.setUser(user)
.setDeletionTime(deletionTimestamp)
.build();
try {
stateStore.storeLogDeleter(appId, deleterProto);
} catch (IOException e) {
LOG.error("Unable to record log deleter state", e);
}
try {
boolean logDeleterStarted = false;
if (enableTriggerDeleteBySize) {
final long appLogSize = calculateSizeOfAppLogs(user, appId);
if (appLogSize >= deleteThreshold) {
LOG.info("Log Deletion for application: {}, with no delay, size={}", appId, appLogSize);
sched.schedule(logDeleter, 0, TimeUnit.SECONDS);
logDeleterStarted = true;
}
}
if (!logDeleterStarted) {
LOG.info("Scheduling Log Deletion for application: {}, with delay of {} seconds",
appId, this.deleteDelaySeconds);
sched.schedule(logDeleter, this.deleteDelaySeconds, TimeUnit.SECONDS);
}
} catch (RejectedExecutionException e) {
// Handling this event in local thread before starting threads
// or after calling sched.shutdownNow().
logDeleter.run();
}
break;
default:
}
}
@Override
public Set<ApplicationId> getInvalidTokenApps() {
return Collections.emptySet();
}
ScheduledThreadPoolExecutor createScheduledThreadPoolExecutor(
Configuration conf) {
ThreadFactory tf =
new ThreadFactoryBuilder().setNameFormat("LogDeleter #%d").build();
sched =
new HadoopScheduledThreadPoolExecutor(conf.getInt(
YarnConfiguration.NM_LOG_DELETION_THREADS_COUNT,
YarnConfiguration.DEFAULT_NM_LOG_DELETE_THREAD_COUNT), tf);
return sched;
}
private long calculateSizeOfAppLogs(String user, ApplicationId applicationId) {
FileContext lfs = getLocalFileContext(getConfig());
long appLogsSize = 0L;
for (String rootLogDir : dirsHandler.getLogDirsForCleanup()) {
Path logDir = new Path(rootLogDir, applicationId.toString());
try {
appLogsSize += lfs.getFileStatus(logDir).getLen();
} catch (UnsupportedFileSystemException ue) {
LOG.warn("Unsupported file system used for log dir {}", logDir, ue);
continue;
} catch (IOException ie) {
LOG.error("Unable to getFileStatus for {}", logDir, ie);
continue;
}
}
return appLogsSize;
}
| NonAggregatingLogHandler |
java | apache__camel | components/camel-datasonnet/src/main/java/org/apache/camel/language/datasonnet/DatasonnetExpression.java | {
"start": 1977,
"end": 10685
} | class ____ extends ExpressionAdapter implements ExpressionResultTypeAware {
private static final Logger LOG = LoggerFactory.getLogger(DatasonnetExpression.class);
private final String expression;
private Expression source;
private MediaType bodyMediaType;
private MediaType outputMediaType;
private Class<?> resultType;
private Collection<String> libraryPaths;
private transient DatasonnetLanguage language;
public DatasonnetExpression(String expression) {
this.expression = expression;
}
@Deprecated
public static DatasonnetExpression builder(String expression) {
return new DatasonnetExpression(expression);
}
@Deprecated
public static DatasonnetExpression builder(String expression, Class<?> resultType) {
DatasonnetExpression answer = new DatasonnetExpression(expression);
answer.setResultType(resultType);
return answer;
}
@Override
public boolean matches(Exchange exchange) {
this.outputMediaType = MediaTypes.APPLICATION_JAVA;
return evaluate(exchange, Boolean.class);
}
@SuppressWarnings("unchecked")
@Override
public <T> T evaluate(Exchange exchange, Class<T> type) {
try {
// pass exchange to CML lib using thread as context
CML.getInstance().getExchange().set(exchange);
Document<?> result = doEvaluate(exchange);
if (!type.equals(Object.class)) {
return ExchangeHelper.convertToType(exchange, type, result.getContent());
} else if (resultType == null || resultType.equals(Document.class)) {
return (T) result;
} else {
return (T) result.getContent();
}
} catch (Exception e) {
throw new RuntimeExpressionException("Unable to evaluate DataSonnet expression: " + expression, e);
} finally {
CML.getInstance().getExchange().remove();
}
}
private Document<?> doEvaluate(Exchange exchange) {
MediaType bodyMT = bodyMediaType;
if (bodyMT == null && !expression.startsWith(Header.DATASONNET_HEADER)) {
//Try to auto-detect input mime type if it was not explicitly set
String typeHeader = exchange.getProperty(DatasonnetConstants.BODY_MEDIATYPE,
exchange.getIn().getHeader(Exchange.CONTENT_TYPE), String.class);
if (typeHeader != null) {
bodyMT = MediaType.valueOf(typeHeader);
}
}
Object payload = source != null ? source.evaluate(exchange, Object.class) : exchange.getMessage().getBody();
Document<?> doc = null;
if (payload != null) {
doc = exchange.getContext().getTypeConverter().tryConvertTo(Document.class, exchange, payload);
}
if (doc == null) {
String text = exchange.getContext().getTypeConverter().tryConvertTo(String.class, exchange, payload);
if (exchange.getMessage().getBody() == null || "".equals(text)) {
//Empty body, force type to be application/java
doc = new DefaultDocument<>("", MediaTypes.APPLICATION_JAVA);
} else if (MediaTypes.APPLICATION_JAVA.equalsTypeAndSubtype(bodyMT) || bodyMT == null) {
doc = new DefaultDocument<>(payload);
} else {
// force using string value
doc = new DefaultDocument<>(text, bodyMT);
}
}
// the mapper is pre initialized but can become empty
Mapper mapper = language.computeIfMiss(expression, () -> {
MapperBuilder builder = new MapperBuilder(expression)
.withInputNames("body")
.withImports(resolveImports(language))
.withLibrary(CML.getInstance())
.withDefaultOutput(MediaTypes.APPLICATION_JAVA);
Set<Library> additionalLibraries = exchange.getContext().getRegistry().findByType(com.datasonnet.spi.Library.class);
for (Library lib : additionalLibraries) {
builder = builder.withLibrary(lib);
}
return builder.build();
});
MediaType outMT = outputMediaType;
if (outMT == null) {
//Try to auto-detect output mime type if it was not explicitly set
String typeHeader = exchange.getProperty(DatasonnetConstants.OUTPUT_MEDIATYPE,
exchange.getIn().getHeader(DatasonnetConstants.OUTPUT_MEDIATYPE), String.class);
if (typeHeader != null) {
outMT = MediaType.valueOf(typeHeader);
} else {
outMT = MediaTypes.ANY;
}
}
Map<String, Document<?>> inputs = Collections.singletonMap("body", doc);
if (resultType == null || resultType.equals(Document.class)) {
return mapper.transform(doc, inputs, outMT, Object.class);
} else {
return mapper.transform(doc, inputs, outMT, resultType);
}
}
private Map<String, String> resolveImports(DatasonnetLanguage language) {
if (libraryPaths == null) {
return language.getClasspathImports();
}
Map<String, String> answer = new HashMap<>();
LOG.debug("Explicit library path is: {}", libraryPaths);
for (String nextPath : libraryPaths) {
final File nextLibDir = new File(nextPath);
if (nextLibDir.isDirectory()) {
try {
Files.walkFileTree(nextLibDir.toPath(), new SimpleFileVisitor<Path>() {
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
File f = file.toFile();
if (!f.isDirectory() && f.getName().toLowerCase().endsWith(".libsonnet")) {
String content = IOUtils.toString(file.toUri(), Charset.defaultCharset());
Path relative = nextLibDir.toPath().relativize(file);
LOG.debug("Loading DataSonnet library: {}", relative);
answer.put(relative.toString(), content);
}
return FileVisitResult.CONTINUE;
}
});
} catch (IOException e) {
LOG.warn("Unable to load DataSonnet library from: {}", nextPath, e);
}
}
}
return answer;
}
@Override
public void init(CamelContext context) {
super.init(context);
language = (DatasonnetLanguage) context.resolveLanguage("datasonnet");
// initialize mapper eager
language.computeIfMiss(expression, () -> {
MapperBuilder builder = new MapperBuilder(expression)
.withInputNames("body")
.withImports(resolveImports(language))
.withLibrary(CML.getInstance())
.withDefaultOutput(MediaTypes.APPLICATION_JAVA);
Set<Library> additionalLibraries = context.getRegistry().findByType(com.datasonnet.spi.Library.class);
for (Library lib : additionalLibraries) {
builder = builder.withLibrary(lib);
}
return builder.build();
});
}
// Getter/Setter methods
// -------------------------------------------------------------------------
public Expression getSource() {
return source;
}
public void setSource(Expression source) {
this.source = source;
}
public MediaType getBodyMediaType() {
return bodyMediaType;
}
/**
* The message's body MediaType
*/
public void setBodyMediaType(MediaType inputMimeType) {
this.bodyMediaType = inputMimeType;
}
public MediaType getOutputMediaType() {
return outputMediaType;
}
/**
* The MediaType to output
*/
public void setOutputMediaType(MediaType outputMimeType) {
this.outputMediaType = outputMimeType;
}
public Collection<String> getLibraryPaths() {
return libraryPaths;
}
/**
* The paths to search for .libsonnet files
*/
public void setLibraryPaths(Collection<String> libraryPaths) {
this.libraryPaths = libraryPaths;
}
@Override
public String getExpressionText() {
return this.expression;
}
@Override
public Class<?> getResultType() {
return this.resultType;
}
/**
* Sets the | DatasonnetExpression |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/web/context/support/XmlWebApplicationContext.java | {
"start": 2989,
"end": 6508
} | class ____ extends AbstractRefreshableWebApplicationContext {
/** Default config location for the root context. */
public static final String DEFAULT_CONFIG_LOCATION = "/WEB-INF/applicationContext.xml";
/** Default prefix for building a config location for a namespace. */
public static final String DEFAULT_CONFIG_LOCATION_PREFIX = "/WEB-INF/";
/** Default suffix for building a config location for a namespace. */
public static final String DEFAULT_CONFIG_LOCATION_SUFFIX = ".xml";
/**
* Loads the bean definitions via an XmlBeanDefinitionReader.
* @see org.springframework.beans.factory.xml.XmlBeanDefinitionReader
* @see #initBeanDefinitionReader
* @see #loadBeanDefinitions
*/
@Override
protected void loadBeanDefinitions(DefaultListableBeanFactory beanFactory) throws BeansException, IOException {
// Create a new XmlBeanDefinitionReader for the given BeanFactory.
XmlBeanDefinitionReader beanDefinitionReader = new XmlBeanDefinitionReader(beanFactory);
// Configure the bean definition reader with this context's
// resource loading environment.
beanDefinitionReader.setEnvironment(getEnvironment());
beanDefinitionReader.setResourceLoader(this);
beanDefinitionReader.setEntityResolver(new ResourceEntityResolver(this));
// Allow a subclass to provide custom initialization of the reader,
// then proceed with actually loading the bean definitions.
initBeanDefinitionReader(beanDefinitionReader);
loadBeanDefinitions(beanDefinitionReader);
}
/**
* Initialize the bean definition reader used for loading the bean
* definitions of this context. Default implementation is empty.
* <p>Can be overridden in subclasses, for example, for turning off XML validation
* or using a different XmlBeanDefinitionParser implementation.
* @param beanDefinitionReader the bean definition reader used by this context
* @see org.springframework.beans.factory.xml.XmlBeanDefinitionReader#setValidationMode
* @see org.springframework.beans.factory.xml.XmlBeanDefinitionReader#setDocumentReaderClass
*/
protected void initBeanDefinitionReader(XmlBeanDefinitionReader beanDefinitionReader) {
}
/**
* Load the bean definitions with the given XmlBeanDefinitionReader.
* <p>The lifecycle of the bean factory is handled by the refreshBeanFactory method;
* therefore this method is just supposed to load and/or register bean definitions.
* <p>Delegates to a ResourcePatternResolver for resolving location patterns
* into Resource instances.
* @throws IOException if the required XML document isn't found
* @see #refreshBeanFactory
* @see #getConfigLocations
* @see #getResources
* @see #getResourcePatternResolver
*/
protected void loadBeanDefinitions(XmlBeanDefinitionReader reader) throws IOException {
String[] configLocations = getConfigLocations();
if (configLocations != null) {
for (String configLocation : configLocations) {
reader.loadBeanDefinitions(configLocation);
}
}
}
/**
* The default location for the root context is "/WEB-INF/applicationContext.xml",
* and "/WEB-INF/test-servlet.xml" for a context with the namespace "test-servlet"
* (like for a DispatcherServlet instance with the servlet-name "test").
*/
@Override
protected String[] getDefaultConfigLocations() {
if (getNamespace() != null) {
return new String[] {DEFAULT_CONFIG_LOCATION_PREFIX + getNamespace() + DEFAULT_CONFIG_LOCATION_SUFFIX};
}
else {
return new String[] {DEFAULT_CONFIG_LOCATION};
}
}
}
| XmlWebApplicationContext |
java | google__error-prone | check_api/src/test/java/com/google/errorprone/util/FindIdentifiersTest.java | {
"start": 22477,
"end": 22943
} | class ____ {
void doIt() {
// BUG: Diagnostic contains: [UTF_8]
String.format(UTF_8.toString());
}
}
""")
.doTest();
}
@Test
public void findAllIdentsInheritedStaticImport() {
CompilationTestHelper.newInstance(PrintIdents.class, getClass())
.addSourceLines(
"pkg/MyInterface.java",
"""
package pkg;
public | Test |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECBlock.java | {
"start": 1249,
"end": 2327
} | class ____ {
private boolean isParity;
private boolean isErased;
/**
* A default constructor. isParity and isErased are false by default.
*/
public ECBlock() {
this(false, false);
}
/**
* A constructor specifying isParity and isErased.
* @param isParity is a parity block
* @param isErased is erased or not
*/
public ECBlock(boolean isParity, boolean isErased) {
this.isParity = isParity;
this.isErased = isErased;
}
/**
* Set true if it's for a parity block.
* @param isParity is parity or not
*/
public void setParity(boolean isParity) {
this.isParity = isParity;
}
/**
* Set true if the block is missing.
* @param isErased is erased or not
*/
public void setErased(boolean isErased) {
this.isErased = isErased;
}
/**
*
* @return true if it's parity block, otherwise false
*/
public boolean isParity() {
return isParity;
}
/**
*
* @return true if it's erased due to erasure, otherwise false
*/
public boolean isErased() {
return isErased;
}
}
| ECBlock |
java | netty__netty | codec-socks/src/test/java/io/netty/handler/codec/socksx/v5/Socks5CommonTestUtils.java | {
"start": 840,
"end": 1782
} | class ____ constructed.
*/
private Socks5CommonTestUtils() {
//NOOP
}
public static void writeFromClientToServer(EmbeddedChannel embedder, Socks5Message msg) {
embedder.writeInbound(encodeClient(msg));
}
public static void writeFromServerToClient(EmbeddedChannel embedder, Socks5Message msg) {
embedder.writeInbound(encodeServer(msg));
}
public static ByteBuf encodeClient(Socks5Message msg) {
EmbeddedChannel out = new EmbeddedChannel(Socks5ClientEncoder.DEFAULT);
out.writeOutbound(msg);
ByteBuf encoded = out.readOutbound();
out.finish();
return encoded;
}
public static ByteBuf encodeServer(Socks5Message msg) {
EmbeddedChannel out = new EmbeddedChannel(Socks5ServerEncoder.DEFAULT);
out.writeOutbound(msg);
ByteBuf encoded = out.readOutbound();
out.finish();
return encoded;
}
}
| being |
java | lettuce-io__lettuce-core | src/test/java/io/lettuce/core/cluster/ClusterSetup.java | {
"start": 1449,
"end": 5989
} | class ____ {
/**
* Setup a cluster consisting of two members (see {@link ClusterTestSettings#port5} to {@link ClusterTestSettings#port6}).
* Two masters (0-11999 and 12000-16383)
*
* @param clusterHelper helper
*/
public static void setup2Masters(ClusterTestHelper clusterHelper) {
clusterHelper.clusterReset();
clusterHelper.meet(ClusterTestSettings.host, ClusterTestSettings.port5);
clusterHelper.meet(ClusterTestSettings.host, ClusterTestSettings.port6);
RedisAdvancedClusterAsyncCommands<String, String> connection = clusterHelper.getClusterClient().connect().async();
Wait.untilTrue(() -> {
clusterHelper.getClusterClient().reloadPartitions();
return clusterHelper.getClusterClient().getPartitions().size() == 2;
}).waitOrTimeout();
Partitions partitions = clusterHelper.getClusterClient().getPartitions();
for (RedisClusterNode partition : partitions) {
if (!partition.getSlots().isEmpty()) {
RedisClusterAsyncCommands<String, String> nodeConnection = connection.getConnection(partition.getNodeId());
for (Integer slot : partition.getSlots()) {
nodeConnection.clusterDelSlots(slot);
}
}
}
RedisClusterAsyncCommands<String, String> node1 = connection.getConnection(ClusterTestSettings.host,
ClusterTestSettings.port5);
node1.clusterAddSlots(ClusterTestSettings.createSlots(0, 12000));
RedisClusterAsyncCommands<String, String> node2 = connection.getConnection(ClusterTestSettings.host,
ClusterTestSettings.port6);
node2.clusterAddSlots(ClusterTestSettings.createSlots(12000, 16384));
Wait.untilTrue(clusterHelper::isStable).waitOrTimeout();
Wait.untilEquals(2L, () -> {
clusterHelper.getClusterClient().reloadPartitions();
return partitionStream(clusterHelper)
.filter(redisClusterNode -> redisClusterNode.is(RedisClusterNode.NodeFlag.UPSTREAM)).count();
}).waitOrTimeout();
connection.getStatefulConnection().close();
}
/**
* Setup a cluster consisting of two members (see {@link ClusterTestSettings#port5} to {@link ClusterTestSettings#port6}).
* One master (0-16383) and one replica.
*
* @param clusterHelper
*/
public static void setupMasterWithReplica(ClusterTestHelper clusterHelper) {
clusterHelper.clusterReset();
clusterHelper.meet(ClusterTestSettings.host, ClusterTestSettings.port5);
clusterHelper.meet(ClusterTestSettings.host, ClusterTestSettings.port6);
RedisAdvancedClusterAsyncCommands<String, String> connection = clusterHelper.getClusterClient().connect().async();
StatefulRedisClusterConnection<String, String> statefulConnection = connection.getStatefulConnection();
Wait.untilEquals(2, () -> {
clusterHelper.getClusterClient().reloadPartitions();
return clusterHelper.getClusterClient().getPartitions().size();
}).waitOrTimeout();
RedisClusterCommands<String, String> node1 = statefulConnection
.getConnection(TestSettings.hostAddr(), ClusterTestSettings.port5).sync();
node1.clusterAddSlots(ClusterTestSettings.createSlots(0, 16384));
Wait.untilTrue(clusterHelper::isStable).waitOrTimeout();
TestFutures.awaitOrTimeout(connection.getConnection(ClusterTestSettings.host, ClusterTestSettings.port6)
.clusterReplicate(node1.clusterMyId()));
clusterHelper.getClusterClient().reloadPartitions();
Wait.untilEquals(1L, () -> {
clusterHelper.getClusterClient().reloadPartitions();
return partitionStream(clusterHelper)
.filter(redisClusterNode -> redisClusterNode.is(RedisClusterNode.NodeFlag.UPSTREAM)).count();
}).waitOrTimeout();
Wait.untilEquals(1L, () -> {
clusterHelper.getClusterClient().reloadPartitions();
return partitionStream(clusterHelper)
.filter(redisClusterNode -> redisClusterNode.is(RedisClusterNode.NodeFlag.REPLICA)).count();
}).waitOrTimeout();
connection.getStatefulConnection().close();
}
private static Stream<RedisClusterNode> partitionStream(ClusterTestHelper clusterHelper) {
return clusterHelper.getClusterClient().getPartitions().getPartitions().stream();
}
}
| ClusterSetup |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/tm/JtaBeforeCompletionFailureTest.java | {
"start": 3230,
"end": 3544
} | class ____ {
@Id
public Integer id;
@Column(unique = true, nullable = false, name = "entity_key")
public String key;
public String name;
public SimpleEntity() {
}
public SimpleEntity(Integer id, String key, String name) {
this.id = id;
this.key = key;
this.name = name;
}
}
}
| SimpleEntity |
java | apache__dubbo | dubbo-plugin/dubbo-qos/src/main/java/org/apache/dubbo/qos/QosScopeModelInitializer.java | {
"start": 1229,
"end": 1813
} | class ____ implements ScopeModelInitializer {
@Override
public void initializeFrameworkModel(FrameworkModel frameworkModel) {
ScopeBeanFactory beanFactory = frameworkModel.getBeanFactory();
beanFactory.registerBean(Server.class);
beanFactory.registerBean(SerializeCheckUtils.class);
}
@Override
public void initializeApplicationModel(ApplicationModel applicationModel) {
ScopeBeanFactory beanFactory = applicationModel.getBeanFactory();
beanFactory.registerBean(ActuatorCommandExecutor.class);
}
}
| QosScopeModelInitializer |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/TransportVersions.java | {
"start": 1479,
"end": 1572
} | class ____ {
/*
* NOTE: IntelliJ lies!
* This map is used during | TransportVersions |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableIntervalRange.java | {
"start": 1263,
"end": 2386
} | class ____ extends Flowable<Long> {
final Scheduler scheduler;
final long start;
final long end;
final long initialDelay;
final long period;
final TimeUnit unit;
public FlowableIntervalRange(long start, long end, long initialDelay, long period, TimeUnit unit, Scheduler scheduler) {
this.initialDelay = initialDelay;
this.period = period;
this.unit = unit;
this.scheduler = scheduler;
this.start = start;
this.end = end;
}
@Override
public void subscribeActual(Subscriber<? super Long> s) {
IntervalRangeSubscriber is = new IntervalRangeSubscriber(s, start, end);
s.onSubscribe(is);
Scheduler sch = scheduler;
if (sch instanceof TrampolineScheduler) {
Worker worker = sch.createWorker();
is.setResource(worker);
worker.schedulePeriodically(is, initialDelay, period, unit);
} else {
Disposable d = sch.schedulePeriodicallyDirect(is, initialDelay, period, unit);
is.setResource(d);
}
}
static final | FlowableIntervalRange |
java | apache__camel | components/camel-bindy/src/test/java/org/apache/camel/dataformat/bindy/model/complex/generateheader/Client.java | {
"start": 1011,
"end": 1977
} | class ____ {
@DataField(pos = 2, columnName = "Client Nr")
private String clientNr;
@DataField(pos = 3, columnName = "First Name")
private String firstName;
@DataField(pos = 4, columnName = "Last Name")
private String lastName;
public String getClientNr() {
return clientNr;
}
public void setClientNr(String clientNr) {
this.clientNr = clientNr;
}
public String getFirstName() {
return firstName;
}
public void setFirstName(String firstName) {
this.firstName = firstName;
}
public String getLastName() {
return lastName;
}
public void setLastName(String lastName) {
this.lastName = lastName;
}
@Override
public String toString() {
return "Model : " + Client.class.getName() + " : " + String.valueOf(this.clientNr) + ", "
+ String.valueOf(this.firstName) + ", " + String.valueOf(this.lastName);
}
}
| Client |
java | apache__camel | components/camel-zookeeper/src/main/java/org/apache/camel/component/zookeeper/operations/GetChildrenOperation.java | {
"start": 1165,
"end": 2187
} | class ____ extends ZooKeeperOperation<List<String>> {
public GetChildrenOperation(ZooKeeper connection, String node) {
super(connection, node);
}
@Override
public OperationResult<List<String>> getResult() {
try {
Stat statistics = new Stat();
List<String> children = connection.getChildren(node, true, statistics);
if (LOG.isDebugEnabled()) {
if (LOG.isTraceEnabled()) {
LOG.trace(format("Received children from '%s' path with statistics '%s'", node, statistics));
} else {
LOG.debug(format("Received children from '%s' path ", node));
}
}
return new OperationResult<>(children, statistics);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
return new OperationResult<>(e);
} catch (Exception e) {
return new OperationResult<>(e);
}
}
}
| GetChildrenOperation |
java | google__guava | android/guava/src/com/google/common/cache/CacheBuilder.java | {
"start": 34910,
"end": 37315
} | class ____.
*
* <p>If you can represent the duration as a {@link Duration} (which should be preferred when
* feasible), use {@link #expireAfterWrite(Duration)} instead.
*
* @param duration the length of time after an entry is created that it should be automatically
* removed
* @param unit the unit that {@code duration} is expressed in
* @return this {@code CacheBuilder} instance (for chaining)
* @throws IllegalArgumentException if {@code duration} is negative
* @throws IllegalStateException if {@link #expireAfterWrite} was already set
* @deprecated Use {@link #expireAfterWrite(Duration)} instead.
*/
@Deprecated // GoodTime
@CanIgnoreReturnValue
public CacheBuilder<K, V> expireAfterWrite(long duration, TimeUnit unit) {
checkState(
expireAfterWriteNanos == UNSET_INT,
"expireAfterWrite was already set to %s ns",
expireAfterWriteNanos);
checkArgument(duration >= 0, "duration cannot be negative: %s %s", duration, unit);
this.expireAfterWriteNanos = unit.toNanos(duration);
return this;
}
@SuppressWarnings("GoodTime") // nanos internally, should be Duration
long getExpireAfterWriteNanos() {
return (expireAfterWriteNanos == UNSET_INT) ? DEFAULT_EXPIRATION_NANOS : expireAfterWriteNanos;
}
/**
* Specifies that each entry should be automatically removed from the cache once a fixed duration
* has elapsed after the entry's creation, the most recent replacement of its value, or its last
* access. Access time is reset by all cache read and write operations (including {@code
* Cache.asMap().get(Object)} and {@code Cache.asMap().put(K, V)}), but not by {@code
* containsKey(Object)}, nor by operations on the collection-views of {@link Cache#asMap}}. So,
* for example, iterating through {@code Cache.asMap().entrySet()} does not reset access time for
* the entries you retrieve.
*
* <p>When {@code duration} is zero, this method hands off to {@link #maximumSize(long)
* maximumSize}{@code (0)}, ignoring any otherwise-specified maximum size or weight. This can be
* useful in testing, or to disable caching temporarily without a code change.
*
* <p>Expired entries may be counted in {@link Cache#size}, but will never be visible to read or
* write operations. Expired entries are cleaned up as part of the routine maintenance described
* in the | javadoc |
java | google__dagger | javatests/dagger/functional/generictypes/SingletonGenericComponent.java | {
"start": 724,
"end": 941
} | interface ____ {
ScopedGeneric<A> scopedGenericA();
ScopedGeneric<B> scopedGenericB();
ScopedSimpleGeneric<A> scopedSimpleGenericA();
ScopedSimpleGeneric<B> scopedSimpleGenericB();
}
| SingletonGenericComponent |
java | mockito__mockito | mockito-core/src/test/java/org/mockitousage/matchers/GenericMatchersTest.java | {
"start": 476,
"end": 990
} | interface ____ {
List<String> sort(List<String> otherList);
String convertDate(Date date);
}
@Mock Foo sorter;
@SuppressWarnings("unchecked")
@Test
public void shouldCompile() {
when(sorter.convertDate(new Date())).thenReturn("one");
when(sorter.convertDate((Date) any())).thenReturn("two");
// following requires warning suppression but allows setting anyList()
when(sorter.sort(ArgumentMatchers.<String>anyList())).thenReturn(null);
}
}
| Foo |
java | spring-cloud__spring-cloud-gateway | spring-cloud-gateway-server-webflux/src/main/java/org/springframework/cloud/gateway/filter/factory/StripPrefixGatewayFilterFactory.java | {
"start": 3215,
"end": 3375
} | class ____ {
private int parts = 1;
public int getParts() {
return parts;
}
public void setParts(int parts) {
this.parts = parts;
}
}
}
| Config |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/JUnit4TestsNotRunWithinEnclosed.java | {
"start": 2472,
"end": 2573
} | class ____ the Enclosed runner,"
+ " will not run.",
severity = ERROR)
public final | using |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/UserDefinedValueAggregatorDescriptor.java | {
"start": 1998,
"end": 2381
} | class ____ of the user defined descriptor class
* @param job a configure object used for decriptor configuration
*/
public UserDefinedValueAggregatorDescriptor(String className, JobConf job) {
super(className, job);
((ValueAggregatorDescriptor)theAggregatorDescriptor).configure(job);
}
/**
* Do nothing.
*/
public void configure(JobConf job) {
}
}
| name |
java | google__guava | android/guava/src/com/google/common/graph/ElementOrder.java | {
"start": 2164,
"end": 6752
} | enum ____ {
UNORDERED,
STABLE,
INSERTION,
SORTED
}
private ElementOrder(Type type, @Nullable Comparator<T> comparator) {
this.type = checkNotNull(type);
this.comparator = comparator;
checkState((type == Type.SORTED) == (comparator != null));
}
/** Returns an instance which specifies that no ordering is guaranteed. */
public static <S> ElementOrder<S> unordered() {
return new ElementOrder<>(Type.UNORDERED, null);
}
/**
* Returns an instance which specifies that ordering is guaranteed to be always be the same across
* iterations, and across releases. Some methods may have stronger guarantees.
*
* <p>This instance is only useful in combination with {@code incidentEdgeOrder}, e.g. {@code
* graphBuilder.incidentEdgeOrder(ElementOrder.stable())}.
*
* <h3>In combination with {@code incidentEdgeOrder}</h3>
*
* <p>{@code incidentEdgeOrder(ElementOrder.stable())} guarantees the ordering of the returned
* collections of the following methods:
*
* <ul>
* <li>For {@link Graph} and {@link ValueGraph}:
* <ul>
* <li>{@code edges()}: Stable order
* <li>{@code adjacentNodes(node)}: Connecting edge insertion order
* <li>{@code predecessors(node)}: Connecting edge insertion order
* <li>{@code successors(node)}: Connecting edge insertion order
* <li>{@code incidentEdges(node)}: Edge insertion order
* </ul>
* <li>For {@link Network}:
* <ul>
* <li>{@code adjacentNodes(node)}: Stable order
* <li>{@code predecessors(node)}: Connecting edge insertion order
* <li>{@code successors(node)}: Connecting edge insertion order
* <li>{@code incidentEdges(node)}: Stable order
* <li>{@code inEdges(node)}: Edge insertion order
* <li>{@code outEdges(node)}: Edge insertion order
* <li>{@code adjacentEdges(edge)}: Stable order
* <li>{@code edgesConnecting(nodeU, nodeV)}: Edge insertion order
* </ul>
* </ul>
*
* @since 29.0
*/
public static <S> ElementOrder<S> stable() {
return new ElementOrder<>(Type.STABLE, null);
}
/** Returns an instance which specifies that insertion ordering is guaranteed. */
public static <S> ElementOrder<S> insertion() {
return new ElementOrder<>(Type.INSERTION, null);
}
/**
* Returns an instance which specifies that the natural ordering of the elements is guaranteed.
*/
public static <S extends Comparable<? super S>> ElementOrder<S> natural() {
return new ElementOrder<>(Type.SORTED, Ordering.<S>natural());
}
/**
* Returns an instance which specifies that the ordering of the elements is guaranteed to be
* determined by {@code comparator}.
*/
public static <S> ElementOrder<S> sorted(Comparator<S> comparator) {
return new ElementOrder<>(Type.SORTED, checkNotNull(comparator));
}
/** Returns the type of ordering used. */
public Type type() {
return type;
}
/**
* Returns the {@link Comparator} used.
*
* @throws UnsupportedOperationException if comparator is not defined
*/
public Comparator<T> comparator() {
if (comparator != null) {
return comparator;
}
throw new UnsupportedOperationException("This ordering does not define a comparator.");
}
@Override
public boolean equals(@Nullable Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof ElementOrder)) {
return false;
}
ElementOrder<?> other = (ElementOrder<?>) obj;
return (type == other.type) && Objects.equals(comparator, other.comparator);
}
@Override
public int hashCode() {
return Objects.hash(type, comparator);
}
@Override
public String toString() {
ToStringHelper helper = MoreObjects.toStringHelper(this).add("type", type);
if (comparator != null) {
helper.add("comparator", comparator);
}
return helper.toString();
}
/** Returns an empty mutable map whose keys will respect this {@link ElementOrder}. */
<K extends T, V> Map<K, V> createMap(int expectedSize) {
switch (type) {
case UNORDERED:
return Maps.newHashMapWithExpectedSize(expectedSize);
case INSERTION:
case STABLE:
return Maps.newLinkedHashMapWithExpectedSize(expectedSize);
case SORTED:
return Maps.newTreeMap(comparator());
}
throw new AssertionError();
}
@SuppressWarnings("unchecked")
<T1 extends T> ElementOrder<T1> cast() {
return (ElementOrder<T1>) this;
}
}
| Type |
java | spring-projects__spring-boot | buildpack/spring-boot-buildpack-platform/src/main/java/org/springframework/boot/buildpack/platform/build/ImageType.java | {
"start": 739,
"end": 1092
} | enum ____ {
/**
* Builder image.
*/
BUILDER("builder image"),
/**
* Run image.
*/
RUNNER("run image"),
/**
* Buildpack image.
*/
BUILDPACK("buildpack image");
private final String description;
ImageType(String description) {
this.description = description;
}
String getDescription() {
return this.description;
}
}
| ImageType |
java | quarkusio__quarkus | extensions/redis-client/runtime/src/main/java/io/quarkus/redis/runtime/datasource/AbstractHashCommands.java | {
"start": 602,
"end": 7182
} | class ____<K, F, V> extends AbstractRedisCommands {
protected final Type typeOfValue;
protected final Type typeOfField;
AbstractHashCommands(RedisCommandExecutor redis, Type k, Type f, Type v) {
super(redis, new Marshaller(k, f, v));
this.typeOfField = f;
this.typeOfValue = v;
}
Uni<Response> _hdel(K key, F[] fields) {
nonNull(key, "key");
notNullOrEmpty(fields, "fields");
doesNotContainNull(fields, "fields");
RedisCommand cmd = RedisCommand.of(Command.HDEL)
.put(marshaller.encode(key));
for (F field : fields) {
cmd.put(marshaller.encode(field));
}
return execute(cmd);
}
Uni<Response> _hexists(K key, F field) {
nonNull(key, "key");
nonNull(field, "field");
return execute(RedisCommand.of(Command.HEXISTS).put(marshaller.encode(key)).put(marshaller.encode(field)));
}
Uni<Response> _hget(K key, F field) {
nonNull(key, "key");
nonNull(field, "field");
return execute(RedisCommand.of(Command.HGET).put(marshaller.encode(key)).put(marshaller.encode(field)));
}
Uni<Response> _hincrby(K key, F field, long amount) {
nonNull(key, "key");
nonNull(field, "field");
return execute(RedisCommand.of(Command.HINCRBY).put(marshaller.encode(key))
.put(marshaller.encode(field)).put(amount));
}
Uni<Response> _hincrbyfloat(K key, F field, double amount) {
nonNull(key, "key");
nonNull(field, "field");
return execute(RedisCommand.of(Command.HINCRBYFLOAT).put(marshaller.encode(key))
.put(marshaller.encode(field)).put(amount));
}
Uni<Response> _hgetall(K key) {
nonNull(key, "key");
return execute((RedisCommand.of(Command.HGETALL).put(marshaller.encode(key))));
}
Uni<Response> _hkeys(K key) {
nonNull(key, "key");
return execute((RedisCommand.of(Command.HKEYS).put(marshaller.encode(key))));
}
Uni<Response> _hlen(K key) {
nonNull(key, "key");
return execute((RedisCommand.of(Command.HLEN).put(marshaller.encode(key))));
}
@SafeVarargs
final Uni<Response> _hmget(K key, F... fields) {
nonNull(key, "key");
doesNotContainNull(fields, "fields");
if (fields.length == 0) {
return Uni.createFrom().failure(new IllegalArgumentException("`fields` must not be empty"));
}
RedisCommand cmd = RedisCommand.of(Command.HMGET);
cmd.put(marshaller.encode(key));
for (F field : fields) {
cmd.put(marshaller.encode(field));
}
return execute(cmd);
}
Uni<Response> _hmset(K key, Map<F, V> map) {
nonNull(key, "key");
nonNull(map, "map");
if (map.isEmpty()) {
return Uni.createFrom().failure(new IllegalArgumentException("`map` must not be empty"));
}
RedisCommand cmd = RedisCommand.of(Command.HMSET);
cmd.put(marshaller.encode(key));
for (Map.Entry<F, V> entry : map.entrySet()) {
cmd.put(marshaller.encode(entry.getKey()));
cmd.putNullable(marshaller.encode(entry.getValue()));
}
return execute(cmd);
}
Uni<Response> _hrandfield(K key) {
nonNull(key, "key");
return execute(RedisCommand.of(Command.HRANDFIELD).put(marshaller.encode(key)));
}
Uni<Response> _hrandfield(K key, long count) {
nonNull(key, "key");
positive(count, "count");
return execute(RedisCommand.of(Command.HRANDFIELD).put(marshaller.encode(key)).put(count));
}
Uni<Response> _hrandfieldWithValues(K key, long count) {
nonNull(key, "key");
return execute(RedisCommand.of(Command.HRANDFIELD).put(marshaller.encode(key)).put(count).put("WITHVALUES"));
}
Uni<Response> _hset(K key, F field, V value) {
nonNull(key, "key");
nonNull(field, "field");
nonNull(value, "value");
return execute(RedisCommand.of(Command.HSET)
.put(marshaller.encode(key))
.put(marshaller.encode(field))
.put(marshaller.encode(value)));
}
Uni<Response> _hset(K key, Map<F, V> map) {
nonNull(key, "key");
nonNull(map, "map");
if (map.isEmpty()) {
return Uni.createFrom().failure(new IllegalArgumentException("`map` must not be empty"));
}
RedisCommand cmd = RedisCommand.of(Command.HSET);
cmd.put(marshaller.encode(key));
for (Map.Entry<F, V> entry : map.entrySet()) {
cmd
.put(marshaller.encode(entry.getKey()))
.put(marshaller.encode(entry.getValue()));
}
return execute(cmd);
}
Uni<Response> _hsetnx(K key, F field, V value) {
nonNull(key, "key");
nonNull(field, "field");
return execute(RedisCommand.of(Command.HSETNX)
.put(marshaller.encode(key))
.put(marshaller.encode(field))
.put(marshaller.encode(value)));
}
Uni<Response> _hstrlen(K key, F field) {
nonNull(key, "key");
nonNull(field, "field");
return execute(RedisCommand.of(Command.HSTRLEN).put(marshaller.encode(key))
.put(marshaller.encode(field)));
}
Uni<Response> _hvals(K key) {
nonNull(key, "key");
return execute(RedisCommand.of(Command.HVALS).put(marshaller.encode(key)));
}
V decodeV(Response resp) {
return marshaller.decode(typeOfValue, resp);
}
Map<F, V> decodeMap(Response r) {
return marshaller.decodeAsMap(r, typeOfField, typeOfValue);
}
List<F> decodeListOfField(Response r) {
return marshaller.decodeAsList(r, typeOfField);
}
List<V> decodeListOfValue(Response r) {
return marshaller.decodeAsList(r, typeOfValue);
}
F decodeF(Response resp) {
return marshaller.decode(typeOfField, resp);
}
Map<F, V> decodeOrderedMap(Response r, F[] fields) {
return marshaller.decodeAsOrderedMap(r, typeOfValue, fields);
}
Map<F, V> decodeFieldWithValueMap(Response r) {
if (r == null) {
return Collections.emptyMap();
}
Map<F, V> map = new HashMap<>();
for (Response nested : r) {
map.put(marshaller.decode(typeOfField, nested.get(0)),
marshaller.decode(typeOfValue, nested.get(1)));
}
return map;
}
}
| AbstractHashCommands |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/origin/MockOrigin.java | {
"start": 898,
"end": 1913
} | class ____ implements Origin {
private final String value;
private final @Nullable Origin parent;
private MockOrigin(String value, @Nullable Origin parent) {
Assert.notNull(value, "'value' must not be null");
this.value = value;
this.parent = parent;
}
@Override
public @Nullable Origin getParent() {
return this.parent;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
return this.value.equals(((MockOrigin) obj).value);
}
@Override
public int hashCode() {
return this.value.hashCode();
}
@Override
public String toString() {
return this.value;
}
@Contract("!null -> !null")
public static @Nullable Origin of(@Nullable String value) {
return of(value, null);
}
@Contract("!null, _ -> !null")
public static @Nullable Origin of(@Nullable String value, @Nullable Origin parent) {
return (value != null) ? new MockOrigin(value, parent) : null;
}
}
| MockOrigin |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_1000/Issue1089.java | {
"start": 425,
"end": 611
} | class ____ {
private int ab;
public int getAb() {
return ab;
}
public void setAb(int ab) {
this.ab = ab;
}
}
}
| TestBean |
java | micronaut-projects__micronaut-core | test-suite/src/test/java/io/micronaut/docs/datavalidation/pogo/EmailController.java | {
"start": 1137,
"end": 1339
} | class ____ {
@Post("/send")
public HttpResponse send(@Body @Valid Email email) { // <2>
return HttpResponse.ok(Collections.singletonMap("msg", "OK"));
}
}
//end::clazz[]
| EmailController |
java | mockito__mockito | mockito-core/src/test/java/org/mockitousage/basicapi/MocksCreationTest.java | {
"start": 1063,
"end": 3361
} | class ____ {}
@Test
public void should_create_mock_when_constructor_is_private() {
assertNotNull(Mockito.mock(HasPrivateConstructor.class));
}
@Test
public void should_combine_mock_name_and_smart_nulls() {
// given
IMethods mock =
mock(
IMethods.class,
withSettings().defaultAnswer(RETURNS_SMART_NULLS).name("great mockie"));
// when
IMethods smartNull = mock.iMethodsReturningMethod();
String name = mock.toString();
// then
assertThat(name).contains("great mockie");
// and
try {
smartNull.simpleMethod();
fail();
} catch (SmartNullPointerException e) {
}
}
@Test
public void should_combine_mock_name_and_extra_interfaces() {
// given
IMethods mock =
mock(
IMethods.class,
withSettings().extraInterfaces(List.class).name("great mockie"));
// when
String name = mock.toString();
// then
assertThat(name).contains("great mockie");
// and
assertTrue(mock instanceof List);
}
@Test
public void should_specify_mock_name_via_settings() {
// given
IMethods mock = mock(IMethods.class, withSettings().name("great mockie"));
// when
String name = mock.toString();
// then
assertThat(name).contains("great mockie");
}
@Test
public void should_scream_when_spy_created_with_wrong_type() {
// given
List list = new LinkedList();
try {
// when
mock(List.class, withSettings().spiedInstance(list));
fail();
// then
} catch (MockitoException e) {
}
}
@SuppressWarnings({"CheckReturnValue", "MockitoUsage"})
@Test
public void should_allow_creating_spies_with_correct_type() {
List list = new LinkedList();
mock(LinkedList.class, withSettings().spiedInstance(list));
}
@Test
public void should_allow_inline_mock_creation() {
when(mock(Set.class).isEmpty()).thenReturn(false);
}
@Retention(RetentionPolicy.RUNTIME)
@ | HasPrivateConstructor |
java | apache__camel | core/camel-management-api/src/main/java/org/apache/camel/api/management/mbean/ManagedStreamCachingStrategyMBean.java | {
"start": 973,
"end": 1177
} | interface ____ extends ManagedServiceMBean {
/**
* Used for selecting if the memory limit is <tt>committed</tt> or <tt>maximum</tt> heap memory setting.
*/
| ManagedStreamCachingStrategyMBean |
java | apache__spark | sql/hive-thriftserver/src/main/java/org/apache/hive/service/auth/PlainSaslHelper.java | {
"start": 5433,
"end": 5790
} | class ____ extends TProcessorFactory {
private final ThriftCLIService service;
SQLPlainProcessorFactory(ThriftCLIService service) {
super(null);
this.service = service;
}
@Override
public TProcessor getProcessor(TTransport trans) {
return new TSetIpAddressProcessor<Iface>(service);
}
}
}
| SQLPlainProcessorFactory |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamsProducer.java | {
"start": 2870,
"end": 15265
} | class ____ {
private final Logger log;
private final String logPrefix;
private final ProcessingMode processingMode;
private final Time time;
private Producer<byte[], byte[]> producer;
private boolean transactionInFlight = false;
private boolean transactionInitialized = false;
private double oldProducerTotalBlockedTime = 0;
// we have a single `StreamsProducer` per thread, and thus a single `sendException` instance,
// which we share across all tasks, ie, all `RecordCollectorImpl`
private final AtomicReference<KafkaException> sendException = new AtomicReference<>(null);
public StreamsProducer(final Producer<byte[], byte[]> producer,
final ProcessingMode processingMode,
final Time time,
final LogContext logContext) {
this.producer = Objects.requireNonNull(producer, "producer cannot be null");
this.processingMode = Objects.requireNonNull(processingMode, "processingMode cannot be null");
this.time = Objects.requireNonNull(time, "time cannot be null");
log = Objects.requireNonNull(logContext, "logContext cannot be null").logger(getClass());
logPrefix = logContext.logPrefix().trim();
}
private String formatException(final String message) {
return message + " [" + logPrefix + "]";
}
boolean eosEnabled() {
return processingMode == EXACTLY_ONCE_V2;
}
boolean transactionInFlight() {
return transactionInFlight;
}
/**
* @throws IllegalStateException if EOS is disabled
*/
void initTransaction() {
if (!eosEnabled()) {
throw new IllegalStateException(formatException("Exactly-once is not enabled"));
}
if (!transactionInitialized) {
// initialize transactions if eos is turned on, which will block if the previous transaction has not
// completed yet; do not start the first transaction until the topology has been initialized later
try {
producer.initTransactions();
transactionInitialized = true;
} catch (final TimeoutException timeoutException) {
log.warn(
"Timeout exception caught trying to initialize transactions. " +
"The broker is either slow or in bad state (like not having enough replicas) in " +
"responding to the request, or the connection to broker was interrupted sending " +
"the request or receiving the response. " +
"Will retry initializing the task in the next loop. " +
"Consider overwriting {} to a larger value to avoid timeout errors",
ProducerConfig.MAX_BLOCK_MS_CONFIG
);
// re-throw to trigger `task.timeout.ms`
throw timeoutException;
} catch (final KafkaException exception) {
throw new StreamsException(
formatException("Error encountered trying to initialize transactions"),
exception
);
}
}
}
public void resetProducer(final Producer<byte[], byte[]> producer) {
if (!eosEnabled()) {
throw new IllegalStateException("Expected EOS to be enabled, but processing mode is " + processingMode);
}
oldProducerTotalBlockedTime += totalBlockedTime(this.producer);
final long start = time.nanoseconds();
close();
final long closeTime = time.nanoseconds() - start;
oldProducerTotalBlockedTime += closeTime;
this.producer = producer;
}
private double getMetricValue(final Map<MetricName, ? extends Metric> metrics,
final String name) {
final List<MetricName> found = metrics.keySet().stream()
.filter(n -> n.name().equals(name))
.collect(Collectors.toList());
if (found.isEmpty()) {
return 0.0;
}
if (found.size() > 1) {
final String err = String.format(
"found %d values for metric %s. total blocked time computation may be incorrect",
found.size(),
name
);
log.error(err);
throw new IllegalStateException(err);
}
return (Double) metrics.get(found.get(0)).metricValue();
}
private double totalBlockedTime(final Producer<?, ?> producer) {
return getMetricValue(producer.metrics(), "bufferpool-wait-time-ns-total")
+ getMetricValue(producer.metrics(), "flush-time-ns-total")
+ getMetricValue(producer.metrics(), "txn-init-time-ns-total")
+ getMetricValue(producer.metrics(), "txn-begin-time-ns-total")
+ getMetricValue(producer.metrics(), "txn-send-offsets-time-ns-total")
+ getMetricValue(producer.metrics(), "txn-commit-time-ns-total")
+ getMetricValue(producer.metrics(), "txn-abort-time-ns-total")
+ getMetricValue(producer.metrics(), "metadata-wait-time-ns-total");
}
public double totalBlockedTime() {
return oldProducerTotalBlockedTime + totalBlockedTime(producer);
}
private void maybeBeginTransaction() {
if (eosEnabled() && !transactionInFlight) {
try {
producer.beginTransaction();
transactionInFlight = true;
} catch (final ProducerFencedException | InvalidProducerEpochException | InvalidPidMappingException error) {
throw new TaskMigratedException(
formatException("Producer got fenced trying to begin a new transaction"),
error
);
} catch (final KafkaException error) {
throw new StreamsException(
formatException("Error encountered trying to begin a new transaction"),
error
);
}
}
}
AtomicReference<KafkaException> sendException() {
return sendException;
}
Future<RecordMetadata> send(final ProducerRecord<byte[], byte[]> record,
final Callback callback) {
maybeBeginTransaction();
try {
return producer.send(record, callback);
} catch (final KafkaException uncaughtException) {
if (isRecoverable(uncaughtException)) {
// producer.send() call may throw a KafkaException which wraps a FencedException,
// in this case we should throw its wrapped inner cause so that it can be
// captured and re-wrapped as TaskMigratedException
throw new TaskMigratedException(
formatException("Producer got fenced trying to send a record"),
uncaughtException.getCause()
);
} else {
throw new StreamsException(
formatException(String.format("Error encountered trying to send record to topic %s", record.topic())),
uncaughtException
);
}
}
}
private static boolean isRecoverable(final KafkaException uncaughtException) {
return uncaughtException.getCause() instanceof ProducerFencedException ||
uncaughtException.getCause() instanceof InvalidPidMappingException ||
uncaughtException.getCause() instanceof InvalidProducerEpochException ||
uncaughtException.getCause() instanceof UnknownProducerIdException;
}
/**
 * Adds the given consumer offsets to the current transaction and then commits it,
 * implicitly beginning a transaction first if none is in flight.
 *
 * <p>Error mapping (applies to both the offset phase and the commit phase):
 * fencing-related failures become {@link TaskMigratedException}; a
 * {@code TimeoutException} is re-thrown unchanged so the caller can apply
 * {@code task.timeout.ms}; any other {@link KafkaException} becomes a fatal
 * {@link StreamsException}.
 *
 * @throws IllegalStateException if EOS is disabled
 * @throws TaskMigratedException
 */
public void commitTransaction(final Map<TopicPartition, OffsetAndMetadata> offsets,
final ConsumerGroupMetadata consumerGroupMetadata) {
if (!eosEnabled()) {
throw new IllegalStateException(formatException("Exactly-once is not enabled"));
}
maybeBeginTransaction();
try {
producer.sendOffsetsToTransaction(offsets, consumerGroupMetadata);
} catch (final ProducerFencedException | InvalidProducerEpochException | CommitFailedException | InvalidPidMappingException error) {
// another instance took over the producer's transactional id -> task migrated
throw new TaskMigratedException(
formatException("Producer got fenced trying to add offsets to a transaction"),
error
);
} catch (final TimeoutException timeoutException) {
// re-throw to trigger `task.timeout.ms`
throw timeoutException;
} catch (final KafkaException error) {
throw new StreamsException(
formatException("Error encountered trying to add offsets to a transaction"),
error
);
}
try {
producer.commitTransaction();
// only clear the flag on success; on failure the transaction state is unknown
// and the exception paths below decide how to proceed
transactionInFlight = false;
} catch (final ProducerFencedException | InvalidProducerEpochException | CommitFailedException | InvalidPidMappingException error) {
throw new TaskMigratedException(
formatException("Producer got fenced trying to commit a transaction"),
error
);
} catch (final TimeoutException timeoutException) {
// re-throw to trigger `task.timeout.ms`
throw timeoutException;
} catch (final KafkaException error) {
throw new StreamsException(
formatException("Error encountered trying to commit a transaction"),
error
);
}
}
/**
 * Aborts the in-flight transaction, if any; a no-op when no transaction is open.
 *
 * <p>Timeouts and fencing-related errors are logged and swallowed (see inline
 * comments for why); any other {@link KafkaException} is fatal and re-thrown as
 * {@link StreamsException}.
 *
 * @throws IllegalStateException if EOS is disabled
 */
void abortTransaction() {
if (!eosEnabled()) {
throw new IllegalStateException(formatException("Exactly-once is not enabled"));
}
if (transactionInFlight) {
try {
producer.abortTransaction();
} catch (final TimeoutException logAndSwallow) {
// no need to re-throw because we abort a TX only if we close a task dirty,
// and thus `task.timeout.ms` does not apply
log.warn(
"Aborting transaction failed due to timeout." +
" Will rely on broker to eventually abort the transaction after the transaction timeout passed.",
logAndSwallow
);
} catch (final ProducerFencedException | InvalidProducerEpochException | InvalidPidMappingException error) {
// The producer is aborting the txn when there's still an ongoing one,
// which means that we did not commit the task while closing it, which
// means that it is a dirty close. Therefore it is possible that the dirty
// close is due to an fenced exception already thrown previously, and hence
// when calling abortTxn here the same exception would be thrown again.
// Even if the dirty close was not due to an observed fencing exception but
// something else (e.g. task corrupted) we can still ignore the exception here
// since transaction already got aborted by brokers/transactional-coordinator if this happens
log.debug("Encountered {} while aborting the transaction; this is expected and hence swallowed", error.getMessage());
} catch (final KafkaException error) {
throw new StreamsException(
formatException("Error encounter trying to abort a transaction"),
error
);
}
// the flag is cleared even when the abort was swallowed above: either the
// broker aborted the transaction itself, or it will time it out eventually
transactionInFlight = false;
}
}
/**
 * Cf {@link KafkaProducer#partitionsFor(String)}
 *
 * @param topic topic to look up
 * @return partition metadata for {@code topic}, as reported by the wrapped producer
 */
List<PartitionInfo> partitionsFor(final String topic) {
return producer.partitionsFor(topic);
}
// Exposes the wrapped producer's metrics unchanged.
Map<MetricName, ? extends Metric> metrics() {
return producer.metrics();
}
// Delegates flush() to the wrapped producer.
void flush() {
producer.flush();
}
/**
 * Closes the underlying producer and resets the transactional bookkeeping flags
 * so this wrapper can be re-initialized later.
 */
void close() {
producer.close();
// the two flag resets are independent of each other
transactionInitialized = false;
transactionInFlight = false;
}
// for testing only: exposes the wrapped producer instance directly
Producer<byte[], byte[]> kafkaProducer() {
return producer;
}
}
| StreamsProducer |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/query/HbmResultSetMappingDescriptor.java | {
"start": 10753,
"end": 10895
} | interface ____ {
FetchParentMemento resolveParentMemento(ResultSetMappingResolutionContext resolutionContext);
}
public static | HbmFetchParent |
java | apache__camel | archetypes/camel-archetype-main/src/main/resources/archetype-resources/src/main/java/MyApplication.java | {
"start": 1012,
"end": 1067
} | class ____ boot the Camel application
*/
public final | that |
java | elastic__elasticsearch | x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/InternalCartesianBoundsTests.java | {
"start": 801,
"end": 4464
} | class ____ extends InternalAggregationTestCase<InternalCartesianBounds> {
static final double GEOHASH_TOLERANCE = 1E-5D;
@Override
protected SearchPlugin registerPlugin() {
return new LocalStateSpatialPlugin();
}
@Override
protected InternalCartesianBounds createTestInstance(String name, Map<String, Object> metadata) {
// we occasionally want to test top = Double.NEGATIVE_INFINITY since this triggers empty xContent object
double top = frequently() ? randomDouble() : Double.NEGATIVE_INFINITY;
return new InternalCartesianBounds(name, top, randomDouble(), randomDouble(), randomDouble(), metadata);
}
@Override
protected void assertReduced(InternalCartesianBounds reduced, List<InternalCartesianBounds> inputs) {
double top = Double.NEGATIVE_INFINITY;
double bottom = Double.POSITIVE_INFINITY;
double left = Double.POSITIVE_INFINITY;
double right = Double.NEGATIVE_INFINITY;
for (InternalCartesianBounds bounds : inputs) {
top = max(top, bounds.top);
bottom = min(bottom, bounds.bottom);
left = min(left, bounds.left);
right = max(right, bounds.right);
}
assertValueClose(reduced.top, top);
assertValueClose(reduced.bottom, bottom);
assertValueClose(reduced.left, left);
assertValueClose(reduced.right, right);
}
private static void assertValueClose(double expected, double actual) {
if (Double.isInfinite(expected) == false) {
assertThat(expected, closeTo(actual, GEOHASH_TOLERANCE));
} else {
assertTrue(Double.isInfinite(actual));
}
}
@Override
protected boolean supportsSampling() {
return true;
}
@Override
protected void assertSampled(InternalCartesianBounds sampled, InternalCartesianBounds reduced, SamplingContext samplingContext) {
assertValueClose(sampled.top, reduced.top);
assertValueClose(sampled.bottom, reduced.bottom);
assertValueClose(sampled.left, reduced.left);
assertValueClose(sampled.right, reduced.right);
}
@Override
protected InternalCartesianBounds mutateInstance(InternalCartesianBounds instance) {
String name = instance.getName();
double top = instance.top;
double bottom = instance.bottom;
double left = instance.left;
double right = instance.right;
Map<String, Object> metadata = instance.getMetadata();
switch (between(0, 5)) {
case 0:
name += randomAlphaOfLength(5);
break;
case 1:
if (Double.isFinite(top)) {
top += between(1, 20);
} else {
top = randomDouble();
}
break;
case 2:
bottom += between(1, 20);
break;
case 3:
left += between(1, 20);
break;
case 4:
right += between(1, 20);
break;
case 5:
if (metadata == null) {
metadata = Maps.newMapWithExpectedSize(1);
} else {
metadata = new HashMap<>(instance.getMetadata());
}
metadata.put(randomAlphaOfLength(15), randomInt());
break;
default:
throw new AssertionError("Illegal randomisation branch");
}
return new InternalCartesianBounds(name, top, bottom, left, right, metadata);
}
}
| InternalCartesianBoundsTests |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.