language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/submitted/inline_association_with_dot/InlineCollectionWithDotTest.java | {
"start": 1112,
"end": 2752
} | class ____ {
private SqlSession sqlSession;
public void openSession(String aConfig) throws Exception {
final String resource = "org/apache/ibatis/submitted/inline_association_with_dot/ibatis-" + aConfig + ".xml";
try (Reader batisConfigReader = Resources.getResourceAsReader(resource)) {
SqlSessionFactory sqlSessionFactory = new SqlSessionFactoryBuilder().build(batisConfigReader);
BaseDataTest.runScript(sqlSessionFactory.getConfiguration().getEnvironment().getDataSource(),
"org/apache/ibatis/submitted/inline_association_with_dot/create.sql");
sqlSession = sqlSessionFactory.openSession();
}
}
@AfterEach
void closeSession() {
if (sqlSession != null) {
sqlSession.close();
}
}
/*
* Load an element with an element with and element with a value. Expect that this is possible bij using an inline
* 'association' map.
*/
@Test
void selectElementValueInContainerUsingInline() throws Exception {
openSession("inline");
Element myElement = sqlSession.getMapper(ElementMapperUsingInline.class).selectElement();
assertEquals("value", myElement.getElement().getElement().getValue());
}
/*
* Load an element with an element with and element with a value. Expect that this is possible bij using an
* sub-'association' map.
*/
@Test
void selectElementValueInContainerUsingSubMap() throws Exception {
openSession("submap");
Element myElement = sqlSession.getMapper(ElementMapperUsingSubMap.class).selectElement();
assertEquals("value", myElement.getElement().getElement().getValue());
}
}
| InlineCollectionWithDotTest |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestRegexMountPointInterceptorFactory.java | {
"start": 1007,
"end": 2159
} | class ____ {
@Test
public void testCreateNormalCase() {
String replaceInterceptorStr =
RegexMountPointInterceptorType.REPLACE_RESOLVED_DST_PATH.getConfigName()
+ Character.toString(RegexMountPoint.INTERCEPTOR_INTERNAL_SEP)
+ "src" + Character
.toString(RegexMountPoint.INTERCEPTOR_INTERNAL_SEP) + "replace";
RegexMountPointInterceptor interceptor =
RegexMountPointInterceptorFactory.create(replaceInterceptorStr);
assertTrue(
interceptor
instanceof RegexMountPointResolvedDstPathReplaceInterceptor);
}
@Test
public void testCreateBadCase() {
String replaceInterceptorStr =
RegexMountPointInterceptorType.REPLACE_RESOLVED_DST_PATH.getConfigName()
+ "___" + Character
.toString(RegexMountPoint.INTERCEPTOR_INTERNAL_SEP) + "src"
+ Character.toString(RegexMountPoint.INTERCEPTOR_INTERNAL_SEP)
+ "replace";
RegexMountPointInterceptor interceptor =
RegexMountPointInterceptorFactory.create(replaceInterceptorStr);
assertTrue(interceptor == null);
}
}
| TestRegexMountPointInterceptorFactory |
java | apache__avro | lang/java/thrift/src/test/java/org/apache/avro/thrift/test/Foo.java | {
"start": 26925,
"end": 27145
} | class ____ implements org.apache.thrift.scheme.SchemeFactory {
public ping_argsStandardScheme getScheme() {
return new ping_argsStandardScheme();
}
}
private static | ping_argsStandardSchemeFactory |
java | apache__spark | sql/catalyst/src/main/java/org/apache/spark/sql/connector/read/SupportsReportPartitioning.java | {
"start": 1351,
"end": 1526
} | interface ____ extends Scan {
/**
* Returns the output data partitioning that this reader guarantees.
*/
Partitioning outputPartitioning();
}
| SupportsReportPartitioning |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/nullness/ReturnMissingNullableTest.java | {
"start": 21209,
"end": 21830
} | interface ____<K, V> extends Map<K, V> {
@DoNotCall
@Override
V put(K k, V v);
}
""")
.doTest();
}
@Test
public void onlyIfAlreadyInScopeAndItIs() {
createCompilationTestHelper()
.setArgs("-XepOpt:Nullness:OnlyIfAnnotationAlreadyInScope=true")
.addSourceLines(
"com/google/errorprone/bugpatterns/nullness/LiteralNullReturnTest.java",
"""
package com.google.errorprone.bugpatterns.nullness;
import org.checkerframework.checker.nullness.qual.Nullable;
public | MyMap |
java | apache__camel | core/camel-api/src/main/java/org/apache/camel/cloud/ServiceChooserFactory.java | {
"start": 988,
"end": 1063
} | interface ____ extends ServiceFactory<ServiceChooser> {
}
| ServiceChooserFactory |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/ClaimCheckTest.java | {
"start": 1255,
"end": 2982
} | class ____ extends ContextTestSupport {
// in memory data store for testing only!
public static final Map<String, Object> dataStore = new HashMap<>();
@Test
public void testClaimCheck() throws Exception {
String body = "<order custId=\"123\"><lotsOfContent/></order>";
// check to make sure the message body gets added back in properly
MockEndpoint resultEndpoint = getMockEndpoint("mock:result");
resultEndpoint.expectedMessageCount(1);
resultEndpoint.message(0).body().isEqualTo(body);
// check to make sure the claim check is added to the message and
// the body is removed
MockEndpoint testCheckpointEndpoint = getMockEndpoint("mock:testCheckpoint");
testCheckpointEndpoint.expectedMessageCount(1);
testCheckpointEndpoint.expectedHeaderReceived("claimCheck", "123");
testCheckpointEndpoint.message(0).body().isNull();
template.sendBody("direct:start", body);
assertMockEndpointsSatisfied();
}
@Override
protected Registry createCamelRegistry() throws Exception {
Registry jndi = super.createCamelRegistry();
jndi.bind("checkLuggage", new CheckLuggageBean());
jndi.bind("dataEnricher", new DataEnricherBean());
return jndi;
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
// START SNIPPET: e1
from("direct:start").to("bean:checkLuggage", "mock:testCheckpoint", "bean:dataEnricher", "mock:result");
// END SNIPPET: e1
}
};
}
// START SNIPPET: e2
public static final | ClaimCheckTest |
java | apache__avro | lang/java/tools/src/test/compiler/output-string/avro/examples/baseball/FieldTest.java | {
"start": 402,
"end": 2665
} | class ____ extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
private static final long serialVersionUID = 4609235620572341636L;
public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"FieldTest\",\"namespace\":\"avro.examples.baseball\",\"doc\":\"Test various field types\",\"fields\":[{\"name\":\"number\",\"type\":\"int\",\"doc\":\"The number of the player\"},{\"name\":\"last_name\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"timestamp\",\"type\":{\"type\":\"long\",\"logicalType\":\"timestamp-millis\"}},{\"name\":\"timestampMicros\",\"type\":{\"type\":\"long\",\"logicalType\":\"timestamp-micros\"}},{\"name\":\"timeMillis\",\"type\":{\"type\":\"int\",\"logicalType\":\"time-millis\"}},{\"name\":\"timeMicros\",\"type\":{\"type\":\"long\",\"logicalType\":\"time-micros\"}}]}");
public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; }
private static final SpecificData MODEL$ = new SpecificData();
static {
MODEL$.addLogicalTypeConversion(new org.apache.avro.data.TimeConversions.TimestampMillisConversion());
MODEL$.addLogicalTypeConversion(new org.apache.avro.data.TimeConversions.TimeMicrosConversion());
MODEL$.addLogicalTypeConversion(new org.apache.avro.data.TimeConversions.TimestampMicrosConversion());
MODEL$.addLogicalTypeConversion(new org.apache.avro.data.TimeConversions.TimeMillisConversion());
}
private static final BinaryMessageEncoder<FieldTest> ENCODER =
new BinaryMessageEncoder<>(MODEL$, SCHEMA$);
private static final BinaryMessageDecoder<FieldTest> DECODER =
new BinaryMessageDecoder<>(MODEL$, SCHEMA$);
/**
* Return the BinaryMessageEncoder instance used by this class.
* @return the message encoder used by this class
*/
public static BinaryMessageEncoder<FieldTest> getEncoder() {
return ENCODER;
}
/**
* Return the BinaryMessageDecoder instance used by this class.
* @return the message decoder used by this class
*/
public static BinaryMessageDecoder<FieldTest> getDecoder() {
return DECODER;
}
/**
* Create a new BinaryMessageDecoder instance for this | FieldTest |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/support/BootstrapTestUtilsContextInitializerTests.java | {
"start": 3892,
"end": 4159
} | class ____ implements ApplicationContextInitializer<GenericWebApplicationContext> {
@Override
public void initialize(GenericWebApplicationContext applicationContext) {
}
}
@ContextConfiguration(initializers = FooInitializer.class)
private static | BarInitializer |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/client/internal/Client.java | {
"start": 3412,
"end": 3884
} | interface ____ performing actions/operations against the cluster.
* <p>
* All operations performed are asynchronous by nature. Each action/operation has two flavors, the first
* simply returns an {@link org.elasticsearch.action.ActionFuture}, while the second accepts an
* {@link org.elasticsearch.action.ActionListener}.
* <p>
* A client can be retrieved from a started {@link org.elasticsearch.node.Node}.
*
* @see org.elasticsearch.node.Node#client()
*/
public | for |
java | elastic__elasticsearch | server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/SimpleAllocationIT.java | {
"start": 800,
"end": 2614
} | class ____ extends ESIntegTestCase {
@Override
protected int numberOfShards() {
return 3;
}
@Override
protected int numberOfReplicas() {
return 1;
}
public void testSaneAllocation() {
assertAcked(prepareCreate("test", 3));
if (randomBoolean()) {
assertAcked(indicesAdmin().prepareClose("test"));
}
ensureGreen("test");
ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
assertThat(state.getRoutingNodes().unassigned().size(), equalTo(0));
for (RoutingNode node : state.getRoutingNodes()) {
if (node.isEmpty() == false) {
assertThat(node.size(), equalTo(2));
}
}
setReplicaCount(0, "test");
ensureGreen("test");
state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
assertThat(state.getRoutingNodes().unassigned().size(), equalTo(0));
for (RoutingNode node : state.getRoutingNodes()) {
if (node.isEmpty() == false) {
assertThat(node.size(), equalTo(1));
}
}
// create another index
assertAcked(prepareCreate("test2", 3));
if (randomBoolean()) {
assertAcked(indicesAdmin().prepareClose("test2"));
}
ensureGreen("test2");
setReplicaCount(1, "test");
ensureGreen("test");
state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
assertThat(state.getRoutingNodes().unassigned().size(), equalTo(0));
for (RoutingNode node : state.getRoutingNodes()) {
if (node.isEmpty() == false) {
assertThat(node.size(), equalTo(4));
}
}
}
}
| SimpleAllocationIT |
java | micronaut-projects__micronaut-core | inject/src/main/java/io/micronaut/context/annotation/Mixin.java | {
"start": 1239,
"end": 1379
} | interface ____ {
/**
* @return The target of the mixin
*/
Class<?> value();
/**
* A way to specify the target | Mixin |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/inheritance/joined/JoinedDiscriminatorSameChildTableTest.java | {
"start": 4410,
"end": 4587
} | class ____ extends EntityParent {
private String name;
}
@Entity(name = "EntityChildTwo")
@Table(name = "child_table")
@DiscriminatorValue("child-two")
static | EntityChildOne |
java | spring-projects__spring-framework | framework-docs/src/main/java/org/springframework/docs/web/websocket/stomp/websocketstompserverconfig/WebSocketConfiguration.java | {
"start": 1104,
"end": 1381
} | class ____ implements WebSocketMessageBrokerConfigurer {
@Override
public void configureWebSocketTransport(WebSocketTransportRegistration registry) {
registry.setMessageSizeLimit(4 * 8192);
registry.setTimeToFirstMessage(30000);
}
}
// end::snippet[]
| WebSocketConfiguration |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/Aws2TextractComponentBuilderFactory.java | {
"start": 1970,
"end": 17352
} | interface ____ extends ComponentBuilder<Textract2Component> {
/**
* Component configuration.
*
* The option is a:
* <code>org.apache.camel.component.aws2.textract.Textract2Configuration</code> type.
*
* Group: producer
*
* @param configuration the value to set
* @return the dsl builder
*/
default Aws2TextractComponentBuilder configuration(org.apache.camel.component.aws2.textract.Textract2Configuration configuration) {
doSetProperty("configuration", configuration);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default Aws2TextractComponentBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* The operation to perform.
*
* The option is a:
* <code>org.apache.camel.component.aws2.textract.Textract2Operations</code> type.
*
* Default: detectDocumentText
* Group: producer
*
* @param operation the value to set
* @return the dsl builder
*/
default Aws2TextractComponentBuilder operation(org.apache.camel.component.aws2.textract.Textract2Operations operation) {
doSetProperty("operation", operation);
return this;
}
/**
* Set the need for overriding the endpoint. This option needs to be
* used in combination with uriEndpointOverride option.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param overrideEndpoint the value to set
* @return the dsl builder
*/
default Aws2TextractComponentBuilder overrideEndpoint(boolean overrideEndpoint) {
doSetProperty("overrideEndpoint", overrideEndpoint);
return this;
}
/**
* If we want to use a POJO request as body or not.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param pojoRequest the value to set
* @return the dsl builder
*/
default Aws2TextractComponentBuilder pojoRequest(boolean pojoRequest) {
doSetProperty("pojoRequest", pojoRequest);
return this;
}
/**
* The region in which Textract client needs to work. When using this
* parameter, the configuration will expect the lowercase name of the
* region (for example ap-east-1) You'll need to use the name
* Region.EU_WEST_1.id().
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param region the value to set
* @return the dsl builder
*/
default Aws2TextractComponentBuilder region(java.lang.String region) {
doSetProperty("region", region);
return this;
}
/**
* The S3 bucket name for document location.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param s3Bucket the value to set
* @return the dsl builder
*/
default Aws2TextractComponentBuilder s3Bucket(java.lang.String s3Bucket) {
doSetProperty("s3Bucket", s3Bucket);
return this;
}
/**
* The S3 object name for document location.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param s3Object the value to set
* @return the dsl builder
*/
default Aws2TextractComponentBuilder s3Object(java.lang.String s3Object) {
doSetProperty("s3Object", s3Object);
return this;
}
/**
* The S3 object version for document location.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param s3ObjectVersion the value to set
* @return the dsl builder
*/
default Aws2TextractComponentBuilder s3ObjectVersion(java.lang.String s3ObjectVersion) {
doSetProperty("s3ObjectVersion", s3ObjectVersion);
return this;
}
/**
* Set the overriding uri endpoint. This option needs to be used in
* combination with overrideEndpoint option.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param uriEndpointOverride the value to set
* @return the dsl builder
*/
default Aws2TextractComponentBuilder uriEndpointOverride(java.lang.String uriEndpointOverride) {
doSetProperty("uriEndpointOverride", uriEndpointOverride);
return this;
}
/**
* Whether autowiring is enabled. This is used for automatic autowiring
* options (the option must be marked as autowired) by looking up in the
* registry to find if there is a single instance of matching type,
* which then gets configured on the component. This can be used for
* automatic configuring JDBC data sources, JMS connection factories,
* AWS Clients, etc.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param autowiredEnabled the value to set
* @return the dsl builder
*/
default Aws2TextractComponentBuilder autowiredEnabled(boolean autowiredEnabled) {
doSetProperty("autowiredEnabled", autowiredEnabled);
return this;
}
/**
* To use an existing configured AWS Textract client.
*
* The option is a:
* <code>software.amazon.awssdk.services.textract.TextractClient</code> type.
*
* Group: advanced
*
* @param textractClient the value to set
* @return the dsl builder
*/
default Aws2TextractComponentBuilder textractClient(software.amazon.awssdk.services.textract.TextractClient textractClient) {
doSetProperty("textractClient", textractClient);
return this;
}
/**
* Used for enabling or disabling all consumer based health checks from
* this component.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: health
*
* @param healthCheckConsumerEnabled the value to set
* @return the dsl builder
*/
default Aws2TextractComponentBuilder healthCheckConsumerEnabled(boolean healthCheckConsumerEnabled) {
doSetProperty("healthCheckConsumerEnabled", healthCheckConsumerEnabled);
return this;
}
/**
* Used for enabling or disabling all producer based health checks from
* this component. Notice: Camel has by default disabled all producer
* based health-checks. You can turn on producer checks globally by
* setting camel.health.producersEnabled=true.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: health
*
* @param healthCheckProducerEnabled the value to set
* @return the dsl builder
*/
default Aws2TextractComponentBuilder healthCheckProducerEnabled(boolean healthCheckProducerEnabled) {
doSetProperty("healthCheckProducerEnabled", healthCheckProducerEnabled);
return this;
}
/**
* To define a proxy host when instantiating the Textract client.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: proxy
*
* @param proxyHost the value to set
* @return the dsl builder
*/
default Aws2TextractComponentBuilder proxyHost(java.lang.String proxyHost) {
doSetProperty("proxyHost", proxyHost);
return this;
}
/**
* To define a proxy port when instantiating the Textract client.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Group: proxy
*
* @param proxyPort the value to set
* @return the dsl builder
*/
default Aws2TextractComponentBuilder proxyPort(java.lang.Integer proxyPort) {
doSetProperty("proxyPort", proxyPort);
return this;
}
/**
* To define a proxy protocol when instantiating the Textract client.
*
* The option is a:
* <code>software.amazon.awssdk.core.Protocol</code> type.
*
* Default: HTTPS
* Group: proxy
*
* @param proxyProtocol the value to set
* @return the dsl builder
*/
default Aws2TextractComponentBuilder proxyProtocol(software.amazon.awssdk.core.Protocol proxyProtocol) {
doSetProperty("proxyProtocol", proxyProtocol);
return this;
}
/**
* Amazon AWS Access Key.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param accessKey the value to set
* @return the dsl builder
*/
default Aws2TextractComponentBuilder accessKey(java.lang.String accessKey) {
doSetProperty("accessKey", accessKey);
return this;
}
/**
* If using a profile credentials provider this parameter will set the
* profile name.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param profileCredentialsName the value to set
* @return the dsl builder
*/
default Aws2TextractComponentBuilder profileCredentialsName(java.lang.String profileCredentialsName) {
doSetProperty("profileCredentialsName", profileCredentialsName);
return this;
}
/**
* Amazon AWS Secret Key.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param secretKey the value to set
* @return the dsl builder
*/
default Aws2TextractComponentBuilder secretKey(java.lang.String secretKey) {
doSetProperty("secretKey", secretKey);
return this;
}
/**
* Amazon AWS Session Token.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param sessionToken the value to set
* @return the dsl builder
*/
default Aws2TextractComponentBuilder sessionToken(java.lang.String sessionToken) {
doSetProperty("sessionToken", sessionToken);
return this;
}
/**
* If we want to trust all certificates in case of overriding the
* endpoint.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param trustAllCertificates the value to set
* @return the dsl builder
*/
default Aws2TextractComponentBuilder trustAllCertificates(boolean trustAllCertificates) {
doSetProperty("trustAllCertificates", trustAllCertificates);
return this;
}
/**
* Set whether the Textract client should expect to load credentials
* through a default credentials provider or to expect static
* credentials to be passed in.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param useDefaultCredentialsProvider the value to set
* @return the dsl builder
*/
default Aws2TextractComponentBuilder useDefaultCredentialsProvider(boolean useDefaultCredentialsProvider) {
doSetProperty("useDefaultCredentialsProvider", useDefaultCredentialsProvider);
return this;
}
/**
* Set whether the Textract client should expect to load credentials
* through a profile credentials provider.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param useProfileCredentialsProvider the value to set
* @return the dsl builder
*/
default Aws2TextractComponentBuilder useProfileCredentialsProvider(boolean useProfileCredentialsProvider) {
doSetProperty("useProfileCredentialsProvider", useProfileCredentialsProvider);
return this;
}
/**
* Set whether the Textract client should expect to use Session
* Credentials. This is useful in situation in which the user needs to
* assume a IAM role for doing operations in Textract.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param useSessionCredentials the value to set
* @return the dsl builder
*/
default Aws2TextractComponentBuilder useSessionCredentials(boolean useSessionCredentials) {
doSetProperty("useSessionCredentials", useSessionCredentials);
return this;
}
}
| Aws2TextractComponentBuilder |
java | spring-projects__spring-framework | spring-beans/src/main/java/org/springframework/beans/NotWritablePropertyException.java | {
"start": 960,
"end": 3054
} | class ____ extends InvalidPropertyException {
private final String @Nullable [] possibleMatches;
/**
* Create a new NotWritablePropertyException.
* @param beanClass the offending bean class
* @param propertyName the offending property name
*/
public NotWritablePropertyException(Class<?> beanClass, String propertyName) {
super(beanClass, propertyName,
"Bean property '" + propertyName + "' is not writable or has an invalid setter method: " +
"Does the return type of the getter match the parameter type of the setter?");
this.possibleMatches = null;
}
/**
* Create a new NotWritablePropertyException.
* @param beanClass the offending bean class
* @param propertyName the offending property name
* @param msg the detail message
*/
public NotWritablePropertyException(Class<?> beanClass, String propertyName, String msg) {
super(beanClass, propertyName, msg);
this.possibleMatches = null;
}
/**
* Create a new NotWritablePropertyException.
* @param beanClass the offending bean class
* @param propertyName the offending property name
* @param msg the detail message
* @param cause the root cause
*/
public NotWritablePropertyException(Class<?> beanClass, String propertyName, String msg, Throwable cause) {
super(beanClass, propertyName, msg, cause);
this.possibleMatches = null;
}
/**
* Create a new NotWritablePropertyException.
* @param beanClass the offending bean class
* @param propertyName the offending property name
* @param msg the detail message
* @param possibleMatches suggestions for actual bean property names
* that closely match the invalid property name
*/
public NotWritablePropertyException(Class<?> beanClass, String propertyName, String msg, String[] possibleMatches) {
super(beanClass, propertyName, msg);
this.possibleMatches = possibleMatches;
}
/**
* Return suggestions for actual bean property names that closely match
* the invalid property name, if any.
*/
public String @Nullable [] getPossibleMatches() {
return this.possibleMatches;
}
}
| NotWritablePropertyException |
java | quarkusio__quarkus | test-framework/junit5-component/src/test/java/io/quarkus/test/component/beans/Delta.java | {
"start": 106,
"end": 216
} | class ____ {
public boolean ping() {
return true;
}
public void onBoolean() {
}
}
| Delta |
java | quarkusio__quarkus | extensions/reactive-routes/runtime/src/main/java/io/quarkus/vertx/web/runtime/UniFailureCallback.java | {
"start": 130,
"end": 413
} | class ____ implements Consumer<Throwable> {
private final RoutingContext context;
public UniFailureCallback(RoutingContext context) {
this.context = context;
}
@Override
public void accept(Throwable t) {
context.fail(t);
}
}
| UniFailureCallback |
java | apache__hadoop | hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestUtils.java | {
"start": 12971,
"end": 18483
} | class ____ not be instantiated.
*/
private AzureTestUtils() {
}
/**
* Assert that a configuration option matches the expected value.
* @param conf configuration
* @param key option key
* @param expected expected value
*/
public static void assertOptionEquals(Configuration conf,
String key,
String expected) {
assertEquals(expected, conf.get(key), "Value of " + key);
}
/**
* Assume that a condition is met. If not: log at WARN and
* then throw an {@link TestAbortedException}.
* @param message message in an assumption
* @param condition condition to probe
*/
public static void assume(String message, boolean condition) {
if (!condition) {
LOG.warn(message);
}
assumeThat(condition).as(message).isTrue();
}
/**
* Gets the current value of the given gauge.
* @param fs filesystem
* @param gaugeName gauge name
* @return the gauge value
*/
public static long getLongGaugeValue(NativeAzureFileSystem fs,
String gaugeName) {
return getLongGauge(gaugeName, getMetrics(fs.getInstrumentation()));
}
/**
* Gets the current value of the given counter.
* @param fs filesystem
* @param counterName counter name
* @return the counter value
*/
public static long getLongCounterValue(NativeAzureFileSystem fs,
String counterName) {
return getLongCounter(counterName, getMetrics(fs.getInstrumentation()));
}
/**
* Delete a path, catching any exception and downgrading to a log message.
* @param fs filesystem
* @param path path to delete
* @param recursive recursive delete?
* @throws IOException IO failure.
*/
public static void deleteQuietly(FileSystem fs,
Path path,
boolean recursive) throws IOException {
if (fs != null && path != null) {
try {
fs.delete(path, recursive);
} catch (IOException e) {
LOG.warn("When deleting {}", path, e);
}
}
}
/**
* Clean up the test account if non-null; return null to put in the
* field.
* @param testAccount test account to clean up
* @return null
*/
public static AzureBlobStorageTestAccount cleanup(
AzureBlobStorageTestAccount testAccount) throws Exception {
if (testAccount != null) {
testAccount.cleanup();
testAccount = null;
}
return null;
}
/**
* Clean up the test account; any thrown exceptions are caught and
* logged.
* @param testAccount test account
* @return null, so that any fields can be reset.
*/
public static AzureBlobStorageTestAccount cleanupTestAccount(
AzureBlobStorageTestAccount testAccount) {
if (testAccount != null) {
try {
testAccount.cleanup();
} catch (Exception e) {
LOG.error("While cleaning up test account: ", e);
}
}
return null;
}
/**
* Assume that the scale tests are enabled by the relevant system property.
*/
public static void assumeScaleTestsEnabled(Configuration conf) {
boolean enabled = getTestPropertyBool(
conf,
KEY_SCALE_TESTS_ENABLED,
DEFAULT_SCALE_TESTS_ENABLED);
assume("Scale test disabled: to enable set property "
+ KEY_SCALE_TESTS_ENABLED,
enabled);
}
/**
* Check the account name for WASB tests is set correctly and return.
*/
public static String verifyWasbAccountNameInConfig(Configuration conf) {
String accountName = conf.get(ACCOUNT_NAME_PROPERTY_NAME);
if (accountName == null) {
accountName = conf.get(WASB_TEST_ACCOUNT_NAME_WITH_DOMAIN);
}
assumeThat(accountName)
.as("Account for WASB is missing or it is not in correct format")
.isNotNull()
.doesNotEndWith(WASB_ACCOUNT_NAME_DOMAIN_SUFFIX_REGEX);
return accountName;
}
/**
* Write string into a file.
*/
public static void writeStringToFile(FileSystem fs, Path path, String value)
throws IOException {
FSDataOutputStream outputStream = fs.create(path, true);
writeStringToStream(outputStream, value);
}
/**
* Write string into a file.
*/
public static void writeStringToStream(FSDataOutputStream outputStream, String value)
throws IOException {
BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(
outputStream));
writer.write(value);
writer.close();
}
/**
* Read string from a file.
*/
public static String readStringFromFile(FileSystem fs, Path testFile) throws IOException {
FSDataInputStream inputStream = fs.open(testFile);
String ret = readStringFromStream(inputStream);
inputStream.close();
return ret;
}
/**
* Read string from stream.
*/
public static String readStringFromStream(FSDataInputStream inputStream) throws IOException {
BufferedReader reader = new BufferedReader(new InputStreamReader(
inputStream));
final int BUFFER_SIZE = 1024;
char[] buffer = new char[BUFFER_SIZE];
int count = reader.read(buffer, 0, BUFFER_SIZE);
if (count > BUFFER_SIZE) {
throw new IOException("Exceeded buffer size");
}
inputStream.close();
return new String(buffer, 0, count);
}
/**
* Assume hierarchical namespace is disabled for test account.
*/
public static void assumeNamespaceDisabled(Configuration conf) {
assumeThat(conf.getBoolean(FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT, false))
.as("Hierarchical namespace is enabled for test account.")
.isFalse();
}
}
| should |
java | spring-projects__spring-framework | spring-aspects/src/test/java/org/springframework/beans/factory/aspectj/XmlBeanConfigurerTests.java | {
"start": 880,
"end": 1237
} | class ____ {
@Test
void injection() {
try (ClassPathXmlApplicationContext context = new ClassPathXmlApplicationContext(
"org/springframework/beans/factory/aspectj/beanConfigurerTests.xml")) {
ShouldBeConfiguredBySpring myObject = new ShouldBeConfiguredBySpring();
assertThat(myObject.getName()).isEqualTo("Rod");
}
}
}
| XmlBeanConfigurerTests |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/archive/scan/spi/ScanEnvironment.java | {
"start": 849,
"end": 1818
} | interface ____ {
/**
* Returns the root URL for scanning. Can be {@code null}, indicating that
* no root URL scanning should be done (aka, if maybe a root URL is not known).
*
* @return The root URL
*
* @see ScanOptions#canDetectUnlistedClassesInRoot()
*/
URL getRootUrl();
/**
* Returns any non-root URLs for scanning. Can be null/empty to indicate
* that no non-root URL scanning should be done.
*
* @return The non-root URLs
*
* @see ScanOptions#canDetectUnlistedClassesInNonRoot()
*/
List<URL> getNonRootUrls();
/**
* Returns any classes which are explicitly listed as part of the
* "persistence unit".
*
* @return The explicitly listed classes
*/
List<String> getExplicitlyListedClassNames();
/**
* Returns the mapping files which are explicitly listed as part of the
* "persistence unit".
*
* @return The explicitly listed mapping files.
*/
List<String> getExplicitlyListedMappingFiles();
}
| ScanEnvironment |
java | apache__flink | flink-datastream/src/test/java/org/apache/flink/datastream/impl/operators/TwoOutputProcessOperatorTest.java | {
"start": 1613,
"end": 5921
} | class ____ {
@Test
void testProcessRecord() throws Exception {
OutputTag<Long> sideOutputTag = new OutputTag<Long>("side-output") {};
TwoOutputProcessOperator<Integer, Integer, Long> processOperator =
new TwoOutputProcessOperator<>(
new TwoOutputStreamProcessFunction<Integer, Integer, Long>() {
@Override
public void processRecord(
Integer record,
Collector<Integer> output1,
Collector<Long> output2,
TwoOutputPartitionedContext<Integer, Long> ctx) {
output1.collect(record);
output2.collect((long) (record * 2));
}
},
sideOutputTag);
try (OneInputStreamOperatorTestHarness<Integer, Integer> testHarness =
new OneInputStreamOperatorTestHarness<>(processOperator)) {
testHarness.open();
testHarness.processElement(new StreamRecord<>(1));
testHarness.processElement(new StreamRecord<>(2));
testHarness.processElement(new StreamRecord<>(3));
Collection<StreamRecord<Integer>> firstOutput = testHarness.getRecordOutput();
ConcurrentLinkedQueue<StreamRecord<Long>> secondOutput =
testHarness.getSideOutput(sideOutputTag);
assertThat(firstOutput)
.containsExactly(
new StreamRecord<>(1), new StreamRecord<>(2), new StreamRecord<>(3));
assertThat(secondOutput)
.containsExactly(
new StreamRecord<>(2L), new StreamRecord<>(4L), new StreamRecord<>(6L));
}
}
@Test
void testEndInput() throws Exception {
AtomicInteger counter = new AtomicInteger();
OutputTag<Long> sideOutputTag = new OutputTag<Long>("side-output") {};
TwoOutputProcessOperator<Integer, Integer, Long> processOperator =
new TwoOutputProcessOperator<>(
new TwoOutputStreamProcessFunction<Integer, Integer, Long>() {
@Override
public void processRecord(
Integer record,
Collector<Integer> output1,
Collector<Long> output2,
TwoOutputPartitionedContext<Integer, Long> ctx) {
// do nothing.
}
@Override
public void endInput(
TwoOutputNonPartitionedContext<Integer, Long> ctx) {
try {
ctx.applyToAllPartitions(
(firstOutput, secondOutput, context) -> {
counter.incrementAndGet();
firstOutput.collect(1);
secondOutput.collect(2L);
});
} catch (Exception e) {
throw new RuntimeException(e);
}
}
},
sideOutputTag);
try (OneInputStreamOperatorTestHarness<Integer, Integer> testHarness =
new OneInputStreamOperatorTestHarness<>(processOperator)) {
testHarness.open();
testHarness.endInput();
assertThat(counter).hasValue(1);
Collection<StreamRecord<Integer>> firstOutput = testHarness.getRecordOutput();
ConcurrentLinkedQueue<StreamRecord<Long>> secondOutput =
testHarness.getSideOutput(sideOutputTag);
assertThat(firstOutput).containsExactly(new StreamRecord<>(1));
assertThat(secondOutput).containsExactly(new StreamRecord<>(2L));
}
}
}
| TwoOutputProcessOperatorTest |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java | {
"start": 30211,
"end": 31125
} | class ____ cache. */
private static native void initNativeWindows(
boolean doThreadsafeWorkaround);
}
private static final Logger LOG = LoggerFactory.getLogger(NativeIO.class);
private static boolean nativeLoaded = false;
static {
if (NativeCodeLoader.isNativeCodeLoaded()) {
try {
initNative(false);
nativeLoaded = true;
} catch (Throwable t) {
// This can happen if the user has an older version of libhadoop.so
// installed - in this case we can continue without native IO
// after warning
PerformanceAdvisory.LOG.debug("Unable to initialize NativeIO libraries", t);
}
}
}
/**
* @return Return true if the JNI-based native IO extensions are available.
*/
public static boolean isAvailable() {
return NativeCodeLoader.isNativeCodeLoaded() && nativeLoaded;
}
/** Initialize the JNI method ID and | ID |
java | quarkusio__quarkus | extensions/qute/deployment/src/main/java/io/quarkus/qute/deployment/MessageBundleProcessor.java | {
"start": 90636,
"end": 91249
} | class ____ implements ClassInfoWrapper {
private final ClassInfo classInfo;
SimpleClassInfoWrapper(ClassInfo classInfo) {
this.classInfo = classInfo;
}
@Override
public ClassInfo getClassInfo() {
return classInfo;
}
@Override
public final List<MethodInfo> methods() {
return classInfo.methods();
}
@Override
public final MethodInfo method(String name, Type... parameters) {
return classInfo.method(name, parameters);
}
}
private static | SimpleClassInfoWrapper |
java | apache__flink | flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/impl/ContinuousFileSplitEnumerator.java | {
"start": 2004,
"end": 6603
} | class ____
implements SplitEnumerator<FileSourceSplit, PendingSplitsCheckpoint<FileSourceSplit>>,
SupportsBatchSnapshot {
private static final Logger LOG = LoggerFactory.getLogger(ContinuousFileSplitEnumerator.class);
private final SplitEnumeratorContext<FileSourceSplit> context;
private final FileSplitAssigner splitAssigner;
private final FileEnumerator enumerator;
private final HashSet<Path> pathsAlreadyProcessed;
private final LinkedHashMap<Integer, String> readersAwaitingSplit;
private final Path[] paths;
private final long discoveryInterval;
// ------------------------------------------------------------------------
public ContinuousFileSplitEnumerator(
SplitEnumeratorContext<FileSourceSplit> context,
FileEnumerator enumerator,
FileSplitAssigner splitAssigner,
Path[] paths,
Collection<Path> alreadyDiscoveredPaths,
long discoveryInterval) {
checkArgument(discoveryInterval > 0L);
this.context = checkNotNull(context);
this.enumerator = checkNotNull(enumerator);
this.splitAssigner = checkNotNull(splitAssigner);
this.paths = paths;
this.discoveryInterval = discoveryInterval;
this.pathsAlreadyProcessed = new HashSet<>(alreadyDiscoveredPaths);
this.readersAwaitingSplit = new LinkedHashMap<>();
}
@Override
public void start() {
context.callAsync(
() -> enumerator.enumerateSplits(paths, 1),
this::processDiscoveredSplits,
discoveryInterval,
discoveryInterval);
}
@Override
public void close() throws IOException {
// no resources to close
}
@Override
public void addReader(int subtaskId) {
// this source is purely lazy-pull-based, nothing to do upon registration
}
@Override
public void handleSplitRequest(int subtaskId, @Nullable String requesterHostname) {
readersAwaitingSplit.put(subtaskId, requesterHostname);
assignSplits();
}
@Override
public void handleSourceEvent(int subtaskId, SourceEvent sourceEvent) {
LOG.error("Received unrecognized event: {}", sourceEvent);
}
@Override
public void addSplitsBack(List<FileSourceSplit> splits, int subtaskId) {
LOG.debug("File Source Enumerator adds splits back: {}", splits);
splitAssigner.addSplits(splits);
}
@Override
public PendingSplitsCheckpoint<FileSourceSplit> snapshotState(long checkpointId)
throws Exception {
final PendingSplitsCheckpoint<FileSourceSplit> checkpoint =
PendingSplitsCheckpoint.fromCollectionSnapshot(
splitAssigner.remainingSplits(), pathsAlreadyProcessed);
LOG.debug("Source Checkpoint is {}", checkpoint);
return checkpoint;
}
// ------------------------------------------------------------------------
private void processDiscoveredSplits(Collection<FileSourceSplit> splits, Throwable error) {
if (error != null) {
LOG.error("Failed to enumerate files", error);
return;
}
final Collection<FileSourceSplit> newSplits =
splits.stream()
.filter((split) -> pathsAlreadyProcessed.add(split.path()))
.collect(Collectors.toList());
splitAssigner.addSplits(newSplits);
assignSplits();
}
private void assignSplits() {
final Iterator<Map.Entry<Integer, String>> awaitingReader =
readersAwaitingSplit.entrySet().iterator();
while (awaitingReader.hasNext()) {
final Map.Entry<Integer, String> nextAwaiting = awaitingReader.next();
// if the reader that requested another split has failed in the meantime, remove
// it from the list of waiting readers
if (!context.registeredReaders().containsKey(nextAwaiting.getKey())) {
awaitingReader.remove();
continue;
}
final String hostname = nextAwaiting.getValue();
final int awaitingSubtask = nextAwaiting.getKey();
final Optional<FileSourceSplit> nextSplit = splitAssigner.getNext(hostname);
if (nextSplit.isPresent()) {
context.assignSplit(nextSplit.get(), awaitingSubtask);
awaitingReader.remove();
} else {
break;
}
}
}
}
| ContinuousFileSplitEnumerator |
java | spring-projects__spring-boot | module/spring-boot-health/src/main/java/org/springframework/boot/health/autoconfigure/actuate/endpoint/NoSuchHealthContributorFailureAnalyzer.java | {
"start": 1171,
"end": 1646
} | class ____ extends AbstractFailureAnalyzer<NoSuchHealthContributorException> {
@Override
protected FailureAnalysis analyze(Throwable rootFailure, NoSuchHealthContributorException cause) {
return new FailureAnalysis(cause.getMessage(), "Update your application to correct the invalid configuration.\n"
+ "You can also set 'management.endpoint.health.validate-group-membership' to false to disable the validation.",
cause);
}
}
| NoSuchHealthContributorFailureAnalyzer |
java | google__dagger | javatests/dagger/hilt/processor/internal/definecomponent/DefineComponentProcessorTest.java | {
"start": 5217,
"end": 5965
} | interface ____<T> {}");
HiltCompilerTests.hiltCompiler(component)
.compile(
subject -> {
subject.hasErrorCount(1);
subject.hasErrorContaining(
"@DefineComponent test.FooComponent<T>, cannot have type parameters.");
});
}
@Test
public void testDefineComponentWithInvalidComponent_fails() {
Source component =
HiltCompilerTests.javaSource(
"test.FooComponent",
"package test;",
"",
"import dagger.hilt.DefineComponent;",
"import dagger.hilt.android.qualifiers.ApplicationContext;",
"",
"@DefineComponent( parent = ApplicationContext.class )",
" | FooComponent |
java | elastic__elasticsearch | x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilder.java | {
"start": 1869,
"end": 8283
} | class ____ {
private final Client client;
private final NamedXContentRegistry xContentRegistry;
private final AnomalyDetectionAuditor auditor;
private final AnnotationPersister annotationPersister;
private final Supplier<Long> currentTimeSupplier;
private final JobResultsPersister jobResultsPersister;
private final boolean remoteClusterClient;
private final ClusterService clusterService;
private volatile long delayedDataCheckFreq;
public DatafeedJobBuilder(
Client client,
NamedXContentRegistry xContentRegistry,
AnomalyDetectionAuditor auditor,
AnnotationPersister annotationPersister,
Supplier<Long> currentTimeSupplier,
JobResultsPersister jobResultsPersister,
Settings settings,
ClusterService clusterService
) {
this.client = client;
this.xContentRegistry = Objects.requireNonNull(xContentRegistry);
this.auditor = Objects.requireNonNull(auditor);
this.annotationPersister = Objects.requireNonNull(annotationPersister);
this.currentTimeSupplier = Objects.requireNonNull(currentTimeSupplier);
this.jobResultsPersister = Objects.requireNonNull(jobResultsPersister);
this.remoteClusterClient = DiscoveryNode.isRemoteClusterClient(settings);
this.delayedDataCheckFreq = DELAYED_DATA_CHECK_FREQ.get(settings).millis();
this.clusterService = Objects.requireNonNull(clusterService);
clusterService.getClusterSettings().addSettingsUpdateConsumer(DELAYED_DATA_CHECK_FREQ, this::setDelayedDataCheckFreq);
}
private void setDelayedDataCheckFreq(TimeValue value) {
this.delayedDataCheckFreq = value.millis();
}
void build(TransportStartDatafeedAction.DatafeedTask task, DatafeedContext context, ActionListener<DatafeedJob> listener) {
final ParentTaskAssigningClient parentTaskAssigningClient = new ParentTaskAssigningClient(client, clusterService.localNode(), task);
final DatafeedConfig datafeedConfig = context.datafeedConfig();
final Job job = context.job();
final long latestFinalBucketEndMs = context.restartTimeInfo().getLatestFinalBucketTimeMs() == null
? -1
: context.restartTimeInfo().getLatestFinalBucketTimeMs() + job.getAnalysisConfig().getBucketSpan().millis() - 1;
final long latestRecordTimeMs = context.restartTimeInfo().getLatestRecordTimeMs() == null
? -1
: context.restartTimeInfo().getLatestRecordTimeMs();
final DatafeedTimingStatsReporter timingStatsReporter = new DatafeedTimingStatsReporter(
context.timingStats(),
jobResultsPersister::persistDatafeedTimingStats
);
// Validate remote indices are available and get the job
try {
checkRemoteIndicesAreAvailable(datafeedConfig);
} catch (Exception e) {
listener.onFailure(e);
return;
}
// Re-validation is required as the config has been re-read since
// the previous validation
try {
DatafeedJobValidator.validate(datafeedConfig, job, xContentRegistry);
} catch (Exception e) {
listener.onFailure(e);
return;
}
ActionListener<DataExtractorFactory> dataExtractorFactoryHandler = ActionListener.wrap(dataExtractorFactory -> {
TimeValue frequency = getFrequencyOrDefault(datafeedConfig, job, xContentRegistry);
TimeValue queryDelay = datafeedConfig.getQueryDelay();
DelayedDataDetector delayedDataDetector = DelayedDataDetectorFactory.buildDetector(
job,
datafeedConfig,
parentTaskAssigningClient,
xContentRegistry
);
DatafeedJob datafeedJob = new DatafeedJob(
job.getId(),
buildDataDescription(job),
frequency.millis(),
queryDelay.millis(),
dataExtractorFactory,
timingStatsReporter,
parentTaskAssigningClient,
auditor,
annotationPersister,
currentTimeSupplier,
delayedDataDetector,
datafeedConfig.getMaxEmptySearches(),
latestFinalBucketEndMs,
latestRecordTimeMs,
context.restartTimeInfo().haveSeenDataPreviously(),
delayedDataCheckFreq
);
listener.onResponse(datafeedJob);
}, e -> {
auditor.error(job.getId(), e.getMessage());
listener.onFailure(e);
});
DataExtractorFactory.create(
parentTaskAssigningClient,
datafeedConfig,
job,
xContentRegistry,
timingStatsReporter,
dataExtractorFactoryHandler
);
}
private void checkRemoteIndicesAreAvailable(DatafeedConfig datafeedConfig) {
if (remoteClusterClient == false) {
List<String> remoteIndices = RemoteClusterLicenseChecker.remoteIndices(datafeedConfig.getIndices());
if (remoteIndices.isEmpty() == false) {
throw ExceptionsHelper.badRequestException(
Messages.getMessage(
Messages.DATAFEED_NEEDS_REMOTE_CLUSTER_SEARCH,
datafeedConfig.getId(),
remoteIndices,
clusterService.getNodeName()
)
);
}
}
}
private static TimeValue getFrequencyOrDefault(DatafeedConfig datafeed, Job job, NamedXContentRegistry xContentRegistry) {
TimeValue frequency = datafeed.getFrequency();
if (frequency == null) {
TimeValue bucketSpan = job.getAnalysisConfig().getBucketSpan();
return datafeed.defaultFrequency(bucketSpan, xContentRegistry);
}
return frequency;
}
private static DataDescription buildDataDescription(Job job) {
DataDescription.Builder dataDescription = new DataDescription.Builder();
if (job.getDataDescription() != null) {
dataDescription.setTimeField(job.getDataDescription().getTimeField());
}
dataDescription.setTimeFormat(DataDescription.EPOCH_MS);
return dataDescription.build();
}
}
| DatafeedJobBuilder |
java | google__dagger | javatests/dagger/hilt/android/MultiTestRootExternalModules.java | {
"start": 1105,
"end": 1300
} | interface ____ {
@Provides
@External
static String provideStringValue() {
return EXTERNAL_STR_VALUE;
}
}
@Module
@InstallIn(ActivityComponent.class)
| PkgPrivateAppModule |
java | grpc__grpc-java | api/src/main/java/io/grpc/ServiceProviders.java | {
"start": 5002,
"end": 5316
} | class ____'t be found at
// all and so would be skipped. We want to skip in this case as well.
return null;
} catch (Throwable t) {
throw new ServiceConfigurationError(
String.format("Provider %s could not be instantiated %s", rawClass.getName(), t), t);
}
}
/**
* An | wouldn |
java | hibernate__hibernate-orm | hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/pagination/FirstLimitHandler.java | {
"start": 614,
"end": 1506
} | class ____ extends AbstractNoOffsetLimitHandler {
public static final FirstLimitHandler INSTANCE = new FirstLimitHandler(false);
public FirstLimitHandler(boolean variableLimit) {
super(variableLimit);
}
@Override
protected String limitClause() {
return " first ?";
}
@Override
protected String limitClause(int jdbcParameterCount, ParameterMarkerStrategy parameterMarkerStrategy) {
return " first " + parameterMarkerStrategy.createMarker( 1, null ) + " rows only";
}
@Override
protected String insert(String first, String sql) {
return insertAfterSelect( first, sql );
}
@Override
public boolean bindLimitParametersFirst() {
return true;
}
@Override
public boolean processSqlMutatesState() {
return false;
}
@Override
public int getParameterPositionStart(Limit limit) {
return hasMaxRows( limit ) && supportsVariableLimit() ? 2 : 1;
}
}
| FirstLimitHandler |
java | grpc__grpc-java | api/src/main/java/io/grpc/InternalChannelz.java | {
"start": 20252,
"end": 20600
} | class ____ {
@Nullable
public final Tls tls;
@Nullable
public final OtherSecurity other;
public Security(Tls tls) {
this.tls = checkNotNull(tls);
this.other = null;
}
public Security(OtherSecurity other) {
this.tls = null;
this.other = checkNotNull(other);
}
}
public static final | Security |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/common/util/CachedSupplier.java | {
"start": 762,
"end": 1904
} | class ____<T> implements Supplier<T> {
private volatile Supplier<T> supplier;
// result does not need to be volatile as we only read it after reading that the supplier got nulled out. Since we null out the
// supplier after setting the result, total store order from an observed volatile write is sufficient to make a plain read safe.
private T result;
public static <R> CachedSupplier<R> wrap(Supplier<R> supplier) {
if (supplier instanceof CachedSupplier<R> c) {
// no need to wrap a cached supplier again
return c;
}
return new CachedSupplier<>(supplier);
}
private CachedSupplier(Supplier<T> supplier) {
this.supplier = supplier;
}
@Override
public T get() {
if (supplier == null) {
return result;
}
return initResult();
}
private synchronized T initResult() {
var s = supplier;
if (s != null) {
T res = s.get();
result = res;
supplier = null;
return res;
} else {
return result;
}
}
}
| CachedSupplier |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/core/scheduler/VirtualThreadFactory.java | {
"start": 1285,
"end": 2005
} | class ____ implements ThreadFactory,
Thread.UncaughtExceptionHandler {
VirtualThreadFactory(String name,
boolean inheritThreadLocals,
@Nullable BiConsumer<Thread, Throwable> uncaughtExceptionHandler) {
throw new UnsupportedOperationException("Virtual Threads are not supported in JVM lower than 21");
}
@Override
public final Thread newThread(@NonNull Runnable runnable) {
throw new UnsupportedOperationException("Virtual Threads are not supported in JVM lower than 21");
}
@Override
public void uncaughtException(Thread t, Throwable e) {
throw new UnsupportedOperationException("Virtual Threads are not supported in JVM lower than 21");
}
}
| VirtualThreadFactory |
java | playframework__playframework | documentation/manual/working/javaGuide/main/dependencyinjection/code/javaguide/di/CurrentSharePrice.java | {
"start": 237,
"end": 400
} | class ____ {
private volatile int price;
public void set(int p) {
price = p;
}
public int get() {
return price;
}
}
// #singleton
| CurrentSharePrice |
java | google__gson | gson/src/test/java/com/google/gson/functional/NamingPolicyTest.java | {
"start": 13140,
"end": 13224
} | class ____ {
int a;
int b;
ClassWithTwoFields() {}
}
}
| ClassWithTwoFields |
java | elastic__elasticsearch | libs/core/src/test/java/org/elasticsearch/core/internal/provider/EmbeddedImplClassLoaderTests.java | {
"start": 26024,
"end": 28278
} | class ____ a package known to the loader
expectThrows(CNFE, () -> loader.loadClass("p.Unknown"));
expectThrows(CNFE, () -> loader.loadClass("q.Unknown"));
expectThrows(CNFE, () -> loader.loadClass("r.Unknown"));
}
}
/*
* Tests resource lookup across multiple embedded jars.
*/
public void testResourcesWithMultipleJars() throws Exception {
Path topLevelDir = createTempDir(getTestName());
Map<String, String> jarEntries = new HashMap<>();
jarEntries.put("IMPL-JARS/blah/LISTING.TXT", "foo.jar\nbar.jar\nbaz.jar");
jarEntries.put("IMPL-JARS/blah/foo.jar/res.txt", "fooRes");
jarEntries.put("IMPL-JARS/blah/bar.jar/META-INF/MANIFEST.MF", "Multi-Release: TRUE\n");
jarEntries.put("IMPL-JARS/blah/bar.jar/META-INF/versions/9/res.txt", "barRes");
jarEntries.put("IMPL-JARS/blah/baz.jar/META-INF/MANIFEST.MF", "Multi-Release: trUE\n");
jarEntries.put("IMPL-JARS/blah/baz.jar/META-INF/versions/11/res.txt", "bazRes");
Path outerJar = topLevelDir.resolve("impl.jar");
JarUtils.createJarWithEntriesUTF(outerJar, jarEntries);
URL[] urls = new URL[] { outerJar.toUri().toURL() };
try (URLClassLoader parent = loader(urls)) {
EmbeddedImplClassLoader loader = EmbeddedImplClassLoader.getInstance(parent, "blah");
var res = Collections.list(loader.getResources("res.txt"));
assertThat(res, hasSize(3));
List<String> l = res.stream().map(EmbeddedImplClassLoaderTests::urlToString).toList();
assertThat(l, containsInAnyOrder("fooRes", "barRes", "bazRes"));
}
}
private static URLClassLoader loader(URL[] urls) {
return URLClassLoader.newInstance(urls, EmbeddedImplClassLoaderTests.class.getClassLoader());
}
@SuppressForbidden(reason = "file urls")
static String urlToString(URL url) {
try {
var urlc = url.openConnection();
urlc.setUseCaches(false);
try (var is = urlc.getInputStream()) {
return new String(is.readAllBytes(), UTF_8);
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
}
| in |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java | {
"start": 4027,
"end": 9405
} | class ____ {
@Test
public void testRMConnectionRetry() throws Exception {
// verify the connection exception is thrown
// if we haven't exhausted the retry interval
ApplicationMasterProtocol mockScheduler =
mock(ApplicationMasterProtocol.class);
when(mockScheduler.allocate(isA(AllocateRequest.class)))
.thenThrow(RPCUtil.getRemoteException(new IOException("forcefail")));
Configuration conf = new Configuration();
LocalContainerAllocator lca =
new StubbedLocalContainerAllocator(mockScheduler);
lca.init(conf);
lca.start();
try {
lca.heartbeat();
fail("heartbeat was supposed to throw");
} catch (YarnException e) {
// YarnException is expected
} finally {
lca.stop();
}
// verify YarnRuntimeException is thrown when the retry interval has expired
conf.setLong(MRJobConfig.MR_AM_TO_RM_WAIT_INTERVAL_MS, 0);
lca = new StubbedLocalContainerAllocator(mockScheduler);
lca.init(conf);
lca.start();
try {
lca.heartbeat();
fail("heartbeat was supposed to throw");
} catch (YarnRuntimeException e) {
// YarnRuntimeException is expected
} finally {
lca.stop();
}
}
@Test
public void testAllocResponseId() throws Exception {
ApplicationMasterProtocol scheduler = new MockScheduler();
Configuration conf = new Configuration();
LocalContainerAllocator lca =
new StubbedLocalContainerAllocator(scheduler);
lca.init(conf);
lca.start();
// do two heartbeats to verify the response ID is being tracked
lca.heartbeat();
lca.heartbeat();
lca.close();
}
@Test
public void testAMRMTokenUpdate() throws Exception {
Configuration conf = new Configuration();
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(
ApplicationId.newInstance(1, 1), 1);
AMRMTokenIdentifier oldTokenId = new AMRMTokenIdentifier(attemptId, 1);
AMRMTokenIdentifier newTokenId = new AMRMTokenIdentifier(attemptId, 2);
Token<AMRMTokenIdentifier> oldToken = new Token<AMRMTokenIdentifier>(
oldTokenId.getBytes(), "oldpassword".getBytes(), oldTokenId.getKind(),
new Text());
Token<AMRMTokenIdentifier> newToken = new Token<AMRMTokenIdentifier>(
newTokenId.getBytes(), "newpassword".getBytes(), newTokenId.getKind(),
new Text());
MockScheduler scheduler = new MockScheduler();
scheduler.amToken = newToken;
final LocalContainerAllocator lca =
new StubbedLocalContainerAllocator(scheduler);
lca.init(conf);
lca.start();
UserGroupInformation testUgi = UserGroupInformation.createUserForTesting(
"someuser", new String[0]);
testUgi.addToken(oldToken);
testUgi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
lca.heartbeat();
return null;
}
});
lca.close();
// verify there is only one AMRM token in the UGI and it matches the
// updated token from the RM
int tokenCount = 0;
Token<? extends TokenIdentifier> ugiToken = null;
for (Token<? extends TokenIdentifier> token : testUgi.getTokens()) {
if (AMRMTokenIdentifier.KIND_NAME.equals(token.getKind())) {
ugiToken = token;
++tokenCount;
}
}
assertEquals(1, tokenCount, "too many AMRM tokens");
assertArrayEquals(newToken.getIdentifier(), ugiToken.getIdentifier(),
"token identifier not updated");
assertArrayEquals(newToken.getPassword(), ugiToken.getPassword(),
"token password not updated");
assertEquals(new Text(ClientRMProxy.getAMRMTokenService(conf)),
ugiToken.getService(), "AMRM token service not updated");
}
@Test
public void testAllocatedContainerResourceIsNotNull() {
ArgumentCaptor<TaskAttemptContainerAssignedEvent> containerAssignedCaptor
= ArgumentCaptor.forClass(TaskAttemptContainerAssignedEvent.class);
@SuppressWarnings("unchecked")
EventHandler<Event> eventHandler = mock(EventHandler.class);
AppContext context = mock(AppContext.class) ;
when(context.getEventHandler()).thenReturn(eventHandler);
ContainerId containerId = ContainerId.fromString(
"container_1427562107907_0002_01_000001");
LocalContainerAllocator containerAllocator = new LocalContainerAllocator(
mock(ClientService.class), context, "localhost", -1, -1, containerId);
ContainerAllocatorEvent containerAllocatorEvent =
createContainerRequestEvent();
containerAllocator.handle(containerAllocatorEvent);
verify(eventHandler, times(1)).handle(containerAssignedCaptor.capture());
Container container = containerAssignedCaptor.getValue().getContainer();
Resource containerResource = container.getResource();
assertNotNull(containerResource);
assertThat(containerResource.getMemorySize()).isEqualTo(0);
assertThat(containerResource.getVirtualCores()).isEqualTo(0);
}
private static ContainerAllocatorEvent createContainerRequestEvent() {
TaskAttemptId taskAttemptId = mock(TaskAttemptId.class);
TaskId taskId = mock(TaskId.class);
when(taskAttemptId.getTaskId()).thenReturn(taskId);
return new ContainerAllocatorEvent(taskAttemptId,
ContainerAllocator.EventType.CONTAINER_REQ);
}
private static | TestLocalContainerAllocator |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/testkit/MutatesGlobalConfiguration.java | {
"start": 1184,
"end": 1538
} | class ____ mutates the global configuration.
*
* <p>By using this annotation, any tests that mutate this configuration will have the configuration
* reset to the default values after each test case runs.
*
* @author Ashley Scopes
*/
@ExtendWith(AssumptionMutatingExtension.class)
@Isolated("Mutates global state")
@Target(ElementType.TYPE)
public @ | that |
java | apache__kafka | clients/src/main/java/org/apache/kafka/common/errors/GroupMaxSizeReachedException.java | {
"start": 963,
"end": 1165
} | class ____ extends ApiException {
private static final long serialVersionUID = 1L;
public GroupMaxSizeReachedException(String message) {
super(message);
}
}
| GroupMaxSizeReachedException |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/blob/TransientBlobService.java | {
"start": 1503,
"end": 5821
} | interface ____ extends Closeable {
// --------------------------------------------------------------------------------------------
// GET
// --------------------------------------------------------------------------------------------
/**
* Returns the path to a local copy of the (job-unrelated) file associated with the provided
* blob key.
*
* @param key blob key associated with the requested file
* @return The path to the file.
* @throws java.io.FileNotFoundException when the path does not exist;
* @throws IOException if any other error occurs when retrieving the file
*/
File getFile(TransientBlobKey key) throws IOException;
/**
* Returns the path to a local copy of the file associated with the provided job ID and blob
* key.
*
* @param jobId ID of the job this blob belongs to
* @param key blob key associated with the requested file
* @return The path to the file.
* @throws java.io.FileNotFoundException when the path does not exist;
* @throws IOException if any other error occurs when retrieving the file
*/
File getFile(JobID jobId, TransientBlobKey key) throws IOException;
// --------------------------------------------------------------------------------------------
// PUT
// --------------------------------------------------------------------------------------------
/**
* Uploads the (job-unrelated) data of the given byte array to the BLOB server.
*
* @param value the buffer to upload
* @return the computed BLOB key identifying the BLOB on the server
* @throws IOException thrown if an I/O error occurs while uploading the data to the BLOB server
*/
TransientBlobKey putTransient(byte[] value) throws IOException;
/**
* Uploads the data of the given byte array for the given job to the BLOB server.
*
* @param jobId the ID of the job the BLOB belongs to
* @param value the buffer to upload
* @return the computed BLOB key identifying the BLOB on the server
* @throws IOException thrown if an I/O error occurs while uploading the data to the BLOB server
*/
TransientBlobKey putTransient(JobID jobId, byte[] value) throws IOException;
/**
* Uploads the (job-unrelated) data from the given input stream to the BLOB server.
*
* @param inputStream the input stream to read the data from
* @return the computed BLOB key identifying the BLOB on the server
* @throws IOException thrown if an I/O error occurs while reading the data from the input
* stream or uploading the data to the BLOB server
*/
TransientBlobKey putTransient(InputStream inputStream) throws IOException;
/**
* Uploads the data from the given input stream for the given job to the BLOB server.
*
* @param jobId ID of the job this blob belongs to
* @param inputStream the input stream to read the data from
* @return the computed BLOB key identifying the BLOB on the server
* @throws IOException thrown if an I/O error occurs while reading the data from the input
* stream or uploading the data to the BLOB server
*/
TransientBlobKey putTransient(JobID jobId, InputStream inputStream) throws IOException;
// --------------------------------------------------------------------------------------------
// DELETE
// --------------------------------------------------------------------------------------------
/**
* Deletes the (job-unrelated) file associated with the provided blob key from the local cache.
*
* @param key associated with the file to be deleted
* @return <tt>true</tt> if the given blob is successfully deleted or non-existing;
* <tt>false</tt> otherwise
*/
boolean deleteFromCache(TransientBlobKey key);
/**
* Deletes the file associated with the provided job ID and blob key from the local cache.
*
* @param jobId ID of the job this blob belongs to
* @param key associated with the file to be deleted
* @return <tt>true</tt> if the given blob is successfully deleted or non-existing;
* <tt>false</tt> otherwise
*/
boolean deleteFromCache(JobID jobId, TransientBlobKey key);
}
| TransientBlobService |
java | quarkusio__quarkus | extensions/reactive-oracle-client/deployment/src/test/java/io/quarkus/reactive/oracle/client/OraclePoolProducerTest.java | {
"start": 1363,
"end": 1718
} | class ____ {
@Inject
io.vertx.mutiny.sqlclient.Pool oracleClient;
public CompletionStage<Void> verify() {
return oracleClient.query("SELECT 1 FROM DUAL").execute()
.onItem().ignore().andContinueWithNull()
.subscribeAsCompletionStage();
}
}
}
| BeanUsingMutinyOracleClient |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/query/hql/instantiation/InstantiationWithMultipleWrapperConstructorsTest.java | {
"start": 3763,
"end": 4266
} | class ____ {
private final Integer key;
private final String value;
// primitive int should be compatible with both Short and Integer
public KeyValuePrimitive(int k, String val) {
key = k;
value = val;
}
// primitive int should be compatible with both Short and Integer
public KeyValuePrimitive(int k, int val) {
key = k;
value = String.valueOf( val );
}
public Integer getKey() {
return key;
}
public String getValue() {
return value;
}
}
}
| KeyValuePrimitive |
java | junit-team__junit5 | platform-tests/src/test/java/org/junit/platform/launcher/core/CompositeTestExecutionListenerTests.java | {
"start": 8833,
"end": 9306
} | class ____ extends ThrowingTestExecutionListener
implements EagerTestExecutionListener {
@Override
public void executionJustStarted(TestIdentifier testIdentifier) {
throw new RuntimeException("failed to invoke listener");
}
@Override
public void executionJustFinished(TestIdentifier testIdentifier, TestExecutionResult testExecutionResult) {
throw new RuntimeException("failed to invoke listener");
}
}
private static | ThrowingEagerTestExecutionListener |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/FreemarkerEndpointBuilderFactory.java | {
"start": 14367,
"end": 15886
} | class ____ {
/**
* The internal instance of the builder used to access to all the
* methods representing the name of headers.
*/
private static final FreemarkerHeaderNameBuilder INSTANCE = new FreemarkerHeaderNameBuilder();
/**
* A URI for the template resource to use instead of the endpoint
* configured.
*
* The option is a: {@code String} type.
*
* Group: producer
*
* @return the name of the header {@code FreemarkerResourceUri}.
*/
public String freemarkerResourceUri() {
return "CamelFreemarkerResourceUri";
}
/**
* The template to use instead of the endpoint configured.
*
* The option is a: {@code String} type.
*
* Group: producer
*
* @return the name of the header {@code FreemarkerTemplate}.
*/
public String freemarkerTemplate() {
return "CamelFreemarkerTemplate";
}
/**
* The data model.
*
* The option is a: {@code Object} type.
*
* Group: producer
*
* @return the name of the header {@code FreemarkerDataModel}.
*/
public String freemarkerDataModel() {
return "CamelFreemarkerDataModel";
}
}
static FreemarkerEndpointBuilder endpointBuilder(String componentName, String path) {
| FreemarkerHeaderNameBuilder |
java | quarkusio__quarkus | integration-tests/grpc-streaming/src/test/java/io/quarkus/grpc/example/streaming/StreamingEndpointTestBase.java | {
"start": 296,
"end": 1396
} | class ____ {
protected static final TypeRef<List<String>> LIST_OF_STRING = new TypeRef<List<String>>() {
};
@Test
public void testSource() {
List<String> response = get("/streaming").as(LIST_OF_STRING);
assertThat(response).containsExactly("0", "1", "2", "3", "4", "5", "6", "7", "8", "9");
ensureThatMetricsAreProduced();
}
@Test
public void testPipe() {
Response r = get("/streaming/3");
List<String> response = r.as(LIST_OF_STRING);
assertThat(response).containsExactly("0", "0", "1", "3");
}
@Test
public void testSink() {
get("/streaming/sink/3")
.then().statusCode(204);
}
public void ensureThatMetricsAreProduced() {
String metrics = get("/q/metrics")
.then().statusCode(200)
.extract().asString();
assertThat(metrics)
.contains("grpc_server_processing_duration_seconds_max") // server
.contains("grpc_client_processing_duration_seconds_count"); // client
}
}
| StreamingEndpointTestBase |
java | apache__camel | core/camel-main/src/main/java/org/apache/camel/main/SimpleMain.java | {
"start": 932,
"end": 1859
} | class ____ extends BaseMainSupport {
public SimpleMain(CamelContext camelContext) {
super(camelContext);
}
@Override
protected void doInit() throws Exception {
super.doInit();
postProcessCamelContext(camelContext);
}
@Override
protected void doStart() throws Exception {
for (MainListener listener : listeners) {
listener.beforeStart(this);
}
super.doStart();
getCamelContext().start();
for (MainListener listener : listeners) {
listener.afterStart(this);
}
}
@Override
protected void doStop() throws Exception {
for (MainListener listener : listeners) {
listener.beforeStop(this);
}
super.doStop();
getCamelContext().stop();
for (MainListener listener : listeners) {
listener.afterStop(this);
}
}
}
| SimpleMain |
java | spring-projects__spring-data-jpa | spring-data-jpa/src/main/java/org/springframework/data/jpa/repository/support/Querydsl.java | {
"start": 1967,
"end": 8515
} | class ____ {
private final EntityManager em;
private final PersistenceProvider provider;
private final PathBuilder<?> builder;
/**
* Creates a new {@link Querydsl} for the given {@link EntityManager} and {@link PathBuilder}.
*
* @param em must not be {@literal null}.
* @param builder must not be {@literal null}.
*/
public Querydsl(EntityManager em, PathBuilder<?> builder) {
Assert.notNull(em, "EntityManager must not be null");
Assert.notNull(builder, "PathBuilder must not be null");
this.em = em;
this.provider = PersistenceProvider.fromEntityManager(em);
this.builder = builder;
}
/**
* Creates the {@link JPQLQuery} instance based on the configured {@link EntityManager}.
*/
public <T> AbstractJPAQuery<T, JPAQuery<T>> createQuery() {
JPQLTemplates templates = getTemplates();
return templates != null ? new SpringDataJpaQuery<>(em, templates) : new SpringDataJpaQuery<>(em);
}
/**
* Obtains the {@link JPQLTemplates} for the configured {@link EntityManager}. Can return {@literal null} to use the
* default templates.
*
* @return the {@link JPQLTemplates} for the configured {@link EntityManager}, {@link JPQLTemplates#DEFAULT} by
* default.
* @since 3.5
*/
public JPQLTemplates getTemplates() {
return switch (provider) {
case ECLIPSELINK -> EclipseLinkTemplates.DEFAULT;
case HIBERNATE -> HQLTemplates.DEFAULT;
default -> JPQLTemplates.DEFAULT;
};
}
/**
* Creates the {@link JPQLQuery} instance based on the configured {@link EntityManager}.
*
* @param paths must not be {@literal null}.
*/
public AbstractJPAQuery<Object, JPAQuery<Object>> createQuery(EntityPath<?>... paths) {
Assert.notNull(paths, "Paths must not be null");
return createQuery().from(paths);
}
/**
* Applies the given {@link Pageable} to the given {@link JPQLQuery}.
*
* @param pageable must not be {@literal null}.
* @param query must not be {@literal null}.
* @return the Querydsl {@link JPQLQuery}.
*/
public <T> JPQLQuery<T> applyPagination(Pageable pageable, JPQLQuery<T> query) {
Assert.notNull(pageable, "Pageable must not be null");
Assert.notNull(query, "JPQLQuery must not be null");
if (pageable.isPaged()) {
query.offset(pageable.getOffset());
query.limit(pageable.getPageSize());
}
return applySorting(pageable.getSort(), query);
}
/**
* Applies sorting to the given {@link JPQLQuery}.
*
* @param sort must not be {@literal null}.
* @param query must not be {@literal null}.
* @return the Querydsl {@link JPQLQuery}
*/
public <T> JPQLQuery<T> applySorting(Sort sort, JPQLQuery<T> query) {
Assert.notNull(sort, "Sort must not be null");
Assert.notNull(query, "Query must not be null");
if (sort.isUnsorted()) {
return query;
}
if (sort instanceof QSort qsort) {
return addOrderByFrom(qsort, query);
}
return addOrderByFrom(sort, query);
}
/**
* Applies the given {@link OrderSpecifier}s to the given {@link JPQLQuery}. Potentially transforms the given
* {@code OrderSpecifier}s to be able to injection potentially necessary left-joins.
*
* @param qsort must not be {@literal null}.
* @param query must not be {@literal null}.
*/
private <T> JPQLQuery<T> addOrderByFrom(QSort qsort, JPQLQuery<T> query) {
List<OrderSpecifier<?>> orderSpecifiers = qsort.getOrderSpecifiers();
return query.orderBy(orderSpecifiers.toArray(new OrderSpecifier[0]));
}
/**
* Converts the {@link Order} items of the given {@link Sort} into {@link OrderSpecifier} and attaches those to the
* given {@link JPQLQuery}.
*
* @param sort must not be {@literal null}.
* @param query must not be {@literal null}.
*/
private <T> JPQLQuery<T> addOrderByFrom(Sort sort, JPQLQuery<T> query) {
Assert.notNull(sort, "Sort must not be null");
Assert.notNull(query, "Query must not be null");
for (Order order : sort) {
query.orderBy(toOrderSpecifier(order));
}
return query;
}
/**
* Transforms a plain {@link Order} into a QueryDsl specific {@link OrderSpecifier}.
*
* @param order must not be {@literal null}.
*/
@SuppressWarnings({ "rawtypes", "unchecked" })
private OrderSpecifier<?> toOrderSpecifier(Order order) {
return new OrderSpecifier(
order.isAscending() ? com.querydsl.core.types.Order.ASC : com.querydsl.core.types.Order.DESC,
buildOrderPropertyPathFrom(order), toQueryDslNullHandling(order.getNullHandling()));
}
/**
* Converts the given {@link org.springframework.data.domain.Sort.NullHandling} to the appropriate Querydsl
* {@link NullHandling}.
*
* @param nullHandling must not be {@literal null}.
* @since 1.6
*/
private NullHandling toQueryDslNullHandling(org.springframework.data.domain.Sort.NullHandling nullHandling) {
Assert.notNull(nullHandling, "NullHandling must not be null");
return switch (nullHandling) {
case NULLS_FIRST -> NullHandling.NullsFirst;
case NULLS_LAST -> NullHandling.NullsLast;
default -> NullHandling.Default;
};
}
/**
* Creates an {@link Expression} for the given {@link Order} property.
*
* @param order must not be {@literal null}.
*/
private Expression<?> buildOrderPropertyPathFrom(Order order) {
Assert.notNull(order, "Order must not be null");
QueryUtils.checkSortExpression(order);
PropertyPath path = PropertyPath.from(order.getProperty(), builder.getType());
Expression<?> sortPropertyExpression = builder;
while (path != null) {
sortPropertyExpression = !path.hasNext() && order.isIgnoreCase() && String.class.equals(path.getType()) //
? Expressions.stringPath((Path<?>) sortPropertyExpression, path.getSegment()).lower() //
: Expressions.path(path.getType(), (Path<?>) sortPropertyExpression, path.getSegment());
path = path.next();
}
return sortPropertyExpression;
}
/**
* Creates an {@link Expression} for the given {@code property} property.
*
* @param property must not be {@literal null}.
*/
Expression<?> createExpression(String property) {
Assert.notNull(property, "Property must not be null");
PropertyPath path = PropertyPath.from(property, builder.getType());
Expression<?> sortPropertyExpression = builder;
while (path != null) {
sortPropertyExpression = !path.hasNext() && String.class.equals(path.getType()) //
? Expressions.stringPath((Path<?>) sortPropertyExpression, path.getSegment()) //
: Expressions.path(path.getType(), (Path<?>) sortPropertyExpression, path.getSegment());
path = path.next();
}
return sortPropertyExpression;
}
}
| Querydsl |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/web/client/DefaultRestClient.java | {
"start": 25777,
"end": 25884
} | interface ____ {
void writeTo(ClientHttpRequest request) throws IOException;
}
}
private | InternalBody |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/selection/twosteperror/ErroneousMapperMC.java | {
"start": 660,
"end": 712
} | class ____ {
public String t1;
}
| Target |
java | junit-team__junit5 | junit-platform-engine/src/main/java/org/junit/platform/engine/support/hierarchical/ThrowableCollector.java | {
"start": 6453,
"end": 6570
} | interface ____ an executable block of code that may throw a
* {@link Throwable}.
*/
@FunctionalInterface
public | for |
java | apache__camel | components/camel-huawei/camel-huaweicloud-imagerecognition/src/generated/java/org/apache/camel/component/huaweicloud/image/ImageRecognitionComponentConfigurer.java | {
"start": 744,
"end": 2360
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
ImageRecognitionComponent target = (ImageRecognitionComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": target.setAutowiredEnabled(property(camelContext, boolean.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": return boolean.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
ImageRecognitionComponent target = (ImageRecognitionComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": return target.isAutowiredEnabled();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
default: return null;
}
}
}
| ImageRecognitionComponentConfigurer |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/error/AssertJMultipleFailuresError.java | {
"start": 933,
"end": 3444
} | class ____ extends MultipleFailuresError {
@Serial
private static final long serialVersionUID = 1L;
private static final String EOL = System.lineSeparator();
private static final String ERROR_SEPARATOR = EOL + "-- failure %d --";
private String heading;
private Object objectUnderTest;
private boolean showObjectUnderTest;
public AssertJMultipleFailuresError(String heading, List<? extends Throwable> failures) {
super(heading, failures);
this.heading = heading;
}
public AssertJMultipleFailuresError(String heading, Object objectUnderTest, List<? extends Throwable> failures) {
super(heading, failures);
this.heading = heading;
this.objectUnderTest = objectUnderTest;
this.showObjectUnderTest = true;
}
@Override
public String getMessage() {
List<Throwable> failures = getFailures();
int failureCount = failures.size();
if (failureCount == 0) return super.getMessage();
heading = isBlank(heading) ? "Multiple Failures" : heading.trim();
String beginningOfErrorMessage = showObjectUnderTest ? "%nFor %s,%n".formatted(objectUnderTest) : EOL;
var builder = new StringBuilder(beginningOfErrorMessage).append(heading)
.append(" (")
.append(failureCount).append(" ")
.append(failureCount == 1 ? "failure" : "failures")
.append(")");
List<Throwable> failuresWithLineNumbers = addLineNumberToErrorMessages(failures);
for (int i = 0; i < failureCount; i++) {
builder.append(errorSeparator(i + 1));
String message = nullSafeMessage(failuresWithLineNumbers.get(i));
// when we have a description, we add a line before for readability
if (hasDescription(message)) builder.append(EOL);
builder.append(message);
}
return builder.toString();
}
private String errorSeparator(int errorNumber) {
return ERROR_SEPARATOR.formatted(errorNumber);
}
private boolean hasDescription(String message) {
return message.startsWith("[");
}
private static boolean isBlank(String str) {
return str == null || str.trim().isEmpty();
}
private static String nullSafeMessage(Throwable failure) {
return isBlank(failure.getMessage()) ? "<no message> in " + failure.getClass().getName() : failure.getMessage();
}
}
| AssertJMultipleFailuresError |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesResponse.java | {
"start": 10991,
"end": 11167
} | class ____ message are equal
error == null ? null : error.getMessage(),
error == null ? null : error.getClass()
);
}
}
}
| and |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/any/annotations/LazyAnyDiscriminatorQueryTest.java | {
"start": 3897,
"end": 4518
} | class ____ {
@Id
private Long id;
@Any(fetch = FetchType.LAZY)
@AnyDiscriminator(DiscriminatorType.STRING)
@AnyDiscriminatorValues({
@AnyDiscriminatorValue(discriminator = "S", entity = StringProperty.class),
})
@AnyKeyJavaClass(Long.class)
@Column(name = "property_type")
@JoinColumn(name = "property_id")
private Property property;
public PropertyHolder() {
}
public PropertyHolder(Long id, Property property) {
this.id = id;
this.property = property;
}
public Long getId() {
return id;
}
public Property getProperty() {
return property;
}
}
public | PropertyHolder |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregationBuilder.java | {
"start": 1798,
"end": 9581
} | class ____ extends ValuesSourceAggregationBuilder<VariableWidthHistogramAggregationBuilder> {
public static final String NAME = "variable_width_histogram";
public static final ValuesSourceRegistry.RegistryKey<VariableWidthHistogramAggregatorSupplier> REGISTRY_KEY =
new ValuesSourceRegistry.RegistryKey<>(NAME, VariableWidthHistogramAggregatorSupplier.class);
private static final ParseField NUM_BUCKETS_FIELD = new ParseField("buckets");
private static final ParseField INITIAL_BUFFER_FIELD = new ParseField("initial_buffer");
private static final ParseField SHARD_SIZE_FIELD = new ParseField("shard_size");
public static final ObjectParser<VariableWidthHistogramAggregationBuilder, String> PARSER = ObjectParser.fromBuilder(
NAME,
VariableWidthHistogramAggregationBuilder::new
);
static {
ValuesSourceAggregationBuilder.declareFields(PARSER, true, true, true);
PARSER.declareInt(VariableWidthHistogramAggregationBuilder::setNumBuckets, NUM_BUCKETS_FIELD);
PARSER.declareInt(VariableWidthHistogramAggregationBuilder::setShardSize, SHARD_SIZE_FIELD);
PARSER.declareInt(VariableWidthHistogramAggregationBuilder::setInitialBuffer, INITIAL_BUFFER_FIELD);
}
private int numBuckets = 10;
private int shardSize = -1;
private int initialBuffer = -1;
public static void registerAggregators(ValuesSourceRegistry.Builder builder) {
VariableWidthHistogramAggregatorFactory.registerAggregators(builder);
}
/** Create a new builder with the given name. */
public VariableWidthHistogramAggregationBuilder(String name) {
super(name);
}
/** Read in object data from a stream, for internal use only. */
public VariableWidthHistogramAggregationBuilder(StreamInput in) throws IOException {
super(in);
numBuckets = in.readVInt();
}
protected VariableWidthHistogramAggregationBuilder(
VariableWidthHistogramAggregationBuilder clone,
AggregatorFactories.Builder factoriesBuilder,
Map<String, Object> metaData
) {
super(clone, factoriesBuilder, metaData);
this.numBuckets = clone.numBuckets;
}
@Override
protected ValuesSourceType defaultValueSourceType() {
return CoreValuesSourceType.NUMERIC;
}
public VariableWidthHistogramAggregationBuilder setNumBuckets(int numBuckets) {
if (numBuckets <= 0) {
throw new IllegalArgumentException(NUM_BUCKETS_FIELD.getPreferredName() + " must be greater than [0] for [" + name + "]");
}
this.numBuckets = numBuckets;
return this;
}
public VariableWidthHistogramAggregationBuilder setShardSize(int shardSize) {
if (shardSize <= 1) {
// A shard size of 1 will cause divide by 0s and, even if it worked, would produce garbage results.
throw new IllegalArgumentException(SHARD_SIZE_FIELD.getPreferredName() + " must be greater than [1] for [" + name + "]");
}
this.shardSize = shardSize;
return this;
}
public VariableWidthHistogramAggregationBuilder setInitialBuffer(int initialBuffer) {
if (initialBuffer <= 0) {
throw new IllegalArgumentException(INITIAL_BUFFER_FIELD.getPreferredName() + " must be greater than [0] for [" + name + "]");
}
this.initialBuffer = initialBuffer;
return this;
}
public int getShardSize() {
if (shardSize == -1) {
return numBuckets * 50;
}
return shardSize;
}
public int getInitialBuffer() {
if (initialBuffer == -1) {
return Math.min(10 * getShardSize(), 50000);
}
return initialBuffer;
}
@Override
public BucketCardinality bucketCardinality() {
return BucketCardinality.MANY;
}
@Override
protected AggregationBuilder shallowCopy(AggregatorFactories.Builder factoriesBuilder, Map<String, Object> metaData) {
return new VariableWidthHistogramAggregationBuilder(this, factoriesBuilder, metaData);
}
@Override
protected void innerWriteTo(StreamOutput out) throws IOException {
out.writeVInt(numBuckets);
}
@Override
protected ValuesSourceAggregatorFactory innerBuild(
AggregationContext context,
ValuesSourceConfig config,
AggregatorFactory parent,
AggregatorFactories.Builder subFactoriesBuilder
) throws IOException {
Settings settings = context.getIndexSettings().getNodeSettings();
int maxBuckets = MultiBucketConsumerService.MAX_BUCKET_SETTING.get(settings);
if (numBuckets > maxBuckets) {
throw new IllegalArgumentException(NUM_BUCKETS_FIELD.getPreferredName() + " must be less than " + maxBuckets);
}
int initialBuffer = getInitialBuffer();
int shardSize = getShardSize();
if (initialBuffer < numBuckets) {
// If numBuckets buckets are being returned, then at least that many must be stored in memory
throw new IllegalArgumentException(
String.format(
Locale.ROOT,
"%s must be at least %s but was [%s<%s] for [%s]",
INITIAL_BUFFER_FIELD.getPreferredName(),
NUM_BUCKETS_FIELD.getPreferredName(),
initialBuffer,
numBuckets,
name
)
);
}
int mergePhaseInit = VariableWidthHistogramAggregator.mergePhaseInitialBucketCount(shardSize);
if (mergePhaseInit < numBuckets) {
// If the initial buckets from the merge phase is super low we will consistently return too few buckets
throw new IllegalArgumentException(
"3/4 of "
+ SHARD_SIZE_FIELD.getPreferredName()
+ " must be at least "
+ NUM_BUCKETS_FIELD.getPreferredName()
+ " but was ["
+ mergePhaseInit
+ "<"
+ numBuckets
+ "] for ["
+ name
+ "]"
);
}
VariableWidthHistogramAggregatorSupplier aggregatorSupplier = context.getValuesSourceRegistry().getAggregator(REGISTRY_KEY, config);
return new VariableWidthHistogramAggregatorFactory(
name,
config,
numBuckets,
shardSize,
initialBuffer,
context,
parent,
subFactoriesBuilder,
metadata,
aggregatorSupplier
);
}
@Override
protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
builder.field(NUM_BUCKETS_FIELD.getPreferredName(), numBuckets);
return builder;
}
@Override
public int hashCode() {
return Objects.hash(super.hashCode(), numBuckets, shardSize, initialBuffer);
}
@Override
public boolean equals(Object obj) {
if (this == obj) return true;
if (obj == null || getClass() != obj.getClass()) return false;
if (super.equals(obj) == false) return false;
VariableWidthHistogramAggregationBuilder other = (VariableWidthHistogramAggregationBuilder) obj;
return Objects.equals(numBuckets, other.numBuckets)
&& Objects.equals(shardSize, other.shardSize)
&& Objects.equals(initialBuffer, other.initialBuffer);
}
@Override
public String getType() {
return NAME;
}
@Override
public TransportVersion getMinimalSupportedVersion() {
return TransportVersion.zero();
}
}
| VariableWidthHistogramAggregationBuilder |
java | micronaut-projects__micronaut-core | inject-java/src/test/groovy/io/micronaut/aop/introduction/TxInterceptor.java | {
"start": 1077,
"end": 1990
} | class ____ implements MethodInterceptor<Object, Object> {
public static final List<Method> EXECUTED_METHODS = new ArrayList<>();
private final ConversionService conversionService;
public TxInterceptor(ConversionService conversionService) {
this.conversionService = conversionService;
}
@Override
public int getOrder() {
return 0;
}
@Nullable
@Override
public Object intercept(MethodInvocationContext<Object, Object> context) {
EXECUTED_METHODS.add(context.getExecutableMethod().getTargetMethod());
InterceptedMethod interceptedMethod = InterceptedMethod.of(context, conversionService);
try {
return interceptedMethod.handleResult(
interceptedMethod.interceptResult()
);
} catch (Exception e) {
return interceptedMethod.handleException(e);
}
}
}
| TxInterceptor |
java | google__gson | gson/src/test/java/com/google/gson/functional/MoreSpecificTypeSerializationTest.java | {
"start": 5097,
"end": 5259
} | class ____<T> extends ParameterizedBase<T> {
T s;
ParameterizedSub(T t, T s) {
super(t);
this.s = s;
}
}
private static | ParameterizedSub |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/issues/RoutingSlipWithInterceptorTest.java | {
"start": 1607,
"end": 4584
} | class ____ implements InterceptStrategy {
private static final Logger LOGGER = LoggerFactory.getLogger(MyInterceptStrategy.class);
private static int doneCount;
@Override
public Processor wrapProcessorInInterceptors(
final CamelContext context, final NamedNode definition, final Processor target, final Processor nextTarget) {
if (definition instanceof RoutingSlipDefinition<?>) {
final DelegateAsyncProcessor delegateAsyncProcessor = new DelegateAsyncProcessor() {
@Override
public boolean process(final Exchange exchange, final AsyncCallback callback) {
LOGGER.info("I'm doing someting");
return super.process(exchange, new AsyncCallback() {
public void done(final boolean doneSync) {
LOGGER.info("I'm done");
doneCount++;
callback.done(doneSync);
}
});
}
};
delegateAsyncProcessor.setProcessor(target);
return delegateAsyncProcessor;
}
return new DelegateAsyncProcessor(target);
}
public void reset() {
doneCount = 0;
}
}
@Test
public void testRoutingSlipOne() throws Exception {
interceptStrategy.reset();
getMockEndpoint("mock:foo").expectedMessageCount(1);
getMockEndpoint("mock:bar").expectedMessageCount(0);
getMockEndpoint("mock:result").expectedMessageCount(1);
template.sendBodyAndHeader("direct:start", "Hello World", "slip", "direct:foo");
assertMockEndpointsSatisfied();
assertEquals(1, MyInterceptStrategy.doneCount, "Done method shall be called only once");
}
@Test
public void testRoutingSlipTwo() throws Exception {
interceptStrategy.reset();
getMockEndpoint("mock:foo").expectedMessageCount(1);
getMockEndpoint("mock:bar").expectedMessageCount(1);
getMockEndpoint("mock:result").expectedMessageCount(1);
template.sendBodyAndHeader("direct:start", "Hello World", "slip", "direct:foo,direct:bar");
assertMockEndpointsSatisfied();
assertEquals(1, MyInterceptStrategy.doneCount, "Done method shall be called only once");
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
context.getCamelContextExtension().addInterceptStrategy(interceptStrategy);
from("direct:start").routingSlip(header("slip")).to("mock:result");
from("direct:foo").to("log:foo").to("mock:foo");
from("direct:bar").to("log:bar").to("mock:bar");
}
};
}
}
| MyInterceptStrategy |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java | {
"start": 40035,
"end": 40259
} | class ____ the job.
*/
public Class<? extends Reducer> getReducerClass() {
return getClass("mapred.reducer.class",
IdentityReducer.class, Reducer.class);
}
/**
* Set the {@link Reducer} | for |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/reseasy/reactive/ResteasyReactiveProcessorNoClientFilterTest.java | {
"start": 1301,
"end": 1528
} | class ____ {
@RestClient
TestClient client;
@GET
public String hello() {
return "test/subresource";
}
}
@RegisterRestClient(baseUri = "test")
public | TestSubResource |
java | elastic__elasticsearch | x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioProvider.java | {
"start": 354,
"end": 770
} | enum ____ {
OPENAI,
MISTRAL,
META,
MICROSOFT_PHI,
COHERE,
DATABRICKS;
public static final String NAME = "azure_ai_studio_provider";
public static AzureAiStudioProvider fromString(String name) {
return valueOf(name.trim().toUpperCase(Locale.ROOT));
}
@Override
public String toString() {
return name().toLowerCase(Locale.ROOT);
}
}
| AzureAiStudioProvider |
java | reactor__reactor-core | reactor-test/src/main/java/reactor/test/DefaultStepVerifierBuilder.java | {
"start": 56422,
"end": 69590
} | class ____ implements StepVerifier.Assertions {
private final Duration duration;
private final MessageFormatter messageFormatter;
private final HookRecorder hookRecorder;
DefaultStepVerifierAssertions(HookRecorder hookRecorder,
Duration duration,
MessageFormatter messageFormatter) {
this.hookRecorder = hookRecorder;
this.duration = duration;
this.messageFormatter = messageFormatter;
}
private StepVerifier.Assertions satisfies(BooleanSupplier check, Supplier<String> message) {
if (!check.getAsBoolean()) {
throw messageFormatter.assertionError(message.get());
}
return this;
}
@Override
public StepVerifier.Assertions hasDroppedElements() {
return satisfies(hookRecorder::hasDroppedElements,
() -> "Expected dropped elements, none found.");
}
@Override
public StepVerifier.Assertions hasNotDroppedElements() {
return satisfies(hookRecorder::noDroppedElements,
() -> messageFormatter.format("Expected no dropped elements, found <%s>.", hookRecorder.droppedElements));
}
@Override
public StepVerifier.Assertions hasDropped(Object... values) {
//noinspection ConstantConditions
satisfies(() -> values != null && values.length > 0, () -> "Require non-empty values");
List<Object> valuesList = Arrays.asList(values);
return satisfies(() -> hookRecorder.droppedAllOf(valuesList),
() -> messageFormatter.format(
"Expected dropped elements to contain <%s>, was <%s>.",
valuesList, hookRecorder.droppedElements));
}
@Override
public StepVerifier.Assertions hasDroppedExactly(Object... values) {
//noinspection ConstantConditions
satisfies(() -> values != null && values.length > 0, () -> "Require non-empty values");
List<Object> valuesList = Arrays.asList(values);
return satisfies(
() -> hookRecorder.droppedAllOf(valuesList)
&& hookRecorder.droppedElements.size() == valuesList.size(),
() -> messageFormatter.format(
"Expected dropped elements to contain exactly <%s>, was <%s>.",
valuesList, hookRecorder.droppedElements));
}
@Override
public StepVerifier.Assertions hasDiscardedElements() {
return satisfies(hookRecorder::hasDiscardedElements,
() -> "Expected discarded elements, none found.");
}
@Override
public StepVerifier.Assertions hasNotDiscardedElements() {
return satisfies(hookRecorder::noDiscardedElements,
() -> messageFormatter.format("Expected no discarded elements, found <%s>.", hookRecorder.discardedElements));
}
@Override
public StepVerifier.Assertions hasDiscarded(Object... values) {
//noinspection ConstantConditions
satisfies(() -> values != null && values.length > 0, () -> "Require non-empty values");
List<Object> valuesList = Arrays.asList(values);
return satisfies(() -> hookRecorder.discardedAllOf(valuesList),
() -> messageFormatter.format(
"Expected discarded elements to contain <%s>, was <%s>.",
valuesList, hookRecorder.discardedElements));
}
@Override
public StepVerifier.Assertions hasDiscardedExactly(Object... values) {
	// Guard: the caller must name at least one expected discarded element.
	//noinspection ConstantConditions
	satisfies(() -> values != null && values.length > 0, () -> "Require non-empty values");
	List<Object> expected = Arrays.asList(values);
	// Exact match: every expected element was discarded AND nothing else was
	// (containment plus equal cardinality).
	return satisfies(
			() -> hookRecorder.discardedAllOf(expected)
					&& hookRecorder.discardedElements.size() == expected.size(),
			() -> messageFormatter.format(
					"Expected discarded elements to contain exactly <%s>, was <%s>.",
					expected, hookRecorder.discardedElements));
}
@Override
public StepVerifier.Assertions hasDiscardedElementsMatching(Predicate<Collection<Object>> matcher) {
	//noinspection ConstantConditions
	satisfies(() -> matcher != null, () -> "Require non-null matcher");
	// Precondition: there must be discarded elements before matching them.
	hasDiscardedElements();
	// Use messageFormatter (not String.format) so the failure message carries the
	// scenario-name prefix, consistently with every other discard/drop assertion
	// in this class (e.g. hasDiscarded, hasNotDiscardedElements).
	return satisfies(() -> matcher.test(hookRecorder.discardedElements),
			() -> messageFormatter.format(
					"Expected collection of discarded elements matching the given predicate, did not match: <%s>.",
					hookRecorder.discardedElements));
}
@Override
public StepVerifier.Assertions hasDiscardedElementsSatisfying(Consumer<Collection<Object>> asserter) {
	// Guard against a null asserter, then require that something was discarded.
	//noinspection ConstantConditions
	satisfies(() -> asserter != null, () -> "Require non-null asserter");
	hasDiscardedElements();
	// Delegate the actual verification to the user-provided consumer, which is
	// expected to throw (e.g. an AssertionError) on mismatch.
	asserter.accept(hookRecorder.discardedElements);
	return this;
}
@Override
public StepVerifier.Assertions hasNotDroppedErrors() {
	// Passes only if the onErrorDropped hook recorded nothing.
	return satisfies(
			() -> hookRecorder.noDroppedErrors(),
			() -> String.format("Expected no dropped errors, found <%s>.",
					hookRecorder.droppedErrors));
}
@Override
public StepVerifier.Assertions hasDroppedErrors() {
	// At least one error must have been recorded by the onErrorDropped hook.
	return satisfies(
			() -> hookRecorder.hasDroppedErrors(),
			() -> "Expected at least 1 dropped error, none found.");
}
@Override
public StepVerifier.Assertions hasDroppedErrors(int size) {
	// The size is read lazily inside both lambdas so the message always reflects
	// the state observed at evaluation time.
	return satisfies(
			() -> hookRecorder.droppedErrors.size() == size,
			() -> String.format("Expected exactly %d dropped errors, %d found.",
					size, hookRecorder.droppedErrors.size()));
}
@Override
public StepVerifier.Assertions hasDroppedErrorOfType(Class<? extends Throwable> clazz) {
	//noinspection ConstantConditions
	satisfies(() -> clazz != null, () -> "Require non-null clazz");
	// Precondition: exactly one dropped error, so peek() below cannot return null
	// in practice (requireNonNull is defensive).
	hasDroppedErrors(1);
	return satisfies(
			() -> clazz.isInstance(hookRecorder.droppedErrors.peek()),
			() -> String.format("Expected dropped error to be of type %s, was %s.",
					clazz.getCanonicalName(),
					// Fixed typo: "droppped" -> "dropped", consistent with sibling assertions.
					Objects.requireNonNull(hookRecorder.droppedErrors.peek(),
							"dropped error is null!").getClass().getCanonicalName()));
}
@Override
public StepVerifier.Assertions hasDroppedErrorMatching(Predicate<Throwable> matcher) {
	// Guard against a null predicate, then require exactly one dropped error.
	//noinspection ConstantConditions
	satisfies(() -> matcher != null, () -> "Require non-null matcher");
	hasDroppedErrors(1);
	// peek() is evaluated lazily inside the predicate; the precondition above
	// guarantees the queue is non-empty.
	return satisfies(
			() -> matcher.test(Objects.requireNonNull(hookRecorder.droppedErrors.peek(),
					"dropped error is null!")),
			() -> String.format(
					"Expected dropped error matching the given predicate, did not match: <%s>.",
					hookRecorder.droppedErrors.peek()));
}
@Override
public StepVerifier.Assertions hasDroppedErrorWithMessage(String message) {
	// Guard against a null expected message, then require exactly one dropped error.
	//noinspection ConstantConditions
	satisfies(() -> message != null, () -> "Require non-null message");
	hasDroppedErrors(1);
	// Safe after the precondition; the message is captured once for both the
	// predicate and the failure description.
	String actualMessage = Objects.requireNonNull(hookRecorder.droppedErrors.peek(),
			"dropped error is null!").getMessage();
	return satisfies(
			() -> message.equals(actualMessage),
			() -> String.format("Expected dropped error with message <\"%s\">, was <\"%s\">.", message, actualMessage));
}
@Override
public StepVerifier.Assertions hasDroppedErrorWithMessageContaining(
		String messagePart) {
	// Guard against a null fragment, then require exactly one dropped error.
	//noinspection ConstantConditions
	satisfies(() -> messagePart != null, () -> "Require non-null messagePart");
	hasDroppedErrors(1);
	String actualMessage = Objects.requireNonNull(hookRecorder.droppedErrors.peek(),
			"dropped error is null!").getMessage();
	// A null actual message never contains anything, hence the extra null check.
	return satisfies(
			() -> actualMessage != null && actualMessage.contains(messagePart),
			() -> String.format("Expected dropped error with message containing <\"%s\">, was <\"%s\">.", messagePart, actualMessage));
}
@Override
public StepVerifier.Assertions hasDroppedErrorsMatching(Predicate<Collection<Throwable>> matcher) {
	// Guard against a null predicate, then require at least one dropped error.
	//noinspection ConstantConditions
	satisfies(() -> matcher != null, () -> "Require non-null matcher");
	hasDroppedErrors();
	// The predicate is applied to the whole collection of recorded errors.
	return satisfies(
			() -> matcher.test(hookRecorder.droppedErrors),
			() -> String.format(
					"Expected collection of dropped errors matching the given predicate, did not match: <%s>.",
					hookRecorder.droppedErrors));
}
@Override
public StepVerifier.Assertions hasDroppedErrorsSatisfying(Consumer<Collection<Throwable>> asserter) {
	// Guard against a null asserter, then require at least one dropped error.
	//noinspection ConstantConditions
	satisfies(() -> asserter != null, () -> "Require non-null asserter");
	hasDroppedErrors();
	// Delegate verification to the user-provided consumer, which is expected to
	// throw (e.g. an AssertionError) on mismatch.
	asserter.accept(hookRecorder.droppedErrors);
	return this;
}
@Override
public StepVerifier.Assertions hasOperatorErrors() {
	// At least one operator error must have been recorded by the hook.
	return satisfies(
			() -> hookRecorder.hasOperatorErrors(),
			() -> "Expected at least 1 operator error, none found.");
}
@Override
public StepVerifier.Assertions hasOperatorErrors(int size) {
	// The size is read lazily inside both lambdas so the message always reflects
	// the state observed at evaluation time.
	return satisfies(
			() -> hookRecorder.operatorErrors.size() == size,
			() -> String.format(
					"Expected exactly %d operator errors, %d found.",
					size, hookRecorder.operatorErrors.size()));
}
/**
 * Package-private precondition helper: asserts that exactly one operator error was
 * recorded AND that its throwable slot ({@code getT1()}) is populated, so that callers
 * may safely dereference the throwable afterwards without re-checking.
 */
StepVerifier.Assertions hasOneOperatorErrorWithError() {
	// First check: exactly one recorded operator error.
	satisfies(() -> hookRecorder.operatorErrors.size() == 1,
			() -> String.format("Expected exactly one operator error, %d found.", hookRecorder.operatorErrors.size()));
	// Second check (order matters: peek() is safe only once the size check passed):
	// the single error must actually carry a Throwable, not just a value signal.
	satisfies(() -> Objects.requireNonNull(hookRecorder.operatorErrors.peek(),
			"operator error is null!").getT1().isPresent(),
			() -> "Expected exactly one operator error with an actual throwable content, no throwable found.");
	return this;
}
@Override
public StepVerifier.Assertions hasOperatorErrorOfType(Class<? extends Throwable> clazz) {
	//noinspection ConstantConditions
	satisfies(() -> clazz != null, () -> "Require non-null clazz");
	// Precondition: exactly one operator error with a present throwable, which is
	// what makes the unchecked Optional#get() calls below safe.
	hasOneOperatorErrorWithError();
	return satisfies(() -> clazz.isInstance(
			Objects.requireNonNull(hookRecorder.operatorErrors.peek(), "operator error is null")
			       .getT1()
			       .get()),
			() -> String.format(
					"Expected operator error to be of type %s, was %s.",
					clazz.getCanonicalName(),
					// Re-peeks the queue for the failure message; safe for the same reason.
					Objects.requireNonNull(hookRecorder.operatorErrors.peek(),
							"operator error is null!")
					       .getT1()
					       .get()
					       .getClass()
					       .getCanonicalName()));
}
@Override
public StepVerifier.Assertions hasOperatorErrorMatching(Predicate<Throwable> matcher) {
	//noinspection ConstantConditions
	satisfies(() -> matcher != null, () -> "Require non-null matcher");
	// Precondition: exactly one operator error with a present throwable.
	hasOneOperatorErrorWithError();
	return satisfies(
			() -> {
				Tuple2<Optional<Throwable>, Optional<?>> signals =
						Objects.requireNonNull(hookRecorder.operatorErrors.peek(), "operator error is null!");
				// orElse(null) + requireNonNull is redundant with the precondition above,
				// but fails with an NPE rather than NoSuchElementException if state changed.
				return matcher.test(Objects.requireNonNull(signals.getT1().orElse(null)));
			},
			() -> String.format(
					"Expected operator error matching the given predicate, did not match: <%s>.",
					hookRecorder.operatorErrors.peek()));
}
@Override
public StepVerifier.Assertions hasOperatorErrorWithMessage(String message) {
	//noinspection ConstantConditions
	satisfies(() -> message != null, () -> "Require non-null message");
	// Precondition: exactly one operator error with a present throwable, making the
	// eager getT1().get() below safe.
	hasOneOperatorErrorWithError();
	Tuple2<Optional<Throwable>, Optional<?>> signals =
			Objects.requireNonNull(hookRecorder.operatorErrors.peek(), "operator error is null!");
	// Captured once and reused by both the predicate and the failure message.
	String actual = signals.getT1().get().getMessage();
	return satisfies(() -> message.equals(actual),
			() -> String.format("Expected operator error with message <\"%s\">, was <\"%s\">.", message, actual));
}
@Override
public StepVerifier.Assertions hasOperatorErrorWithMessageContaining(
		String messagePart) {
	//noinspection ConstantConditions
	satisfies(() -> messagePart != null, () -> "Require non-null messagePart");
	// Precondition: exactly one operator error with a present throwable, making the
	// eager getT1().get() below safe.
	hasOneOperatorErrorWithError();
	Tuple2<Optional<Throwable>, Optional<?>> signals =
			Objects.requireNonNull(hookRecorder.operatorErrors.peek(), "operator error is null!");
	String actual = signals.getT1().get().getMessage();
	// A null actual message never contains anything, hence the extra null check.
	return satisfies(() -> actual != null && actual.contains(messagePart),
			() -> String.format("Expected operator error with message containing <\"%s\">, was <\"%s\">.", messagePart, actual));
}
@Override
public StepVerifier.Assertions hasOperatorErrorsMatching(Predicate<Collection<Tuple2<Optional<Throwable>, Optional<?>>>> matcher) {
	// Guard against a null predicate, then require at least one operator error.
	//noinspection ConstantConditions
	satisfies(() -> matcher != null, () -> "Require non-null matcher");
	hasOperatorErrors();
	// The predicate is applied to the whole collection of (throwable, value) tuples.
	return satisfies(
			() -> matcher.test(hookRecorder.operatorErrors),
			() -> String.format(
					"Expected collection of operator errors matching the given predicate, did not match: <%s>.",
					hookRecorder.operatorErrors));
}
@Override
public StepVerifier.Assertions hasOperatorErrorsSatisfying(Consumer<Collection<Tuple2<Optional<Throwable>, Optional<?>>>> asserter) {
	// Guard against a null asserter, then require at least one operator error.
	//noinspection ConstantConditions
	satisfies(() -> asserter != null, () -> "Require non-null asserter");
	hasOperatorErrors();
	// Delegate verification to the user-provided consumer, which is expected to
	// throw (e.g. an AssertionError) on mismatch.
	asserter.accept(hookRecorder.operatorErrors);
	return this;
}
@Override
public StepVerifier.Assertions tookLessThan(Duration d) {
	// NOTE: the comparison is inclusive (compareTo <= 0), so a verification that
	// took exactly the given duration also passes.
	return satisfies(
			() -> duration.compareTo(d) <= 0,
			() -> String.format("Expected scenario to be verified in less than %sms, took %sms.",
					d.toMillis(), duration.toMillis()));
}
@Override
public StepVerifier.Assertions tookMoreThan(Duration d) {
	// NOTE: the comparison is inclusive (compareTo >= 0), so a verification that
	// took exactly the given duration also passes.
	return satisfies(
			() -> duration.compareTo(d) >= 0,
			() -> String.format("Expected scenario to be verified in more than %sms, took %sms.",
					d.toMillis(), duration.toMillis()));
}
}
| DefaultStepVerifierAssertions |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/cache/config/EnableCachingIntegrationTests.java | {
"start": 2085,
"end": 5231
} | class ____ {
private ConfigurableApplicationContext context;
@AfterEach
void closeContext() {
if (this.context != null) {
this.context.close();
}
}
@Test
void fooServiceWithInterface() {
this.context = new AnnotationConfigApplicationContext(FooConfig.class);
FooService service = this.context.getBean(FooService.class);
fooGetSimple(service);
}
@Test
void fooServiceWithInterfaceCglib() {
this.context = new AnnotationConfigApplicationContext(FooConfigCglib.class);
FooService service = this.context.getBean(FooService.class);
fooGetSimple(service);
}
private void fooGetSimple(FooService service) {
Cache cache = getCache();
Object key = new Object();
assertCacheMiss(key, cache);
Object value = service.getSimple(key);
assertCacheHit(key, value, cache);
}
@Test // gh-31238
public void cglibProxyClassIsCachedAcrossApplicationContexts() {
ConfigurableApplicationContext ctx;
// Round #1
ctx = new AnnotationConfigApplicationContext(FooConfigCglib.class);
FooService service1 = ctx.getBean(FooService.class);
assertThat(AopUtils.isCglibProxy(service1)).as("FooService #1 is not a CGLIB proxy").isTrue();
ctx.close();
// Round #2
ctx = new AnnotationConfigApplicationContext(FooConfigCglib.class);
FooService service2 = ctx.getBean(FooService.class);
assertThat(AopUtils.isCglibProxy(service2)).as("FooService #2 is not a CGLIB proxy").isTrue();
ctx.close();
assertThat(service1.getClass()).isSameAs(service2.getClass());
}
@Test
void barServiceWithCacheableInterfaceCglib() {
this.context = new AnnotationConfigApplicationContext(BarConfigCglib.class);
BarService service = this.context.getBean(BarService.class);
Cache cache = getCache();
Object key = new Object();
assertCacheMiss(key, cache);
Object value = service.getSimple(key);
assertCacheHit(key, value, cache);
}
@Test
void beanConditionOff() {
this.context = new AnnotationConfigApplicationContext(BeanConditionConfig.class);
FooService service = this.context.getBean(FooService.class);
Cache cache = getCache();
Object key = new Object();
service.getWithCondition(key);
assertCacheMiss(key, cache);
service.getWithCondition(key);
assertCacheMiss(key, cache);
assertThat(this.context.getBean(BeanConditionConfig.Bar.class).count).isEqualTo(2);
}
@Test
void beanConditionOn() {
AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext();
ctx.setEnvironment(new MockEnvironment().withProperty("bar.enabled", "true"));
ctx.register(BeanConditionConfig.class);
ctx.refresh();
this.context = ctx;
FooService service = this.context.getBean(FooService.class);
Cache cache = getCache();
Object key = new Object();
Object value = service.getWithCondition(key);
assertCacheHit(key, value, cache);
value = service.getWithCondition(key);
assertCacheHit(key, value, cache);
assertThat(this.context.getBean(BeanConditionConfig.Bar.class).count).isEqualTo(2);
}
private Cache getCache() {
return this.context.getBean(CacheManager.class).getCache("testCache");
}
@Configuration
static | EnableCachingIntegrationTests |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-kotlin-serialization-common/runtime/src/main/java/io/quarkus/resteasy/reactive/kotlin/serialization/common/runtime/JsonConfig.java | {
"start": 4329,
"end": 4722
} | enum ____ should be decoded case insensitively.
*/
@WithDefault("false")
boolean decodeEnumsCaseInsensitive();
/**
* Specifies if trailing comma is allowed.
*/
@WithDefault("false")
boolean allowTrailingComma();
/**
* Allows parser to accept C/Java-style comments in JSON input.
*/
@WithDefault("false")
boolean allowComments();
}
| values |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/search/arguments/AggregateArgs.java | {
"start": 19097,
"end": 19380
} | interface ____<K, V> {
/**
* Build the operation arguments into the command args.
*
* @param args the command args to build into
*/
void build(CommandArgs<K, V> args);
}
// Helper classes
public static | PipelineOperation |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/namingstrategy/complete/BaseAnnotationBindingTests.java | {
"start": 245,
"end": 598
} | class ____ extends BaseNamingTests {
@Override
protected void applySources(MetadataSources metadataSources) {
metadataSources.addAnnotatedClass( Address.class )
.addAnnotatedClass( Customer.class )
.addAnnotatedClass( Industry.class )
.addAnnotatedClass( Order.class )
.addAnnotatedClass( ZipCode.class );
}
}
| BaseAnnotationBindingTests |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java | {
"start": 3447,
"end": 48875
} | class ____ extends ESAllocationTestCase {
public void testReasonOrdinalOrder() {
UnassignedInfo.Reason[] order = new UnassignedInfo.Reason[] {
UnassignedInfo.Reason.INDEX_CREATED,
UnassignedInfo.Reason.CLUSTER_RECOVERED,
UnassignedInfo.Reason.INDEX_REOPENED,
UnassignedInfo.Reason.DANGLING_INDEX_IMPORTED,
UnassignedInfo.Reason.NEW_INDEX_RESTORED,
UnassignedInfo.Reason.EXISTING_INDEX_RESTORED,
UnassignedInfo.Reason.REPLICA_ADDED,
UnassignedInfo.Reason.ALLOCATION_FAILED,
UnassignedInfo.Reason.NODE_LEFT,
UnassignedInfo.Reason.REROUTE_CANCELLED,
UnassignedInfo.Reason.REINITIALIZED,
UnassignedInfo.Reason.REALLOCATED_REPLICA,
UnassignedInfo.Reason.PRIMARY_FAILED,
UnassignedInfo.Reason.FORCED_EMPTY_PRIMARY,
UnassignedInfo.Reason.MANUAL_ALLOCATION,
UnassignedInfo.Reason.INDEX_CLOSED,
UnassignedInfo.Reason.NODE_RESTARTING,
UnassignedInfo.Reason.UNPROMOTABLE_REPLICA,
UnassignedInfo.Reason.RESHARD_ADDED };
for (int i = 0; i < order.length; i++) {
assertThat(order[i].ordinal(), equalTo(i));
}
assertThat(UnassignedInfo.Reason.values().length, equalTo(order.length));
}
public void testSerialization() throws Exception {
UnassignedInfo.Reason reason = RandomPicks.randomFrom(random(), UnassignedInfo.Reason.values());
int failedAllocations = randomIntBetween(1, 100);
Set<String> failedNodes = IntStream.range(0, between(0, failedAllocations))
.mapToObj(n -> "failed-node-" + n)
.collect(Collectors.toSet());
UnassignedInfo meta;
if (reason == UnassignedInfo.Reason.ALLOCATION_FAILED) {
meta = new UnassignedInfo(
reason,
randomBoolean() ? randomAlphaOfLength(4) : null,
null,
failedAllocations,
System.nanoTime(),
System.currentTimeMillis(),
false,
AllocationStatus.NO_ATTEMPT,
failedNodes,
null
);
} else if (reason == UnassignedInfo.Reason.NODE_LEFT || reason == UnassignedInfo.Reason.NODE_RESTARTING) {
String lastAssignedNodeId = randomAlphaOfLength(10);
if (reason == UnassignedInfo.Reason.NODE_LEFT && randomBoolean()) {
// If the reason is `NODE_LEFT`, sometimes we'll have an empty lastAllocatedNodeId due to BWC
lastAssignedNodeId = null;
}
meta = new UnassignedInfo(
reason,
randomBoolean() ? randomAlphaOfLength(4) : null,
null,
0,
System.nanoTime(),
System.currentTimeMillis(),
false,
AllocationStatus.NO_ATTEMPT,
Set.of(),
lastAssignedNodeId
);
} else {
meta = new UnassignedInfo(reason, randomBoolean() ? randomAlphaOfLength(4) : null);
}
BytesStreamOutput out = new BytesStreamOutput();
meta.writeTo(out);
out.close();
UnassignedInfo read = UnassignedInfo.fromStreamInput(out.bytes().streamInput());
assertThat(read.reason(), equalTo(meta.reason()));
assertThat(read.unassignedTimeMillis(), equalTo(meta.unassignedTimeMillis()));
assertThat(read.message(), equalTo(meta.message()));
assertThat(read.details(), equalTo(meta.details()));
assertThat(read.failedAllocations(), equalTo(meta.failedAllocations()));
assertThat(read.failedNodeIds(), equalTo(meta.failedNodeIds()));
assertThat(read.lastAllocatedNodeId(), equalTo(meta.lastAllocatedNodeId()));
}
public void testIndexCreated() {
Metadata metadata = Metadata.builder()
.put(
IndexMetadata.builder("test")
.settings(settings(IndexVersion.current()))
.numberOfShards(randomIntBetween(1, 3))
.numberOfReplicas(randomIntBetween(0, 3))
)
.build();
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
.metadata(metadata)
.routingTable(
RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY).addAsNew(metadata.getProject().index("test")).build()
)
.build();
for (ShardRouting shard : shardsWithState(clusterState.getRoutingNodes(), UNASSIGNED)) {
assertThat(shard.unassignedInfo().reason(), equalTo(UnassignedInfo.Reason.INDEX_CREATED));
}
}
public void testClusterRecovered() {
Metadata metadata = Metadata.builder()
.put(
IndexMetadata.builder("test")
.settings(settings(IndexVersion.current()))
.numberOfShards(randomIntBetween(1, 3))
.numberOfReplicas(randomIntBetween(0, 3))
)
.build();
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
.metadata(metadata)
.routingTable(
RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY)
.addAsRecovery(metadata.getProject().index("test"))
.build()
)
.build();
for (ShardRouting shard : shardsWithState(clusterState.getRoutingNodes(), UNASSIGNED)) {
assertThat(shard.unassignedInfo().reason(), equalTo(UnassignedInfo.Reason.CLUSTER_RECOVERED));
}
}
public void testIndexClosedAndReopened() {
final var allocationService = createAllocationService();
// cluster state 0: index fully assigned and ready to close
final var metadata0 = Metadata.builder()
.put(
IndexMetadata.builder("test")
.settings(settings(IndexVersion.current()).put(VERIFIED_BEFORE_CLOSE_SETTING.getKey(), true))
.numberOfShards(randomIntBetween(1, 3))
.numberOfReplicas(randomIntBetween(0, 3))
)
.build();
assertThat(metadata0.projects(), aMapWithSize(1));
var entry = metadata0.projects().entrySet().iterator().next();
final ProjectMetadata projectMetadata0 = entry.getValue();
final ProjectId projectId0 = entry.getKey();
final var clusterState0 = applyStartedShardsUntilNoChange(
ClusterState.builder(ClusterState.EMPTY_STATE)
.nodes(
DiscoveryNodes.builder()
.add(newNode("node-1"))
.add(newNode("node-2"))
.add(newNode("node-3"))
.add(newNode("node-4"))
.add(newNode("node-5"))
)
.metadata(metadata0)
.routingTable(
RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY).addAsNew(projectMetadata0.index("test")).build()
)
.build(),
allocationService
);
assertTrue(clusterState0.routingTable().index("test").allShardsActive());
// cluster state 1: perhaps start one of the shards relocating
final var clusterState1 = randomBoolean()
? clusterState0
: allocationService.executeWithRoutingAllocation(clusterState0, "test", routingAllocation -> {
final var indexShardRoutingTable = routingAllocation.routingTable(projectId0).index("test").shard(0);
for (DiscoveryNode node : routingAllocation.nodes()) {
if (routingAllocation.routingNodes().node(node.getId()).getByShardId(indexShardRoutingTable.shardId()) == null) {
routingAllocation.routingNodes()
.relocateShard(indexShardRoutingTable.shard(0), node.getId(), 0L, "test", routingAllocation.changes());
return;
}
}
throw new AssertionError("no suitable target found");
});
// cluster state 2: index closed and fully unassigned
final var metadata1 = Metadata.builder(metadata0)
.put(IndexMetadata.builder(projectMetadata0.index("test")).state(IndexMetadata.State.CLOSE))
.build();
final ProjectMetadata projectMetadata1 = metadata1.getProject();
final var clusterState2 = ClusterState.builder(clusterState1)
.metadata(metadata1)
.routingTable(
RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY, clusterState1.routingTable(projectMetadata1.id()))
.addAsFromOpenToClose(projectMetadata1.index("test"))
)
.build();
assertLastAllocatedNodeIdsAssigned(
UnassignedInfo.Reason.INDEX_CLOSED,
clusterState1.routingTable().index("test"),
clusterState2.routingTable().index("test")
);
// cluster state 3: closed index has been fully assigned
final var clusterState3 = applyStartedShardsUntilNoChange(clusterState2, allocationService);
assertTrue(clusterState3.routingTable().index("test").allShardsActive());
// cluster state 4: index reopened, fully unassigned again
final var metadata4 = Metadata.builder(metadata0)
.put(IndexMetadata.builder(projectMetadata1.index("test")).state(IndexMetadata.State.OPEN))
.build();
final var clusterState4 = ClusterState.builder(clusterState3)
.metadata(metadata4)
.routingTable(
RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY, clusterState3.routingTable())
.addAsFromCloseToOpen(metadata4.getProject().index("test"))
)
.build();
assertLastAllocatedNodeIdsAssigned(
UnassignedInfo.Reason.INDEX_REOPENED,
clusterState3.routingTable().index("test"),
clusterState4.routingTable().index("test")
);
}
private void assertLastAllocatedNodeIdsAssigned(
UnassignedInfo.Reason expectedUnassignedReason,
IndexRoutingTable originalRoutingTable,
IndexRoutingTable finalRoutingTable
) {
final var shardCountChanged = originalRoutingTable.size() != finalRoutingTable.size()
|| originalRoutingTable.shard(0).size() != finalRoutingTable.shard(0).size();
if (shardCountChanged) {
assertThat(expectedUnassignedReason, equalTo(UnassignedInfo.Reason.EXISTING_INDEX_RESTORED));
}
boolean foundAnyNodeIds = false;
for (int shardId = 0; shardId < finalRoutingTable.size(); shardId++) {
final var previousShardRoutingTable = originalRoutingTable.shard(shardId);
final var previousNodes = previousShardRoutingTable == null
? Set.<String>of()
: IntStream.range(0, previousShardRoutingTable.size()).mapToObj(previousShardRoutingTable::shard).map(shard -> {
assertTrue(shard.started() || shard.relocating());
return shard.currentNodeId();
}).collect(Collectors.toSet());
final var shardRoutingTable = finalRoutingTable.shard(shardId);
for (int shardCopy = 0; shardCopy < shardRoutingTable.size(); shardCopy++) {
final var shard = shardRoutingTable.shard(shardCopy);
assertTrue(shard.unassigned());
assertThat(shard.unassignedInfo().reason(), equalTo(expectedUnassignedReason));
final var lastAllocatedNodeId = shard.unassignedInfo().lastAllocatedNodeId();
if (lastAllocatedNodeId == null) {
// restoring an index may change the number of shards/replicas so no guarantee that lastAllocatedNodeId is populated
assertTrue(shardCountChanged);
} else {
foundAnyNodeIds = true;
assertThat(previousNodes, hasItem(lastAllocatedNodeId));
}
}
if (shardCountChanged == false) {
assertNotNull(previousShardRoutingTable);
assertThat(
shardRoutingTable.primaryShard().unassignedInfo().lastAllocatedNodeId(),
equalTo(previousShardRoutingTable.primaryShard().currentNodeId())
);
}
}
// both original and restored index must have at least one shard tho
assertTrue(foundAnyNodeIds);
}
public void testIndexReopened() {
Metadata metadata = Metadata.builder()
.put(
IndexMetadata.builder("test")
.settings(settings(IndexVersion.current()))
.numberOfShards(randomIntBetween(1, 3))
.numberOfReplicas(randomIntBetween(0, 3))
)
.build();
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
.metadata(metadata)
.routingTable(
RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY)
.addAsFromCloseToOpen(metadata.getProject().index("test"))
.build()
)
.build();
for (ShardRouting shard : shardsWithState(clusterState.getRoutingNodes(), UNASSIGNED)) {
assertThat(shard.unassignedInfo().reason(), equalTo(UnassignedInfo.Reason.INDEX_REOPENED));
}
}
public void testNewIndexRestored() {
Metadata metadata = Metadata.builder()
.put(
IndexMetadata.builder("test")
.settings(settings(IndexVersion.current()))
.numberOfShards(randomIntBetween(1, 3))
.numberOfReplicas(randomIntBetween(0, 3))
)
.build();
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
.metadata(metadata)
.routingTable(
RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY)
.addAsNewRestore(
metadata.getProject().index("test"),
new SnapshotRecoverySource(
UUIDs.randomBase64UUID(),
new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())),
IndexVersion.current(),
new IndexId("test", UUIDs.randomBase64UUID(random()))
),
new HashSet<>()
)
.build()
)
.build();
for (ShardRouting shard : shardsWithState(clusterState.getRoutingNodes(), UNASSIGNED)) {
assertThat(shard.unassignedInfo().reason(), equalTo(UnassignedInfo.Reason.NEW_INDEX_RESTORED));
}
}
public void testExistingIndexRestored() {
final var allocationService = createAllocationService();
// cluster state 0: index fully assigned and ready to close
final var metadata0 = Metadata.builder()
.put(
IndexMetadata.builder("test")
.settings(settings(IndexVersion.current()).put(VERIFIED_BEFORE_CLOSE_SETTING.getKey(), true))
.numberOfShards(randomIntBetween(1, 3))
.numberOfReplicas(randomIntBetween(0, 3))
)
.build();
final var clusterState0 = applyStartedShardsUntilNoChange(
ClusterState.builder(ClusterState.EMPTY_STATE)
.nodes(
DiscoveryNodes.builder()
.add(newNode("node-1"))
.add(newNode("node-2"))
.add(newNode("node-3"))
.add(newNode("node-4"))
.add(newNode("node-5"))
)
.metadata(metadata0)
.routingTable(
RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY)
.addAsNew(metadata0.getProject().index("test"))
.build()
)
.build(),
allocationService
);
assertTrue(clusterState0.routingTable().index("test").allShardsActive());
// cluster state 1: index closed and reassigned
final var metadata1 = Metadata.builder(metadata0)
.put(IndexMetadata.builder(metadata0.getProject().index("test")).state(IndexMetadata.State.CLOSE))
.build();
final var clusterState1 = ClusterState.builder(clusterState0)
.metadata(metadata1)
.routingTable(
RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY, clusterState0.routingTable())
.addAsFromOpenToClose(metadata1.getProject().index("test"))
)
.build();
assertLastAllocatedNodeIdsAssigned(
UnassignedInfo.Reason.INDEX_CLOSED,
clusterState0.routingTable().index("test"),
clusterState1.routingTable().index("test")
);
// cluster state 2: closed index has been fully assigned
final var clusterState2 = applyStartedShardsUntilNoChange(clusterState1, allocationService);
assertTrue(clusterState2.routingTable().index("test").allShardsActive());
// cluster state 3: restore started, fully unassigned again (NB may have different number of shards/replicas)
final var metadata3 = Metadata.builder()
.put(
IndexMetadata.builder("test")
.settings(settings(IndexVersion.current()))
.numberOfShards(randomIntBetween(1, 3))
.numberOfReplicas(randomIntBetween(0, 3))
)
.build();
final var clusterState3 = ClusterState.builder(clusterState2)
.metadata(metadata3)
.routingTable(
RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY, clusterState2.routingTable())
.addAsRestore(
metadata3.getProject().index("test"),
new SnapshotRecoverySource(
UUIDs.randomBase64UUID(),
new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())),
IndexVersion.current(),
new IndexId("test", UUIDs.randomBase64UUID(random()))
)
)
)
.build();
assertLastAllocatedNodeIdsAssigned(
UnassignedInfo.Reason.EXISTING_INDEX_RESTORED,
clusterState2.routingTable().index("test"),
clusterState3.routingTable().index("test")
);
}
public void testDanglingIndexImported() {
Metadata metadata = Metadata.builder()
.put(
IndexMetadata.builder("test")
.settings(settings(IndexVersion.current()))
.numberOfShards(randomIntBetween(1, 3))
.numberOfReplicas(randomIntBetween(0, 3))
)
.build();
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
.metadata(metadata)
.routingTable(
RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY)
.addAsFromDangling(metadata.getProject().index("test"))
.build()
)
.build();
for (ShardRouting shard : shardsWithState(clusterState.getRoutingNodes(), UNASSIGNED)) {
assertThat(shard.unassignedInfo().reason(), equalTo(UnassignedInfo.Reason.DANGLING_INDEX_IMPORTED));
}
}
public void testReplicaAdded() {
AllocationService allocation = createAllocationService();
Metadata metadata = Metadata.builder()
.put(IndexMetadata.builder("test").settings(settings(IndexVersion.current())).numberOfShards(1).numberOfReplicas(0))
.build();
final Index index = metadata.getProject().index("test").getIndex();
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
.metadata(metadata)
.routingTable(
RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY).addAsNew(metadata.getProject().index(index)).build()
)
.build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
clusterState = allocation.reroute(clusterState, "reroute", ActionListener.noop());
// starting primaries
clusterState = startInitializingShardsAndReroute(allocation, clusterState);
IndexRoutingTable.Builder builder = IndexRoutingTable.builder(index);
final IndexRoutingTable indexRoutingTable = clusterState.routingTable().index(index);
for (int i = 0; i < indexRoutingTable.size(); i++) {
builder.addIndexShard(new IndexShardRoutingTable.Builder(indexRoutingTable.shard(i)));
}
builder.addReplica(ShardRouting.Role.DEFAULT);
clusterState = ClusterState.builder(clusterState)
.routingTable(RoutingTable.builder(clusterState.routingTable()).add(builder).build())
.build();
assertThat(shardsWithState(clusterState.getRoutingNodes(), UNASSIGNED).size(), equalTo(1));
assertThat(shardsWithState(clusterState.getRoutingNodes(), UNASSIGNED).get(0).unassignedInfo(), notNullValue());
assertThat(
shardsWithState(clusterState.getRoutingNodes(), UNASSIGNED).get(0).unassignedInfo().reason(),
equalTo(UnassignedInfo.Reason.REPLICA_ADDED)
);
}
/**
* The unassigned meta is kept when a shard goes to INITIALIZING, but cleared when it moves to STARTED.
*/
public void testStateTransitionMetaHandling() {
ShardRouting shard = shardRoutingBuilder("test", 1, null, true, ShardRoutingState.UNASSIGNED).withUnassignedInfo(
new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)
).build();
assertThat(shard.unassignedInfo(), notNullValue());
shard = shard.initialize("test_node", null, -1);
assertThat(shard.state(), equalTo(ShardRoutingState.INITIALIZING));
assertThat(shard.unassignedInfo(), notNullValue());
shard = shard.moveToStarted(ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
assertThat(shard.state(), equalTo(ShardRoutingState.STARTED));
assertThat(shard.unassignedInfo(), nullValue());
}
/**
 * Tests that during reroute when a node is detected as leaving the cluster, the right unassigned meta is set
 * (reason NODE_LEFT with a positive unassigned timestamp).
 */
public void testNodeLeave() {
    AllocationService allocation = createAllocationService();
    Metadata metadata = Metadata.builder()
        .put(IndexMetadata.builder("test").settings(settings(IndexVersion.current())).numberOfShards(1).numberOfReplicas(1))
        .build();
    ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
        .metadata(metadata)
        .routingTable(
            RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY).addAsNew(metadata.getProject().index("test")).build()
        )
        .build();
    clusterState = ClusterState.builder(clusterState)
        .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")))
        .build();
    clusterState = allocation.reroute(clusterState, "reroute", ActionListener.noop());
    // starting primaries
    clusterState = startInitializingShardsAndReroute(allocation, clusterState);
    // starting replicas
    clusterState = startInitializingShardsAndReroute(allocation, clusterState);
    // fully allocated: nothing unassigned before the node leaves
    assertThat(clusterState.getRoutingNodes().unassigned().size() > 0, equalTo(false));
    // remove node2 and reroute
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node2")).build();
    clusterState = allocation.disassociateDeadNodes(clusterState, true, "reroute");
    // verify that NODE_LEAVE is the reason for meta
    assertThat(clusterState.getRoutingNodes().unassigned().size() > 0, equalTo(true));
    assertThat(shardsWithState(clusterState.getRoutingNodes(), UNASSIGNED).size(), equalTo(1));
    assertThat(shardsWithState(clusterState.getRoutingNodes(), UNASSIGNED).get(0).unassignedInfo(), notNullValue());
    assertThat(
        shardsWithState(clusterState.getRoutingNodes(), UNASSIGNED).get(0).unassignedInfo().reason(),
        equalTo(UnassignedInfo.Reason.NODE_LEFT)
    );
    // the timestamp must have been captured when the shard became unassigned
    assertThat(
        shardsWithState(clusterState.getRoutingNodes(), UNASSIGNED).get(0).unassignedInfo().unassignedTimeMillis(),
        greaterThan(0L)
    );
}
/**
 * Verifies that when a shard fails, reason is properly set and details are preserved.
 * The expected outcome is a single UNASSIGNED shard whose info carries reason ALLOCATION_FAILED,
 * the failure message/details, and a positive unassigned timestamp.
 */
public void testFailedShard() {
    AllocationService allocation = createAllocationService();
    final var projectId = randomProjectIdOrDefault();
    ProjectMetadata project = ProjectMetadata.builder(projectId)
        .put(IndexMetadata.builder("test").settings(settings(IndexVersion.current())).numberOfShards(1).numberOfReplicas(1))
        .build();
    ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
        .putProjectMetadata(project)
        .putRoutingTable(
            projectId,
            RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY).addAsNew(project.index("test")).build()
        )
        .build();
    clusterState = ClusterState.builder(clusterState)
        .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")))
        .build();
    clusterState = allocation.reroute(clusterState, "reroute", ActionListener.noop());
    // starting primaries
    clusterState = startInitializingShardsAndReroute(allocation, clusterState);
    // starting replicas
    clusterState = startInitializingShardsAndReroute(allocation, clusterState);
    assertThat(clusterState.getRoutingNodes().unassigned().size() > 0, equalTo(false));
    // fail shard
    ShardRouting shardToFail = shardsWithState(clusterState.getRoutingNodes(), STARTED).get(0);
    clusterState = allocation.applyFailedShards(
        clusterState,
        List.of(new FailedShard(shardToFail, "test fail", null, randomBoolean())),
        List.of()
    );
    // verify the reason and details; look the unassigned shard up once instead of re-querying per assertion
    assertThat(clusterState.getRoutingNodes().unassigned().size() > 0, equalTo(true));
    final List<ShardRouting> unassignedShards = shardsWithState(clusterState.getRoutingNodes(), UNASSIGNED);
    assertThat(unassignedShards.size(), equalTo(1));
    final UnassignedInfo unassignedInfo = unassignedShards.get(0).unassignedInfo();
    assertThat(unassignedInfo, notNullValue());
    assertThat(unassignedInfo.reason(), equalTo(UnassignedInfo.Reason.ALLOCATION_FAILED));
    // message and details both embed the failing node id and the original failure text
    assertThat(unassignedInfo.message(), equalTo("failed shard on node [" + shardToFail.currentNodeId() + "]: test fail"));
    assertThat(unassignedInfo.details(), equalTo("failed shard on node [" + shardToFail.currentNodeId() + "]: test fail"));
    assertThat(unassignedInfo.unassignedTimeMillis(), greaterThan(0L));
}
/**
 * Verifies that delayed allocation calculation is correct when there are no registered node shutdowns:
 * the index-level delay setting is used unchanged.
 */
public void testRemainingDelayCalculationWithNoShutdowns() {
    final TimeValue indexLevelDelay = TimeValue.timeValueNanos(10);
    // No shutdown metadata registered, so the expected total delay equals the index-level delay.
    checkRemainingDelayCalculation("bogusNodeId", indexLevelDelay, NodesShutdownMetadata.EMPTY, indexLevelDelay, false);
}
/**
 * Verifies that delayed allocation calculations are correct when there are registered node shutdowns for nodes which are not relevant
 * to the shard currently being evaluated: unrelated shutdowns must not affect the delay.
 */
public void testRemainingDelayCalculationsWithUnrelatedShutdowns() {
    String lastNodeId = "bogusNodeId";
    NodesShutdownMetadata shutdowns = NodesShutdownMetadata.EMPTY;
    int numberOfShutdowns = randomIntBetween(1, 15);
    // Fixed off-by-one: the previous bound (i <= numberOfShutdowns) registered one more shutdown than the
    // randomly chosen count. Harmless for the assertion, but misleading; now exactly numberOfShutdowns entries.
    for (int i = 0; i < numberOfShutdowns; i++) {
        final SingleNodeShutdownMetadata.Type type = randomFrom(EnumSet.allOf(SingleNodeShutdownMetadata.Type.class));
        // REPLACE-type shutdowns require a target node name; other types must not set one
        final String targetNodeName = type == REPLACE ? randomAlphaOfLengthBetween(10, 20) : null;
        shutdowns = shutdowns.putSingleNodeMetadata(
            SingleNodeShutdownMetadata.builder()
                // the shutdown node ids deliberately never match lastNodeId, so they are all unrelated
                .setNodeId(randomValueOtherThan(lastNodeId, () -> randomAlphaOfLengthBetween(5, 10)))
                .setNodeEphemeralId(randomValueOtherThan(lastNodeId, () -> randomAlphaOfLengthBetween(5, 10)))
                .setReason(this.getTestName())
                .setStartedAtMillis(randomNonNegativeLong())
                .setType(type)
                .setTargetNodeName(targetNodeName)
                .setGracePeriod(type == SIGTERM ? randomTimeValue() : null)
                .build()
        );
    }
    checkRemainingDelayCalculation(lastNodeId, TimeValue.timeValueNanos(10), shutdowns, TimeValue.timeValueNanos(10), false);
}
/**
 * Verifies that delay calculation is not impacted when the node the shard was last assigned to was registered for removal
 * (both REMOVE and SIGTERM shutdown types): the index-level delay still applies unchanged.
 */
public void testRemainingDelayCalculationWhenNodeIsShuttingDownForRemoval() {
    for (SingleNodeShutdownMetadata.Type type : List.of(REMOVE, SIGTERM)) {
        String lastNodeId = "bogusNodeId";
        // Register a shutdown for exactly the node the shard was last allocated to.
        NodesShutdownMetadata shutdowns = NodesShutdownMetadata.EMPTY.putSingleNodeMetadata(
            SingleNodeShutdownMetadata.builder()
                .setNodeId(lastNodeId)
                .setNodeEphemeralId(lastNodeId)
                .setReason(this.getTestName())
                .setStartedAtMillis(randomNonNegativeLong())
                .setType(type)
                // SIGTERM shutdowns carry a grace period; REMOVE must not
                .setGracePeriod(type == SIGTERM ? randomTimeValue() : null)
                .build()
        );
        // Expected delay equals the index-level delay: removal-type shutdowns don't override it.
        checkRemainingDelayCalculation(lastNodeId, TimeValue.timeValueNanos(10), shutdowns, TimeValue.timeValueNanos(10), false);
    }
}
/**
 * Verifies that the delay calculation uses the configured delay value for nodes known to be restarting, because they are registered for
 * a `RESTART`-type shutdown, rather than the default global delay.
 */
public void testRemainingDelayCalculationWhenNodeIsKnownToBeRestartingWithCustomDelay() {
    String lastNodeId = "bogusNodeId";
    NodesShutdownMetadata shutdowns = NodesShutdownMetadata.EMPTY.putSingleNodeMetadata(
        SingleNodeShutdownMetadata.builder()
            .setNodeId(lastNodeId)
            .setNodeEphemeralId(lastNodeId)
            .setReason(this.getTestName())
            .setStartedAtMillis(randomNonNegativeLong())
            .setType(SingleNodeShutdownMetadata.Type.RESTART)
            // explicit per-shutdown allocation delay; this must win over the index-level setting
            .setAllocationDelay(TimeValue.timeValueMinutes(1))
            .build()
    );
    // Use a different index-level delay so this test will fail if that one gets used instead of the one from the shutdown metadata
    checkRemainingDelayCalculation(lastNodeId, TimeValue.timeValueNanos(10), shutdowns, TimeValue.timeValueMinutes(1), true);
}
/**
 * Verifies that the delay calculation uses the default delay value for nodes known to be restarting, because they are registered for
 * a `RESTART`-type shutdown, rather than the default global delay.
 */
public void testRemainingDelayCalculationWhenNodeIsKnownToBeRestartingWithDefaultDelay() {
    String lastNodeId = "bogusNodeId";
    // Note that we do not explicitly configure the reallocation delay here.
    NodesShutdownMetadata shutdowns = NodesShutdownMetadata.EMPTY.putSingleNodeMetadata(
        SingleNodeShutdownMetadata.builder()
            .setNodeId(lastNodeId)
            .setNodeEphemeralId(lastNodeId)
            .setReason(this.getTestName())
            .setStartedAtMillis(randomNonNegativeLong())
            .setType(SingleNodeShutdownMetadata.Type.RESTART)
            .build()
    );
    // Use a different index-level delay so this test will fail if that one gets used instead of the one from the shutdown metadata
    checkRemainingDelayCalculation(
        lastNodeId,
        TimeValue.timeValueNanos(10),
        shutdowns,
        // with no explicit allocation delay on the shutdown, the RESTART default applies
        SingleNodeShutdownMetadata.DEFAULT_RESTART_SHARD_ALLOCATION_DELAY,
        true
    );
}
/**
 * Verifies that when the shard became unassigned for a reason other than NODE_RESTARTING, the index-level
 * delay is used even though the last-assigned node has a RESTART shutdown (with its own delay) registered.
 */
public void testRemainingDelayUsesIndexLevelDelayIfNodeWasNotRestartingWhenShardBecameUnassigned() {
    String lastNodeId = "bogusNodeId";
    // Generate a random time value - but don't use nanos as extremely small values of nanos can break assertion calculations
    final TimeValue shutdownDelay = randomTimeValue(
        100,
        1000,
        randomValueOtherThan(TimeUnit.NANOSECONDS, () -> randomFrom(TimeUnit.values()))
    );
    NodesShutdownMetadata shutdowns = NodesShutdownMetadata.EMPTY.putSingleNodeMetadata(
        SingleNodeShutdownMetadata.builder()
            .setNodeId(lastNodeId)
            .setNodeEphemeralId(lastNodeId)
            .setReason(this.getTestName())
            .setStartedAtMillis(randomNonNegativeLong())
            .setType(SingleNodeShutdownMetadata.Type.RESTART)
            .setAllocationDelay(shutdownDelay)
            .build()
    );
    // We want an index level delay that's less than the shutdown delay to avoid picking the index-level delay because it's larger
    final TimeValue indexLevelDelay = randomValueOtherThanMany(
        tv -> shutdownDelay.compareTo(tv) < 0,
        () -> randomTimeValue(1, 1000, randomValueOtherThan(TimeUnit.NANOSECONDS, () -> randomFrom(TimeUnit.values())))
    );
    logger.info("index level delay: {}, shutdown delay: {}", indexLevelDelay, shutdownDelay);
    // nodeRestarting=false: the unassigned reason is NODE_LEFT, so the index-level delay must be chosen
    checkRemainingDelayCalculation(lastNodeId, indexLevelDelay, shutdowns, indexLevelDelay, false);
}
/**
 * Shared assertion helper: builds an {@link UnassignedInfo} anchored at the current nano time and checks that
 * {@code remainingDelay} decreases linearly with elapsed time and bottoms out at zero.
 *
 * @param lastNodeId               node id the shard was last allocated to (matched against shutdown metadata)
 * @param indexLevelTimeoutSetting value for {@code index.unassigned.node_left.delayed_timeout}
 * @param nodeShutdowns            registered node shutdowns to consult
 * @param expectedTotalDelay       the delay the calculation is expected to start from
 * @param nodeRestarting           when true the info uses reason NODE_RESTARTING, otherwise NODE_LEFT
 */
private void checkRemainingDelayCalculation(
    String lastNodeId,
    TimeValue indexLevelTimeoutSetting,
    NodesShutdownMetadata nodeShutdowns,
    TimeValue expectedTotalDelay,
    boolean nodeRestarting
) {
    final long baseTime = System.nanoTime();
    UnassignedInfo unassignedInfo = new UnassignedInfo(
        nodeRestarting ? UnassignedInfo.Reason.NODE_RESTARTING : UnassignedInfo.Reason.NODE_LEFT,
        "test",
        null,
        0,
        baseTime,
        System.currentTimeMillis(),
        randomBoolean(),
        AllocationStatus.NO_ATTEMPT,
        Set.of(),
        lastNodeId
    );
    final long totalDelayNanos = expectedTotalDelay.nanos();
    final Settings indexSettings = Settings.builder()
        .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), indexLevelTimeoutSetting)
        .build();
    // at the anchor time the full delay remains
    long delay = unassignedInfo.remainingDelay(baseTime, indexSettings, nodeShutdowns);
    assertThat(delay, equalTo(totalDelayNanos));
    // part-way through, the remaining delay shrinks by exactly the elapsed nanos
    long delta1 = randomLongBetween(1, (totalDelayNanos - 1));
    delay = unassignedInfo.remainingDelay(baseTime + delta1, indexSettings, nodeShutdowns);
    assertThat(delay, equalTo(totalDelayNanos - delta1));
    // at and past expiry the remaining delay is clamped to zero (never negative)
    delay = unassignedInfo.remainingDelay(baseTime + totalDelayNanos, indexSettings, nodeShutdowns);
    assertThat(delay, equalTo(0L));
    delay = unassignedInfo.remainingDelay(baseTime + totalDelayNanos + randomIntBetween(1, 20), indexSettings, nodeShutdowns);
    assertThat(delay, equalTo(0L));
}
/**
 * Verifies that {@code UnassignedInfo.getNumberOfDelayedUnassigned} counts the replicas whose reallocation
 * was delayed after their node left the cluster.
 */
public void testNumberOfDelayedUnassigned() throws Exception {
    // the mock gateway allocator leaves delayed shards unassigned instead of reallocating them
    MockAllocationService allocation = createAllocationService(Settings.EMPTY, new DelayedShardsMockGatewayAllocator());
    Metadata metadata = Metadata.builder()
        .put(IndexMetadata.builder("test1").settings(settings(IndexVersion.current())).numberOfShards(1).numberOfReplicas(1))
        .put(IndexMetadata.builder("test2").settings(settings(IndexVersion.current())).numberOfShards(1).numberOfReplicas(1))
        .build();
    ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
        .metadata(metadata)
        .routingTable(
            RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY)
                .addAsNew(metadata.getProject().index("test1"))
                .addAsNew(metadata.getProject().index("test2"))
                .build()
        )
        .build();
    clusterState = ClusterState.builder(clusterState)
        .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")))
        .build();
    clusterState = allocation.reroute(clusterState, "reroute", ActionListener.noop());
    assertThat(UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), equalTo(0));
    // starting primaries
    clusterState = startInitializingShardsAndReroute(allocation, clusterState);
    // starting replicas
    clusterState = startInitializingShardsAndReroute(allocation, clusterState);
    assertThat(clusterState.getRoutingNodes().unassigned().size() > 0, equalTo(false));
    // remove node2 and reroute
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node2")).build();
    // make sure both replicas are marked as delayed (i.e. not reallocated)
    clusterState = allocation.disassociateDeadNodes(clusterState, true, "reroute");
    // one replica per index was on node2, so two delayed unassigned shards are expected
    assertThat(clusterState.toString(), UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), equalTo(2));
}
/**
 * Verifies that {@code UnassignedInfo.findNextDelayedAllocation} reports the time until the next delayed
 * allocation fires, i.e. the minimum of the two per-index delays minus any time already elapsed.
 */
public void testFindNextDelayedAllocation() {
    MockAllocationService allocation = createAllocationService(Settings.EMPTY, new DelayedShardsMockGatewayAllocator());
    // two indices with independent (random) delayed-timeout settings
    final TimeValue delayTest1 = TimeValue.timeValueMillis(randomIntBetween(1, 200));
    final TimeValue delayTest2 = TimeValue.timeValueMillis(randomIntBetween(1, 200));
    final long expectMinDelaySettingsNanos = Math.min(delayTest1.nanos(), delayTest2.nanos());
    Metadata metadata = Metadata.builder()
        .put(
            IndexMetadata.builder("test1")
                .settings(
                    settings(IndexVersion.current()).put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), delayTest1)
                )
                .numberOfShards(1)
                .numberOfReplicas(1)
        )
        .put(
            IndexMetadata.builder("test2")
                .settings(
                    settings(IndexVersion.current()).put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), delayTest2)
                )
                .numberOfShards(1)
                .numberOfReplicas(1)
        )
        .build();
    ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
        .metadata(metadata)
        .routingTable(
            RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY)
                .addAsNew(metadata.getProject().index("test1"))
                .addAsNew(metadata.getProject().index("test2"))
                .build()
        )
        .build();
    clusterState = ClusterState.builder(clusterState)
        .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")))
        .build();
    clusterState = allocation.reroute(clusterState, "reroute", ActionListener.noop());
    assertThat(UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), equalTo(0));
    // starting primaries
    clusterState = startInitializingShardsAndReroute(allocation, clusterState);
    // starting replicas
    clusterState = startInitializingShardsAndReroute(allocation, clusterState);
    assertThat(clusterState.getRoutingNodes().unassigned().size() > 0, equalTo(false));
    // remove node2 and reroute; pin the clock so the unassigned timestamps are deterministic
    final long baseTime = System.nanoTime();
    allocation.setNanoTimeOverride(baseTime);
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node2")).build();
    clusterState = allocation.disassociateDeadNodes(clusterState, true, "reroute");
    // optionally advance the clock part of the way (strictly less than the smaller delay)
    final long delta = randomBoolean() ? 0 : randomInt((int) expectMinDelaySettingsNanos - 1);
    if (delta > 0) {
        allocation.setNanoTimeOverride(baseTime + delta);
        clusterState = allocation.reroute(clusterState, "time moved", ActionListener.noop());
    }
    // the next delayed allocation fires when the smaller delay expires, minus elapsed time
    assertThat(UnassignedInfo.findNextDelayedAllocation(baseTime + delta, clusterState), equalTo(expectMinDelaySettingsNanos - delta));
}
/**
 * Round-trips every {@link AllocationStatus} through the wire format and checks it deserializes to itself.
 */
public void testAllocationStatusSerialization() throws IOException {
    for (AllocationStatus status : AllocationStatus.values()) {
        // serialize
        BytesStreamOutput output = new BytesStreamOutput();
        status.writeTo(output);
        // deserialize from the produced bytes and compare
        ByteBufferStreamInput input = new ByteBufferStreamInput(ByteBuffer.wrap(output.bytes().toBytesRef().bytes));
        assertThat(AllocationStatus.readFrom(input), equalTo(status));
    }
}
/**
 * Randomly generates an UnassignedInfo with the given message and a randomly chosen {@code delayed} flag.
 *
 * @param message the message to be used (may be null)
 */
public static UnassignedInfo randomUnassignedInfo(String message) {
    // delegate with delayed == null, letting the overload randomize the flag
    return randomUnassignedInfo(message, null);
}
/**
 * Randomly generates an UnassignedInfo.
 * @param message The message to be used.
 * @param delayed Used for the `delayed` flag if provided; when null the flag may be randomized for
 *                NODE_LEFT / NODE_RESTARTING reasons.
 * @return A randomly-generated UnassignedInfo with the given message and delayed value (if any)
 */
public static UnassignedInfo randomUnassignedInfo(String message, @Nullable Boolean delayed) {
    final UnassignedInfo.Reason reason = randomFrom(UnassignedInfo.Reason.values());
    boolean delayedFlag = Boolean.TRUE.equals(delayed);
    String lastAllocatedNodeId = null;
    // only node-departure reasons may be delayed and carry a last-allocated node id
    final boolean nodeDeparted = reason == UnassignedInfo.Reason.NODE_LEFT || reason == UnassignedInfo.Reason.NODE_RESTARTING;
    if (nodeDeparted) {
        if (randomBoolean() && delayed == null) {
            delayedFlag = true;
        }
        lastAllocatedNodeId = randomIdentifier();
    }
    final boolean allocationFailed = reason == UnassignedInfo.Reason.ALLOCATION_FAILED;
    return new UnassignedInfo(
        reason,
        message,
        null,
        allocationFailed ? 1 : 0,
        System.nanoTime(),
        System.currentTimeMillis(),
        delayedFlag,
        UnassignedInfo.AllocationStatus.NO_ATTEMPT,
        allocationFailed ? Set.of(randomIdentifier()) : Set.of(),
        lastAllocatedNodeId
    );
}
/**
 * Checks that {@code UnassignedInfo.shortSummary()} includes each important field, with optional fields
 * (failed allocations, failed nodes, last node, details) only asserted when present on the info.
 */
public void testSummaryContainsImportantFields() {
    var info = randomUnassignedInfo(randomBoolean() ? randomIdentifier() : null);
    var summary = info.shortSummary();
    assertThat("reason", summary, containsString("[reason=" + info.reason() + ']'));
    // the unassigned timestamp is rendered through the shared date-time formatter
    assertThat(
        "delay",
        summary,
        containsString("at[" + UnassignedInfo.DATE_TIME_FORMATTER.format(Instant.ofEpochMilli(info.unassignedTimeMillis())) + ']')
    );
    if (info.failedAllocations() > 0) {
        assertThat("failed_allocations", summary, containsString("failed_attempts[" + info.failedAllocations() + ']'));
    }
    if (info.failedNodeIds().isEmpty() == false) {
        assertThat("failed_nodes", summary, containsString("failed_nodes[" + info.failedNodeIds() + ']'));
    }
    assertThat("delayed", summary, containsString("delayed=" + info.delayed()));
    if (info.lastAllocatedNodeId() != null) {
        assertThat("last_node", summary, containsString("last_node[" + info.lastAllocatedNodeId() + ']'));
    }
    if (info.message() != null) {
        assertThat("details", summary, containsString("details[" + info.message() + ']'));
    }
    assertThat("allocation_status", summary, containsString("allocation_status[" + info.lastAllocationStatus().value() + ']'));
}
}
| UnassignedInfoTests |
java | quarkusio__quarkus | core/deployment/src/main/java/io/quarkus/deployment/steps/NativeImageFeatureStep.java | {
"start": 3119,
"end": 4650
} | class ____ provided by org.graalvm.sdk module and with 23.1 onwards, it's provided by org.graalvm.nativeimage instead
features.produce(new JPMSExportBuildItem("org.graalvm.sdk", "org.graalvm.nativeimage.impl", null,
GraalVM.Version.VERSION_23_1_0));
features.produce(new JPMSExportBuildItem("org.graalvm.nativeimage", "org.graalvm.nativeimage.impl",
GraalVM.Version.VERSION_23_1_0));
}
@BuildStep
void generateFeature(BuildProducer<GeneratedNativeImageClassBuildItem> nativeImageClass,
List<RuntimeInitializedClassBuildItem> runtimeInitializedClassBuildItems,
List<RuntimeInitializedPackageBuildItem> runtimeInitializedPackageBuildItems,
List<RuntimeReinitializedClassBuildItem> runtimeReinitializedClassBuildItems,
List<UnsafeAccessedFieldBuildItem> unsafeAccessedFields,
NativeConfig nativeConfig,
LocalesBuildTimeConfig localesBuildTimeConfig) {
ClassCreator file = new ClassCreator(new ClassOutput() {
@Override
public void write(String s, byte[] bytes) {
nativeImageClass.produce(new GeneratedNativeImageClassBuildItem(s, bytes));
}
}, GRAAL_FEATURE, null,
Object.class.getName(), Feature.class.getName());
// Add getDescription method
MethodCreator getDescription = file.getMethodCreator("getDescription", String.class);
getDescription.returnValue(getDescription.load("Auto-generated | was |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/node/Node.java | {
"start": 7878,
"end": 28950
} | class ____ referred to
* in InternalSettingsPreparer#finalizeSettings, which runs when creating the Environment, before logging is
* initialized.
*/
private final Logger logger = LogManager.getLogger(Node.class);
// dependency-injection container holding all node-level services, populated during construction
private final Injector injector;
// node environment/settings; plugins may have contributed additional settings
private final Environment environment;
private final NodeEnvironment nodeEnvironment;
private final PluginsService pluginsService;
// client used by this node to execute actions against its own cluster
private final NodeClient client;
// plugin-provided components whose start/stop lifecycle this node drives
private final Collection<LifecycleComponent> pluginLifecycleComponents;
private final LocalNodeFactory localNodeFactory;
private final NodeService nodeService;
private final TerminationHandler terminationHandler;
// for testing
final NamedWriteableRegistry namedWriteableRegistry;
final NamedXContentRegistry namedXContentRegistry;
/**
 * Constructs a node
 *
 * @param environment the initial environment for this node, which will be added to by plugins
 * @param pluginsLoader the loader used to discover and load plugins
 */
public Node(Environment environment, PluginsLoader pluginsLoader) {
    // delegate all wiring to NodeConstruction; the final 'true' enables forbidden-API/entitlement setup per its contract
    this(NodeConstruction.prepareConstruction(environment, pluginsLoader, new NodeServiceProvider(), true));
}
/**
 * Constructs a node using information from {@code construction}; simply unpacks the pre-built
 * components into this node's fields.
 */
Node(NodeConstruction construction) {
    injector = construction.injector();
    environment = construction.environment();
    nodeEnvironment = construction.nodeEnvironment();
    pluginsService = construction.pluginsService();
    client = construction.client();
    pluginLifecycleComponents = construction.pluginLifecycleComponents();
    localNodeFactory = construction.localNodeFactory();
    nodeService = construction.nodeService();
    terminationHandler = construction.terminationHandler();
    namedWriteableRegistry = construction.namedWriteableRegistry();
    namedXContentRegistry = construction.namedXContentRegistry();
}
/**
 * If the JVM was started with the Elastic APM agent and a config file argument was specified, then
 * delete the config file. The agent only reads it once, when supplied in this fashion, and it
 * may contain a secret token.
 * <p>
 * Public for testing only
 */
@SuppressForbidden(reason = "Cannot guarantee that the temp config path is relative to the environment")
public static void deleteTemporaryApmConfig(JvmInfo jvmInfo, BiConsumer<Exception, Path> errorHandler) {
    // Matches the path of the bundled APM agent jar, e.g. "<...>/modules/apm/elastic-apm-agent-java8-1.2.3.jar"
    final String agentJarRegex = String.join(
        "\\" + File.separator,
        ".*modules",
        "apm",
        "elastic-apm-agent-java8-\\d+\\.\\d+\\.\\d+\\.jar"
    );
    for (String inputArgument : jvmInfo.getInputArguments()) {
        if (inputArgument.startsWith("-javaagent:") == false) {
            continue;
        }
        // strip the "-javaagent:" prefix and separate the jar path from its (optional) agent options
        final String[] parts = inputArgument.substring(11).split("=", 2);
        if (parts[0].matches(agentJarRegex) == false) {
            continue;
        }
        // only a "c=<path>" option points at a config file we may need to clean up
        if (parts.length == 2 && parts[1].startsWith("c=")) {
            final Path apmConfig = PathUtils.get(parts[1].substring(2));
            // only delete the temporary config files we generate ourselves
            if (apmConfig.getFileName().toString().matches("^\\.elstcapm\\..*\\.tmp")) {
                try {
                    Files.deleteIfExists(apmConfig);
                } catch (IOException e) {
                    errorHandler.accept(e, apmConfig);
                }
            }
        }
        // only the first matching agent argument is considered
        return;
    }
}
/**
 * The settings that are used by this node. Contains original settings as well as additional settings provided by plugins.
 *
 * @return the effective node settings, as held by this node's {@link Environment}
 */
public Settings settings() {
    return this.environment.settings();
}
/**
 * A client that can be used to execute actions (operations) against the cluster.
 *
 * @return the node-local client
 */
public Client client() {
    return client;
}
/**
 * Returns the environment of the node.
 *
 * @return the node's {@link Environment}
 */
public Environment getEnvironment() {
    return environment;
}
/**
 * Returns the {@link NodeEnvironment} instance of this node.
 *
 * @return the node's on-disk environment (data paths, lock, etc.)
 */
public NodeEnvironment getNodeEnvironment() {
    return nodeEnvironment;
}
/**
 * Start the node. If the node is already started, this method is no-op.
 * <p>
 * NOTE: the order of the start calls below is load-bearing — services are started in dependency order
 * (e.g. the transport service before the cluster service, bootstrap checks before accepting requests).
 *
 * @return this node, for chaining
 * @throws NodeValidationException if the bootstrap checks fail
 */
public Node start() throws NodeValidationException {
    // moveToStarted returns false when already started (or starting) — make start() idempotent
    if (lifecycle.moveToStarted() == false) {
        return this;
    }
    logger.info("starting ...");
    // plugin components start first so they are available to the core services below
    pluginLifecycleComponents.forEach(LifecycleComponent::start);
    injector.getInstance(MappingUpdatedAction.class).setClient(client);
    injector.getInstance(IndicesService.class).start();
    injector.getInstance(IndicesClusterStateService.class).start();
    injector.getInstance(SnapshotsService.class).start();
    injector.getInstance(SnapshotShardsService.class).start();
    injector.getInstance(RepositoriesService.class).start();
    injector.getInstance(SearchService.class).start();
    injector.getInstance(FsHealthService.class).start();
    injector.getInstance(NodeMetrics.class).start();
    injector.getInstance(IndicesMetrics.class).start();
    injector.getInstance(HealthPeriodicLogger.class).start();
    injector.getInstance(SamplingService.class).start();
    nodeService.getMonitorService().start();
    final ClusterService clusterService = injector.getInstance(ClusterService.class);
    final NodeConnectionsService nodeConnectionsService = injector.getInstance(NodeConnectionsService.class);
    nodeConnectionsService.start();
    clusterService.setNodeConnectionsService(nodeConnectionsService);
    injector.getInstance(GatewayService.class).start();
    final Coordinator coordinator = injector.getInstance(Coordinator.class);
    clusterService.getMasterService().setClusterStatePublisher(coordinator);
    // Start the transport service now so the publish address will be added to the local disco node in ClusterService
    TransportService transportService = injector.getInstance(TransportService.class);
    transportService.getTaskManager().setTaskResultsService(injector.getInstance(TaskResultsService.class));
    transportService.getTaskManager().setTaskCancellationService(new TaskCancellationService(transportService));
    transportService.start();
    assert localNodeFactory.getNode() != null;
    assert transportService.getLocalNode().equals(localNodeFactory.getNode())
        : "transportService has a different local node than the factory provided";
    injector.getInstance(PeerRecoverySourceService.class).start();
    // Load (and maybe upgrade) the metadata stored on disk
    final GatewayMetaState gatewayMetaState = injector.getInstance(GatewayMetaState.class);
    gatewayMetaState.start(
        settings(),
        transportService,
        clusterService,
        injector.getInstance(MetaStateService.class),
        injector.getInstance(IndexMetadataVerifier.class),
        injector.getInstance(MetadataUpgrader.class),
        injector.getInstance(PersistedClusterStateService.class),
        pluginsService.filterPlugins(ClusterCoordinationPlugin.class).toList(),
        injector.getInstance(CompatibilityVersions.class)
    );
    // TODO: Do not expect that the legacy metadata file is always present https://github.com/elastic/elasticsearch/issues/95211
    if (Assertions.ENABLED && DiscoveryNode.isStateless(settings()) == false) {
        try {
            // sanity-check that the on-disk node metadata matches this node's version and id
            final NodeMetadata nodeMetadata = NodeMetadata.FORMAT.loadLatestState(
                logger,
                NamedXContentRegistry.EMPTY,
                nodeEnvironment.nodeDataPaths()
            );
            assert nodeMetadata != null;
            assert nodeMetadata.nodeVersion().equals(BuildVersion.current());
            assert nodeMetadata.nodeId().equals(localNodeFactory.getNode().getId());
        } catch (IOException e) {
            assert false : e;
        }
    }
    // we load the global state here (the persistent part of the cluster state stored on disk) to
    // pass it to the bootstrap checks to allow plugins to enforce certain preconditions based on the recovered state.
    final Metadata onDiskMetadata = gatewayMetaState.getPersistedState().getLastAcceptedState().metadata();
    assert onDiskMetadata != null : "metadata is null but shouldn't"; // this is never null
    validateNodeBeforeAcceptingRequests(
        new BootstrapContext(environment, onDiskMetadata),
        transportService.boundAddress(),
        pluginsService.flatMap(Plugin::getBootstrapChecks).toList()
    );
    final FileSettingsService fileSettingsService = injector.getInstance(FileSettingsService.class);
    fileSettingsService.start();
    clusterService.addStateApplier(transportService.getTaskManager());
    // start after transport service so the local disco is known
    coordinator.start(); // start before cluster service so that it can set initial state on ClusterApplierService
    clusterService.start();
    assert clusterService.localNode().equals(localNodeFactory.getNode())
        : "clusterService has a different local node than the factory provided";
    transportService.acceptIncomingRequests();
    /*
     * CoordinationDiagnosticsService expects to be able to send transport requests and use the cluster state, so it is important to
     * start it here after the clusterService and transportService have been started.
     */
    injector.getInstance(CoordinationDiagnosticsService.class).start();
    coordinator.startInitialJoin();
    final TimeValue initialStateTimeout = INITIAL_STATE_TIMEOUT_SETTING.get(settings());
    configureNodeAndClusterIdStateListener(clusterService);
    if (initialStateTimeout.millis() > 0) {
        final ThreadPool thread = injector.getInstance(ThreadPool.class);
        ClusterState clusterState = clusterService.state();
        ClusterStateObserver observer = new ClusterStateObserver(clusterState, clusterService, null, logger, thread.getThreadContext());
        if (clusterState.nodes().getMasterNodeId() == null) {
            // block until a master is elected, the cluster service closes, or the timeout elapses
            logger.debug("waiting to join the cluster. timeout [{}]", initialStateTimeout);
            final CountDownLatch latch = new CountDownLatch(1);
            observer.waitForNextChange(new ClusterStateObserver.Listener() {
                @Override
                public void onNewClusterState(ClusterState state) {
                    latch.countDown();
                }

                @Override
                public void onClusterServiceClose() {
                    latch.countDown();
                }

                @Override
                public void onTimeout(TimeValue timeout) {
                    logger.warn(
                        "timed out after [{}={}] while waiting for initial discovery state; for troubleshooting guidance see [{}]",
                        INITIAL_STATE_TIMEOUT_SETTING.getKey(),
                        initialStateTimeout,
                        ReferenceDocs.DISCOVERY_TROUBLESHOOTING
                    );
                    latch.countDown();
                }
            }, state -> state.nodes().getMasterNodeId() != null, initialStateTimeout);
            try {
                latch.await();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new ElasticsearchTimeoutException("Interrupted while waiting for initial discovery state");
            }
        }
    }
    // ------- DO NOT ADD NEW START CALLS BELOW HERE -------
    injector.getInstance(HttpServerTransport.class).start();
    if (ReadinessService.enabled(environment)) {
        injector.getInstance(ReadinessService.class).start();
    }
    if (WRITE_PORTS_FILE_SETTING.get(settings())) {
        // publish the bound port numbers to files so external tooling (e.g. tests) can discover them
        TransportService transport = injector.getInstance(TransportService.class);
        writePortsFile("transport", transport.boundAddress());
        HttpServerTransport http = injector.getInstance(HttpServerTransport.class);
        writePortsFile("http", http.boundAddress());
        if (ReadinessService.enabled(environment)) {
            ReadinessService readiness = injector.getInstance(ReadinessService.class);
            readiness.addBoundAddressListener(address -> writePortsFile("readiness", address));
        }
        if (RemoteClusterPortSettings.REMOTE_CLUSTER_SERVER_ENABLED.get(environment.settings())) {
            writePortsFile("remote_cluster", transport.boundRemoteAccessAddress());
        }
    }
    logger.info("started {}", transportService.getLocalNode());
    pluginsService.filterPlugins(ClusterPlugin.class).forEach(ClusterPlugin::onNodeStarted);
    return this;
}
/**
 * Installs the listener that records this node's id and the cluster id once known.
 * Overridable so subclasses (e.g. test nodes) can customize or disable this behavior.
 */
protected void configureNodeAndClusterIdStateListener(ClusterService clusterService) {
    NodeAndClusterIdStateListener.getAndSetNodeIdAndClusterId(
        clusterService,
        injector.getInstance(ThreadPool.class).getThreadContext()
    );
}
/**
 * Stop the node. No-op when not started. The stop order below is deliberately (roughly) the reverse
 * of the start order — see the inline comments for the order-sensitive steps.
 */
private void stop() {
    // moveToStopped returns false when not in a stoppable state — make stop() idempotent
    if (lifecycle.moveToStopped() == false) {
        return;
    }
    logger.info("stopping ...");
    if (ReadinessService.enabled(environment)) {
        stopIfStarted(ReadinessService.class);
    }
    // We stop the health periodic logger first since certain checks won't be possible anyway
    stopIfStarted(HealthPeriodicLogger.class);
    stopIfStarted(SamplingService.class);
    stopIfStarted(FileSettingsService.class);
    injector.getInstance(ResourceWatcherService.class).close();
    stopIfStarted(HttpServerTransport.class);
    stopIfStarted(SnapshotsService.class);
    stopIfStarted(SnapshotShardsService.class);
    stopIfStarted(RepositoriesService.class);
    // stop any changes happening as a result of cluster state changes
    stopIfStarted(IndicesClusterStateService.class);
    // close cluster coordinator early to not react to pings anymore.
    // This can confuse other nodes and delay things - mostly if we're the master and we're running tests.
    stopIfStarted(Coordinator.class);
    // we close indices first, so operations won't be allowed on it
    stopIfStarted(ClusterService.class);
    stopIfStarted(NodeConnectionsService.class);
    stopIfStarted(FsHealthService.class);
    stopIfStarted(nodeService.getMonitorService());
    stopIfStarted(GatewayService.class);
    stopIfStarted(SearchService.class);
    stopIfStarted(TransportService.class);
    stopIfStarted(NodeMetrics.class);
    stopIfStarted(IndicesMetrics.class);
    // plugin-provided lifecycle components stop after the core services they may depend on
    pluginLifecycleComponents.forEach(Node::stopIfStarted);
    // we should stop this last since it waits for resources to get released
    // if we had scroll searchers etc or recovery going on we wait for to finish.
    stopIfStarted(IndicesService.class);
    logger.info("stopped");
}
// Convenience overload: resolve the component from the injector, then stop it if it is running.
private <T extends LifecycleComponent> void stopIfStarted(Class<T> componentClass) {
    stopIfStarted(injector.getInstance(componentClass));
}
// Stops the component only when it actually reached STARTED; safe to call for components that never started.
private static void stopIfStarted(LifecycleComponent component) {
    // if we failed during startup then some of our components might not have started yet
    if (component.lifecycleState() == Lifecycle.State.STARTED) {
        component.stop();
    }
}
// During concurrent close() calls we want to make sure that all of them return after the node has completed it's shutdown cycle.
// If not, the hook that is added in Bootstrap#setup() will be useless:
// close() might not be executed, in case another (for example api) call to close() has already set some lifecycles to stopped.
// In this case the process will be terminated even if the first call to close() has not finished yet.
@Override
public synchronized void close() throws IOException {
synchronized (lifecycle) {
if (lifecycle.started()) {
stop();
}
if (lifecycle.moveToClosed() == false) {
return;
}
}
logger.info("closing ...");
List<Closeable> toClose = new ArrayList<>();
StopWatch stopWatch = new StopWatch("node_close");
toClose.add(() -> stopWatch.start("node_service"));
toClose.add(nodeService);
toClose.add(() -> stopWatch.stop().start("http"));
toClose.add(injector.getInstance(HttpServerTransport.class));
toClose.add(() -> stopWatch.stop().start("snapshot_service"));
toClose.add(injector.getInstance(SnapshotsService.class));
toClose.add(injector.getInstance(SnapshotShardsService.class));
toClose.add(injector.getInstance(RepositoriesService.class));
toClose.add(() -> stopWatch.stop().start("indices_cluster"));
toClose.add(injector.getInstance(IndicesClusterStateService.class));
toClose.add(() -> stopWatch.stop().start("indices"));
toClose.add(injector.getInstance(IndicesService.class));
// close filter/fielddata caches after indices
toClose.add(injector.getInstance(IndicesStore.class));
toClose.add(injector.getInstance(PeerRecoverySourceService.class));
toClose.add(() -> stopWatch.stop().start("cluster"));
toClose.add(injector.getInstance(ClusterService.class));
toClose.add(() -> stopWatch.stop().start("node_connections_service"));
toClose.add(injector.getInstance(NodeConnectionsService.class));
toClose.add(() -> stopWatch.stop().start("cluster_coordinator"));
toClose.add(injector.getInstance(Coordinator.class));
toClose.add(() -> stopWatch.stop().start("monitor"));
toClose.add(nodeService.getMonitorService());
toClose.add(() -> stopWatch.stop().start("fsHealth"));
toClose.add(injector.getInstance(FsHealthService.class));
toClose.add(() -> stopWatch.stop().start("gateway"));
toClose.add(injector.getInstance(GatewayService.class));
toClose.add(() -> stopWatch.stop().start("search"));
toClose.add(injector.getInstance(SearchService.class));
toClose.add(() -> stopWatch.stop().start("transport"));
toClose.add(injector.getInstance(TransportService.class));
toClose.add(injector.getInstance(NodeMetrics.class));
toClose.add(injector.getInstance(IndicesMetrics.class));
if (ReadinessService.enabled(environment)) {
toClose.add(injector.getInstance(ReadinessService.class));
}
toClose.add(injector.getInstance(FileSettingsService.class));
toClose.add(injector.getInstance(HealthPeriodicLogger.class));
toClose.add(injector.getInstance(SamplingService.class));
for (LifecycleComponent plugin : pluginLifecycleComponents) {
toClose.add(() -> stopWatch.stop().start("plugin(" + plugin.getClass().getName() + ")"));
toClose.add(plugin);
}
pluginsService.filterPlugins(Plugin.class).forEach(toClose::add);
toClose.add(() -> stopWatch.stop().start("script"));
toClose.add(injector.getInstance(ScriptService.class));
toClose.add(() -> stopWatch.stop().start("thread_pool"));
toClose.add(() -> injector.getInstance(ThreadPool.class).shutdown());
// Don't call shutdownNow here, it might break ongoing operations on Lucene indices.
// See https://issues.apache.org/jira/browse/LUCENE-7248. We call shutdownNow in
// awaitClose if the node doesn't finish closing within the specified time.
toClose.add(() -> stopWatch.stop().start("gateway_meta_state"));
toClose.add(injector.getInstance(GatewayMetaState.class));
toClose.add(() -> stopWatch.stop().start("node_environment"));
toClose.add(injector.getInstance(NodeEnvironment.class));
toClose.add(stopWatch::stop);
if (logger.isTraceEnabled()) {
toClose.add(() -> logger.trace("Close times for each service:\n{}", stopWatch.prettyPrint()));
}
IOUtils.close(toClose);
logger.info("closed");
}
/**
* Invokes hooks to prepare this node to be closed. This should be called when Elasticsearch receives a request to shut down
* gracefully from the underlying operating system, before system resources are closed. This method will block
* until the node is ready to shut down.
* <p>
* Note that this | is |
java | apache__camel | core/camel-management/src/main/java/org/apache/camel/management/CompositePerformanceCounter.java | {
"start": 1238,
"end": 3234
} | class ____ implements PerformanceCounter {
private final PerformanceCounter counter1;
private final PerformanceCounter counter2;
private final PerformanceCounter counter3; // counter 3 is optional
public CompositePerformanceCounter(PerformanceCounter counter1, PerformanceCounter counter2, PerformanceCounter counter3) {
this.counter1 = counter1;
this.counter2 = counter2;
this.counter3 = counter3;
}
@Override
public void processExchange(Exchange exchange, String type) {
if (counter1.isStatisticsEnabled()) {
counter1.processExchange(exchange, type);
}
if (counter2.isStatisticsEnabled()) {
counter2.processExchange(exchange, type);
}
if (counter3 != null && counter3.isStatisticsEnabled()) {
counter3.processExchange(exchange, type);
}
}
@Override
public void completedExchange(Exchange exchange, long time) {
if (counter1.isStatisticsEnabled()) {
counter1.completedExchange(exchange, time);
}
if (counter2.isStatisticsEnabled()) {
counter2.completedExchange(exchange, time);
}
if (counter3 != null && counter3.isStatisticsEnabled()) {
counter3.completedExchange(exchange, time);
}
}
@Override
public void failedExchange(Exchange exchange) {
if (counter1.isStatisticsEnabled()) {
counter1.failedExchange(exchange);
}
if (counter2.isStatisticsEnabled()) {
counter2.failedExchange(exchange);
}
if (counter3 != null && counter3.isStatisticsEnabled()) {
counter3.failedExchange(exchange);
}
}
@Override
public boolean isStatisticsEnabled() {
// this method is not used
return true;
}
@Override
public void setStatisticsEnabled(boolean statisticsEnabled) {
// this method is not used
}
}
| CompositePerformanceCounter |
java | apache__logging-log4j2 | log4j-core-test/src/test/java/org/apache/logging/log4j/core/selector/BasicContextSelectorTest.java | {
"start": 1286,
"end": 2019
} | class ____ {
@BeforeAll
static void beforeClass() {
System.setProperty(Constants.LOG4J_CONTEXT_SELECTOR, BasicContextSelector.class.getName());
}
@AfterAll
static void afterClass() {
System.clearProperty(Constants.LOG4J_CONTEXT_SELECTOR);
}
@Test
void testLogManagerShutdown() {
final LoggerContext context = (LoggerContext) LogManager.getContext();
assertEquals(LifeCycle.State.STARTED, context.getState());
LogManager.shutdown();
assertEquals(LifeCycle.State.STOPPED, context.getState());
}
@Test
void testNotDependentOnClassLoader() {
assertFalse(LogManager.getFactory().isClassLoaderDependent());
}
}
| BasicContextSelectorTest |
java | apache__dubbo | dubbo-rpc/dubbo-rpc-api/src/main/java/org/apache/dubbo/rpc/ExporterListener.java | {
"start": 1041,
"end": 1499
} | interface ____ {
/**
* The exporter exported.
*
* @param exporter
* @throws RpcException
* @see org.apache.dubbo.rpc.Protocol#export(Invoker)
*/
void exported(Exporter<?> exporter) throws RpcException;
/**
* The exporter unexported.
*
* @param exporter
* @throws RpcException
* @see org.apache.dubbo.rpc.Exporter#unexport()
*/
void unexported(Exporter<?> exporter);
}
| ExporterListener |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/synonyms/AbstractSynonymsPagedResultAction.java | {
"start": 1514,
"end": 1749
} | class ____<T extends ActionResponse> extends ActionType<T> {
public AbstractSynonymsPagedResultAction(String name, Writeable.Reader<T> reader) {
super(name);
}
/**
* Base request | AbstractSynonymsPagedResultAction |
java | reactor__reactor-core | benchmarks/src/main/java/reactor/core/CompositeDisposableHashcodeBenchmark.java | {
"start": 1407,
"end": 1542
} | class ____ {
@Param({"1", "31", "1024", "10000"})
public static int elementCount;
private static | CompositeDisposableHashcodeBenchmark |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/transport/RemoteConnectionInfo.java | {
"start": 929,
"end": 1048
} | class ____ all remote cluster information to be rendered on
* {@code _remote/info} requests.
*/
public final | encapsulates |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/context/event/AbstractApplicationEventMulticaster.java | {
"start": 19188,
"end": 19261
} | class ____ encapsulates a general set of target listeners.
*/
private | that |
java | apache__flink | flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/join/temporal/TemporalRowTimeJoinOperator.java | {
"start": 17701,
"end": 18494
} | class ____ implements Comparator<RowData>, Serializable {
private static final long serialVersionUID = 8160134014590716914L;
private final int timeAttribute;
private RowtimeComparator(int timeAttribute) {
this.timeAttribute = timeAttribute;
}
@Override
public int compare(RowData o1, RowData o2) {
long o1Time = o1.getLong(timeAttribute);
long o2Time = o2.getLong(timeAttribute);
return Long.compare(o1Time, o2Time);
}
}
@VisibleForTesting
static String getNextLeftIndexStateName() {
return NEXT_LEFT_INDEX_STATE_NAME;
}
@VisibleForTesting
static String getRegisteredTimerStateName() {
return REGISTERED_TIMER_STATE_NAME;
}
}
| RowtimeComparator |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/ingest/IngestMetadata.java | {
"start": 1407,
"end": 1937
} | class ____ implements Metadata.ProjectCustom {
public static final String TYPE = "ingest";
private static final ParseField PIPELINES_FIELD = new ParseField("pipeline");
private static final ObjectParser<List<PipelineConfiguration>, Void> INGEST_METADATA_PARSER = new ObjectParser<>(
"ingest_metadata",
ArrayList::new
);
static {
INGEST_METADATA_PARSER.declareObjectArray(List::addAll, PipelineConfiguration.getParser(), PIPELINES_FIELD);
}
// We can't use Pipeline | IngestMetadata |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/sql/ast/tree/expression/Collation.java | {
"start": 434,
"end": 920
} | class ____ implements SqlExpressible, SqlAstNode {
private final String collation;
public Collation(String collation) {
this.collation = collation;
}
public String getCollation() {
return collation;
}
@Override
public void accept(SqlAstWalker walker) {
walker.visitCollation( this );
}
@Override
public int forEachJdbcType(int offset, IndexedConsumer<JdbcMapping> action) {
return 0;
}
@Override
public JdbcMapping getJdbcMapping() {
return null;
}
}
| Collation |
java | greenrobot__EventBus | EventBusTestJava/src/main/java/org/greenrobot/eventbus/EventBusFallbackToReflectionTest.java | {
"start": 1332,
"end": 1492
} | class ____ {
@Subscribe
public void onEvent(PrivateEvent any) {
trackEvent(any);
}
}
public | PublicClassWithPrivateEvent |
java | quarkusio__quarkus | extensions/websockets-next/deployment/src/test/java/io/quarkus/websockets/next/test/sessioncontext/SessionContextTest.java | {
"start": 2416,
"end": 2639
} | class ____ {
@Inject
SessionScopedBean bean;
@OnTextMessage
String process(String message) throws InterruptedException {
return bean.appendAndGet(message);
}
}
}
| Append |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/internal/shorts/Shorts_assertIsNotZero_Test.java | {
"start": 860,
"end": 1964
} | class ____ extends ShortsBaseTest {
@Test
void should_succeed_since_actual_is_not_zero() {
shorts.assertIsNotZero(someInfo(), (short) 2);
}
@Test
void should_fail_since_actual_is_zero() {
assertThatAssertionErrorIsThrownBy(() -> shorts.assertIsNotZero(someInfo(), (short) 0))
.withMessage("%nExpecting actual:%n 0%nnot to be equal to:%n 0%n".formatted());
}
@Test
void should_succeed_since_actual_is_zero_whatever_custom_comparison_strategy_is() {
shortsWithAbsValueComparisonStrategy.assertIsNotZero(someInfo(), (short) 1);
}
@Test
void should_fail_since_actual_is_not_zero_whatever_custom_comparison_strategy_is() {
assertThatAssertionErrorIsThrownBy(() -> shortsWithAbsValueComparisonStrategy.assertIsNotZero(someInfo(), (short) 0))
.withMessage("%nExpecting actual:%n 0%nnot to be equal to:%n 0%n".formatted());
}
}
| Shorts_assertIsNotZero_Test |
java | FasterXML__jackson-core | src/main/java/tools/jackson/core/exc/StreamReadException.java | {
"start": 88,
"end": 210
} | class ____ all read-side streaming processing problems, including
* parsing and input value coercion problems.
*/
public | for |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/bug/Bug_for_jiangwei.java | {
"start": 112,
"end": 340
} | class ____ extends TestCase {
public void test_0 () throws Exception {
String text = "['42-0','超級聯隊\\x28中\\x29','辛當斯','1.418',10,'11/18/2012 02:15',1,0,1,0,'',0,0,0,0]";
JSON.parse(text);
}
}
| Bug_for_jiangwei |
java | spring-projects__spring-data-jpa | spring-data-jpa/src/test/java/org/springframework/data/jpa/repository/cdi/QualifiedCustomizedCdiConfiguration.java | {
"start": 792,
"end": 962
} | class ____ implements CdiRepositoryConfiguration {
@Override
public String getRepositoryImplementationPostfix() {
return "Bean";
}
}
| QualifiedCustomizedCdiConfiguration |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/reflection/Match.java | {
"start": 1181,
"end": 1326
} | class ____ extends Competition {
public String competitor1Point;
@Version
public Integer version;
public SocialSecurityNumber playerASSN;
}
| Match |
java | quarkusio__quarkus | integration-tests/reactive-messaging-mqtt/src/test/java/io/quarkus/it/mqtt/MQTTConnectorTest.java | {
"start": 432,
"end": 775
} | class ____ {
protected static final TypeRef<List<String>> TYPE_REF = new TypeRef<List<String>>() {
};
@Test
public void test() {
post("/mqtt/people");
await().atMost(30, SECONDS)
.untilAsserted(() -> Assertions.assertEquals(6, get("/mqtt/people").as(TYPE_REF).size()));
}
}
| MQTTConnectorTest |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/TestContextAnnotationUtilsTests.java | {
"start": 26801,
"end": 26935
} | class ____ extends NonInheritedAnnotationClass {
}
@ContextConfiguration(classes = Number.class)
static | SubNonInheritedAnnotationClass |
java | apache__maven | its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng3906MergedPluginClassPathOrderingTest.java | {
"start": 1193,
"end": 3191
} | class ____ extends AbstractMavenIntegrationTestCase {
/**
* Test that project-level plugin dependencies are properly merged during inheritance.
*
* @throws Exception in case of failure
*/
@Test
public void testitMNG3906() throws Exception {
File testDir = extractResources("/mng-3906");
Verifier verifier = newVerifier(new File(testDir, "sub").getAbsolutePath());
verifier.setAutoclean(false);
verifier.deleteDirectory("target");
verifier.deleteArtifacts("org.apache.maven.its.mng3906");
verifier.filterFile("settings-template.xml", "settings.xml");
verifier.addCliArgument("--settings");
verifier.addCliArgument("settings.xml");
verifier.addCliArgument("validate");
verifier.execute();
verifier.verifyErrorFreeLog();
Properties pclProps = verifier.loadProperties("target/pcl.properties");
String className = "org.apache.maven.its.mng3906.SomeClass";
String resName = className.replace('.', '/') + ".class";
assertEquals("5", pclProps.getProperty(resName + ".count"));
assertTrue(
pclProps.getProperty(resName + ".0").endsWith("/c-0.1.jar!/" + resName),
pclProps.getProperty(resName + ".0"));
assertTrue(
pclProps.getProperty(resName + ".1").endsWith("/a-0.2.jar!/" + resName),
pclProps.getProperty(resName + ".1"));
assertTrue(
pclProps.getProperty(resName + ".2").endsWith("/b-0.1.jar!/" + resName),
pclProps.getProperty(resName + ".2"));
assertTrue(
pclProps.getProperty(resName + ".3").endsWith("/e-0.1.jar!/" + resName),
pclProps.getProperty(resName + ".3"));
assertTrue(
pclProps.getProperty(resName + ".4").endsWith("/d-0.1.jar!/" + resName),
pclProps.getProperty(resName + ".4"));
}
}
| MavenITmng3906MergedPluginClassPathOrderingTest |
java | apache__hadoop | hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AAWSCredentialsProvider.java | {
"start": 17377,
"end": 18289
} | class ____ use
* @param expectedErrorText error text to expect
* @return the exception raised
* @throws Exception any unexpected exception thrown.
*/
private IOException expectProviderInstantiationFailure(Class aClass,
String expectedErrorText) throws Exception {
return expectProviderInstantiationFailure(
buildClassListString(Collections.singletonList(aClass)),
expectedErrorText);
}
/**
* Create a configuration with a specific provider.
* @param providerOption option for the aws credential provider option.
* @return a configuration to use in test cases
*/
private Configuration createProviderConfiguration(
final String providerOption) {
Configuration conf = new Configuration(false);
conf.set(AWS_CREDENTIALS_PROVIDER, providerOption);
return conf;
}
/**
* Create a configuration with a specific class.
* @param aClass | to |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/component/file/FileConsumerIdempotentOnExceptionHandledTest.java | {
"start": 1036,
"end": 2249
} | class ____ extends ContextTestSupport {
private static final String TEST_FILE_NAME = "hello" + UUID.randomUUID() + ".txt";
@Test
public void testIdempotent() throws Exception {
getMockEndpoint("mock:invalid").expectedMessageCount(1);
template.sendBodyAndHeader(fileUri(), "Hello World", Exchange.FILE_NAME, TEST_FILE_NAME);
oneExchangeDone.matchesWaitTime();
assertMockEndpointsSatisfied();
// the error is handled and the file is regarded as success and
// therefore moved to .camel
assertFileNotExists(testFile(TEST_FILE_NAME));
assertFileExists(testFile(".camel/" + TEST_FILE_NAME));
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
onException(Exception.class).handled(true).to("mock:invalid");
// our route logic to process files from the input folder
from(fileUri("?initialDelay=0&delay=10&idempotent=true")).to("mock:input")
.throwException(new IllegalArgumentException("Forced"));
}
};
}
}
| FileConsumerIdempotentOnExceptionHandledTest |
java | spring-projects__spring-boot | module/spring-boot-jackson/src/test/java/org/springframework/boot/jackson/JacksonComponentModuleTests.java | {
"start": 8838,
"end": 8939
} | class ____ extends NameAndAgeJacksonComponent.Serializer {
}
@JacksonComponent
static | OnlySerializer |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/fetching/FetchingTest.java | {
"start": 1262,
"end": 4686
} | class ____ {
@Test
public void test(EntityManagerFactoryScope scope) {
scope.inTransaction( entityManager -> {
Department department = new Department();
department.id = 1L;
entityManager.persist(department);
Employee employee1 = new Employee();
employee1.id = 1L;
employee1.username = "user1";
employee1.password = "3fabb4de8f1ee2e97d7793bab2db1116";
employee1.accessLevel = 0;
employee1.department = department;
entityManager.persist(employee1);
Employee employee2 = new Employee();
employee2.id = 2L;
employee2.username = "user2";
employee2.password = "3fabb4de8f1ee2e97d7793bab2db1116";
employee2.accessLevel = 1;
employee2.department = department;
entityManager.persist(employee2);
});
scope.inTransaction( entityManager -> {
String username = "user1";
String password = "3fabb4de8f1ee2e97d7793bab2db1116";
//tag::fetching-strategies-no-fetching-example[]
Employee employee = entityManager.createQuery(
"select e " +
"from Employee e " +
"where " +
" e.username = :username and " +
" e.password = :password",
Employee.class)
.setParameter("username", username)
.setParameter("password", password)
.getSingleResult();
//end::fetching-strategies-no-fetching-example[]
assertNotNull(employee);
});
scope.inTransaction( entityManager -> {
String username = "user1";
String password = "3fabb4de8f1ee2e97d7793bab2db1116";
//tag::fetching-strategies-no-fetching-scalar-example[]
Integer accessLevel = entityManager.createQuery(
"select e.accessLevel " +
"from Employee e " +
"where " +
" e.username = :username and " +
" e.password = :password",
Integer.class)
.setParameter("username", username)
.setParameter("password", password)
.getSingleResult();
//end::fetching-strategies-no-fetching-scalar-example[]
assertEquals(Integer.valueOf(0), accessLevel);
});
scope.inTransaction( entityManager -> {
String username = "user1";
String password = "3fabb4de8f1ee2e97d7793bab2db1116";
//tag::fetching-strategies-dynamic-fetching-jpql-example[]
Employee employee = entityManager.createQuery(
"select e " +
"from Employee e " +
"left join fetch e.projects " +
"where " +
" e.username = :username and " +
" e.password = :password",
Employee.class)
.setParameter("username", username)
.setParameter("password", password)
.getSingleResult();
//end::fetching-strategies-dynamic-fetching-jpql-example[]
assertNotNull(employee);
});
scope.inTransaction( entityManager -> {
String username = "user1";
String password = "3fabb4de8f1ee2e97d7793bab2db1116";
//tag::fetching-strategies-dynamic-fetching-criteria-example[]
CriteriaBuilder builder = entityManager.getCriteriaBuilder();
CriteriaQuery<Employee> query = builder.createQuery(Employee.class);
Root<Employee> root = query.from(Employee.class);
root.fetch("projects", JoinType.LEFT);
query.select(root).where(
builder.and(
builder.equal(root.get("username"), username),
builder.equal(root.get("password"), password)
)
);
Employee employee = entityManager.createQuery(query).getSingleResult();
//end::fetching-strategies-dynamic-fetching-criteria-example[]
assertNotNull(employee);
});
}
//tag::fetching-strategies-domain-model-example[]
@Entity(name = "Department")
public static | FetchingTest |
java | apache__flink | flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/assigners/FileSplitAssigner.java | {
"start": 1293,
"end": 2284
} | interface ____ {
/**
* Gets the next split.
*
* <p>When this method returns an empty {@code Optional}, then the set of splits is assumed to
* be done and the source will finish once the readers finished their current splits.
*/
Optional<FileSourceSplit> getNext(@Nullable String hostname);
/**
* Adds a set of splits to this assigner. This happens for example when some split processing
* failed and the splits need to be re-added, or when new splits got discovered.
*/
void addSplits(Collection<FileSourceSplit> splits);
/** Gets the remaining splits that this assigner has pending. */
Collection<FileSourceSplit> remainingSplits();
// ------------------------------------------------------------------------
/**
* Factory for the {@code FileSplitAssigner}, to allow the {@code FileSplitAssigner} to be
* eagerly initialized and to not be serializable.
*/
@FunctionalInterface
| FileSplitAssigner |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/ComparableAndComparator.java | {
"start": 1862,
"end": 2009
} | interface ____ subtype of Comparable */
private static final Matcher<Tree> COMPARABLE_MATCHER = isSubtypeOf(COMPARABLE);
/** Matches if a class/ | is |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-servlet/runtime/src/main/java/io/quarkus/resteasy/reactive/server/servlet/runtime/ServletRequestContext.java | {
"start": 21186,
"end": 22391
} | class ____<K, V> implements Map.Entry<K, V> {
private final K key;
private V value;
MapEntry(K key, V value) {
this.key = key;
this.value = value;
}
@Override
public K getKey() {
return key;
}
@Override
public V getValue() {
return value;
}
@Override
public V setValue(V value) {
V old = value;
this.value = value;
return old;
}
}
@Override
public ServerHttpResponse sendFile(String path, long offset, long length) {
context.response().sendFile(path, offset, length);
return this;
}
@Override
public boolean isWriteQueueFull() {
return context.response().writeQueueFull();
}
@Override
public ServerHttpResponse addDrainHandler(Runnable onDrain) {
context.response().drainHandler(new Handler<Void>() {
@Override
public void handle(Void event) {
onDrain.run();
}
});
return this;
}
@Override
public void reset() {
context.response().reset();
}
}
| MapEntry |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/merge/CompositeIdWithAssociationsAndGeneratedValuesMerge2Test.java | {
"start": 2206,
"end": 2647
} | class ____ {
@Id
@ManyToOne(optional = false, cascade = CascadeType.MERGE)
@JoinColumn(name = "middle_id", nullable = false)
private Middle middle;
@Id
@Column(name = "type_column")
private Integer type;
private String note;
public Bottom() {
}
public Bottom(Middle middle, Integer type,String note) {
this.middle = middle;
this.middle.addBottom( this );
this.type = type;
this.note = note;
}
}
}
| Bottom |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/volume/csi/provisioner/VolumeProvisioningResults.java | {
"start": 1167,
"end": 1975
} | class ____ {
private Map<VolumeId, VolumeProvisioningResult> resultMap;
public VolumeProvisioningResults() {
this.resultMap = new HashMap<>();
}
public boolean isSuccess() {
return !resultMap.isEmpty() && resultMap.values().stream()
.allMatch(subResult -> subResult.isSuccess());
}
public String getBriefMessage() {
JsonObject obj = new JsonObject();
obj.addProperty("TotalVolumes", resultMap.size());
JsonObject failed = new JsonObject();
for (VolumeProvisioningResult result : resultMap.values()) {
if (!result.isSuccess()) {
failed.addProperty(result.getVolumeId().toString(),
result.getVolumeState().name());
}
}
obj.add("failedVolumesStates", failed);
return obj.toString();
}
static | VolumeProvisioningResults |
java | apache__kafka | raft/src/test/java/org/apache/kafka/raft/UnattachedStateTest.java | {
"start": 1426,
"end": 9044
} | class ____ {
private final MockTime time = new MockTime();
private final LogContext logContext = new LogContext();
private final int epoch = 5;
private final int electionTimeoutMs = 10000;
private final Set<Integer> voters = Set.of(1, 2, 3);
private final ReplicaKey voter1Key = ReplicaKey.of(1, Uuid.randomUuid());
private final ReplicaKey votedKey = voter1Key;
private UnattachedState newUnattachedState(
OptionalInt leaderId,
Optional<ReplicaKey> votedKey
) {
return new UnattachedState(
time,
epoch,
leaderId,
votedKey,
voters,
Optional.empty(),
electionTimeoutMs,
logContext
);
}
@ParameterizedTest
@CsvSource({ "true,false", "false,true", "false,false" })
public void testElectionStateAndElectionTimeout(boolean hasVotedKey, boolean hasLeaderId) {
OptionalInt leader = hasLeaderId ? OptionalInt.of(3) : OptionalInt.empty();
Optional<ReplicaKey> votedKey = hasVotedKey ? Optional.of(this.votedKey) : Optional.empty();
UnattachedState state = newUnattachedState(leader, votedKey);
assertEquals(
new ElectionState(epoch, leader, votedKey, voters),
state.election()
);
assertEquals(electionTimeoutMs, state.remainingElectionTimeMs(time.milliseconds()));
assertFalse(state.hasElectionTimeoutExpired(time.milliseconds()));
time.sleep(electionTimeoutMs / 2);
assertEquals(electionTimeoutMs / 2, state.remainingElectionTimeMs(time.milliseconds()));
assertFalse(state.hasElectionTimeoutExpired(time.milliseconds()));
time.sleep(electionTimeoutMs / 2);
assertEquals(0, state.remainingElectionTimeMs(time.milliseconds()));
assertTrue(state.hasElectionTimeoutExpired(time.milliseconds()));
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testGrantVoteWithoutVotedKey(boolean isLogUpToDate) {
UnattachedState state = newUnattachedState(OptionalInt.empty(), Optional.empty());
assertEquals(
isLogUpToDate,
state.canGrantVote(voter1Key, isLogUpToDate, true)
);
assertEquals(
isLogUpToDate,
state.canGrantVote(voter1Key, isLogUpToDate, false)
);
assertEquals(
isLogUpToDate,
state.canGrantVote(ReplicaKey.of(2, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true)
);
assertEquals(
isLogUpToDate,
state.canGrantVote(ReplicaKey.of(2, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false)
);
assertEquals(
isLogUpToDate,
state.canGrantVote(ReplicaKey.of(3, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true)
);
assertEquals(
isLogUpToDate,
state.canGrantVote(ReplicaKey.of(3, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false)
);
assertEquals(
isLogUpToDate,
state.canGrantVote(ReplicaKey.of(10, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true)
);
assertEquals(
isLogUpToDate,
state.canGrantVote(ReplicaKey.of(10, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false)
);
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testCanGrantVoteWithVotedKey(boolean isLogUpToDate) {
UnattachedState state = newUnattachedState(OptionalInt.empty(), Optional.of(votedKey));
// Same voterKey
// Local can reject PreVote for a replica that local has already granted a standard vote to if their log is behind
assertEquals(
isLogUpToDate,
state.canGrantVote(votedKey, isLogUpToDate, true)
);
assertTrue(state.canGrantVote(votedKey, isLogUpToDate, false));
// Different directoryId
// Local can grant PreVote for a replica that local has already granted a standard vote to if their log is up-to-date,
// even if the directoryId is different
assertEquals(
isLogUpToDate,
state.canGrantVote(ReplicaKey.of(votedKey.id(), Uuid.randomUuid()), isLogUpToDate, true)
);
assertFalse(state.canGrantVote(ReplicaKey.of(votedKey.id(), Uuid.randomUuid()), isLogUpToDate, false));
// Missing directoryId
assertEquals(
isLogUpToDate,
state.canGrantVote(ReplicaKey.of(votedKey.id(), ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true)
);
assertFalse(state.canGrantVote(ReplicaKey.of(votedKey.id(), ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false));
// Different voterId
assertEquals(
isLogUpToDate,
state.canGrantVote(ReplicaKey.of(2, votedKey.directoryId().get()), isLogUpToDate, true)
);
assertEquals(
isLogUpToDate,
state.canGrantVote(ReplicaKey.of(2, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true)
);
assertFalse(state.canGrantVote(ReplicaKey.of(2, votedKey.directoryId().get()), isLogUpToDate, false));
assertFalse(state.canGrantVote(ReplicaKey.of(2, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false));
// Observer
assertEquals(
isLogUpToDate,
state.canGrantVote(ReplicaKey.of(10, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true)
);
assertFalse(state.canGrantVote(ReplicaKey.of(10, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false));
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
void testGrantVoteWithLeader(boolean isLogUpToDate) {
int leaderId = 3;
UnattachedState state = newUnattachedState(OptionalInt.of(leaderId), Optional.empty());
// Check that the leader is persisted if the leader is known
assertEquals(ElectionState.withElectedLeader(epoch, leaderId, Optional.empty(), voters), state.election());
// Check that the replica can grant PreVotes if the log is up-to-date, even if the last leader is known
// This is because nodes in Unattached have not successfully fetched from the leader yet
assertEquals(
isLogUpToDate,
state.canGrantVote(voter1Key, isLogUpToDate, true)
);
assertEquals(
isLogUpToDate,
state.canGrantVote(ReplicaKey.of(2, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true)
);
assertEquals(
isLogUpToDate,
state.canGrantVote(ReplicaKey.of(leaderId, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true)
);
assertEquals(
isLogUpToDate,
state.canGrantVote(ReplicaKey.of(10, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true)
);
// Check that the replica rejects all standard votes request if the leader is known
assertFalse(state.canGrantVote(ReplicaKey.of(1, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false));
assertFalse(state.canGrantVote(ReplicaKey.of(2, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false));
assertFalse(state.canGrantVote(ReplicaKey.of(leaderId, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false));
assertFalse(state.canGrantVote(ReplicaKey.of(10, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false));
}
@Test
public void testLeaderEndpoints() {
UnattachedState state = newUnattachedState(OptionalInt.of(3), Optional.of(this.votedKey));
assertEquals(Endpoints.empty(), state.leaderEndpoints());
}
}
| UnattachedStateTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.