language stringclasses 1
value | repo stringclasses 60
values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/common/time/DateUtilsRoundingTests.java | {
"start": 667,
"end": 2391
} | class ____ extends ESTestCase {
public void testDateUtilsRounding() {
for (int year = -1000; year < 3000; year++) {
final long startOfYear = DateUtilsRounding.utcMillisAtStartOfYear(year);
assertThat(startOfYear, equalTo(ZonedDateTime.of(year, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC).toInstant().toEpochMilli()));
assertThat(DateUtilsRounding.getYear(startOfYear), equalTo(year));
assertThat(DateUtilsRounding.getYear(startOfYear - 1), equalTo(year - 1));
assertThat(DateUtilsRounding.getMonthOfYear(startOfYear, year), equalTo(1));
assertThat(DateUtilsRounding.getMonthOfYear(startOfYear - 1, year - 1), equalTo(12));
for (int month = 1; month <= 12; month++) {
final long startOfMonth = ZonedDateTime.of(year, month, 1, 0, 0, 0, 0, ZoneOffset.UTC).toInstant().toEpochMilli();
assertThat(DateUtilsRounding.getMonthOfYear(startOfMonth, year), equalTo(month));
if (month > 1) {
assertThat(DateUtilsRounding.getYear(startOfMonth - 1), equalTo(year));
assertThat(DateUtilsRounding.getMonthOfYear(startOfMonth - 1, year), equalTo(month - 1));
}
}
}
}
public void testIsLeapYear() {
assertTrue(DateUtilsRounding.isLeapYear(2004));
assertTrue(DateUtilsRounding.isLeapYear(2000));
assertTrue(DateUtilsRounding.isLeapYear(1996));
assertFalse(DateUtilsRounding.isLeapYear(2001));
assertFalse(DateUtilsRounding.isLeapYear(1900));
assertFalse(DateUtilsRounding.isLeapYear(-1000));
assertTrue(DateUtilsRounding.isLeapYear(-996));
}
}
| DateUtilsRoundingTests |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/interceptors/intercepted/InterceptedBeanInjectionTest.java | {
"start": 463,
"end": 1580
} | class ____ {
@RegisterExtension
public ArcTestContainer container = new ArcTestContainer(Simple.class,
SimpleInterceptor.class, InterceptedBean.class, InterceptedDependent.class);
@Test
public void testInterception() {
InterceptedBean bean = Arc.container().instance(InterceptedBean.class).get();
assertEquals(InterceptedBean.class.getName() + InterceptedBean.class.getName(), bean.ping());
assertEquals(InterceptedBean.class.getName(), SimpleInterceptor.aroundConstructResult);
assertEquals(InterceptedBean.class.getName(), SimpleInterceptor.postConstructResult);
assertEquals(
InterceptedBean.class.getName() + InterceptedDependent.class.getName() + InterceptedDependent.class.getName(),
bean.pong());
InterceptedDependent dependent = Arc.container().instance(InterceptedDependent.class).get();
assertEquals(InterceptedDependent.class.getName() + InterceptedDependent.class.getName(),
dependent.pong());
}
@ApplicationScoped
@Simple
static | InterceptedBeanInjectionTest |
java | apache__maven | its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng7697PomWithEmojiTest.java | {
"start": 1055,
"end": 1688
} | class ____ extends AbstractMavenIntegrationTestCase {
MavenITmng7697PomWithEmojiTest() {
// affected Maven versions: 3.9.0, 4.0.0-alpha-4
super();
}
/**
* Pom read successful.
*
* @throws Exception in case of failure
*/
@Test
void testPomRead() throws Exception {
File testDir = extractResources("/mng-7697-emoji");
Verifier verifier = newVerifier(testDir.getAbsolutePath());
verifier.setAutoclean(false);
verifier.addCliArgument("verify");
verifier.execute();
verifier.verifyErrorFreeLog();
}
}
| MavenITmng7697PomWithEmojiTest |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/engine/query/internal/NativeQueryInterpreterStandardImpl.java | {
"start": 357,
"end": 987
} | class ____ implements NativeQueryInterpreter {
/**
* Singleton access
*/
public static final NativeQueryInterpreterStandardImpl NATIVE_QUERY_INTERPRETER = new NativeQueryInterpreterStandardImpl( false );
private final boolean nativeJdbcParametersIgnored;
public NativeQueryInterpreterStandardImpl(boolean nativeJdbcParametersIgnored) {
this.nativeJdbcParametersIgnored = nativeJdbcParametersIgnored;
}
@Override
public void recognizeParameters(String nativeQuery, ParameterRecognizer recognizer) {
ParameterParser.parse( nativeQuery, recognizer, nativeJdbcParametersIgnored );
}
}
| NativeQueryInterpreterStandardImpl |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/context/properties/ConfigurationPropertiesTests.java | {
"start": 65894,
"end": 66305
} | class ____ implements ProtocolResolver {
static final String PREFIX = "test:/";
@Override
public @Nullable Resource resolve(String location, ResourceLoader resourceLoader) {
if (location.startsWith(PREFIX)) {
String path = location.substring(PREFIX.length());
return new ClassPathResource(path);
}
return null;
}
}
@Configuration(proxyBeanMethods = false)
static | TestProtocolResolver |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/SjmsEndpointBuilderFactory.java | {
"start": 128151,
"end": 130912
} | interface ____ {
/**
* Simple JMS (camel-sjms)
* Send and receive messages to/from a JMS Queue or Topic using plain
* JMS 1.x API.
*
* Category: messaging
* Since: 2.11
* Maven coordinates: org.apache.camel:camel-sjms
*
* @return the dsl builder for the headers' name.
*/
default SjmsHeaderNameBuilder sjms() {
return SjmsHeaderNameBuilder.INSTANCE;
}
/**
* Simple JMS (camel-sjms)
* Send and receive messages to/from a JMS Queue or Topic using plain
* JMS 1.x API.
*
* Category: messaging
* Since: 2.11
* Maven coordinates: org.apache.camel:camel-sjms
*
* Syntax: <code>sjms:destinationType:destinationName</code>
*
* Path parameter: destinationType
* The kind of destination to use
* Default value: queue
* There are 2 enums and the value can be one of: queue, topic
*
* Path parameter: destinationName (required)
* DestinationName is a JMS queue or topic name. By default, the
* destinationName is interpreted as a queue name.
*
* @param path destinationType:destinationName
* @return the dsl builder
*/
default SjmsEndpointBuilder sjms(String path) {
return SjmsEndpointBuilderFactory.endpointBuilder("sjms", path);
}
/**
* Simple JMS (camel-sjms)
* Send and receive messages to/from a JMS Queue or Topic using plain
* JMS 1.x API.
*
* Category: messaging
* Since: 2.11
* Maven coordinates: org.apache.camel:camel-sjms
*
* Syntax: <code>sjms:destinationType:destinationName</code>
*
* Path parameter: destinationType
* The kind of destination to use
* Default value: queue
* There are 2 enums and the value can be one of: queue, topic
*
* Path parameter: destinationName (required)
* DestinationName is a JMS queue or topic name. By default, the
* destinationName is interpreted as a queue name.
*
* @param componentName to use a custom component name for the endpoint
* instead of the default name
* @param path destinationType:destinationName
* @return the dsl builder
*/
default SjmsEndpointBuilder sjms(String componentName, String path) {
return SjmsEndpointBuilderFactory.endpointBuilder(componentName, path);
}
}
/**
* The builder of headers' name for the Simple JMS component.
*/
public static | SjmsBuilders |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/batchfetch/BatchFetchReferencedColumnNameTest.java | {
"start": 2551,
"end": 3180
} | class ____ {
@Id
@Column(name = "CHILD_ID")
private Long id;
@Column(name = "PARENT_ID")
private Long parentId;
@Column(name = "CREATED_ON")
private ZonedDateTime createdOn;
public ZonedDateTime getCreatedOn() {
return createdOn;
}
public void setCreatedOn(ZonedDateTime createdOn) {
this.createdOn = createdOn;
}
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public Long getParentId() {
return parentId;
}
public void setParentId(Long parentId) {
this.parentId = parentId;
}
}
@Entity
@Table(name = "PARENT")
public static | Child |
java | quarkusio__quarkus | integration-tests/elytron-undertow/src/test/java/io/quarkus/it/undertow/elytron/BaseAuthTest.java | {
"start": 312,
"end": 1264
} | class ____ extends HttpsSetup {
@RepeatedTest(100)
public void testPost() {
// This is a regression test in that we had a problem where the Vert.x request was not paused
// before the authentication filters ran and the post message was thrown away by Vert.x because
// RESTEasy hadn't registered its request handlers yet.
given()
.header("Authorization", "Basic am9objpqb2hu")
.body("Bill")
.contentType(ContentType.TEXT)
.when()
.post("/foo/")
.then()
.statusCode(200)
.body(is("hello Bill"));
}
@Test
public void testGet() {
given()
.header("Authorization", "Basic am9objpqb2hu")
.when()
.get("/foo/")
.then()
.statusCode(200)
.body(is("hello"));
}
}
| BaseAuthTest |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/params/ParameterizedTestContextTests.java | {
"start": 1929,
"end": 2622
} | class ____ {
@ParameterizedTest
void onePrimitive(int num) {
}
@ParameterizedTest
void twoPrimitives(int num1, int num2) {
}
@ParameterizedTest
void twoAggregators(@CsvToPerson Person person, ArgumentsAccessor arguments) {
}
@ParameterizedTest
void twoAggregatorsWithTestInfoAtTheEnd(@CsvToPerson Person person1, @CsvToPerson Person person2,
TestInfo testInfo) {
}
@ParameterizedTest
void mixedMode(int num1, int num2, ArgumentsAccessor arguments1, ArgumentsAccessor arguments2,
@CsvToPerson Person person1, @CsvToPerson Person person2, TestInfo testInfo1, TestInfo testInfo2) {
}
}
@SuppressWarnings("JUnitMalformedDeclaration")
static | ValidTestCase |
java | google__guava | guava-testlib/test/com/google/common/testing/NullPointerTesterTest.java | {
"start": 7025,
"end": 7160
} | class ____ {
@Keep
public static void christenPoodle(String name) {
checkNotNull(name);
}
}
private static | ThrowsNpe |
java | FasterXML__jackson-core | src/main/java/tools/jackson/core/util/JacksonFeature.java | {
"start": 282,
"end": 1019
} | interface ____
{
/**
* Accessor for checking whether this feature is enabled by default.
*
* @return Whether this instance is enabled by default or not
*/
public boolean enabledByDefault();
/**
* Returns bit mask for this feature instance; must be a single bit,
* that is of form {@code 1 << N}.
*
* @return Bit mask of this feature
*/
public int getMask();
/**
* Convenience method for checking whether feature is enabled in given bitmask.
*
* @param flags Bitfield that contains a set of enabled features of this type
*
* @return True if this feature is enabled in passed bit field
*/
public boolean enabledIn(int flags);
}
| JacksonFeature |
java | spring-projects__spring-security | core/src/main/java/org/springframework/security/jackson2/BadCredentialsExceptionMixin.java | {
"start": 916,
"end": 1054
} | class ____ in serialize/deserialize
* {@link org.springframework.security.authentication.BadCredentialsException} class. To
* use this | helps |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/flush/ManualFlushTest.java | {
"start": 1066,
"end": 2009
} | class ____ {
private final Logger log = Logger.getLogger( ManualFlushTest.class );
@AfterEach
void tearDown(SessionFactoryScope factoryScope) {
factoryScope.dropData();
}
@Test
public void testFlushSQL(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( entityManager -> {
log.info("testFlushSQL");
//tag::flushing-manual-flush-example[]
Person person = new Person("John Doe");
entityManager.persist(person);
Session session = entityManager.unwrap(Session.class);
session.setHibernateFlushMode(FlushMode.MANUAL);
Assertions.assertEquals( 0, ((Number) entityManager
.createQuery( "select count(id) from Person" )
.getSingleResult()).intValue() );
Assertions.assertEquals( 0, (int) session.createNativeQuery( "select count(*) from Person", Integer.class )
.uniqueResult() );
//end::flushing-manual-flush-example[]
});
}
@Entity(name = "Person")
public static | ManualFlushTest |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/atomic/referencearray/AtomicReferenceArrayAssert_allMatch_with_description_Test.java | {
"start": 1028,
"end": 1614
} | class ____ extends AtomicReferenceArrayAssertBaseTest {
private Predicate<Object> predicate;
@BeforeEach
void beforeOnce() {
predicate = o -> o != null;
}
@Override
protected AtomicReferenceArrayAssert<Object> invoke_api_method() {
return assertions.allMatch(predicate, "custom");
}
@Override
protected void verify_internal_effects() {
verify(iterables).assertAllMatch(info(), newArrayList(internalArray()), predicate,
new PredicateDescription("custom"));
}
}
| AtomicReferenceArrayAssert_allMatch_with_description_Test |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/GetInferenceDiagnosticsAction.java | {
"start": 1935,
"end": 2327
} | class ____ extends BaseNodesRequest {
public Request() {
super((String[]) null);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
return true;
}
@Override
public int hashCode() {
// The | Request |
java | junit-team__junit5 | junit-jupiter-engine/src/main/java/org/junit/jupiter/engine/support/OpenTest4JAndJUnit4AwareThrowableCollector.java | {
"start": 1139,
"end": 2730
} | class ____ extends ThrowableCollector {
private static final Logger logger = LoggerFactory.getLogger(OpenTest4JAndJUnit4AwareThrowableCollector.class);
private static final String ASSUMPTION_VIOLATED_EXCEPTION = "org.junit.internal.AssumptionViolatedException";
private static final String COMMON_FAILURE_MESSAGE = "Failed to load class " + ASSUMPTION_VIOLATED_EXCEPTION
+ ": only supporting " + TestAbortedException.class.getName() + " for aborted execution.";
private static final Predicate<? super Throwable> abortedExecutionPredicate = createAbortedExecutionPredicate();
OpenTest4JAndJUnit4AwareThrowableCollector() {
super(abortedExecutionPredicate);
}
private static Predicate<? super Throwable> createAbortedExecutionPredicate() {
Predicate<Throwable> otaPredicate = TestAbortedException.class::isInstance;
// Additionally support JUnit 4's AssumptionViolatedException?
try {
Class<?> clazz = ReflectionSupport.tryToLoadClass(ASSUMPTION_VIOLATED_EXCEPTION).get();
if (clazz != null) {
return otaPredicate.or(clazz::isInstance);
}
}
catch (Throwable throwable) {
UnrecoverableExceptions.rethrowIfUnrecoverable(throwable);
Supplier<String> messageSupplier = (throwable instanceof NoClassDefFoundError)
? () -> COMMON_FAILURE_MESSAGE + " Note that " + ASSUMPTION_VIOLATED_EXCEPTION
+ " requires that Hamcrest is on the classpath."
: () -> COMMON_FAILURE_MESSAGE;
logger.debug(throwable, messageSupplier);
}
// Else just OTA's TestAbortedException
return otaPredicate;
}
}
| OpenTest4JAndJUnit4AwareThrowableCollector |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/criteria/TreatAbstractSuperclassTest.java | {
"start": 6364,
"end": 6737
} | class ____ extends BaseEntity {
@ManyToOne
@JoinColumn(name = "author_id", nullable = false)
private Author author;
@ManyToOne
@JoinColumn(name = "book_id", nullable = false)
private Book book;
public AuthorParticipation() {
}
public AuthorParticipation(Author author, Book book) {
this.author = author;
this.book = book;
}
}
}
| AuthorParticipation |
java | apache__camel | core/camel-base-engine/src/main/java/org/apache/camel/impl/engine/DefaultInflightRepository.java | {
"start": 6650,
"end": 8722
} | class ____ implements InflightExchange {
private final Exchange exchange;
private InflightExchangeEntry(Exchange exchange) {
this.exchange = exchange;
}
@Override
public Exchange getExchange() {
return exchange;
}
@Override
public long getDuration() {
return DefaultInflightRepository.getExchangeDuration(exchange);
}
@Override
@SuppressWarnings("unchecked")
public long getElapsed() {
// this can only be calculate if message history is enabled
List<MessageHistory> list = exchange.getProperty(ExchangePropertyKey.MESSAGE_HISTORY, List.class);
if (list == null || list.isEmpty()) {
return 0;
}
// get latest entry
MessageHistory history = list.get(list.size() - 1);
if (history != null) {
long elapsed = history.getElapsed();
if (elapsed == 0) {
// still in progress, so lets compute it via the start time
elapsed = history.getElapsedSinceCreated();
}
return elapsed;
} else {
return 0;
}
}
@Override
public String getNodeId() {
return exchange.getExchangeExtension().getHistoryNodeId();
}
@Override
public String getFromRouteId() {
return exchange.getFromRouteId();
}
@Override
public boolean isFromRemoteEndpoint() {
if (exchange.getFromEndpoint() != null) {
return exchange.getFromEndpoint().isRemote();
}
return false;
}
@Override
public String getAtRouteId() {
return ExchangeHelper.getAtRouteId(exchange);
}
@Override
public String toString() {
return "InflightExchangeEntry[exchangeId=" + exchange.getExchangeId() + "]";
}
}
}
| InflightExchangeEntry |
java | apache__dubbo | dubbo-common/src/main/java/org/apache/dubbo/common/utils/ServiceAnnotationResolver.java | {
"start": 3402,
"end": 4204
} | class ____ of interface
*
* @return if not found, return <code>null</code>
*/
public String resolveInterfaceClassName() {
Class interfaceClass;
// first, try to get the value from "interfaceName" attribute
String interfaceName = resolveAttribute("interfaceName");
if (isEmpty(interfaceName)) { // If not found, try "interfaceClass"
interfaceClass = resolveAttribute("interfaceClass");
} else {
interfaceClass = resolveClass(interfaceName, getClass().getClassLoader());
}
if (isGenericClass(interfaceClass)) {
interfaceName = interfaceClass.getName();
} else {
interfaceName = null;
}
if (isEmpty(interfaceName)) { // If not fund, try to get the first | name |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskManagerRunner.java | {
"start": 5158,
"end": 5378
} | class ____ the executable entry point for the task manager in yarn or standalone mode. It
* constructs the related components (network, I/O manager, memory manager, RPC service, HA service)
* and starts them.
*/
public | is |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/data/GenericRowData.java | {
"start": 2146,
"end": 8465
} | class ____ implements RowData {
/** The array to store the actual internal format values. */
private final Object[] fields;
/** The kind of change that a row describes in a changelog. */
private RowKind kind;
/**
* Creates an instance of {@link GenericRowData} with given kind and number of fields.
*
* <p>Initially, all fields are set to null.
*
* <p>Note: All fields of the row must be internal data structures.
*
* @param kind kind of change that this row describes in a changelog
* @param arity number of fields
*/
public GenericRowData(RowKind kind, int arity) {
this.fields = new Object[arity];
this.kind = kind;
}
/**
* Creates an instance of {@link GenericRowData} with given number of fields.
*
* <p>Initially, all fields are set to null. By default, the row describes a {@link
* RowKind#INSERT} in a changelog.
*
* <p>Note: All fields of the row must be internal data structures.
*
* @param arity number of fields
*/
public GenericRowData(int arity) {
this.fields = new Object[arity];
this.kind = RowKind.INSERT; // INSERT as default
}
/**
* Sets the field value at the given position.
*
* <p>Note: The given field value must be an internal data structures. Otherwise the {@link
* GenericRowData} is corrupted and may throw exception when processing. See {@link RowData} for
* more information about internal data structures.
*
* <p>The field value can be null for representing nullability.
*/
public void setField(int pos, Object value) {
this.fields[pos] = value;
}
/**
* Returns the field value at the given position.
*
* <p>Note: The returned value is in internal data structure. See {@link RowData} for more
* information about internal data structures.
*
* <p>The returned field value can be null for representing nullability.
*/
public Object getField(int pos) {
return this.fields[pos];
}
@Override
public int getArity() {
return fields.length;
}
@Override
public RowKind getRowKind() {
return kind;
}
@Override
public void setRowKind(RowKind kind) {
checkNotNull(kind);
this.kind = kind;
}
@Override
public boolean isNullAt(int pos) {
return this.fields[pos] == null;
}
@Override
public boolean getBoolean(int pos) {
return (boolean) this.fields[pos];
}
@Override
public byte getByte(int pos) {
return (byte) this.fields[pos];
}
@Override
public short getShort(int pos) {
return (short) this.fields[pos];
}
@Override
public int getInt(int pos) {
return (int) this.fields[pos];
}
@Override
public long getLong(int pos) {
return (long) this.fields[pos];
}
@Override
public float getFloat(int pos) {
return (float) this.fields[pos];
}
@Override
public double getDouble(int pos) {
return (double) this.fields[pos];
}
@Override
public StringData getString(int pos) {
return (StringData) this.fields[pos];
}
@Override
public DecimalData getDecimal(int pos, int precision, int scale) {
return (DecimalData) this.fields[pos];
}
@Override
public TimestampData getTimestamp(int pos, int precision) {
return (TimestampData) this.fields[pos];
}
@SuppressWarnings("unchecked")
@Override
public <T> RawValueData<T> getRawValue(int pos) {
return (RawValueData<T>) this.fields[pos];
}
@Override
public byte[] getBinary(int pos) {
return (byte[]) this.fields[pos];
}
@Override
public ArrayData getArray(int pos) {
return (ArrayData) this.fields[pos];
}
@Override
public MapData getMap(int pos) {
return (MapData) this.fields[pos];
}
@Override
public RowData getRow(int pos, int numFields) {
return (RowData) this.fields[pos];
}
@Override
public Variant getVariant(int pos) {
return (Variant) this.fields[pos];
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof GenericRowData)) {
return false;
}
GenericRowData that = (GenericRowData) o;
return kind == that.kind && Arrays.deepEquals(fields, that.fields);
}
@Override
public int hashCode() {
int result = Objects.hash(kind);
result = 31 * result + Arrays.deepHashCode(fields);
return result;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(kind.shortString()).append("(");
for (int i = 0; i < fields.length; i++) {
if (i != 0) {
sb.append(",");
}
sb.append(StringUtils.arrayAwareToString(fields[i]));
}
sb.append(")");
return sb.toString();
}
// ----------------------------------------------------------------------------------------
// Utilities
// ----------------------------------------------------------------------------------------
/**
* Creates an instance of {@link GenericRowData} with given field values.
*
* <p>By default, the row describes a {@link RowKind#INSERT} in a changelog.
*
* <p>Note: All fields of the row must be internal data structures.
*/
public static GenericRowData of(Object... values) {
GenericRowData row = new GenericRowData(values.length);
for (int i = 0; i < values.length; ++i) {
row.setField(i, values[i]);
}
return row;
}
/**
* Creates an instance of {@link GenericRowData} with given kind and field values.
*
* <p>Note: All fields of the row must be internal data structures.
*/
public static GenericRowData ofKind(RowKind kind, Object... values) {
GenericRowData row = new GenericRowData(kind, values.length);
for (int i = 0; i < values.length; ++i) {
row.setField(i, values[i]);
}
return row;
}
}
| GenericRowData |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/query/criteria/JpaDerivedRoot.java | {
"start": 222,
"end": 293
} | interface ____<T> extends JpaDerivedFrom<T>, JpaRoot<T> {
}
| JpaDerivedRoot |
java | elastic__elasticsearch | x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessFactory.java | {
"start": 1779,
"end": 7159
} | class ____ implements AutodetectProcessFactory {
private static final Logger logger = LogManager.getLogger(NativeAutodetectProcessFactory.class);
private static final NamedPipeHelper NAMED_PIPE_HELPER = new NamedPipeHelper();
private final Environment env;
private final Settings settings;
private final NativeController nativeController;
private final ClusterService clusterService;
private final ResultsPersisterService resultsPersisterService;
private final AnomalyDetectionAuditor auditor;
private volatile Duration processConnectTimeout;
public NativeAutodetectProcessFactory(
Environment env,
Settings settings,
NativeController nativeController,
ClusterService clusterService,
ResultsPersisterService resultsPersisterService,
AnomalyDetectionAuditor auditor
) {
this.env = Objects.requireNonNull(env);
this.settings = Objects.requireNonNull(settings);
this.nativeController = Objects.requireNonNull(nativeController);
this.clusterService = Objects.requireNonNull(clusterService);
this.resultsPersisterService = Objects.requireNonNull(resultsPersisterService);
this.auditor = Objects.requireNonNull(auditor);
setProcessConnectTimeout(MachineLearning.PROCESS_CONNECT_TIMEOUT.get(settings));
clusterService.getClusterSettings()
.addSettingsUpdateConsumer(MachineLearning.PROCESS_CONNECT_TIMEOUT, this::setProcessConnectTimeout);
}
void setProcessConnectTimeout(TimeValue processConnectTimeout) {
this.processConnectTimeout = Duration.ofMillis(processConnectTimeout.getMillis());
}
@Override
public AutodetectProcess createAutodetectProcess(
String pipelineId,
Job job,
AutodetectParams params,
ExecutorService executorService,
Consumer<String> onProcessCrash
) {
List<Path> filesToDelete = new ArrayList<>();
ProcessPipes processPipes = new ProcessPipes(
env,
NAMED_PIPE_HELPER,
processConnectTimeout,
AutodetectBuilder.AUTODETECT,
pipelineId,
null,
false,
true,
true,
params.modelSnapshot() != null,
true
);
createNativeProcess(job, params, processPipes, filesToDelete);
boolean includeTokensField = job.getAnalysisConfig().getCategorizationFieldName() != null;
// The extra 1 is the control field
int numberOfFields = job.allInputFields().size() + (includeTokensField ? 1 : 0) + 1;
IndexingStateProcessor stateProcessor = new IndexingStateProcessor(job.getId(), resultsPersisterService, auditor);
ProcessResultsParser<AutodetectResult> resultsParser = new ProcessResultsParser<>(
AutodetectResult.PARSER,
NamedXContentRegistry.EMPTY
);
NativeAutodetectProcess autodetect = new NativeAutodetectProcess(
job.getId(),
nativeController,
processPipes,
numberOfFields,
filesToDelete,
resultsParser,
onProcessCrash
);
try {
autodetect.start(executorService, stateProcessor);
return autodetect;
} catch (IOException | EsRejectedExecutionException e) {
String msg = "Failed to connect to autodetect for job " + job.getId();
logger.error(msg);
try {
IOUtils.close(autodetect);
} catch (IOException ioe) {
logger.error("Can't close autodetect", ioe);
}
throw ExceptionsHelper.serverError(msg, e);
}
}
void createNativeProcess(Job job, AutodetectParams autodetectParams, ProcessPipes processPipes, List<Path> filesToDelete) {
try {
Settings updatedSettings = Settings.builder()
.put(settings)
.put(
AutodetectBuilder.MAX_ANOMALY_RECORDS_SETTING_DYNAMIC.getKey(),
clusterService.getClusterSettings().get(AutodetectBuilder.MAX_ANOMALY_RECORDS_SETTING_DYNAMIC)
)
.build();
AutodetectBuilder autodetectBuilder = new AutodetectBuilder(
job,
filesToDelete,
logger,
env,
updatedSettings,
nativeController,
processPipes
).referencedFilters(autodetectParams.filters()).scheduledEvents(autodetectParams.scheduledEvents());
// if state is null or empty it will be ignored
// else it is used to restore the quantiles
if (autodetectParams.quantiles() != null) {
autodetectBuilder.quantiles(autodetectParams.quantiles());
}
autodetectBuilder.build();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
logger.warn("[{}] Interrupted while launching autodetect", job.getId());
} catch (IOException e) {
String msg = "[" + job.getId() + "] Failed to launch autodetect";
logger.error(msg);
throw ExceptionsHelper.serverError(msg + " on [" + clusterService.getNodeName() + "]", e);
}
}
}
| NativeAutodetectProcessFactory |
java | elastic__elasticsearch | x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/codecs/lucene70/fst/FSTStore.java | {
"start": 1165,
"end": 1389
} | interface ____ extends Accountable {
void init(DataInput in, long numBytes) throws IOException;
long size();
FST.BytesReader getReverseBytesReader();
void writeTo(DataOutput out) throws IOException;
}
| FSTStore |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/core/type/filter/AbstractTypeHierarchyTraversingFilter.java | {
"start": 1187,
"end": 1428
} | interface ____. The algorithm employed uses a succeed-fast
* strategy: if at any time a match is declared, no further processing is
* carried out.
*
* @author Ramnivas Laddad
* @author Mark Fisher
* @since 2.5
*/
public abstract | hierarchy |
java | apache__camel | components/camel-servicenow/camel-servicenow-component/src/main/java/org/apache/camel/component/servicenow/ServiceNowDispatcher.java | {
"start": 1042,
"end": 2189
} | class ____ {
private final Predicate<Exchange> predicate;
private final Processor delegate;
public ServiceNowDispatcher(Predicate<Exchange> predicate, Processor delegate) {
this.predicate = ObjectHelper.notNull(predicate, "predicate");
this.delegate = ObjectHelper.notNull(delegate, "delegate");
}
public boolean match(Exchange exchange) {
return predicate.test(exchange);
}
public void process(Exchange exchange) throws Exception {
delegate.process(exchange);
}
// ********************
// Helpers
// ********************
public static ServiceNowDispatcher on(final String action, final String subject, final Processor delegate) {
return new ServiceNowDispatcher(e -> matches(e.getIn(), action, subject), delegate);
}
public static boolean matches(Message in, String action, final String subject) {
return ObjectHelper.equal(action, in.getHeader(ServiceNowConstants.ACTION, String.class), true)
&& ObjectHelper.equal(subject, in.getHeader(ServiceNowConstants.ACTION_SUBJECT, String.class), true);
}
}
| ServiceNowDispatcher |
java | elastic__elasticsearch | build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckForbiddenApisTask.java | {
"start": 10484,
"end": 13446
} | class ____ to be excluded from checking.
*/
@Override
@Internal
public Set<String> getExcludes() {
return getPatternSet().getExcludes();
}
@Override
public CheckForbiddenApisTask setExcludes(Iterable<String> excludes) {
getPatternSet().setExcludes(excludes);
return this;
}
@Override
public CheckForbiddenApisTask exclude(String... arg0) {
getPatternSet().exclude(arg0);
return this;
}
@Override
public CheckForbiddenApisTask exclude(Iterable<String> arg0) {
getPatternSet().exclude(arg0);
return this;
}
@Override
public CheckForbiddenApisTask exclude(Spec<FileTreeElement> arg0) {
getPatternSet().exclude(arg0);
return this;
}
@Override
public CheckForbiddenApisTask exclude(@SuppressWarnings("rawtypes") Closure arg0) {
getPatternSet().exclude(arg0);
return this;
}
@Override
public CheckForbiddenApisTask include(String... arg0) {
getPatternSet().include(arg0);
return this;
}
@Override
public CheckForbiddenApisTask include(Iterable<String> arg0) {
getPatternSet().include(arg0);
return this;
}
@Override
public CheckForbiddenApisTask include(Spec<FileTreeElement> arg0) {
getPatternSet().include(arg0);
return this;
}
@Override
public CheckForbiddenApisTask include(@SuppressWarnings("rawtypes") Closure arg0) {
getPatternSet().include(arg0);
return this;
}
/** Returns the classes to check. */
@InputFiles
@SkipWhenEmpty
@IgnoreEmptyDirectories
@PathSensitive(PathSensitivity.RELATIVE)
public FileTree getClassFiles() {
return getClassesDirs().getAsFileTree().matching(getPatternSet());
}
@Inject
public abstract WorkerExecutor getWorkerExecutor();
/** Executes the forbidden apis task. */
@TaskAction
public void checkForbidden() {
WorkQueue workQueue = getWorkerExecutor().noIsolation();
workQueue.submit(ForbiddenApisCheckWorkAction.class, parameters -> {
parameters.getClasspath().setFrom(getClasspath());
parameters.getClassDirectories().setFrom(getClassesDirs());
parameters.getClassFiles().from(getClassFiles().getFiles());
parameters.getSuppressAnnotations().set(getSuppressAnnotations());
parameters.getBundledSignatures().set(getBundledSignatures());
parameters.getSignatures().set(getSignatures());
parameters.getTargetCompatibility().set(getTargetCompatibility());
parameters.getIgnoreFailures().set(getIgnoreFailures());
parameters.getIgnoreMissingClasses().set(getIgnoreMissingClasses());
parameters.getSuccessMarker().set(getSuccessMarker());
parameters.getSignaturesFiles().from(getSignaturesFiles());
});
}
abstract static | files |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/async/utils/AsyncBiFunction.java | {
"start": 1237,
"end": 1539
} | interface ____ expected to perform an
* asynchronous operation that takes two parameters of types T and P, and
* returns a result of type R. The asynchronous operation is typically
* represented as a {@link CompletableFuture} of the result type R.</p>
*
* <p>For example, an implementation of this | are |
java | apache__logging-log4j2 | src/site/antora/modules/ROOT/examples/manual/webapp/AsyncServlet.java | {
"start": 1297,
"end": 2292
} | class ____ extends HttpServlet {
private final Logger logger = LogManager.getLogger();
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp) {
// tag::manual[]
AsyncContext asyncContext = req.startAsync();
Log4jWebSupport webSupport = WebLoggerContextUtils.getWebLifeCycle(getServletContext());
asyncContext.start(() -> {
try {
webSupport.setLoggerContext();
// Put your logic here
} finally {
webSupport.clearLoggerContext();
}
});
// end::manual[]
}
@Override
protected void doPost(HttpServletRequest req, HttpServletResponse resp) {
// tag::automatic[]
AsyncContext asyncContext = req.startAsync();
asyncContext.start(WebLoggerContextUtils.wrapExecutionContext(getServletContext(), () -> {
// Put your logic here
}));
// end::automatic[]
}
}
| AsyncServlet |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java | {
"start": 6043,
"end": 91051
} | class ____ extends AbstractLifecycleComponent implements ClusterStatePublisher {
private static final Logger logger = LogManager.getLogger(Coordinator.class);
// the timeout before emitting an info log about a slow-running publication
public static final Setting<TimeValue> PUBLISH_INFO_TIMEOUT_SETTING = Setting.timeSetting(
"cluster.publish.info_timeout",
TimeValue.timeValueMillis(10000),
TimeValue.timeValueMillis(1),
Setting.Property.NodeScope
);
// the timeout for the publication of each value
public static final Setting<TimeValue> PUBLISH_TIMEOUT_SETTING = Setting.timeSetting(
"cluster.publish.timeout",
TimeValue.timeValueMillis(30000),
TimeValue.timeValueMillis(1),
Setting.Property.NodeScope
);
public static final Setting<TimeValue> SINGLE_NODE_CLUSTER_SEED_HOSTS_CHECK_INTERVAL_SETTING = Setting.timeSetting(
"cluster.discovery_configuration_check.interval",
TimeValue.timeValueMillis(30000),
TimeValue.timeValueMillis(1),
Setting.Property.NodeScope
);
public static final String COMMIT_STATE_ACTION_NAME = "internal:cluster/coordination/commit_state";
private final Settings settings;
private final boolean singleNodeDiscovery;
private final ElectionStrategy electionStrategy;
private final TransportService transportService;
private final Executor clusterCoordinationExecutor;
private final MasterService masterService;
private final AllocationService allocationService;
private final JoinHelper joinHelper;
private final JoinValidationService joinValidationService;
private final MasterServiceTaskQueue<NodeLeftExecutor.Task> nodeLeftQueue;
private final Supplier<CoordinationState.PersistedState> persistedStateSupplier;
private final NoMasterBlockService noMasterBlockService;
final Object mutex = new Object(); // package-private to allow tests to call methods that assert that the mutex is held
private final SetOnce<CoordinationState> coordinationState = new SetOnce<>(); // initialized on start-up (see doStart)
private volatile ClusterState applierState; // the state that should be exposed to the cluster state applier
private final PeerFinder peerFinder;
private final PreVoteCollector preVoteCollector;
private final Random random;
private final ElectionSchedulerFactory electionSchedulerFactory;
private final SeedHostsResolver configuredHostsResolver;
private final TimeValue publishTimeout;
private final TimeValue publishInfoTimeout;
private final TimeValue singleNodeClusterSeedHostsCheckInterval;
@Nullable
private Scheduler.Cancellable singleNodeClusterChecker = null;
private final PublicationTransportHandler publicationHandler;
private final LeaderChecker leaderChecker;
private final FollowersChecker followersChecker;
private final ClusterApplier clusterApplier;
private final Collection<BiConsumer<DiscoveryNode, ClusterState>> onJoinValidators;
@Nullable
private volatile Releasable electionScheduler; // volatile so we can check if it's there from other threads
@Nullable
private Releasable prevotingRound;
private long maxTermSeen;
private final Reconfigurator reconfigurator;
private final ClusterBootstrapService clusterBootstrapService;
private final LagDetector lagDetector;
private final ClusterFormationFailureHelper clusterFormationFailureHelper;
private final JoinReasonService joinReasonService;
private final CompatibilityVersions compatibilityVersions;
private Mode mode;
private Optional<DiscoveryNode> lastKnownLeader;
private Optional<Join> lastJoin;
private JoinHelper.JoinAccumulator joinAccumulator;
private Optional<CoordinatorPublication> currentPublication = Optional.empty();
private final NodeHealthService nodeHealthService;
private final List<PeerFinderListener> peerFinderListeners;
private final LeaderHeartbeatService leaderHeartbeatService;
private final ClusterService clusterService;
/**
* @param nodeName The name of the node, used to name the {@link java.util.concurrent.ExecutorService} of the {@link SeedHostsResolver}.
* @param onJoinValidators A collection of join validators to restrict which nodes may join the cluster.
*/
@SuppressWarnings("this-escape")
public Coordinator(
String nodeName,
Settings settings,
ClusterSettings clusterSettings,
TransportService transportService,
Client client,
NamedWriteableRegistry namedWriteableRegistry,
AllocationService allocationService,
MasterService masterService,
Supplier<CoordinationState.PersistedState> persistedStateSupplier,
SeedHostsProvider seedHostsProvider,
ClusterApplier clusterApplier,
Collection<BiConsumer<DiscoveryNode, ClusterState>> onJoinValidators,
Random random,
RerouteService rerouteService,
ElectionStrategy electionStrategy,
NodeHealthService nodeHealthService,
CircuitBreakerService circuitBreakerService,
Reconfigurator reconfigurator,
LeaderHeartbeatService leaderHeartbeatService,
PreVoteCollector.Factory preVoteCollectorFactory,
CompatibilityVersions compatibilityVersions,
FeatureService featureService,
ClusterService clusterService
) {
this.settings = settings;
this.transportService = transportService;
this.clusterCoordinationExecutor = transportService.getThreadPool().executor(Names.CLUSTER_COORDINATION);
this.masterService = masterService;
this.allocationService = allocationService;
this.onJoinValidators = NodeJoinExecutor.addBuiltInJoinValidators(onJoinValidators);
this.singleNodeDiscovery = DiscoveryModule.isSingleNodeDiscovery(settings);
this.electionStrategy = electionStrategy;
this.joinReasonService = new JoinReasonService(transportService.getThreadPool().relativeTimeInMillisSupplier());
this.joinHelper = new JoinHelper(
allocationService,
masterService,
clusterApplier,
transportService,
this::getCurrentTerm,
this::handleJoinRequest,
this::joinLeaderInTerm,
rerouteService,
nodeHealthService,
joinReasonService,
circuitBreakerService,
reconfigurator::maybeReconfigureAfterNewMasterIsElected,
this::getLatestStoredStateAfterWinningAnElection,
compatibilityVersions,
featureService
);
this.joinValidationService = new JoinValidationService(
settings,
transportService,
namedWriteableRegistry,
this::getStateForJoinValidationService,
() -> getLastAcceptedState().metadata(),
this.onJoinValidators
);
this.persistedStateSupplier = persistedStateSupplier;
this.noMasterBlockService = new NoMasterBlockService(settings, clusterSettings);
this.lastKnownLeader = Optional.empty();
this.lastJoin = Optional.empty();
this.joinAccumulator = new InitialJoinAccumulator();
this.publishTimeout = PUBLISH_TIMEOUT_SETTING.get(settings);
this.publishInfoTimeout = PUBLISH_INFO_TIMEOUT_SETTING.get(settings);
this.singleNodeClusterSeedHostsCheckInterval = SINGLE_NODE_CLUSTER_SEED_HOSTS_CHECK_INTERVAL_SETTING.get(settings);
this.random = random;
this.electionSchedulerFactory = new ElectionSchedulerFactory(settings, random, transportService.getThreadPool());
this.preVoteCollector = preVoteCollectorFactory.create(
transportService,
this::startElection,
this::updateMaxTermSeen,
electionStrategy,
nodeHealthService,
leaderHeartbeatService
);
configuredHostsResolver = new SeedHostsResolver(nodeName, settings, transportService, seedHostsProvider);
this.peerFinder = new CoordinatorPeerFinder(
settings,
transportService,
new HandshakingTransportAddressConnector(settings, transportService),
configuredHostsResolver
);
transportService.registerRequestHandler(
COMMIT_STATE_ACTION_NAME,
this.clusterCoordinationExecutor,
false,
false,
ApplyCommitRequest::new,
(request, channel, task) -> handleApplyCommit(request, new ChannelActionListener<>(channel).map(r -> Empty.INSTANCE))
);
this.publicationHandler = new PublicationTransportHandler(transportService, namedWriteableRegistry, this::handlePublishRequest);
this.leaderChecker = new LeaderChecker(settings, transportService, this::onLeaderFailure, nodeHealthService);
this.followersChecker = new FollowersChecker(
settings,
transportService,
this::onFollowerCheckRequest,
this::removeNode,
nodeHealthService
);
this.nodeLeftQueue = masterService.createTaskQueue("node-left", Priority.IMMEDIATE, new NodeLeftExecutor(allocationService));
this.clusterApplier = clusterApplier;
masterService.setClusterStateSupplier(this::getStateForMasterService);
this.reconfigurator = reconfigurator;
this.clusterBootstrapService = new ClusterBootstrapService(
settings,
transportService,
this::getFoundPeers,
this::isInitialConfigurationSet,
this::setInitialConfiguration
);
this.lagDetector = new LagDetector(
settings,
transportService.getThreadPool(),
new LagDetector.HotThreadsLoggingLagListener(
transportService,
client,
(node, appliedVersion, expectedVersion) -> removeNode(node, "lagging")
),
transportService::getLocalNode
);
this.clusterFormationFailureHelper = new ClusterFormationFailureHelper(
settings,
this::getClusterFormationState,
transportService.getThreadPool(),
joinHelper::logLastFailedJoinAttempt
);
this.nodeHealthService = nodeHealthService;
this.peerFinderListeners = new CopyOnWriteArrayList<>();
this.peerFinderListeners.add(clusterBootstrapService);
this.leaderHeartbeatService = leaderHeartbeatService;
this.compatibilityVersions = compatibilityVersions;
this.clusterService = clusterService;
}
/**
* This method returns an object containing information about why cluster formation failed, which can be useful in troubleshooting.
* @return Information about why cluster formation failed
*/
public ClusterFormationState getClusterFormationState() {
return new ClusterFormationState(
settings,
getLastAcceptedState(), // doesn't care about blocks or the current master node so no need for getStateForMasterService
peerFinder.getLastResolvedAddresses(),
Stream.concat(Stream.of(getLocalNode()), StreamSupport.stream(peerFinder.getFoundPeers().spliterator(), false)).toList(),
peerFinder.getMastersOfPeers(),
getCurrentTerm(),
electionStrategy,
nodeHealthService.getHealth(),
joinHelper.getInFlightJoinStatuses()
);
}
private void onLeaderFailure(Supplier<String> message, Exception e) {
synchronized (mutex) {
if (mode != Mode.CANDIDATE) {
assert lastKnownLeader.isPresent();
if (logger.isDebugEnabled()) {
// TODO this is a workaround for log4j's Supplier. We should remove this, once using ES logging api
logger.info(message::get, e);
} else {
logger.info(message::get);
}
}
becomeCandidate("onLeaderFailure");
}
}
private void removeNode(DiscoveryNode discoveryNode, String reason) {
synchronized (mutex) {
if (mode == Mode.LEADER) {
var task = new NodeLeftExecutor.Task(discoveryNode, reason, () -> joinReasonService.onNodeRemoved(discoveryNode, reason));
nodeLeftQueue.submitTask("node-left", task, null);
}
}
}
void onFollowerCheckRequest(FollowerCheckRequest followerCheckRequest) {
synchronized (mutex) {
ensureTermAtLeast(followerCheckRequest.getSender(), followerCheckRequest.getTerm());
if (getCurrentTerm() != followerCheckRequest.getTerm()) {
logger.trace("onFollowerCheckRequest: current term is [{}], rejecting {}", getCurrentTerm(), followerCheckRequest);
throw new CoordinationStateRejectedException(
"onFollowerCheckRequest: current term is [" + getCurrentTerm() + "], rejecting " + followerCheckRequest
);
}
// check if node has accepted a state in this term already. If not, this node has never committed a cluster state in this
// term and therefore never removed the NO_MASTER_BLOCK for this term. This logic ensures that we quickly turn a node
// into follower, even before receiving the first cluster state update, but also don't have to deal with the situation
// where we would possibly have to remove the NO_MASTER_BLOCK from the applierState when turning a candidate back to follower.
if (getLastAcceptedState().term() < getCurrentTerm()) {
becomeFollower("onFollowerCheckRequest", followerCheckRequest.getSender());
} else if (mode == Mode.FOLLOWER) {
logger.trace("onFollowerCheckRequest: responding successfully to {}", followerCheckRequest);
} else if (joinHelper.isJoinPending()) {
logger.trace("onFollowerCheckRequest: rejoining master, responding successfully to {}", followerCheckRequest);
} else {
logger.trace("onFollowerCheckRequest: received check from faulty master, rejecting {}", followerCheckRequest);
throw new CoordinationStateRejectedException(
"onFollowerCheckRequest: received check from faulty master, rejecting " + followerCheckRequest
);
}
}
}
private void handleApplyCommit(ApplyCommitRequest applyCommitRequest, ActionListener<Void> applyListener) {
synchronized (mutex) {
logger.trace("handleApplyCommit: applying commit {}", applyCommitRequest);
coordinationState.get().handleCommit(applyCommitRequest);
final ClusterState committedState = hideStateIfNotRecovered(coordinationState.get().getLastAcceptedState());
applierState = mode == Mode.CANDIDATE ? clusterStateWithNoMasterBlock(committedState) : committedState;
updateSingleNodeClusterChecker(); // in case nodes increase/decrease, possibly update the single-node checker
if (applyCommitRequest.getSourceNode().equals(getLocalNode())) {
// master node applies the committed state at the end of the publication process, not here.
applyListener.onResponse(null);
} else {
clusterApplier.onNewClusterState(applyCommitRequest.toString(), () -> applierState, applyListener.map(r -> {
onClusterStateApplied();
return r;
}));
}
}
}
private void onClusterStateApplied() {
assert ThreadPool.assertCurrentThreadPool(ClusterApplierService.CLUSTER_UPDATE_THREAD_NAME);
synchronized (mutex) {
if (mode != Mode.CANDIDATE) {
joinHelper.onClusterStateApplied();
closeElectionScheduler();
peerFinder.closePeers();
}
}
if (getLocalNode().isMasterNode()) {
joinReasonService.onClusterStateApplied(applierState.nodes());
}
}
PublishWithJoinResponse handlePublishRequest(PublishRequest publishRequest) {
assert ThreadPool.assertCurrentThreadPool(Names.CLUSTER_COORDINATION);
assert publishRequest.getAcceptedState().nodes().getLocalNode().equals(getLocalNode())
: publishRequest.getAcceptedState().nodes().getLocalNode() + " != " + getLocalNode();
final ClusterState newClusterState = publishRequest.getAcceptedState();
if (newClusterState.nodes().isLocalNodeElectedMaster() == false) {
// background initialization on the current master has been started by the master service already
newClusterState.initializeAsync(transportService.getThreadPool().generic());
}
synchronized (mutex) {
final DiscoveryNode sourceNode = newClusterState.nodes().getMasterNode();
logger.trace("handlePublishRequest: handling [{}] from [{}]", publishRequest, sourceNode);
if (sourceNode.equals(getLocalNode()) && mode != Mode.LEADER) {
// Rare case in which we stood down as leader between starting this publication and receiving it ourselves. The publication
// is already failed so there is no point in proceeding.
throw new CoordinationStateRejectedException("no longer leading this publication's term: " + publishRequest);
}
final ClusterState localState = coordinationState.get().getLastAcceptedState();
if (localState.metadata().clusterUUIDCommitted()
&& localState.metadata().clusterUUID().equals(newClusterState.metadata().clusterUUID()) == false) {
logger.warn(
"received cluster state from {} with a different cluster uuid {} than local cluster uuid {}, rejecting",
sourceNode,
newClusterState.metadata().clusterUUID(),
localState.metadata().clusterUUID()
);
throw new CoordinationStateRejectedException(
"received cluster state from "
+ sourceNode
+ " with a different cluster uuid "
+ newClusterState.metadata().clusterUUID()
+ " than local cluster uuid "
+ localState.metadata().clusterUUID()
+ ", rejecting"
);
}
if (newClusterState.term() > localState.term()) {
// only do join validation if we have not accepted state from this master yet
onJoinValidators.forEach(a -> a.accept(getLocalNode(), newClusterState));
}
ensureTermAtLeast(sourceNode, newClusterState.term());
final PublishResponse publishResponse = coordinationState.get().handlePublishRequest(publishRequest);
if (sourceNode.equals(getLocalNode())) {
preVoteCollector.update(getPreVoteResponse(), getLocalNode());
} else {
becomeFollower("handlePublishRequest", sourceNode); // also updates preVoteCollector
}
return new PublishWithJoinResponse(publishResponse, joinWithDestination(lastJoin, sourceNode, newClusterState.term()));
}
}
private static Optional<Join> joinWithDestination(Optional<Join> lastJoin, DiscoveryNode leader, long term) {
if (lastJoin.isPresent() && lastJoin.get().masterCandidateMatches(leader) && lastJoin.get().term() == term) {
return lastJoin;
}
return Optional.empty();
}
private void closePrevotingRound() {
if (prevotingRound != null) {
prevotingRound.close();
prevotingRound = null;
}
}
/**
* Updates {@link #maxTermSeen} if greater.
* <p>
* Every time a new term is found, either from another node requesting election, or this node trying to run for election, always update
* the max term number. The max term may not reflect an actual election, but rather an election attempt by some node in the
* cluster.
*/
private void updateMaxTermSeen(final long term) {
synchronized (mutex) {
maxTermSeen = Math.max(maxTermSeen, term);
final long currentTerm = getCurrentTerm();
if (mode == Mode.LEADER && maxTermSeen > currentTerm) {
// Bump our term. However, if there is a publication in flight then doing so would cancel the publication, so don't do that
// since we check whether a term bump is needed at the end of the publication too.
if (publicationInProgress()) {
logger.debug("updateMaxTermSeen: maxTermSeen = {} > currentTerm = {}, enqueueing term bump", maxTermSeen, currentTerm);
} else {
try {
logger.debug("updateMaxTermSeen: maxTermSeen = {} > currentTerm = {}, bumping term", maxTermSeen, currentTerm);
ensureTermAtLeast(getLocalNode(), maxTermSeen);
startElection();
} catch (Exception e) {
logger.warn(() -> format("failed to bump term to %s", maxTermSeen), e);
becomeCandidate("updateMaxTermSeen");
}
}
}
}
}
private long getTermForNewElection() {
assert Thread.holdsLock(mutex);
return Math.max(getCurrentTerm(), maxTermSeen) + 1;
}
private void startElection() {
synchronized (mutex) {
// The preVoteCollector is only active while we are candidate, but it does not call this method with synchronisation, so we have
// to check our mode again here.
if (mode == Mode.CANDIDATE) {
final var nodeEligibility = localNodeMayWinElection(getLastAcceptedState(), electionStrategy);
if (nodeEligibility.mayWin() == false) {
assert nodeEligibility.reason().isEmpty() == false;
logger.trace(
"skip election as local node may not win it ({}): {}",
nodeEligibility.reason(),
getLastAcceptedState().coordinationMetadata()
);
return;
}
final var electionTerm = getTermForNewElection();
logger.debug("starting election for {} in term {}", getLocalNode(), electionTerm);
broadcastStartJoinRequest(getLocalNode(), electionTerm, getDiscoveredNodes());
}
}
}
/**
* Broadcasts a request to all 'discoveredNodes' in the cluster to elect 'candidateMasterNode' as the new master.
*
* @param candidateMasterNode the node running for election
* @param term the new proposed master term
* @param discoveredNodes all the nodes to which to send the request
*/
private void broadcastStartJoinRequest(DiscoveryNode candidateMasterNode, long term, List<DiscoveryNode> discoveredNodes) {
electionStrategy.onNewElection(candidateMasterNode, term, new ActionListener<>() {
@Override
public void onResponse(StartJoinRequest startJoinRequest) {
discoveredNodes.forEach(node -> joinHelper.sendStartJoinRequest(startJoinRequest, node));
}
@Override
public void onFailure(Exception e) {
logger.log(
e instanceof CoordinationStateRejectedException ? Level.DEBUG : Level.WARN,
Strings.format("election attempt for [%s] in term [%d] failed", candidateMasterNode, term),
e
);
}
});
}
/**
* Attempts to abdicate master position to a new master-eligible node in the cluster.
* Broadcasts {@link StartJoinRequest} for {@param newMaster} to each member of the cluster.
*/
private void abdicateTo(DiscoveryNode newMaster) {
assert Thread.holdsLock(mutex);
assert mode == Mode.LEADER : "expected to be leader on abdication but was " + mode;
assert newMaster.isMasterNode() : "should only abdicate to master-eligible node but was " + newMaster;
final var electionTerm = getTermForNewElection();
logger.info("abdicating to {} with term {}", newMaster, electionTerm);
broadcastStartJoinRequest(newMaster, electionTerm, getLastAcceptedState().nodes().mastersFirstStream().toList());
// handling of start join messages on the local node will be dispatched to the coordination thread-pool
assert mode == Mode.LEADER : "should still be leader after sending abdication messages " + mode;
// explicitly move node to candidate state so that the next cluster state update task yields an onNoLongerMaster event
becomeCandidate("after abdicating to " + newMaster);
}
private static NodeEligibility localNodeMayWinElection(ClusterState lastAcceptedState, ElectionStrategy electionStrategy) {
final DiscoveryNode localNode = lastAcceptedState.nodes().getLocalNode();
assert localNode != null;
return electionStrategy.nodeMayWinElection(lastAcceptedState, localNode);
}
private Optional<Join> ensureTermAtLeast(DiscoveryNode sourceNode, long targetTerm) {
assert Thread.holdsLock(mutex) : "Coordinator mutex not held";
if (getCurrentTerm() < targetTerm) {
return Optional.of(joinLeaderInTerm(new StartJoinRequest(sourceNode, targetTerm)));
}
return Optional.empty();
}
private Join joinLeaderInTerm(StartJoinRequest startJoinRequest) {
synchronized (mutex) {
logger.debug("joinLeaderInTerm: for [{}] with term {}", startJoinRequest.getMasterCandidateNode(), startJoinRequest.getTerm());
final Join join = coordinationState.get().handleStartJoin(startJoinRequest);
lastJoin = Optional.of(join);
peerFinder.setCurrentTerm(getCurrentTerm());
if (mode != Mode.CANDIDATE) {
becomeCandidate("joinLeaderInTerm"); // updates followersChecker and preVoteCollector
} else {
followersChecker.updateFastResponseState(getCurrentTerm(), mode);
preVoteCollector.update(getPreVoteResponse(), null);
}
return join;
}
}
private void handleJoinRequest(JoinRequest joinRequest, ActionListener<Void> joinListener) {
assert Thread.holdsLock(mutex) == false;
DiscoveryNode masterNode = getLocalNode();
assert masterNode.isMasterNode() : getLocalNode() + " received a join but is not master-eligible";
logger.trace("handleJoinRequest: as {}, handling {}", mode, joinRequest);
if (singleNodeDiscovery && joinRequest.getSourceNode().equals(getLocalNode()) == false) {
joinListener.onFailure(
new IllegalStateException(
"cannot join node with ["
+ DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey()
+ "] set to ["
+ DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE
+ "] discovery"
)
);
return;
}
// Store the current term so we can check later whether a new master has been elected
final long currentTerm = getCurrentTerm();
transportService.connectToNode(joinRequest.getSourceNode(), new ActionListener<>() {
@Override
public void onResponse(Releasable response) {
String joiningNode = joinRequest.getSourceNode().toString();
SubscribableListener
// Validates the join request: can the remote node deserialize our cluster state and does it respond to pings?
.<Void>newForked(l -> validateJoinRequest(joinRequest, l))
// Adds the joining node to the cluster state
.<Void>andThen(l -> processJoinRequest(joinRequest, l.delegateResponse((ll, e) -> {
// #ES-11449
if (e instanceof FailedToCommitClusterStateException) {
// The commit failed (i.e. master is failing over) but this does not imply that the join has actually failed:
// the next master may have already accepted the state that we just published and will therefore include the
// joining node in its future states too. Thus, we need to wait for the next committed state before we know the
// eventual outcome, and we need to wait for that before we can release (our ref to) the connection and complete
// the listener.
logger.debug(
"the cluster state update adding {} to the cluster was not committed to all nodes, "
+ "and {} will be stepping down as master. However, the next master may have already committed the "
+ "cluster state we've just published and will therefore include {} in its future states too. "
+ "We will keep the connection to {} open until the next published cluster state update confirms "
+ "whether {} has been added to the cluster",
joiningNode,
masterNode.getName(),
joiningNode,
joiningNode,
joiningNode
);
// NB we are on the master update thread here at the end of processing the failed cluster state update, so this
// all happens before any cluster state update that re-elects a master
assert ThreadPool.assertCurrentThreadPool(MasterService.MASTER_UPDATE_THREAD_NAME);
final ClusterStateListener clusterStateListener = new ClusterStateListener() {
@Override
public void clusterChanged(ClusterChangedEvent event) {
final var discoveryNodes = event.state().nodes();
// Keep the connection open until the next committed state by the next elected master
if (discoveryNodes.getMasterNode() != null && event.state().term() > currentTerm) {
// Remove this listener to avoid memory leaks
clusterService.removeListener(this);
if (discoveryNodes.nodeExists(joinRequest.getSourceNode().getId())) {
logger.debug(
"node {} was added to the cluster in the next cluster state update",
joinRequest.getSourceNode()
);
ll.onResponse(null);
} else {
logger.debug(
"node {} was not added to the cluster in the next cluster state update",
joinRequest.getSourceNode()
);
ll.onFailure(e);
}
}
}
};
clusterService.addListener(clusterStateListener);
clusterStateListener.clusterChanged(
new ClusterChangedEvent(
"Checking if another master has been elected since "
+ joinRequest.getSourceNode().getName()
+ " attempted to join cluster",
clusterService.state(),
clusterService.state()
)
);
} else {
ll.onFailure(e);
}
})))
// Whatever the outcome, release (our ref to) the connection we just opened and notify the joining node.
.addListener(ActionListener.runBefore(joinListener, () -> {
logger.debug("closing the connection to {}", joinRequest.getSourceNode());
Releasables.close(response);
}));
}
@Override
public void onFailure(Exception e) {
logger.warn(
() -> format(
"received join request from [%s] but could not connect back to the joining node",
joinRequest.getSourceNode()
),
e
);
joinListener.onFailure(
// NodeDisconnectedException mainly to suppress uninteresting stack trace
new NodeDisconnectedException(
joinRequest.getSourceNode(),
String.format(
Locale.ROOT,
"failure when opening connection back from [%s] to [%s]",
getLocalNode().descriptionWithoutAttributes(),
joinRequest.getSourceNode().descriptionWithoutAttributes()
),
JoinHelper.JOIN_ACTION_NAME,
e
)
);
}
});
}
/**
* Validates a request to join the new cluster. Runs on the candidate node running for election to master.
*/
private void validateJoinRequest(JoinRequest joinRequest, ActionListener<Void> validateListener) {
// Before letting the node join the cluster, ensure:
// - it's a new enough version to pass the version barrier
// - we have a healthy STATE channel to the node
// - if we're already master that it can make sense of the current cluster state.
// - we have a healthy PING channel to the node
try (var listeners = new RefCountingListener(validateListener)) {
// The join will be rejected if any of these steps fail, but we wait them all to complete, particularly state validation, since
// the node will retry and we don't want lots of cluster states in flight.
ActionListener.completeWith(listeners.acquire(), () -> {
final ClusterState stateForJoinValidation = getStateForJoinValidationService();
if (stateForJoinValidation == null) {
return null;
}
assert stateForJoinValidation.nodes().isLocalNodeElectedMaster();
onJoinValidators.forEach(a -> a.accept(joinRequest.getSourceNode(), stateForJoinValidation));
if (stateForJoinValidation.getBlocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK) == false) {
// We do this in a couple of places including the cluster update thread. This one here is really just best effort to
// ensure we fail as fast as possible.
NodeJoinExecutor.ensureVersionBarrier(
joinRequest.getSourceNode().getVersion(),
stateForJoinValidation.getNodes().getMinNodeVersion()
);
}
sendJoinValidate(joinRequest.getSourceNode(), listeners.acquire());
return null;
});
if (listeners.isFailing() == false) {
// We may not have sent a state for validation, so just ping both channel types.
sendJoinPing(joinRequest.getSourceNode(), TransportRequestOptions.Type.PING, listeners.acquire());
sendJoinPing(joinRequest.getSourceNode(), TransportRequestOptions.Type.STATE, listeners.acquire());
}
} catch (Exception e) {
logger.error("unexpected exception in validateJoinRequest", e);
assert false : e;
}
}
private void sendJoinValidate(DiscoveryNode discoveryNode, ActionListener<Void> listener) {
joinValidationService.validateJoin(discoveryNode, listener.delegateResponse((delegate, e) -> {
logger.warn(() -> "failed to validate incoming join request from node [" + discoveryNode + "]", e);
delegate.onFailure(
new IllegalStateException(
String.format(
Locale.ROOT,
"failure when sending a join validation request from [%s] to [%s]",
getLocalNode().descriptionWithoutAttributes(),
discoveryNode.descriptionWithoutAttributes()
),
e
)
);
}));
}
private void sendJoinPing(DiscoveryNode discoveryNode, TransportRequestOptions.Type channelType, ActionListener<Void> listener) {
transportService.sendRequest(
discoveryNode,
JoinHelper.JOIN_PING_ACTION_NAME,
new JoinHelper.JoinPingRequest(),
TransportRequestOptions.of(null, channelType),
TransportResponseHandler.empty(clusterCoordinationExecutor, listener.delegateResponse((l, e) -> {
logger.warn(() -> format("failed to ping joining node [%s] on channel type [%s]", discoveryNode, channelType), e);
listener.onFailure(
new IllegalStateException(
String.format(
Locale.ROOT,
"failure when sending a join ping request from [%s] to [%s]",
getLocalNode().descriptionWithoutAttributes(),
discoveryNode.descriptionWithoutAttributes()
),
e
)
);
}))
);
}
/**
 * Processes the request to join the cluster. Received by the node running for election to master.
 *
 * @param joinRequest  the incoming join, possibly carrying a vote ({@link Join}) for this node
 * @param joinListener completed once the join has been handled (successfully or not)
 */
private void processJoinRequest(JoinRequest joinRequest, ActionListener<Void> joinListener) {
    assert Transports.assertNotTransportThread("blocking on coordinator mutex and maybe doing IO to increase term");
    final Optional<Join> optionalJoin = joinRequest.getOptionalJoin();
    try {
        synchronized (mutex) {
            updateMaxTermSeen(joinRequest.getTerm());
            final CoordinationState localCoordinationState = coordinationState.get();
            // capture whether the election was already won BEFORE handling this join, so that the
            // transition to leader triggered by this particular vote can be detected below
            final boolean previousElectionWon = localCoordinationState.electionWon()
                && optionalJoin.stream().allMatch(j -> j.term() <= getCurrentTerm());
            optionalJoin.ifPresent(this::handleJoin);
            joinAccumulator.handleJoinRequest(
                joinRequest.getSourceNode(),
                joinRequest.getCompatibilityVersions(),
                joinRequest.getFeatures(),
                joinListener
            );
            // this join may have been the vote that won the election
            if (previousElectionWon == false && localCoordinationState.electionWon()) {
                becomeLeader();
            }
        }
    } catch (Exception e) {
        joinListener.onFailure(e);
    }
}
/**
 * Starts or stops the periodic single-node-cluster checker depending on the current mode and
 * cluster size. When this node is the leader of a one-node cluster, the checker repeatedly warns
 * if seed hosts are configured (since a fully-formed cluster will never discover them); in any
 * other situation the checker is cancelled. Must be called while holding the coordinator mutex.
 */
private void updateSingleNodeClusterChecker() {
    assert Thread.holdsLock(mutex) : "Coordinator mutex not held";
    if (mode == Mode.LEADER && applierState.nodes().size() == 1) {
        if (singleNodeClusterChecker == null) {
            // Make a single-node checker if none exists
            singleNodeClusterChecker = transportService.getThreadPool().scheduleWithFixedDelay(new Runnable() {
                @Override
                public void run() {
                    // re-check the condition under the mutex; the cluster may have grown or we may
                    // have stepped down between scheduling and execution
                    synchronized (mutex) {
                        if (mode != Mode.LEADER || applierState.nodes().size() > 1) {
                            return;
                        }
                    }
                    if (DISCOVERY_SEED_HOSTS_SETTING.exists(settings)
                        && DISCOVERY_SEED_HOSTS_SETTING.get(settings).isEmpty() == false) {
                        logger.warn(
                            """
                                This node is a fully-formed single-node cluster with cluster UUID [{}], but it is configured as if to \
                                discover other nodes and form a multi-node cluster via the [{}={}] setting. Fully-formed clusters do \
                                not attempt to discover other nodes, and nodes with different cluster UUIDs cannot belong to the same \
                                cluster. The cluster UUID persists across restarts and can only be changed by deleting the contents of \
                                the node's data path(s). Remove the discovery configuration to suppress this message. See [{}] for \
                                more information.""",
                            applierState.metadata().clusterUUID(),
                            DISCOVERY_SEED_HOSTS_SETTING.getKey(),
                            DISCOVERY_SEED_HOSTS_SETTING.get(settings),
                            ReferenceDocs.FORMING_SINGLE_NODE_CLUSTERS
                        );
                    }
                }
                @Override
                public String toString() {
                    return "single-node cluster checker";
                }
            }, singleNodeClusterSeedHostsCheckInterval, clusterCoordinationExecutor);
        }
        return;
    }
    // In case of a multi-node cluster, there is no need for the single-node checker so cancel it
    if (singleNodeClusterChecker != null) {
        singleNodeClusterChecker.cancel();
        singleNodeClusterChecker = null;
    }
}
/**
 * Transitions this node into CANDIDATE mode: cancels any active publication, resets the join
 * accumulator, reactivates peer finding, and tears down leader/follower checking. Safe to call
 * when already a candidate (only the pre-vote/single-node-checker state is refreshed then).
 * Must be called while holding the coordinator mutex.
 *
 * @param method a description of the caller, used for logging and task naming
 */
void becomeCandidate(String method) {
    assert Thread.holdsLock(mutex) : "Coordinator mutex not held";
    logger.debug(
        "{}: coordinator becoming CANDIDATE in term {} (was {}, lastKnownLeader was [{}])",
        method,
        getCurrentTerm(),
        mode,
        lastKnownLeader
    );
    if (mode != Mode.CANDIDATE) {
        final Mode prevMode = mode;
        mode = Mode.CANDIDATE;
        cancelActivePublication("become candidate: " + method);
        joinAccumulator.close(mode);
        joinAccumulator = joinHelper.new CandidateJoinAccumulator();
        peerFinder.activate(coordinationState.get().getLastAcceptedState().nodes());
        clusterFormationFailureHelper.start();
        leaderHeartbeatService.stop();
        leaderChecker.setCurrentNodes(DiscoveryNodes.EMPTY_NODES);
        leaderChecker.updateLeader(null);
        followersChecker.clearCurrentNodes();
        followersChecker.updateFastResponseState(getCurrentTerm(), mode);
        lagDetector.clearTrackedNodes();
        if (prevMode == Mode.LEADER) {
            cleanMasterService();
        }
        // if we were following a master, publish a state with the no-master block to the applier
        if (applierState.nodes().getMasterNodeId() != null) {
            applierState = clusterStateWithNoMasterBlock(applierState);
            clusterApplier.onNewClusterState("becoming candidate: " + method, () -> applierState, ActionListener.noop());
        }
    }
    updateSingleNodeClusterChecker();
    preVoteCollector.update(getPreVoteResponse(), null);
}
/**
 * Transitions this node from CANDIDATE to LEADER after winning an election: deactivates peer
 * finding, updates the pre-vote collector, and starts the leader heartbeat service. If a later
 * term is observed via the heartbeat listener, the node steps down back to candidate.
 * Must be called while holding the coordinator mutex, and only while a candidate.
 */
private void becomeLeader() {
    assert Thread.holdsLock(mutex) : "Coordinator mutex not held";
    assert mode == Mode.CANDIDATE : "expected candidate but was " + mode;
    assert getLocalNode().isMasterNode() : getLocalNode() + " became a leader but is not master-eligible";
    final var leaderTerm = getCurrentTerm();
    logger.debug(
        "handleJoinRequest: coordinator becoming LEADER in term {} (was {}, lastKnownLeader was [{}])",
        leaderTerm,
        mode,
        lastKnownLeader
    );
    mode = Mode.LEADER;
    joinAccumulator.close(mode);
    joinAccumulator = joinHelper.new LeaderJoinAccumulator();
    lastKnownLeader = Optional.of(getLocalNode());
    peerFinder.deactivate(getLocalNode());
    clusterFormationFailureHelper.stop();
    closePrevotingRound();
    preVoteCollector.update(getPreVoteResponse(), getLocalNode());
    leaderHeartbeatService.start(
        getLocalNode(),
        leaderTerm,
        new ThreadedActionListener<>(transportService.getThreadPool().executor(Names.CLUSTER_COORDINATION), new ActionListener<>() {
            @Override
            public void onResponse(Long newTerm) {
                // the heartbeat service reports a newer term than ours; bump our max term seen
                assert newTerm != null && newTerm > leaderTerm : newTerm + " vs " + leaderTerm;
                updateMaxTermSeen(newTerm);
            }
            @Override
            public void onFailure(Exception e) {
                // TODO tests for heartbeat failures
                logger.warn(() -> Strings.format("failed to write heartbeat for term [%s]", leaderTerm), e);
                synchronized (mutex) {
                    // only step down if we are still leading the same term the heartbeat was for
                    if (getCurrentTerm() == leaderTerm) {
                        becomeCandidate("leaderHeartbeatService");
                    }
                }
            }
            @Override
            public String toString() {
                return "term change heartbeat listener";
            }
        })
    );
    assert leaderChecker.leader() == null : leaderChecker.leader();
    followersChecker.updateFastResponseState(leaderTerm, mode);
    updateSingleNodeClusterChecker();
}
/**
 * Transitions this node into FOLLOWER mode, following {@code leaderNode}. A leader must become a
 * candidate before it can become a follower. Idempotent when already following the same leader
 * (the leader checker is only restarted when the leader actually changed).
 * Must be called while holding the coordinator mutex.
 *
 * @param method     a description of the caller, used for logging
 * @param leaderNode the master-eligible node this node will follow
 */
void becomeFollower(String method, DiscoveryNode leaderNode) {
    assert Thread.holdsLock(mutex) : "Coordinator mutex not held";
    assert leaderNode.isMasterNode() : leaderNode + " became a leader but is not master-eligible";
    assert mode != Mode.LEADER : "do not switch to follower from leader (should be candidate first)";
    if (mode == Mode.FOLLOWER && Optional.of(leaderNode).equals(lastKnownLeader)) {
        logger.trace("{}: coordinator remaining FOLLOWER of [{}] in term {}", method, leaderNode, getCurrentTerm());
    } else {
        logger.debug(
            "{}: coordinator becoming FOLLOWER of [{}] in term {} (was {}, lastKnownLeader was [{}])",
            method,
            leaderNode,
            getCurrentTerm(),
            mode,
            lastKnownLeader
        );
    }
    // the leader checker only needs restarting when the leader changed (or we were not a follower)
    final boolean restartLeaderChecker = (mode == Mode.FOLLOWER && Optional.of(leaderNode).equals(lastKnownLeader)) == false;
    if (mode != Mode.FOLLOWER) {
        mode = Mode.FOLLOWER;
        joinAccumulator.close(mode);
        joinAccumulator = new JoinHelper.FollowerJoinAccumulator();
        leaderChecker.setCurrentNodes(DiscoveryNodes.EMPTY_NODES);
        leaderHeartbeatService.stop();
    }
    updateSingleNodeClusterChecker();
    lastKnownLeader = Optional.of(leaderNode);
    peerFinder.deactivate(leaderNode);
    clusterFormationFailureHelper.stop();
    closePrevotingRound();
    cancelActivePublication("become follower: " + method);
    preVoteCollector.update(getPreVoteResponse(), leaderNode);
    if (restartLeaderChecker) {
        leaderChecker.updateLeader(leaderNode);
    }
    followersChecker.clearCurrentNodes();
    followersChecker.updateFastResponseState(getCurrentTerm(), mode);
    lagDetector.clearTrackedNodes();
}
/**
 * Submits a local-only master-service task that clears the allocation service caches once this
 * node is confirmed to no longer be the elected master. Failures are logged at trace and ignored.
 */
private void cleanMasterService() {
    new LocalMasterServiceTask(Priority.NORMAL) {
        @Override
        public void onFailure(Exception e) {
            // ignore
            logger.trace("failed to clean-up after stepping down as master", e);
        }
        @Override
        public void execute(ClusterState currentState) {
            // only clean the caches if we have genuinely stepped down as master
            if (currentState.nodes().isLocalNodeElectedMaster() == false) {
                allocationService.cleanCaches();
            }
        }
        @Override
        public String toString() {
            return "cleanMasterService";
        }
    }.submit(masterService, "clean-up after stepping down as master");
}
/**
 * Builds the response advertised to pre-vote requests, reflecting the current term together
 * with the last-accepted term and version from the coordination state.
 */
private PreVoteResponse getPreVoteResponse() {
    final CoordinationState state = coordinationState.get();
    return new PreVoteResponse(getCurrentTerm(), state.getLastAcceptedTerm(), state.getLastAcceptedState().version());
}
/** The current coordination term, read under the coordinator mutex. Package-visible for testing. */
long getCurrentTerm() {
    synchronized (mutex) {
        final CoordinationState state = coordinationState.get();
        return state.getCurrentTerm();
    }
}
/** The coordinator's current mode, read under the coordinator mutex. Package-visible for testing. */
Mode getMode() {
    synchronized (mutex) {
        final Mode currentMode = mode;
        return currentMode;
    }
}
/** The local discovery node as reported by the transport service. Visible for testing. */
DiscoveryNode getLocalNode() {
    final DiscoveryNode localNode = transportService.getLocalNode();
    return localNode;
}
/** Whether a cluster-state publication is currently in flight. Package-visible for testing. */
boolean publicationInProgress() {
    synchronized (mutex) {
        return currentPublication.isEmpty() == false;
    }
}
/**
 * Starts the coordinator: loads persisted coordination state, validates single-node-discovery
 * constraints, and hands an initial (not-recovered, no-master-blocked) cluster state to the
 * applier. The cluster UUID is carried over only if it was previously committed.
 */
@Override
protected void doStart() {
    synchronized (mutex) {
        CoordinationState.PersistedState persistedState = persistedStateSupplier.get();
        coordinationState.set(new CoordinationState(getLocalNode(), persistedState, electionStrategy));
        peerFinder.setCurrentTerm(getCurrentTerm());
        configuredHostsResolver.start();
        final ClusterState lastAcceptedState = coordinationState.get().getLastAcceptedState();
        clusterBootstrapService.logBootstrapState(lastAcceptedState.metadata());
        final VotingConfiguration votingConfiguration = lastAcceptedState.getLastCommittedConfiguration();
        // in single-node discovery the local node alone must form a quorum of any existing config
        if (singleNodeDiscovery
            && votingConfiguration.isEmpty() == false
            && votingConfiguration.hasQuorum(Collections.singleton(getLocalNode().getId())) == false) {
            throw new IllegalStateException(
                "cannot start with ["
                    + DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey()
                    + "] set to ["
                    + DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE
                    + "] when local node "
                    + getLocalNode()
                    + " does not have quorum in voting configuration "
                    + votingConfiguration
            );
        }
        final Metadata.Builder metadata = Metadata.builder();
        if (lastAcceptedState.metadata().clusterUUIDCommitted()) {
            metadata.clusterUUID(lastAcceptedState.metadata().clusterUUID()).clusterUUIDCommitted(true);
        }
        ClusterState initialState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(settings))
            .blocks(
                ClusterBlocks.builder()
                    .addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)
                    .addGlobalBlock(noMasterBlockService.getNoMasterBlock())
            )
            .nodes(DiscoveryNodes.builder().add(getLocalNode()).localNodeId(getLocalNode().getId()))
            .putCompatibilityVersions(getLocalNode().getId(), compatibilityVersions)
            .metadata(metadata)
            .build();
        applierState = initialState;
        clusterApplier.setInitialState(initialState);
    }
    clusterFormationFailureHelper.setLoggingEnabled(true);
}
/** Discovery statistics for this node; master-service update stats are only present on master-eligible nodes. */
public DiscoveryStats stats() {
    final var clusterStateUpdateStats = getLocalNode().isMasterNode() ? masterService.getClusterStateUpdateStats() : null;
    return new DiscoveryStats(
        new PendingClusterStateStats(0, 0, 0),
        publicationHandler.stats(),
        clusterStateUpdateStats,
        clusterApplier.getStats()
    );
}
/**
 * Kicks off cluster formation: becomes a candidate (under the mutex) and then, outside the
 * mutex, schedules the unconfigured-bootstrap warning/auto-bootstrap check.
 */
public void startInitialJoin() {
    synchronized (mutex) {
        becomeCandidate("startInitialJoin");
    }
    // deliberately outside the mutex: scheduling the bootstrap does not need coordinator state
    clusterBootstrapService.scheduleUnconfiguredBootstrap();
}
/**
 * Stops the coordinator's auxiliary services: host resolution, join validation, and the
 * cluster-formation-failure warnings.
 */
@Override
protected void doStop() {
    configuredHostsResolver.stop();
    joinValidationService.stop();
    clusterFormationFailureHelper.setLoggingEnabled(false);
}
/**
 * Closes the {@link CoordinationState} if one was created by {@link #doStart()}.
 */
@Override
protected void doClose() throws IOException {
    final CoordinationState coordinationState = this.coordinationState.get();
    if (coordinationState != null) {
        // This looks like a race that might leak an unclosed CoordinationState if it's created while execution is here, but this method
        // is synchronized on AbstractLifecycleComponent#lifestyle, as is the doStart() method that creates the CoordinationState, so
        // it's all ok.
        synchronized (mutex) {
            coordinationState.close();
        }
    }
}
/**
 * Asserts (under the coordinator mutex) the internal consistency of the coordinator's state for
 * the current mode: the peer finder, checkers, pre-vote collector, lag detector, accumulators and
 * publication state must all agree with the mode and term. Only has an effect when assertions are
 * enabled; intended for use from tests.
 */
public void invariant() {
    synchronized (mutex) {
        final Optional<DiscoveryNode> peerFinderLeader = peerFinder.getLeader();
        final ClusterState lastAcceptedClusterState = getStateForMasterService();
        // invariants that hold in every mode
        assert peerFinder.getCurrentTerm() == getCurrentTerm();
        assert followersChecker.getFastResponseState().term() == getCurrentTerm() : followersChecker.getFastResponseState();
        assert followersChecker.getFastResponseState().mode() == getMode() : followersChecker.getFastResponseState();
        assert (applierState.nodes().getMasterNodeId() == null) == applierState.blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID);
        assert preVoteCollector.getPreVoteResponse().equals(getPreVoteResponse()) : preVoteCollector + " vs " + getPreVoteResponse();
        assert lagDetector.getTrackedNodes().contains(getLocalNode()) == false : lagDetector.getTrackedNodes();
        assert followersChecker.getKnownFollowers().equals(lagDetector.getTrackedNodes())
            : followersChecker.getKnownFollowers() + " vs " + lagDetector.getTrackedNodes();
        assert singleNodeClusterChecker == null || (mode == Mode.LEADER && applierState.nodes().size() == 1)
            : "Single node checker must exist iff there is a single-node cluster";
        if (mode == Mode.LEADER) {
            // leader-mode invariants: we won the election, we are our own leader, and the
            // followers checker tracks exactly the nodes of the last published state
            final boolean becomingMaster = lastAcceptedClusterState.term() != getCurrentTerm();
            assert coordinationState.get().electionWon();
            assert lastKnownLeader.isPresent() && lastKnownLeader.get().equals(getLocalNode());
            assert joinAccumulator instanceof JoinHelper.LeaderJoinAccumulator;
            assert peerFinderLeader.equals(lastKnownLeader) : peerFinderLeader;
            assert prevotingRound == null : prevotingRound;
            assert becomingMaster || lastAcceptedClusterState.nodes().getMasterNodeId() != null : lastAcceptedClusterState;
            assert leaderChecker.leader() == null : leaderChecker.leader();
            assert getLocalNode().equals(applierState.nodes().getMasterNode())
                || (applierState.nodes().getMasterNodeId() == null && applierState.term() < getCurrentTerm());
            assert preVoteCollector.getLeader() == getLocalNode() : preVoteCollector;
            assert clusterFormationFailureHelper.isRunning() == false;
            final boolean activePublication = currentPublication.map(CoordinatorPublication::isActiveForCurrentLeader).orElse(false);
            if (becomingMaster && activePublication == false) {
                // cluster state update task to become master is submitted to MasterService, but publication has not started yet
                assert followersChecker.getKnownFollowers().isEmpty() : followersChecker.getKnownFollowers();
            } else {
                final ClusterState lastPublishedState;
                if (activePublication) {
                    // active publication in progress: followersChecker is up-to-date with nodes that we're actively publishing to
                    lastPublishedState = currentPublication.get().publishedState();
                } else {
                    // no active publication: followersChecker is up-to-date with the nodes of the latest publication
                    lastPublishedState = coordinationState.get().getLastAcceptedState();
                }
                final Set<DiscoveryNode> lastPublishedNodes = new HashSet<>();
                lastPublishedState.nodes().forEach(lastPublishedNodes::add);
                assert lastPublishedNodes.remove(getLocalNode()); // followersChecker excludes local node
                assert lastPublishedNodes.equals(followersChecker.getKnownFollowers())
                    : lastPublishedNodes + " != " + followersChecker.getKnownFollowers();
            }
            assert becomingMaster
                || activePublication
                || coordinationState.get()
                    .getLastAcceptedConfiguration()
                    .equals(coordinationState.get().getLastCommittedConfiguration())
                : coordinationState.get().getLastAcceptedConfiguration()
                    + " != "
                    + coordinationState.get().getLastCommittedConfiguration();
        } else if (mode == Mode.FOLLOWER) {
            // follower-mode invariants: some other node is our leader and we check it
            assert coordinationState.get().electionWon() == false : getLocalNode() + " is FOLLOWER so electionWon() should be false";
            assert lastKnownLeader.isPresent() && (lastKnownLeader.get().equals(getLocalNode()) == false);
            assert joinAccumulator instanceof JoinHelper.FollowerJoinAccumulator;
            assert peerFinderLeader.equals(lastKnownLeader) : peerFinderLeader;
            assert prevotingRound == null : prevotingRound;
            assert lastAcceptedClusterState.nodes().getMasterNodeId() == null : lastAcceptedClusterState;
            assert leaderChecker.currentNodeIsMaster() == false;
            assert lastKnownLeader.equals(Optional.of(leaderChecker.leader()));
            assert followersChecker.getKnownFollowers().isEmpty();
            assert lastKnownLeader.get().equals(applierState.nodes().getMasterNode())
                || (applierState.nodes().getMasterNodeId() == null
                    && (applierState.term() < getCurrentTerm() || applierState.version() < getLastAcceptedState().version()));
            assert currentPublication.map(Publication::isCommitted).orElse(true);
            assert preVoteCollector.getLeader().equals(lastKnownLeader.get()) : preVoteCollector;
            assert clusterFormationFailureHelper.isRunning() == false;
        } else {
            // candidate-mode invariants: no leader anywhere, formation-failure logging running
            assert mode == Mode.CANDIDATE;
            assert joinAccumulator instanceof JoinHelper.CandidateJoinAccumulator;
            assert peerFinderLeader.isPresent() == false : peerFinderLeader;
            assert prevotingRound == null || electionScheduler != null;
            assert lastAcceptedClusterState.nodes().getMasterNodeId() == null : lastAcceptedClusterState;
            assert leaderChecker.currentNodeIsMaster() == false;
            assert leaderChecker.leader() == null : leaderChecker.leader();
            assert followersChecker.getKnownFollowers().isEmpty();
            assert applierState.nodes().getMasterNodeId() == null;
            assert currentPublication.map(Publication::isCommitted).orElse(true);
            assert preVoteCollector.getLeader() == null : preVoteCollector;
            assert clusterFormationFailureHelper.isRunning();
        }
    }
}
/** @return true iff a non-empty initial voting configuration has been accepted (i.e. the cluster is bootstrapped). */
public boolean isInitialConfigurationSet() {
    final VotingConfiguration lastAcceptedConfiguration = getLastAcceptedState().getLastAcceptedConfiguration();
    return lastAcceptedConfiguration.isEmpty() == false;
}
/**
 * Sets the initial configuration to the given {@link VotingConfiguration}. This method is safe to call
 * more than once, as long as the argument to each call is the same.
 *
 * @param votingConfiguration The nodes that should form the initial configuration.
 * @return whether this call successfully set the initial configuration - if false, the cluster has already been bootstrapped.
 * @throws CoordinationStateRejectedException if this node is not master-eligible, is not part of the
 *         given configuration, or has not discovered enough nodes to form a quorum of it.
 */
public boolean setInitialConfiguration(final VotingConfiguration votingConfiguration) {
    synchronized (mutex) {
        final ClusterState currentState = getStateForMasterService();
        if (isInitialConfigurationSet()) {
            logger.debug("initial configuration already set, ignoring {}", votingConfiguration);
            return false;
        }
        if (getLocalNode().isMasterNode() == false) {
            logger.debug("skip setting initial configuration as local node is not a master-eligible node");
            throw new CoordinationStateRejectedException(
                "this node is not master-eligible, but cluster bootstrapping can only happen on a master-eligible node"
            );
        }
        if (votingConfiguration.getNodeIds().contains(getLocalNode().getId()) == false) {
            logger.debug("skip setting initial configuration as local node is not part of initial configuration");
            throw new CoordinationStateRejectedException("local node is not part of initial configuration");
        }
        // the local node plus all discovered peers must form a quorum of the requested configuration
        final List<DiscoveryNode> knownNodes = new ArrayList<>();
        knownNodes.add(getLocalNode());
        peerFinder.getFoundPeers().forEach(knownNodes::add);
        if (votingConfiguration.hasQuorum(knownNodes.stream().map(DiscoveryNode::getId).toList()) == false) {
            logger.debug(
                "skip setting initial configuration as not enough nodes discovered to form a quorum in the "
                    + "initial configuration [knownNodes={}, {}]",
                knownNodes,
                votingConfiguration
            );
            throw new CoordinationStateRejectedException(
                "not enough nodes discovered to form a quorum in the initial configuration "
                    + "[knownNodes="
                    + knownNodes
                    + ", "
                    + votingConfiguration
                    + "]"
            );
        }
        logger.info("setting initial configuration to {}", votingConfiguration);
        final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder(currentState.coordinationMetadata())
            .lastAcceptedConfiguration(votingConfiguration)
            .lastCommittedConfiguration(votingConfiguration)
            .build();
        Metadata.Builder metadataBuilder = Metadata.builder(currentState.metadata());
        // automatically generate a UID for the metadata if we need to
        metadataBuilder.generateClusterUuidIfNeeded();
        metadataBuilder.coordinationMetadata(coordinationMetadata);
        coordinationState.get().setInitialState(ClusterState.builder(currentState).metadata(metadataBuilder).build());
        var nodeEligibility = localNodeMayWinElection(getLastAcceptedState(), electionStrategy);
        assert nodeEligibility.mayWin()
            : "initial state does not allow local node to win election, reason: "
                + nodeEligibility.reason()
                + " , metadata: "
                + getLastAcceptedState().coordinationMetadata();
        preVoteCollector.update(getPreVoteResponse(), null); // pick up the change to last-accepted version
        startElectionScheduler();
        return true;
    }
}
// Package-private for testing
/**
 * Computes a possibly-improved voting configuration for the given state by asking the
 * reconfigurator to rebalance it over the live master-eligible nodes, excluding explicitly
 * excluded node IDs and previously-master-eligible nodes that are no longer master-eligible.
 * Returns the input state unchanged when no reconfiguration is needed.
 * Must be called while holding the coordinator mutex.
 */
ClusterState improveConfiguration(ClusterState clusterState) {
    assert Thread.holdsLock(mutex) : "Coordinator mutex not held";
    assert validVotingConfigExclusionState(clusterState) : clusterState;
    // exclude any nodes whose ID is in the voting config exclusions list ...
    final Stream<String> excludedNodeIds = clusterState.getVotingConfigExclusions().stream().map(VotingConfigExclusion::getNodeId);
    // ... and also automatically exclude the node IDs of master-ineligible nodes that were previously master-eligible and are still in
    // the voting config. We could exclude all the master-ineligible nodes here, but there could be quite a few of them and that makes
    // the logging much harder to follow.
    final Stream<String> masterIneligibleNodeIdsInVotingConfig = clusterState.nodes()
        .stream()
        .filter(
            n -> n.isMasterNode() == false
                && (clusterState.getLastAcceptedConfiguration().getNodeIds().contains(n.getId())
                    || clusterState.getLastCommittedConfiguration().getNodeIds().contains(n.getId()))
        )
        .map(DiscoveryNode::getId);
    DiscoveryNode localNode = getLocalNode();
    // "live" means master-eligible and either the local node or a node we hold a join vote from
    final Set<DiscoveryNode> liveNodes = clusterState.nodes()
        .stream()
        .filter(DiscoveryNode::isMasterNode)
        .filter((n) -> coordinationState.get().containsJoinVoteFor(n) || n.equals(localNode))
        .collect(Collectors.toSet());
    final VotingConfiguration newConfig = reconfigurator.reconfigure(
        liveNodes,
        Stream.concat(masterIneligibleNodeIdsInVotingConfig, excludedNodeIds).collect(Collectors.toSet()),
        localNode,
        clusterState.getLastAcceptedConfiguration()
    );
    if (newConfig.equals(clusterState.getLastAcceptedConfiguration()) == false) {
        assert coordinationState.get().joinVotesHaveQuorumFor(newConfig);
        return ClusterState.builder(clusterState)
            .metadata(
                Metadata.builder(clusterState.metadata())
                    .coordinationMetadata(
                        CoordinationMetadata.builder(clusterState.coordinationMetadata()).lastAcceptedConfiguration(newConfig).build()
                    )
            )
            .build();
    }
    return clusterState;
}
/*
 * Valid Voting Configuration Exclusion state criteria:
 * 1. Every voting config exclusion with an ID of _absent_ should not match any nodes currently in the cluster by name
 * 2. Every voting config exclusion with a name of _absent_ should not match any nodes currently in the cluster by ID
 */
static boolean validVotingConfigExclusionState(ClusterState clusterState) {
    final Set<String> namesOfExclusionsLackingId = new HashSet<>();
    final Set<String> idsOfExclusionsLackingName = new HashSet<>();
    for (VotingConfigExclusion exclusion : clusterState.getVotingConfigExclusions()) {
        if (exclusion.getNodeId().equals(VotingConfigExclusion.MISSING_VALUE_MARKER)) {
            namesOfExclusionsLackingId.add(exclusion.getNodeName());
        }
        if (exclusion.getNodeName().equals(VotingConfigExclusion.MISSING_VALUE_MARKER)) {
            idsOfExclusionsLackingName.add(exclusion.getNodeId());
        }
    }
    // invalid if any master-eligible node matches an incomplete exclusion by ID or by name
    for (DiscoveryNode node : clusterState.getNodes()) {
        if (node.isMasterNode() == false) {
            continue;
        }
        if (idsOfExclusionsLackingName.contains(node.getId()) || namesOfExclusionsLackingId.contains(node.getName())) {
            return false;
        }
    }
    return true;
}
private final AtomicBoolean reconfigurationTaskScheduled = new AtomicBoolean();
/**
 * If the last-accepted state's voting configuration can be improved, submits (at most one at a
 * time, guarded by {@code reconfigurationTaskScheduled}) an urgent cluster-state task that applies
 * {@link #improveConfiguration}. Must be called while holding the coordinator mutex, as leader,
 * with no publication in progress.
 */
private void scheduleReconfigurationIfNeeded() {
    assert Thread.holdsLock(mutex) : "Coordinator mutex not held";
    assert mode == Mode.LEADER : mode;
    assert currentPublication.isPresent() == false : "Expected no publication in progress";
    final ClusterState state = getLastAcceptedState();
    // only schedule when an improvement exists and no reconfiguration task is already queued
    if (improveConfiguration(state) != state && reconfigurationTaskScheduled.compareAndSet(false, true)) {
        logger.trace("scheduling reconfiguration");
        submitUnbatchedTask("reconfigure", new ClusterStateUpdateTask(Priority.URGENT) {
            @Override
            public ClusterState execute(ClusterState currentState) {
                // clear the flag before recomputing so that a follow-up improvement can be scheduled
                reconfigurationTaskScheduled.set(false);
                synchronized (mutex) {
                    return improveConfiguration(currentState);
                }
            }
            @Override
            public void onFailure(Exception e) {
                reconfigurationTaskScheduled.set(false);
                logger.debug("reconfiguration failed", e);
            }
        });
    }
}
/** Submits a legacy unbatched cluster-state update task to the master service. */
@SuppressForbidden(reason = "legacy usage of unbatched task") // TODO add support for batching here
private void submitUnbatchedTask(String source, ClusterStateUpdateTask task) {
    masterService.submitUnbatchedStateUpdateTask(source, task);
}
// exposed for tests
/** @return true iff {@code node} is master-eligible but we do not hold a join vote from it. */
boolean missingJoinVoteFrom(DiscoveryNode node) {
    if (node.isMasterNode() == false) {
        return false;
    }
    return coordinationState.get().containsJoinVoteFor(node) == false;
}
/**
 * Applies a join (vote) to the coordination state under the mutex, first bumping our term to the
 * join's term if needed. Once the election is already won, join failures are swallowed; a new
 * join from a master-eligible node may trigger a reconfiguration when we are the established
 * master and no publication is in flight. Before the election is won, failures propagate.
 */
private void handleJoin(Join join) {
    synchronized (mutex) {
        // bumping the term produces our own join, which is handled recursively
        ensureTermAtLeast(getLocalNode(), join.term()).ifPresent(this::handleJoin);
        if (coordinationState.get().electionWon()) {
            // If we have already won the election then the actual join does not matter for election purposes, so swallow any exception
            final boolean isNewJoinFromMasterEligibleNode = handleJoinIgnoringExceptions(join);
            // If we haven't completely finished becoming master then there's already a publication scheduled which will, in turn,
            // schedule a reconfiguration if needed. It's benign to schedule a reconfiguration anyway, but it might fail if it wins the
            // race against the election-winning publication and log a big error message, which we can prevent by checking this here:
            final boolean establishedAsMaster = mode == Mode.LEADER && getLastAcceptedState().term() == getCurrentTerm();
            if (isNewJoinFromMasterEligibleNode && establishedAsMaster && publicationInProgress() == false) {
                scheduleReconfigurationIfNeeded();
            }
        } else {
            coordinationState.get().handleJoin(join); // this might fail and bubble up the exception
        }
    }
}
/**
 * Applies the join to the coordination state, swallowing rejections.
 *
 * @return true iff the join was from a new node and was successfully added
 */
private boolean handleJoinIgnoringExceptions(Join join) {
    boolean addedNewJoin;
    try {
        addedNewJoin = coordinationState.get().handleJoin(join);
    } catch (CoordinationStateRejectedException e) {
        logger.debug(() -> "failed to add " + join + " - ignoring", e);
        addedNewJoin = false;
    }
    return addedNewJoin;
}
/** The last-accepted cluster state, read under the coordinator mutex. */
public ClusterState getLastAcceptedState() {
    synchronized (mutex) {
        final CoordinationState state = coordinationState.get();
        return state.getLastAcceptedState();
    }
}
/**
 * Fetches the latest stored cluster state after winning an election in {@code joiningTerm}.
 * On failure the node steps down to candidate before the failure is propagated. The success
 * path is dispatched to the cluster-coordination executor.
 */
private void getLatestStoredStateAfterWinningAnElection(ActionListener<ClusterState> listener, long joiningTerm) {
    // using a SubscribableListener to stay on the current thread if (and only if) nothing async happened
    final var latestStoredStateListener = new SubscribableListener<ClusterState>();
    persistedStateSupplier.get().getLatestStoredState(joiningTerm, latestStoredStateListener);
    latestStoredStateListener.addListener(listener.delegateResponse((delegate, e) -> {
        synchronized (mutex) {
            // TODO: add test coverage for this branch
            becomeCandidate("failed fetching latest stored state");
        }
        delegate.onFailure(e);
    }), transportService.getThreadPool().executor(Names.CLUSTER_COORDINATION), null);
}
/** The cluster state most recently handed to the applier; may be null before startup completes. */
@Nullable
public ClusterState getApplierState() {
    final ClusterState currentApplierState = applierState;
    return currentApplierState;
}
/** The local node together with every peer currently known to the peer finder. */
private List<DiscoveryNode> getDiscoveredNodes() {
    final List<DiscoveryNode> discoveredNodes = new ArrayList<>();
    discoveredNodes.add(getLocalNode());
    for (DiscoveryNode foundPeer : peerFinder.getFoundPeers()) {
        discoveredNodes.add(foundPeer);
    }
    return discoveredNodes;
}
/**
 * Get the last-accepted state, adding a no-master block and removing the master node ID if we are not currently the master. This is the
 * state that the master service uses as input to the cluster state update computation. Note that it's quite expensive to adjust blocks
 * in a large cluster state, so avoid using this where possible.
 */
ClusterState getStateForMasterService() {
    synchronized (mutex) {
        // expose last accepted cluster state as base state upon which the master service
        // speculatively calculates the next cluster state update
        final ClusterState clusterState = coordinationState.get().getLastAcceptedState();
        assert clusterState.nodes().getLocalNode() != null;
        // a stale term means the accepted state predates our (possible) leadership
        if (mode != Mode.LEADER || clusterState.term() != getCurrentTerm()) {
            // the master service checks if the local node is the master node in order to fail execution of the state update early
            return clusterStateWithNoMasterBlock(clusterState);
        }
        return clusterState;
    }
}
/**
 * The last-accepted state for join validation, or {@code null} if this node is not currently
 * the established elected master (unlike getStateForMasterService, no rebuilt state is returned).
 */
private ClusterState getStateForJoinValidationService() {
    synchronized (mutex) {
        final ClusterState lastAcceptedState = coordinationState.get().getLastAcceptedState();
        assert lastAcceptedState.nodes().getLocalNode() != null;
        final boolean isEstablishedMaster = mode == Mode.LEADER
            && lastAcceptedState.term() == getCurrentTerm()
            && lastAcceptedState.nodes().isLocalNodeElectedMaster();
        return isEstablishedMaster ? lastAcceptedState : null;
    }
}
/**
 * Add a no-master block and remove the master node ID from the given cluster state. Note that it's quite expensive to add blocks in a
 * large cluster state, so avoid using this where possible. Returns the state unchanged when it
 * already has no master node ID.
 */
private ClusterState clusterStateWithNoMasterBlock(ClusterState clusterState) {
    if (clusterState.nodes().getMasterNodeId() == null) {
        return clusterState;
    }
    // remove block if it already exists before adding new one
    assert clusterState.blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID) == false
        : "NO_MASTER_BLOCK should only be added by Coordinator";
    final ClusterBlocks blocksWithNoMaster = ClusterBlocks.builder()
        .blocks(clusterState.blocks())
        .addGlobalBlock(noMasterBlockService.getNoMasterBlock())
        .build();
    return ClusterState.builder(clusterState).blocks(blocksWithNoMaster).nodes(clusterState.nodes().withMasterNodeId(null)).build();
}
/**
 * Publishes a new cluster state to the cluster. Fails fast (via {@code NotMasterException}) when
 * this node is no longer the leader for the state's term or another publication is in flight.
 * Failures while creating the publication context or the publish request cause this node to step
 * down to candidate. Any unexpected exception is wrapped in a
 * {@code FailedToCommitClusterStateException} and delivered to {@code publishListener}.
 */
@Override
public void publish(
    ClusterStatePublicationEvent clusterStatePublicationEvent,
    ActionListener<Void> publishListener,
    AckListener ackListener
) {
    try {
        synchronized (mutex) {
            if (mode != Mode.LEADER || getCurrentTerm() != clusterStatePublicationEvent.getNewState().term()) {
                logger.debug(
                    () -> format(
                        "[%s] failed publication as node is no longer master for term %s",
                        clusterStatePublicationEvent.getSummary(),
                        clusterStatePublicationEvent.getNewState().term()
                    )
                );
                throw new NotMasterException(
                    "node is no longer master for term "
                        + clusterStatePublicationEvent.getNewState().term()
                        + " while handling publication"
                );
            }
            if (currentPublication.isPresent()) {
                assert false : "[" + currentPublication.get() + "] in progress, cannot start new publication";
                logger.error(
                    () -> format(
                        "[%s] failed publication as already publication in progress",
                        clusterStatePublicationEvent.getSummary()
                    )
                );
                // If there is another publication in progress then we are not the master node
                throw new NotMasterException("publication " + currentPublication.get() + " already in progress");
            }
            assert assertPreviousStateConsistency(clusterStatePublicationEvent);
            final ClusterState clusterState;
            final long publicationContextConstructionStartMillis;
            final PublicationTransportHandler.PublicationContext publicationContext;
            final PublishRequest publishRequest;
            try {
                clusterState = clusterStatePublicationEvent.getNewState();
                assert getLocalNode().equals(clusterState.getNodes().get(getLocalNode().getId()))
                    : getLocalNode() + " should be in published " + clusterState;
                publicationContextConstructionStartMillis = transportService.getThreadPool().rawRelativeTimeInMillis();
                publicationContext = publicationHandler.newPublicationContext(clusterStatePublicationEvent);
            } catch (Exception e) {
                logger.debug(() -> "[" + clusterStatePublicationEvent.getSummary() + "] publishing failed during context creation", e);
                // Calling becomeCandidate here means this node steps down from being master
                becomeCandidate("publication context creation");
                throw new NotMasterException("publishing failed during context creation", e);
            }
            // the try-with-resources releases our reference to the publication context on exit
            try (Releasable ignored = publicationContext::decRef) {
                try {
                    clusterStatePublicationEvent.setPublicationContextConstructionElapsedMillis(
                        transportService.getThreadPool().rawRelativeTimeInMillis() - publicationContextConstructionStartMillis
                    );
                    publishRequest = coordinationState.get().handleClientValue(clusterState);
                } catch (Exception e) {
                    logger.warn(
                        "failed to start publication of state version ["
                            + clusterState.version()
                            + "] in term ["
                            + clusterState.term()
                            + "] for ["
                            + clusterStatePublicationEvent.getSummary()
                            + "]",
                        e
                    );
                    // Calling becomeCandidate here means this node steps down from being master
                    becomeCandidate("publication creation");
                    throw new NotMasterException("publishing failed while starting", e);
                }
                try {
                    final var publication = new CoordinatorPublication(
                        clusterStatePublicationEvent,
                        publishRequest,
                        publicationContext,
                        new SubscribableListener<>(),
                        ackListener,
                        publishListener
                    );
                    currentPublication = Optional.of(publication);
                    // point the checkers and lag detector at the node set we are publishing to
                    final var publishNodes = publishRequest.getAcceptedState().nodes();
                    leaderChecker.setCurrentNodes(publishNodes);
                    followersChecker.setCurrentNodes(publishNodes);
                    lagDetector.setTrackedNodes(publishNodes);
                    publication.start(followersChecker.getFaultyNodes());
                } catch (Exception e) {
                    assert false : e;
                    if (currentPublication.isEmpty()) {
                        // log an error and fail the listener
                        throw new IllegalStateException(e);
                    } else {
                        // becoming candidate will clean up the publication, completing the listener
                        becomeCandidate("publication start");
                    }
                }
            }
        }
    } catch (FailedToCommitClusterStateException | NotMasterException e) {
        publishListener.onFailure(e);
    } catch (Exception e) {
        assert false : e; // all exceptions should already be caught and wrapped in a FailedToCommitClusterStateException
        logger.error(() -> "[" + clusterStatePublicationEvent.getSummary() + "] publishing unexpectedly failed", e);
        publishListener.onFailure(new FailedToCommitClusterStateException("publishing unexpectedly failed", e));
    }
}
// there is no equals on cluster state, so we just serialize it to XContent and compare Maps
// deserialized from the resulting JSON
private boolean assertPreviousStateConsistency(ClusterStatePublicationEvent clusterStatePublicationEvent) {
if (clusterStatePublicationEvent.getOldState() != coordinationState.get().getLastAcceptedState()) {
// compare JSON representations
@FixForMultiProject // this is just so toXContent doesn't throw - we want the same contents, but don't care if it's MP or not
ToXContent.Params params = new ToXContent.MapParams(Map.of("multi-project", "true"));
String oldState = Strings.toString(ChunkedToXContent.wrapAsToXContent(clusterStatePublicationEvent.getOldState()), params);
String newState = Strings.toString(
ChunkedToXContent.wrapAsToXContent(clusterStateWithNoMasterBlock(coordinationState.get().getLastAcceptedState())),
params
);
assert XContentHelper.convertToMap(JsonXContent.jsonXContent, oldState, false)
.equals(XContentHelper.convertToMap(JsonXContent.jsonXContent, newState, false)) : oldState + " vs " + newState;
}
return true;
}
private <T> ActionListener<T> wrapWithMutex(ActionListener<T> listener) {
return new ActionListener<>() {
@Override
public void onResponse(T t) {
synchronized (mutex) {
listener.onResponse(t);
}
}
@Override
public void onFailure(Exception e) {
synchronized (mutex) {
listener.onFailure(e);
}
}
};
}
private void cancelActivePublication(String reason) {
assert Thread.holdsLock(mutex) : "Coordinator mutex not held";
currentPublication.ifPresent(coordinatorPublication -> coordinatorPublication.cancel(reason));
}
public Collection<BiConsumer<DiscoveryNode, ClusterState>> getOnJoinValidators() {
return onJoinValidators;
}
// for tests
boolean hasIdleJoinValidationService() {
return joinValidationService.isIdle();
}
// for tests, not synchronized
boolean electionSchedulerActive() {
return electionScheduler != null;
}
public void addPeerFinderListener(PeerFinderListener peerFinderListener) {
this.peerFinderListeners.add(peerFinderListener);
}
public | Coordinator |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/SerializationContext.java | {
"start": 2190,
"end": 31244
} | class ____
extends DatabindContext
implements // NOTE: not JDK serializable with 3.x (factory that creates these is)
ObjectWriteContext // 3.0, for use by jackson-core
{
/**
* Placeholder serializer used when <code>java.lang.Object</code> typed property
* is marked to be serialized.
*<br>
* NOTE: starting with 2.6, this instance is NOT used for any other types, and
* separate instances are constructed for "empty" Beans.
*/
protected final static ValueSerializer<Object> DEFAULT_UNKNOWN_SERIALIZER = new UnknownSerializer();
/*
/**********************************************************************
/* Configuration, general
/**********************************************************************
*/
/**
* Serialization configuration to use for serialization processing.
*/
protected final SerializationConfig _config;
/**
* Configuration to be used by streaming generator when it is constructed.
*
* @since 3.0
*/
protected final GeneratorSettings _generatorConfig;
/**
* Low-level {@link TokenStreamFactory} that may be used for constructing
* embedded generators.
*/
protected final TokenStreamFactory _streamFactory;
/**
* Token stream generator actively used; only set for per-call instances
*
* @since 3.0
*/
protected transient JsonGenerator _generator;
/**
* Capabilities of the output format.
*
* @since 3.0
*/
protected JacksonFeatureSet<StreamWriteCapability> _writeCapabilities;
/**
* View used for currently active serialization, if any.
*/
protected final Class<?> _activeView;
/*
/**********************************************************************
/* Configuration, serializer access
/**********************************************************************
*/
/**
* Factory used for constructing actual serializer instances.
* Only set for non-blueprint instances.
*/
protected final SerializerFactory _serializerFactory;
/**
* Serializer used to output a null value. Default implementation
* writes nulls using {@link JsonGenerator#writeNull}.
*/
protected final ValueSerializer<Object> _nullValueSerializer;
/**
* Flag set to indicate that we are using vanilla null value serialization
*/
protected final boolean _stdNullValueSerializer;
/*
/**********************************************************************
/* Helper objects for caching, reuse
/**********************************************************************
*/
/**
* Cache for doing type-to-value-serializer lookups.
*/
protected final SerializerCache _serializerCache;
/**
* For fast lookups, we will have a local non-shared read-only
* map that contains serializers previously fetched.
*/
protected final ReadOnlyClassToSerializerMap _knownSerializers;
/**
* Lazily acquired and instantiated formatter object: initialized
* first time it is needed, reused afterwards. Used via instances
* (not blueprints), so that access need not be thread-safe.
*/
protected DateFormat _dateFormat;
/**
* Lazily constructed {@link ClassIntrospector} instance: created from "blueprint"
*/
protected transient ClassIntrospector _classIntrospector;
/*
/**********************************************************************
/* Other state
/**********************************************************************
*/
/**
* Lazily-constructed holder for per-call attributes.
* Only set for non-blueprint instances.
*/
protected ContextAttributes _attributes;
/*
/**********************************************************************
/* Life-cycle
/**********************************************************************
*/
protected SerializationContext(TokenStreamFactory streamFactory,
SerializationConfig config, GeneratorSettings generatorConfig,
SerializerFactory f, SerializerCache cache)
{
_streamFactory = streamFactory;
_serializerFactory = f;
_config = config;
_generatorConfig = generatorConfig;
_serializerCache = cache;
// Default null key, value serializers configured via SerializerFactory
{
ValueSerializer<Object> ser = f.getDefaultNullValueSerializer();
if (ser == null) {
_stdNullValueSerializer = true;
ser = NullSerializer.instance;
} else {
_stdNullValueSerializer = false;
}
_nullValueSerializer = ser;
}
_activeView = config.getActiveView();
_attributes = config.getAttributes();
// Non-blueprint instances do have a read-only map; one that doesn't
// need synchronization for lookups.
_knownSerializers = _serializerCache.getReadOnlyLookupMap();
}
protected SerializationContext(SerializationContext src, SerializerCache serializerCache)
{
_streamFactory = src._streamFactory;
_serializerFactory = src._serializerFactory;
_config = src._config;
_generatorConfig = src._generatorConfig;
_serializerCache = serializerCache;
_stdNullValueSerializer = src._stdNullValueSerializer;
_nullValueSerializer = src._nullValueSerializer;
_activeView = src._activeView;
_attributes = src._attributes;
_knownSerializers = src._knownSerializers;
}
/*
/**********************************************************************
/* ObjectWriteContext impl, config access
/**********************************************************************
*/
@Override
public TokenStreamFactory tokenStreamFactory() {
return _streamFactory;
}
@Override
public FormatSchema getSchema() { return _generatorConfig.getSchema(); }
@Override
public CharacterEscapes getCharacterEscapes() { return _generatorConfig.getCharacterEscapes(); }
@Override
public PrettyPrinter getPrettyPrinter() {
PrettyPrinter pp = _generatorConfig.getPrettyPrinter();
if (pp == null) {
if (isEnabled(SerializationFeature.INDENT_OUTPUT)) {
pp = _config.constructDefaultPrettyPrinter();
}
}
return pp;
}
@Override
public boolean hasPrettyPrinter() {
return _generatorConfig.hasPrettyPrinter()
|| isEnabled(SerializationFeature.INDENT_OUTPUT);
}
@Override
public SerializableString getRootValueSeparator(SerializableString defaultSeparator) {
return _generatorConfig.getRootValueSeparator(defaultSeparator);
}
@Override
public int getStreamWriteFeatures(int defaults) {
return _config.getStreamWriteFeatures();
}
@Override
public int getFormatWriteFeatures(int defaults) {
return _config.getFormatWriteFeatures();
}
/*
/**********************************************************************
/* ObjectWriteContext impl, databind integration
/**********************************************************************
*/
@Override
public ArrayTreeNode createArrayNode() {
return _config.getNodeFactory().arrayNode();
}
@Override
public ObjectTreeNode createObjectNode() {
return _config.getNodeFactory().objectNode();
}
@Override
public void writeValue(JsonGenerator gen, Object value) throws JacksonException
{
// Let's keep track of active generator; useful mostly for error reporting...
JsonGenerator prevGen = _generator;
_assignGenerator(gen);
try {
if (value == null) {
if (_stdNullValueSerializer) { // minor perf optimization
gen.writeNull();
} else {
_nullValueSerializer.serialize(null, gen, this);
}
return;
}
Class<?> cls = value.getClass();
findTypedValueSerializer(cls, true).serialize(value, gen, this);
} finally {
_assignGenerator(prevGen);
}
}
@Override
public void writeTree(JsonGenerator gen, TreeNode tree) throws JacksonException
{
// 05-Oct-2017, tatu: Should probably optimize or something? Or not?
writeValue(gen, tree);
}
/*
/**********************************************************************
/* DatabindContext implementation (and closely related but ser-specific)
/**********************************************************************
*/
/**
* Method for accessing configuration for the serialization processing.
*/
@Override
public final SerializationConfig getConfig() { return _config; }
@Override
public final AnnotationIntrospector getAnnotationIntrospector() {
return _config.getAnnotationIntrospector();
}
@Override
public final TypeFactory getTypeFactory() {
return _config.getTypeFactory();
}
@Override
public JavaType constructSpecializedType(JavaType baseType, Class<?> subclass)
throws IllegalArgumentException
{
if (baseType.hasRawClass(subclass)) {
return baseType;
}
// Need little bit different handling due to [databind#2632]; pass `true` for
// "relaxed" type assingment checks.
return getConfig().getTypeFactory().constructSpecializedType(baseType, subclass, true);
}
@Override
public final Class<?> getActiveView() { return _activeView; }
@Override
public final boolean canOverrideAccessModifiers() {
return _config.canOverrideAccessModifiers();
}
@Override
public final boolean isEnabled(MapperFeature feature) {
return _config.isEnabled(feature);
}
@Override
public final boolean isEnabled(DatatypeFeature feature) {
return _config.isEnabled(feature);
}
@Override
public final DatatypeFeatures getDatatypeFeatures() {
return _config.getDatatypeFeatures();
}
@Override
public final JsonFormat.Value getDefaultPropertyFormat(Class<?> baseType) {
return _config.getDefaultPropertyFormat(baseType);
}
public final JsonInclude.Value getDefaultPropertyInclusion(Class<?> baseType) {
return _config.getDefaultPropertyInclusion(baseType);
}
/**
* Method for accessing default Locale to use: convenience method for
*<pre>
* getConfig().getLocale();
*</pre>
*/
@Override
public Locale getLocale() {
return _config.getLocale();
}
/**
* Method for accessing default TimeZone to use: convenience method for
*<pre>
* getConfig().getTimeZone();
*</pre>
*/
@Override
public TimeZone getTimeZone() {
return _config.getTimeZone();
}
/*
/**********************************************************************
/* Annotation, BeanDescription introspection
/**********************************************************************
*/
@Override
protected ClassIntrospector classIntrospector() {
if (_classIntrospector == null) {
_classIntrospector = _config.classIntrospectorInstance();
}
return _classIntrospector;
}
@Override
public BeanDescription introspectBeanDescription(JavaType type, AnnotatedClass ac) {
return classIntrospector().introspectForSerialization(type, ac);
}
/*
/**********************************************************************
/* Misc config access
/**********************************************************************
*/
@Override
public PropertyName findRootName(JavaType rootType) {
return _config.findRootName(this, rootType);
}
@Override
public PropertyName findRootName(Class<?> rawRootType) {
return _config.findRootName(this, rawRootType);
}
/*
/**********************************************************************
/* Generic attributes
/**********************************************************************
*/
@Override
public Object getAttribute(Object key) {
return _attributes.getAttribute(key);
}
@Override
public SerializationContext setAttribute(Object key, Object value)
{
_attributes = _attributes.withPerCallAttribute(key, value);
return this;
}
/*
/**********************************************************************
/* Access to other on/off features
/**********************************************************************
*/
/**
* Convenience method for checking whether specified serialization
* feature is enabled or not.
* Shortcut for:
*<pre>
* getConfig().isEnabled(feature);
*</pre>
*/
public final boolean isEnabled(SerializationFeature feature) {
return _config.isEnabled(feature);
}
/**
* "Bulk" access method for checking that all features specified by
* mask are enabled.
*/
public final boolean hasSerializationFeatures(int featureMask) {
return _config.hasSerializationFeatures(featureMask);
}
/**
* Accessor for checking whether input format has specified capability
* or not.
*
* @return True if input format has specified capability; false if not
*/
public final boolean isEnabled(StreamWriteCapability cap) {
Objects.requireNonNull(_writeCapabilities,
"_writeCapabilities not set for `SerializationContext`");
return _writeCapabilities.isEnabled(cap);
}
/*
/**********************************************************************
/* Access to other helper objects
/**********************************************************************
*/
/**
* Convenience method for accessing provider to find serialization filters used,
* equivalent to calling:
*<pre>
* getConfig().getFilterProvider();
*</pre>
*/
public final FilterProvider getFilterProvider() {
return _config.getFilterProvider();
}
public JsonGenerator getGenerator() {
return _generator;
}
/*
/**********************************************************************
/* Factory methods for getting appropriate TokenBuffer instances
/* (possibly overridden by backends for alternate data formats)
/**********************************************************************
*/
/**
* Specialized factory method used when we are converting values and do not
* typically have or use "real" parsers or generators.
*/
public TokenBuffer bufferForValueConversion() {
// 28-May-2021, tatu: Will directly call constructor from here, instead
// of adding a factory method, since alternate formats likely need to
// use different TokenBuffer sub[class:
// false -> no native Object Ids available (or rather not needed)
return new TokenBuffer(this, false);
}
/*
/**********************************************************************
/* Access to Object Id aspects
/**********************************************************************
*/
/**
* Method called to find the Object Id for given POJO, if one
* has been generated. Will always return a non-null Object;
* contents vary depending on whether an Object Id already
* exists or not.
*/
public abstract WritableObjectId findObjectId(Object forPojo,
ObjectIdGenerator<?> generatorType);
/*
/**********************************************************************
/* Serializer discovery: root/non-property value serializers
/**********************************************************************
*/
/**
* Method called to locate regular serializer, matching type serializer,
* and if both found, wrap them in a serializer that calls both in correct
* sequence. This method is mostly used for root-level serializer
* handling to allow for simpler caching. A call can always be replaced
* by equivalent calls to access serializer and type serializer separately.
* Note: contextualization (call to {@link ValueSerializer#createContextual}) is done
* before returning the {@link ValueSerializer}.
*
* @param rawType Type for purpose of locating a serializer; usually dynamic
* runtime type, but can also be static declared type, depending on configuration
* @param cache Whether resulting value serializer should be cached or not
*/
public ValueSerializer<Object> findTypedValueSerializer(Class<?> rawType,
boolean cache)
{
// First: do we have it cached?
ValueSerializer<Object> ser = _knownSerializers.typedValueSerializer(rawType);
if (ser != null) {
return ser;
}
// If not, compose from pieces:
JavaType fullType = _config.constructType(rawType);
ser = handleRootContextualization(findValueSerializer(rawType));
TypeSerializer typeSer = findTypeSerializer(fullType);
if (typeSer != null) {
typeSer = typeSer.forProperty(this, null);
ser = new TypeWrappedSerializer(typeSer, ser);
}
if (cache) {
_serializerCache.addTypedSerializer(rawType, ser);
}
return ser;
}
/**
* Method called to locate regular serializer, matching type serializer,
* and if both found, wrap them in a serializer that calls both in correct
* sequence. This method is mostly used for root-level serializer
* handling to allow for simpler caching. A call can always be replaced
* by equivalent calls to access serializer and type serializer separately.
* Note: contextualization (call to {@link ValueSerializer#createContextual}) is done
* before returning the {@link ValueSerializer}.
*
* @param valueType Declared type of value being serialized (which may not
* be actual runtime type); used for finding both value serializer and
* type serializer to use for adding polymorphic type (if any)
* @param cache Whether resulting value serializer should be cached or not
*/
public ValueSerializer<Object> findTypedValueSerializer(JavaType valueType, boolean cache)
{
ValueSerializer<Object> ser = _knownSerializers.typedValueSerializer(valueType);
if (ser != null) {
return ser;
}
ser = handleRootContextualization(findValueSerializer(valueType));
TypeSerializer typeSer = findTypeSerializer(valueType);
if (typeSer != null) {
typeSer = typeSer.forProperty(this, null);
ser = new TypeWrappedSerializer(typeSer, ser);
}
if (cache) {
_serializerCache.addTypedSerializer(valueType, ser);
}
return ser;
}
/**
* Method for finding (from cache) or creating (and caching) serializer for given type,
* without checking for polymorphic typing, and then contextualizing without actual
* property. This is most often used for root-level values (when writing
* sequences), but may sometimes be used for more esoteric value handling for
* delegation.
* Note: contextualization (call to {@link ValueSerializer#createContextual}) is done
* before returning the {@link ValueSerializer}.
*
* @since 3.0
*/
public ValueSerializer<Object> findRootValueSerializer(Class<?> rawType)
{
ValueSerializer<Object> ser = _knownSerializers.untypedValueSerializer(rawType);
if (ser == null) {
JavaType fullType = _config.constructType(rawType);
ser = _serializerCache.untypedValueSerializer(fullType);
if (ser == null) {
ser = _createAndCacheUntypedSerializer(rawType, fullType);
}
}
return handleRootContextualization(ser);
}
/**
* Method for finding (from cache) or creating (and caching) serializer for given type,
* without checking for polymorphic typing, and then contextualizing without actual
* property. This is most often used for root-level values (when writing
* sequences), but may sometimes be used for more esoteric value handling for
* delegation.
* Note: contextualization (call to {@link ValueSerializer#createContextual}) is done
* before returning the {@link ValueSerializer}.
*
* @since 3.0
*/
public ValueSerializer<Object> findRootValueSerializer(JavaType valueType)
{
ValueSerializer<Object> ser = _knownSerializers.untypedValueSerializer(valueType);
if (ser == null) {
ser = _createAndCacheUntypedSerializer(valueType);
}
return handleRootContextualization(ser);
}
/*
/**********************************************************************
/* Serializer discovery: property value serializers
/**********************************************************************
*/
/**
* Method used for locating "primary" property value serializer (one directly
* handling value of the property). Difference (if any) has to do with contextual resolution,
* and method(s) called: this method should only be called when caller is
* certain that this is the primary property value serializer.
* Contextualization (call to {@link ValueSerializer#createContextual(SerializationContext, BeanProperty)}
* will be done before returning the {@link ValueSerializer}.
*
* @param property Property that is being handled; will never be null, and its
* type has to match <code>valueType</code> parameter.
*/
public ValueSerializer<Object> findPrimaryPropertySerializer(JavaType valueType,
BeanProperty property)
{
ValueSerializer<Object> ser = _knownSerializers.untypedValueSerializer(valueType);
if (ser == null) {
ser = _createAndCachePropertySerializer(valueType, property);
}
return handlePrimaryContextualization(ser, property);
}
/**
* @see #findPrimaryPropertySerializer(JavaType, BeanProperty)
*/
public ValueSerializer<Object> findPrimaryPropertySerializer(Class<?> rawType,
BeanProperty property)
{
ValueSerializer<Object> ser = _knownSerializers.untypedValueSerializer(rawType);
if (ser == null) {
JavaType fullType = _config.constructType(rawType);
ser = _serializerCache.untypedValueSerializer(fullType);
if (ser == null) {
ser = _createAndCachePropertySerializer(rawType, fullType, property);
}
}
return handlePrimaryContextualization(ser, property);
}
/**
* Method similar to {@link #findPrimaryPropertySerializer(JavaType, BeanProperty)}
* but used for "content values", secondary types used by "primary" serializers
* for structured types like Arrays, {@link java.util.Collection}s, {@link java.util.Map}s
* and so on.
*<p>
* Serializer will be contextualized, but will not have type serializer wrapped.
*
* @param valueType Type of (secondary / content) values being serialized
* @param property (optional) Property that refers to values via primary type (so type
* DOES NOT necessarily match {@code valueType})
*/
public ValueSerializer<Object> findContentValueSerializer(JavaType valueType,
BeanProperty property)
{
ValueSerializer<Object> ser = _knownSerializers.untypedValueSerializer(valueType);
if (ser == null) {
ser = _createAndCachePropertySerializer(valueType, property);
}
return handleSecondaryContextualization(ser, property);
}
/**
* See {@link #findContentValueSerializer(JavaType, BeanProperty)}.
*/
public ValueSerializer<Object> findContentValueSerializer(Class<?> rawType,
BeanProperty property)
{
ValueSerializer<Object> ser = _knownSerializers.untypedValueSerializer(rawType);
if (ser == null) {
JavaType fullType = _config.constructType(rawType);
ser = _serializerCache.untypedValueSerializer(fullType);
if (ser == null) {
ser = _createAndCachePropertySerializer(rawType, fullType, property);
}
}
return handleSecondaryContextualization(ser, property);
}
/*
/**********************************************************************
/* General serializer locating functionality
/**********************************************************************
*/
/**
* @see #findValueSerializer(JavaType)
*/
public ValueSerializer<Object> findValueSerializer(Class<?> rawType)
{
ValueSerializer<Object> ser = _knownSerializers.untypedValueSerializer(rawType);
if (ser == null) {
JavaType fullType = _config.constructType(rawType);
ser = _serializerCache.untypedValueSerializer(fullType);
if (ser == null) {
ser = _createAndCacheUntypedSerializer(rawType, fullType);
}
}
return ser;
}
/**
* Method variant used when we do NOT want contextualization to happen; it will need
* to be done at a later point (many serializers are not in operational state before
* contextualization, call to {@link ValueSerializer#createContextual(SerializationContext, BeanProperty)}),
* but caller wants to be able to do that at a later point; sometimes to avoid infinite loops
*/
public ValueSerializer<Object> findValueSerializer(JavaType valueType)
{
// (see comments from above method)
ValueSerializer<Object> ser = _knownSerializers.untypedValueSerializer(valueType);
if (ser == null) {
ser = _createAndCacheUntypedSerializer(valueType);
}
return ser;
}
/*
/**********************************************************************
/* Serializer discovery: type serializers
/**********************************************************************
*/
/**
* Method called to get the {@link TypeSerializer} to use for including Type Id necessary
* for serializing for the given Java class.
* Useful for schema generators.
*/
public TypeSerializer findTypeSerializer(JavaType baseType) {
return findTypeSerializer(baseType, introspectClassAnnotations(baseType));
}
/**
* Method called to get the {@link TypeSerializer} to use for including Type Id necessary
* for serializing for the given Java class.
* Useful for schema generators.
*
* @since 3.0
*/
public TypeSerializer findTypeSerializer(JavaType baseType, AnnotatedClass classAnnotations)
{
return _config.getTypeResolverProvider().findTypeSerializer(this, baseType,
classAnnotations);
}
/**
* Like {@link #findTypeSerializer(JavaType)}, but for use from specific POJO property.
* Method called to create a type information serializer for values of given
* non-container property
* if one is needed. If not needed (no polymorphic handling configured), should
* return null.
*
* @param baseType Declared type to use as the base type for type information serializer
*
* @return Type serializer to use for property values, if one is needed; null if not.
*
* @since 3.0
*/
public TypeSerializer findPropertyTypeSerializer(JavaType baseType, AnnotatedMember accessor)
{
return _config.getTypeResolverProvider()
.findPropertyTypeSerializer(this, accessor, baseType);
}
/*
/**********************************************************************
/* Serializer discovery: key serializers
/**********************************************************************
*/
/**
* Method called to get the serializer to use for serializing
* non-null Map keys. Separation from regular
* {@link #findValueSerializer} method is because actual write
* method must be different (@link JsonGenerator#writeName};
* but also since behavior for some key types may differ.
*<p>
* Note that the serializer itself can be called with instances
* of any Java object, but not nulls.
*/
public ValueSerializer<Object> findKeySerializer(JavaType keyType, BeanProperty property)
{
// 16-Mar-2018, tatu: Used to have "default key serializer" in 2.x; dropped to let/make
// custom code use Module | SerializationContext |
java | spring-projects__spring-framework | spring-beans/src/main/java/org/springframework/beans/factory/parsing/ProblemReporter.java | {
"start": 910,
"end": 1707
} | interface ____ {
/**
* Called when a fatal error is encountered during the parsing process.
* <p>Implementations must treat the given problem as fatal,
* i.e. they have to eventually raise an exception.
* @param problem the source of the error (never {@code null})
*/
void fatal(Problem problem);
/**
* Called when an error is encountered during the parsing process.
* <p>Implementations may choose to treat errors as fatal.
* @param problem the source of the error (never {@code null})
*/
void error(Problem problem);
/**
* Called when a warning is raised during the parsing process.
* <p>Warnings are <strong>never</strong> considered to be fatal.
* @param problem the source of the warning (never {@code null})
*/
void warning(Problem problem);
}
| ProblemReporter |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcDetailedMetrics.java | {
"start": 1477,
"end": 3183
} | class ____ {
static final String DEFERRED_PREFIX = "Deferred";
static final String OVERALL_PROCESSING_PREFIX = "Overall";
// per-method RPC processing time
@Metric MutableRatesWithAggregation rates;
@Metric MutableRatesWithAggregation deferredRpcRates;
/**
* per-method overall RPC processing time, from request arrival to when the
* response is sent back.
*/
@Metric MutableRatesWithAggregation overallRpcProcessingRates;
static final Logger LOG = LoggerFactory.getLogger(RpcDetailedMetrics.class);
final MetricsRegistry registry;
final String name;
// Mainly to facilitate testing in TestRPC.java
public MutableRatesWithAggregation getOverallRpcProcessingRates() {
return overallRpcProcessingRates;
}
RpcDetailedMetrics(int port) {
name = "RpcDetailedActivityForPort"+ port;
registry = new MetricsRegistry("rpcdetailed")
.tag("port", "RPC port", String.valueOf(port));
LOG.debug(registry.info().toString());
}
public String name() { return name; }
public static RpcDetailedMetrics create(int port) {
RpcDetailedMetrics m = new RpcDetailedMetrics(port);
return DefaultMetricsSystem.instance().register(m.name, null, m);
}
/**
* Initialize the metrics for JMX with protocol methods
* @param protocol the protocol class
*/
public void init(Class<?> protocol) {
rates.init(protocol);
deferredRpcRates.init(protocol, DEFERRED_PREFIX);
overallRpcProcessingRates.init(protocol, OVERALL_PROCESSING_PREFIX);
}
/**
* Add an RPC processing time sample
* @param rpcCallName of the RPC call
* @param processingTime the processing time
*/
//@Override // some instrumentation | RpcDetailedMetrics |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_2352/dto/TheModels.java | {
"start": 265,
"end": 314
} | class ____ extends ArrayList<TheModel> {
}
| TheModels |
java | apache__camel | core/camel-core-languages/src/main/java/org/apache/camel/language/simple/SimpleExpressionBuilder.java | {
"start": 49572,
"end": 49701
} | interface ____ {
Object getKeyedEntity(Exchange exchange, Expression key);
}
}
}
| KeyedEntityRetrievalStrategy |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/common/serialization/AbstractDeserializationSchema.java | {
"start": 4696,
"end": 4951
} | class ____ generic. In that case, please use the
* constructor that accepts a {@link #AbstractDeserializationSchema(TypeHint) TypeHint}, or a
* {@link #AbstractDeserializationSchema(TypeInformation) TypeInformation}.
*
* @param type The | is |
java | spring-projects__spring-boot | integration-test/spring-boot-actuator-integration-tests/src/test/java/org/springframework/boot/actuate/endpoint/web/jersey/JerseyWebEndpointIntegrationTests.java | {
"start": 3045,
"end": 4377
} | class ____
extends AbstractWebEndpointIntegrationTests<AnnotationConfigServletWebServerApplicationContext> {
JerseyWebEndpointIntegrationTests() {
super(JerseyWebEndpointIntegrationTests::createApplicationContext,
JerseyWebEndpointIntegrationTests::applyAuthenticatedConfiguration);
}
private static AnnotationConfigServletWebServerApplicationContext createApplicationContext() {
AnnotationConfigServletWebServerApplicationContext context = new AnnotationConfigServletWebServerApplicationContext();
context.register(JerseyConfiguration.class);
return context;
}
private static void applyAuthenticatedConfiguration(AnnotationConfigServletWebServerApplicationContext context) {
context.register(AuthenticatedConfiguration.class);
}
@Override
protected int getPort(AnnotationConfigServletWebServerApplicationContext context) {
return context.getWebServer().getPort();
}
@Override
protected void validateErrorBody(WebTestClient.BodyContentSpec body, HttpStatus status, String path,
String message) {
// Jersey doesn't support the general error page handling
}
@Override
@Test
@Disabled("Jersey does not distinguish between /example and /example/")
protected void operationWithTrailingSlashShouldNotMatch() {
}
@Configuration(proxyBeanMethods = false)
static | JerseyWebEndpointIntegrationTests |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/compositefk/ManyToOneEmbeddedIdWithToOneFKTest.java | {
"start": 7069,
"end": 7567
} | class ____ {
@Id
private Integer id;
private String description;
public DataCenter() {
}
public DataCenter(Integer id, String description) {
this.id = id;
this.description = description;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
// public Integer getId() {
// return id;
// }
}
@Entity(name = "System")
@Table( name = "systems" )
public static | DataCenter |
java | apache__hadoop | hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-blockgen/src/main/java/org/apache/hadoop/tools/dynamometer/blockgenerator/XMLParserMapper.java | {
"start": 1474,
"end": 2195
} | class ____
extends Mapper<LongWritable, Text, IntWritable, BlockInfo> {
private static final Logger LOG =
LoggerFactory.getLogger(XMLParserMapper.class);
@Override
public void setup(Mapper.Context context) {
Configuration conf = context.getConfiguration();
numDataNodes = conf.getInt(GenerateBlockImagesDriver.NUM_DATANODES_KEY, -1);
parser = new XMLParser();
}
// Blockindexes should be generated serially
private int blockIndex = 0;
private int numDataNodes;
private XMLParser parser;
/**
* Read the input XML file line by line, and generate list of blocks. The
* actual parsing logic is handled by {@link XMLParser}. This mapper just
* delegates to that | XMLParserMapper |
java | apache__dubbo | dubbo-remoting/dubbo-remoting-http12/src/main/java/org/apache/dubbo/remoting/http12/exception/HttpRequestTimeout.java | {
"start": 915,
"end": 1464
} | class ____ extends HttpStatusException {
private static final long serialVersionUID = 1L;
private final String side;
private HttpRequestTimeout(String side) {
super(HttpStatus.REQUEST_TIMEOUT.getCode());
this.side = side;
}
public String getSide() {
return side;
}
public static HttpRequestTimeout serverSide() {
return new HttpRequestTimeout("server");
}
public static HttpRequestTimeout clientSide() {
return new HttpRequestTimeout("client");
}
}
| HttpRequestTimeout |
java | google__auto | value/src/test/java/com/google/auto/value/processor/ExtensionTest.java | {
"start": 51877,
"end": 52360
} | class ____ extends AutoValueExtension {
private final Consumer<Context> checker;
ContextCheckingExtension(Consumer<Context> checker) {
this.checker = checker;
}
@Override
public boolean applicable(Context context) {
return true;
}
@Override
public String generateClass(
Context context, String className, String classToExtend, boolean isFinal) {
checker.accept(context);
return null;
}
}
}
| ContextCheckingExtension |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/introspect/BeanPropertyDefinition.java | {
"start": 686,
"end": 9199
} | class ____
implements FullyNamed
{
protected final static JsonInclude.Value EMPTY_INCLUDE = JsonInclude.Value.empty();
/*
/**********************************************************
/* Fluent factory methods for creating modified copies
/**********************************************************
*/
/**
* Method that can be used to create a definition with
* same settings as this one, but with different
* (external) name; that is, one for which
* {@link #getName()} would return <code>newName</code>.
*
* @since 2.3
*/
public abstract BeanPropertyDefinition withName(PropertyName newName);
/**
* Alternate "mutant factory" that will only change simple name, but
* leave other optional parts (like namespace) as is.
*
* @since 2.3
*/
public abstract BeanPropertyDefinition withSimpleName(String newSimpleName);
/*
/**********************************************************
/* Property name information, `FullyNamed`
/**********************************************************
*/
// public abstract String getName();
// public abstract PropertyName getFullName();
// public boolean hasName(PropertyName name);
/*
/**********************************************************
/* Property name information, other
/**********************************************************
*/
/**
* Accessor that can be used to determine implicit name from underlying
* element(s) before possible renaming. This is the "internal"
* name derived from accessor ("x" from "getX"), and is not based on
* annotations or naming strategy.
*/
public abstract String getInternalName();
/**
* Accessor for finding wrapper name to use for property (if any).
*/
public abstract PropertyName getWrapperName();
/**
* Accessor that can be called to check whether property was included
* due to an explicit marker (usually annotation), or just by naming
* convention.
*
* @return True if property was explicitly included (usually by having
* one of components being annotated); false if inclusion was purely
* due to naming or visibility definitions (that is, implicit)
*/
public abstract boolean isExplicitlyIncluded();
/**
* Accessor that can be called to check whether property name was
* due to an explicit marker (usually annotation), or just by naming
* convention or use of "use-default-name" marker (annotation).
*<p>
* Note that entries that return true from this method will always
* return true for {@link #isExplicitlyIncluded()}, but not necessarily
* vice versa.
*
* @since 2.4
*/
public boolean isExplicitlyNamed() {
return isExplicitlyIncluded();
}
/*
/**********************************************************
/* Basic property metadata
/**********************************************************
*/
/**
* @since 2.9
*/
public abstract JavaType getPrimaryType();
/**
* @since 2.9
*/
public abstract Class<?> getRawPrimaryType();
/**
* Method for accessing additional metadata.
* NOTE: will never return null, so de-referencing return value
* is safe.
*
* @since 2.3
*/
public abstract PropertyMetadata getMetadata();
/**
* Method used to check if this property is expected to have a value;
* and if none found, should either be considered invalid (and most likely
* fail deserialization), or handled by other means (by providing default
* value)
*/
public boolean isRequired() {
return getMetadata().isRequired();
}
/*
/**********************************************************
/* Capabilities
/**********************************************************
*/
public boolean couldDeserialize() { return getMutator() != null; }
public boolean couldSerialize() { return getAccessor() != null; }
/*
/**********************************************************
/* Access to accessors (fields, methods etc)
/**********************************************************
*/
public abstract boolean hasGetter();
public abstract boolean hasSetter();
public abstract boolean hasField();
public abstract boolean hasConstructorParameter();
public abstract AnnotatedMethod getGetter();
public abstract AnnotatedMethod getSetter();
public abstract AnnotatedField getField();
public abstract AnnotatedParameter getConstructorParameter();
/**
* Additional method that may be called instead of {@link #getConstructorParameter()}
* to get access to all constructor parameters, not just the highest priority one.
*
* @since 2.5
*/
public Iterator<AnnotatedParameter> getConstructorParameters() {
return ClassUtil.emptyIterator();
}
/**
* Method used to find accessor (getter, field to access) to use for accessing
* value of the property.
* Null if no such member exists.
*/
public AnnotatedMember getAccessor()
{
AnnotatedMember m = getGetter();
if (m == null) {
m = getField();
}
return m;
}
/**
* Method used to find mutator (constructor parameter, setter, field) to use for
* changing value of the property.
* Null if no such member exists.
*/
public AnnotatedMember getMutator() {
AnnotatedMember acc = getConstructorParameter();
if (acc == null) {
acc = getSetter();
if (acc == null) {
acc = getField();
}
}
return acc;
}
/**
* @since 2.3
*/
public AnnotatedMember getNonConstructorMutator() {
AnnotatedMember m = getSetter();
if (m == null) {
m = getField();
}
return m;
}
/**
* Method used to find the property member (getter, setter, field) that has
* the highest precedence in current context (getter method when serializing,
* if available, and so forth), if any.
*<p>
* Note: may throw {@link IllegalArgumentException} in case problems are found
* trying to getter or setter info.
*<p>
* Note: abstract since 2.5
*
* @since 2.1
*/
public abstract AnnotatedMember getPrimaryMember();
/*
/**********************************************************
/* More refined access to configuration features
/* (usually based on annotations and/or config overrides)
/* Since most trivial implementations do not support
/* these methods, they are implemented as no-ops.
/**********************************************************
*/
/**
* Method used to find View-inclusion definitions for the property.
*/
public Class<?>[] findViews() { return null; }
/**
* Method used to find whether property is part of a bi-directional
* reference.
*/
public AnnotationIntrospector.ReferenceProperty findReferenceType() { return null; }
/**
* @since 2.9
*/
public String findReferenceName() {
AnnotationIntrospector.ReferenceProperty ref = findReferenceType();
return (ref == null) ? null : ref.getName();
}
/**
* Method used to check whether this logical property has a marker
* to indicate it should be used as the type id for polymorphic type
* handling.
*/
public boolean isTypeId() { return false; }
/**
* Method used to check whether this logical property indicates that
* value POJOs should be written using additional Object Identifier
* (or, when multiple references exist, all but first AS Object Identifier).
*/
public ObjectIdInfo findObjectIdInfo() { return null; }
/**
* Method used to check if this property has specific inclusion override
* associated with it or not.
* It should NOT check for any default settings (global, per-type, or
* containing POJO settings)
*
* @since 2.5
*/
public abstract JsonInclude.Value findInclusion();
/**
* Method for finding all aliases of the property, if any.
*
* @return {@code List} of aliases, if any; never null (empty list if no aliases found)
*/
public abstract List<PropertyName> findAliases();
}
| BeanPropertyDefinition |
java | elastic__elasticsearch | x-pack/plugin/rank-vectors/src/test/java/org/elasticsearch/xpack/rank/vectors/mapper/RankVectorsFieldMapperTests.java | {
"start": 2541,
"end": 21050
} | class ____ extends SyntheticVectorsMapperTestCase {
private final ElementType elementType;
private final int dims;
public RankVectorsFieldMapperTests() {
this.elementType = randomFrom(ElementType.BYTE, ElementType.FLOAT, ElementType.BIT);
int baseDims = ElementType.BIT == elementType ? 4 * Byte.SIZE : 4;
int randomMultiplier = ElementType.FLOAT == elementType ? randomIntBetween(1, 64) : 1;
this.dims = baseDims * randomMultiplier;
}
@Override
protected Collection<? extends Plugin> getPlugins() {
return Collections.singletonList(new LocalStateRankVectors(SETTINGS));
}
@Override
protected void minimalMapping(XContentBuilder b) throws IOException {
indexMapping(b, IndexVersion.current());
}
@Override
protected void minimalMapping(XContentBuilder b, IndexVersion indexVersion) throws IOException {
indexMapping(b, indexVersion);
}
private void indexMapping(XContentBuilder b, IndexVersion indexVersion) throws IOException {
b.field("type", "rank_vectors").field("dims", dims);
if (elementType != ElementType.FLOAT) {
b.field("element_type", elementType.toString());
}
}
@Override
protected Object getSampleValueForDocument(boolean binaryFormat) {
return getSampleValueForDocument();
}
@Override
protected Object getSampleValueForDocument() {
int numVectors = randomIntBetween(1, 16);
return Stream.generate(
() -> elementType == ElementType.FLOAT
? convertToList(randomNormalizedVector(this.dims))
: convertToList(randomByteArrayOfLength(elementType == ElementType.BIT ? this.dims / Byte.SIZE : dims))
).limit(numVectors).toList();
}
@Override
protected void registerParameters(ParameterChecker checker) throws IOException {
checker.registerConflictCheck(
"dims",
fieldMapping(b -> b.field("type", "rank_vectors").field("dims", dims)),
fieldMapping(b -> b.field("type", "rank_vectors").field("dims", dims + 8))
);
checker.registerConflictCheck(
"element_type",
fieldMapping(b -> b.field("type", "rank_vectors").field("dims", dims).field("element_type", "byte")),
fieldMapping(b -> b.field("type", "rank_vectors").field("dims", dims).field("element_type", "float"))
);
checker.registerConflictCheck(
"element_type",
fieldMapping(b -> b.field("type", "rank_vectors").field("dims", dims).field("element_type", "float")),
fieldMapping(b -> b.field("type", "rank_vectors").field("dims", dims * 8).field("element_type", "bit"))
);
checker.registerConflictCheck(
"element_type",
fieldMapping(b -> b.field("type", "rank_vectors").field("dims", dims).field("element_type", "byte")),
fieldMapping(b -> b.field("type", "rank_vectors").field("dims", dims * 8).field("element_type", "bit"))
);
}
@Override
protected boolean supportsStoredFields() {
return false;
}
@Override
protected boolean supportsIgnoreMalformed() {
return false;
}
@Override
protected void assertSearchable(MappedFieldType fieldType) {
assertThat(fieldType, instanceOf(RankVectorsFieldMapper.RankVectorsFieldType.class));
assertFalse(fieldType.isSearchable());
}
protected void assertExistsQuery(MappedFieldType fieldType, Query query, LuceneDocument fields) {
assertThat(query, instanceOf(FieldExistsQuery.class));
FieldExistsQuery existsQuery = (FieldExistsQuery) query;
assertEquals("field", existsQuery.getField());
assertNoFieldNamesField(fields);
}
// We override this because dense vectors are the only field type that are not aggregatable but
// that do provide fielddata. TODO: resolve this inconsistency!
@Override
public void testAggregatableConsistency() {}
public void testDims() {
{
Exception e = expectThrows(MapperParsingException.class, () -> createMapperService(fieldMapping(b -> {
b.field("type", "rank_vectors");
b.field("dims", 0);
})));
assertThat(
e.getMessage(),
equalTo("Failed to parse mapping: " + "The number of dimensions should be in the range [1, 4096] but was [0]")
);
}
// test max limit for non-indexed vectors
{
Exception e = expectThrows(MapperParsingException.class, () -> createMapperService(fieldMapping(b -> {
b.field("type", "rank_vectors");
b.field("dims", 5000);
})));
assertThat(
e.getMessage(),
equalTo("Failed to parse mapping: " + "The number of dimensions should be in the range [1, 4096] but was [5000]")
);
}
}
public void testMergeDims() throws IOException {
XContentBuilder mapping = mapping(b -> {
b.startObject("field");
b.field("type", "rank_vectors");
b.endObject();
});
MapperService mapperService = createMapperService(mapping);
mapping = mapping(b -> {
b.startObject("field");
b.field("type", "rank_vectors").field("dims", dims);
b.endObject();
});
merge(mapperService, mapping);
assertEquals(
XContentHelper.convertToMap(BytesReference.bytes(mapping), false, mapping.contentType()).v2(),
XContentHelper.convertToMap(mapperService.documentMapper().mappingSource().uncompressed(), false, mapping.contentType()).v2()
);
}
public void testLargeDimsBit() throws IOException {
createMapperService(fieldMapping(b -> {
b.field("type", "rank_vectors");
b.field("dims", 1024 * Byte.SIZE);
b.field("element_type", ElementType.BIT.toString());
}));
}
public void testNonIndexedVector() throws Exception {
DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", "rank_vectors").field("dims", 3)));
float[][] validVectors = { { -12.1f, 100.7f, -4 }, { 42f, .05f, -1f } };
double[] dotProduct = new double[2];
int vecId = 0;
for (float[] vector : validVectors) {
for (float value : vector) {
dotProduct[vecId] += value * value;
}
vecId++;
}
ParsedDocument doc1 = mapper.parse(source(b -> {
b.startArray("field");
for (float[] vector : validVectors) {
b.startArray();
for (float value : vector) {
b.value(value);
}
b.endArray();
}
b.endArray();
}));
List<IndexableField> fields = doc1.rootDoc().getFields("field");
assertEquals(1, fields.size());
assertThat(fields.get(0), instanceOf(BinaryDocValuesField.class));
// assert that after decoding the indexed value is equal to expected
BytesRef vectorBR = fields.get(0).binaryValue();
assertEquals(DenseVectorFieldMapper.FLOAT_ELEMENT.getNumBytes(validVectors[0].length) * validVectors.length, vectorBR.length);
float[][] decodedValues = new float[validVectors.length][];
for (int i = 0; i < validVectors.length; i++) {
decodedValues[i] = new float[validVectors[i].length];
FloatBuffer fb = ByteBuffer.wrap(vectorBR.bytes, i * Float.BYTES * validVectors[i].length, Float.BYTES * validVectors[i].length)
.order(ByteOrder.LITTLE_ENDIAN)
.asFloatBuffer();
fb.get(decodedValues[i]);
}
List<IndexableField> magFields = doc1.rootDoc().getFields("field" + RankVectorsFieldMapper.VECTOR_MAGNITUDES_SUFFIX);
assertEquals(1, magFields.size());
assertThat(magFields.get(0), instanceOf(BinaryDocValuesField.class));
BytesRef magBR = magFields.get(0).binaryValue();
assertEquals(Float.BYTES * validVectors.length, magBR.length);
FloatBuffer fb = ByteBuffer.wrap(magBR.bytes, magBR.offset, magBR.length).order(ByteOrder.LITTLE_ENDIAN).asFloatBuffer();
for (int i = 0; i < validVectors.length; i++) {
assertEquals((float) Math.sqrt(dotProduct[i]), fb.get(), 0.001f);
}
for (int i = 0; i < validVectors.length; i++) {
assertArrayEquals("Decoded dense vector values is not equal to the indexed one.", validVectors[i], decodedValues[i], 0.001f);
}
}
public void testPoorlyIndexedVector() throws Exception {
DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", "rank_vectors").field("dims", 3)));
float[][] validVectors = { { -12.1f, 100.7f, -4 }, { 42f, .05f, -1f } };
double[] dotProduct = new double[2];
int vecId = 0;
for (float[] vector : validVectors) {
for (float value : vector) {
dotProduct[vecId] += value * value;
}
vecId++;
}
expectThrows(DocumentParsingException.class, () -> mapper.parse(source(b -> {
b.startArray("field");
b.startArray(); // double nested array should fail
for (float[] vector : validVectors) {
b.startArray();
for (float value : vector) {
b.value(value);
}
b.endArray();
}
b.endArray();
b.endArray();
})));
}
public void testInvalidParameters() {
MapperParsingException e = expectThrows(
MapperParsingException.class,
() -> createDocumentMapper(fieldMapping(b -> b.field("type", "rank_vectors").field("dims", 3).field("element_type", "foo")))
);
assertThat(e.getMessage(), containsString("invalid element_type [foo]; available types are "));
e = expectThrows(
MapperParsingException.class,
() -> createDocumentMapper(fieldMapping(b -> b.field("type", "rank_vectors").field("dims", 3).startObject("foo").endObject()))
);
assertThat(
e.getMessage(),
containsString("Failed to parse mapping: unknown parameter [foo] on mapper [field] of type [rank_vectors]")
);
}
public void testDocumentsWithIncorrectDims() throws Exception {
int dims = 3;
XContentBuilder fieldMapping = fieldMapping(b -> {
b.field("type", "rank_vectors");
b.field("dims", dims);
});
DocumentMapper mapper = createDocumentMapper(fieldMapping);
// test that error is thrown when a document has number of dims more than defined in the mapping
float[][] invalidVector = new float[4][dims + 1];
DocumentParsingException e = expectThrows(DocumentParsingException.class, () -> mapper.parse(source(b -> {
b.startArray("field");
for (float[] vector : invalidVector) {
b.startArray();
for (float value : vector) {
b.value(value);
}
b.endArray();
}
b.endArray();
})));
assertThat(e.getCause().getMessage(), containsString("has more dimensions than defined in the mapping [3]"));
// test that error is thrown when a document has number of dims less than defined in the mapping
float[][] invalidVector2 = new float[4][dims - 1];
DocumentParsingException e2 = expectThrows(DocumentParsingException.class, () -> mapper.parse(source(b -> {
b.startArray("field");
for (float[] vector : invalidVector2) {
b.startArray();
for (float value : vector) {
b.value(value);
}
b.endArray();
}
b.endArray();
})));
assertThat(e2.getCause().getMessage(), containsString("has a different number of dimensions [2] than defined in the mapping [3]"));
// test that error is thrown when some of the vectors have correct number of dims, but others do not
DocumentParsingException e3 = expectThrows(DocumentParsingException.class, () -> mapper.parse(source(b -> {
b.startArray("field");
for (float[] vector : new float[4][dims]) {
b.startArray();
for (float value : vector) {
b.value(value);
}
b.endArray();
}
for (float[] vector : invalidVector2) {
b.startArray();
for (float value : vector) {
b.value(value);
}
b.endArray();
}
b.endArray();
})));
assertThat(e3.getCause().getMessage(), containsString("has a different number of dimensions [2] than defined in the mapping [3]"));
}
@Override
protected void assertFetchMany(MapperService mapperService, String field, Object value, String format, int count) throws IOException {
assumeFalse("Dense vectors currently don't support multiple values in the same field", false);
}
/**
* Dense vectors don't support doc values or string representation (for doc value parser/fetching).
* We may eventually support that, but until then, we only verify that the parsing and fields fetching matches the provided value object
*/
@Override
protected void assertFetch(MapperService mapperService, String field, Object value, String format) throws IOException {
MappedFieldType ft = mapperService.fieldType(field);
MappedFieldType.FielddataOperation fdt = MappedFieldType.FielddataOperation.SEARCH;
SourceToParse source = source(b -> b.field(ft.name(), value));
SearchExecutionContext searchExecutionContext = mock(SearchExecutionContext.class);
when(searchExecutionContext.getIndexSettings()).thenReturn(mapperService.getIndexSettings());
when(searchExecutionContext.isSourceEnabled()).thenReturn(true);
when(searchExecutionContext.sourcePath(field)).thenReturn(Set.of(field));
when(searchExecutionContext.getForField(ft, fdt)).thenAnswer(inv -> fieldDataLookup(mapperService).apply(ft, () -> {
throw new UnsupportedOperationException();
}, fdt));
ValueFetcher nativeFetcher = ft.valueFetcher(searchExecutionContext, format);
ParsedDocument doc = mapperService.documentMapper().parse(source);
withLuceneIndex(mapperService, iw -> iw.addDocuments(doc.docs()), ir -> {
Source s = SourceProvider.fromLookup(mapperService.mappingLookup(), null, mapperService.getMapperMetrics().sourceFieldMetrics())
.getSource(ir.leaves().get(0), 0);
nativeFetcher.setNextReader(ir.leaves().get(0));
List<Object> fromNative = nativeFetcher.fetchValues(s, 0, new ArrayList<>());
RankVectorsFieldMapper.RankVectorsFieldType denseVectorFieldType = (RankVectorsFieldMapper.RankVectorsFieldType) ft;
switch (denseVectorFieldType.getElementType()) {
case BYTE -> assumeFalse("byte element type testing not currently added", false);
case FLOAT -> {
float[][] fetchedFloats = new float[fromNative.size()][];
for (int i = 0; i < fromNative.size(); i++) {
fetchedFloats[i] = (float[]) fromNative.get(i);
}
assertThat("fetching " + value, fetchedFloats, equalTo(value));
}
}
});
}
@Override
protected void randomFetchTestFieldConfig(XContentBuilder b) throws IOException {
b.field("type", "rank_vectors").field("dims", randomIntBetween(2, 4096)).field("element_type", "float");
}
@Override
protected Object generateRandomInputValue(MappedFieldType ft) {
RankVectorsFieldMapper.RankVectorsFieldType vectorFieldType = (RankVectorsFieldMapper.RankVectorsFieldType) ft;
int numVectors = randomIntBetween(1, 16);
return switch (vectorFieldType.getElementType()) {
case BYTE -> {
byte[][] vectors = new byte[numVectors][vectorFieldType.getVectorDimensions()];
for (int i = 0; i < numVectors; i++) {
vectors[i] = randomByteArrayOfLength(vectorFieldType.getVectorDimensions());
}
yield vectors;
}
case FLOAT -> {
float[][] vectors = new float[numVectors][vectorFieldType.getVectorDimensions()];
for (int i = 0; i < numVectors; i++) {
for (int j = 0; j < vectorFieldType.getVectorDimensions(); j++) {
vectors[i][j] = randomFloat();
}
}
yield vectors;
}
case BIT -> {
byte[][] vectors = new byte[numVectors][vectorFieldType.getVectorDimensions() / 8];
for (int i = 0; i < numVectors; i++) {
vectors[i] = randomByteArrayOfLength(vectorFieldType.getVectorDimensions() / 8);
}
yield vectors;
}
case BFLOAT16 -> throw new AssertionError();
};
}
public void testCannotBeUsedInMultifields() {
Exception e = expectThrows(MapperParsingException.class, () -> createMapperService(fieldMapping(b -> {
b.field("type", "keyword");
b.startObject("fields");
b.startObject("vectors");
minimalMapping(b);
b.endObject();
b.endObject();
})));
assertThat(e.getMessage(), containsString("Field [vectors] of type [rank_vectors] can't be used in multifields"));
}
@Override
protected IngestScriptSupport ingestScriptSupport() {
throw new AssumptionViolatedException("not supported");
}
@Override
protected SyntheticSourceSupport syntheticSourceSupport(boolean ignoreMalformed) {
return new DenseVectorSyntheticSourceSupport();
}
@Override
protected boolean supportsEmptyInputArray() {
return false;
}
private static | RankVectorsFieldMapperTests |
java | lettuce-io__lettuce-core | src/test/java/io/lettuce/test/condition/RedisConditions.java | {
"start": 3726,
"end": 9666
} | class ____ implements Comparable<Version> {
private static final String VERSION_PARSE_ERROR = "Invalid version string! Could not parse segment %s within %s.";
private final int major;
private final int minor;
private final int bugfix;
private final int build;
/**
* Creates a new {@link Version} from the given integer values. At least one value has to be given but a maximum of 4.
*
* @param parts must not be {@code null} or empty.
*/
Version(int... parts) {
LettuceAssert.notNull(parts, "Parts must not be null!");
LettuceAssert.isTrue(parts.length > 0 && parts.length < 5,
String.format("Invalid parts length. 0 < %s < 5", parts.length));
this.major = parts[0];
this.minor = parts.length > 1 ? parts[1] : 0;
this.bugfix = parts.length > 2 ? parts[2] : 0;
this.build = parts.length > 3 ? parts[3] : 0;
LettuceAssert.isTrue(major >= 0, "Major version must be greater or equal zero!");
LettuceAssert.isTrue(minor >= 0, "Minor version must be greater or equal zero!");
LettuceAssert.isTrue(bugfix >= 0, "Bugfix version must be greater or equal zero!");
LettuceAssert.isTrue(build >= 0, "Build version must be greater or equal zero!");
}
/**
* Parses the given string representation of a version into a {@link Version} object.
*
* @param version must not be {@code null} or empty.
* @return
*/
public static Version parse(String version) {
LettuceAssert.notEmpty(version, "Version must not be null o empty!");
String[] parts = version.trim().split("\\.");
int[] intParts = new int[parts.length];
for (int i = 0; i < parts.length; i++) {
String input = i == parts.length - 1 ? parts[i].replaceAll("\\D.*", "") : parts[i];
if (LettuceStrings.isNotEmpty(input)) {
try {
intParts[i] = Integer.parseInt(input);
} catch (IllegalArgumentException o_O) {
throw new IllegalArgumentException(String.format(VERSION_PARSE_ERROR, input, version), o_O);
}
}
}
return new Version(intParts);
}
/**
* Returns whether the current {@link Version} is greater (newer) than the given one.
*
* @param version
* @return
*/
public boolean isGreaterThan(Version version) {
return compareTo(version) > 0;
}
/**
* Returns whether the current {@link Version} is greater (newer) or the same as the given one.
*
* @param version
* @return
*/
boolean isGreaterThanOrEqualTo(Version version) {
return compareTo(version) >= 0;
}
/**
* Returns whether the current {@link Version} is the same as the given one.
*
* @param version
* @return
*/
public boolean is(Version version) {
return equals(version);
}
/**
* Returns whether the current {@link Version} is less (older) than the given one.
*
* @param version
* @return
*/
public boolean isLessThan(Version version) {
return compareTo(version) < 0;
}
/**
* Returns whether the current {@link Version} is less (older) or equal to the current one.
*
* @param version
* @return
*/
public boolean isLessThanOrEqualTo(Version version) {
return compareTo(version) <= 0;
}
/*
* (non-Javadoc)
* @see java.lang.Comparable#compareTo(java.lang.Object)
*/
public int compareTo(Version that) {
if (that == null) {
return 1;
}
if (major != that.major) {
return major - that.major;
}
if (minor != that.minor) {
return minor - that.minor;
}
if (bugfix != that.bugfix) {
return bugfix - that.bugfix;
}
if (build != that.build) {
return build - that.build;
}
return 0;
}
/*
* (non-Javadoc)
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (!(obj instanceof Version)) {
return false;
}
Version that = (Version) obj;
return this.major == that.major && this.minor == that.minor && this.bugfix == that.bugfix
&& this.build == that.build;
}
/*
* (non-Javadoc)
* @see java.lang.Object#hashCode()
*/
@Override
public int hashCode() {
int result = 17;
result += 31 * major;
result += 31 * minor;
result += 31 * bugfix;
result += 31 * build;
return result;
}
/*
* (non-Javadoc)
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
List<Integer> digits = new ArrayList<>();
digits.add(major);
digits.add(minor);
if (build != 0 || bugfix != 0) {
digits.add(bugfix);
}
if (build != 0) {
digits.add(build);
}
return digits.stream().map(Object::toString).collect(Collectors.joining("."));
}
}
}
| Version |
java | apache__camel | components/camel-zookeeper/src/main/java/org/apache/camel/component/zookeeper/NaturalSortComparator.java | {
"start": 1464,
"end": 3553
} | enum ____ {
Ascending(1),
Descending(-1);
final int direction;
Order(int direction) {
this.direction = direction;
}
}
private Order order;
public NaturalSortComparator() {
this(Order.Ascending);
}
public NaturalSortComparator(Order order) {
if (order != null) {
this.order = order;
}
}
@Override
public int compare(CharSequence first, CharSequence second) {
if (first == null && second == null) {
return 0;
}
if (first != null && second == null) {
return 1;
}
if (first == null && second != null) {
return -1;
}
int compare = 0;
int fx = 0;
int sx = 0;
// TODO first can contain null and the next line causes NPE then
while (fx < first.length() && sx < second.length() && compare == 0) {
if (isDigit(first.charAt(fx)) && isDigit(second.charAt(sx))) {
int flen = getNumSequenceLength(first, fx);
int slen = getNumSequenceLength(second, sx);
if (flen == slen) {
for (int x = 0; x < flen && compare == 0; x++) {
/** the first difference in digit wins */
compare = first.charAt(fx++) - second.charAt(sx++);
}
} else {
compare = flen - slen;
}
} else {
compare = first.charAt(fx) - second.charAt(sx);
}
fx++;
sx++;
}
if (compare == 0) {
compare = first.length() - second.length();
}
return order.direction * compare;
}
private boolean isDigit(char c) {
return c >= 48 && c < 57;
}
private int getNumSequenceLength(CharSequence sequence, int index) {
int x = index;
while (x < sequence.length() && isDigit(sequence.charAt(x))) {
x++;
}
return x - index;
}
}
| Order |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/identifier/EmbeddedIdGeneratedValueTest.java | {
"start": 2168,
"end": 2501
} | class ____ implements Serializable {
private String username;
@GeneratedValue
private Integer registrationId;
public PK() {
}
public PK(String username) {
this.username = username;
}
public String getUsername() {
return username;
}
public Integer getRegistrationId() {
return registrationId;
}
}
}
| PK |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/internal/IntArraysBaseTest.java | {
"start": 1044,
"end": 1382
} | class ____ testing <code>{@link IntArrays}</code>, set up an instance with {@link StandardComparisonStrategy} and another
* with {@link ComparatorBasedComparisonStrategy}.
* <p>
* Is in <code>org.assertj.core.internal</code> package to be able to set {@link IntArrays#failures} appropriately.
*
* @author Joel Costigliola
*/
public | for |
java | elastic__elasticsearch | x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/SeriesUtils.java | {
"start": 807,
"end": 870
} | class ____ {
private SeriesUtils() {}
private | SeriesUtils |
java | eclipse-vertx__vert.x | vertx-core/src/test/java/io/vertx/tests/json/JacksonTest.java | {
"start": 1243,
"end": 6131
} | class ____ {
}
@Test
public void testEncodeUnknownNumber() {
String result = codec.toString(new Number() {
@Override
public int intValue() {
throw new UnsupportedOperationException();
}
@Override
public long longValue() {
throw new UnsupportedOperationException();
}
@Override
public float floatValue() {
throw new UnsupportedOperationException();
}
@Override
public double doubleValue() {
return 4D;
}
});
assertEquals("4.0", result);
}
@Test
public void testEncodePojoFailure() {
try {
codec.toString(new MyPojo());
fail();
} catch (EncodeException e) {
assertTrue(e.getMessage().contains(MyPojo.class.getName()));
}
}
@Test(expected = EncodeException.class)
public void encodeToBuffer() {
// if other than EncodeException happens here, then
// there is probably a leak closing the netty buffer output stream
codec.toBuffer(new RuntimeException("Unsupported"));
}
@Test
public void testDefaultConstraints() {
testReadConstraints(
DEFAULT_MAX_DEPTH,
DEFAULT_MAX_NUM_LEN,
DEFAULT_MAX_STRING_LEN,
DEFAULT_MAX_NAME_LEN);
}
public static void testReadConstraints(int defaultMaxDepth,
int maxNumberLength,
int defaultMaxStringLength,
int defaultMaxNameLength) {
testMaxNestingDepth(defaultMaxDepth);
try {
testMaxNestingDepth(defaultMaxDepth + 1);
Assert.fail();
} catch (DecodeException expected) {
}
testMaxNumberLength(maxNumberLength);
try {
testMaxNumberLength(maxNumberLength + 1);
Assert.fail();
} catch (DecodeException expected) {
}
testMaxStringLength(defaultMaxStringLength);
try {
testMaxStringLength(defaultMaxStringLength + 1);
Assert.fail();
} catch (DecodeException expected) {
}
testMaxNameLength(defaultMaxNameLength);
try {
testMaxNameLength(defaultMaxNameLength + 1);
Assert.fail();
} catch (DecodeException expected) {
}
}
private static JsonArray testMaxNestingDepth(int depth) {
String json = "[".repeat(depth) + "]".repeat(depth);
return new JsonArray(json);
}
private static JsonObject testMaxNumberLength(int len) {
String json = "{\"number\":" + "1".repeat(len) + "}";
return new JsonObject(json);
}
private static JsonObject testMaxStringLength(int len) {
String json = "{\"string\":\"" + "a".repeat(len) + "\"}";
return new JsonObject(json);
}
private static JsonObject testMaxNameLength(int len) {
String json = "{\"" + "a".repeat(len) + "\":3}";
return new JsonObject(json);
}
@Test
public void testParseMap() throws Exception {
JsonParser parser = JacksonCodec.createParser("{\"nested\":{\"key\":\"value\"},\"another\":4}");
assertEquals(JsonTokenId.ID_START_OBJECT, parser.nextToken().id());
assertEquals(JsonTokenId.ID_FIELD_NAME, parser.nextToken().id());
assertEquals(JsonTokenId.ID_START_OBJECT, parser.nextToken().id());
Map<String, Object> nested = JacksonCodec.parseObject(parser);
assertEquals(Map.of("key", "value"), nested);
assertEquals(JsonTokenId.ID_FIELD_NAME, parser.nextToken().id());
assertEquals(JsonTokenId.ID_NUMBER_INT, parser.nextToken().id());
assertEquals(JsonTokenId.ID_END_OBJECT, parser.nextToken().id());
}
@Test
public void testParseAny() throws Exception {
JsonParser parser = JacksonCodec.createParser("{\"nested\":{\"key\":\"value\"},\"another\":4}");
assertEquals(JsonTokenId.ID_START_OBJECT, parser.nextToken().id());
assertEquals(JsonTokenId.ID_FIELD_NAME, parser.nextToken().id());
assertEquals(JsonTokenId.ID_START_OBJECT, parser.nextToken().id());
Object nested = JacksonCodec.parseValue(parser);
assertEquals(Map.of("key", "value"), nested);
assertEquals(JsonTokenId.ID_FIELD_NAME, parser.nextToken().id());
assertEquals(JsonTokenId.ID_NUMBER_INT, parser.nextToken().id());
assertEquals(JsonTokenId.ID_END_OBJECT, parser.nextToken().id());
}
@Test
public void testParseArray() throws Exception {
JsonParser parser = JacksonCodec.createParser("{\"nested\":[0,1,2],\"another\":4}");
assertEquals(JsonTokenId.ID_START_OBJECT, parser.nextToken().id());
assertEquals(JsonTokenId.ID_FIELD_NAME, parser.nextToken().id());
assertEquals(JsonTokenId.ID_START_ARRAY, parser.nextToken().id());
Object nested = JacksonCodec.parseArray(parser);
assertEquals(Arrays.asList(0, 1, 2), nested);
assertEquals(JsonTokenId.ID_FIELD_NAME, parser.nextToken().id());
assertEquals(JsonTokenId.ID_NUMBER_INT, parser.nextToken().id());
assertEquals(JsonTokenId.ID_END_OBJECT, parser.nextToken().id());
}
}
| MyPojo |
java | micronaut-projects__micronaut-core | inject-java/src/main/java/io/micronaut/annotation/processing/JavaAnnotationMetadataBuilder.java | {
"start": 24138,
"end": 27582
} | class ____ extends AbstractAnnotationValueVisitor8<Object, Object> {
private final Element originatingElement;
private final ExecutableElement member;
private Object resolvedValue;
private final Map<String, Map<CharSequence, Object>> resolvedDefaults;
/**
* @param originatingElement
* @param member
* @param resolvedDefaults
*/
MetadataAnnotationValueVisitor(Element originatingElement, ExecutableElement member, Map<String, Map<CharSequence, Object>> resolvedDefaults) {
this.originatingElement = originatingElement;
this.member = member;
this.resolvedDefaults = resolvedDefaults;
}
@Override
public Object visitBoolean(boolean b, Object o) {
resolvedValue = b;
return null;
}
@Override
public Object visitByte(byte b, Object o) {
resolvedValue = b;
return null;
}
@Override
public Object visitChar(char c, Object o) {
resolvedValue = c;
return null;
}
@Override
public Object visitDouble(double d, Object o) {
resolvedValue = d;
return null;
}
@Override
public Object visitFloat(float f, Object o) {
resolvedValue = f;
return null;
}
@Override
public Object visitInt(int i, Object o) {
resolvedValue = i;
return null;
}
@Override
public Object visitLong(long i, Object o) {
resolvedValue = i;
return null;
}
@Override
public Object visitShort(short s, Object o) {
resolvedValue = s;
return null;
}
@Override
public Object visitString(String s, Object o) {
resolvedValue = s;
return null;
}
@Override
public Object visitType(TypeMirror t, Object o) {
if (t instanceof DeclaredType type) {
Element typeElement = type.asElement();
if (typeElement instanceof TypeElement element) {
String className = JavaModelUtils.getClassName(element);
resolvedValue = new AnnotationClassValue<>(className);
}
}
return null;
}
@Override
public Object visitEnumConstant(VariableElement c, Object o) {
resolvedValue = c.toString();
return null;
}
@Override
public Object visitAnnotation(AnnotationMirror a, Object o) {
if (a instanceof javax.lang.model.element.AnnotationValue) {
resolvedValue = readNestedAnnotationValue(originatingElement, a, resolvedDefaults);
}
return null;
}
@Override
public Object visitArray(List<? extends javax.lang.model.element.AnnotationValue> vals, Object o) {
var arrayValueVisitor = new ArrayValueVisitor(member);
for (javax.lang.model.element.AnnotationValue val : vals) {
val.accept(arrayValueVisitor, o);
}
resolvedValue = arrayValueVisitor.getValues();
return null;
}
/**
* Array value visitor class.
*/
private final | MetadataAnnotationValueVisitor |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/error/ShouldHaveSuppressedException.java | {
"start": 802,
"end": 1955
} | class ____ extends BasicErrorMessageFactory {
/**
* Creates a new <code>{@link ShouldHaveSuppressedException}</code>.
* @param actual the throwable to check suppressed exceptions.
* @param expectedSuppressedException the expected suppressed exception.
* @return the created {@code ErrorMessageFactory}.
*/
public static ErrorMessageFactory shouldHaveSuppressedException(Throwable actual,
Throwable expectedSuppressedException) {
return new ShouldHaveSuppressedException(actual, expectedSuppressedException);
}
private ShouldHaveSuppressedException(Throwable actual, Throwable expectedSuppressedException) {
super("%n" +
"Expecting actual:%n" +
" %s%n" +
"to have a suppressed exception with the following type and message:%n" +
" %s / %s%n" +
"but could not find any in actual's suppressed exceptions:%n" +
" %s",
actual, expectedSuppressedException.getClass().getName(), expectedSuppressedException.getMessage(),
actual.getSuppressed());
}
}
| ShouldHaveSuppressedException |
java | spring-projects__spring-framework | spring-jdbc/src/main/java/org/springframework/jdbc/core/support/SqlLobValue.java | {
"start": 1788,
"end": 3200
} | class ____ a reference to a {@link LobCreator} that must be closed after
* the update has completed. This is done via a call to the {@link #cleanup()} method.
* All handling of the {@code LobCreator} is done by the framework classes that use it -
* no need to set or close the {@code LobCreator} for end users of this class.
*
* <p>A usage example:
*
* <pre class="code">JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource); // reusable object
* LobHandler lobHandler = new DefaultLobHandler(); // reusable object
*
* jdbcTemplate.update(
* "INSERT INTO imagedb (image_name, content, description) VALUES (?, ?, ?)",
* new Object[] {
* name,
* new SqlLobValue(contentStream, contentLength, lobHandler),
* new SqlLobValue(description, lobHandler)
* },
* new int[] {Types.VARCHAR, Types.BLOB, Types.CLOB});
* </pre>
*
* @author Thomas Risberg
* @author Juergen Hoeller
* @since 1.1
* @see org.springframework.jdbc.support.lob.LobHandler
* @see org.springframework.jdbc.support.lob.LobCreator
* @see org.springframework.jdbc.core.JdbcTemplate#update(String, Object[], int[])
* @see org.springframework.jdbc.object.SqlUpdate#update(Object[])
* @see org.springframework.jdbc.object.StoredProcedure#execute(java.util.Map)
* @deprecated as of 6.2, in favor of {@link SqlBinaryValue} and {@link SqlCharacterValue}
*/
@Deprecated(since = "6.2")
public | holds |
java | spring-projects__spring-security | core/src/main/java/org/springframework/security/authentication/DisabledException.java | {
"start": 898,
"end": 1463
} | class ____ extends AccountStatusException {
@Serial
private static final long serialVersionUID = 2295984593872502361L;
/**
* Constructs a <code>DisabledException</code> with the specified message.
* @param msg the detail message
*/
public DisabledException(String msg) {
super(msg);
}
/**
* Constructs a <code>DisabledException</code> with the specified message and root
* cause.
* @param msg the detail message
* @param cause root cause
*/
public DisabledException(String msg, Throwable cause) {
super(msg, cause);
}
}
| DisabledException |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/web/server/HttpsRedirectSpecTests.java | {
"start": 6394,
"end": 6811
} | class ____ {
@Bean
SecurityWebFilterChain springSecurity(ServerHttpSecurity http) {
// @formatter:off
http
.redirectToHttps((redirectToHttps) -> redirectToHttps
.httpsRedirectWhen(new PathPatternParserServerWebExchangeMatcher("/secure"))
);
// @formatter:on
return http.build();
}
}
@Configuration
@EnableWebFlux
@EnableWebFluxSecurity
static | SometimesRedirectToHttpsInLambdaConfig |
java | apache__camel | components/camel-avro/src/main/java/org/apache/camel/dataformat/avro/AvroDataFormat.java | {
"start": 4221,
"end": 5206
} | class ____ procedure to ensure working in OSGi
Class<?> instanceClass = camelContext.getClassResolver().resolveMandatoryClass(className);
Class<?> genericContainer = camelContext.getClassResolver().resolveMandatoryClass(GENERIC_CONTAINER_CLASSNAME);
if (genericContainer.isAssignableFrom(instanceClass)) {
try {
Method method = instanceClass.getMethod("getSchema");
return (Schema) method.invoke(camelContext.getInjector().newInstance(instanceClass));
} catch (Exception ex) {
throw new CamelException("Error calling getSchema on " + instanceClass, ex);
}
} else {
throw new CamelException("Class " + instanceClass + " must be instanceof " + GENERIC_CONTAINER_CLASSNAME);
}
}
@Override
public void marshal(Exchange exchange, Object graph, OutputStream outputStream) throws Exception {
// the schema should be from the graph | loading |
java | elastic__elasticsearch | x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataTypeConverter.java | {
"start": 2310,
"end": 14056
} | class ____ {
private DataTypeConverter() {}
/**
* Get the conversion from one type to another.
*/
public static Converter converterFor(DataType from, DataType to) {
// Special handling for nulls and if conversion is not requires
if (from == to || (isDateTime(from) && isDateTime(to))) {
return DefaultConverter.IDENTITY;
}
if (to == NULL || from == NULL) {
return DefaultConverter.TO_NULL;
}
// proper converters
if (isString(to)) {
return conversionToString(from);
}
if (to == LONG) {
return conversionToLong(from);
}
if (to == UNSIGNED_LONG) {
return conversionToUnsignedLong(from);
}
if (to == INTEGER) {
return conversionToInt(from);
}
if (to == SHORT) {
return conversionToShort(from);
}
if (to == BYTE) {
return conversionToByte(from);
}
if (to == FLOAT) {
return conversionToFloat(from);
}
if (to == DOUBLE) {
return conversionToDouble(from);
}
if (isDateTime(to)) {
return conversionToDateTime(from);
}
if (to == BOOLEAN) {
return conversionToBoolean(from);
}
if (to == IP) {
return conversionToIp(from);
}
if (to == VERSION) {
return conversionToVersion(from);
}
return null;
}
private static Converter conversionToString(DataType from) {
if (isDateTime(from)) {
return DefaultConverter.DATETIME_TO_STRING;
}
return DefaultConverter.OTHER_TO_STRING;
}
private static Converter conversionToIp(DataType from) {
if (isString(from)) {
return DefaultConverter.STRING_TO_IP;
}
return null;
}
private static Converter conversionToVersion(DataType from) {
if (isString(from)) {
return DefaultConverter.STRING_TO_VERSION;
}
return null;
}
private static Converter conversionToUnsignedLong(DataType from) {
if (from.isRationalNumber()) {
return DefaultConverter.RATIONAL_TO_UNSIGNED_LONG;
}
if (from.isWholeNumber()) {
return DefaultConverter.INTEGER_TO_UNSIGNED_LONG;
}
if (from == BOOLEAN) {
return DefaultConverter.BOOL_TO_UNSIGNED_LONG;
}
if (isString(from)) {
return DefaultConverter.STRING_TO_UNSIGNED_LONG;
}
if (from == DATETIME) {
return DefaultConverter.DATETIME_TO_UNSIGNED_LONG;
}
return null;
}
private static Converter conversionToLong(DataType from) {
if (from.isRationalNumber()) {
return DefaultConverter.RATIONAL_TO_LONG;
}
if (from.isWholeNumber()) {
return DefaultConverter.INTEGER_TO_LONG;
}
if (from == BOOLEAN) {
return DefaultConverter.BOOL_TO_LONG;
}
if (isString(from)) {
return DefaultConverter.STRING_TO_LONG;
}
if (isDateTime(from)) {
return DefaultConverter.DATETIME_TO_LONG;
}
return null;
}
private static Converter conversionToInt(DataType from) {
if (from.isRationalNumber()) {
return DefaultConverter.RATIONAL_TO_INT;
}
if (from.isWholeNumber()) {
return DefaultConverter.INTEGER_TO_INT;
}
if (from == BOOLEAN) {
return DefaultConverter.BOOL_TO_INT;
}
if (isString(from)) {
return DefaultConverter.STRING_TO_INT;
}
if (isDateTime(from)) {
return DefaultConverter.DATETIME_TO_INT;
}
return null;
}
private static Converter conversionToShort(DataType from) {
if (from.isRationalNumber()) {
return DefaultConverter.RATIONAL_TO_SHORT;
}
if (from.isWholeNumber()) {
return DefaultConverter.INTEGER_TO_SHORT;
}
if (from == BOOLEAN) {
return DefaultConverter.BOOL_TO_SHORT;
}
if (isString(from)) {
return DefaultConverter.STRING_TO_SHORT;
}
if (isDateTime(from)) {
return DefaultConverter.DATETIME_TO_SHORT;
}
return null;
}
private static Converter conversionToByte(DataType from) {
if (from.isRationalNumber()) {
return DefaultConverter.RATIONAL_TO_BYTE;
}
if (from.isWholeNumber()) {
return DefaultConverter.INTEGER_TO_BYTE;
}
if (from == BOOLEAN) {
return DefaultConverter.BOOL_TO_BYTE;
}
if (isString(from)) {
return DefaultConverter.STRING_TO_BYTE;
}
if (isDateTime(from)) {
return DefaultConverter.DATETIME_TO_BYTE;
}
return null;
}
private static DefaultConverter conversionToFloat(DataType from) {
if (from.isRationalNumber()) {
return DefaultConverter.RATIONAL_TO_FLOAT;
}
if (from.isWholeNumber()) {
return DefaultConverter.INTEGER_TO_FLOAT;
}
if (from == BOOLEAN) {
return DefaultConverter.BOOL_TO_FLOAT;
}
if (isString(from)) {
return DefaultConverter.STRING_TO_FLOAT;
}
if (isDateTime(from)) {
return DefaultConverter.DATETIME_TO_FLOAT;
}
return null;
}
private static DefaultConverter conversionToDouble(DataType from) {
if (from.isRationalNumber()) {
return DefaultConverter.RATIONAL_TO_DOUBLE;
}
if (from.isWholeNumber()) {
return DefaultConverter.INTEGER_TO_DOUBLE;
}
if (from == BOOLEAN) {
return DefaultConverter.BOOL_TO_DOUBLE;
}
if (isString(from)) {
return DefaultConverter.STRING_TO_DOUBLE;
}
if (isDateTime(from)) {
return DefaultConverter.DATETIME_TO_DOUBLE;
}
return null;
}
private static DefaultConverter conversionToDateTime(DataType from) {
if (from.isRationalNumber()) {
return DefaultConverter.RATIONAL_TO_DATETIME;
}
if (from.isWholeNumber()) {
return DefaultConverter.INTEGER_TO_DATETIME;
}
if (from == BOOLEAN) {
return DefaultConverter.BOOL_TO_DATETIME; // We emit an int here which is ok because of Java's casting rules
}
if (isString(from)) {
return DefaultConverter.STRING_TO_DATETIME;
}
return null;
}
private static DefaultConverter conversionToBoolean(DataType from) {
if (from.isNumeric()) {
return DefaultConverter.NUMERIC_TO_BOOLEAN;
}
if (isString(from)) {
return DefaultConverter.STRING_TO_BOOLEAN;
}
if (isDateTime(from)) {
return DefaultConverter.DATETIME_TO_BOOLEAN;
}
return null;
}
public static byte safeToByte(long x) {
if (x > Byte.MAX_VALUE || x < Byte.MIN_VALUE) {
throw new InvalidArgumentException("[{}] out of [byte] range", x);
}
return (byte) x;
}
public static short safeToShort(long x) {
if (x > Short.MAX_VALUE || x < Short.MIN_VALUE) {
throw new InvalidArgumentException("[{}] out of [short] range", x);
}
return (short) x;
}
public static int safeToInt(long x) {
if (x > Integer.MAX_VALUE || x < Integer.MIN_VALUE) {
throw new InvalidArgumentException("[{}] out of [integer] range", x);
}
return (int) x;
}
public static int safeToInt(double x) {
if (x > Integer.MAX_VALUE || x < Integer.MIN_VALUE) {
throw new InvalidArgumentException("[{}] out of [integer] range", x);
}
// cast is safe, double can represent all of int's range
return (int) Math.round(x);
}
public static long safeDoubleToLong(double x) {
if (x > Long.MAX_VALUE || x < Long.MIN_VALUE) {
throw new InvalidArgumentException("[{}] out of [long] range", x);
}
return Math.round(x);
}
public static Long safeToLong(Number x) {
try {
if (x instanceof BigInteger) {
return ((BigInteger) x).longValueExact();
}
// integer converters are also provided double values (aggs generated on integer fields)
if (x instanceof Double || x instanceof Float) {
return safeDoubleToLong(x.doubleValue());
}
return x.longValue();
} catch (ArithmeticException ae) {
throw new InvalidArgumentException(ae, "[{}] out of [long] range", x);
}
}
public static BigInteger safeToUnsignedLong(Double x) {
if (inUnsignedLongRange(x) == false) {
throw new InvalidArgumentException("[{}] out of [unsigned_long] range", x);
}
return BigDecimal.valueOf(x).toBigInteger();
}
public static BigInteger safeToUnsignedLong(Long x) {
if (x < 0) {
throw new InvalidArgumentException("[{}] out of [unsigned_long] range", x);
}
return BigInteger.valueOf(x);
}
public static BigInteger safeToUnsignedLong(String x) {
BigInteger bi = new BigDecimal(x).toBigInteger();
if (isUnsignedLong(bi) == false) {
throw new InvalidArgumentException("[{}] out of [unsigned_long] range", x);
}
return bi;
}
// "unsafe" value conversion to unsigned long (vs. "safe", type-only conversion of safeToUnsignedLong());
// -1L -> 18446744073709551615 (=UNSIGNED_LONG_MAX)
public static BigInteger toUnsignedLong(Number number) {
BigInteger bi = BigInteger.valueOf(number.longValue());
return bi.signum() < 0 ? bi.and(UNSIGNED_LONG_MAX) : bi;
}
public static Number toInteger(double x, DataType dataType) {
long l = safeDoubleToLong(x);
if (dataType == BYTE) {
return safeToByte(l);
}
if (dataType == SHORT) {
return safeToShort(l);
}
if (dataType == INTEGER) {
return safeToInt(l);
}
return l;
}
public static boolean convertToBoolean(String val) {
String lowVal = val.toLowerCase(Locale.ROOT);
if (Booleans.isBoolean(lowVal) == false) {
throw new InvalidArgumentException("cannot cast [{}] to [boolean]", val);
}
return Booleans.parseBoolean(lowVal);
}
/**
* Converts arbitrary object to the desired data type.
* <p>
* Throws InvalidArgumentException if such conversion is not possible
*/
public static Object convert(Object value, DataType dataType) {
DataType detectedType = DataType.fromJava(value);
if (detectedType == dataType || value == null) {
return value;
}
Converter converter = converterFor(detectedType, dataType);
if (converter == null) {
throw new InvalidArgumentException(
"cannot convert from [{}], type [{}] to [{}]",
value,
detectedType.typeName(),
dataType.typeName()
);
}
return converter.convert(value);
}
/**
* Reference to a data type conversion that can be serialized. Note that the position in the enum
* is important because it is used for serialization.
*/
public | DataTypeConverter |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/injection/order/InjectionOrderTest.java | {
"start": 416,
"end": 1454
} | class ____ {
@RegisterExtension
public ArcTestContainer container = new ArcTestContainer(Consumer.class, Dependency.class);
@Test
public void test() {
Consumer consumer = Arc.container().select(Consumer.class).get();
assertFalse(ConsumerSuperclass.superConstructorInjected); // subclass calls different ctor
assertTrue(consumer.superFieldInjected());
assertFalse(ConsumerSuperclass.superInitializerInjected); // overridden in a subclass
assertTrue(ConsumerSuperclass.superPrivateInitializerInjected); // not overridden, it's private
assertFalse(ConsumerSuperclass.superPrivateInitializerInjectedBeforeField);
assertTrue(Consumer.constructorInjected);
assertTrue(consumer.fieldInjected());
assertTrue(Consumer.initializerInjected);
assertFalse(Consumer.initializerInjectedBeforeField);
assertTrue(Consumer.privateInitializerInjected);
assertFalse(Consumer.subclassInjectedBeforeSuperclass);
}
static | InjectionOrderTest |
java | spring-projects__spring-data-jpa | spring-data-jpa/src/main/java/org/springframework/data/jpa/repository/query/QueryProvider.java | {
"start": 988,
"end": 1117
} | interface ____ {
/**
* Return the query string.
*
* @return the query string.
*/
String getQueryString();
}
| QueryProvider |
java | apache__kafka | streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/HandlingSourceTopicDeletionIntegrationTest.java | {
"start": 2066,
"end": 5936
} | class ____ {
private static final int NUM_BROKERS = 1;
private static final int NUM_THREADS = 2;
private static final long TIMEOUT = 60000;
private static final String INPUT_TOPIC = "inputTopic";
private static final String OUTPUT_TOPIC = "outputTopic";
public static final EmbeddedKafkaCluster CLUSTER = new EmbeddedKafkaCluster(NUM_BROKERS);
@BeforeAll
public static void startCluster() throws IOException {
CLUSTER.start();
}
@AfterAll
public static void closeCluster() {
CLUSTER.stop();
}
@BeforeEach
public void before() throws InterruptedException {
CLUSTER.createTopics(INPUT_TOPIC, OUTPUT_TOPIC);
}
@AfterEach
public void after() throws InterruptedException {
CLUSTER.deleteTopics(INPUT_TOPIC, OUTPUT_TOPIC);
}
@ParameterizedTest
@ValueSource(strings = {"classic", "streams"})
public void shouldThrowErrorAfterSourceTopicDeleted(final String groupProtocol, final TestInfo testName) throws InterruptedException {
final StreamsBuilder builder = new StreamsBuilder();
builder.stream(INPUT_TOPIC, Consumed.with(Serdes.Integer(), Serdes.String()))
.to(OUTPUT_TOPIC, Produced.with(Serdes.Integer(), Serdes.String()));
final String safeTestName = safeUniqueTestName(testName);
final String appId = "app-" + safeTestName;
final Properties streamsConfiguration = new Properties();
streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, appId);
streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.IntegerSerde.class);
streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.StringSerde.class);
streamsConfiguration.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, NUM_THREADS);
streamsConfiguration.put(StreamsConfig.METADATA_MAX_AGE_CONFIG, 2000);
streamsConfiguration.put(StreamsConfig.GROUP_PROTOCOL_CONFIG, groupProtocol);
final Topology topology = builder.build();
final AtomicBoolean calledUncaughtExceptionHandler1 = new AtomicBoolean(false);
final AtomicBoolean calledUncaughtExceptionHandler2 = new AtomicBoolean(false);
try (final KafkaStreams kafkaStreams1 = new KafkaStreams(topology, streamsConfiguration);
final KafkaStreams kafkaStreams2 = new KafkaStreams(topology, streamsConfiguration)) {
kafkaStreams1.setUncaughtExceptionHandler(exception -> {
calledUncaughtExceptionHandler1.set(true);
return StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse.SHUTDOWN_CLIENT;
});
kafkaStreams1.start();
kafkaStreams2.setUncaughtExceptionHandler(exception -> {
calledUncaughtExceptionHandler2.set(true);
return StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse.SHUTDOWN_CLIENT;
});
kafkaStreams2.start();
TestUtils.waitForCondition(
() -> kafkaStreams1.state() == State.RUNNING && kafkaStreams2.state() == State.RUNNING,
TIMEOUT,
() -> "Kafka Streams clients did not reach state RUNNING"
);
CLUSTER.deleteTopic(INPUT_TOPIC);
TestUtils.waitForCondition(
() -> kafkaStreams1.state() == State.ERROR && kafkaStreams2.state() == State.ERROR,
TIMEOUT,
() -> "Kafka Streams clients did not reach state ERROR"
);
assertThat(calledUncaughtExceptionHandler1.get(), is(true));
assertThat(calledUncaughtExceptionHandler2.get(), is(true));
}
}
}
| HandlingSourceTopicDeletionIntegrationTest |
java | grpc__grpc-java | core/src/test/java/io/grpc/internal/RetryPolicyTest.java | {
"start": 1301,
"end": 6503
} | class ____ {
@Test
public void getRetryPolicies() throws Exception {
BufferedReader reader = null;
try {
reader = new BufferedReader(new InputStreamReader(RetryPolicyTest.class.getResourceAsStream(
"/io/grpc/internal/test_retry_service_config.json"), "UTF-8"));
StringBuilder sb = new StringBuilder();
String line;
while ((line = reader.readLine()) != null) {
sb.append(line).append('\n');
}
Object serviceConfigObj = JsonParser.parse(sb.toString());
assertTrue(serviceConfigObj instanceof Map);
@SuppressWarnings("unchecked")
Map<String, ?> serviceConfig = (Map<String, ?>) serviceConfigObj;
ManagedChannelServiceConfig channelServiceConfig =
ManagedChannelServiceConfig.fromServiceConfig(
serviceConfig,
/* retryEnabled= */ true,
/* maxRetryAttemptsLimit= */ 4,
/* maxHedgedAttemptsLimit= */ 3,
/* loadBalancingConfig= */ null);
MethodDescriptor.Builder<Void, Void> builder = TestMethodDescriptors.voidMethod().toBuilder();
MethodDescriptor<Void, Void> method = builder.setFullMethodName("not/exist").build();
assertThat(channelServiceConfig.getMethodConfig(method)).isNull();
method = builder.setFullMethodName("not_exist/Foo1").build();
assertThat(channelServiceConfig.getMethodConfig(method)).isNull();
method = builder.setFullMethodName("SimpleService1/not_exist").build();
assertThat(channelServiceConfig.getMethodConfig(method).retryPolicy).isEqualTo(
new RetryPolicy(
3,
TimeUnit.MILLISECONDS.toNanos(2100),
TimeUnit.MILLISECONDS.toNanos(2200),
parseDouble("3"),
null,
ImmutableSet.of(Code.UNAVAILABLE, Code.RESOURCE_EXHAUSTED)));
method = builder.setFullMethodName("SimpleService1/Foo1").build();
assertThat(channelServiceConfig.getMethodConfig(method).retryPolicy).isEqualTo(
new RetryPolicy(
4,
TimeUnit.MILLISECONDS.toNanos(100),
TimeUnit.MILLISECONDS.toNanos(1000),
parseDouble("2"),
null,
ImmutableSet.of(Code.UNAVAILABLE)));
method = builder.setFullMethodName("SimpleService2/not_exist").build();
assertThat(channelServiceConfig.getMethodConfig(method).retryPolicy).isNull();
method = builder.setFullMethodName("SimpleService2/Foo2").build();
assertThat(channelServiceConfig.getMethodConfig(method).retryPolicy).isEqualTo(
new RetryPolicy(
4,
TimeUnit.MILLISECONDS.toNanos(100),
TimeUnit.MILLISECONDS.toNanos(1000),
parseDouble("2"),
null,
ImmutableSet.of(Code.UNAVAILABLE)));
} finally {
if (reader != null) {
reader.close();
}
}
}
@Test
public void getRetryPolicies_retryDisabled() throws Exception {
BufferedReader reader = null;
try {
reader = new BufferedReader(new InputStreamReader(RetryPolicyTest.class.getResourceAsStream(
"/io/grpc/internal/test_retry_service_config.json"), "UTF-8"));
StringBuilder sb = new StringBuilder();
String line;
while ((line = reader.readLine()) != null) {
sb.append(line).append('\n');
}
Object serviceConfigObj = JsonParser.parse(sb.toString());
assertTrue(serviceConfigObj instanceof Map);
@SuppressWarnings("unchecked")
Map<String, ?> serviceConfig = (Map<String, ?>) serviceConfigObj;
ManagedChannelServiceConfig channelServiceConfig =
ManagedChannelServiceConfig.fromServiceConfig(
serviceConfig,
/* retryEnabled= */ false,
/* maxRetryAttemptsLimit= */ 4,
/* maxHedgedAttemptsLimit= */ 3,
/* loadBalancingConfig= */ null);
MethodDescriptor.Builder<Void, Void> builder = TestMethodDescriptors.voidMethod().toBuilder();
MethodDescriptor<Void, Void> method =
builder.setFullMethodName("SimpleService1/Foo1").build();
assertThat(channelServiceConfig.getMethodConfig(method).retryPolicy).isNull();
} finally {
if (reader != null) {
reader.close();
}
}
}
@Test
public void getThrottle() throws Exception {
BufferedReader reader = null;
try {
reader = new BufferedReader(new InputStreamReader(RetryPolicyTest.class.getResourceAsStream(
"/io/grpc/internal/test_retry_service_config.json"), "UTF-8"));
StringBuilder sb = new StringBuilder();
String line;
while ((line = reader.readLine()) != null) {
sb.append(line).append('\n');
}
Object serviceConfigObj = JsonParser.parse(sb.toString());
assertTrue(serviceConfigObj instanceof Map);
@SuppressWarnings("unchecked")
Map<String, ?> serviceConfig = (Map<String, ?>) serviceConfigObj;
Throttle throttle = ServiceConfigUtil.getThrottlePolicy(serviceConfig);
assertEquals(new Throttle(10f, 0.1f), throttle);
} finally {
if (reader != null) {
reader.close();
}
}
}
}
| RetryPolicyTest |
java | spring-projects__spring-security | config/src/main/java/org/springframework/security/config/annotation/web/configuration/SecurityReactorContextConfiguration.java | {
"start": 2583,
"end": 3371
} | class ____ {
private SecurityContextHolderStrategy securityContextHolderStrategy = SecurityContextHolder
.getContextHolderStrategy();
@Bean
SecurityReactorContextSubscriberRegistrar securityReactorContextSubscriberRegistrar() {
SecurityReactorContextSubscriberRegistrar registrar = new SecurityReactorContextSubscriberRegistrar();
registrar.setSecurityContextHolderStrategy(this.securityContextHolderStrategy);
return registrar;
}
@Autowired(required = false)
void setSecurityContextHolderStrategy(SecurityContextHolderStrategy securityContextHolderStrategy) {
Assert.notNull(securityContextHolderStrategy, "securityContextHolderStrategy cannot be null");
this.securityContextHolderStrategy = securityContextHolderStrategy;
}
static | SecurityReactorContextConfiguration |
java | elastic__elasticsearch | x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CCRInfoTransportActionTests.java | {
"start": 1663,
"end": 7279
} | class ____ extends ESTestCase {
private MockLicenseState licenseState;
private ClusterService clusterService;
@Before
public void init() {
licenseState = mock(MockLicenseState.class);
clusterService = mock(ClusterService.class);
}
public void testAvailable() {
TransportService transportService = MockUtils.setupTransportServiceWithThreadpoolExecutor();
CCRInfoTransportAction featureSet = new CCRInfoTransportAction(
transportService,
mock(ActionFilters.class),
Settings.EMPTY,
licenseState
);
when(licenseState.isAllowed(CcrConstants.CCR_FEATURE)).thenReturn(false);
assertThat(featureSet.available(), equalTo(false));
when(licenseState.isAllowed(CcrConstants.CCR_FEATURE)).thenReturn(true);
assertThat(featureSet.available(), equalTo(true));
}
public void testEnabled() {
Settings.Builder settings = Settings.builder().put("xpack.ccr.enabled", false);
TransportService transportService = MockUtils.setupTransportServiceWithThreadpoolExecutor();
CCRInfoTransportAction featureSet = new CCRInfoTransportAction(
transportService,
mock(ActionFilters.class),
settings.build(),
licenseState
);
assertThat(featureSet.enabled(), equalTo(false));
settings = Settings.builder().put("xpack.ccr.enabled", true);
featureSet = new CCRInfoTransportAction(transportService, mock(ActionFilters.class), settings.build(), licenseState);
assertThat(featureSet.enabled(), equalTo(true));
}
public void testName() {
TransportService transportService = MockUtils.setupTransportServiceWithThreadpoolExecutor();
CCRInfoTransportAction featureSet = new CCRInfoTransportAction(
transportService,
mock(ActionFilters.class),
Settings.EMPTY,
licenseState
);
assertThat(featureSet.name(), equalTo("ccr"));
}
public void testUsageStats() throws Exception {
final var projectId = randomProjectIdOrDefault();
ProjectMetadata.Builder project = ProjectMetadata.builder(projectId);
int numFollowerIndices = randomIntBetween(0, 32);
for (int i = 0; i < numFollowerIndices; i++) {
IndexMetadata.Builder followerIndex = IndexMetadata.builder("follow_index" + i)
.settings(settings(IndexVersion.current()).put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true))
.numberOfShards(1)
.numberOfReplicas(0)
.creationDate(i)
.putCustom(Ccr.CCR_CUSTOM_METADATA_KEY, new HashMap<>());
project.put(followerIndex);
}
// Add a regular index, to check that we do not take that one into account:
IndexMetadata.Builder regularIndex = IndexMetadata.builder("my_index")
.settings(settings(IndexVersion.current()))
.numberOfShards(1)
.numberOfReplicas(0)
.creationDate(numFollowerIndices);
project.put(regularIndex);
int numAutoFollowPatterns = randomIntBetween(0, 32);
Map<String, AutoFollowMetadata.AutoFollowPattern> patterns = Maps.newMapWithExpectedSize(numAutoFollowPatterns);
for (int i = 0; i < numAutoFollowPatterns; i++) {
AutoFollowMetadata.AutoFollowPattern pattern = new AutoFollowMetadata.AutoFollowPattern(
"remote_cluser",
Collections.singletonList("logs" + i + "*"),
Collections.emptyList(),
null,
Settings.EMPTY,
true,
null,
null,
null,
null,
null,
null,
null,
null,
null,
null
);
patterns.put("pattern" + i, pattern);
}
project.putCustom(AutoFollowMetadata.TYPE, new AutoFollowMetadata(patterns, Collections.emptyMap(), Collections.emptyMap()));
ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).putProjectMetadata(project).build();
Mockito.when(clusterService.state()).thenReturn(clusterState);
ThreadPool threadPool = mock(ThreadPool.class);
TransportService transportService = MockUtils.setupTransportServiceWithThreadpoolExecutor(threadPool);
var usageAction = new CCRUsageTransportAction(
transportService,
null,
threadPool,
mock(ActionFilters.class),
Settings.EMPTY,
licenseState,
TestProjectResolvers.singleProject(projectId)
);
PlainActionFuture<XPackUsageFeatureResponse> future = new PlainActionFuture<>();
usageAction.localClusterStateOperation(null, null, clusterState, future);
CCRInfoTransportAction.Usage ccrUsage = (CCRInfoTransportAction.Usage) future.get().getUsage();
assertThat(ccrUsage.enabled(), equalTo(true));
assertThat(ccrUsage.available(), equalTo(false));
assertThat(ccrUsage.getNumberOfFollowerIndices(), equalTo(numFollowerIndices));
if (numFollowerIndices != 0) {
assertThat(ccrUsage.getLastFollowTimeInMillis(), greaterThanOrEqualTo(0L));
} else {
assertThat(ccrUsage.getLastFollowTimeInMillis(), nullValue());
}
assertThat(ccrUsage.getNumberOfAutoFollowPatterns(), equalTo(numAutoFollowPatterns));
}
}
| CCRInfoTransportActionTests |
java | apache__hadoop | hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthTraceJobProducer.java | {
"start": 15400,
"end": 16249
} | class ____ {
@JsonProperty("type")
String type;
@JsonProperty("count")
Sample count;
@JsonProperty("time")
Sample time;
@JsonProperty("max_memory")
Sample max_memory;
@JsonProperty("max_vcores")
Sample max_vcores;
@JsonProperty("priority")
int priority;
@JsonProperty("execution_type")
String executionType = ExecutionType.GUARANTEED.name();
@Override
public String toString(){
return "\nTaskDefinition " + type
+ " Count[" + count + "] Time[" + time + "] Memory[" + max_memory
+ "] Vcores[" + max_vcores + "] Priority[" + priority
+ "] ExecutionType[" + executionType + "]";
}
}
/**
* Class used to parse value sample information.
*/
@SuppressWarnings({ "membername", "checkstyle:visibilitymodifier" })
public static | TaskDefinition |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/InterruptedExceptionSwallowed.java | {
"start": 3186,
"end": 9841
} | class ____ extends BugChecker
implements MethodTreeMatcher, TryTreeMatcher {
private static final String METHOD_DESCRIPTION =
"This method can throw InterruptedException but declares that it throws Exception/Throwable."
+ " This makes it difficult for callers to recognize the need to handle interruption"
+ " properly.";
@Override
public Description matchMethod(MethodTree tree, VisitorState state) {
if (state.errorProneOptions().isTestOnlyTarget()) {
return NO_MATCH;
}
if (MAIN_METHOD.matches(tree, state)) {
return NO_MATCH;
}
Type interrupted = state.getSymtab().interruptedExceptionType;
if (tree.getThrows().stream().anyMatch(t -> isSubtype(getType(t), interrupted, state))) {
return NO_MATCH;
}
ImmutableSet<Type> thrownExceptions = ASTHelpers.getThrownExceptions(tree.getBody(), state);
// Bail out if none of the exceptions thrown are subtypes of InterruptedException.
if (thrownExceptions.stream().noneMatch(t -> isSubtype(t, interrupted, state))) {
return NO_MATCH;
}
// Bail if any of the thrown exceptions are masking InterruptedException: that is, we don't want
// to suggest updating with `throws Exception, InterruptedException`.
if (thrownExceptions.stream()
.anyMatch(t -> !isSameType(t, interrupted, state) && isSubtype(interrupted, t, state))) {
return NO_MATCH;
}
Set<Type> exceptions =
Stream.concat(
thrownExceptions.stream()
.filter(t -> !isSubtype(t, state.getSymtab().runtimeExceptionType, state)),
tree.getThrows().stream()
.filter(t -> !isSubtype(interrupted, getType(t), state))
.map(ASTHelpers::getType))
.collect(toCollection(HashSet::new));
for (Type type : ImmutableSet.copyOf(exceptions)) {
exceptions.removeIf(t -> !isSameType(t, type, state) && isSubtype(t, type, state));
}
// Don't suggest adding more than five exceptions to the method signature.
if (exceptions.size() > 5) {
return NO_MATCH;
}
SuggestedFix fix = narrowExceptionTypes(tree, exceptions, state);
return buildDescription(tree).setMessage(METHOD_DESCRIPTION).addFix(fix).build();
}
private static SuggestedFix narrowExceptionTypes(
MethodTree tree, Set<Type> exceptions, VisitorState state) {
SuggestedFix.Builder fix = SuggestedFix.builder();
fix.replace(
getStartPosition(tree.getThrows().getFirst()),
state.getEndPosition(getLast(tree.getThrows())),
exceptions.stream().map(t -> qualifyType(state, fix, t)).sorted().collect(joining(", ")));
return fix.build();
}
@Override
public Description matchTry(TryTree tree, VisitorState state) {
for (CatchTree catchTree : tree.getCatches()) {
Type type = getType(catchTree.getParameter());
Type interrupted = state.getSymtab().interruptedExceptionType;
ImmutableList<Type> caughtTypes = extractTypes(type);
if (caughtTypes.stream().anyMatch(t -> isSubtype(t, interrupted, state))) {
return NO_MATCH;
}
if (caughtTypes.stream().anyMatch(t -> isSubtype(interrupted, t, state))) {
ImmutableSet<Type> thrownExceptions = getThrownExceptions(tree, state);
if (thrownExceptions.stream().anyMatch(t -> isSubtype(t, interrupted, state))
&& !blockChecksForInterruptedException(catchTree.getBlock(), state)
&& !(exceptionIsRethrown(catchTree, catchTree.getParameter(), state)
&& methodDeclaresInterruptedException(state.findEnclosing(MethodTree.class), state))
&& !isSuppressed(catchTree.getParameter(), state)) {
return describeMatch(catchTree, createFix(catchTree));
}
}
}
return NO_MATCH;
}
private boolean exceptionIsRethrown(
CatchTree catchTree, VariableTree parameter, VisitorState state) {
AtomicBoolean rethrown = new AtomicBoolean();
new TreePathScanner<Void, Void>() {
@Override
public Void visitThrow(ThrowTree throwTree, Void unused) {
VarSymbol parameterSymbol = getSymbol(parameter);
if (parameterSymbol.equals(getSymbol(throwTree.getExpression()))) {
rethrown.set(true);
}
return super.visitThrow(throwTree, null);
}
}.scan(new TreePath(state.getPath(), catchTree), null);
return rethrown.get();
}
private boolean methodDeclaresInterruptedException(MethodTree enclosing, VisitorState state) {
if (enclosing == null) {
return false;
}
return enclosing.getThrows().stream()
.anyMatch(t -> isSameType(getType(t), state.getSymtab().interruptedExceptionType, state));
}
private static SuggestedFix createFix(CatchTree catchTree) {
List<? extends StatementTree> block = catchTree.getBlock().getStatements();
String fix =
String.format(
"if (%s instanceof InterruptedException) {\nThread.currentThread().interrupt();\n}\n",
catchTree.getParameter().getName());
if (block.isEmpty()) {
return SuggestedFix.replace(catchTree.getBlock(), String.format("{%s}", fix));
}
return SuggestedFix.prefixWith(block.getFirst(), fix);
}
private static boolean blockChecksForInterruptedException(BlockTree block, VisitorState state) {
return TRUE.equals(
new TreeScanner<Boolean, Void>() {
@Override
public Boolean reduce(Boolean a, Boolean b) {
return TRUE.equals(a) || TRUE.equals(b);
}
@Override
public Boolean visitInstanceOf(InstanceOfTree instanceOfTree, Void unused) {
return isSubtype(
getType(instanceOfTree.getType()),
state.getSymtab().interruptedExceptionType,
state);
}
}.scan(block, null));
}
/**
* Returns the exceptions that need to be handled by {@code tryTree}'s catch blocks, or be
* propagated out.
*/
private static ImmutableSet<Type> getThrownExceptions(TryTree tryTree, VisitorState state) {
ScanThrownTypes scanner = new ScanThrownTypes(state);
scanner.scanResources(tryTree);
scanner.scan(tryTree.getBlock(), null);
return ImmutableSet.copyOf(scanner.getThrownTypes());
}
private static ImmutableList<Type> extractTypes(@Nullable Type type) {
if (type == null) {
return ImmutableList.of();
}
if (type.isUnion()) {
UnionClassType unionType = (UnionClassType) type;
return ImmutableList.copyOf(unionType.getAlternativeTypes());
}
return ImmutableList.of(type);
}
}
| InterruptedExceptionSwallowed |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/AnnotationPosition.java | {
"start": 13636,
"end": 13692
} | enum ____ {
BEFORE,
AFTER,
EITHER
}
}
| Position |
java | mapstruct__mapstruct | processor/src/main/java/org/mapstruct/ap/internal/gem/MappingControlUseGem.java | {
"start": 223,
"end": 331
} | enum ____ {
BUILT_IN_CONVERSION,
COMPLEX_MAPPING,
DIRECT,
MAPPING_METHOD
}
| MappingControlUseGem |
java | elastic__elasticsearch | modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/KeywordAnalyzerProvider.java | {
"start": 787,
"end": 1226
} | class ____ extends AbstractIndexAnalyzerProvider<KeywordAnalyzer> {
private final KeywordAnalyzer keywordAnalyzer;
public KeywordAnalyzerProvider(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(name);
this.keywordAnalyzer = new KeywordAnalyzer();
}
@Override
public KeywordAnalyzer get() {
return this.keywordAnalyzer;
}
}
| KeywordAnalyzerProvider |
java | netty__netty | common/src/main/java/io/netty/util/NetUtilInitializations.java | {
"start": 4800,
"end": 5795
} | interface ____ its INET address, fall back to isLoopback().
if (loopbackIface == null) {
try {
for (NetworkInterface iface: ifaces) {
if (iface.isLoopback()) {
Enumeration<InetAddress> i = SocketUtils.addressesFromNetworkInterface(iface);
if (i.hasMoreElements()) {
// Found the one with INET address.
loopbackIface = iface;
loopbackAddr = i.nextElement();
break;
}
}
}
if (loopbackIface == null) {
logger.warn("Failed to find the loopback interface");
}
} catch (SocketException e) {
logger.warn("Failed to find the loopback interface", e);
}
}
if (loopbackIface != null) {
// Found the loopback | from |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/UseCorrectAssertInTestsTest.java | {
"start": 1038,
"end": 2569
} | class ____ {
private static final String ASSERT_THAT_IMPORT =
"import static com.google.common.truth.Truth.assertThat;";
private static final String ASSERT_WITH_MESSAGE_IMPORT =
"import static com.google.common.truth.Truth.assertWithMessage;";
private static final String INPUT = "in/FooTest.java";
private static final String OUTPUT = "out/FooTest.java";
private static final String TEST_ONLY = "-XepCompilingTestOnlyCode";
private final BugCheckerRefactoringTestHelper refactoringHelper =
BugCheckerRefactoringTestHelper.newInstance(UseCorrectAssertInTests.class, getClass());
private final CompilationTestHelper compilationHelper =
CompilationTestHelper.newInstance(UseCorrectAssertInTests.class, getClass());
@Test
public void correctAssertInTest() {
refactoringHelper
.addInputLines(
INPUT,
inputWithExpressionsAndImport(
ASSERT_THAT_IMPORT, //
"assertThat(true).isTrue();"))
.expectUnchanged()
.doTest();
}
@Test
public void noAssertInTestsFound() {
refactoringHelper
.addInputLines(INPUT, inputWithExpressions("int a = 1;"))
.expectUnchanged()
.doTest();
}
@Test
public void diagnosticIssuedAtFirstAssert() {
compilationHelper
.addSourceLines(
INPUT,
"import org.junit.runner.RunWith;",
"import org.junit.runners.JUnit4;",
"@RunWith(JUnit4.class)",
"public | UseCorrectAssertInTestsTest |
java | apache__flink | flink-core/src/test/java/org/apache/flink/api/common/typeutils/base/SqlDateSerializerTest.java | {
"start": 1059,
"end": 1685
} | class ____ extends SerializerTestBase<Date> {
@Override
protected TypeSerializer<Date> createSerializer() {
return new SqlDateSerializer();
}
@Override
protected int getLength() {
return 8;
}
@Override
protected Class<Date> getTypeClass() {
return Date.class;
}
@Override
protected Date[] getTestData() {
return new Date[] {
new Date(0L),
Date.valueOf("1970-01-01"),
Date.valueOf("1990-10-14"),
Date.valueOf("2013-08-12"),
Date.valueOf("2040-05-12")
};
}
}
| SqlDateSerializerTest |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/EdgeManagerTest.java | {
"start": 2284,
"end": 6600
} | class ____ {
@RegisterExtension
static final TestExecutorExtension<ScheduledExecutorService> EXECUTOR_RESOURCE =
TestingUtils.defaultExecutorExtension();
@Test
void testGetConsumedPartitionGroup() throws Exception {
JobVertex v1 = new JobVertex("source");
JobVertex v2 = new JobVertex("sink");
ExecutionGraph eg = buildExecutionGraph(v1, v2, 2, 2, ALL_TO_ALL);
ConsumedPartitionGroup groupRetrievedByDownstreamVertex =
Objects.requireNonNull(eg.getJobVertex(v2.getID()))
.getTaskVertices()[0]
.getAllConsumedPartitionGroups()
.get(0);
IntermediateResultPartition consumedPartition =
Objects.requireNonNull(eg.getJobVertex(v1.getID()))
.getProducedDataSets()[0]
.getPartitions()[0];
ConsumedPartitionGroup groupRetrievedByIntermediateResultPartition =
consumedPartition.getConsumedPartitionGroups().get(0);
assertThat(groupRetrievedByIntermediateResultPartition)
.isEqualTo(groupRetrievedByDownstreamVertex);
ConsumedPartitionGroup groupRetrievedByScheduledResultPartition =
eg.getSchedulingTopology()
.getResultPartition(consumedPartition.getPartitionId())
.getConsumedPartitionGroups()
.get(0);
assertThat(groupRetrievedByScheduledResultPartition)
.isEqualTo(groupRetrievedByDownstreamVertex);
}
@Test
void testCalculateNumberOfConsumers() throws Exception {
testCalculateNumberOfConsumers(5, 2, ALL_TO_ALL, new int[] {2, 2});
testCalculateNumberOfConsumers(5, 2, POINTWISE, new int[] {1, 1});
testCalculateNumberOfConsumers(2, 5, ALL_TO_ALL, new int[] {5, 5, 5, 5, 5});
testCalculateNumberOfConsumers(2, 5, POINTWISE, new int[] {3, 3, 3, 2, 2});
testCalculateNumberOfConsumers(5, 5, ALL_TO_ALL, new int[] {5, 5, 5, 5, 5});
testCalculateNumberOfConsumers(5, 5, POINTWISE, new int[] {1, 1, 1, 1, 1});
}
private void testCalculateNumberOfConsumers(
int producerParallelism,
int consumerParallelism,
DistributionPattern distributionPattern,
int[] expectedConsumers)
throws Exception {
JobVertex producer = new JobVertex("producer");
JobVertex consumer = new JobVertex("consumer");
ExecutionGraph eg =
buildExecutionGraph(
producer,
consumer,
producerParallelism,
consumerParallelism,
distributionPattern);
List<ConsumedPartitionGroup> partitionGroups =
Arrays.stream(checkNotNull(eg.getJobVertex(consumer.getID())).getTaskVertices())
.flatMap(ev -> ev.getAllConsumedPartitionGroups().stream())
.collect(Collectors.toList());
int index = 0;
for (ConsumedPartitionGroup partitionGroup : partitionGroups) {
assertThat(partitionGroup.getNumConsumers()).isEqualTo(expectedConsumers[index++]);
}
}
private ExecutionGraph buildExecutionGraph(
JobVertex producer,
JobVertex consumer,
int producerParallelism,
int consumerParallelism,
DistributionPattern distributionPattern)
throws Exception {
producer.setParallelism(producerParallelism);
consumer.setParallelism(consumerParallelism);
producer.setInvokableClass(NoOpInvokable.class);
consumer.setInvokableClass(NoOpInvokable.class);
connectNewDataSetAsInput(
consumer, producer, distributionPattern, ResultPartitionType.BLOCKING);
JobGraph jobGraph = JobGraphTestUtils.batchJobGraph(producer, consumer);
SchedulerBase scheduler =
SchedulerTestingUtils.createScheduler(
jobGraph,
ComponentMainThreadExecutorServiceAdapter.forMainThread(),
EXECUTOR_RESOURCE.getExecutor());
return scheduler.getExecutionGraph();
}
}
| EdgeManagerTest |
java | apache__camel | components/camel-consul/src/generated/java/org/apache/camel/component/consul/endpoint/ConsulStatusProducerInvokeOnHeaderFactory.java | {
"start": 418,
"end": 1044
} | class ____ implements InvokeOnHeaderStrategy {
@Override
public Object invoke(Object obj, String key, Exchange exchange, AsyncCallback callback) throws Exception {
org.apache.camel.component.consul.endpoint.ConsulStatusProducer target = (org.apache.camel.component.consul.endpoint.ConsulStatusProducer) obj;
switch (key) {
case "leader":
case "LEADER": return target.invokeChecks(exchange.getMessage());
case "peers":
case "PEERS": return target.invokePeers(exchange.getMessage());
default: return null;
}
}
}
| ConsulStatusProducerInvokeOnHeaderFactory |
java | apache__camel | components/camel-smooks/src/generated/java/org/apache/camel/dataformat/smooks/SmooksDataFormatConfigurer.java | {
"start": 728,
"end": 2311
} | class ____ extends org.apache.camel.support.component.PropertyConfigurerSupport implements GeneratedPropertyConfigurer, ExtendedPropertyConfigurerGetter {
private static final Map<String, Object> ALL_OPTIONS;
static {
Map<String, Object> map = new CaseInsensitiveMap();
map.put("SmooksConfig", java.lang.String.class);
ALL_OPTIONS = map;
}
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
SmooksDataFormat target = (SmooksDataFormat) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "smooksconfig":
case "smooksConfig": target.setSmooksConfig(property(camelContext, java.lang.String.class, value)); return true;
default: return false;
}
}
@Override
public Map<String, Object> getAllOptions(Object target) {
return ALL_OPTIONS;
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "smooksconfig":
case "smooksConfig": return java.lang.String.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
SmooksDataFormat target = (SmooksDataFormat) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "smooksconfig":
case "smooksConfig": return target.getSmooksConfig();
default: return null;
}
}
}
| SmooksDataFormatConfigurer |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/ops/EnumsParameterTest.java | {
"start": 5213,
"end": 5265
} | enum ____ {
MOBILE,
LAND_LINE;
}
}
}
| PhoneType |
java | apache__hadoop | hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/PersistentCommitData.java | {
"start": 1865,
"end": 6255
} | class ____<T extends PersistentCommitData>
implements Serializable, IOStatisticsSource {
private static final Logger LOG = LoggerFactory.getLogger(PersistentCommitData.class);
/**
* Supported version value: {@value}.
* If this is changed the value of {@code serialVersionUID} will change,
* to avoid deserialization problems.
*/
public static final int VERSION = 2;
/**
* Validate the data: those fields which must be non empty, must be set.
* @throws ValidationFailure if the data is invalid
*/
public abstract void validate() throws ValidationFailure;
/**
* Serialize to JSON and then to a byte array, after performing a
* preflight validation of the data to be saved.
* @return the data in a persistable form.
* @param serializer serializer to use
* @throws IOException serialization problem or validation failure.
*/
public abstract byte[] toBytes(JsonSerialization<T> serializer) throws IOException;
/**
* Save to a hadoop filesystem.
* The destination file is overwritten, and on s3a stores the
* performance flag is set to turn off all existence checks and
* parent dir cleanup.
* The assumption here is: the job knows what it is doing.
*
* @param fs filesystem
* @param path path
* @param serializer serializer to use
* @return IOStats from the output stream.
*
* @throws IOException IO exception
*/
public abstract IOStatistics save(FileSystem fs, Path path, JsonSerialization<T> serializer)
throws IOException;
/**
* Load an instance from a status, then validate it.
* This uses the openFile() API, which S3A supports for
* faster load and declaring sequential access, always
* @param <T> type of persistent format
* @param fs filesystem
* @param status status of file to load
* @param serializer serializer to use
* @return the loaded instance
* @throws IOException IO failure
* @throws ValidationFailure if the data is invalid
*/
public static <T extends PersistentCommitData> T load(FileSystem fs,
FileStatus status,
JsonSerialization<T> serializer)
throws IOException {
Path path = status.getPath();
LOG.debug("Reading commit data from file {}", path);
T result = serializer.load(fs, path, status);
result.validate();
return result;
}
/**
* Save to a file.
* This uses the createFile() API, which S3A supports for
* faster load and declaring sequential access, always
*
* @param <T> type of persistent format
* @param fs filesystem
* @param path path to save to
* @param instance data to save
* @param serializer serializer to use
* @param performance skip all safety check on the write
*
* @return any IOStatistics from the output stream, or null
*
* @throws IOException IO failure
*/
public static <T extends PersistentCommitData> IOStatistics saveFile(
final FileSystem fs,
final Path path,
final T instance,
final JsonSerialization<T> serializer,
final boolean performance)
throws IOException {
FSDataOutputStreamBuilder builder = fs.createFile(path)
.create()
.recursive()
.overwrite(true);
// switch to performance mode
builder.opt(FS_S3A_CREATE_PERFORMANCE, performance);
return saveToStream(path, instance, builder, serializer);
}
/**
* Save to a file.
* This uses the createFile() API, which S3A supports for
* faster load and declaring sequential access, always
* @param <T> type of persistent format
* @param path path to save to (used for logging)
* @param instance data to save
* @param builder builder already prepared for the write
* @param serializer serializer to use
* @return any IOStatistics from the output stream, or null
* @throws IOException IO failure
*/
public static <T extends PersistentCommitData> IOStatistics saveToStream(
final Path path,
final T instance,
final FSDataOutputStreamBuilder builder,
final JsonSerialization<T> serializer) throws IOException {
LOG.debug("saving commit data to file {}", path);
FSDataOutputStream dataOutputStream = builder.build();
try {
dataOutputStream.write(serializer.toBytes(instance));
} finally {
dataOutputStream.close();
}
return dataOutputStream.getIOStatistics();
}
}
| PersistentCommitData |
java | spring-projects__spring-boot | module/spring-boot-jdbc/src/test/java/org/springframework/boot/jdbc/autoconfigure/XADataSourceAutoConfigurationTests.java | {
"start": 1865,
"end": 6259
} | class ____ {
@Test
void wrapExistingXaDataSource() {
ApplicationContext context = createContext(WrapExisting.class);
context.getBean(DataSource.class);
XADataSource source = context.getBean(XADataSource.class);
MockXADataSourceWrapper wrapper = context.getBean(MockXADataSourceWrapper.class);
assertThat(wrapper.getXaDataSource()).isEqualTo(source);
}
@Test
void createFromUrl() {
ApplicationContext context = createContext(FromProperties.class, "spring.datasource.url:jdbc:hsqldb:mem:test",
"spring.datasource.username:un");
context.getBean(DataSource.class);
MockXADataSourceWrapper wrapper = context.getBean(MockXADataSourceWrapper.class);
JDBCXADataSource dataSource = (JDBCXADataSource) wrapper.getXaDataSource();
assertThat(dataSource).isNotNull();
assertThat(dataSource.getUrl()).isEqualTo("jdbc:hsqldb:mem:test");
assertThat(dataSource.getUser()).isEqualTo("un");
}
@Test
void createNonEmbeddedFromXAProperties() {
new ApplicationContextRunner().withConfiguration(AutoConfigurations.of(XADataSourceAutoConfiguration.class))
.withUserConfiguration(FromProperties.class)
.withClassLoader(new FilteredClassLoader("org.h2.Driver", "org.hsqldb.jdbcDriver"))
.withPropertyValues("spring.datasource.xa.data-source-class-name:com.ibm.db2.jcc.DB2XADataSource",
"spring.datasource.xa.properties.user:test", "spring.datasource.xa.properties.password:secret")
.run((context) -> {
MockXADataSourceWrapper wrapper = context.getBean(MockXADataSourceWrapper.class);
XADataSource xaDataSource = wrapper.getXaDataSource();
assertThat(xaDataSource).isInstanceOf(DB2XADataSource.class);
});
}
@Test
void createFromClass() throws Exception {
ApplicationContext context = createContext(FromProperties.class,
"spring.datasource.xa.data-source-class-name:org.hsqldb.jdbc.pool.JDBCXADataSource",
"spring.datasource.xa.properties.login-timeout:123");
context.getBean(DataSource.class);
MockXADataSourceWrapper wrapper = context.getBean(MockXADataSourceWrapper.class);
JDBCXADataSource dataSource = (JDBCXADataSource) wrapper.getXaDataSource();
assertThat(dataSource).isNotNull();
assertThat(dataSource.getLoginTimeout()).isEqualTo(123);
}
@Test
void definesPropertiesBasedConnectionDetailsByDefault() {
new ApplicationContextRunner().withConfiguration(AutoConfigurations.of(XADataSourceAutoConfiguration.class))
.withUserConfiguration(FromProperties.class)
.run((context) -> assertThat(context).hasSingleBean(PropertiesJdbcConnectionDetails.class));
}
@Test
void shouldUseCustomConnectionDetailsWhenDefined() {
JdbcConnectionDetails connectionDetails = mock(JdbcConnectionDetails.class);
given(connectionDetails.getUsername()).willReturn("user-1");
given(connectionDetails.getPassword()).willReturn("password-1");
given(connectionDetails.getJdbcUrl()).willReturn("jdbc:postgresql://postgres.example.com:12345/database-1");
given(connectionDetails.getDriverClassName()).willReturn(DatabaseDriver.POSTGRESQL.getDriverClassName());
given(connectionDetails.getXaDataSourceClassName())
.willReturn(DatabaseDriver.POSTGRESQL.getXaDataSourceClassName());
new ApplicationContextRunner().withConfiguration(AutoConfigurations.of(XADataSourceAutoConfiguration.class))
.withUserConfiguration(FromProperties.class)
.withBean(JdbcConnectionDetails.class, () -> connectionDetails)
.run((context) -> {
assertThat(context).hasSingleBean(JdbcConnectionDetails.class)
.doesNotHaveBean(PropertiesJdbcConnectionDetails.class);
MockXADataSourceWrapper wrapper = context.getBean(MockXADataSourceWrapper.class);
PGXADataSource dataSource = (PGXADataSource) wrapper.getXaDataSource();
assertThat(dataSource).isNotNull();
assertThat(dataSource.getUrl()).startsWith("jdbc:postgresql://postgres.example.com:12345/database-1");
assertThat(dataSource.getUser()).isEqualTo("user-1");
assertThat(dataSource.getPassword()).isEqualTo("password-1");
});
}
private ApplicationContext createContext(Class<?> configuration, String... env) {
AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext();
TestPropertyValues.of(env).applyTo(context);
context.register(configuration, XADataSourceAutoConfiguration.class);
context.refresh();
return context;
}
@Configuration(proxyBeanMethods = false)
static | XADataSourceAutoConfigurationTests |
java | netty__netty | transport-classes-epoll/src/main/java/io/netty/channel/epoll/LinuxSocket.java | {
"start": 1510,
"end": 20337
} | class ____ extends Socket {
private static final long MAX_UINT32_T = 0xFFFFFFFFL;
LinuxSocket(int fd) {
super(fd);
}
SocketProtocolFamily family() {
return ipv6 ? SocketProtocolFamily.INET6 : SocketProtocolFamily.INET;
}
int sendmmsg(NativeDatagramPacketArray.NativeDatagramPacket[] msgs,
int offset, int len) throws IOException {
return Native.sendmmsg(intValue(), ipv6, msgs, offset, len);
}
int recvmmsg(NativeDatagramPacketArray.NativeDatagramPacket[] msgs,
int offset, int len) throws IOException {
return Native.recvmmsg(intValue(), ipv6, msgs, offset, len);
}
int recvmsg(NativeDatagramPacketArray.NativeDatagramPacket msg) throws IOException {
return Native.recvmsg(intValue(), ipv6, msg);
}
void setTimeToLive(int ttl) throws IOException {
setTimeToLive(intValue(), ttl);
}
void setInterface(InetAddress address) throws IOException {
final NativeInetAddress a = NativeInetAddress.newInstance(address);
setInterface(intValue(), ipv6, a.address(), a.scopeId(), interfaceIndex(address));
}
void setNetworkInterface(NetworkInterface netInterface) throws IOException {
InetAddress address = deriveInetAddress(netInterface, family() == SocketProtocolFamily.INET6);
if (address.equals(family() == SocketProtocolFamily.INET ? Native.INET_ANY : Native.INET6_ANY)) {
throw new IOException("NetworkInterface does not support " + family());
}
final NativeInetAddress nativeAddress = NativeInetAddress.newInstance(address);
setInterface(intValue(), ipv6, nativeAddress.address(), nativeAddress.scopeId(), interfaceIndex(netInterface));
}
InetAddress getInterface() throws IOException {
NetworkInterface inf = getNetworkInterface();
if (inf != null) {
Enumeration<InetAddress> addresses = SocketUtils.addressesFromNetworkInterface(inf);
if (addresses.hasMoreElements()) {
return addresses.nextElement();
}
}
return null;
}
NetworkInterface getNetworkInterface() throws IOException {
int ret = getInterface(intValue(), ipv6);
if (ipv6) {
return NetworkInterface.getByIndex(ret);
}
InetAddress address = inetAddress(ret);
return address != null ? NetworkInterface.getByInetAddress(address) : null;
}
private static InetAddress inetAddress(int value) {
byte[] var1 = {
(byte) (value >>> 24 & 255),
(byte) (value >>> 16 & 255),
(byte) (value >>> 8 & 255),
(byte) (value & 255)
};
try {
return InetAddress.getByAddress(var1);
} catch (UnknownHostException ignore) {
return null;
}
}
void joinGroup(InetAddress group, NetworkInterface netInterface, InetAddress source) throws IOException {
final NativeInetAddress g = NativeInetAddress.newInstance(group);
final boolean isIpv6 = group instanceof Inet6Address;
final NativeInetAddress i = NativeInetAddress.newInstance(deriveInetAddress(netInterface, isIpv6));
if (source != null) {
if (source.getClass() != group.getClass()) {
throw new IllegalArgumentException("Source address is different type to group");
}
final NativeInetAddress s = NativeInetAddress.newInstance(source);
joinSsmGroup(intValue(), ipv6 && isIpv6, g.address(), i.address(),
g.scopeId(), interfaceIndex(netInterface), s.address());
} else {
joinGroup(intValue(), ipv6 && isIpv6, g.address(), i.address(), g.scopeId(), interfaceIndex(netInterface));
}
}
void leaveGroup(InetAddress group, NetworkInterface netInterface, InetAddress source) throws IOException {
final NativeInetAddress g = NativeInetAddress.newInstance(group);
final boolean isIpv6 = group instanceof Inet6Address;
final NativeInetAddress i = NativeInetAddress.newInstance(deriveInetAddress(netInterface, isIpv6));
if (source != null) {
if (source.getClass() != group.getClass()) {
throw new IllegalArgumentException("Source address is different type to group");
}
final NativeInetAddress s = NativeInetAddress.newInstance(source);
leaveSsmGroup(intValue(), ipv6 && isIpv6, g.address(), i.address(),
g.scopeId(), interfaceIndex(netInterface), s.address());
} else {
leaveGroup(intValue(), ipv6 && isIpv6, g.address(), i.address(), g.scopeId(), interfaceIndex(netInterface));
}
}
private static int interfaceIndex(NetworkInterface networkInterface) {
return networkInterface.getIndex();
}
private static int interfaceIndex(InetAddress address) throws IOException {
NetworkInterface iface = NetworkInterface.getByInetAddress(address);
if (iface != null) {
return iface.getIndex();
}
return -1;
}
void setTcpDeferAccept(int deferAccept) throws IOException {
setTcpDeferAccept(intValue(), deferAccept);
}
void setTcpQuickAck(boolean quickAck) throws IOException {
setTcpQuickAck(intValue(), quickAck ? 1 : 0);
}
void setTcpCork(boolean tcpCork) throws IOException {
setTcpCork(intValue(), tcpCork ? 1 : 0);
}
void setSoBusyPoll(int loopMicros) throws IOException {
setSoBusyPoll(intValue(), loopMicros);
}
void setTcpNotSentLowAt(long tcpNotSentLowAt) throws IOException {
if (tcpNotSentLowAt < 0 || tcpNotSentLowAt > MAX_UINT32_T) {
throw new IllegalArgumentException("tcpNotSentLowAt must be a uint32_t");
}
setTcpNotSentLowAt(intValue(), (int) tcpNotSentLowAt);
}
void setTcpFastOpen(int tcpFastopenBacklog) throws IOException {
setTcpFastOpen(intValue(), tcpFastopenBacklog);
}
void setTcpKeepIdle(int seconds) throws IOException {
setTcpKeepIdle(intValue(), seconds);
}
void setTcpKeepIntvl(int seconds) throws IOException {
setTcpKeepIntvl(intValue(), seconds);
}
void setTcpKeepCnt(int probes) throws IOException {
setTcpKeepCnt(intValue(), probes);
}
void setTcpUserTimeout(int milliseconds) throws IOException {
setTcpUserTimeout(intValue(), milliseconds);
}
void setIpBindAddressNoPort(boolean enabled) throws IOException {
setIpBindAddressNoPort(intValue(), enabled ? 1 : 0);
}
void setIpMulticastAll(boolean enabled) throws IOException {
setIpMulticastAll(intValue(), ipv6, enabled ? 1 : 0);
}
void setIpFreeBind(boolean enabled) throws IOException {
setIpFreeBind(intValue(), enabled ? 1 : 0);
}
void setIpTransparent(boolean enabled) throws IOException {
setIpTransparent(intValue(), enabled ? 1 : 0);
}
void setIpRecvOrigDestAddr(boolean enabled) throws IOException {
setIpRecvOrigDestAddr(intValue(), enabled ? 1 : 0);
}
int getTimeToLive() throws IOException {
return getTimeToLive(intValue());
}
void getTcpInfo(EpollTcpInfo info) throws IOException {
getTcpInfo(intValue(), info.info);
}
void setTcpMd5Sig(InetAddress address, byte[] key) throws IOException {
final NativeInetAddress a = NativeInetAddress.newInstance(address);
setTcpMd5Sig(intValue(), ipv6, a.address(), a.scopeId(), key);
}
boolean isTcpCork() throws IOException {
return isTcpCork(intValue()) != 0;
}
int getSoBusyPoll() throws IOException {
return getSoBusyPoll(intValue());
}
int getTcpDeferAccept() throws IOException {
return getTcpDeferAccept(intValue());
}
boolean isTcpQuickAck() throws IOException {
return isTcpQuickAck(intValue()) != 0;
}
long getTcpNotSentLowAt() throws IOException {
return getTcpNotSentLowAt(intValue()) & MAX_UINT32_T;
}
int getTcpKeepIdle() throws IOException {
return getTcpKeepIdle(intValue());
}
int getTcpKeepIntvl() throws IOException {
return getTcpKeepIntvl(intValue());
}
int getTcpKeepCnt() throws IOException {
return getTcpKeepCnt(intValue());
}
int getTcpUserTimeout() throws IOException {
return getTcpUserTimeout(intValue());
}
boolean isIpBindAddressNoPort() throws IOException {
return isIpBindAddressNoPort(intValue()) != 0;
}
boolean isIpMulticastAll() throws IOException {
return isIpMulticastAll(intValue(), ipv6) != 0;
}
boolean isIpFreeBind() throws IOException {
return isIpFreeBind(intValue()) != 0;
}
boolean isIpTransparent() throws IOException {
return isIpTransparent(intValue()) != 0;
}
boolean isIpRecvOrigDestAddr() throws IOException {
return isIpRecvOrigDestAddr(intValue()) != 0;
}
PeerCredentials getPeerCredentials() throws IOException {
return getPeerCredentials(intValue());
}
boolean isLoopbackModeDisabled() throws IOException {
return getIpMulticastLoop(intValue(), ipv6) == 0;
}
void setLoopbackModeDisabled(boolean loopbackModeDisabled) throws IOException {
setIpMulticastLoop(intValue(), ipv6, loopbackModeDisabled ? 0 : 1);
}
boolean isUdpGro() throws IOException {
return isUdpGro(intValue()) != 0;
}
void setUdpGro(boolean gro) throws IOException {
setUdpGro(intValue(), gro ? 1 : 0);
}
long sendFile(DefaultFileRegion src, long baseOffset, long offset, long length) throws IOException {
// Open the file-region as it may be created via the lazy constructor. This is needed as we directly access
// the FileChannel field via JNI.
src.open();
long res = sendFile(intValue(), src, baseOffset, offset, length);
if (res >= 0) {
return res;
}
return ioResult("sendfile", (int) res);
}
public void bindVSock(VSockAddress address) throws IOException {
int res = bindVSock(/*fd*/intValue(), address.getCid(), address.getPort());
if (res < 0) {
throw newIOException("bindVSock", res);
}
}
public boolean connectVSock(VSockAddress address) throws IOException {
int res = connectVSock(/*fd*/intValue(), address.getCid(), address.getPort());
if (res < 0) {
return Errors.handleConnectErrno("connectVSock", res);
}
return true;
}
public VSockAddress remoteVSockAddress() {
byte[] addr = remoteVSockAddress(/*fd*/intValue());
if (addr == null) {
return null;
}
int cid = getIntAt(addr, 0);
int port = getIntAt(addr, 4);
return new VSockAddress(cid, port);
}
public VSockAddress localVSockAddress() {
byte[] addr = localVSockAddress(/*fd*/intValue());
if (addr == null) {
return null;
}
int cid = getIntAt(addr, 0);
int port = getIntAt(addr, 4);
return new VSockAddress(cid, port);
}
private static int getIntAt(byte[] array, int startIndex) {
return array[startIndex] << 24 | (array[startIndex + 1] & 0xFF) << 16
| (array[startIndex + 2] & 0xFF) << 8 | (array[startIndex + 3] & 0xFF);
}
private static InetAddress deriveInetAddress(NetworkInterface netInterface, boolean ipv6) {
final InetAddress ipAny = ipv6 ? Native.INET6_ANY : Native.INET_ANY;
if (netInterface != null) {
final Enumeration<InetAddress> ias = netInterface.getInetAddresses();
while (ias.hasMoreElements()) {
final InetAddress ia = ias.nextElement();
final boolean isV6 = ia instanceof Inet6Address;
if (isV6 == ipv6) {
return ia;
}
}
}
return ipAny;
}
public static LinuxSocket newSocket(int fd) {
return new LinuxSocket(fd);
}
public static LinuxSocket newVSockStream() {
return new LinuxSocket(newVSockStream0());
}
static int newVSockStream0() {
int res = newVSockStreamFd();
if (res < 0) {
throw new ChannelException(newIOException("newVSockStream", res));
}
return res;
}
public static LinuxSocket newSocketStream(boolean ipv6) {
return new LinuxSocket(newSocketStream0(ipv6));
}
/**
* @deprecated use {@link #newSocketStream(SocketProtocolFamily)}
*/
@Deprecated
public static LinuxSocket newSocketStream(InternetProtocolFamily protocol) {
return new LinuxSocket(newSocketStream0(protocol));
}
public static LinuxSocket newSocketStream(SocketProtocolFamily protocol) {
return new LinuxSocket(newSocketStream0(protocol));
}
public static LinuxSocket newSocketStream() {
return newSocketStream(isIPv6Preferred());
}
public static LinuxSocket newSocketDgram(boolean ipv6) {
return new LinuxSocket(newSocketDgram0(ipv6));
}
/**
* @deprecated use {@link #newSocketDgram(SocketProtocolFamily)}
*/
@Deprecated
public static LinuxSocket newSocketDgram(InternetProtocolFamily family) {
return new LinuxSocket(newSocketDgram0(family));
}
public static LinuxSocket newSocketDgram(SocketProtocolFamily family) {
return new LinuxSocket(newSocketDgram0(family));
}
public static LinuxSocket newSocketDgram() {
return newSocketDgram(isIPv6Preferred());
}
public static LinuxSocket newSocketDomain() {
return new LinuxSocket(newSocketDomain0());
}
public static LinuxSocket newSocketDomainDgram() {
return new LinuxSocket(newSocketDomainDgram0());
}
private static native int newVSockStreamFd();
private static native int bindVSock(int fd, int cid, int port);
private static native int connectVSock(int fd, int cid, int port);
private static native byte[] remoteVSockAddress(int fd);
private static native byte[] localVSockAddress(int fd);
private static native void joinGroup(int fd, boolean ipv6, byte[] group, byte[] interfaceAddress,
int scopeId, int interfaceIndex) throws IOException;
private static native void joinSsmGroup(int fd, boolean ipv6, byte[] group, byte[] interfaceAddress,
int scopeId, int interfaceIndex, byte[] source) throws IOException;
private static native void leaveGroup(int fd, boolean ipv6, byte[] group, byte[] interfaceAddress,
int scopeId, int interfaceIndex) throws IOException;
private static native void leaveSsmGroup(int fd, boolean ipv6, byte[] group, byte[] interfaceAddress,
int scopeId, int interfaceIndex, byte[] source) throws IOException;
private static native long sendFile(int socketFd, DefaultFileRegion src, long baseOffset,
long offset, long length) throws IOException;
private static native int getTcpDeferAccept(int fd) throws IOException;
private static native int isTcpQuickAck(int fd) throws IOException;
private static native int isTcpCork(int fd) throws IOException;
private static native int getSoBusyPoll(int fd) throws IOException;
private static native int getTcpNotSentLowAt(int fd) throws IOException;
private static native int getTcpKeepIdle(int fd) throws IOException;
private static native int getTcpKeepIntvl(int fd) throws IOException;
private static native int getTcpKeepCnt(int fd) throws IOException;
private static native int getTcpUserTimeout(int fd) throws IOException;
private static native int getTimeToLive(int fd) throws IOException;
private static native int isIpBindAddressNoPort(int fd) throws IOException;
private static native int isIpMulticastAll(int fd, boolean ipv6) throws IOException;
private static native int isIpFreeBind(int fd) throws IOException;
private static native int isIpTransparent(int fd) throws IOException;
private static native int isIpRecvOrigDestAddr(int fd) throws IOException;
private static native void getTcpInfo(int fd, long[] array) throws IOException;
private static native PeerCredentials getPeerCredentials(int fd) throws IOException;
private static native void setTcpDeferAccept(int fd, int deferAccept) throws IOException;
private static native void setTcpQuickAck(int fd, int quickAck) throws IOException;
private static native void setTcpCork(int fd, int tcpCork) throws IOException;
private static native void setSoBusyPoll(int fd, int loopMicros) throws IOException;
private static native void setTcpNotSentLowAt(int fd, int tcpNotSentLowAt) throws IOException;
private static native void setTcpFastOpen(int fd, int tcpFastopenBacklog) throws IOException;
private static native void setTcpKeepIdle(int fd, int seconds) throws IOException;
private static native void setTcpKeepIntvl(int fd, int seconds) throws IOException;
private static native void setTcpKeepCnt(int fd, int probes) throws IOException;
private static native void setTcpUserTimeout(int fd, int milliseconds)throws IOException;
private static native void setIpBindAddressNoPort(int fd, int ipBindAddressNoPort) throws IOException;
private static native void setIpMulticastAll(int fd, boolean ipv6, int enabled) throws IOException;
private static native void setIpFreeBind(int fd, int freeBind) throws IOException;
private static native void setIpTransparent(int fd, int transparent) throws IOException;
private static native void setIpRecvOrigDestAddr(int fd, int transparent) throws IOException;
private static native void setTcpMd5Sig(
int fd, boolean ipv6, byte[] address, int scopeId, byte[] key) throws IOException;
private static native void setInterface(
int fd, boolean ipv6, byte[] interfaceAddress, int scopeId, int networkInterfaceIndex) throws IOException;
private static native int getInterface(int fd, boolean ipv6);
private static native int getIpMulticastLoop(int fd, boolean ipv6) throws IOException;
private static native void setIpMulticastLoop(int fd, boolean ipv6, int enabled) throws IOException;
private static native void setTimeToLive(int fd, int ttl) throws IOException;
private static native int isUdpGro(int fd) throws IOException;
private static native void setUdpGro(int fd, int gro) throws IOException;
}
| LinuxSocket |
java | apache__maven | impl/maven-cli/src/main/java/org/apache/maven/cling/invoker/CommonsCliOptions.java | {
"start": 1845,
"end": 10749
} | class ____ implements Options {
public static CommonsCliOptions parse(String source, String[] args) throws ParseException {
CLIManager cliManager = new CLIManager();
return new CommonsCliOptions(source, cliManager, cliManager.parse(args));
}
protected final String source;
protected final CLIManager cliManager;
protected final CommandLine commandLine;
protected CommonsCliOptions(String source, CLIManager cliManager, CommandLine commandLine) {
this.source = requireNonNull(source);
this.cliManager = requireNonNull(cliManager);
this.commandLine = requireNonNull(commandLine);
}
@Override
public String source() {
return source;
}
@Override
public Optional<Map<String, String>> userProperties() {
if (commandLine.hasOption(CLIManager.USER_PROPERTY)) {
return Optional.of(toMap(commandLine.getOptionProperties(CLIManager.USER_PROPERTY)));
}
return Optional.empty();
}
@Override
public Optional<Boolean> showVersionAndExit() {
if (commandLine.hasOption(CLIManager.SHOW_VERSION_AND_EXIT)) {
return Optional.of(Boolean.TRUE);
}
return Optional.empty();
}
@Override
public Optional<Boolean> showVersion() {
if (commandLine.hasOption(CLIManager.SHOW_VERSION)) {
return Optional.of(Boolean.TRUE);
}
return Optional.empty();
}
@Override
public Optional<Boolean> quiet() {
if (commandLine.hasOption(CLIManager.QUIET)) {
return Optional.of(Boolean.TRUE);
}
return Optional.empty();
}
@Override
public Optional<Boolean> verbose() {
if (commandLine.hasOption(CLIManager.VERBOSE)) {
return Optional.of(Boolean.TRUE);
}
return Optional.empty();
}
@Override
public Optional<Boolean> showErrors() {
if (commandLine.hasOption(CLIManager.SHOW_ERRORS) || verbose().orElse(false)) {
return Optional.of(Boolean.TRUE);
}
return Optional.empty();
}
@Override
public Optional<String> failOnSeverity() {
if (commandLine.hasOption(CLIManager.FAIL_ON_SEVERITY)) {
return Optional.of(commandLine.getOptionValue(CLIManager.FAIL_ON_SEVERITY));
}
return Optional.empty();
}
@Override
public Optional<Boolean> nonInteractive() {
if (commandLine.hasOption(CLIManager.NON_INTERACTIVE) || commandLine.hasOption(CLIManager.BATCH_MODE)) {
return Optional.of(Boolean.TRUE);
}
return Optional.empty();
}
@Override
public Optional<Boolean> forceInteractive() {
if (commandLine.hasOption(CLIManager.FORCE_INTERACTIVE)) {
return Optional.of(Boolean.TRUE);
}
return Optional.empty();
}
@Override
public Optional<String> altUserSettings() {
if (commandLine.hasOption(CLIManager.ALTERNATE_USER_SETTINGS)) {
return Optional.of(commandLine.getOptionValue(CLIManager.ALTERNATE_USER_SETTINGS));
}
return Optional.empty();
}
@Override
public Optional<String> altProjectSettings() {
if (commandLine.hasOption(CLIManager.ALTERNATE_PROJECT_SETTINGS)) {
return Optional.of(commandLine.getOptionValue(CLIManager.ALTERNATE_PROJECT_SETTINGS));
}
return Optional.empty();
}
@Override
public Optional<String> altInstallationSettings() {
if (commandLine.hasOption(CLIManager.ALTERNATE_INSTALLATION_SETTINGS)) {
return Optional.of(commandLine.getOptionValue(CLIManager.ALTERNATE_INSTALLATION_SETTINGS));
}
if (commandLine.hasOption(CLIManager.ALTERNATE_GLOBAL_SETTINGS)) {
return Optional.of(commandLine.getOptionValue(CLIManager.ALTERNATE_GLOBAL_SETTINGS));
}
return Optional.empty();
}
@Override
public Optional<String> altUserToolchains() {
if (commandLine.hasOption(CLIManager.ALTERNATE_USER_TOOLCHAINS)) {
return Optional.of(commandLine.getOptionValue(CLIManager.ALTERNATE_USER_TOOLCHAINS));
}
return Optional.empty();
}
@Override
public Optional<String> altInstallationToolchains() {
if (commandLine.hasOption(CLIManager.ALTERNATE_INSTALLATION_TOOLCHAINS)) {
return Optional.of(commandLine.getOptionValue(CLIManager.ALTERNATE_INSTALLATION_TOOLCHAINS));
}
if (commandLine.hasOption(CLIManager.ALTERNATE_GLOBAL_TOOLCHAINS)) {
return Optional.of(commandLine.getOptionValue(CLIManager.ALTERNATE_GLOBAL_TOOLCHAINS));
}
return Optional.empty();
}
@Override
public Optional<String> logFile() {
if (commandLine.hasOption(CLIManager.LOG_FILE)) {
return Optional.of(commandLine.getOptionValue(CLIManager.LOG_FILE));
}
return Optional.empty();
}
@Override
public Optional<Boolean> rawStreams() {
if (commandLine.hasOption(CLIManager.RAW_STREAMS)) {
return Optional.of(Boolean.TRUE);
}
return Optional.empty();
}
@Override
public Optional<String> color() {
if (commandLine.hasOption(CLIManager.COLOR)) {
if (commandLine.getOptionValue(CLIManager.COLOR) != null) {
return Optional.of(commandLine.getOptionValue(CLIManager.COLOR));
} else {
return Optional.of("auto");
}
}
return Optional.empty();
}
@Override
public Optional<Boolean> offline() {
if (commandLine.hasOption(CLIManager.OFFLINE)) {
return Optional.of(Boolean.TRUE);
}
return Optional.empty();
}
@Override
public Optional<Boolean> help() {
if (commandLine.hasOption(CLIManager.HELP)) {
return Optional.of(Boolean.TRUE);
}
return Optional.empty();
}
@Override
public void warnAboutDeprecatedOptions(ParserRequest request, Consumer<String> printWriter) {
if (cliManager.getUsedDeprecatedOptions().isEmpty()) {
return;
}
printWriter.accept("Detected deprecated option use in " + source);
for (Option option : cliManager.getUsedDeprecatedOptions()) {
StringBuilder sb = new StringBuilder();
sb.append("The option ");
if (option.getOpt() != null) {
sb.append("-").append(option.getOpt());
}
if (option.getLongOpt() != null) {
if (option.getOpt() != null) {
sb.append(",");
}
sb.append("--").append(option.getLongOpt());
}
sb.append(" is deprecated ");
if (option.getDeprecated().isForRemoval()) {
sb.append("and will be removed in a future version");
}
if (option.getDeprecated().getSince() != null) {
sb.append(" since ")
.append(request.commandName())
.append(" ")
.append(option.getDeprecated().getSince());
}
printWriter.accept(sb.toString());
}
}
@Override
public final Options interpolate(UnaryOperator<String> callback) {
try {
// now that we have properties, interpolate all arguments
Interpolator interpolator = createInterpolator();
CommandLine.Builder commandLineBuilder = CommandLine.builder();
commandLineBuilder.setDeprecatedHandler(o -> {});
for (Option option : commandLine.getOptions()) {
if (!CommonsCliOptions.CLIManager.USER_PROPERTY.equals(option.getOpt())) {
List<String> values = option.getValuesList();
for (ListIterator<String> it = values.listIterator(); it.hasNext(); ) {
it.set(interpolator.interpolate(it.next(), callback));
}
}
commandLineBuilder.addOption(option);
}
for (String arg : commandLine.getArgList()) {
commandLineBuilder.addArg(interpolator.interpolate(arg, callback));
}
return copy(source, cliManager, commandLineBuilder.get());
} catch (InterpolatorException e) {
throw new IllegalArgumentException("Could not interpolate CommonsCliOptions", e);
}
}
protected CommonsCliOptions copy(String source, CLIManager cliManager, CommandLine commandLine) {
return new CommonsCliOptions(source, cliManager, commandLine);
}
@Override
public void displayHelp(ParserRequest request, Consumer<String> printStream) {
cliManager.displayHelp(request.command(), printStream);
}
protected static | CommonsCliOptions |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java | {
"start": 17561,
"end": 23269
} | class ____ implements Runnable {
final String name = getClass().getSimpleName();
/** Check leases periodically. */
@Override
public void run() {
for(; shouldRunMonitor && fsnamesystem.isRunning(); ) {
boolean needSync = false;
try {
// sleep now to avoid infinite loop if an exception was thrown.
Thread.sleep(fsnamesystem.getLeaseRecheckIntervalMs());
// pre-filter the leases w/o the fsn lock.
Collection<Lease> candidates = getExpiredCandidateLeases();
if (candidates.isEmpty()) {
continue;
}
fsnamesystem.writeLockInterruptibly(RwLockMode.GLOBAL);
try {
if (!fsnamesystem.isInSafeMode()) {
needSync = checkLeases(candidates);
}
} finally {
fsnamesystem.writeUnlock(RwLockMode.GLOBAL, "leaseManager");
// lease reassignments should to be sync'ed.
if (needSync) {
fsnamesystem.getEditLog().logSync();
}
}
} catch(InterruptedException ie) {
LOG.debug("{} is interrupted", name, ie);
} catch(Throwable e) {
LOG.warn("Unexpected throwable: ", e);
}
}
}
}
/** Check the leases beginning from the oldest.
* @return true is sync is needed.
*/
@VisibleForTesting
synchronized boolean checkLeases() {
return checkLeases(getExpiredCandidateLeases());
}
private synchronized boolean checkLeases(Collection<Lease> leasesToCheck) {
boolean needSync = false;
assert fsnamesystem.hasWriteLock(RwLockMode.GLOBAL);
long start = monotonicNow();
for (Lease leaseToCheck : leasesToCheck) {
if (isMaxLockHoldToReleaseLease(start)) {
break;
}
if (!leaseToCheck.expiredHardLimit(Time.monotonicNow())) {
continue;
}
LOG.info("{} has expired hard limit", leaseToCheck);
final List<Long> removing = new ArrayList<>();
// need to create a copy of the oldest lease files, because
// internalReleaseLease() removes files corresponding to empty files,
// i.e. it needs to modify the collection being iterated over
// causing ConcurrentModificationException
Collection<Long> files = leaseToCheck.getFiles();
Long[] leaseINodeIds = files.toArray(new Long[files.size()]);
FSDirectory fsd = fsnamesystem.getFSDirectory();
String p = null;
String newHolder = getInternalLeaseHolder();
for(Long id : leaseINodeIds) {
try {
INodesInPath iip = INodesInPath.fromINode(fsd.getInode(id));
p = iip.getPath();
// Sanity check to make sure the path is correct
if (!p.startsWith("/")) {
throw new IOException("Invalid path in the lease " + p);
}
final INodeFile lastINode = iip.getLastINode().asFile();
if (fsnamesystem.isFileDeleted(lastINode)) {
// INode referred by the lease could have been deleted.
removeLease(lastINode.getId());
continue;
}
boolean completed = false;
try {
completed = fsnamesystem.internalReleaseLease(
leaseToCheck, p, iip, newHolder);
} catch (IOException e) {
LOG.warn("Cannot release the path {} in the lease {}. It will be "
+ "retried.", p, leaseToCheck, e);
continue;
}
if (LOG.isDebugEnabled()) {
if (completed) {
LOG.debug("Lease recovery for inode {} is complete. File closed"
+ ".", id);
} else {
LOG.debug("Started block recovery {} lease {}", p, leaseToCheck);
}
}
// If a lease recovery happened, we need to sync later.
if (!needSync && !completed) {
needSync = true;
}
} catch (IOException e) {
LOG.warn("Removing lease with an invalid path: {},{}", p,
leaseToCheck, e);
removing.add(id);
}
if (isMaxLockHoldToReleaseLease(start)) {
LOG.debug("Breaking out of checkLeases after {} ms.",
fsnamesystem.getMaxLockHoldToReleaseLeaseMs());
break;
}
}
for(Long id : removing) {
removeLease(leaseToCheck, id);
}
}
return needSync;
}
/** @return true if max lock hold is reached */
private boolean isMaxLockHoldToReleaseLease(long start) {
return monotonicNow() - start >
fsnamesystem.getMaxLockHoldToReleaseLeaseMs();
}
@Override
public synchronized String toString() {
return getClass().getSimpleName() + "= {"
+ "\n leases=" + leases
+ "\n leasesById=" + leasesById
+ "\n}";
}
void startMonitor() {
Preconditions.checkState(lmthread == null,
"Lease Monitor already running");
shouldRunMonitor = true;
lmthread = new Daemon(new Monitor());
lmthread.start();
}
void stopMonitor() {
if (lmthread != null) {
shouldRunMonitor = false;
try {
lmthread.interrupt();
lmthread.join(3000);
} catch (InterruptedException ie) {
LOG.warn("Encountered exception ", ie);
}
lmthread = null;
}
}
/**
* Trigger the currently-running Lease monitor to re-check
* its leases immediately. This is for use by unit tests.
*/
@VisibleForTesting
public void triggerMonitorCheckNow() {
Preconditions.checkState(lmthread != null,
"Lease monitor is not running");
lmthread.interrupt();
}
@VisibleForTesting
public void runLeaseChecks() {
checkLeases();
}
}
| Monitor |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/events/LegacyPostCommitListenerTest.java | {
"start": 6535,
"end": 6819
} | class ____ implements PostInsertEventListener {
int fired;
@Override
public void onPostInsert(PostInsertEvent event) {
fired++;
}
@Override
public boolean requiresPostCommitHandling(EntityPersister persister) {
return true;
}
}
}
| LegacyPostCommitInsertEventListener |
java | spring-projects__spring-security | core/src/test/java/org/springframework/security/access/expression/AbstractSecurityExpressionHandlerTests.java | {
"start": 1356,
"end": 2587
} | class ____ {
private AbstractSecurityExpressionHandler<Object> handler;
@BeforeEach
public void setUp() {
this.handler = new AbstractSecurityExpressionHandler<Object>() {
@Override
protected SecurityExpressionOperations createSecurityExpressionRoot(Authentication authentication,
Object o) {
return new SecurityExpressionRoot(authentication) {
};
}
};
}
@Test
public void beanNamesAreCorrectlyResolved() {
this.handler.setApplicationContext(new AnnotationConfigApplicationContext(TestConfiguration.class));
Expression expression = this.handler.getExpressionParser()
.parseExpression("@number10.compareTo(@number20) < 0");
assertThat(expression.getValue(this.handler.createEvaluationContext(mock(Authentication.class), new Object())))
.isEqualTo(true);
}
@Test
public void setExpressionParserNull() {
assertThatIllegalArgumentException().isThrownBy(() -> this.handler.setExpressionParser(null));
}
@Test
public void setExpressionParser() {
SpelExpressionParser parser = new SpelExpressionParser();
this.handler.setExpressionParser(parser);
assertThat(parser == this.handler.getExpressionParser()).isTrue();
}
@Configuration
static | AbstractSecurityExpressionHandlerTests |
java | spring-projects__spring-boot | module/spring-boot-http-client/src/main/java/org/springframework/boot/http/client/JettyClientHttpRequestFactoryBuilder.java | {
"start": 1454,
"end": 6289
} | class ____
extends AbstractClientHttpRequestFactoryBuilder<JettyClientHttpRequestFactory> {
private final JettyHttpClientBuilder httpClientBuilder;
JettyClientHttpRequestFactoryBuilder() {
this(null, new JettyHttpClientBuilder());
}
private JettyClientHttpRequestFactoryBuilder(@Nullable List<Consumer<JettyClientHttpRequestFactory>> customizers,
JettyHttpClientBuilder httpClientBuilder) {
super(customizers);
this.httpClientBuilder = httpClientBuilder;
}
@Override
public JettyClientHttpRequestFactoryBuilder withCustomizer(Consumer<JettyClientHttpRequestFactory> customizer) {
return new JettyClientHttpRequestFactoryBuilder(mergedCustomizers(customizer), this.httpClientBuilder);
}
@Override
public JettyClientHttpRequestFactoryBuilder withCustomizers(
Collection<Consumer<JettyClientHttpRequestFactory>> customizers) {
return new JettyClientHttpRequestFactoryBuilder(mergedCustomizers(customizers), this.httpClientBuilder);
}
/**
* Return a new {@link JettyClientHttpRequestFactoryBuilder} that applies additional
* customization to the underlying {@link HttpClient}.
* @param httpClientCustomizer the customizer to apply
* @return a new {@link JettyClientHttpRequestFactoryBuilder} instance
*/
public JettyClientHttpRequestFactoryBuilder withHttpClientCustomizer(Consumer<HttpClient> httpClientCustomizer) {
Assert.notNull(httpClientCustomizer, "'httpClientCustomizer' must not be null");
return new JettyClientHttpRequestFactoryBuilder(getCustomizers(),
this.httpClientBuilder.withCustomizer(httpClientCustomizer));
}
/**
* Return a new {@link JettyClientHttpRequestFactoryBuilder} that uses the given
* factory to create the {@link HttpClientTransport}.
* @param httpClientTransportFactory the {@link HttpClientTransport} factory to use
* @return a new {@link JettyClientHttpRequestFactoryBuilder} instance
* @since 4.0.0
*/
public JettyClientHttpRequestFactoryBuilder withHttpClientTransportFactory(
Function<ClientConnector, HttpClientTransport> httpClientTransportFactory) {
Assert.notNull(httpClientTransportFactory, "'httpClientTransportFactory' must not be null");
return new JettyClientHttpRequestFactoryBuilder(getCustomizers(),
this.httpClientBuilder.withHttpClientTransportFactory(httpClientTransportFactory));
}
/**
* Return a new {@link JettyClientHttpRequestFactoryBuilder} that applies additional
* customization to the underlying {@link HttpClientTransport}.
* @param httpClientTransportCustomizer the customizer to apply
* @return a new {@link JettyClientHttpRequestFactoryBuilder} instance
*/
public JettyClientHttpRequestFactoryBuilder withHttpClientTransportCustomizer(
Consumer<HttpClientTransport> httpClientTransportCustomizer) {
Assert.notNull(httpClientTransportCustomizer, "'httpClientTransportCustomizer' must not be null");
return new JettyClientHttpRequestFactoryBuilder(getCustomizers(),
this.httpClientBuilder.withHttpClientTransportCustomizer(httpClientTransportCustomizer));
}
/**
* Return a new {@link JettyClientHttpRequestFactoryBuilder} that applies additional
* customization to the underlying {@link ClientConnector}.
* @param clientConnectorCustomizerCustomizer the customizer to apply
* @return a new {@link JettyClientHttpRequestFactoryBuilder} instance
*/
public JettyClientHttpRequestFactoryBuilder withClientConnectorCustomizerCustomizer(
Consumer<ClientConnector> clientConnectorCustomizerCustomizer) {
Assert.notNull(clientConnectorCustomizerCustomizer, "'clientConnectorCustomizerCustomizer' must not be null");
return new JettyClientHttpRequestFactoryBuilder(getCustomizers(),
this.httpClientBuilder.withClientConnectorCustomizerCustomizer(clientConnectorCustomizerCustomizer));
}
/**
* Return a new {@link JettyClientHttpRequestFactoryBuilder} that applies the given
* customizer. This can be useful for applying pre-packaged customizations.
* @param customizer the customizer to apply
* @return a new {@link JettyClientHttpRequestFactoryBuilder}
* @since 4.0.0
*/
public JettyClientHttpRequestFactoryBuilder with(UnaryOperator<JettyClientHttpRequestFactoryBuilder> customizer) {
return customizer.apply(this);
}
@Override
protected JettyClientHttpRequestFactory createClientHttpRequestFactory(HttpClientSettings settings) {
HttpClient httpClient = this.httpClientBuilder.build(settings.withTimeouts(null, null));
JettyClientHttpRequestFactory requestFactory = new JettyClientHttpRequestFactory(httpClient);
PropertyMapper map = PropertyMapper.get();
map.from(settings::connectTimeout).asInt(Duration::toMillis).to(requestFactory::setConnectTimeout);
map.from(settings::readTimeout).asInt(Duration::toMillis).to(requestFactory::setReadTimeout);
return requestFactory;
}
static | JettyClientHttpRequestFactoryBuilder |
java | apache__flink | flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/client/proxy/KvStateClientProxyImplTest.java | {
"start": 4020,
"end": 4282
} | class ____ implements KvStateLocationOracle {
@Override
public CompletableFuture<KvStateLocation> requestKvStateLocation(
JobID jobId, String registrationName) {
return null;
}
}
}
| TestingKvStateLocationOracle |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/test/checkpointing/CheckpointStoreITCase.java | {
"start": 5786,
"end": 7259
} | class ____
implements HighAvailabilityServicesFactory {
private static volatile CountDownLatch fetchRemoteCheckpointsStart = new CountDownLatch(1);
private static volatile CountDownLatch fetchRemoteCheckpointsFinished =
new CountDownLatch(1);
static void reset() {
fetchRemoteCheckpointsStart = new CountDownLatch(1);
fetchRemoteCheckpointsFinished = new CountDownLatch(1);
}
@Override
public HighAvailabilityServices createHAServices(
Configuration configuration, Executor executor) {
final CheckpointRecoveryFactory checkpointRecoveryFactory =
PerJobCheckpointRecoveryFactory.withoutCheckpointStoreRecovery(
maxCheckpoints -> {
fetchRemoteCheckpointsStart.countDown();
try {
fetchRemoteCheckpointsFinished.await();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
return new StandaloneCompletedCheckpointStore(maxCheckpoints);
});
return new EmbeddedHaServicesWithLeadershipControl(executor, checkpointRecoveryFactory);
}
}
private static final | BlockingHighAvailabilityServiceFactory |
java | apache__camel | core/camel-api/src/main/java/org/apache/camel/component/extension/ComponentVerifierExtensionHelper.java | {
"start": 1677,
"end": 1741
} | class ____ {
/**
* Custom | ComponentVerifierExtensionHelper |
java | bumptech__glide | library/src/main/java/com/bumptech/glide/module/ManifestParser.java | {
"start": 574,
"end": 3603
} | class ____ {
private static final String TAG = "ManifestParser";
private static final String GLIDE_MODULE_VALUE = "GlideModule";
private final Context context;
public ManifestParser(Context context) {
this.context = context;
}
// getApplicationInfo returns null in Compose previews, see #4977 and b/263613353.
@SuppressWarnings("ConstantConditions")
@Nullable
private ApplicationInfo getOurApplicationInfo() throws NameNotFoundException {
return context
.getPackageManager()
.getApplicationInfo(context.getPackageName(), PackageManager.GET_META_DATA);
}
@SuppressWarnings("deprecation")
public List<GlideModule> parse() {
if (Log.isLoggable(TAG, Log.DEBUG)) {
Log.d(TAG, "Loading Glide modules");
}
List<GlideModule> modules = new ArrayList<>();
try {
ApplicationInfo appInfo = getOurApplicationInfo();
if (appInfo == null || appInfo.metaData == null) {
if (Log.isLoggable(TAG, Log.DEBUG)) {
Log.d(TAG, "Got null app info metadata");
}
return modules;
}
if (Log.isLoggable(TAG, Log.VERBOSE)) {
Log.v(TAG, "Got app info metadata: " + appInfo.metaData);
}
for (String key : appInfo.metaData.keySet()) {
if (GLIDE_MODULE_VALUE.equals(appInfo.metaData.get(key))) {
modules.add(parseModule(key));
if (Log.isLoggable(TAG, Log.DEBUG)) {
Log.d(TAG, "Loaded Glide module: " + key);
}
}
}
if (Log.isLoggable(TAG, Log.DEBUG)) {
Log.d(TAG, "Finished loading Glide modules");
}
} catch (PackageManager.NameNotFoundException e) {
if (Log.isLoggable(TAG, Log.ERROR)) {
Log.e(TAG, "Failed to parse glide modules", e);
}
}
return modules;
}
@SuppressWarnings("deprecation")
private static GlideModule parseModule(String className) {
Class<?> clazz;
try {
clazz = Class.forName(className);
} catch (ClassNotFoundException e) {
throw new IllegalArgumentException("Unable to find GlideModule implementation", e);
}
Object module = null;
try {
module = clazz.getDeclaredConstructor().newInstance();
// These can't be combined until API minimum is 19.
} catch (InstantiationException e) {
throwInstantiateGlideModuleException(clazz, e);
} catch (IllegalAccessException e) {
throwInstantiateGlideModuleException(clazz, e);
} catch (NoSuchMethodException e) {
throwInstantiateGlideModuleException(clazz, e);
} catch (InvocationTargetException e) {
throwInstantiateGlideModuleException(clazz, e);
}
if (!(module instanceof GlideModule)) {
throw new RuntimeException("Expected instanceof GlideModule, but found: " + module);
}
return (GlideModule) module;
}
private static void throwInstantiateGlideModuleException(Class<?> clazz, Exception e) {
throw new RuntimeException("Unable to instantiate GlideModule implementation for " + clazz, e);
}
}
| ManifestParser |
java | quarkusio__quarkus | extensions/grpc/runtime/src/main/java/io/quarkus/grpc/runtime/ClientInterceptorStorage.java | {
"start": 96,
"end": 1242
} | class ____ {
private final Set<Class<?>> perClientInterceptors;
private final Set<Class<?>> globalInterceptors;
public ClientInterceptorStorage(Set<Class<?>> perClientInterceptors,
Set<Class<?>> globalInterceptors) {
this.perClientInterceptors = Set.copyOf(perClientInterceptors);
this.globalInterceptors = Set.copyOf(globalInterceptors);
}
public Set<Class<?>> getPerClientInterceptors(Set<String> interceptorClasses) {
Set<Class<?>> ret = new HashSet<Class<?>>();
for (Class<?> interceptor : perClientInterceptors) {
if (interceptorClasses.contains(interceptor.getName())) {
ret.add(interceptor);
}
}
return ret;
}
public Class<?> getPerClientInterceptor(String interceptorClass) {
for (Class<?> interceptor : perClientInterceptors) {
if (interceptor.getName().equals(interceptorClass)) {
return interceptor;
}
}
return null;
}
public Set<Class<?>> getGlobalInterceptors() {
return globalInterceptors;
}
}
| ClientInterceptorStorage |
java | processing__processing4 | java/test/processing/mode/java/preproc/MissingIdentifierMessageSimplifierStrategyTest.java | {
"start": 272,
"end": 953
} | class ____ {
private PreprocessIssueMessageSimplifier.PreprocIssueMessageSimplifierStrategy strategy;
@Before
public void setup() {
strategy = PreprocessIssueMessageSimplifier.get()
.createMissingIdentifierSimplifierStrategy();
}
@Test
public void testPresent() {
Optional<PdeIssueEmitter.IssueMessageSimplification> msg = strategy.simplify("Missing identifier at ';'");
Assert.assertTrue(msg.isPresent());
}
@Test
public void testNotPresent() {
Optional<PdeIssueEmitter.IssueMessageSimplification> msg = strategy.simplify("String x = \" \\\" \"");
Assert.assertTrue(msg.isEmpty());
}
} | MissingIdentifierMessageSimplifierStrategyTest |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/plan/nodes/exec/common/LimitTestPrograms.java | {
"start": 1233,
"end": 2660
} | class ____ {
static final Row[] DATA1 =
new Row[] {
Row.of(2, "a", 6),
Row.of(4, "b", 8),
Row.of(6, "c", 10),
Row.of(1, "a", 5),
Row.of(3, "b", 7),
Row.of(5, "c", 9)
};
static final Row[] DATA2 = new Row[] {Row.of(8, "d", 3), Row.of(7, "e", 2)};
public static final TableTestProgram LIMIT =
TableTestProgram.of("limit", "validates limit node")
.setupTableSource(
SourceTestStep.newBuilder("source_t")
.addSchema("a INT", "b VARCHAR", "c INT")
.producedBeforeRestore(DATA1)
.producedAfterRestore(DATA2)
.build())
.setupTableSink(
SinkTestStep.newBuilder("sink_t")
.addSchema("a INT", "b VARCHAR", "c BIGINT")
.consumedBeforeRestore(
"+I[2, a, 6]", "+I[4, b, 8]", "+I[6, c, 10]")
.consumedAfterRestore(new String[] {})
.build())
.runSql("INSERT INTO sink_t SELECT * from source_t LIMIT 3")
.build();
}
| LimitTestPrograms |
java | apache__dubbo | dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/TriHttp2RemoteFlowController.java | {
"start": 27425,
"end": 31372
} | class ____ extends WritabilityMonitor implements Http2StreamVisitor {
private final Listener listener;
ListenerWritabilityMonitor(Listener listener) {
this.listener = listener;
}
@Override
public boolean visit(Http2Stream stream) throws Http2Exception {
FlowState state = state(stream);
if (isWritable(state) != state.markedWritability()) {
notifyWritabilityChanged(state);
}
return true;
}
@Override
void windowSize(FlowState state, int initialWindowSize) {
super.windowSize(state, initialWindowSize);
try {
checkStateWritability(state);
} catch (Http2Exception e) {
throw new RuntimeException("Caught unexpected exception from window", e);
}
}
@Override
void incrementWindowSize(FlowState state, int delta) throws Http2Exception {
super.incrementWindowSize(state, delta);
checkStateWritability(state);
}
@Override
void initialWindowSize(int newWindowSize) throws Http2Exception {
super.initialWindowSize(newWindowSize);
if (isWritableConnection()) {
// If the write operation does not occur we still need to check all streams because they
// may have transitioned from writable to not writable.
checkAllWritabilityChanged();
}
}
@Override
void enqueueFrame(FlowState state, FlowControlled frame) throws Http2Exception {
super.enqueueFrame(state, frame);
checkConnectionThenStreamWritabilityChanged(state);
}
@Override
void stateCancelled(FlowState state) {
try {
checkConnectionThenStreamWritabilityChanged(state);
} catch (Http2Exception e) {
throw new RuntimeException("Caught unexpected exception from checkAllWritabilityChanged", e);
}
}
@Override
void channelWritabilityChange() throws Http2Exception {
if (connectionState.markedWritability() != isChannelWritable()) {
checkAllWritabilityChanged();
}
}
private void checkStateWritability(FlowState state) throws Http2Exception {
if (isWritable(state) != state.markedWritability()) {
if (state == connectionState) {
checkAllWritabilityChanged();
} else {
notifyWritabilityChanged(state);
}
}
}
private void notifyWritabilityChanged(FlowState state) {
state.markedWritability(!state.markedWritability());
try {
listener.writabilityChanged(state.stream);
} catch (Throwable cause) {
logger.error("Caught Throwable from listener.writabilityChanged", cause);
}
}
private void checkConnectionThenStreamWritabilityChanged(FlowState state) throws Http2Exception {
// It is possible that the connection window and/or the individual stream writability could change.
if (isWritableConnection() != connectionState.markedWritability()) {
checkAllWritabilityChanged();
} else if (isWritable(state) != state.markedWritability()) {
notifyWritabilityChanged(state);
}
// does not check overflow anymore: Let receiver continue receiving the pending bytes.
}
private void checkAllWritabilityChanged() throws Http2Exception {
// Make sure we mark that we have notified as a result of this change.
connectionState.markedWritability(isWritableConnection());
connection.forEachActiveStream(this);
}
}
}
| ListenerWritabilityMonitor |
java | google__dagger | dagger-runtime/main/java/dagger/internal/SetBuilder.java | {
"start": 1046,
"end": 2367
} | class ____<T> {
private static final String SET_CONTRIBUTIONS_CANNOT_BE_NULL =
"Set contributions cannot be null";
private final List<T> contributions;
private SetBuilder(int estimatedSize) {
contributions = new ArrayList<>(estimatedSize);
}
/**
* {@code estimatedSize} is the number of bindings which contribute to the set. They may each
* provide {@code [0..n)} instances to the set. Because the final size is unknown, {@code
* contributions} are collected in a list and only hashed in {@link #build()}.
*/
public static <T> SetBuilder<T> newSetBuilder(int estimatedSize) {
return new SetBuilder<T>(estimatedSize);
}
public SetBuilder<T> add(T t) {
contributions.add(checkNotNull(t, SET_CONTRIBUTIONS_CANNOT_BE_NULL));
return this;
}
public SetBuilder<T> addAll(Collection<? extends T> collection) {
for (T item : collection) {
checkNotNull(item, SET_CONTRIBUTIONS_CANNOT_BE_NULL);
}
contributions.addAll(collection);
return this;
}
public Set<T> build() {
if (contributions.isEmpty()) {
return Collections.emptySet();
} else if (contributions.size() == 1) {
return Collections.singleton(contributions.get(0));
} else {
return Collections.unmodifiableSet(new HashSet<>(contributions));
}
}
}
| SetBuilder |
java | apache__maven | its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng2605BogusProfileActivationTest.java | {
"start": 1132,
"end": 2245
} | class ____ extends AbstractMavenIntegrationTestCase {
/**
* Test that profiles are not accidentally activated when they have no activation element at all and
* the user did not request their activation via id.
*
* @throws Exception in case of failure
*/
@Test
public void testitMNG2605() throws Exception {
File testDir = extractResources("/mng-2605");
Verifier verifier = newVerifier(testDir.getAbsolutePath());
verifier.setAutoclean(false);
verifier.deleteDirectory("target");
verifier.addCliArgument("--settings");
verifier.addCliArgument("settings.xml");
verifier.addCliArgument("validate");
verifier.execute();
verifier.verifyErrorFreeLog();
Properties props = verifier.loadProperties("target/profile.properties");
assertNull(props.getProperty("project.properties.pomProperty"));
assertNull(props.getProperty("project.properties.settingsProperty"));
assertNull(props.getProperty("project.properties.profilesProperty"));
}
}
| MavenITmng2605BogusProfileActivationTest |
java | apache__kafka | streams/src/test/java/org/apache/kafka/streams/processor/internals/assignment/ClientStateTest.java | {
"start": 3927,
"end": 26298
} | class ____ {
private final ClientState client = new ClientState(1);
private final ClientState zeroCapacityClient = new ClientState(0);
@Test
public void previousStateConstructorShouldCreateAValidObject() {
final ClientState clientState = new ClientState(
Set.of(TASK_0_0, TASK_0_1),
Set.of(TASK_0_2, TASK_0_3),
mkMap(mkEntry(TASK_0_0, 5L), mkEntry(TASK_0_2, -1L)),
EMPTY_CLIENT_TAGS,
4
);
// all the "next assignment" fields should be empty
assertThat(clientState.activeTaskCount(), is(0));
assertThat(clientState.activeTaskLoad(), is(0.0));
assertThat(clientState.activeTasks(), is(empty()));
assertThat(clientState.standbyTaskCount(), is(0));
assertThat(clientState.standbyTasks(), is(empty()));
assertThat(clientState.assignedTaskCount(), is(0));
assertThat(clientState.assignedTasks(), is(empty()));
// and the "previous assignment" fields should match the constructor args
assertThat(clientState.prevActiveTasks(), is(Set.of(TASK_0_0, TASK_0_1)));
assertThat(clientState.prevStandbyTasks(), is(Set.of(TASK_0_2, TASK_0_3)));
assertThat(clientState.previousAssignedTasks(), is(Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3)));
assertThat(clientState.capacity(), is(4));
assertThat(clientState.lagFor(TASK_0_0), is(5L));
assertThat(clientState.lagFor(TASK_0_2), is(-1L));
}
@Test
public void shouldHaveNotReachedCapacityWhenAssignedTasksLessThanCapacity() {
assertFalse(client.reachedCapacity());
}
@Test
public void shouldHaveReachedCapacityWhenAssignedTasksGreaterThanOrEqualToCapacity() {
client.assignActive(TASK_0_1);
assertTrue(client.reachedCapacity());
}
@Test
public void shouldRefuseDoubleActiveTask() {
final ClientState clientState = new ClientState(1);
clientState.assignActive(TASK_0_0);
assertThrows(IllegalArgumentException.class, () -> clientState.assignActive(TASK_0_0));
}
@Test
public void shouldRefuseActiveAndStandbyTask() {
final ClientState clientState = new ClientState(1);
clientState.assignActive(TASK_0_0);
assertThrows(IllegalArgumentException.class, () -> clientState.assignStandby(TASK_0_0));
}
@Test
public void shouldRefuseDoubleStandbyTask() {
final ClientState clientState = new ClientState(1);
clientState.assignStandby(TASK_0_0);
assertThrows(IllegalArgumentException.class, () -> clientState.assignStandby(TASK_0_0));
}
@Test
public void shouldRefuseStandbyAndActiveTask() {
final ClientState clientState = new ClientState(1);
clientState.assignStandby(TASK_0_0);
assertThrows(IllegalArgumentException.class, () -> clientState.assignActive(TASK_0_0));
}
@Test
public void shouldRefuseToUnassignNotAssignedActiveTask() {
final ClientState clientState = new ClientState(1);
assertThrows(IllegalArgumentException.class, () -> clientState.unassignActive(TASK_0_0));
}
@Test
public void shouldRefuseToUnassignNotAssignedStandbyTask() {
final ClientState clientState = new ClientState(1);
assertThrows(IllegalArgumentException.class, () -> clientState.unassignStandby(TASK_0_0));
}
@Test
public void shouldRefuseToUnassignActiveTaskAsStandby() {
final ClientState clientState = new ClientState(1);
clientState.assignActive(TASK_0_0);
assertThrows(IllegalArgumentException.class, () -> clientState.unassignStandby(TASK_0_0));
}
@Test
public void shouldRefuseToUnassignStandbyTaskAsActive() {
final ClientState clientState = new ClientState(1);
clientState.assignStandby(TASK_0_0);
assertThrows(IllegalArgumentException.class, () -> clientState.unassignActive(TASK_0_0));
}
@Test
public void shouldUnassignActiveTask() {
final ClientState clientState = new ClientState(1);
clientState.assignActive(TASK_0_0);
assertThat(clientState, hasActiveTasks(1));
clientState.unassignActive(TASK_0_0);
assertThat(clientState, hasActiveTasks(0));
}
@Test
public void shouldUnassignStandbyTask() {
final ClientState clientState = new ClientState(1);
clientState.assignStandby(TASK_0_0);
assertThat(clientState, hasStandbyTasks(1));
clientState.unassignStandby(TASK_0_0);
assertThat(clientState, hasStandbyTasks(0));
}
@Test
public void shouldNotModifyActiveView() {
final ClientState clientState = new ClientState(1);
final Set<TaskId> taskIds = clientState.activeTasks();
assertThrows(UnsupportedOperationException.class, () -> taskIds.add(TASK_0_0));
assertThat(clientState, hasActiveTasks(0));
}
@Test
public void shouldNotModifyStandbyView() {
final ClientState clientState = new ClientState(1);
final Set<TaskId> taskIds = clientState.standbyTasks();
assertThrows(UnsupportedOperationException.class, () -> taskIds.add(TASK_0_0));
assertThat(clientState, hasStandbyTasks(0));
}
@Test
public void shouldNotModifyAssignedView() {
final ClientState clientState = new ClientState(1);
final Set<TaskId> taskIds = clientState.assignedTasks();
assertThrows(UnsupportedOperationException.class, () -> taskIds.add(TASK_0_0));
assertThat(clientState, hasActiveTasks(0));
assertThat(clientState, hasStandbyTasks(0));
}
@Test
public void shouldAddActiveTasksToBothAssignedAndActive() {
client.assignActive(TASK_0_1);
assertThat(client.activeTasks(), equalTo(Collections.singleton(TASK_0_1)));
assertThat(client.assignedTasks(), equalTo(Collections.singleton(TASK_0_1)));
assertThat(client.assignedTaskCount(), equalTo(1));
assertThat(client.standbyTasks().size(), equalTo(0));
}
@Test
public void shouldAddStandbyTasksToBothStandbyAndAssigned() {
client.assignStandby(TASK_0_1);
assertThat(client.assignedTasks(), equalTo(Collections.singleton(TASK_0_1)));
assertThat(client.standbyTasks(), equalTo(Collections.singleton(TASK_0_1)));
assertThat(client.assignedTaskCount(), equalTo(1));
assertThat(client.activeTasks().size(), equalTo(0));
}
@Test
public void shouldAddPreviousActiveTasksToPreviousAssignedAndPreviousActive() {
client.addPreviousActiveTasks(Set.of(TASK_0_1, TASK_0_2));
assertThat(client.prevActiveTasks(), equalTo(Set.of(TASK_0_1, TASK_0_2)));
assertThat(client.previousAssignedTasks(), equalTo(Set.of(TASK_0_1, TASK_0_2)));
}
@Test
public void shouldAddPreviousStandbyTasksToPreviousAssignedAndPreviousStandby() {
client.addPreviousStandbyTasks(Set.of(TASK_0_1, TASK_0_2));
assertThat(client.prevActiveTasks().size(), equalTo(0));
assertThat(client.previousAssignedTasks(), equalTo(Set.of(TASK_0_1, TASK_0_2)));
}
@Test
public void shouldHaveAssignedTaskIfActiveTaskAssigned() {
client.assignActive(TASK_0_1);
assertTrue(client.hasAssignedTask(TASK_0_1));
}
@Test
public void shouldHaveAssignedTaskIfStandbyTaskAssigned() {
client.assignStandby(TASK_0_1);
assertTrue(client.hasAssignedTask(TASK_0_1));
}
@Test
public void shouldNotHaveAssignedTaskIfTaskNotAssigned() {
client.assignActive(TASK_0_1);
assertFalse(client.hasAssignedTask(TASK_0_2));
}
@Test
public void shouldHaveMoreAvailableCapacityWhenCapacityTheSameButFewerAssignedTasks() {
final ClientState otherClient = new ClientState(1);
client.assignActive(TASK_0_1);
assertTrue(otherClient.hasMoreAvailableCapacityThan(client));
assertFalse(client.hasMoreAvailableCapacityThan(otherClient));
}
@Test
public void shouldHaveMoreAvailableCapacityWhenCapacityHigherAndSameAssignedTaskCount() {
final ClientState otherClient = new ClientState(2);
assertTrue(otherClient.hasMoreAvailableCapacityThan(client));
assertFalse(client.hasMoreAvailableCapacityThan(otherClient));
}
@Test
public void shouldUseMultiplesOfCapacityToDetermineClientWithMoreAvailableCapacity() {
final ClientState otherClient = new ClientState(2);
for (int i = 0; i < 7; i++) {
otherClient.assignActive(new TaskId(0, i));
}
for (int i = 7; i < 11; i++) {
client.assignActive(new TaskId(0, i));
}
assertTrue(otherClient.hasMoreAvailableCapacityThan(client));
}
@Test
public void shouldHaveMoreAvailableCapacityWhenCapacityIsTheSameButAssignedTasksIsLess() {
final ClientState client = new ClientState(3);
final ClientState otherClient = new ClientState(3);
for (int i = 0; i < 4; i++) {
client.assignActive(new TaskId(0, i));
otherClient.assignActive(new TaskId(0, i));
}
otherClient.assignActive(new TaskId(0, 5));
assertTrue(client.hasMoreAvailableCapacityThan(otherClient));
}
@Test
public void shouldThrowIllegalStateExceptionIfCapacityOfThisClientStateIsZero() {
assertThrows(IllegalStateException.class, () -> zeroCapacityClient.hasMoreAvailableCapacityThan(client));
}
@Test
public void shouldThrowIllegalStateExceptionIfCapacityOfOtherClientStateIsZero() {
assertThrows(IllegalStateException.class, () -> client.hasMoreAvailableCapacityThan(zeroCapacityClient));
}
@Test
public void shouldHaveUnfulfilledQuotaWhenActiveTaskSizeLessThanCapacityTimesTasksPerThread() {
client.assignActive(new TaskId(0, 1));
assertTrue(client.hasUnfulfilledQuota(2));
}
@Test
public void shouldNotHaveUnfulfilledQuotaWhenActiveTaskSizeGreaterEqualThanCapacityTimesTasksPerThread() {
client.assignActive(new TaskId(0, 1));
assertFalse(client.hasUnfulfilledQuota(1));
}
@Test
public void shouldAddTasksWithLatestOffsetToPrevActiveTasks() {
final Map<TaskId, Long> taskOffsetSums = Collections.singletonMap(TASK_0_1, Task.LATEST_OFFSET);
client.addPreviousTasksAndOffsetSums("c1", taskOffsetSums);
client.initializePrevTasks(Collections.emptyMap(), false);
assertThat(client.prevActiveTasks(), equalTo(Collections.singleton(TASK_0_1)));
assertThat(client.previousAssignedTasks(), equalTo(Collections.singleton(TASK_0_1)));
assertTrue(client.prevStandbyTasks().isEmpty());
}
@Test
public void shouldThrowWhenSomeOwnedPartitionsAreNotRecognizedWhenInitializingPrevTasks() {
final Map<TopicPartition, TaskId> taskForPartitionMap = Collections.singletonMap(TP_0_1, TASK_0_1);
client.addOwnedPartitions(Collections.singleton(TP_0_0), "c1");
client.addPreviousTasksAndOffsetSums("c1", Collections.emptyMap());
assertThrows(IllegalStateException.class, () -> client.initializePrevTasks(taskForPartitionMap, false));
}
@Test
public void shouldFilterOutUnrecognizedPartitionsAndInitializePrevTasksWhenUsingNamedTopologies() {
final Map<TopicPartition, TaskId> taskForPartitionMap = Collections.singletonMap(TP_0_1, TASK_0_1);
client.addOwnedPartitions(Collections.singleton(TP_0_0), "c1");
client.addPreviousTasksAndOffsetSums("c1", Collections.emptyMap());
client.initializePrevTasks(taskForPartitionMap, true);
assertThat(client.prevActiveTasks().isEmpty(), is(true));
assertThat(client.previousAssignedTasks().isEmpty(), is(true));
assertThat(client.prevStandbyTasks().isEmpty(), is(true));
}
@Test
public void shouldReturnPreviousStatefulTasksForConsumer() {
client.addPreviousTasksAndOffsetSums("c1", mkMap(
mkEntry(TASK_0_0, 100L),
mkEntry(TASK_0_1, Task.LATEST_OFFSET)
));
client.addPreviousTasksAndOffsetSums("c2", Collections.singletonMap(TASK_0_2, 0L));
client.addPreviousTasksAndOffsetSums("c3", Collections.emptyMap());
client.initializePrevTasks(Collections.emptyMap(), false);
assertThat(client.prevOwnedStatefulTasksByConsumer("c1"), equalTo(Set.of(TASK_0_0, TASK_0_1)));
assertThat(client.prevOwnedStatefulTasksByConsumer("c2"), equalTo(Set.of(TASK_0_2)));
assertTrue(client.prevOwnedStatefulTasksByConsumer("c3").isEmpty());
}
@Test
public void shouldReturnPreviousActiveStandbyTasksForConsumer() {
client.addOwnedPartitions(Set.of(TP_0_1, TP_1_1), "c1");
client.addOwnedPartitions(Set.of(TP_0_2, TP_1_2), "c2");
client.initializePrevTasks(
mkMap(
mkEntry(TP_0_0, TASK_0_0),
mkEntry(TP_0_1, TASK_0_1),
mkEntry(TP_0_2, TASK_0_2),
mkEntry(TP_1_0, TASK_0_0),
mkEntry(TP_1_1, TASK_0_1),
mkEntry(TP_1_2, TASK_0_2)),
false
);
client.addPreviousTasksAndOffsetSums("c1", mkMap(
mkEntry(TASK_0_1, Task.LATEST_OFFSET),
mkEntry(TASK_0_0, 10L)));
client.addPreviousTasksAndOffsetSums("c2", Collections.singletonMap(TASK_0_2, 0L));
assertThat(client.prevOwnedStatefulTasksByConsumer("c1"), equalTo(Set.of(TASK_0_1, TASK_0_0)));
assertThat(client.prevOwnedStatefulTasksByConsumer("c2"), equalTo(Set.of(TASK_0_2)));
assertThat(client.prevOwnedActiveTasksByConsumer(), equalTo(
mkMap(
mkEntry("c1", Collections.singleton(TASK_0_1)),
mkEntry("c2", Collections.singleton(TASK_0_2))
))
);
assertThat(client.prevOwnedStandbyByConsumer(), equalTo(
mkMap(
mkEntry("c1", Collections.singleton(TASK_0_0)),
mkEntry("c2", Collections.emptySet())
))
);
}
@Test
public void shouldReturnAssignedTasksForConsumer() {
final List<TaskId> allTasks = new ArrayList<>(asList(TASK_0_0, TASK_0_1, TASK_0_2));
client.assignActiveTasks(allTasks);
client.assignActiveToConsumer(TASK_0_0, "c1");
// calling it multiple tasks should be idempotent
client.assignActiveToConsumer(TASK_0_0, "c1");
client.assignActiveToConsumer(TASK_0_1, "c1");
client.assignActiveToConsumer(TASK_0_2, "c2");
client.assignStandbyToConsumer(TASK_0_2, "c1");
client.assignStandbyToConsumer(TASK_0_0, "c2");
// calling it multiple tasks should be idempotent
client.assignStandbyToConsumer(TASK_0_0, "c2");
client.revokeActiveFromConsumer(TASK_0_1, "c1");
// calling it multiple tasks should be idempotent
client.revokeActiveFromConsumer(TASK_0_1, "c1");
assertThat(client.assignedActiveTasksByConsumer(), equalTo(mkMap(
mkEntry("c1", Set.of(TASK_0_0, TASK_0_1)),
mkEntry("c2", Set.of(TASK_0_2))
)));
assertThat(client.assignedStandbyTasksByConsumer(), equalTo(mkMap(
mkEntry("c1", Set.of(TASK_0_2)),
mkEntry("c2", Set.of(TASK_0_0))
)));
assertThat(client.revokingActiveTasksByConsumer(), equalTo(Collections.singletonMap("c1", Set.of(TASK_0_1))));
}
@Test
public void shouldAddTasksInOffsetSumsMapToPrevStandbyTasks() {
final Map<TaskId, Long> taskOffsetSums = mkMap(
mkEntry(TASK_0_1, 0L),
mkEntry(TASK_0_2, 100L)
);
client.addPreviousTasksAndOffsetSums("c1", taskOffsetSums);
client.initializePrevTasks(Collections.emptyMap(), false);
assertThat(client.prevStandbyTasks(), equalTo(Set.of(TASK_0_1, TASK_0_2)));
assertThat(client.previousAssignedTasks(), equalTo(Set.of(TASK_0_1, TASK_0_2)));
assertTrue(client.prevActiveTasks().isEmpty());
}
@Test
public void shouldComputeTaskLags() {
final Map<TaskId, Long> taskOffsetSums = mkMap(
mkEntry(TASK_0_1, 0L),
mkEntry(TASK_0_2, 100L)
);
final Map<TaskId, Long> allTaskEndOffsetSums = mkMap(
mkEntry(TASK_0_1, 500L),
mkEntry(TASK_0_2, 100L)
);
client.addPreviousTasksAndOffsetSums("c1", taskOffsetSums);
client.computeTaskLags(null, allTaskEndOffsetSums);
assertThat(client.lagFor(TASK_0_1), equalTo(500L));
assertThat(client.lagFor(TASK_0_2), equalTo(0L));
}
@Test
public void shouldNotTryToLookupTasksThatWerePreviouslyAssignedButNoLongerExist() {
final Map<TaskId, Long> clientReportedTaskEndOffsetSums = mkMap(
mkEntry(NAMED_TASK_T0_0_0, 500L),
mkEntry(NAMED_TASK_T1_0_0, 500L)
);
final Map<TaskId, Long> allTaskEndOffsetSumsComputedByAssignor = Collections.singletonMap(NAMED_TASK_T0_0_0, 500L);
client.addPreviousTasksAndOffsetSums("c1", clientReportedTaskEndOffsetSums);
client.computeTaskLags(null, allTaskEndOffsetSumsComputedByAssignor);
assertThrows(IllegalStateException.class, () -> client.lagFor(NAMED_TASK_T1_0_0));
client.assignActive(NAMED_TASK_T0_0_0);
assertThat(client.prevTasksByLag("c1"), equalTo(mkSortedSet(NAMED_TASK_T0_0_0)));
}
@Test
public void shouldReturnEndOffsetSumForLagOfTaskWeDidNotPreviouslyOwn() {
final Map<TaskId, Long> taskOffsetSums = Collections.emptyMap();
final Map<TaskId, Long> allTaskEndOffsetSums = Collections.singletonMap(TASK_0_1, 500L);
client.addPreviousTasksAndOffsetSums("c1", taskOffsetSums);
client.computeTaskLags(null, allTaskEndOffsetSums);
assertThat(client.lagFor(TASK_0_1), equalTo(500L));
}
@Test
public void shouldReturnLatestOffsetForLagOfPreviousActiveRunningTask() {
final Map<TaskId, Long> taskOffsetSums = Collections.singletonMap(TASK_0_1, Task.LATEST_OFFSET);
final Map<TaskId, Long> allTaskEndOffsetSums = Collections.singletonMap(TASK_0_1, 500L);
client.addPreviousTasksAndOffsetSums("c1", taskOffsetSums);
client.computeTaskLags(null, allTaskEndOffsetSums);
assertThat(client.lagFor(TASK_0_1), equalTo(Task.LATEST_OFFSET));
}
@Test
public void shouldReturnUnknownOffsetSumForLagOfTaskWithUnknownOffset() {
final Map<TaskId, Long> taskOffsetSums = Collections.singletonMap(TASK_0_1, UNKNOWN_OFFSET_SUM);
final Map<TaskId, Long> allTaskEndOffsetSums = Collections.singletonMap(TASK_0_1, 500L);
client.addPreviousTasksAndOffsetSums("c1", taskOffsetSums);
client.computeTaskLags(null, allTaskEndOffsetSums);
assertThat(client.lagFor(TASK_0_1), equalTo(UNKNOWN_OFFSET_SUM));
}
@Test
public void shouldReturnEndOffsetSumIfOffsetSumIsGreaterThanEndOffsetSum() {
final Map<TaskId, Long> taskOffsetSums = Collections.singletonMap(TASK_0_1, 5L);
final Map<TaskId, Long> allTaskEndOffsetSums = Collections.singletonMap(TASK_0_1, 1L);
client.addPreviousTasksAndOffsetSums("c1", taskOffsetSums);
client.computeTaskLags(null, allTaskEndOffsetSums);
assertThat(client.lagFor(TASK_0_1), equalTo(1L));
}
@Test
public void shouldThrowIllegalStateExceptionIfTaskLagsMapIsNotEmpty() {
final Map<TaskId, Long> taskOffsetSums = Collections.singletonMap(TASK_0_1, 5L);
final Map<TaskId, Long> allTaskEndOffsetSums = Collections.singletonMap(TASK_0_1, 1L);
client.computeTaskLags(null, taskOffsetSums);
assertThrows(IllegalStateException.class, () -> client.computeTaskLags(null, allTaskEndOffsetSums));
}
@Test
public void shouldThrowIllegalStateExceptionOnLagForUnknownTask() {
final Map<TaskId, Long> taskOffsetSums = Collections.singletonMap(TASK_0_1, 0L);
final Map<TaskId, Long> allTaskEndOffsetSums = Collections.singletonMap(TASK_0_1, 500L);
client.addPreviousTasksAndOffsetSums("c1", taskOffsetSums);
client.computeTaskLags(null, allTaskEndOffsetSums);
assertThrows(IllegalStateException.class, () -> client.lagFor(TASK_0_2));
}
@Test
public void shouldThrowIllegalStateExceptionIfAttemptingToInitializeNonEmptyPrevTaskSets() {
client.addPreviousActiveTasks(Collections.singleton(TASK_0_1));
assertThrows(IllegalStateException.class, () -> client.initializePrevTasks(Collections.emptyMap(), false));
}
@Test
public void shouldThrowIllegalStateExceptionIfAssignedTasksForConsumerToNonClientAssignActive() {
assertThrows(IllegalStateException.class, () -> client.assignActiveToConsumer(TASK_0_0, "c1"));
}
@Test
public void shouldReturnClientTags() {
final Map<String, String> clientTags = mkMap(mkEntry("k1", "v1"));
assertEquals(clientTags, new ClientState(null, 0, clientTags).clientTags());
}
@Test
public void shouldReturnEmptyClientTagsMapByDefault() {
assertTrue(new ClientState().clientTags().isEmpty());
}
@Test
public void shouldSetProcessId() {
assertEquals(PID_1, new ClientState(PID_1, 1).processId());
assertEquals(PID_2, new ClientState(PID_2, mkMap()).processId());
assertEquals(PID_3, new ClientState(PID_3, 1, mkMap()).processId());
assertNull(new ClientState().processId());
}
@Test
public void shouldCopyState() {
final ClientState clientState = new ClientState(Set.of(new TaskId(0, 0)), Set.of(new TaskId(0, 1)), Collections.emptyMap(), EMPTY_CLIENT_TAGS, 1, processIdForInt(1));
final ClientState clientStateCopy = new ClientState(clientState);
assertEquals(clientStateCopy.processId(), clientState.processId());
assertEquals(clientStateCopy.capacity(), clientState.capacity());
assertEquals(clientStateCopy.prevActiveTasks(), clientStateCopy.prevActiveTasks());
assertEquals(clientStateCopy.prevStandbyTasks(), clientStateCopy.prevStandbyTasks());
assertThat(clientStateCopy.prevActiveTasks(), equalTo(clientState.prevActiveTasks()));
assertThat(clientStateCopy.prevStandbyTasks(), equalTo(clientState.prevStandbyTasks()));
}
}
| ClientStateTest |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/index/mapper/flattened/KeyedFlattenedLeafFieldDataTests.java | {
"start": 1158,
"end": 6796
} | class ____ extends ESTestCase {
private LeafOrdinalsFieldData delegate;
@Before
public void setUpDelegate() {
BytesRef[] allTerms = new BytesRef[60];
long[] documentOrds = new long[50];
int index = 0;
for (int ord = 0; ord < allTerms.length; ord++) {
String key;
if (ord < 20) {
key = "apple";
} else if (ord < 30) {
key = "avocado";
} else if (ord < 40) {
key = "banana";
} else if (ord < 41) {
key = "cantaloupe";
} else {
key = "cucumber";
}
allTerms[ord] = prefixedValue(key, "value" + ord);
// Do not include the term 'avocado' in the mock document.
if (key.equals("avocado") == false) {
documentOrds[index++] = ord;
}
}
delegate = new MockLeafOrdinalsFieldData(allTerms, documentOrds);
}
private BytesRef prefixedValue(String key, String value) {
String term = FlattenedFieldParser.createKeyedValue(key, value);
return new BytesRef(term);
}
public void testFindOrdinalBounds() throws IOException {
testFindOrdinalBounds("apple", delegate, 0, 19);
testFindOrdinalBounds("avocado", delegate, 20, 29);
testFindOrdinalBounds("banana", delegate, 30, 39);
testFindOrdinalBounds("berry", delegate, -1, -1);
testFindOrdinalBounds("cantaloupe", delegate, 40, 40);
testFindOrdinalBounds("cucumber", delegate, 41, 59);
LeafOrdinalsFieldData emptyDelegate = new MockLeafOrdinalsFieldData(new BytesRef[0], new long[0]);
testFindOrdinalBounds("apple", emptyDelegate, -1, -1);
BytesRef[] terms = new BytesRef[] { prefixedValue("prefix", "value") };
LeafOrdinalsFieldData singleValueDelegate = new MockLeafOrdinalsFieldData(terms, new long[0]);
testFindOrdinalBounds("prefix", singleValueDelegate, 0, 0);
testFindOrdinalBounds("prefix1", singleValueDelegate, -1, -1);
terms = new BytesRef[] {
prefixedValue("prefix", "value"),
prefixedValue("prefix1", "value"),
prefixedValue("prefix1", "value1"),
prefixedValue("prefix2", "value"),
prefixedValue("prefix3", "value") };
LeafOrdinalsFieldData oddLengthDelegate = new MockLeafOrdinalsFieldData(terms, new long[0]);
testFindOrdinalBounds("prefix", oddLengthDelegate, 0, 0);
testFindOrdinalBounds("prefix1", oddLengthDelegate, 1, 2);
testFindOrdinalBounds("prefix2", oddLengthDelegate, 3, 3);
testFindOrdinalBounds("prefix3", oddLengthDelegate, 4, 4);
}
public void testFindOrdinalBounds(String key, LeafOrdinalsFieldData delegate, long expectedMinOrd, long expectedMacOrd)
throws IOException {
BytesRef bytesKey = new BytesRef(key);
long actualMinOrd = KeyedFlattenedLeafFieldData.findMinOrd(bytesKey, delegate.getOrdinalsValues());
assertEquals(expectedMinOrd, actualMinOrd);
long actualMaxOrd = KeyedFlattenedLeafFieldData.findMaxOrd(bytesKey, delegate.getOrdinalsValues());
assertEquals(expectedMacOrd, actualMaxOrd);
}
public void testAdvanceExact() throws IOException {
LeafOrdinalsFieldData avocadoFieldData = new KeyedFlattenedLeafFieldData("avocado", delegate, MOCK_TO_SCRIPT_FIELD);
assertFalse(avocadoFieldData.getOrdinalsValues().advanceExact(0));
LeafOrdinalsFieldData bananaFieldData = new KeyedFlattenedLeafFieldData("banana", delegate, MOCK_TO_SCRIPT_FIELD);
assertTrue(bananaFieldData.getOrdinalsValues().advanceExact(0));
LeafOrdinalsFieldData nonexistentFieldData = new KeyedFlattenedLeafFieldData("berry", delegate, MOCK_TO_SCRIPT_FIELD);
assertFalse(nonexistentFieldData.getOrdinalsValues().advanceExact(0));
}
public void testNextOrd() throws IOException {
LeafOrdinalsFieldData fieldData = new KeyedFlattenedLeafFieldData("banana", delegate, MOCK_TO_SCRIPT_FIELD);
SortedSetDocValues docValues = fieldData.getOrdinalsValues();
docValues.advanceExact(0);
int retrievedOrds = 0;
for (int i = 0; i < docValues.docValueCount(); i++) {
long ord = docValues.nextOrd();
assertTrue(0 <= ord && ord < 10);
retrievedOrds++;
BytesRef expectedValue = new BytesRef("value" + (ord + 30));
BytesRef actualValue = docValues.lookupOrd(ord);
assertEquals(expectedValue, actualValue);
}
assertEquals(10, retrievedOrds);
}
public void testLookupOrd() throws IOException {
LeafOrdinalsFieldData appleFieldData = new KeyedFlattenedLeafFieldData("apple", delegate, MOCK_TO_SCRIPT_FIELD);
SortedSetDocValues appleDocValues = appleFieldData.getOrdinalsValues();
assertEquals(new BytesRef("value0"), appleDocValues.lookupOrd(0));
LeafOrdinalsFieldData cantaloupeFieldData = new KeyedFlattenedLeafFieldData("cantaloupe", delegate, MOCK_TO_SCRIPT_FIELD);
SortedSetDocValues cantaloupeDocValues = cantaloupeFieldData.getOrdinalsValues();
assertEquals(new BytesRef("value40"), cantaloupeDocValues.lookupOrd(0));
LeafOrdinalsFieldData cucumberFieldData = new KeyedFlattenedLeafFieldData("cucumber", delegate, MOCK_TO_SCRIPT_FIELD);
SortedSetDocValues cucumberDocValues = cucumberFieldData.getOrdinalsValues();
assertEquals(new BytesRef("value41"), cucumberDocValues.lookupOrd(0));
}
private static | KeyedFlattenedLeafFieldDataTests |
java | elastic__elasticsearch | x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/RdnFieldExtractor.java | {
"start": 399,
"end": 478
} | class ____ extract RDN field values from X500 principal DER encoding.
*/
public | to |
java | quarkusio__quarkus | test-framework/junit5-component/src/test/java/io/quarkus/test/component/declarative/IgnoreNestedClassesTest.java | {
"start": 795,
"end": 860
} | class ____ {
}
@Unremovable
@Singleton
static | Alpha |
java | spring-projects__spring-boot | module/spring-boot-mustache/src/test/java/org/springframework/boot/mustache/autoconfigure/MustacheAutoConfigurationTests.java | {
"start": 1745,
"end": 9036
} | class ____ {
@Test
void registerBeansForServletApp() {
configure(new WebApplicationContextRunner()).run((context) -> {
assertThat(context).hasSingleBean(Mustache.Compiler.class);
assertThat(context).hasSingleBean(MustacheResourceTemplateLoader.class);
assertThat(context).hasSingleBean(MustacheViewResolver.class);
});
}
@Test
void registerCompilerForServletApp() {
configure(new WebApplicationContextRunner()).withUserConfiguration(CustomCompilerConfiguration.class)
.run((context) -> {
assertThat(context).hasSingleBean(Mustache.Compiler.class);
assertThat(context).hasSingleBean(MustacheResourceTemplateLoader.class);
assertThat(context).hasSingleBean(MustacheViewResolver.class);
Assertions.assertThat(context.getBean(Mustache.Compiler.class).standardsMode).isTrue();
});
}
@Test
void registerBeansForReactiveApp() {
configure(new ReactiveWebApplicationContextRunner()).run((context) -> {
assertThat(context).hasSingleBean(Mustache.Compiler.class);
assertThat(context).hasSingleBean(MustacheResourceTemplateLoader.class);
assertThat(context).doesNotHaveBean(MustacheViewResolver.class);
assertThat(context)
.hasSingleBean(org.springframework.boot.mustache.reactive.view.MustacheViewResolver.class);
});
}
@Test
void registerCompilerForReactiveApp() {
configure(new ReactiveWebApplicationContextRunner()).withUserConfiguration(CustomCompilerConfiguration.class)
.run((context) -> {
assertThat(context).hasSingleBean(Mustache.Compiler.class);
assertThat(context).hasSingleBean(MustacheResourceTemplateLoader.class);
assertThat(context).doesNotHaveBean(MustacheViewResolver.class);
assertThat(context)
.hasSingleBean(org.springframework.boot.mustache.reactive.view.MustacheViewResolver.class);
Assertions.assertThat(context.getBean(Mustache.Compiler.class).standardsMode).isTrue();
});
}
@Test
void defaultServletViewResolverConfiguration() {
configure(new WebApplicationContextRunner()).run((context) -> {
MustacheViewResolver viewResolver = context.getBean(MustacheViewResolver.class);
assertThat(viewResolver).extracting("allowRequestOverride", InstanceOfAssertFactories.BOOLEAN).isFalse();
assertThat(viewResolver).extracting("allowSessionOverride", InstanceOfAssertFactories.BOOLEAN).isFalse();
assertThat(viewResolver).extracting("cache", InstanceOfAssertFactories.BOOLEAN).isFalse();
assertThat(viewResolver).extracting("charset").isEqualTo("UTF-8");
assertThat(viewResolver).extracting("contentType").isEqualTo("text/html;charset=UTF-8");
assertThat(viewResolver).extracting("exposeRequestAttributes", InstanceOfAssertFactories.BOOLEAN).isFalse();
assertThat(viewResolver).extracting("exposeSessionAttributes", InstanceOfAssertFactories.BOOLEAN).isFalse();
assertThat(viewResolver).extracting("exposeSpringMacroHelpers", InstanceOfAssertFactories.BOOLEAN).isTrue();
assertThat(viewResolver).extracting("prefix").isEqualTo("classpath:/templates/");
assertThat(viewResolver).extracting("requestContextAttribute").isNull();
assertThat(viewResolver).extracting("suffix").isEqualTo(".mustache");
});
}
@Test
void defaultReactiveViewResolverConfiguration() {
configure(new ReactiveWebApplicationContextRunner()).run((context) -> {
org.springframework.boot.mustache.reactive.view.MustacheViewResolver viewResolver = context
.getBean(org.springframework.boot.mustache.reactive.view.MustacheViewResolver.class);
assertThat(viewResolver).extracting("charset").isEqualTo("UTF-8");
assertThat(viewResolver).extracting("prefix").isEqualTo("classpath:/templates/");
assertThat(viewResolver).extracting("requestContextAttribute").isNull();
assertThat(viewResolver).extracting("suffix").isEqualTo(".mustache");
Assertions.assertThat(viewResolver.getSupportedMediaTypes())
.containsExactly(MediaType.parseMediaType("text/html;charset=UTF-8"));
});
}
@Test
void allowRequestOverrideCanBeCustomizedOnServletViewResolver() {
assertViewResolverProperty(ViewResolverKind.SERVLET, "spring.mustache.servlet.allow-request-override=true",
"allowRequestOverride", true);
}
@Test
void allowSessionOverrideCanBeCustomizedOnServletViewResolver() {
assertViewResolverProperty(ViewResolverKind.SERVLET, "spring.mustache.servlet.allow-session-override=true",
"allowSessionOverride", true);
}
@Test
void cacheCanBeCustomizedOnServletViewResolver() {
assertViewResolverProperty(ViewResolverKind.SERVLET, "spring.mustache.servlet.cache=true", "cache", true);
}
@ParameterizedTest
@EnumSource
void charsetCanBeCustomizedOnViewResolver(ViewResolverKind kind) {
assertViewResolverProperty(kind, "spring.mustache.charset=UTF-16", "charset", "UTF-16");
if (kind == ViewResolverKind.SERVLET) {
assertViewResolverProperty(kind, "spring.mustache.charset=UTF-16", "contentType",
"text/html;charset=UTF-16");
}
}
@Test
void exposeRequestAttributesCanBeCustomizedOnServletViewResolver() {
assertViewResolverProperty(ViewResolverKind.SERVLET, "spring.mustache.servlet.expose-request-attributes=true",
"exposeRequestAttributes", true);
}
@Test
void exposeSessionAttributesCanBeCustomizedOnServletViewResolver() {
assertViewResolverProperty(ViewResolverKind.SERVLET, "spring.mustache.servlet.expose-session-attributes=true",
"exposeSessionAttributes", true);
}
@Test
void exposeSpringMacroHelpersCanBeCustomizedOnServletViewResolver() {
assertViewResolverProperty(ViewResolverKind.SERVLET, "spring.mustache.servlet.expose-spring-macro-helpers=true",
"exposeSpringMacroHelpers", true);
}
@ParameterizedTest
@EnumSource
void prefixCanBeCustomizedOnViewResolver(ViewResolverKind kind) {
assertViewResolverProperty(kind, "spring.mustache.prefix=classpath:/mustache-templates/", "prefix",
"classpath:/mustache-templates/");
}
@ParameterizedTest
@EnumSource
void requestContextAttributeCanBeCustomizedOnViewResolver(ViewResolverKind kind) {
assertViewResolverProperty(kind, "spring.mustache.request-context-attribute=test", "requestContextAttribute",
"test");
}
@ParameterizedTest
@EnumSource
void suffixCanBeCustomizedOnViewResolver(ViewResolverKind kind) {
assertViewResolverProperty(kind, "spring.mustache.suffix=.tache", "suffix", ".tache");
}
@Test
void mediaTypesCanBeCustomizedOnReactiveViewResolver() {
assertViewResolverProperty(ViewResolverKind.REACTIVE,
"spring.mustache.reactive.media-types=text/xml;charset=UTF-8,text/plain;charset=UTF-16", "mediaTypes",
Arrays.asList(MediaType.parseMediaType("text/xml;charset=UTF-8"),
MediaType.parseMediaType("text/plain;charset=UTF-16")));
}
private void assertViewResolverProperty(ViewResolverKind kind, String property, String field,
Object expectedValue) {
kind.runner()
.withConfiguration(AutoConfigurations.of(MustacheAutoConfiguration.class))
.withPropertyValues(property)
.run((context) -> assertThat(context.getBean(kind.viewResolverClass())).extracting(field)
.isEqualTo(expectedValue));
}
private <T extends AbstractApplicationContextRunner<T, ?, ?>> T configure(T runner) {
return runner.withConfiguration(AutoConfigurations.of(MustacheAutoConfiguration.class));
}
@Configuration(proxyBeanMethods = false)
static | MustacheAutoConfigurationTests |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/model/internal/BinderHelper.java | {
"start": 3561,
"end": 4255
} | class ____ {
private BinderHelper() {
}
public static final Set<String> PRIMITIVE_NAMES = Set.of(
byte.class.getName(),
short.class.getName(),
int.class.getName(),
long.class.getName(),
float.class.getName(),
double.class.getName(),
char.class.getName(),
boolean.class.getName()
);
public static boolean isPrimitive(String elementTypeName) {
return PRIMITIVE_NAMES.contains( elementTypeName );
}
/**
* Here we address a fundamental problem: the {@code @JoinColumn}
* annotation specifies the referenced column in the target table
* via {@code referencedColumnName}, but Hibernate needs to know
* which property or field of the target entity | BinderHelper |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolSignature.java | {
"start": 1204,
"end": 4399
} | class ____ implements Writable {
static { // register a ctor
WritableFactories.setFactory
(ProtocolSignature.class,
new WritableFactory() {
@Override
public Writable newInstance() { return new ProtocolSignature(); }
});
}
private long version;
private int[] methods = null; // an array of method hash codes
/**
* default constructor
*/
public ProtocolSignature() {
}
/**
* Constructor
*
* @param version server version
* @param methodHashcodes hash codes of the methods supported by server
*/
public ProtocolSignature(long version, int[] methodHashcodes) {
this.version = version;
this.methods = methodHashcodes;
}
public long getVersion() {
return version;
}
public int[] getMethods() {
return methods;
}
@Override
public void readFields(DataInput in) throws IOException {
version = in.readLong();
boolean hasMethods = in.readBoolean();
if (hasMethods) {
int numMethods = in.readInt();
methods = new int[numMethods];
for (int i=0; i<numMethods; i++) {
methods[i] = in.readInt();
}
}
}
@Override
public void write(DataOutput out) throws IOException {
out.writeLong(version);
if (methods == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
out.writeInt(methods.length);
for (int method : methods) {
out.writeInt(method);
}
}
}
/**
* Calculate a method's hash code considering its method
* name, returning type, and its parameter types
*
* @param method a method
* @return its hash code
*/
static int getFingerprint(Method method) {
int hashcode = method.getName().hashCode();
hashcode = hashcode + 31*method.getReturnType().getName().hashCode();
for (Class<?> type : method.getParameterTypes()) {
hashcode = 31*hashcode ^ type.getName().hashCode();
}
return hashcode;
}
/**
* Convert an array of Method into an array of hash codes
*
* @param methods
* @return array of hash codes
*/
private static int[] getFingerprints(Method[] methods) {
if (methods == null) {
return null;
}
int[] hashCodes = new int[methods.length];
for (int i = 0; i<methods.length; i++) {
hashCodes[i] = getFingerprint(methods[i]);
}
return hashCodes;
}
/**
* Get the hash code of an array of methods
* Methods are sorted before hashcode is calculated.
* So the returned value is irrelevant of the method order in the array.
*
* @param methods an array of methods
* @return the hash code
*/
static int getFingerprint(Method[] methods) {
return getFingerprint(getFingerprints(methods));
}
/**
* Get the hash code of an array of hashcodes
* Hashcodes are sorted before hashcode is calculated.
* So the returned value is irrelevant of the hashcode order in the array.
*
* @param methods an array of methods
* @return the hash code
*/
static int getFingerprint(int[] hashcodes) {
Arrays.sort(hashcodes);
return Arrays.hashCode(hashcodes);
}
private static | ProtocolSignature |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/util/comparator/BooleanComparator.java | {
"start": 997,
"end": 2454
} | class ____ implements Comparator<Boolean>, Serializable {
/**
* A shared default instance of this comparator,
* treating {@code true} lower than {@code false}.
*/
public static final BooleanComparator TRUE_LOW = new BooleanComparator(true);
/**
* A shared default instance of this comparator,
* treating {@code true} higher than {@code false}.
*/
public static final BooleanComparator TRUE_HIGH = new BooleanComparator(false);
private final boolean trueLow;
/**
* Create a BooleanComparator that sorts boolean values based on
* the provided flag.
* <p>Alternatively, you can use the default shared instances:
* {@code BooleanComparator.TRUE_LOW} and
* {@code BooleanComparator.TRUE_HIGH}.
* @param trueLow whether to treat true as lower or higher than false
* @see #TRUE_LOW
* @see #TRUE_HIGH
*/
public BooleanComparator(boolean trueLow) {
this.trueLow = trueLow;
}
@Override
public int compare(Boolean left, Boolean right) {
int multiplier = this.trueLow ? -1 : 1;
return multiplier * Boolean.compare(left, right);
}
@Override
public boolean equals(@Nullable Object other) {
return (this == other || (other instanceof BooleanComparator that && this.trueLow == that.trueLow));
}
@Override
public int hashCode() {
return Boolean.hashCode(this.trueLow);
}
@Override
public String toString() {
return "BooleanComparator: " + (this.trueLow ? "true low" : "true high");
}
}
| BooleanComparator |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.