language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | apache__dubbo | dubbo-common/src/main/java/org/apache/dubbo/common/utils/AnnotationUtils.java | {
"start": 14995,
"end": 18284
} | class ____ of meta annotation
* @param <A> the type of required annotation
* @return {@link #findMetaAnnotation(Class, Class)}
*/
static <A extends Annotation> A findMetaAnnotation(
AnnotatedElement annotatedElement, String metaAnnotationClassName) {
return findMetaAnnotation(annotatedElement, resolveAnnotationType(annotatedElement, metaAnnotationClassName));
}
/**
* Find the meta annotation from the annotation type by meta annotation type
*
* @param annotationType the {@link Annotation annotation} type
* @param metaAnnotationType the meta annotation type
* @param <A> the type of required annotation
* @return If found, return the {@link CollectionUtils#first(Collection)} matched result, return <code>null</code>.
* If it requires more result, please consider to use {@link #findMetaAnnotations(Class, Class)}
* @see #findMetaAnnotations(Class, Class)
*/
static <A extends Annotation> A findMetaAnnotation(
Class<? extends Annotation> annotationType, Class<A> metaAnnotationType) {
return first(findMetaAnnotations(annotationType, metaAnnotationType));
}
/**
* Find the meta annotation from the annotated element by meta annotation type
*
* @param annotatedElement the annotated element
* @param metaAnnotationType the meta annotation type
* @param <A> the type of required annotation
* @return If found, return the {@link CollectionUtils#first(Collection)} matched result, return <code>null</code>.
* If it requires more result, please consider to use {@link #findMetaAnnotations(AnnotatedElement, Class)}
* @see #findMetaAnnotations(AnnotatedElement, Class)
*/
static <A extends Annotation> A findMetaAnnotation(AnnotatedElement annotatedElement, Class<A> metaAnnotationType) {
return first(findMetaAnnotations(annotatedElement, metaAnnotationType));
}
/**
* Tests the annotated element is annotated the specified annotations or not
*
* @param type the annotated type
* @param matchAll If <code>true</code>, checking all annotation types are present or not, or match any
* @param annotationTypes the specified annotation types
* @return If the specified annotation types are present, return <code>true</code>, or <code>false</code>
*/
static boolean isAnnotationPresent(
Class<?> type, boolean matchAll, Class<? extends Annotation>... annotationTypes) {
int size = annotationTypes == null ? 0 : annotationTypes.length;
if (size < 1) {
return false;
}
int presentCount = 0;
for (int i = 0; i < size; i++) {
Class<? extends Annotation> annotationType = annotationTypes[i];
if (findAnnotation(type, annotationType) != null || findMetaAnnotation(type, annotationType) != null) {
presentCount++;
}
}
return matchAll ? presentCount == size : presentCount > 0;
}
/**
* Tests the annotated element is annotated the specified annotation or not
*
* @param type the annotated type
* @param annotationType the | name |
java | apache__kafka | clients/src/main/java/org/apache/kafka/clients/admin/DescribeClientQuotasResult.java | {
"start": 1146,
"end": 1947
} | class ____ {
private final KafkaFuture<Map<ClientQuotaEntity, Map<String, Double>>> entities;
/**
* Maps an entity to its configured quota value(s). Note if no value is defined for a quota
* type for that entity's config, then it is not included in the resulting value map.
*
* @param entities future for the collection of entities that matched the filter
*/
public DescribeClientQuotasResult(KafkaFuture<Map<ClientQuotaEntity, Map<String, Double>>> entities) {
this.entities = entities;
}
/**
* Returns a map from quota entity to a future which can be used to check the status of the operation.
*/
public KafkaFuture<Map<ClientQuotaEntity, Map<String, Double>>> entities() {
return entities;
}
}
| DescribeClientQuotasResult |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/common/functions/RichCoGroupFunction.java | {
"start": 1425,
"end": 1744
} | class ____<IN1, IN2, OUT> extends AbstractRichFunction
implements CoGroupFunction<IN1, IN2, OUT> {
private static final long serialVersionUID = 1L;
@Override
public abstract void coGroup(Iterable<IN1> first, Iterable<IN2> second, Collector<OUT> out)
throws Exception;
}
| RichCoGroupFunction |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/JDBCException.java | {
"start": 331,
"end": 2317
} | class ____ extends HibernateException {
private final SQLException sqlException;
private final String message;
private final String sql;
/**
* Constructs a {@code JDBCException} using the given information.
*
* @param message The message explaining the exception condition
* @param cause The underlying cause
*/
public JDBCException(String message, SQLException cause) {
super( message, cause );
this.message = message;
this.sqlException = cause;
this.sql = null;
}
/**
* Constructs a {@code JDBCException} using the given information.
*
* @param message The message explaining the exception condition
* @param cause The underlying cause
* @param sql The sql being executed when the exception occurred
*/
public JDBCException(String message, SQLException cause, String sql) {
super( sql == null ? message : message + " [" + sql + "]", cause );
this.message = message;
this.sqlException = cause;
this.sql = sql;
}
/**
* Get the X/Open or ANSI SQL SQLState error code from the underlying {@link SQLException}.
*
* @return The X/Open or ANSI SQL SQLState error code; may return null.
*
* @see SQLException#getSQLState()
*/
public String getSQLState() {
return sqlException.getSQLState();
}
/**
* Get the vendor specific error code from the underlying {@link SQLException}.
*
* @return The vendor specific error code
*
* @see SQLException#getErrorCode()
*/
public int getErrorCode() {
return sqlException.getErrorCode();
}
/**
* Get the underlying {@link SQLException}.
*
* @return The SQLException
*/
public SQLException getSQLException() {
return sqlException;
}
/**
* Get the actual SQL statement being executed when the exception occurred.
*
* @return The SQL statement; may return null.
*/
public String getSQL() {
return sql;
}
/**
* @return The error message without the SQL statement appended
*/
public String getErrorMessage() {
return message;
}
}
| JDBCException |
java | apache__camel | components/camel-aws/camel-aws2-kinesis/src/main/java/org/apache/camel/component/aws2/kinesis/client/impl/KinesisAsyncClientStandardImpl.java | {
"start": 1893,
"end": 5188
} | class ____ implements KinesisAsyncInternalClient {
private static final Logger LOG = LoggerFactory.getLogger(KinesisAsyncClientStandardImpl.class);
private Kinesis2Configuration configuration;
/**
* Constructor that uses the config file.
*/
public KinesisAsyncClientStandardImpl(Kinesis2Configuration configuration) {
LOG.trace("Creating an AWS Async Kinesis manager using static credentials.");
this.configuration = configuration;
}
/**
* Getting the Kinesis Async client that is used.
*
* @return Amazon Kinesis Async Client.
*/
@Override
public KinesisAsyncClient getKinesisAsyncClient() {
var clientBuilder = KinesisAsyncClient.builder();
var isClientConfigFound = false;
SdkAsyncHttpClient.Builder httpClientBuilder = null;
if (ObjectHelper.isNotEmpty(configuration.getProxyHost()) && ObjectHelper.isNotEmpty(configuration.getProxyPort())) {
var proxyConfig = ProxyConfiguration
.builder()
.scheme(configuration.getProxyProtocol().toString())
.host(configuration.getProxyHost())
.port(configuration.getProxyPort())
.build();
httpClientBuilder = NettyNioAsyncHttpClient
.builder()
.proxyConfiguration(proxyConfig);
isClientConfigFound = true;
}
if (Objects.nonNull(configuration.getAccessKey()) && Objects.nonNull(configuration.getSecretKey())) {
var cred = AwsBasicCredentials.create(configuration.getAccessKey(), configuration.getSecretKey());
if (isClientConfigFound) {
clientBuilder = clientBuilder
.httpClientBuilder(httpClientBuilder)
.credentialsProvider(StaticCredentialsProvider.create(cred));
} else {
clientBuilder = clientBuilder.credentialsProvider(StaticCredentialsProvider.create(cred));
}
} else {
if (!isClientConfigFound) {
clientBuilder = clientBuilder.httpClientBuilder(null);
}
}
if (ObjectHelper.isNotEmpty(configuration.getRegion())) {
clientBuilder = clientBuilder.region(Region.of(configuration.getRegion()));
}
if (configuration.isOverrideEndpoint()) {
clientBuilder.endpointOverride(URI.create(configuration.getUriEndpointOverride()));
}
if (configuration.isTrustAllCertificates()) {
if (httpClientBuilder == null) {
httpClientBuilder = NettyNioAsyncHttpClient.builder();
}
SdkAsyncHttpClient ahc = httpClientBuilder
.buildWithDefaults(AttributeMap
.builder()
.put(
SdkHttpConfigurationOption.TRUST_ALL_CERTIFICATES,
Boolean.TRUE)
.build());
// set created http client to use instead of builder
clientBuilder.httpClient(ahc);
clientBuilder.httpClientBuilder(null);
}
return clientBuilder.build();
}
}
| KinesisAsyncClientStandardImpl |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/argumentselectiondefects/ParameterTest.java | {
"start": 8033,
"end": 8627
} | class ____ {
abstract void target(Object o);
void test() {
new Object() {
void test() {
// BUG: Diagnostic contains: Object
target(this);
}
};
}
}
""")
.doTest();
}
@Test
public void getName_usesOwner_fromGetMethodInAnonymousClass() {
CompilationTestHelper.newInstance(PrintNameOfFirstArgument.class, getClass())
.addSourceLines(
"Test.java",
"""
abstract | Test |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/oracle/createTable/OracleCreateTableTest38.java | {
"start": 1067,
"end": 4014
} | class ____ extends OracleTest {
public void test_types() throws Exception {
String sql = //
"CREATE TABLE list_customers "
+ " ( customer_id NUMBER(6)"
+ " , cust_first_name VARCHAR2(20) "
+ " , cust_last_name VARCHAR2(20)"
+ " , cust_address CUST_ADDRESS_TYP"
+ " , nls_territory VARCHAR2(30)"
+ " , cust_email VARCHAR2(40))"
+ " PARTITION BY LIST (nls_territory) ("
+ " PARTITION asia VALUES ('CHINA', 'THAILAND'),"
+ " PARTITION europe VALUES ('GERMANY', 'ITALY', 'SWITZERLAND'),"
+ " PARTITION west VALUES ('AMERICA'),"
+ " PARTITION east VALUES ('INDIA'),"
+ " PARTITION rest VALUES (DEFAULT));";
OracleStatementParser parser = new OracleStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement stmt = statementList.get(0);
print(statementList);
assertEquals(1, statementList.size());
assertEquals("CREATE TABLE list_customers (\n" +
"\tcustomer_id NUMBER(6),\n" +
"\tcust_first_name VARCHAR2(20),\n" +
"\tcust_last_name VARCHAR2(20),\n" +
"\tcust_address CUST_ADDRESS_TYP,\n" +
"\tnls_territory VARCHAR2(30),\n" +
"\tcust_email VARCHAR2(40)\n" +
")\n" +
"PARTITION BY LIST (nls_territory) (\n" +
"\tPARTITION asia VALUES ('CHINA', 'THAILAND'),\n" +
"\tPARTITION europe VALUES ('GERMANY', 'ITALY', 'SWITZERLAND'),\n" +
"\tPARTITION west VALUES ('AMERICA'),\n" +
"\tPARTITION east VALUES ('INDIA'),\n" +
"\tPARTITION rest VALUES (DEFAULT)\n" +
");",
SQLUtils.toSQLString(stmt, JdbcConstants.ORACLE));
OracleSchemaStatVisitor visitor = new OracleSchemaStatVisitor();
stmt.accept(visitor);
System.out.println("Tables : " + visitor.getTables());
System.out.println("fields : " + visitor.getColumns());
System.out.println("coditions : " + visitor.getConditions());
System.out.println("relationships : " + visitor.getRelationships());
System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(1, visitor.getTables().size());
assertEquals(6, visitor.getColumns().size());
assertTrue(visitor.getColumns().contains(new TableStat.Column("list_customers", "customer_id")));
}
}
| OracleCreateTableTest38 |
java | dropwizard__dropwizard | dropwizard-health/src/main/java/io/dropwizard/health/check/tcp/TcpHealthCheck.java | {
"start": 336,
"end": 2657
} | class ____ extends HealthCheck {
private static final Logger LOGGER = LoggerFactory.getLogger(TcpHealthCheck.class);
private static final Duration DEFAULT_CONNECTION_TIMEOUT = Duration.ofSeconds(2);
@NonNull
private final String host;
private final int port;
private final Duration connectionTimeout;
public TcpHealthCheck(@NonNull final String host,
final int port) {
this(host, port, DEFAULT_CONNECTION_TIMEOUT);
}
public TcpHealthCheck(@NonNull final String host,
final int port,
final Duration connectionTimeout) {
this.host = Objects.requireNonNull(host);
this.port = port;
if (connectionTimeout.isNegative()) {
throw new IllegalStateException("connectionTimeout must be a non-negative value.");
}
if (connectionTimeout.toMillis() > Integer.MAX_VALUE) {
throw new IllegalStateException("Cannot configure a connectionTimeout greater than the max integer value");
}
this.connectionTimeout = connectionTimeout;
}
@Override
protected Result check() throws IOException {
final boolean isHealthy = tcpCheck(host, port);
if (isHealthy) {
LOGGER.debug("Health check against url={}:{} successful", host, port);
return Result.healthy();
}
LOGGER.debug("Health check against url={}:{} failed", host, port);
return Result.unhealthy("TCP health check against host=%s port=%s failed", host, port);
}
/**
* Performs a health check via TCP against an external dependency.
* By default, uses the Java {@link Socket} API, but can be overridden to allow for different behavior.
*
* @param host the host to check.
* @param port the port to check.
* @return whether the check was successful or not.
*/
protected boolean tcpCheck(final String host, final int port) throws IOException {
try (Socket socket = new Socket()) {
socket.connect(new InetSocketAddress(host, port), (int) connectionTimeout.toMillis());
return socket.isConnected();
}
}
// visible for testing
Duration getConnectionTimeout() {
return connectionTimeout;
}
}
| TcpHealthCheck |
java | alibaba__nacos | naming/src/main/java/com/alibaba/nacos/naming/healthcheck/v2/processor/TcpHealthCheckProcessor.java | {
"start": 5841,
"end": 8123
} | class ____ implements Runnable {
SelectionKey key;
public PostProcessor(SelectionKey key) {
this.key = key;
}
@Override
public void run() {
Beat beat = (Beat) key.attachment();
SocketChannel channel = (SocketChannel) key.channel();
try {
if (!beat.isHealthy()) {
//invalid beat means this server is no longer responsible for the current service
key.cancel();
key.channel().close();
beat.finishCheck();
return;
}
if (key.isValid() && key.isConnectable()) {
//connected
channel.finishConnect();
beat.finishCheck(true, false, System.currentTimeMillis() - beat.getTask().getStartTime(),
"tcp:ok+");
}
if (key.isValid() && key.isReadable()) {
//disconnected
ByteBuffer buffer = ByteBuffer.allocate(128);
if (channel.read(buffer) == -1) {
key.cancel();
key.channel().close();
} else {
// not terminate request, ignore
SRV_LOG.warn(
"Tcp check ok, but the connected server responses some msg. Connection won't be closed.");
}
}
} catch (ConnectException e) {
// unable to connect, possibly port not opened
beat.finishCheck(false, true, switchDomain.getTcpHealthParams().getMax(),
"tcp:unable2connect:" + e.getMessage());
} catch (Exception e) {
beat.finishCheck(false, false, switchDomain.getTcpHealthParams().getMax(),
"tcp:error:" + e.getMessage());
try {
key.cancel();
key.channel().close();
} catch (Exception ignore) {
}
}
}
}
private | PostProcessor |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/context/properties/bind/ValueObjectBinderTests.java | {
"start": 22871,
"end": 23032
} | class ____<T> {
private final T value;
GenericValue(T value) {
this.value = value;
}
T getValue() {
return this.value;
}
}
static | GenericValue |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/targetthis/NestedMapper.java | {
"start": 337,
"end": 526
} | interface ____ {
NestedMapper INSTANCE = Mappers.getMapper( NestedMapper.class );
@Mapping( target = ".", source = "customer.item" )
OrderItem map(OrderDTO order);
}
| NestedMapper |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/NegIntsEvaluator.java | {
"start": 1125,
"end": 4051
} | class ____ implements EvalOperator.ExpressionEvaluator {
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(NegIntsEvaluator.class);
private final Source source;
private final EvalOperator.ExpressionEvaluator v;
private final DriverContext driverContext;
private Warnings warnings;
public NegIntsEvaluator(Source source, EvalOperator.ExpressionEvaluator v,
DriverContext driverContext) {
this.source = source;
this.v = v;
this.driverContext = driverContext;
}
@Override
public Block eval(Page page) {
try (IntBlock vBlock = (IntBlock) v.eval(page)) {
IntVector vVector = vBlock.asVector();
if (vVector == null) {
return eval(page.getPositionCount(), vBlock);
}
return eval(page.getPositionCount(), vVector);
}
}
@Override
public long baseRamBytesUsed() {
long baseRamBytesUsed = BASE_RAM_BYTES_USED;
baseRamBytesUsed += v.baseRamBytesUsed();
return baseRamBytesUsed;
}
public IntBlock eval(int positionCount, IntBlock vBlock) {
try(IntBlock.Builder result = driverContext.blockFactory().newIntBlockBuilder(positionCount)) {
position: for (int p = 0; p < positionCount; p++) {
switch (vBlock.getValueCount(p)) {
case 0:
result.appendNull();
continue position;
case 1:
break;
default:
warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value"));
result.appendNull();
continue position;
}
int v = vBlock.getInt(vBlock.getFirstValueIndex(p));
try {
result.appendInt(Neg.processInts(v));
} catch (ArithmeticException e) {
warnings().registerException(e);
result.appendNull();
}
}
return result.build();
}
}
public IntBlock eval(int positionCount, IntVector vVector) {
try(IntBlock.Builder result = driverContext.blockFactory().newIntBlockBuilder(positionCount)) {
position: for (int p = 0; p < positionCount; p++) {
int v = vVector.getInt(p);
try {
result.appendInt(Neg.processInts(v));
} catch (ArithmeticException e) {
warnings().registerException(e);
result.appendNull();
}
}
return result.build();
}
}
@Override
public String toString() {
return "NegIntsEvaluator[" + "v=" + v + "]";
}
@Override
public void close() {
Releasables.closeExpectNoException(v);
}
private Warnings warnings() {
if (warnings == null) {
this.warnings = Warnings.createWarnings(
driverContext.warningsMode(),
source.source().getLineNumber(),
source.source().getColumnNumber(),
source.text()
);
}
return warnings;
}
static | NegIntsEvaluator |
java | apache__dubbo | dubbo-common/src/main/java/org/apache/dubbo/common/convert/multiple/StringToNavigableSetConverter.java | {
"start": 1096,
"end": 1427
} | class ____ extends StringToIterableConverter<NavigableSet> {
public StringToNavigableSetConverter(FrameworkModel frameworkModel) {
super(frameworkModel);
}
@Override
protected NavigableSet createMultiValue(int size, Class<?> multiValueType) {
return new TreeSet();
}
}
| StringToNavigableSetConverter |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/UnregisteredMetricGroups.java | {
"start": 8261,
"end": 9032
} | class ____ extends InternalOperatorMetricGroup {
private static final OperatorID DEFAULT_OPERATOR_ID = new OperatorID(0, 0);
private static final String DEFAULT_OPERATOR_NAME = "UnregisteredOperator";
protected UnregisteredOperatorMetricGroup() {
this(new UnregisteredTaskMetricGroup());
}
UnregisteredOperatorMetricGroup(TaskMetricGroup parent) {
super(
NoOpMetricRegistry.INSTANCE,
parent,
DEFAULT_OPERATOR_ID,
DEFAULT_OPERATOR_NAME,
Collections.emptyMap());
}
}
/** A safe drop-in replacement for {@link JobManagerOperatorMetricGroup}s. */
public static | UnregisteredOperatorMetricGroup |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/engine/jdbc/env/spi/QualifiedObjectNameFormatter.java | {
"start": 482,
"end": 1248
} | interface ____ {
/**
* Render a formatted a table name
*
* @param qualifiedTableName The table name
* @param dialect The dialect
*
* @return The formatted name,
*/
String format(QualifiedTableName qualifiedTableName, Dialect dialect);
/**
* Render a formatted sequence name
*
* @param qualifiedSequenceName The sequence name
* @param dialect The dialect
*
* @return The formatted name
*/
String format(QualifiedSequenceName qualifiedSequenceName, Dialect dialect);
/**
* Render a formatted non-table and non-sequence qualified name
*
* @param qualifiedName The name
* @param dialect The dialect
*
* @return The formatted name
*/
String format(QualifiedName qualifiedName, Dialect dialect);
}
| QualifiedObjectNameFormatter |
java | apache__rocketmq | remoting/src/main/java/org/apache/rocketmq/remoting/protocol/header/SendMessageRequestHeader.java | {
"start": 1675,
"end": 6548
} | class ____ extends TopicQueueRequestHeader {
@CFNotNull
private String producerGroup;
@CFNotNull
@RocketMQResource(ResourceType.TOPIC)
private String topic;
@CFNotNull
private String defaultTopic;
@CFNotNull
private Integer defaultTopicQueueNums;
@CFNotNull
private Integer queueId;
@CFNotNull
private Integer sysFlag;
@CFNotNull
private Long bornTimestamp;
@CFNotNull
private Integer flag;
@CFNullable
private String properties;
@CFNullable
private Integer reconsumeTimes;
@CFNullable
private Boolean unitMode;
@CFNullable
private Boolean batch;
private Integer maxReconsumeTimes;
@Override
public void checkFields() throws RemotingCommandException {
}
public String getProducerGroup() {
return producerGroup;
}
public void setProducerGroup(String producerGroup) {
this.producerGroup = producerGroup;
}
@Override
public String getTopic() {
return topic;
}
@Override
public void setTopic(String topic) {
this.topic = topic;
}
public String getDefaultTopic() {
return defaultTopic;
}
public void setDefaultTopic(String defaultTopic) {
this.defaultTopic = defaultTopic;
}
public Integer getDefaultTopicQueueNums() {
return defaultTopicQueueNums;
}
public void setDefaultTopicQueueNums(Integer defaultTopicQueueNums) {
this.defaultTopicQueueNums = defaultTopicQueueNums;
}
@Override
public Integer getQueueId() {
return queueId;
}
@Override
public void setQueueId(Integer queueId) {
this.queueId = queueId;
}
public Integer getSysFlag() {
return sysFlag;
}
public void setSysFlag(Integer sysFlag) {
this.sysFlag = sysFlag;
}
public Long getBornTimestamp() {
return bornTimestamp;
}
public void setBornTimestamp(Long bornTimestamp) {
this.bornTimestamp = bornTimestamp;
}
public Integer getFlag() {
return flag;
}
public void setFlag(Integer flag) {
this.flag = flag;
}
public String getProperties() {
return properties;
}
public void setProperties(String properties) {
this.properties = properties;
}
public Integer getReconsumeTimes() {
if (null == reconsumeTimes) {
return 0;
}
return reconsumeTimes;
}
public void setReconsumeTimes(Integer reconsumeTimes) {
this.reconsumeTimes = reconsumeTimes;
}
public boolean isUnitMode() {
if (null == unitMode) {
return false;
}
return unitMode;
}
public void setUnitMode(Boolean isUnitMode) {
this.unitMode = isUnitMode;
}
public Integer getMaxReconsumeTimes() {
return maxReconsumeTimes;
}
public void setMaxReconsumeTimes(final Integer maxReconsumeTimes) {
this.maxReconsumeTimes = maxReconsumeTimes;
}
public boolean isBatch() {
if (null == batch) {
return false;
}
return batch;
}
public void setBatch(Boolean batch) {
this.batch = batch;
}
public static SendMessageRequestHeader parseRequestHeader(RemotingCommand request) throws RemotingCommandException {
SendMessageRequestHeaderV2 requestHeaderV2 = null;
SendMessageRequestHeader requestHeader = null;
switch (request.getCode()) {
case RequestCode.SEND_BATCH_MESSAGE:
case RequestCode.SEND_MESSAGE_V2:
requestHeaderV2 = request.decodeCommandCustomHeader(SendMessageRequestHeaderV2.class);
case RequestCode.SEND_MESSAGE:
if (null == requestHeaderV2) {
requestHeader = request.decodeCommandCustomHeader(SendMessageRequestHeader.class);
} else {
requestHeader = SendMessageRequestHeaderV2.createSendMessageRequestHeaderV1(requestHeaderV2);
}
default:
break;
}
return requestHeader;
}
@Override
public String toString() {
return MoreObjects.toStringHelper(this)
.add("producerGroup", producerGroup)
.add("topic", topic)
.add("defaultTopic", defaultTopic)
.add("defaultTopicQueueNums", defaultTopicQueueNums)
.add("queueId", queueId)
.add("sysFlag", sysFlag)
.add("bornTimestamp", bornTimestamp)
.add("flag", flag)
.add("properties", properties)
.add("reconsumeTimes", reconsumeTimes)
.add("unitMode", unitMode)
.add("batch", batch)
.add("maxReconsumeTimes", maxReconsumeTimes)
.toString();
}
}
| SendMessageRequestHeader |
java | apache__logging-log4j2 | log4j-1.2-api/src/test/java/org/apache/log4j/PropertyConfiguratorTest.java | {
"start": 4408,
"end": 5579
} | class ____ extends AppenderSkeleton {
private RollingPolicy rollingPolicy;
private TriggeringPolicy triggeringPolicy;
private boolean append;
public RollingFileAppender() {}
@Override
public void append(final LoggingEvent event) {}
@Override
public void close() {}
public boolean getAppend() {
return append;
}
public RollingPolicy getRollingPolicy() {
return rollingPolicy;
}
public TriggeringPolicy getTriggeringPolicy() {
return triggeringPolicy;
}
@Override
public boolean requiresLayout() {
return true;
}
public void setAppend(final boolean val) {
append = val;
}
public void setRollingPolicy(final RollingPolicy policy) {
rollingPolicy = policy;
}
public void setTriggeringPolicy(final TriggeringPolicy policy) {
triggeringPolicy = policy;
}
}
/**
* Mock definition of org.apache.log4j.rolling.RollingPolicy from extras companion.
*/
public static | RollingFileAppender |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/common/lucene/search/XMoreLikeThis.java | {
"start": 23478,
"end": 24014
} | class ____ {
String word;
String topField;
float score;
ScoreTerm(String word, String topField, float score) {
this.word = word;
this.topField = topField;
this.score = score;
}
void update(String word, String topField, float score) {
this.word = word;
this.topField = topField;
this.score = score;
}
}
/**
* Use for frequencies and to avoid renewing Integers.
*/
private static | ScoreTerm |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/script/CtxMap.java | {
"start": 1144,
"end": 2467
} | class ____<T extends Metadata> extends AbstractMap<String, Object> {
protected static final String SOURCE = "_source";
protected Map<String, Object> source;
protected final T metadata;
/**
* Create CtxMap from a source and metadata
*
* @param source the source document map
* @param metadata the metadata map
*/
public CtxMap(Map<String, Object> source, T metadata) {
this.source = source;
this.metadata = metadata;
Set<String> badKeys = Sets.intersection(this.metadata.keySet(), this.source.keySet());
if (badKeys.size() > 0) {
throw new IllegalArgumentException(
"unexpected metadata ["
+ badKeys.stream().sorted().map(k -> k + ":" + this.source.get(k)).collect(Collectors.joining(", "))
+ "] in source"
);
}
}
/**
* Does this access to the internal {@link #source} map occur directly via ctx? ie {@code ctx['myField']}.
* Or does it occur via the {@link #SOURCE} key? ie {@code ctx['_source']['myField']}.
*
* Defaults to indirect, {@code ctx['_source']}
*/
protected boolean directSourceAccess() {
return false;
}
/**
* get the source map, if externally modified then the guarantees of this | CtxMap |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/jpa/boot/internal/EntityManagerFactoryBuilderImpl.java | {
"start": 15107,
"end": 15620
} | class ____ to the environment; for the time being this only has any effect in EE
// container situations, calling back into PersistenceUnitInfo#addClassTransformer
persistenceUnit.pushClassTransformer( enhancementContext );
final var classTransformer = persistenceUnit.getClassTransformer();
if ( classTransformer != null ) {
final var classLoader = persistenceUnit.getTempClassLoader();
if ( classLoader == null ) {
throw new PersistenceException( "Enhancement requires a temp | transformation |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/mysql/create/MySqlCreateUserTest_1.java | {
"start": 970,
"end": 2446
} | class ____ extends MysqlTest {
public void test_0() throws Exception {
String sql = "CREATE USER 'jeffrey'@'localhost';";
MySqlStatementParser parser = new MySqlStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement stmt = statementList.get(0);
// print(statementList);
assertEquals(1, statementList.size());
MySqlSchemaStatVisitor visitor = new MySqlSchemaStatVisitor();
stmt.accept(visitor);
assertEquals("CREATE USER 'jeffrey'@'localhost';", //
SQLUtils.toMySqlString(stmt));
assertEquals("create user 'jeffrey'@'localhost';", //
SQLUtils.toMySqlString(stmt, SQLUtils.DEFAULT_LCASE_FORMAT_OPTION));
// System.out.println("Tables : " + visitor.getTables());
// System.out.println("fields : " + visitor.getColumns());
// System.out.println("coditions : " + visitor.getConditions());
// System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(0, visitor.getTables().size());
assertEquals(0, visitor.getColumns().size());
assertEquals(0, visitor.getConditions().size());
// assertTrue(visitor.getTables().containsKey(new TableStat.Name("City")));
// assertTrue(visitor.getTables().containsKey(new TableStat.Name("t2")));
// assertTrue(visitor.getColumns().contains(new Column("t2", "id")));
}
}
| MySqlCreateUserTest_1 |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsNavBlock.java | {
"start": 1013,
"end": 1190
} | class ____ extends HtmlBlock {
@Override
protected void render(Block html) {
html
.div("#nav")
.h3().__("Logs").__()
.__();
}
}
| AggregatedLogsNavBlock |
java | elastic__elasticsearch | x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/UnigramTokenizerTests.java | {
"start": 787,
"end": 7917
} | class ____ extends BaseTokenStreamTestCase {
private static final String UNKNOWN_TOKEN = "<unk>";
private static final List<String> NEVER_SPLIT = List.of("<mask>");
public void testSimpleTokenization() throws IOException {
TestNLPAnalyzer analyzer = new TestNLPAnalyzer(
List.of(UNKNOWN_TOKEN, PREFIX + "a", "b", "c", "d", "cd", PREFIX + "ab", PREFIX + "abc", PREFIX + "abcd", "<mask>"),
List.of(0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 5.0, 10.0, 0.0),
UNKNOWN_TOKEN,
new PrecompiledCharMapNormalizer.Config(new int[0], "")
);
assertAnalyzesToNoCharFilter(analyzer, "", new String[0]);
assertAnalyzesToNoCharFilter(analyzer, "abcd", new String[] { PREFIX + "abcd" });
}
public void testLessSimpleTokenization() throws IOException {
TestNLPAnalyzer analyzer = new TestNLPAnalyzer(
List.of(
UNKNOWN_TOKEN,
PREFIX + "ab",
"cd",
PREFIX + "abc",
"a",
"b",
"c",
"ABC",
"abcdabcd",
"q",
"r",
"qr",
"<mask>",
"aa",
"aaaa"
),
List.of(0.0, 0.0, -0.1, -0.2, -0.3, -0.4, -0.5, -0.5, 20.0, 20.5, 20.5, -0.5, 0.0, -13.5467, -14.9644),
UNKNOWN_TOKEN,
new PrecompiledCharMapNormalizer.Config(new int[0], "")
);
assertAnalyzesToNoCharFilter(analyzer, "", new String[0]);
assertAnalyzesToNoCharFilter(analyzer, "abcd", new String[] { PREFIX + "ab", "cd" });
assertAnalyzesToNoCharFilter(analyzer, "abc", new String[] { PREFIX + "abc" });
assertAnalyzesToNoCharFilter(analyzer, "AB", new String[] { PREFIX + "AB" });
assertAnalyzesToNoCharFilter(analyzer, "abcc", new String[] { PREFIX + "abc", "c" });
assertAnalyzesToNoCharFilter(analyzer, " \nabcd \n\n abcc \n", new String[] { PREFIX + "ab", "cd", PREFIX + "abc", "c" });
}
public void testLessSimpleTokenizationForRepeatingCharacters() throws IOException {
TestNLPAnalyzer analyzer = new TestNLPAnalyzer(
List.of(UNKNOWN_TOKEN, "HH", "HHHH", PREFIX + "H", "HHH", PREFIX + "HH", PREFIX, PREFIX + "HHH"),
List.of(0.0, -13.5467, -14.9644, -9.17478, -15.1165, -13.201, -7.97025, -15.602),
UNKNOWN_TOKEN,
PrecompiledCharMapNormalizer.fromBase64EncodedResource(
"/org/elasticsearch/xpack/ml/inference.nlp.tokenizers/spm_precompiled_normalizer.txt"
)
);
assertAnalyzesToNoCharFilter(analyzer, "HHHHHHHHHHHH", new String[] { PREFIX, "HHHH", "HHHH", "HHHH" });
assertAnalyzesToNoCharFilter(analyzer, "HHHHHHHHHHH", new String[] { PREFIX + "HHH", "HHHH", "HHHH" });
assertAnalyzesToNoCharFilter(analyzer, "HHHHHHHHHH", new String[] { PREFIX + "HH", "HHHH", "HHHH" });
assertAnalyzesToNoCharFilter(analyzer, "HHHHHHHHH", new String[] { PREFIX + "H", "HHHH", "HHHH" });
assertAnalyzesToNoCharFilter(analyzer, "HHHHHHHH", new String[] { PREFIX, "HHHH", "HHHH" });
assertAnalyzesToNoCharFilter(analyzer, "HHHHHHH", new String[] { PREFIX + "HHH", "HHHH" });
assertAnalyzesToNoCharFilter(analyzer, "HHHHHH", new String[] { PREFIX + "HH", "HHHH" });
assertAnalyzesToNoCharFilter(analyzer, "HHHHH", new String[] { PREFIX + "H", "HHHH" });
assertAnalyzesToNoCharFilter(analyzer, "HHHH", new String[] { PREFIX, "HHHH" });
assertAnalyzesToNoCharFilter(analyzer, "HHH", new String[] { PREFIX + "HHH" });
assertAnalyzesToNoCharFilter(analyzer, "HH", new String[] { PREFIX + "HH" });
assertAnalyzesToNoCharFilter(analyzer, "H", new String[] { PREFIX + "H" });
}
public void testLessSimpleTokenizationWithNeverSplit() throws IOException {
TestNLPAnalyzer analyzer = new TestNLPAnalyzer(
List.of(
UNKNOWN_TOKEN,
PREFIX + "ab",
"cd",
PREFIX + "cd",
PREFIX + "abc",
"a",
"b",
"c",
"ABC",
"abcdabcd",
"q",
"r",
"qr",
"<mask>"
),
List.of(0.0, 0.0, -0.1, -0.2, -0.2, -0.3, -0.4, -0.5, -0.5, 20.0, 20.5, 20.5, -0.5, 0.0),
UNKNOWN_TOKEN,
new PrecompiledCharMapNormalizer.Config(new int[0], "")
);
assertAnalyzesToNoCharFilter(analyzer, "<mask>", new String[] { "<mask>" });
assertAnalyzesToNoCharFilter(analyzer, "<mask>abcd<mask>", new String[] { "<mask>", PREFIX + "ab", "cd", "<mask>" });
assertAnalyzesToNoCharFilter(
analyzer,
"<mask> \nab<mask>cd \n\n abcc<mask> \n",
new String[] { "<mask>", PREFIX + "ab", "<mask>", PREFIX + "cd", PREFIX + "abc", "c", "<mask>" }
);
}
public void testTriePrefixMatch() {
List<BytesRef> inputs = new ArrayList<>(
List.of(
new BytesRef("a"),
new BytesRef("b"),
new BytesRef("c"),
new BytesRef("d"),
new BytesRef("cd"),
new BytesRef("ab"),
new BytesRef("abc"),
new BytesRef("abcd")
)
);
Collections.shuffle(inputs, random());
UnigramTokenizer.BytesTrie bytesTrie = UnigramTokenizer.BytesTrie.build(inputs);
String input = "abcd";
assertThat(
bytesTrie.matchingPrefixes(new BytesRef(input)).stream().map(BytesRef::utf8ToString).toList(),
contains("a", "ab", "abc", "abcd")
);
input = "bcd";
assertThat(bytesTrie.matchingPrefixes(new BytesRef(input)).stream().map(BytesRef::utf8ToString).toList(), contains("b"));
input = "cd";
assertThat(bytesTrie.matchingPrefixes(new BytesRef(input)).stream().map(BytesRef::utf8ToString).toList(), contains("c", "cd"));
input = "d";
assertThat(bytesTrie.matchingPrefixes(new BytesRef(input)).stream().map(BytesRef::utf8ToString).toList(), contains("d"));
input = "";
assertThat(bytesTrie.matchingPrefixes(new BytesRef(input)).stream().map(BytesRef::utf8ToString).toList(), empty());
input = "zabcd";
assertThat(bytesTrie.matchingPrefixes(new BytesRef(input)).stream().map(BytesRef::utf8ToString).toList(), empty());
input = "azbcd";
assertThat(bytesTrie.matchingPrefixes(new BytesRef(input)).stream().map(BytesRef::utf8ToString).toList(), contains("a"));
input = "abzcd";
assertThat(bytesTrie.matchingPrefixes(new BytesRef(input)).stream().map(BytesRef::utf8ToString).toList(), contains("a", "ab"));
input = "abcdz";
assertThat(
bytesTrie.matchingPrefixes(new BytesRef(input)).stream().map(BytesRef::utf8ToString).toList(),
contains("a", "ab", "abc", "abcd")
);
}
private static | UnigramTokenizerTests |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mixed/Folder.java | {
"start": 173,
"end": 204
} | class ____ extends Item {
}
| Folder |
java | apache__camel | core/camel-support/src/main/java/org/apache/camel/support/scan/DefaultPackageScanClassResolver.java | {
"start": 13434,
"end": 13858
} | class ____ from the JAR
* @param classes to add found and matching classes
*/
private void doLoadImplementationsInJar(
PackageScanFilter test, String parent, List<String> entries, Set<Class<?>> classes) {
for (String entry : entries) {
if (entry.startsWith(parent)) {
addIfMatching(test, entry, classes);
}
}
}
/**
* Add the | entries |
java | alibaba__nacos | plugin/config/src/main/java/com/alibaba/nacos/plugin/config/constants/ConfigChangePointCutTypes.java | {
"start": 760,
"end": 1715
} | enum ____ {
/**
* Publish or update config through http.
*/
PUBLISH_BY_HTTP("publishOrUpdateByHttp"),
/**
* Publish config through rpc.
*/
PUBLISH_BY_RPC("publishOrUpdateByRpc"),
/**
* Remove by id through http.
*/
REMOVE_BY_HTTP("removeSingleByHttp"),
/**
* Remove through rpc.
*/
REMOVE_BY_RPC("removeSingleByRpc"),
/**
* Import config file through http/console.
*/
IMPORT_BY_HTTP("importFileByHttp"),
/**
* Remove by ids through http.
*/
REMOVE_BATCH_HTTP("removeBatchByHttp");
private final String value;
ConfigChangePointCutTypes(String value) {
this.value = value;
}
public String value() {
return value;
}
public boolean equals(ConfigChangePointCutTypes configChangePointCutTypes) {
return this.compareTo(configChangePointCutTypes) == 0;
}
}
| ConfigChangePointCutTypes |
java | dropwizard__dropwizard | docs/source/examples/core/src/main/java/io/dropwizard/documentation/CustomTaskApp.java | {
"start": 219,
"end": 553
} | class ____ extends Application<Configuration> {
@Override
// core: CustomTaskApp#run
public void run(Configuration configuration, Environment environment) {
Database database = new Database();
environment.admin().addTask(new TruncateDatabaseTask(database));
}
// core: CustomTaskApp#run
}
| CustomTaskApp |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_1500/Issue1510.java | {
"start": 772,
"end": 1289
} | class ____ {
@JSONField(format = "yyyy-MM-dd")
private Date startTime;
@JSONField(format = "yyyy-MM-dd")
private Date endTime;
public Date getStartTime() {
return startTime;
}
public void setStartTime(Date startTime) {
this.startTime = startTime;
}
public Date getEndTime() {
return endTime;
}
public void setEndTime(Date endTime) {
this.endTime = endTime;
}
}
}
| Model |
java | quarkusio__quarkus | extensions/oidc-client-graphql/runtime/src/main/java/io/quarkus/oidc/client/graphql/runtime/OidcGraphQLClientIntegrationRecorder.java | {
"start": 345,
"end": 1687
} | class ____ {
public void enhanceGraphQLClientConfigurationWithOidc(Map<String, String> configKeysToOidcClients,
String defaultOidcClientName) {
OidcClients oidcClients = Arc.container().instance(OidcClients.class).get();
GraphQLClientsConfiguration configs = GraphQLClientsConfiguration.getInstance();
configs.getClients().forEach((graphQLClientKey, value) -> {
String oidcClient = configKeysToOidcClients.get(graphQLClientKey);
if (oidcClient == null) {
oidcClient = defaultOidcClientName;
}
Map<String, Uni<String>> dynamicHeaders = configs.getClient(graphQLClientKey).getDynamicHeaders();
dynamicHeaders.put("Authorization", getToken(oidcClients, oidcClient));
});
}
public Uni<String> getToken(OidcClients clients, String oidcClientId) {
if (oidcClientId == null) {
return clients.getClient()
.getTokens()
.map(Tokens::getAccessToken)
.map(token -> "Bearer " + token);
} else {
return clients.getClient(oidcClientId)
.getTokens()
.map(Tokens::getAccessToken)
.map(token -> "Bearer " + token);
}
}
}
| OidcGraphQLClientIntegrationRecorder |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/AlwaysRestartPolicy.java | {
"start": 1115,
"end": 2526
} | class ____ implements ComponentRestartPolicy {
private static AlwaysRestartPolicy INSTANCE = new AlwaysRestartPolicy();
private AlwaysRestartPolicy() {
}
public static AlwaysRestartPolicy getInstance() {
return INSTANCE;
}
@Override public boolean isLongLived() {
return true;
}
/**
* This is always false since these components never terminate
*
* @param component
* @return
*/
@Override public boolean hasCompleted(Component component) {
return false;
}
/**
* This is always false since these components never terminate
*
* @param component
* @return
*/
@Override public boolean hasCompletedSuccessfully(Component component) {
return false;
}
@Override public boolean shouldRelaunchInstance(
ComponentInstance componentInstance, ContainerStatus containerStatus) {
return true;
}
@Override public boolean isReadyForDownStream(Component dependentComponent) {
if (dependentComponent.getNumReadyInstances() < dependentComponent
.getNumDesiredInstances()) {
return false;
}
return true;
}
@Override public boolean allowUpgrades() {
return true;
}
@Override public boolean shouldTerminate(Component component) {
return false;
}
@Override public boolean allowContainerRetriesForInstance(
ComponentInstance componentInstance) {
return true;
}
}
| AlwaysRestartPolicy |
java | spring-projects__spring-framework | spring-webmvc/src/test/java/org/springframework/web/servlet/mvc/method/annotation/HandlerMethodAnnotationDetectionTests.java | {
"start": 7216,
"end": 8022
} | class ____ {
@InitBinder
public void initBinder(WebDataBinder dataBinder, @RequestParam("datePattern") String pattern) {
CustomDateEditor dateEditor = new CustomDateEditor(new SimpleDateFormat(pattern), false);
dataBinder.registerCustomEditor(Date.class, dateEditor);
}
@ModelAttribute
public void initModel(@RequestHeader("header1") Date date, Model model) {
model.addAttribute("attr1", date);
}
@RequestMapping(value="/path1/path2", method=RequestMethod.POST)
@ModelAttribute("attr2")
public Date handle(@RequestHeader("header2") Date date) throws Exception {
return date;
}
@ExceptionHandler(Exception.class)
@ResponseBody
public String handleException(Exception exception) {
return exception.getMessage();
}
}
@Controller
abstract static | SimpleController |
java | eclipse-vertx__vert.x | vertx-core/src/main/java/io/vertx/core/http/impl/NettyFileUploadDataFactory.java | {
"start": 947,
"end": 2183
} | class ____ extends DefaultHttpDataFactory {
private final ContextInternal context;
private final HttpServerRequest request;
private final Supplier<Handler<HttpServerFileUpload>> lazyUploadHandler;
public NettyFileUploadDataFactory(ContextInternal context, HttpServerRequest request, Supplier<Handler<HttpServerFileUpload>> lazyUploadHandler) {
super(false);
this.context = context;
this.request = request;
this.lazyUploadHandler = lazyUploadHandler;
}
@Override
public FileUpload createFileUpload(HttpRequest httpRequest, String name, String filename, String contentType, String contentTransferEncoding, Charset charset, long size) {
NettyFileUpload nettyUpload = new NettyFileUpload(
context,
request,
name,
filename,
contentType,
contentTransferEncoding,
charset,
size);
HttpServerFileUploadImpl upload = new HttpServerFileUploadImpl(context, nettyUpload, name, filename, contentType, contentTransferEncoding, charset,
size);
Handler<HttpServerFileUpload> uploadHandler = lazyUploadHandler.get();
if (uploadHandler != null) {
context.dispatch(upload, uploadHandler);
}
return nettyUpload;
}
}
| NettyFileUploadDataFactory |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/util/ZooKeeperUtils.java | {
"start": 34986,
"end": 35346
} | class ____ implements ACLProvider {
@Override
public List<ACL> getDefaultAcl() {
return ZooDefs.Ids.CREATOR_ALL_ACL;
}
@Override
public List<ACL> getAclForPath(String path) {
return ZooDefs.Ids.CREATOR_ALL_ACL;
}
}
/** ZooKeeper client ACL mode enum. */
public | SecureAclProvider |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/language/AbstractTypedLanguageTest.java | {
"start": 1461,
"end": 5770
} | class ____<
T extends TypedExpressionDefinition.AbstractBuilder<T, E>, E extends TypedExpressionDefinition>
extends ContextTestSupport {
protected final String expression;
protected final Function<LanguageBuilderFactory, T> factory;
protected AbstractTypedLanguageTest(String expression, Function<LanguageBuilderFactory, T> factory) {
this.expression = expression;
this.factory = factory;
}
@Override
public boolean isUseRouteBuilder() {
return false;
}
protected void assertResult(String uriSuffix, TestContext context) throws Exception {
MockEndpoint mockEndpoint = getMockEndpoint(String.format("mock:%s", uriSuffix));
mockEndpoint.expectedMessageCount(1);
template.sendBody(String.format("direct:%s", uriSuffix), context.getContentToSend());
assertMockEndpointsSatisfied();
assertTypeInstanceOf(context.getBodyReceivedType(), mockEndpoint.getReceivedExchanges().get(0).getIn().getBody());
assertBodyReceived(context.getBodyReceived(), mockEndpoint.getReceivedExchanges().get(0).getIn().getBody());
}
protected void assertTypeInstanceOf(Class<?> expected, Object body) {
if (expected != null) {
assertIsInstanceOf(expected, body);
}
}
protected void assertBodyReceived(Object expected, Object body) {
if (expected != null) {
if (expected instanceof Integer && body instanceof Integer) {
// java objects for number crap
Assertions.assertEquals((int) expected, (int) body);
} else {
Assertions.assertEquals(expected, body);
}
}
}
protected Object defaultContentToSend() {
return "1";
}
protected TestContext testWithoutTypeContext() {
return new TestContext(defaultContentToSend(), "1", String.class);
}
@Test
void testExpressionOnly() throws Exception {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
from("direct:expression-only")
.setBody()
.expression(
expression(
factory.apply(expression())
.expression(expression)
.end()
)
).to("mock:expression-only");
}
});
context.start();
assertResult("expression-only", testWithoutTypeContext());
}
protected TestContext testWithTypeContext() {
return new TestContext(defaultContentToSend(), 1, Integer.class);
}
@Test
void testTypedWithClass() throws Exception {
TestContext testContext = testWithTypeContext();
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
from("direct:typed-with-class")
.setBody()
.expression(
expression(
factory.apply(expression())
.expression(expression)
.resultType(testContext.getBodyReceivedType())
.end()
)
).to("mock:typed-with-class");
}
});
context.start();
assertResult("typed-with-class", testContext);
}
@Test
void testTypedWithName() throws Exception {
TestContext testContext = testWithTypeContext();
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
from("direct:typed-with-name")
.split(
expression(
factory.apply(expression())
.expression(expression)
.resultTypeName(testContext.getBodyReceivedType().getName())
.end()
)
).to("mock:typed-with-name");
}
});
context.start();
assertResult("typed-with-name", testContext);
}
protected static | AbstractTypedLanguageTest |
java | spring-projects__spring-framework | spring-messaging/src/test/java/org/springframework/messaging/simp/stomp/StompBrokerRelayMessageHandlerTests.java | {
"start": 1996,
"end": 12050
} | class ____ {
private StompBrokerRelayMessageHandler brokerRelay;
private final StubMessageChannel outboundChannel = new StubMessageChannel();
private final StubTcpOperations tcpClient = new StubTcpOperations();
private final ArgumentCaptor<Runnable> messageCountTaskCaptor = ArgumentCaptor.forClass(Runnable.class);
@BeforeEach
void setup() {
this.brokerRelay = new StompBrokerRelayMessageHandler(new StubMessageChannel(),
this.outboundChannel, new StubMessageChannel(), Collections.singletonList("/topic")) {
@Override
protected void startInternal() {
publishBrokerAvailableEvent(); // Force this, since we'll never actually connect
super.startInternal();
}
};
this.brokerRelay.setTcpClient(this.tcpClient);
this.brokerRelay.setTaskScheduler(mock());
}
@Test
void virtualHost() {
this.brokerRelay.setVirtualHost("ABC");
this.brokerRelay.start();
this.brokerRelay.handleMessage(connectMessage("sess1", "joe"));
assertThat(this.tcpClient.getSentMessages()).hasSize(2);
StompHeaderAccessor headers1 = this.tcpClient.getSentHeaders(0);
assertThat(headers1.getCommand()).isEqualTo(StompCommand.CONNECT);
assertThat(headers1.getSessionId()).isEqualTo(StompBrokerRelayMessageHandler.SYSTEM_SESSION_ID);
assertThat(headers1.getHost()).isEqualTo("ABC");
StompHeaderAccessor headers2 = this.tcpClient.getSentHeaders(1);
assertThat(headers2.getCommand()).isEqualTo(StompCommand.CONNECT);
assertThat(headers2.getSessionId()).isEqualTo("sess1");
assertThat(headers2.getHost()).isEqualTo("ABC");
}
@Test
void loginAndPasscode() {
this.brokerRelay.setSystemLogin("syslogin");
this.brokerRelay.setSystemPasscode("syspasscode");
this.brokerRelay.setClientLogin("clientlogin");
this.brokerRelay.setClientPasscode("clientpasscode");
this.brokerRelay.start();
this.brokerRelay.handleMessage(connectMessage("sess1", "joe"));
assertThat(this.tcpClient.getSentMessages()).hasSize(2);
StompHeaderAccessor headers1 = this.tcpClient.getSentHeaders(0);
assertThat(headers1.getCommand()).isEqualTo(StompCommand.CONNECT);
assertThat(headers1.getLogin()).isEqualTo("syslogin");
assertThat(headers1.getPasscode()).isEqualTo("syspasscode");
StompHeaderAccessor headers2 = this.tcpClient.getSentHeaders(1);
assertThat(headers2.getCommand()).isEqualTo(StompCommand.CONNECT);
assertThat(headers2.getLogin()).isEqualTo("clientlogin");
assertThat(headers2.getPasscode()).isEqualTo("clientpasscode");
}
@Test
void destinationExcluded() {
this.brokerRelay.start();
this.brokerRelay.handleMessage(connectMessage("sess1", "joe"));
SimpMessageHeaderAccessor accessor = StompHeaderAccessor.create(StompCommand.CONNECTED);
accessor.setLeaveMutable(true);
this.tcpClient.handleMessage(MessageBuilder.createMessage(new byte[0], accessor.getMessageHeaders()));
accessor = SimpMessageHeaderAccessor.create(SimpMessageType.MESSAGE);
accessor.setSessionId("sess1");
accessor.setDestination("/user/daisy/foo");
this.brokerRelay.handleMessage(MessageBuilder.createMessage(new byte[0], accessor.getMessageHeaders()));
assertThat(this.tcpClient.getSentMessages()).hasSize(2);
StompHeaderAccessor headers = this.tcpClient.getSentHeaders(0);
assertThat(headers.getCommand()).isEqualTo(StompCommand.CONNECT);
assertThat(headers.getSessionId()).isEqualTo(StompBrokerRelayMessageHandler.SYSTEM_SESSION_ID);
headers = this.tcpClient.getSentHeaders(1);
assertThat(headers.getCommand()).isEqualTo(StompCommand.CONNECT);
assertThat(headers.getSessionId()).isEqualTo("sess1");
}
@Test // gh-22822
void destinationExcludedWithHeartbeat() {
Message<byte[]> connectMessage = connectMessage("sess1", "joe");
MessageHeaderAccessor.getAccessor(connectMessage, StompHeaderAccessor.class).setHeartbeat(10000, 10000);
this.brokerRelay.start();
this.brokerRelay.handleMessage(connectMessage);
SimpMessageHeaderAccessor accessor = StompHeaderAccessor.create(StompCommand.CONNECTED);
accessor.setLeaveMutable(true);
this.tcpClient.handleMessage(MessageBuilder.createMessage(new byte[0], accessor.getMessageHeaders()));
// Run the messageCountTask to clear the message count
verify(this.brokerRelay.getTaskScheduler()).scheduleWithFixedDelay(this.messageCountTaskCaptor.capture(), eq(Duration.ofMillis(5000L)));
this.messageCountTaskCaptor.getValue().run();
accessor = SimpMessageHeaderAccessor.create(SimpMessageType.MESSAGE);
accessor.setSessionId("sess1");
accessor.setDestination("/user/daisy/foo");
this.brokerRelay.handleMessage(MessageBuilder.createMessage(new byte[0], accessor.getMessageHeaders()));
assertThat(this.tcpClient.getSentMessages()).hasSize(3);
assertThat(this.tcpClient.getSentHeaders(2).getMessageType()).isEqualTo(SimpMessageType.HEARTBEAT);
}
@Test
void messageFromBrokerIsEnriched() {
this.brokerRelay.start();
this.brokerRelay.handleMessage(connectMessage("sess1", "joe"));
assertThat(this.tcpClient.getSentMessages()).hasSize(2);
assertThat(this.tcpClient.getSentHeaders(0).getCommand()).isEqualTo(StompCommand.CONNECT);
assertThat(this.tcpClient.getSentHeaders(1).getCommand()).isEqualTo(StompCommand.CONNECT);
this.tcpClient.handleMessage(message(StompCommand.MESSAGE, null, null, null));
Message<byte[]> message = this.outboundChannel.getMessages().get(0);
StompHeaderAccessor accessor = StompHeaderAccessor.getAccessor(message, StompHeaderAccessor.class);
assertThat(accessor.getSessionId()).isEqualTo("sess1");
assertThat(accessor.getUser().getName()).isEqualTo("joe");
}
// SPR-12820
@Test
void connectWhenBrokerNotAvailable() {
this.brokerRelay.start();
this.brokerRelay.stopInternal();
this.brokerRelay.handleMessage(connectMessage("sess1", "joe"));
Message<byte[]> message = this.outboundChannel.getMessages().get(0);
StompHeaderAccessor accessor = StompHeaderAccessor.getAccessor(message, StompHeaderAccessor.class);
assertThat(accessor.getCommand()).isEqualTo(StompCommand.ERROR);
assertThat(accessor.getSessionId()).isEqualTo("sess1");
assertThat(accessor.getUser().getName()).isEqualTo("joe");
assertThat(accessor.getMessage()).isEqualTo("Broker not available.");
}
@Test
void sendAfterBrokerUnavailable() {
this.brokerRelay.start();
assertThat(this.brokerRelay.getConnectionCount()).isEqualTo(1);
this.brokerRelay.handleMessage(connectMessage("sess1", "joe"));
assertThat(this.brokerRelay.getConnectionCount()).isEqualTo(2);
this.brokerRelay.stopInternal();
this.brokerRelay.handleMessage(message(StompCommand.SEND, "sess1", "joe", "/foo"));
assertThat(this.brokerRelay.getConnectionCount()).isEqualTo(1);
Message<byte[]> message = this.outboundChannel.getMessages().get(0);
StompHeaderAccessor accessor = StompHeaderAccessor.getAccessor(message, StompHeaderAccessor.class);
assertThat(accessor.getCommand()).isEqualTo(StompCommand.ERROR);
assertThat(accessor.getSessionId()).isEqualTo("sess1");
assertThat(accessor.getUser().getName()).isEqualTo("joe");
assertThat(accessor.getMessage()).isEqualTo("Broker not available.");
}
@Test
@SuppressWarnings("rawtypes")
void systemSubscription() {
MessageHandler handler = mock();
this.brokerRelay.setSystemSubscriptions(Collections.singletonMap("/topic/foo", handler));
this.brokerRelay.start();
StompHeaderAccessor accessor = StompHeaderAccessor.create(StompCommand.CONNECTED);
accessor.setLeaveMutable(true);
MessageHeaders headers = accessor.getMessageHeaders();
this.tcpClient.handleMessage(MessageBuilder.createMessage(new byte[0], headers));
assertThat(this.tcpClient.getSentMessages()).hasSize(2);
assertThat(this.tcpClient.getSentHeaders(0).getCommand()).isEqualTo(StompCommand.CONNECT);
assertThat(this.tcpClient.getSentHeaders(1).getCommand()).isEqualTo(StompCommand.SUBSCRIBE);
assertThat(this.tcpClient.getSentHeaders(1).getDestination()).isEqualTo("/topic/foo");
Message<byte[]> message = message(StompCommand.MESSAGE, null, null, "/topic/foo");
this.tcpClient.handleMessage(message);
ArgumentCaptor<Message> captor = ArgumentCaptor.forClass(Message.class);
verify(handler).handleMessage(captor.capture());
assertThat(captor.getValue()).isSameAs(message);
}
@Test
void alreadyConnected() {
this.brokerRelay.start();
Message<byte[]> connect = connectMessage("sess1", "joe");
this.brokerRelay.handleMessage(connect);
assertThat(this.tcpClient.getSentMessages()).hasSize(2);
StompHeaderAccessor headers1 = this.tcpClient.getSentHeaders(0);
assertThat(headers1.getCommand()).isEqualTo(StompCommand.CONNECT);
assertThat(headers1.getSessionId()).isEqualTo(StompBrokerRelayMessageHandler.SYSTEM_SESSION_ID);
StompHeaderAccessor headers2 = this.tcpClient.getSentHeaders(1);
assertThat(headers2.getCommand()).isEqualTo(StompCommand.CONNECT);
assertThat(headers2.getSessionId()).isEqualTo("sess1");
this.brokerRelay.handleMessage(connect);
assertThat(this.tcpClient.getSentMessages()).hasSize(2);
assertThat(this.outboundChannel.getMessages()).isEmpty();
}
private Message<byte[]> connectMessage(String sessionId, String user) {
StompHeaderAccessor headers = StompHeaderAccessor.create(StompCommand.CONNECT);
headers.setSessionId(sessionId);
headers.setUser(new TestPrincipal(user));
headers.setLeaveMutable(true);
return MessageBuilder.createMessage(new byte[0], headers.getMessageHeaders());
}
private Message<byte[]> message(StompCommand command, String sessionId, String user, String destination) {
StompHeaderAccessor accessor = StompHeaderAccessor.create(command);
if (sessionId != null) {
accessor.setSessionId(sessionId);
}
if (user != null) {
accessor.setUser(new TestPrincipal(user));
}
if (destination != null) {
accessor.setDestination(destination);
}
accessor.setLeaveMutable(true);
return MessageBuilder.createMessage(new byte[0], accessor.getMessageHeaders());
}
private static CompletableFuture<Void> getVoidFuture() {
return CompletableFuture.completedFuture(null);
}
private static | StompBrokerRelayMessageHandlerTests |
java | spring-projects__spring-framework | spring-beans/src/test/java/org/springframework/beans/factory/annotation/InjectAnnotationBeanPostProcessorTests.java | {
"start": 42889,
"end": 42995
} | class ____ {
@Inject
public AnnotatedDefaultConstructorBean() {
}
}
}
| AnnotatedDefaultConstructorBean |
java | mybatis__mybatis-3 | src/main/java/org/apache/ibatis/executor/loader/javassist/JavassistProxyFactory.java | {
"start": 9328,
"end": 9436
} | class ____ {
private static final Log log = LogFactory.getLog(JavassistProxyFactory.class);
}
}
| LogHolder |
java | processing__processing4 | core/src/processing/opengl/PGL.java | {
"start": 86330,
"end": 86656
} | interface
____ static boolean SHAPE_TEXT_SUPPORTED;
protected static int SEG_MOVETO;
protected static int SEG_LINETO;
protected static int SEG_QUADTO;
protected static int SEG_CUBICTO;
protected static int SEG_CLOSE;
protected abstract FontOutline createFontOutline(char ch, Object font);
protected | protected |
java | apache__spark | core/src/main/java/org/apache/spark/shuffle/sort/io/LocalDiskShuffleDriverComponents.java | {
"start": 1053,
"end": 1710
} | class ____ implements ShuffleDriverComponents {
private BlockManagerMaster blockManagerMaster;
@Override
public Map<String, String> initializeApplication() {
blockManagerMaster = SparkEnv.get().blockManager().master();
return Collections.emptyMap();
}
@Override
public void cleanupApplication() {
// nothing to clean up
}
@Override
public void removeShuffle(int shuffleId, boolean blocking) {
if (blockManagerMaster == null) {
throw new IllegalStateException("Driver components must be initialized before using");
}
blockManagerMaster.removeShuffle(shuffleId, blocking);
}
}
| LocalDiskShuffleDriverComponents |
java | reactor__reactor-core | reactor-core/src/jcstress/java/reactor/core/publisher/FluxConcatMapNoPrefetchStressTest.java | {
"start": 1192,
"end": 1807
} | class ____ {
final StressSubscriber<Object> stressSubscriber = new StressSubscriber<>();
final FluxConcatMapNoPrefetchSubscriber<Object, Object> concatMapImmediate = new FluxConcatMapNoPrefetchSubscriber<>(
stressSubscriber,
Mono::just,
FluxConcatMap.ErrorMode.IMMEDIATE
);
@JCStressTest
@Outcome(id = {"false, false"}, expect = ACCEPTABLE, desc = "No concurrent invocations")
@Outcome(id = {"true, false"}, expect = FORBIDDEN, desc = "onNext while onError")
@Outcome(id = {"false, true"}, expect = FORBIDDEN, desc = "onError while onNext")
@State
public static | FluxConcatMapNoPrefetchStressTest |
java | mybatis__mybatis-3 | src/main/java/org/apache/ibatis/binding/MapperMethod.java | {
"start": 8399,
"end": 10265
} | class ____ {
private final String name;
private final SqlCommandType type;
public SqlCommand(Configuration configuration, Class<?> mapperInterface, Method method) {
final String methodName = method.getName();
final Class<?> declaringClass = method.getDeclaringClass();
MappedStatement ms = resolveMappedStatement(mapperInterface, methodName, declaringClass, configuration);
if (ms == null) {
if (method.getAnnotation(Flush.class) == null) {
throw new BindingException(
"Invalid bound statement (not found): " + mapperInterface.getName() + "." + methodName);
}
name = null;
type = SqlCommandType.FLUSH;
} else {
name = ms.getId();
type = ms.getSqlCommandType();
if (type == SqlCommandType.UNKNOWN) {
throw new BindingException("Unknown execution method for: " + name);
}
}
}
public String getName() {
return name;
}
public SqlCommandType getType() {
return type;
}
private MappedStatement resolveMappedStatement(Class<?> mapperInterface, String methodName, Class<?> declaringClass,
Configuration configuration) {
String statementId = mapperInterface.getName() + "." + methodName;
if (configuration.hasStatement(statementId)) {
return configuration.getMappedStatement(statementId);
}
if (mapperInterface.equals(declaringClass)) {
return null;
}
for (Class<?> superInterface : mapperInterface.getInterfaces()) {
if (declaringClass.isAssignableFrom(superInterface)) {
MappedStatement ms = resolveMappedStatement(superInterface, methodName, declaringClass, configuration);
if (ms != null) {
return ms;
}
}
}
return null;
}
}
public static | SqlCommand |
java | elastic__elasticsearch | x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java | {
"start": 2162,
"end": 60459
} | class ____ extends ESRestTestCase {
private static final String BASIC_AUTH_VALUE = UsernamePasswordToken.basicAuthHeaderValue(
"x_pack_rest_user",
SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING
);
private static final RequestOptions POST_DATA = RequestOptions.DEFAULT.toBuilder()
.setWarningsHandler(
warnings -> Collections.singletonList(
"Posting data directly to anomaly detection jobs is deprecated, "
+ "in a future major version it will be compulsory to use a datafeed"
).equals(warnings) == false
)
.build();
private static final RequestOptions FLUSH_OPTIONS = RequestOptions.DEFAULT.toBuilder()
.setWarningsHandler(
warnings -> Collections.singletonList(
"Forcing any buffered data to be processed is deprecated, "
+ "in a future major version it will be compulsory to use a datafeed"
).equals(warnings) == false
)
.build();
@Override
protected Settings restClientSettings() {
return Settings.builder().put(super.restClientSettings()).put(ThreadContext.PREFIX + ".Authorization", BASIC_AUTH_VALUE).build();
}
@Override
protected boolean preserveTemplatesUponCompletion() {
return true;
}
public void testPutJob_GivenFarequoteConfig() throws Exception {
Response response = createFarequoteJob("given-farequote-config-job");
String responseAsString = EntityUtils.toString(response.getEntity());
assertThat(responseAsString, containsString("\"job_id\":\"given-farequote-config-job\""));
assertThat(responseAsString, containsString("\"results_index_name\":\"shared\""));
String mlIndicesResponseAsString = getMlResultsIndices();
assertThat(mlIndicesResponseAsString, containsString("green open .ml-anomalies-shared-000001"));
String aliasesResponseAsString = getAliases();
LogManager.getLogger(MlRestTestStateCleaner.class).warn(aliasesResponseAsString);
assertThat(
aliasesResponseAsString,
containsString(
"\".ml-anomalies-shared-000001\":{\"aliases\":"
+ "{\".ml-anomalies-.write-given-farequote-config-job\":"
+ "{\"is_hidden\":true},\".ml-anomalies-given-farequote-config-job\""
)
);
}
public void testGetJob_GivenNoSuchJob() {
ResponseException e = expectThrows(
ResponseException.class,
() -> client().performRequest(new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/non-existing-job/_stats"))
);
assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(404));
assertThat(e.getMessage(), containsString("No known job with id 'non-existing-job'"));
}
// Stats for an existing job return a count of 1 and echo the job id.
public void testGetJob_GivenJobExists() throws Exception {
createFarequoteJob("get-job_given-job-exists-job");
Response response = client().performRequest(
new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/get-job_given-job-exists-job/_stats")
);
String responseAsString = EntityUtils.toString(response.getEntity());
assertThat(responseAsString, containsString("\"count\":1"));
assertThat(responseAsString, containsString("\"job_id\":\"get-job_given-job-exists-job\""));
}
// A single job is returned both when "_all" is given explicitly and when the
// job-id path segment is omitted entirely.
public void testGetJobs_GivenSingleJob() throws Exception {
    String jobId = "get-jobs_given-single-job-job";
    createFarequoteJob(jobId);
    // Explicit _all
    Response explicitResponse = client().performRequest(
        new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/_all")
    );
    String explicitAll = EntityUtils.toString(explicitResponse.getEntity());
    assertThat(explicitAll, containsString("\"count\":1"));
    assertThat(explicitAll, containsString("\"job_id\":\"" + jobId + "\""));
    // Implicit _all
    Response implicitResponse = client().performRequest(
        new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors")
    );
    String implicitAll = EntityUtils.toString(implicitResponse.getEntity());
    assertThat(implicitAll, containsString("\"count\":1"));
    assertThat(implicitAll, containsString("\"job_id\":\"" + jobId + "\""));
}
// With three jobs present, both the explicit "_all" and the bare endpoint
// return count 3 and list all three job ids.
public void testGetJobs_GivenMultipleJobs() throws Exception {
createFarequoteJob("given-multiple-jobs-job-1");
createFarequoteJob("given-multiple-jobs-job-2");
createFarequoteJob("given-multiple-jobs-job-3");
// Explicit _all
String explicitAll = EntityUtils.toString(
client().performRequest(new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/_all")).getEntity()
);
assertThat(explicitAll, containsString("\"count\":3"));
assertThat(explicitAll, containsString("\"job_id\":\"given-multiple-jobs-job-1\""));
assertThat(explicitAll, containsString("\"job_id\":\"given-multiple-jobs-job-2\""));
assertThat(explicitAll, containsString("\"job_id\":\"given-multiple-jobs-job-3\""));
// Implicit _all
String implicitAll = EntityUtils.toString(
client().performRequest(new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors")).getEntity()
);
assertThat(implicitAll, containsString("\"count\":3"));
assertThat(implicitAll, containsString("\"job_id\":\"given-multiple-jobs-job-1\""));
assertThat(implicitAll, containsString("\"job_id\":\"given-multiple-jobs-job-2\""));
assertThat(implicitAll, containsString("\"job_id\":\"given-multiple-jobs-job-3\""));
}
// tests the _xpack/usage endpoint
// Verifies the ML usage counters: with two closed jobs, _all/closed both report 2;
// after opening one job, the counts split into 1 closed + 1 opened.
public void testUsage() throws IOException {
createFarequoteJob("job-1");
createFarequoteJob("job-2");
Map<String, Object> usage = entityAsMap(client().performRequest(new Request("GET", "_xpack/usage")));
assertEquals(2, XContentMapValues.extractValue("ml.jobs._all.count", usage));
assertEquals(2, XContentMapValues.extractValue("ml.jobs.closed.count", usage));
openJob("job-1");
usage = entityAsMap(client().performRequest(new Request("GET", "_xpack/usage")));
assertEquals(2, XContentMapValues.extractValue("ml.jobs._all.count", usage));
assertEquals(1, XContentMapValues.extractValue("ml.jobs.closed.count", usage));
assertEquals(1, XContentMapValues.extractValue("ml.jobs.opened.count", usage));
}
// Opening a job with an unachievable 1ns timeout must map to HTTP 408 (request timeout).
public void testOpenJob_GivenTimeout_Returns408() throws IOException {
String jobId = "test-timeout-returns-408";
createFarequoteJob(jobId);
ResponseException e = expectThrows(ResponseException.class, () -> openJob(jobId, Optional.of(TimeValue.timeValueNanos(1L))));
assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.REQUEST_TIMEOUT.getStatus()));
}
/**
 * Creates a minimal "farequote" anomaly detection job with the given id:
 * metric(responsetime) split by airline, 1h (3600s) bucket span, and a
 * "yyyy-MM-dd HH:mm:ssX" time format on the "time" field.
 *
 * @return the raw PUT response so callers can inspect the body
 */
private Response createFarequoteJob(String jobId) throws IOException {
return putJob(jobId, """
{
"description":"Analysis of response time by airline",
"analysis_config" : {
"bucket_span": "3600s",
"detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}]
},
"data_description" : {
"time_field":"time",
"time_format":"yyyy-MM-dd HH:mm:ssX"
}
}""");
}
// Re-using an existing job id must fail with 400 and an "Id is already used" message,
// even when the second request specifies a different results index.
public void testCantCreateJobWithSameID() throws Exception {
String jobTemplate = """
{
"analysis_config" : {
"detectors" :[{"function":"metric","field_name":"responsetime"}]
},
"data_description": {},
"results_index_name" : "%s"}""";
String jobId = "cant-create-job-with-same-id-job";
putJob(jobId, Strings.format(jobTemplate, "index-1"));
ResponseException e = expectThrows(ResponseException.class, () -> putJob(jobId, Strings.format(jobTemplate, "index-2")));
assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(400));
assertThat(e.getMessage(), containsString("The job cannot be created with the Id '" + jobId + "'. The Id is already used."));
}
// Two jobs sharing a custom results index ("results_index_name"): verifies the concrete
// index gets the "custom-" prefix, each job gets hidden filtered read aliases and a write
// alias, results indexed through each job's alias are visible via the buckets API, and
// deleting the jobs removes first job 1's aliases/documents (index retained while job 2
// remains) and finally, after job 2 is deleted, the custom index itself.
public void testCreateJobsWithIndexNameOption() throws Exception {
String jobTemplate = """
{
"analysis_config" : {
"detectors" :[{"function":"metric","field_name":"responsetime"}]
},
"data_description": {},
"results_index_name" : "%s"}""";
String jobId1 = "create-jobs-with-index-name-option-job-1";
String indexName = "non-default-index-000001";
putJob(jobId1, Strings.format(jobTemplate, indexName));
String jobId2 = "create-jobs-with-index-name-option-job-2";
putJob(jobId2, Strings.format(jobTemplate, indexName));
// With security enabled GET _aliases throws an index_not_found_exception
// if no aliases have been created. In multi-node tests the alias may not
// appear immediately so wait here.
assertBusy(() -> {
try {
String aliasesResponse = getAliases();
assertThat(aliasesResponse, containsString(Strings.format("""
"%s":{"aliases":{""", AnomalyDetectorsIndex.jobResultsAliasedName("custom-" + indexName))));
assertThat(
aliasesResponse,
containsString(
Strings.format(
"""
"%s":{"filter":{"term":{"job_id":{"value":"%s"}}},"is_hidden":true}""",
AnomalyDetectorsIndex.jobResultsAliasedName(jobId1),
jobId1
)
)
);
assertThat(aliasesResponse, containsString(Strings.format("""
"%s":{"is_hidden":true}""", AnomalyDetectorsIndex.resultsWriteAlias(jobId1))));
assertThat(
aliasesResponse,
containsString(
Strings.format(
"""
"%s":{"filter":{"term":{"job_id":{"value":"%s"}}},"is_hidden":true}""",
AnomalyDetectorsIndex.jobResultsAliasedName(jobId2),
jobId2
)
)
);
assertThat(aliasesResponse, containsString(Strings.format("""
"%s":{"is_hidden":true}""", AnomalyDetectorsIndex.resultsWriteAlias(jobId2))));
} catch (ResponseException e) {
throw new AssertionError(e);
}
});
String responseAsString = getMlResultsIndices();
assertThat(responseAsString, containsString(AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-" + indexName));
assertThat(responseAsString, not(containsString(AnomalyDetectorsIndex.jobResultsAliasedName(jobId1))));
assertThat(responseAsString, not(containsString(AnomalyDetectorsIndex.jobResultsAliasedName(jobId2))));
{ // create jobId1 docs
String id = Strings.format("%s_bucket_%s_%s", jobId1, "1234", 300);
Request createResultRequest = new Request("PUT", AnomalyDetectorsIndex.jobResultsAliasedName(jobId1) + "/_doc/" + id);
createResultRequest.setJsonEntity(Strings.format("""
{"job_id":"%s", "timestamp": "%s", "result_type":"bucket", "bucket_span": "%s"}""", jobId1, "1234", 1));
client().performRequest(createResultRequest);
id = Strings.format("%s_bucket_%s_%s", jobId1, "1236", 300);
createResultRequest = new Request("PUT", AnomalyDetectorsIndex.jobResultsAliasedName(jobId1) + "/_doc/" + id);
createResultRequest.setJsonEntity(Strings.format("""
{"job_id":"%s", "timestamp": "%s", "result_type":"bucket", "bucket_span": "%s"}""", jobId1, "1236", 1));
client().performRequest(createResultRequest);
refreshAllIndices();
responseAsString = EntityUtils.toString(
client().performRequest(new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId1 + "/results/buckets"))
.getEntity()
);
assertThat(responseAsString, containsString("\"count\":2"));
responseAsString = EntityUtils.toString(
client().performRequest(new Request("GET", AnomalyDetectorsIndex.jobResultsAliasedName(jobId1) + "/_search")).getEntity()
);
assertThat(responseAsString, containsString("\"value\":2"));
}
{ // create jobId2 docs
String id = Strings.format("%s_bucket_%s_%s", jobId2, "1234", 300);
Request createResultRequest = new Request("PUT", AnomalyDetectorsIndex.jobResultsAliasedName(jobId2) + "/_doc/" + id);
createResultRequest.setJsonEntity(Strings.format("""
{"job_id":"%s", "timestamp": "%s", "result_type":"bucket", "bucket_span": "%s"}""", jobId2, "1234", 1));
client().performRequest(createResultRequest);
id = Strings.format("%s_bucket_%s_%s", jobId2, "1236", 300);
createResultRequest = new Request("PUT", AnomalyDetectorsIndex.jobResultsAliasedName(jobId2) + "/_doc/" + id);
createResultRequest.setJsonEntity(Strings.format("""
{"job_id":"%s", "timestamp": "%s", "result_type":"bucket", "bucket_span": "%s"}""", jobId2, "1236", 1));
client().performRequest(createResultRequest);
refreshAllIndices();
responseAsString = EntityUtils.toString(
client().performRequest(new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId2 + "/results/buckets"))
.getEntity()
);
assertThat(responseAsString, containsString("\"count\":2"));
responseAsString = EntityUtils.toString(
client().performRequest(new Request("GET", AnomalyDetectorsIndex.jobResultsAliasedName(jobId2) + "/_search")).getEntity()
);
assertThat(responseAsString, containsString("\"value\":2"));
}
client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId1));
// check that indices still exist, but no longer have job1 entries and aliases are gone
responseAsString = getAliases();
assertThat(responseAsString, not(containsString(AnomalyDetectorsIndex.jobResultsAliasedName(jobId1))));
assertThat(responseAsString, containsString(AnomalyDetectorsIndex.jobResultsAliasedName(jobId2))); // job2 still exists
responseAsString = getMlResultsIndices();
assertThat(responseAsString, containsString(AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-" + indexName));
refreshAllIndices();
responseAsString = EntityUtils.toString(
client().performRequest(
new Request("GET", AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-" + indexName + "/_count")
).getEntity()
);
assertThat(responseAsString, containsString("\"count\":2"));
// Delete the second job and verify aliases are gone, and original concrete/custom index is gone
client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId2));
responseAsString = getAliases();
assertThat(responseAsString, not(containsString(AnomalyDetectorsIndex.jobResultsAliasedName(jobId2))));
refreshAllIndices();
responseAsString = getMlResultsIndices();
// FIX: indexName already ends in "-000001"; the previous assertion appended another
// "-000001", checking for an index name that can never exist and so always passed.
// Check the actual concrete index name (prefix + "custom-" + indexName), matching the
// sibling test testCreateJobsWithIndexNameNo6DigitSuffixOption's final assertion.
assertThat(
responseAsString,
not(containsString(AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-" + indexName))
);
}
// The same as testCreateJobsWithIndexNameOption but we don't supply the "-000001" suffix to the index name supplied in the job config
// We test that the final index name does indeed have the suffix.
public void testCreateJobsWithIndexNameNo6DigitSuffixOption() throws Exception {
String jobTemplate = """
{
"analysis_config" : {
"detectors" :[{"function":"metric","field_name":"responsetime"}]
},
"data_description": {},
"results_index_name" : "%s"}""";
// NOTE(review): these job ids are identical to the ones used in
// testCreateJobsWithIndexNameOption — presumably safe because state is cleaned
// between tests, but verify that the cleaner removes jobs, not just templates.
String jobId1 = "create-jobs-with-index-name-option-job-1";
String indexName = "non-default-index";
putJob(jobId1, Strings.format(jobTemplate, indexName));
String jobId2 = "create-jobs-with-index-name-option-job-2";
putJob(jobId2, Strings.format(jobTemplate, indexName));
// With security enabled GET _aliases throws an index_not_found_exception
// if no aliases have been created. In multi-node tests the alias may not
// appear immediately so wait here.
assertBusy(() -> {
try {
String aliasesResponse = getAliases();
// The "-000001" suffix is appended automatically to the concrete index name.
assertThat(aliasesResponse, containsString(Strings.format("""
"%s":{"aliases":{""", AnomalyDetectorsIndex.jobResultsAliasedName("custom-" + indexName + "-000001"))));
assertThat(
aliasesResponse,
containsString(
Strings.format(
"""
"%s":{"filter":{"term":{"job_id":{"value":"%s"}}},"is_hidden":true}""",
AnomalyDetectorsIndex.jobResultsAliasedName(jobId1),
jobId1
)
)
);
assertThat(aliasesResponse, containsString(Strings.format("""
"%s":{"is_hidden":true}""", AnomalyDetectorsIndex.resultsWriteAlias(jobId1))));
assertThat(
aliasesResponse,
containsString(
Strings.format(
"""
"%s":{"filter":{"term":{"job_id":{"value":"%s"}}},"is_hidden":true}""",
AnomalyDetectorsIndex.jobResultsAliasedName(jobId2),
jobId2
)
)
);
assertThat(aliasesResponse, containsString(Strings.format("""
"%s":{"is_hidden":true}""", AnomalyDetectorsIndex.resultsWriteAlias(jobId2))));
} catch (ResponseException e) {
throw new AssertionError(e);
}
});
String responseAsString = getMlResultsIndices();
assertThat(responseAsString, containsString(AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-" + indexName + "-000001"));
assertThat(responseAsString, not(containsString(AnomalyDetectorsIndex.jobResultsAliasedName(jobId1))));
assertThat(responseAsString, not(containsString(AnomalyDetectorsIndex.jobResultsAliasedName(jobId2))));
{ // create jobId1 docs
String id = Strings.format("%s_bucket_%s_%s", jobId1, "1234", 300);
Request createResultRequest = new Request("PUT", AnomalyDetectorsIndex.jobResultsAliasedName(jobId1) + "/_doc/" + id);
createResultRequest.setJsonEntity(Strings.format("""
{"job_id":"%s", "timestamp": "%s", "result_type":"bucket", "bucket_span": "%s"}""", jobId1, "1234", 1));
client().performRequest(createResultRequest);
id = Strings.format("%s_bucket_%s_%s", jobId1, "1236", 300);
createResultRequest = new Request("PUT", AnomalyDetectorsIndex.jobResultsAliasedName(jobId1) + "/_doc/" + id);
createResultRequest.setJsonEntity(Strings.format("""
{"job_id":"%s", "timestamp": "%s", "result_type":"bucket", "bucket_span": "%s"}""", jobId1, "1236", 1));
client().performRequest(createResultRequest);
refreshAllIndices();
responseAsString = EntityUtils.toString(
client().performRequest(new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId1 + "/results/buckets"))
.getEntity()
);
assertThat(responseAsString, containsString("\"count\":2"));
responseAsString = EntityUtils.toString(
client().performRequest(new Request("GET", AnomalyDetectorsIndex.jobResultsAliasedName(jobId1) + "/_search")).getEntity()
);
assertThat(responseAsString, containsString("\"value\":2"));
}
{ // create jobId2 docs
String id = Strings.format("%s_bucket_%s_%s", jobId2, "1234", 300);
Request createResultRequest = new Request("PUT", AnomalyDetectorsIndex.jobResultsAliasedName(jobId2) + "/_doc/" + id);
createResultRequest.setJsonEntity(Strings.format("""
{"job_id":"%s", "timestamp": "%s", "result_type":"bucket", "bucket_span": "%s"}""", jobId2, "1234", 1));
client().performRequest(createResultRequest);
id = Strings.format("%s_bucket_%s_%s", jobId2, "1236", 300);
createResultRequest = new Request("PUT", AnomalyDetectorsIndex.jobResultsAliasedName(jobId2) + "/_doc/" + id);
createResultRequest.setJsonEntity(Strings.format("""
{"job_id":"%s", "timestamp": "%s", "result_type":"bucket", "bucket_span": "%s"}""", jobId2, "1236", 1));
client().performRequest(createResultRequest);
refreshAllIndices();
responseAsString = EntityUtils.toString(
client().performRequest(new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId2 + "/results/buckets"))
.getEntity()
);
assertThat(responseAsString, containsString("\"count\":2"));
responseAsString = EntityUtils.toString(
client().performRequest(new Request("GET", AnomalyDetectorsIndex.jobResultsAliasedName(jobId2) + "/_search")).getEntity()
);
assertThat(responseAsString, containsString("\"value\":2"));
}
client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId1));
// check that indices still exist, but no longer have job1 entries and aliases are gone
responseAsString = getAliases();
assertThat(responseAsString, not(containsString(AnomalyDetectorsIndex.jobResultsAliasedName(jobId1))));
assertThat(responseAsString, containsString(AnomalyDetectorsIndex.jobResultsAliasedName(jobId2))); // job2 still exists
responseAsString = getMlResultsIndices();
assertThat(responseAsString, containsString(AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-" + indexName + "-000001"));
refreshAllIndices();
responseAsString = EntityUtils.toString(
client().performRequest(
new Request("GET", AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-" + indexName + "-000001" + "/_count")
).getEntity()
);
assertThat(responseAsString, containsString("\"count\":2"));
// Delete the second job and verify aliases are gone, and original concrete/custom index is gone
client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId2));
responseAsString = getAliases();
assertThat(responseAsString, not(containsString(AnomalyDetectorsIndex.jobResultsAliasedName(jobId2))));
refreshAllIndices();
responseAsString = getMlResultsIndices();
assertThat(
responseAsString,
not(containsString(AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-" + indexName + "-000001"))
);
}
// Creating a second job in the shared results index must merge the new job's
// by_field_name into the shared index mapping without losing the first job's field.
public void testCreateJobInSharedIndexUpdatesMapping() throws Exception {
String jobTemplate = """
{
"analysis_config" : {
"detectors" :[{"function":"metric","field_name":"metric", "by_field_name":"%s"}]
},
"data_description": {}
}""";
String jobId1 = "create-job-in-shared-index-updates-mapping-job-1";
String byFieldName1 = "responsetime";
String jobId2 = "create-job-in-shared-index-updates-mapping-job-2";
String byFieldName2 = "cpu-usage";
putJob(jobId1, Strings.format(jobTemplate, byFieldName1));
String mlIndicesResponseAsString = getMlResultsIndices();
assertThat(
mlIndicesResponseAsString,
containsString(
"green open " + AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT
)
);
// Check the index mapping contains the first by_field_name
Request getResultsMappingRequest = new Request(
"GET",
AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX
+ AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT
+ MlIndexAndAlias.FIRST_INDEX_SIX_DIGIT_SUFFIX
+ "/_mapping"
);
getResultsMappingRequest.addParameter("pretty", null);
String resultsMappingAfterJob1 = EntityUtils.toString(client().performRequest(getResultsMappingRequest).getEntity());
assertThat(resultsMappingAfterJob1, containsString(byFieldName1));
assertThat(resultsMappingAfterJob1, not(containsString(byFieldName2)));
putJob(jobId2, Strings.format(jobTemplate, byFieldName2));
// Check the index mapping now contains both fields
String resultsMappingAfterJob2 = EntityUtils.toString(client().performRequest(getResultsMappingRequest).getEntity());
assertThat(resultsMappingAfterJob2, containsString(byFieldName1));
assertThat(resultsMappingAfterJob2, containsString(byFieldName2));
}
// Same mapping-merge check as testCreateJobInSharedIndexUpdatesMapping, but for a
// custom shared results index ("results_index_name": "shared-index").
public void testCreateJobInCustomSharedIndexUpdatesMapping() throws Exception {
String jobTemplate = """
{
"analysis_config" : {
"detectors" :[{"function":"metric","field_name":"metric", "by_field_name":"%s"}]
},
"data_description": {},
"results_index_name" : "shared-index"}""";
String jobId1 = "create-job-in-custom-shared-index-updates-mapping-job-1";
String byFieldName1 = "responsetime";
String jobId2 = "create-job-in-custom-shared-index-updates-mapping-job-2";
String byFieldName2 = "cpu-usage";
putJob(jobId1, Strings.format(jobTemplate, byFieldName1));
// Check the index mapping contains the first by_field_name
Request getResultsMappingRequest = new Request(
"GET",
AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-shared-index-000001/_mapping"
);
getResultsMappingRequest.addParameter("pretty", null);
String resultsMappingAfterJob1 = EntityUtils.toString(client().performRequest(getResultsMappingRequest).getEntity());
assertThat(resultsMappingAfterJob1, containsString(byFieldName1));
assertThat(resultsMappingAfterJob1, not(containsString(byFieldName2)));
putJob(jobId2, Strings.format(jobTemplate, byFieldName2));
// Check the index mapping now contains both fields
String resultsMappingAfterJob2 = EntityUtils.toString(client().performRequest(getResultsMappingRequest).getEntity());
assertThat(resultsMappingAfterJob2, containsString(byFieldName1));
assertThat(resultsMappingAfterJob2, containsString(byFieldName2));
}
// A second job whose by_field_name clashes with an existing mapping (object vs leaf
// field, e.g. "response" vs "response.time") must be rejected with advice to use a
// dedicated results index.
public void testCreateJob_WithClashingFieldMappingsFails() throws Exception {
String jobTemplate = """
{
"analysis_config" : {
"detectors" :[{"function":"metric","field_name":"metric", "by_field_name":"%s"}]
},
"data_description": {}
}""";
String jobId1 = "job-with-response-field";
String byFieldName1;
String jobId2 = "job-will-fail-with-mapping-error-on-response-field";
String byFieldName2;
// we should get the friendly advice no matter which way around the clashing fields are seen
if (randomBoolean()) {
byFieldName1 = "response";
byFieldName2 = "response.time";
} else {
byFieldName1 = "response.time";
byFieldName2 = "response";
}
putJob(jobId1, Strings.format(jobTemplate, byFieldName1));
ResponseException e = expectThrows(ResponseException.class, () -> putJob(jobId2, Strings.format(jobTemplate, byFieldName2)));
assertThat(
e.getMessage(),
containsString(
"This job would cause a mapping clash with existing field [response] - "
+ "avoid the clash by assigning a dedicated results index"
)
);
}
// With cluster.persistent_tasks.allocation.enable set to "none", opening a job must fail
// with 429 and a message naming that setting. The setting is reverted in a finally block
// so later tests are not affected.
public void testOpenJobFailsWhenPersistentTaskAssignmentDisabled() throws Exception {
    String jobId = "open-job-with-persistent-task-assignment-disabled";
    createFarequoteJob(jobId);
    Request disablePersistentTaskAssignmentRequest = new Request("PUT", "_cluster/settings");
    disablePersistentTaskAssignmentRequest.setJsonEntity("""
        {
          "persistent": {
            "cluster.persistent_tasks.allocation.enable": "none"
          }
        }""");
    Response disablePersistentTaskAssignmentResponse = client().performRequest(disablePersistentTaskAssignmentRequest);
    assertThat(entityAsMap(disablePersistentTaskAssignmentResponse), hasEntry("acknowledged", true));
    try {
        ResponseException exception = expectThrows(ResponseException.class, () -> openJob(jobId));
        assertThat(exception.getResponse().getStatusLine().getStatusCode(), equalTo(429));
        assertThat(
            EntityUtils.toString(exception.getResponse().getEntity()),
            containsString(
                "Cannot open jobs because persistent task assignment is disabled by the "
                    + "[cluster.persistent_tasks.allocation.enable] setting"
            )
        );
    } finally {
        // Try to revert the cluster setting change even if the test fails,
        // because otherwise this setting will cause many other tests to fail
        Request enablePersistentTaskAssignmentRequest = new Request("PUT", "_cluster/settings");
        enablePersistentTaskAssignmentRequest.setJsonEntity("""
            {
              "persistent": {
                "cluster.persistent_tasks.allocation.enable": "all"
              }
            }""");
        // FIX: previously this re-sent disablePersistentTaskAssignmentRequest, so the
        // setting was never actually reverted and enablePersistentTaskAssignmentRequest
        // was dead code.
        Response enablePersistentTaskAssignmentResponse = client().performRequest(enablePersistentTaskAssignmentRequest);
        assertThat(entityAsMap(enablePersistentTaskAssignmentResponse), hasEntry("acknowledged", true));
    }
}
// Deleting a job keeps the shared results index (it is shared by default) but removes
// the job's documents from it and makes the job's stats endpoint 404.
public void testDeleteJob() throws Exception {
String jobId = "delete-job-job";
String indexName = AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT
+ MlIndexAndAlias.FIRST_INDEX_SIX_DIGIT_SUFFIX;
createFarequoteJob(jobId);
// Use _cat/indices/.ml-anomalies-* instead of _cat/indices/_all to workaround https://github.com/elastic/elasticsearch/issues/45652
String indicesBeforeDelete = EntityUtils.toString(
client().performRequest(new Request("GET", "/_cat/indices/" + AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "*"))
.getEntity()
);
assertThat(indicesBeforeDelete, containsString(indexName));
client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId));
// check that the index still exists (it's shared by default)
String indicesAfterDelete = EntityUtils.toString(
client().performRequest(new Request("GET", "/_cat/indices/" + AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "*"))
.getEntity()
);
assertThat(indicesAfterDelete, containsString(indexName));
waitUntilIndexIsEmpty(indexName);
// check that the job itself is gone
expectThrows(
ResponseException.class,
() -> client().performRequest(new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"))
);
}
// Posts three records where the third is earlier than the second but still inside the
// same 1h bucket; the job stats must show zero out-of-order records and all three processed.
public void testOutOfOrderData() throws Exception {
String jobId = "job-with-out-of-order-docs";
createFarequoteJob(jobId);
openJob(jobId);
Request postDataRequest = new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_data");
// Post data is deprecated, so expect a deprecation warning
postDataRequest.setOptions(POST_DATA);
// Bucket span is 1h (3600s). So, posting data within the same hour should not result in out of order data
postDataRequest.setJsonEntity("{ \"airline\":\"LOT\", \"responsetime\":100, \"time\":\"2019-07-01 00:00:00Z\" }");
client().performRequest(postDataRequest);
postDataRequest.setJsonEntity("{ \"airline\":\"LOT\", \"responsetime\":100, \"time\":\"2019-07-01 00:30:00Z\" }");
client().performRequest(postDataRequest);
// out of order, but in the same time bucket
postDataRequest.setJsonEntity("{ \"airline\":\"LOT\", \"responsetime\":100, \"time\":\"2019-07-01 00:10:00Z\" }");
client().performRequest(postDataRequest);
Request flushRequest = new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_flush");
flushRequest.setOptions(FLUSH_OPTIONS);
Response flushResponse = client().performRequest(flushRequest);
assertThat(entityAsMap(flushResponse), hasEntry("flushed", true));
closeJob(jobId);
String stats = EntityUtils.toString(
client().performRequest(new Request("GET", "_ml/anomaly_detectors/" + jobId + "/_stats")).getEntity()
);
// assert 2019-07-01 00:30:00Z
assertThat(stats, containsString("\"latest_record_timestamp\":1561941000000"))
assertThat(stats, containsString("\"out_of_order_timestamp_count\":0"));
assertThat(stats, containsString("\"processed_record_count\":3"));
client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId));
}
// After processing data the job writes a TimingStats document to the shared index;
// deleting the job must delete that document (and everything else job-related) and
// make the job's stats endpoint 404.
public void testDeleteJob_TimingStatsDocumentIsDeleted() throws Exception {
String jobId = "delete-job-with-timing-stats-document-job";
String indexName = AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT
+ MlIndexAndAlias.FIRST_INDEX_SIX_DIGIT_SUFFIX;
createFarequoteJob(jobId);
assertThat(
EntityUtils.toString(client().performRequest(new Request("GET", indexName + "/_count")).getEntity()),
containsString("\"count\":0")
); // documents related to the job do not exist yet
openJob(jobId);
Request postDataRequest = new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_data");
// Post data is deprecated, so expect a deprecation warning
postDataRequest.setOptions(POST_DATA);
// NOTE(review): these records use "response_time" while the farequote job analyzes
// "responsetime"; the test only asserts bucket_count so this appears deliberate or
// harmless — confirm before changing.
postDataRequest.setJsonEntity("""
{ "airline":"LOT", "response_time":100, "time":"2019-07-01 00:00:00Z" }""")
client().performRequest(postDataRequest);
postDataRequest.setJsonEntity("""
{ "airline":"LOT", "response_time":100, "time":"2019-07-01 02:00:00Z" }""");
client().performRequest(postDataRequest);
Request flushRequest = new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_flush");
flushRequest.setOptions(FLUSH_OPTIONS);
Response flushResponse = client().performRequest(flushRequest);
assertThat(entityAsMap(flushResponse), hasEntry("flushed", true));
closeJob(jobId);
String timingStatsDoc = EntityUtils.toString(
client().performRequest(new Request("GET", indexName + "/_doc/" + TimingStats.documentId(jobId))).getEntity()
);
assertThat(timingStatsDoc, containsString("\"bucket_count\":2")); // TimingStats doc exists, 2 buckets have been processed
client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId));
waitUntilIndexIsEmpty(indexName); // when job is being deleted, it also deletes all related documents from the shared index
// check that the TimingStats documents got deleted
ResponseException exception = expectThrows(
ResponseException.class,
() -> client().performRequest(new Request("GET", indexName + "/_doc/" + TimingStats.documentId(jobId)))
);
assertThat(exception.getResponse().getStatusLine().getStatusCode(), equalTo(404));
// check that the job itself is gone
exception = expectThrows(
ResponseException.class,
() -> client().performRequest(new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"))
);
assertThat(exception.getResponse().getStatusLine().getStatusCode(), equalTo(404));
}
// Same as testDeleteJob but via the async path (wait_for_completion=false): the DELETE
// returns a task id, which is then awaited via the tasks API before verifying cleanup.
public void testDeleteJobAsync() throws Exception {
String jobId = "delete-job-async-job";
String indexName = AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT
+ MlIndexAndAlias.FIRST_INDEX_SIX_DIGIT_SUFFIX;
createFarequoteJob(jobId);
// Use _cat/indices/.ml-anomalies-* instead of _cat/indices/_all to workaround https://github.com/elastic/elasticsearch/issues/45652
String indicesBeforeDelete = EntityUtils.toString(
client().performRequest(new Request("GET", "/_cat/indices/" + AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "*"))
.getEntity()
);
assertThat(indicesBeforeDelete, containsString(indexName));
Response response = client().performRequest(
new Request("DELETE", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "?wait_for_completion=false")
);
// Wait for task to complete
String taskId = extractTaskId(response);
Response taskResponse = client().performRequest(new Request("GET", "_tasks/" + taskId + "?wait_for_completion=true"));
assertThat(EntityUtils.toString(taskResponse.getEntity()), containsString("\"acknowledged\":true"));
// check that the index still exists (it's shared by default)
String indicesAfterDelete = EntityUtils.toString(
client().performRequest(new Request("GET", "/_cat/indices/" + AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "*"))
.getEntity()
);
assertThat(indicesAfterDelete, containsString(indexName));
waitUntilIndexIsEmpty(indexName);
// check that the job itself is gone
expectThrows(
ResponseException.class,
() -> client().performRequest(new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"))
);
}
/**
 * Waits (via {@code assertBusy}) until the given index reports a document count of zero.
 *
 * @param indexName concrete index or alias to poll with {@code GET <index>/_count}
 * @throws Exception if the index does not become empty within the assertBusy timeout
 */
private void waitUntilIndexIsEmpty(String indexName) throws Exception {
    assertBusy(() -> {
        try {
            String count = EntityUtils.toString(client().performRequest(new Request("GET", indexName + "/_count")).getEntity());
            assertThat(count, containsString("\"count\":0"));
        } catch (Exception e) {
            // Preserve the full cause instead of flattening to getMessage() (which may be
            // null), matching the AssertionError style used by the other assertBusy
            // blocks in this class; assertBusy still retries on AssertionError.
            throw new AssertionError(e);
        }
    });
}
/**
 * Extracts the task id from an async-delete response body of the form
 * {@code {"task":"<id>"}} using a regular expression over the whole body.
 */
private static String extractTaskId(Response response) throws IOException {
    String body = EntityUtils.toString(response.getEntity());
    Matcher taskIdMatcher = Pattern.compile(".*\"task\":.*\"(.*)\".*").matcher(body);
    assertTrue(taskIdMatcher.matches());
    return taskIdMatcher.group(1);
}
// Job deletion must still succeed when the results index was already deleted out from
// under it; afterwards neither the index nor the job's alias exists and stats 404s.
public void testDeleteJobAfterMissingIndex() throws Exception {
String jobId = "delete-job-after-missing-index-job";
String aliasName = AnomalyDetectorsIndex.jobResultsAliasedName(jobId);
String indexName = AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT
+ MlIndexAndAlias.FIRST_INDEX_SIX_DIGIT_SUFFIX;
createFarequoteJob(jobId);
// Use _cat/indices/.ml-anomalies-* instead of _cat/indices/_all to workaround https://github.com/elastic/elasticsearch/issues/45652
String indicesBeforeDelete = EntityUtils.toString(
client().performRequest(new Request("GET", "/_cat/indices/" + AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "*"))
.getEntity()
);
assertThat(indicesBeforeDelete, containsString(indexName));
// Manually delete the index so that we can test that deletion proceeds
// normally anyway
client().performRequest(new Request("DELETE", indexName));
client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId));
// check index was deleted
String indicesAfterDelete = EntityUtils.toString(
client().performRequest(new Request("GET", "/_cat/indices/" + AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "*"))
.getEntity()
);
assertThat(indicesAfterDelete, not(containsString(aliasName)));
assertThat(indicesAfterDelete, not(containsString(indexName)));
expectThrows(
ResponseException.class,
() -> client().performRequest(new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"))
);
}
public void testDeleteJobAfterMissingAliases() throws Exception {
String jobId = "delete-job-after-missing-alias-job";
String readAliasName = AnomalyDetectorsIndex.jobResultsAliasedName(jobId);
String writeAliasName = AnomalyDetectorsIndex.resultsWriteAlias(jobId);
String indexName = AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT
+ MlIndexAndAlias.FIRST_INDEX_SIX_DIGIT_SUFFIX;
createFarequoteJob(jobId);
// With security enabled cat aliases throws an index_not_found_exception
// if no aliases have been created. In multi-node tests the alias may not
// appear immediately so wait here.
assertBusy(() -> {
try {
String aliases = EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/aliases")).getEntity());
assertThat(aliases, containsString(readAliasName));
assertThat(aliases, containsString(writeAliasName));
} catch (ResponseException e) {
throw new AssertionError(e);
}
});
// Manually delete the aliases so that we can test that deletion proceeds
// normally anyway
client().performRequest(new Request("DELETE", indexName + "/_alias/" + readAliasName));
client().performRequest(new Request("DELETE", indexName + "/_alias/" + writeAliasName));
// check aliases were deleted
expectThrows(ResponseException.class, () -> client().performRequest(new Request("GET", indexName + "/_alias/" + readAliasName)));
expectThrows(ResponseException.class, () -> client().performRequest(new Request("GET", indexName + "/_alias/" + writeAliasName)));
client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId));
}
public void testMultiIndexDelete() throws Exception {
String jobId = "multi-index-delete-job";
String indexName = AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT
+ MlIndexAndAlias.FIRST_INDEX_SIX_DIGIT_SUFFIX;
createFarequoteJob(jobId);
// Make the job's results span an extra two indices, i.e. three in total.
// To do this the job's results alias needs to encompass all three indices.
Request extraIndex1 = new Request("PUT", indexName + "-001");
extraIndex1.setJsonEntity(Strings.format("""
{
"aliases": {
"%s": {
"is_hidden": true,
"filter": {
"term": {
"%s": "%s"
}
}
}
}
}""", AnomalyDetectorsIndex.jobResultsAliasedName(jobId), Job.ID, jobId));
// Creating an index with a leading dot (".") is now deprecated.
// Ensure the ensuing warning exception doesn't cause a test case failure
try {
client().performRequest(extraIndex1);
} catch (org.elasticsearch.client.WarningFailureException e) {
logger.warn(e.getMessage());
}
Request extraIndex2 = new Request("PUT", indexName + "-002");
extraIndex2.setJsonEntity(Strings.format("""
{
"aliases": {
"%s": {
"is_hidden": true,
"filter": {
"term": {
"%s": "%s"
}
}
}
}
}""", AnomalyDetectorsIndex.jobResultsAliasedName(jobId), Job.ID, jobId));
// Creating an index with a leading dot (".") is now deprecated.
// Ensure the ensuing warning exception doesn't cause a test case failure
try {
client().performRequest(extraIndex2);
} catch (org.elasticsearch.client.WarningFailureException e) {
logger.warn(e.getMessage());
}
// Use _cat/indices/.ml-anomalies-* instead of _cat/indices/_all to workaround https://github.com/elastic/elasticsearch/issues/45652
String indicesBeforeDelete = EntityUtils.toString(
client().performRequest(new Request("GET", "/_cat/indices/" + AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "*"))
.getEntity()
);
assertThat(indicesBeforeDelete, containsString(indexName));
assertThat(indicesBeforeDelete, containsString(indexName + "-001"));
assertThat(indicesBeforeDelete, containsString(indexName + "-002"));
// Add some documents to each index to make sure the DBQ clears them out
Request createDoc0 = new Request("PUT", indexName + "/_doc/" + 123);
createDoc0.setJsonEntity(Strings.format("""
{"job_id":"%s", "timestamp": "%s", "bucket_span":%d, "result_type":"record"}""", jobId, 123, 1));
client().performRequest(createDoc0);
Request createDoc1 = new Request("PUT", indexName + "-001/_doc/" + 123);
createDoc1.setEntity(createDoc0.getEntity());
client().performRequest(createDoc1);
Request createDoc2 = new Request("PUT", indexName + "-002/_doc/" + 123);
createDoc2.setEntity(createDoc0.getEntity());
client().performRequest(createDoc2);
// Also index a few through the alias for the first job
Request createDoc3 = new Request("PUT", indexName + "/_doc/" + 456);
createDoc3.setEntity(createDoc0.getEntity());
client().performRequest(createDoc3);
refreshAllIndices();
// check for the documents
assertThat(
EntityUtils.toString(client().performRequest(new Request("GET", indexName + "/_count")).getEntity()),
containsString("\"count\":2")
);
assertThat(
EntityUtils.toString(client().performRequest(new Request("GET", indexName + "-001/_count")).getEntity()),
containsString("\"count\":1")
);
assertThat(
EntityUtils.toString(client().performRequest(new Request("GET", indexName + "-002/_count")).getEntity()),
containsString("\"count\":1")
);
// Delete
client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId));
refreshAllIndices();
// check that the default shared index still exists but is empty
String indicesAfterDelete = EntityUtils.toString(
client().performRequest(new Request("GET", "/_cat/indices/" + AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "*"))
.getEntity()
);
assertThat(indicesAfterDelete, containsString(indexName));
// other results indices should be deleted as this test job ID is the only job in those indices
assertThat(indicesAfterDelete, not(containsString(indexName + "-001")));
assertThat(indicesAfterDelete, not(containsString(indexName + "-002")));
assertThat(
EntityUtils.toString(client().performRequest(new Request("GET", indexName + "/_count")).getEntity()),
containsString("\"count\":0")
);
expectThrows(
ResponseException.class,
() -> client().performRequest(new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"))
);
}
public void testDelete_multipleRequest() throws Exception {
String jobId = "delete-job-multiple-times";
createFarequoteJob(jobId);
Map<Long, Response> responses = ConcurrentCollections.newConcurrentMap();
Map<Long, ResponseException> responseExceptions = ConcurrentCollections.newConcurrentMap();
AtomicReference<IOException> ioe = new AtomicReference<>();
AtomicInteger recreationGuard = new AtomicInteger(0);
AtomicReference<Response> recreationResponse = new AtomicReference<>();
AtomicReference<ResponseException> recreationException = new AtomicReference<>();
Runnable deleteJob = () -> {
boolean forceDelete = randomBoolean();
try {
String url = MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId;
if (forceDelete) {
url += "?force=true";
}
Response response = client().performRequest(new Request("DELETE", url));
responses.put(Thread.currentThread().getId(), response);
} catch (ResponseException re) {
responseExceptions.put(Thread.currentThread().getId(), re);
} catch (IOException e) {
ioe.set(e);
}
// Immediately after the first deletion finishes, recreate the job. This should pick up
// race conditions where another delete request deletes part of the newly created job.
if (recreationGuard.getAndIncrement() == 0) {
try {
recreationResponse.set(createFarequoteJob(jobId));
} catch (ResponseException re) {
recreationException.set(re);
} catch (IOException e) {
logger.error("Error trying to recreate the job", e);
ioe.set(e);
}
}
};
// The idea is to hit the situation where one request waits for
// the other to complete. This is difficult to schedule but
// hopefully it will happen in CI
int numThreads = 5;
Thread[] threads = new Thread[numThreads];
for (int i = 0; i < numThreads; i++) {
threads[i] = new Thread(deleteJob);
}
for (int i = 0; i < numThreads; i++) {
threads[i].start();
}
for (int i = 0; i < numThreads; i++) {
threads[i].join();
}
if (ioe.get() != null) {
// This looks redundant but the check is done so we can
// print the exception's error message
assertNull(ioe.get().getMessage(), ioe.get());
}
assertEquals(numThreads, responses.size() + responseExceptions.size());
// 404s are ok as it means the job had already been deleted.
for (ResponseException re : responseExceptions.values()) {
assertEquals(re.getMessage(), 404, re.getResponse().getStatusLine().getStatusCode());
}
for (Response response : responses.values()) {
assertEquals(EntityUtils.toString(response.getEntity()), 200, response.getStatusLine().getStatusCode());
}
assertNotNull(recreationResponse.get());
assertEquals(
EntityUtils.toString(recreationResponse.get().getEntity()),
200,
recreationResponse.get().getStatusLine().getStatusCode()
);
if (recreationException.get() != null) {
assertNull(recreationException.get().getMessage(), recreationException.get());
}
String expectedReadAliasString = Strings.format(
"""
"%s":{"filter":{"term":{"job_id":{"value":"%s"}}},"is_hidden":true}""",
AnomalyDetectorsIndex.jobResultsAliasedName(jobId),
jobId
);
String expectedWriteAliasString = Strings.format("""
"%s":{"is_hidden":true}""", AnomalyDetectorsIndex.resultsWriteAlias(jobId));
try {
// The idea of the code above is that the deletion is sufficiently time-consuming that
// all threads enter the deletion call before the first one exits it. Usually this happens,
// but in the case that it does not the job that is recreated may get deleted.
// It is not a error if the job does not exist but the following assertions
// will fail in that case.
client().performRequest(new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId));
// Check that the job aliases exist. These are the last thing to be deleted when a job is deleted, so
// if there's been a race between deletion and recreation these are what will be missing.
String aliases = getAliases();
assertThat(aliases, containsString(expectedReadAliasString));
assertThat(aliases, containsString(expectedWriteAliasString));
} catch (ResponseException missingJobException) {
// The job does not exist
assertThat(missingJobException.getResponse().getStatusLine().getStatusCode(), equalTo(404));
// The job aliases should be deleted
String aliases = getAliases();
assertThat(aliases, not(containsString(expectedReadAliasString)));
assertThat(aliases, not(containsString(expectedWriteAliasString)));
}
assertEquals(numThreads, recreationGuard.get());
}
private String getMlResultsIndices() throws IOException {
// Use _cat/indices/.ml-anomalies-* instead of _cat/indices/_all to workaround https://github.com/elastic/elasticsearch/issues/45652
return EntityUtils.toString(
client().performRequest(new Request("GET", "/_cat/indices/" + AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "*"))
.getEntity()
);
}
private String getAliases() throws IOException {
final Request aliasesRequest = new Request("GET", "/_aliases");
// Allow system index deprecation warnings - this can be removed once system indices are omitted from responses rather than
// triggering a deprecation warning.
aliasesRequest.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(warnings -> {
if (warnings.isEmpty()) {
return false;
} else if (warnings.size() > 1) {
return true;
} else {
return warnings.get(0).startsWith("this request accesses system indices:") == false;
}
}).build());
Response response = client().performRequest(aliasesRequest);
return EntityUtils.toString(response.getEntity());
}
private void openJob(String jobId) throws IOException {
Response response = openJob(jobId, Optional.empty());
assertThat(entityAsMap(response), hasEntry("opened", true));
}
private Response openJob(String jobId, Optional<TimeValue> timeout) throws IOException {
StringBuilder path = new StringBuilder(MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_open");
if (timeout.isPresent()) {
path.append("?timeout=" + timeout.get().getStringRep());
}
Response openResponse = client().performRequest(new Request("POST", path.toString()));
return openResponse;
}
private void closeJob(String jobId) throws IOException {
Response closeResponse = client().performRequest(
new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_close")
);
assertThat(entityAsMap(closeResponse), hasEntry("closed", true));
}
private Response putJob(String jobId, String jsonBody) throws IOException {
Request request = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId);
request.setJsonEntity(jsonBody);
return client().performRequest(request);
}
@After
public void clearMlState() throws Exception {
new MlRestTestStateCleaner(logger, adminClient()).resetFeatures();
// Don't check analytics jobs as they are independent of anomaly detection jobs and should not be created by this test.
waitForPendingTasks(adminClient(), taskName -> taskName.contains(MlTasks.DATA_FRAME_ANALYTICS_TASK_NAME));
// Finally, clean up any lingering persistent tasks (such as "_close", "_close[n]" etc.) that may negatively
// impact subsequent tests.
client().performRequest(new Request("POST", "/_tasks/_cancel"));
}
}
| MlJobIT |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestionOptionTests.java | {
"start": 1681,
"end": 8390
} | class ____ extends ESTestCase {
private static final ObjectParser<Map<String, Object>, Void> PARSER = new ObjectParser<>(
"CompletionOptionParser",
SearchResponseUtils.unknownMetaFieldConsumer,
HashMap::new
);
static {
SearchResponseUtils.declareInnerHitsParseFields(PARSER);
PARSER.declareString(
(map, value) -> map.put(Suggest.Suggestion.Entry.Option.TEXT.getPreferredName(), value),
Suggest.Suggestion.Entry.Option.TEXT
);
PARSER.declareFloat(
(map, value) -> map.put(Suggest.Suggestion.Entry.Option.SCORE.getPreferredName(), value),
Suggest.Suggestion.Entry.Option.SCORE
);
PARSER.declareObject(
(map, value) -> map.put(CompletionSuggestion.Entry.Option.CONTEXTS.getPreferredName(), value),
(p, c) -> parseContexts(p),
CompletionSuggestion.Entry.Option.CONTEXTS
);
}
private static Map<String, Set<String>> parseContexts(XContentParser parser) throws IOException {
Map<String, Set<String>> contexts = new HashMap<>();
while ((parser.nextToken()) != XContentParser.Token.END_OBJECT) {
ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser);
String key = parser.currentName();
ensureExpectedToken(XContentParser.Token.START_ARRAY, parser.nextToken(), parser);
Set<String> values = new HashSet<>();
while ((parser.nextToken()) != XContentParser.Token.END_ARRAY) {
ensureExpectedToken(XContentParser.Token.VALUE_STRING, parser.currentToken(), parser);
values.add(parser.text());
}
contexts.put(key, values);
}
return contexts;
}
public static Option parseOption(XContentParser parser) {
Map<String, Object> values = PARSER.apply(parser, null);
Text text = new Text((String) values.get(Suggest.Suggestion.Entry.Option.TEXT.getPreferredName()));
Float score = (Float) values.get(Suggest.Suggestion.Entry.Option.SCORE.getPreferredName());
@SuppressWarnings("unchecked")
Map<String, Set<String>> contexts = (Map<String, Set<String>>) values.get(
CompletionSuggestion.Entry.Option.CONTEXTS.getPreferredName()
);
if (contexts == null) {
contexts = Collections.emptyMap();
}
SearchHit hit = null;
// the option either prints SCORE or inlines the search hit
if (score == null) {
hit = SearchResponseUtils.searchHitFromMap(values);
score = hit.getScore();
}
CompletionSuggestion.Entry.Option option = new CompletionSuggestion.Entry.Option(-1, text, score, contexts);
option.setHit(hit);
return option;
}
public static Option createTestItem() {
Text text = new Text(randomAlphaOfLengthBetween(5, 15));
int docId = randomInt();
int numberOfContexts = randomIntBetween(0, 3);
Map<String, Set<String>> contexts = new HashMap<>();
for (int i = 0; i < numberOfContexts; i++) {
int numberOfValues = randomIntBetween(0, 3);
Set<String> values = new HashSet<>();
for (int v = 0; v < numberOfValues; v++) {
values.add(randomAlphaOfLengthBetween(5, 15));
}
contexts.put(randomAlphaOfLengthBetween(5, 15), values);
}
SearchHit hit = null;
float score = randomFloat();
if (randomBoolean()) {
hit = SearchHitTests.createTestItem(false, true);
score = hit.getScore();
}
Option option = new CompletionSuggestion.Entry.Option(docId, text, score, contexts);
option.setHit(hit);
if (hit != null) {
hit.decRef();
}
return option;
}
public void testFromXContent() throws IOException {
doTestFromXContent(false);
}
public void testFromXContentWithRandomFields() throws IOException {
doTestFromXContent(true);
}
private void doTestFromXContent(boolean addRandomFields) throws IOException {
Option option = createTestItem();
XContentType xContentType = randomFrom(XContentType.values());
boolean humanReadable = randomBoolean();
BytesReference originalBytes = toShuffledXContent(option, xContentType, ToXContent.EMPTY_PARAMS, humanReadable);
BytesReference mutated;
if (addRandomFields) {
// "contexts" is an object consisting of key/array pairs, we shouldn't add anything random there
// also there can be inner search hits fields inside this option, we need to exclude another couple of paths
// where we cannot add random stuff. We also exclude the root level, this is done for SearchHits as all unknown fields
// for SearchHit on a root level are interpreted as meta-fields and will be kept
Predicate<String> excludeFilter = (path) -> path.endsWith(CompletionSuggestion.Entry.Option.CONTEXTS.getPreferredName())
|| path.endsWith("highlight")
|| path.contains("fields")
|| path.contains("_source")
|| path.contains("inner_hits")
|| path.isEmpty();
mutated = insertRandomFields(xContentType, originalBytes, excludeFilter, random());
} else {
mutated = originalBytes;
}
Option parsed;
try (XContentParser parser = createParser(xContentType.xContent(), mutated)) {
parsed = parseOption(parser);
assertNull(parser.nextToken());
}
assertEquals(option.getText(), parsed.getText());
assertEquals(option.getHighlighted(), parsed.getHighlighted());
assertEquals(option.getScore(), parsed.getScore(), Float.MIN_VALUE);
assertEquals(option.collateMatch(), parsed.collateMatch());
assertEquals(option.getContexts(), parsed.getContexts());
assertToXContentEquivalent(originalBytes, toXContent(parsed, xContentType, humanReadable), xContentType);
}
public void testToXContent() throws IOException {
Map<String, Set<String>> contexts = Collections.singletonMap("key", Collections.singleton("value"));
CompletionSuggestion.Entry.Option option = new CompletionSuggestion.Entry.Option(1, new Text("someText"), 1.3f, contexts);
BytesReference xContent = toXContent(option, XContentType.JSON, randomBoolean());
assertEquals("""
{"text":"someText","score":1.3,"contexts":{"key":["value"]}}""", xContent.utf8ToString());
}
}
| CompletionSuggestionOptionTests |
java | apache__camel | components/camel-aws/camel-aws2-eventbridge/src/test/java/org/apache/camel/component/aws2/eventbridge/EventbridgeClientFactoryTest.java | {
"start": 1419,
"end": 3153
} | class ____ {
@Test
public void getStandardEventbridgeClientDefault() {
EventbridgeConfiguration eventbridgeConfiguration = new EventbridgeConfiguration();
EventbridgeInternalClient eventbridgeClient = EventbridgeClientFactory.getEventbridgeClient(eventbridgeConfiguration);
assertTrue(eventbridgeClient instanceof EventbridgeClientStandardImpl);
}
@Test
public void getStandardEventbridgeClient() {
EventbridgeConfiguration eventbridgeConfiguration = new EventbridgeConfiguration();
eventbridgeConfiguration.setUseDefaultCredentialsProvider(false);
EventbridgeInternalClient eventbridgeClient = EventbridgeClientFactory.getEventbridgeClient(eventbridgeConfiguration);
assertTrue(eventbridgeClient instanceof EventbridgeClientStandardImpl);
}
@Test
public void getIAMOptimizedEventbridgeClient() {
EventbridgeConfiguration eventbridgeConfiguration = new EventbridgeConfiguration();
eventbridgeConfiguration.setUseDefaultCredentialsProvider(true);
EventbridgeInternalClient eventbridgeClient = EventbridgeClientFactory.getEventbridgeClient(eventbridgeConfiguration);
assertTrue(eventbridgeClient instanceof EventbridgeClientIAMOptimizedImpl);
}
@Test
public void getSessionTokenEventbridgeClient() {
EventbridgeConfiguration eventbridgeConfiguration = new EventbridgeConfiguration();
eventbridgeConfiguration.setUseSessionCredentials(true);
EventbridgeInternalClient eventbridgeClient = EventbridgeClientFactory.getEventbridgeClient(eventbridgeConfiguration);
assertTrue(eventbridgeClient instanceof EventbridgeClientSessionTokenImpl);
}
}
| EventbridgeClientFactoryTest |
java | spring-projects__spring-boot | core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/web/WebProperties.java | {
"start": 9951,
"end": 16025
} | class ____ {
private boolean customized;
/**
* Maximum time the response should be cached, in seconds if no duration
* suffix is not specified.
*/
@DurationUnit(ChronoUnit.SECONDS)
private @Nullable Duration maxAge;
/**
* Indicate that the cached response can be reused only if re-validated
* with the server.
*/
private @Nullable Boolean noCache;
/**
* Indicate to not cache the response in any case.
*/
private @Nullable Boolean noStore;
/**
* Indicate that once it has become stale, a cache must not use the
* response without re-validating it with the server.
*/
private @Nullable Boolean mustRevalidate;
/**
* Indicate intermediaries (caches and others) that they should not
* transform the response content.
*/
private @Nullable Boolean noTransform;
/**
* Indicate that any cache may store the response.
*/
private @Nullable Boolean cachePublic;
/**
* Indicate that the response message is intended for a single user and
* must not be stored by a shared cache.
*/
private @Nullable Boolean cachePrivate;
/**
* Same meaning as the "must-revalidate" directive, except that it does
* not apply to private caches.
*/
private @Nullable Boolean proxyRevalidate;
/**
* Maximum time the response can be served after it becomes stale, in
* seconds if no duration suffix is not specified.
*/
@DurationUnit(ChronoUnit.SECONDS)
private @Nullable Duration staleWhileRevalidate;
/**
* Maximum time the response may be used when errors are encountered, in
* seconds if no duration suffix is not specified.
*/
@DurationUnit(ChronoUnit.SECONDS)
private @Nullable Duration staleIfError;
/**
* Maximum time the response should be cached by shared caches, in seconds
* if no duration suffix is not specified.
*/
@DurationUnit(ChronoUnit.SECONDS)
private @Nullable Duration sMaxAge;
public @Nullable Duration getMaxAge() {
return this.maxAge;
}
public void setMaxAge(@Nullable Duration maxAge) {
this.customized = true;
this.maxAge = maxAge;
}
public @Nullable Boolean getNoCache() {
return this.noCache;
}
public void setNoCache(@Nullable Boolean noCache) {
this.customized = true;
this.noCache = noCache;
}
public @Nullable Boolean getNoStore() {
return this.noStore;
}
public void setNoStore(@Nullable Boolean noStore) {
this.customized = true;
this.noStore = noStore;
}
public @Nullable Boolean getMustRevalidate() {
return this.mustRevalidate;
}
public void setMustRevalidate(@Nullable Boolean mustRevalidate) {
this.customized = true;
this.mustRevalidate = mustRevalidate;
}
public @Nullable Boolean getNoTransform() {
return this.noTransform;
}
public void setNoTransform(@Nullable Boolean noTransform) {
this.customized = true;
this.noTransform = noTransform;
}
public @Nullable Boolean getCachePublic() {
return this.cachePublic;
}
public void setCachePublic(@Nullable Boolean cachePublic) {
this.customized = true;
this.cachePublic = cachePublic;
}
public @Nullable Boolean getCachePrivate() {
return this.cachePrivate;
}
public void setCachePrivate(@Nullable Boolean cachePrivate) {
this.customized = true;
this.cachePrivate = cachePrivate;
}
public @Nullable Boolean getProxyRevalidate() {
return this.proxyRevalidate;
}
public void setProxyRevalidate(@Nullable Boolean proxyRevalidate) {
this.customized = true;
this.proxyRevalidate = proxyRevalidate;
}
public @Nullable Duration getStaleWhileRevalidate() {
return this.staleWhileRevalidate;
}
public void setStaleWhileRevalidate(@Nullable Duration staleWhileRevalidate) {
this.customized = true;
this.staleWhileRevalidate = staleWhileRevalidate;
}
public @Nullable Duration getStaleIfError() {
return this.staleIfError;
}
public void setStaleIfError(@Nullable Duration staleIfError) {
this.customized = true;
this.staleIfError = staleIfError;
}
public @Nullable Duration getSMaxAge() {
return this.sMaxAge;
}
public void setSMaxAge(@Nullable Duration sMaxAge) {
this.customized = true;
this.sMaxAge = sMaxAge;
}
public @Nullable CacheControl toHttpCacheControl() {
PropertyMapper map = PropertyMapper.get();
CacheControl control = createCacheControl();
map.from(this::getMustRevalidate).whenTrue().toCall(control::mustRevalidate);
map.from(this::getNoTransform).whenTrue().toCall(control::noTransform);
map.from(this::getCachePublic).whenTrue().toCall(control::cachePublic);
map.from(this::getCachePrivate).whenTrue().toCall(control::cachePrivate);
map.from(this::getProxyRevalidate).whenTrue().toCall(control::proxyRevalidate);
map.from(this::getStaleWhileRevalidate)
.to((duration) -> control.staleWhileRevalidate(duration.getSeconds(), TimeUnit.SECONDS));
map.from(this::getStaleIfError)
.to((duration) -> control.staleIfError(duration.getSeconds(), TimeUnit.SECONDS));
map.from(this::getSMaxAge)
.to((duration) -> control.sMaxAge(duration.getSeconds(), TimeUnit.SECONDS));
// check if cacheControl remained untouched
if (control.getHeaderValue() == null) {
return null;
}
return control;
}
private CacheControl createCacheControl() {
if (Boolean.TRUE.equals(this.noStore)) {
return CacheControl.noStore();
}
if (Boolean.TRUE.equals(this.noCache)) {
return CacheControl.noCache();
}
if (this.maxAge != null) {
return CacheControl.maxAge(this.maxAge.getSeconds(), TimeUnit.SECONDS);
}
return CacheControl.empty();
}
private boolean hasBeenCustomized() {
return this.customized;
}
}
}
}
}
| Cachecontrol |
java | mockito__mockito | mockito-core/src/test/java/org/mockitousage/stubbing/SmartNullsGenericBugTest.java | {
"start": 2139,
"end": 2190
} | class ____ extends AbstractDao<Entity> {}
}
| ConcreteDao |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapperReducerCleanup.java | {
"start": 4866,
"end": 4947
} | class ____ extends TextOutputFormat {
public static | TrackingTextOutputFormat |
java | micronaut-projects__micronaut-core | core/src/main/java/io/micronaut/core/annotation/AnnotationValueResolver.java | {
"start": 2236,
"end": 2551
} | enum ____
*/
default <E extends Enum<E>> EnumSet<E> enumValuesSet(@NonNull String member, @NonNull Class<E> enumType) {
E[] values = enumValues(member, enumType);
return values.length == 0 ? EnumSet.noneOf(enumType) : EnumSet.copyOf(Arrays.asList(values));
}
/**
* Return the | type |
java | apache__camel | components/camel-quartz/src/main/java/org/apache/camel/component/quartz/SchedulerInitTask.java | {
"start": 1073,
"end": 1384
} | interface ____ {
/**
* The task to run when initializing and starting the scheduler (task is only run once).
*
* @param scheduler the scheduler
* @throws Exception can be thrown if error in the task
*/
void initializeTask(Scheduler scheduler) throws Exception;
}
| SchedulerInitTask |
java | google__dagger | hilt-android/main/java/dagger/hilt/android/internal/lifecycle/HiltViewModelMap.java | {
"start": 1096,
"end": 1179
} | interface ____ {
/** Internal qualifier for the multibinding set of | HiltViewModelMap |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/functions/ProcessTableFunction.java | {
"start": 23890,
"end": 28905
} | interface ____<TimeType> {
/**
* Returns the timestamp of the currently processed event.
*
* <p>An event can be either the row of a table or a firing timer:
*
* <h1>Row event timestamp</h1>
*
* <p>The timestamp of the row currently being processed within the {@code eval()} method.
*
* <p>Powered by the function call's {@code on_time} argument, this method will return the
* content of the referenced time attribute column. Returns {@code null} if the {@code
* on_time} argument doesn't reference a time attribute column in the currently processed
* table.
*
* <h1>Timer event timestamp</h1>
*
* <p>The timestamp of the firing timer currently being processed within the {@code
* onTimer()} method.
*
* @return the event-time timestamp, or {@code null} if no timestamp is present
*/
TimeType time();
/**
* Returns the current event-time watermark.
*
* <p>Watermarks are generated in sources and sent through the topology for advancing the
* logical clock in each Flink subtask. The current watermark of a Flink subtask is the
* global minimum watermark of all inputs (i.e. across all parallel inputs and table
* partitions).
*
* <p>This method returns the current watermark of the Flink subtask that evaluates the PTF.
* Thus, the returned timestamp represents the entire Flink subtask, independent of the
* currently processed partition. This behavior is similar to a call to {@code SELECT
* CURRENT_WATERMARK(...)} in SQL.
*
* <p>If a watermark was not received from all inputs, the method returns {@code null}.
*
* <p>In case this method is called within the {@code onTimer()} method, the returned
* watermark is the triggering watermark that currently fires the timer.
*
* @return the current watermark of the Flink subtask, or {@code null} if no common logical
* time could be determined from the inputs
*/
TimeType currentWatermark();
/**
* Registers a timer under the given name.
*
* <p>The timer fires when the {@link #currentWatermark()} advances the logical clock of the
* Flink subtask to a timestamp later or equal to the desired timestamp. In other words: A
* timer only fires if a watermark was received from all inputs and the timestamp is smaller
* or equal to the minimum of all received watermarks.
*
* <p>Timers can be named for distinguishing them in the {@code onTimer()} method.
* Registering a timer under the same name twice will replace an existing timer.
*
* <p>Note: Because only PTFs taking set semantic tables support state, and timers are a
* special kind of state, at least one {@link ArgumentTrait#SET_SEMANTIC_TABLE} table
* argument must be declared.
*
* @param name identifier of the timer
* @param time timestamp when the timer should fire
*/
void registerOnTime(String name, TimeType time);
/**
* Registers a timer.
*
* <p>The timer fires when the {@link #currentWatermark()} advances the logical clock of the
* Flink subtask to a timestamp later or equal to the desired timestamp. In other words: A
* timer only fires if a watermark was received from all inputs and the timestamp is smaller
* or equal to the minimum of all received watermarks.
*
* <p>Only one timer can be registered for a given time.
*
* <p>Note: Because only PTFs taking set semantic tables support state, and timers are a
* special kind of state, at least one {@link ArgumentTrait#SET_SEMANTIC_TABLE} table
* argument must be declared.
*
* @param time timestamp when the timer should fire
*/
void registerOnTime(TimeType time);
/**
* Clears a timer that was previously registered under the given name.
*
* <p>The call is ignored if no timer can be found.
*
* @param name identifier of the timer
*/
void clearTimer(String name);
/**
* Clears a timer that was previously registered for a given time.
*
* <p>The call is ignored if no timer can be found. Named timers cannot be deleted with this
* method.
*
* @param time timestamp when the timer should have fired
*/
void clearTimer(TimeType time);
/** Deletes all timers within the virtual partition. */
void clearAllTimers();
}
/** Special {@link Context} that is available when the {@code onTimer()} method is called. */
@PublicEvolving
public | TimeContext |
java | spring-projects__spring-boot | module/spring-boot-devtools/src/main/java/org/springframework/boot/devtools/restart/RestartListener.java | {
"start": 813,
"end": 920
} | interface ____ {
/**
* Called before an application restart.
*/
void beforeRestart();
}
| RestartListener |
java | elastic__elasticsearch | x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/ArchiveUsageTransportAction.java | {
"start": 1295,
"end": 2638
} | class ____ extends XPackUsageFeatureTransportAction {
private final XPackLicenseState licenseState;
private final ProjectResolver projectResolver;
@Inject
public ArchiveUsageTransportAction(
TransportService transportService,
ClusterService clusterService,
ThreadPool threadPool,
ActionFilters actionFilters,
XPackLicenseState licenseState,
ProjectResolver projectResolver
) {
super(XPackUsageFeatureAction.ARCHIVE.name(), transportService, clusterService, threadPool, actionFilters);
this.licenseState = licenseState;
this.projectResolver = projectResolver;
}
@Override
protected void localClusterStateOperation(
Task task,
XPackUsageRequest request,
ClusterState state,
ActionListener<XPackUsageFeatureResponse> listener
) {
int numArchiveIndices = 0;
for (IndexMetadata indexMetadata : projectResolver.getProjectMetadata(state)) {
if (indexMetadata.getCreationVersion().isLegacyIndexVersion()) {
numArchiveIndices++;
}
}
listener.onResponse(
new XPackUsageFeatureResponse(new ArchiveFeatureSetUsage(ARCHIVE_FEATURE.checkWithoutTracking(licenseState), numArchiveIndices))
);
}
}
| ArchiveUsageTransportAction |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/plan/nodes/exec/stream/VectorSearchRestoreTest.java | {
"start": 1496,
"end": 1849
} | class ____ extends RestoreTestBase {
public VectorSearchRestoreTest() {
super(StreamExecVectorSearchTableFunction.class);
}
@Override
public List<TableTestProgram> programs() {
return Arrays.asList(
SYNC_VECTOR_SEARCH, ASYNC_VECTOR_SEARCH, VECTOR_SEARCH_WITH_RUNTIME_CONFIG);
}
}
| VectorSearchRestoreTest |
java | elastic__elasticsearch | test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java | {
"start": 6665,
"end": 7954
} | class ____ extends FilterDirectoryReader {
protected final SubReaderWrapper subReaderWrapper;
public DirectoryReaderWrapper(DirectoryReader in, SubReaderWrapper subReaderWrapper) throws IOException {
super(in, subReaderWrapper);
this.subReaderWrapper = subReaderWrapper;
}
}
public Engine.Searcher wrapSearcher(Engine.Searcher searcher) {
final IndexReader reader = newReader(searcher.getIndexReader());
/*
* pass the original searcher to the super.newSearcher() method to
* make sure this is the searcher that will be released later on.
* If we wrap an index reader here must not pass the wrapped version
* to the manager on release otherwise the reader will be closed too
* early. - good news, stuff will fail all over the place if we don't
* get this right here
*/
SearcherCloseable closeable = new SearcherCloseable(searcher, inFlightSearchers);
return new Engine.Searcher(
searcher.source(),
reader,
searcher.getSimilarity(),
searcher.getQueryCache(),
searcher.getQueryCachingPolicy(),
closeable
);
}
private static final | DirectoryReaderWrapper |
java | quarkusio__quarkus | extensions/cache/deployment/src/test/java/io/quarkus/cache/test/runtime/DefaultKeyCacheTest.java | {
"start": 486,
"end": 3385
} | class ____ {
private static final Object KEY = new Object();
@RegisterExtension
static final QuarkusUnitTest TEST = new QuarkusUnitTest().withApplicationRoot(jar -> jar.addClass(CachedService.class));
@Inject
CachedService cachedService;
@Test
public void testAllCacheAnnotations() {
// STEP 1
// Action: no-arg @CacheResult-annotated method call.
// Expected effect: method invoked and result cached.
// Verified by: STEP 2.
String value1 = cachedService.cachedMethod();
// STEP 2
// Action: same call as STEP 1.
// Expected effect: method not invoked and result coming from the cache.
// Verified by: same object reference between STEPS 1 and 2 results.
String value2 = cachedService.cachedMethod();
assertTrue(value1 == value2);
// STEP 3
// Action: @CacheResult-annotated method call with a key argument.
// Expected effect: method invoked and result cached.
// Verified by: different objects references between STEPS 2 and 3 results.
String value3 = cachedService.cachedMethodWithKey(KEY);
assertTrue(value2 != value3);
// STEP 4
// Action: default key cache entry invalidation.
// Expected effect: STEP 2 cache entry removed.
// Verified by: STEP 5.
cachedService.invalidate();
// STEP 5
// Action: same call as STEP 2.
// Expected effect: method invoked because of STEP 4 and result cached.
// Verified by: different objects references between STEPS 2 and 5 results.
String value5 = cachedService.cachedMethod();
assertTrue(value2 != value5);
// STEP 6
// Action: same call as STEP 3.
// Expected effect: method not invoked and result coming from the cache.
// Verified by: same object reference between STEPS 3 and 6 results.
String value6 = cachedService.cachedMethodWithKey(KEY);
assertTrue(value3 == value6);
// STEP 7
// Action: full cache invalidation.
// Expected effect: empty cache.
// Verified by: STEPS 8 and 9.
cachedService.invalidateAll();
// STEP 8
// Action: same call as STEP 5.
// Expected effect: method invoked because of STEP 7 and result cached.
// Verified by: different objects references between STEPS 5 and 8 results.
String value8 = cachedService.cachedMethod();
assertTrue(value5 != value8);
// STEP 9
// Action: same call as STEP 6.
// Expected effect: method invoked because of STEP 7 and result cached.
// Verified by: different objects references between STEPS 6 and 9 results.
String value9 = cachedService.cachedMethodWithKey(KEY);
assertTrue(value6 != value9);
}
@Singleton
static | DefaultKeyCacheTest |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/configurers/X509ConfigurerTests.java | {
"start": 9878,
"end": 10393
} | class ____ {
@Bean
SecurityFilterChain filterChain(HttpSecurity http) throws Exception {
// @formatter:off
http
.x509(withDefaults());
// @formatter:on
return http.build();
}
@Bean
UserDetailsService userDetailsService() {
UserDetails user = User.withDefaultPasswordEncoder()
.username("rod")
.password("password")
.roles("USER", "ADMIN")
.build();
return new InMemoryUserDetailsManager(user);
}
}
@Configuration
@EnableWebSecurity
static | DefaultsInLambdaConfig |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/testjar/JobKillCommitter.java | {
"start": 3575,
"end": 3676
} | class ____ a empty implementation of reducer method that
* does nothing
*/
public static | provides |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/component/file/FromFileMulticastToFilesTest.java | {
"start": 1065,
"end": 3886
} | class ____ extends ContextTestSupport {
@Test
public void testFromFileMulticastToFiles() throws Exception {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
from(fileUri("?initialDelay=0&delay=10")).multicast().pipeline()
.transform(body().prepend("HEADER:"))
.to(fileUri("out/?fileName=header.txt")).to("mock:header").end().pipeline()
.transform(body().prepend("FOOTER:"))
.to(fileUri("out/?fileName=footer.txt")).to("mock:footer").end().end()
.to("mock:end");
}
});
context.start();
MockEndpoint header = getMockEndpoint("mock:header");
header.expectedBodiesReceived("HEADER:foo");
header.expectedFileExists(testFile("out/header.txt"));
MockEndpoint footer = getMockEndpoint("mock:footer");
footer.expectedBodiesReceived("FOOTER:foo");
footer.expectedFileExists(testFile("out/footer.txt"));
MockEndpoint end = getMockEndpoint("mock:end");
end.expectedMessageCount(1);
end.expectedFileExists(testFile(".camel/foo.txt"));
template.sendBodyAndHeader(fileUri(), "foo", Exchange.FILE_NAME, "foo.txt");
assertMockEndpointsSatisfied();
}
@Test
public void testFromFileMulticastParallelToFiles() throws Exception {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
from(fileUri("?initialDelay=0&delay=10")).multicast().parallelProcessing().pipeline()
.transform(body().prepend("HEADER:"))
.to(fileUri("out/?fileName=header.txt")).to("mock:header").end().pipeline()
.transform(body().prepend("FOOTER:"))
.to(fileUri("out/?fileName=footer.txt")).to("mock:footer").end().end()
.to("mock:end");
}
});
context.start();
MockEndpoint header = getMockEndpoint("mock:header");
header.expectedBodiesReceived("HEADER:foo");
header.expectedFileExists(testFile("out/header.txt"));
MockEndpoint footer = getMockEndpoint("mock:footer");
footer.expectedBodiesReceived("FOOTER:foo");
footer.expectedFileExists(testFile("out/footer.txt"));
MockEndpoint end = getMockEndpoint("mock:end");
end.expectedMessageCount(1);
end.expectedFileExists(testFile(".camel/foo.txt"));
template.sendBodyAndHeader(fileUri(), "foo", Exchange.FILE_NAME, "foo.txt");
assertMockEndpointsSatisfied();
}
@Override
public boolean isUseRouteBuilder() {
return false;
}
}
| FromFileMulticastToFilesTest |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/mailbox/Mail.java | {
"start": 1381,
"end": 4205
} | class ____ {
private final MailOptionsImpl mailOptions;
/** The action to execute. */
private final ThrowingRunnable<? extends Exception> runnable;
/**
* The priority of the mail. The priority does not determine the order, but helps to hide
* upstream mails from downstream processors to avoid live/deadlocks.
*/
private final int priority;
/** The description of the mail that is used for debugging and error-reporting. */
private final String descriptionFormat;
private final Object[] descriptionArgs;
private final StreamTaskActionExecutor actionExecutor;
public Mail(
ThrowingRunnable<? extends Exception> runnable,
int priority,
String descriptionFormat,
Object... descriptionArgs) {
this(
MailboxExecutor.MailOptions.options(),
runnable,
priority,
descriptionFormat,
descriptionArgs);
}
public Mail(
MailboxExecutor.MailOptions mailOptions,
ThrowingRunnable<? extends Exception> runnable,
int priority,
String descriptionFormat,
Object... descriptionArgs) {
this(
mailOptions,
runnable,
priority,
StreamTaskActionExecutor.IMMEDIATE,
descriptionFormat,
descriptionArgs);
}
public Mail(
MailboxExecutor.MailOptions mailOptions,
ThrowingRunnable<? extends Exception> runnable,
int priority,
StreamTaskActionExecutor actionExecutor,
String descriptionFormat,
Object... descriptionArgs) {
this.mailOptions = (MailOptionsImpl) mailOptions;
this.runnable = Preconditions.checkNotNull(runnable);
this.priority = priority;
this.descriptionFormat =
descriptionFormat == null ? runnable.toString() : descriptionFormat;
this.descriptionArgs = Preconditions.checkNotNull(descriptionArgs);
this.actionExecutor = actionExecutor;
}
public MailboxExecutor.MailOptions getMailOptions() {
return mailOptions;
}
public int getPriority() {
/** See {@link MailboxExecutor.MailOptions#deferrable()} ()}. */
return mailOptions.isDeferrable() ? TaskMailbox.MIN_PRIORITY : priority;
}
public void tryCancel(boolean mayInterruptIfRunning) {
if (runnable instanceof Future) {
((Future<?>) runnable).cancel(mayInterruptIfRunning);
}
}
@Override
public String toString() {
return String.format(descriptionFormat, descriptionArgs);
}
public void run() throws Exception {
actionExecutor.runThrowing(runnable);
}
}
| Mail |
java | apache__camel | components/camel-spring-parent/camel-spring-xml/src/main/java/org/apache/camel/spring/xml/handler/CamelNamespaceHandler.java | {
"start": 19330,
"end": 39557
} | class ____ extends BeanDefinitionParser {
public CamelContextBeanDefinitionParser(Class<?> type) {
super(type, false);
}
@Override
protected void doParse(Element element, ParserContext parserContext, BeanDefinitionBuilder builder) {
doBeforeParse(element);
super.doParse(element, parserContext, builder);
String contextId = element.getAttribute("id");
boolean implicitId = false;
// lets avoid folks having to explicitly give an ID to a camel context
if (ObjectHelper.isEmpty(contextId)) {
// if no explicit id was set then use a default auto generated name
CamelContextNameStrategy strategy = new DefaultCamelContextNameStrategy();
contextId = strategy.getName();
element.setAttributeNS(null, "id", contextId);
implicitId = true;
}
// now lets parse the routes with JAXB
Binder<Node> binder;
try {
binder = getJaxbContext().createBinder();
} catch (JAXBException e) {
throw new BeanDefinitionStoreException("Failed to create the JAXB binder", e);
}
Object value = parseUsingJaxb(element, parserContext, binder);
CamelContextFactoryBean factoryBean = null;
if (value instanceof CamelContextFactoryBean) {
// set the property value with the JAXB parsed value
factoryBean = (CamelContextFactoryBean) value;
builder.addPropertyValue("id", contextId);
builder.addPropertyValue("implicitId", implicitId);
builder.addPropertyValue("restConfiguration", factoryBean.getRestConfiguration());
builder.addPropertyValue("rests", factoryBean.getRests());
builder.addPropertyValue("routeConfigurations", factoryBean.getRouteConfigurations());
builder.addPropertyValue("routeTemplates", factoryBean.getRouteTemplates());
builder.addPropertyValue("templatedRoutes", factoryBean.getTemplatedRoutes());
builder.addPropertyValue("routes", factoryBean.getRoutes());
builder.addPropertyValue("intercepts", factoryBean.getIntercepts());
builder.addPropertyValue("interceptFroms", factoryBean.getInterceptFroms());
builder.addPropertyValue("interceptSendToEndpoints", factoryBean.getInterceptSendToEndpoints());
builder.addPropertyValue("dataFormats", factoryBean.getDataFormats());
builder.addPropertyValue("transformers", factoryBean.getTransformers());
builder.addPropertyValue("validators", factoryBean.getValidators());
builder.addPropertyValue("onCompletions", factoryBean.getOnCompletions());
builder.addPropertyValue("onExceptions", factoryBean.getOnExceptions());
builder.addPropertyValue("routeConfigurationRefs", factoryBean.getRouteConfigurationRefs());
builder.addPropertyValue("routeTemplateRefs", factoryBean.getRouteTemplateRefs());
builder.addPropertyValue("builderRefs", factoryBean.getBuilderRefs());
builder.addPropertyValue("routeRefs", factoryBean.getRouteRefs());
builder.addPropertyValue("restRefs", factoryBean.getRestRefs());
builder.addPropertyValue("globalOptions", factoryBean.getGlobalOptions());
builder.addPropertyValue("packageScan", factoryBean.getPackageScan());
builder.addPropertyValue("contextScan", factoryBean.getContextScan());
if (factoryBean.getPackages().length > 0) {
builder.addPropertyValue("packages", factoryBean.getPackages());
}
builder.addPropertyValue("camelPropertyPlaceholder", factoryBean.getCamelPropertyPlaceholder());
builder.addPropertyValue("camelJMXAgent", factoryBean.getCamelJMXAgent());
builder.addPropertyValue("camelStreamCachingStrategy", factoryBean.getCamelStreamCachingStrategy());
builder.addPropertyValue("camelRouteController", factoryBean.getCamelRouteController());
builder.addPropertyValue("threadPoolProfiles", factoryBean.getThreadPoolProfiles());
builder.addPropertyValue("beansFactory", factoryBean.getBeansFactory());
builder.addPropertyValue("beans", factoryBean.getBeans());
builder.addPropertyValue("defaultServiceCallConfiguration", factoryBean.getDefaultServiceCallConfiguration());
builder.addPropertyValue("serviceCallConfigurations", factoryBean.getServiceCallConfigurations());
// add any depends-on
addDependsOn(factoryBean, builder);
}
NodeList list = element.getChildNodes();
int size = list.getLength();
for (int i = 0; i < size; i++) {
Node child = list.item(i);
if (child instanceof Element) {
Element childElement = (Element) child;
String localName = child.getLocalName();
if (localName.equals("endpoint")) {
registerEndpoint(childElement, parserContext, contextId);
} else if (localName.equals("routeBuilder")) {
addDependsOnToRouteBuilder(childElement, parserContext, contextId);
} else {
BeanDefinitionParser parser = parserMap.get(localName);
if (parser != null) {
BeanDefinition definition = parser.parse(childElement, parserContext);
String id = childElement.getAttribute("id");
if (ObjectHelper.isNotEmpty(id)) {
parserContext.registerComponent(new BeanComponentDefinition(definition, id));
// set the templates with the camel context
if (localName.equals("template") || localName.equals("fluentTemplate")
|| localName.equals("consumerTemplate")
|| localName.equals("proxy") || localName.equals("export")) {
// set the camel context
definition.getPropertyValues().addPropertyValue("camelContext",
new RuntimeBeanReference(contextId));
}
}
}
}
}
}
// register templates if not already defined
registerTemplates(element, parserContext, contextId);
// lets inject the namespaces into any namespace aware POJOs
injectNamespaces(element, binder);
// inject bean post processor so we can support @Produce etc.
// no bean processor element so lets create it by our self
injectBeanPostProcessor(element, parserContext, contextId, builder, factoryBean);
}
}
protected void addDependsOn(CamelContextFactoryBean factoryBean, BeanDefinitionBuilder builder) {
String dependsOn = factoryBean.getDependsOn();
if (ObjectHelper.isNotEmpty(dependsOn)) {
// comma, whitespace and semi colon is valid separators in Spring depends-on
String[] depends = dependsOn.split(",|;|\\s");
if (depends == null) {
throw new IllegalArgumentException("Cannot separate depends-on, was: " + dependsOn);
} else {
for (String depend : depends) {
depend = depend.trim();
LOG.debug("Adding dependsOn {} to CamelContext({})", depend, factoryBean.getId());
builder.addDependsOn(depend);
}
}
}
}
private void addDependsOnToRouteBuilder(Element childElement, ParserContext parserContext, String contextId) {
// setting the depends-on explicitly is required since Spring 3.0
String routeBuilderName = childElement.getAttribute("ref");
if (ObjectHelper.isNotEmpty(routeBuilderName)) {
// set depends-on to the context for a routeBuilder bean
try {
BeanDefinition definition = parserContext.getRegistry().getBeanDefinition(routeBuilderName);
Method getDependsOn = definition.getClass().getMethod("getDependsOn", new Class[] {});
String[] dependsOn = (String[]) getDependsOn.invoke(definition);
if (dependsOn == null || dependsOn.length == 0) {
dependsOn = new String[] { contextId };
} else {
String[] temp = new String[dependsOn.length + 1];
System.arraycopy(dependsOn, 0, temp, 0, dependsOn.length);
temp[dependsOn.length] = contextId;
dependsOn = temp;
}
Method method = definition.getClass().getMethod("setDependsOn", String[].class);
method.invoke(definition, (Object) dependsOn);
} catch (Exception e) {
// Do nothing here
}
}
}
protected void injectNamespaces(Element element, Binder<Node> binder) {
NodeList list = element.getChildNodes();
Namespaces namespaces = null;
int size = list.getLength();
for (int i = 0; i < size; i++) {
Node child = list.item(i);
if (child instanceof Element) {
Element childElement = (Element) child;
Object object = binder.getJAXBNode(child);
if (object instanceof NamespaceAware) {
NamespaceAware namespaceAware = (NamespaceAware) object;
if (namespaces == null) {
namespaces = NamespacesHelper.namespaces(element);
}
namespaces.configure(namespaceAware);
}
injectNamespaces(childElement, binder);
}
}
}
protected void injectBeanPostProcessor(
Element element, ParserContext parserContext, String contextId, BeanDefinitionBuilder builder,
CamelContextFactoryBean factoryBean) {
Element childElement = element.getOwnerDocument().createElement("beanPostProcessor");
element.appendChild(childElement);
String beanPostProcessorId = contextId + ":beanPostProcessor";
childElement.setAttribute("id", beanPostProcessorId);
BeanDefinition definition = beanPostProcessorParser.parse(childElement, parserContext);
// only register to camel context id as a String. Then we can look it up later
// otherwise we get a circular reference in spring and it will not allow custom bean post processing
// see more at CAMEL-1663
definition.getPropertyValues().addPropertyValue("camelId", contextId);
if (factoryBean != null && factoryBean.getBeanPostProcessorEnabled() != null) {
// configure early whether bean post processor is enabled or not
definition.getPropertyValues().addPropertyValue("enabled", factoryBean.getBeanPostProcessorEnabled());
}
builder.addPropertyReference("beanPostProcessor", beanPostProcessorId);
}
/**
* Used for auto registering producer, fluent producer and consumer templates if not already defined in XML.
*/
protected void registerTemplates(Element element, ParserContext parserContext, String contextId) {
boolean template = false;
boolean fluentTemplate = false;
boolean consumerTemplate = false;
NodeList list = element.getChildNodes();
int size = list.getLength();
for (int i = 0; i < size; i++) {
Node child = list.item(i);
if (child instanceof Element) {
Element childElement = (Element) child;
String localName = childElement.getLocalName();
if ("template".equals(localName)) {
template = true;
} else if ("fluentTemplate".equals(localName)) {
fluentTemplate = true;
} else if ("consumerTemplate".equals(localName)) {
consumerTemplate = true;
}
}
}
if (!template) {
// either we have not used template before or we have auto registered it already and therefore we
// need it to allow to do it so it can remove the existing auto registered as there is now a clash id
// since we have multiple camel contexts
boolean existing = autoRegisterMap.get("template") != null;
boolean inUse = false;
try {
inUse = parserContext.getRegistry().isBeanNameInUse("template");
} catch (BeanCreationException e) {
// Spring Eclipse Tooling may throw an exception when you edit the Spring XML online in Eclipse
// when the isBeanNameInUse method is invoked, so ignore this and continue (CAMEL-2739)
LOG.debug("Error checking isBeanNameInUse(template). This exception will be ignored", e);
}
if (!inUse || existing) {
String id = "template";
// auto create a template
Element templateElement = element.getOwnerDocument().createElement("template");
templateElement.setAttribute("id", id);
BeanDefinitionParser parser = parserMap.get("template");
BeanDefinition definition = parser.parse(templateElement, parserContext);
// auto register it
autoRegisterBeanDefinition(id, definition, parserContext, contextId);
}
}
if (!fluentTemplate) {
// either we have not used fluentTemplate before or we have auto registered it already and therefore we
// need it to allow to do it so it can remove the existing auto registered as there is now a clash id
// since we have multiple camel contexts
boolean existing = autoRegisterMap.get("fluentTemplate") != null;
boolean inUse = false;
try {
inUse = parserContext.getRegistry().isBeanNameInUse("fluentTemplate");
} catch (BeanCreationException e) {
// Spring Eclipse Tooling may throw an exception when you edit the Spring XML online in Eclipse
// when the isBeanNameInUse method is invoked, so ignore this and continue (CAMEL-2739)
LOG.debug("Error checking isBeanNameInUse(fluentTemplate). This exception will be ignored", e);
}
if (!inUse || existing) {
String id = "fluentTemplate";
// auto create a fluentTemplate
Element templateElement = element.getOwnerDocument().createElement("fluentTemplate");
templateElement.setAttribute("id", id);
BeanDefinitionParser parser = parserMap.get("fluentTemplate");
BeanDefinition definition = parser.parse(templateElement, parserContext);
// auto register it
autoRegisterBeanDefinition(id, definition, parserContext, contextId);
}
}
if (!consumerTemplate) {
// either we have not used template before or we have auto registered it already and therefore we
// need it to allow to do it so it can remove the existing auto registered as there is now a clash id
// since we have multiple camel contexts
boolean existing = autoRegisterMap.get("consumerTemplate") != null;
boolean inUse = false;
try {
inUse = parserContext.getRegistry().isBeanNameInUse("consumerTemplate");
} catch (BeanCreationException e) {
// Spring Eclipse Tooling may throw an exception when you edit the Spring XML online in Eclipse
// when the isBeanNameInUse method is invoked, so ignore this and continue (CAMEL-2739)
LOG.debug("Error checking isBeanNameInUse(consumerTemplate). This exception will be ignored", e);
}
if (!inUse || existing) {
String id = "consumerTemplate";
// auto create a template
Element templateElement = element.getOwnerDocument().createElement("consumerTemplate");
templateElement.setAttribute("id", id);
BeanDefinitionParser parser = parserMap.get("consumerTemplate");
BeanDefinition definition = parser.parse(templateElement, parserContext);
// auto register it
autoRegisterBeanDefinition(id, definition, parserContext, contextId);
}
}
}
private void autoRegisterBeanDefinition(
String id, BeanDefinition definition, ParserContext parserContext, String contextId) {
// it is a bit cumbersome to work with the spring bean definition parser
// as we kinda need to eagerly register the bean definition on the parser context
// and then later we might find out that we should not have done that in case we have multiple camel contexts
// that would have a id clash by auto registering the same bean definition with the same id such as a producer template
// see if we have already auto registered this id
BeanDefinition existing = autoRegisterMap.get(id);
if (existing == null) {
// no then add it to the map and register it
autoRegisterMap.put(id, definition);
parserContext.registerComponent(new BeanComponentDefinition(definition, id));
if (LOG.isDebugEnabled()) {
LOG.debug("Registered default: {} with id: {} on camel context: {}", definition.getBeanClassName(), id,
contextId);
}
} else {
// ups we have already registered it before with same id, but on another camel context
// this is not good so we need to remove all traces of this auto registering.
// end user must manually add the needed XML elements and provide unique ids access all camel context himself.
LOG.debug("Unregistered default: {} with id: {} as we have multiple camel contexts and they must use unique ids."
+ " You must define the definition in the XML file manually to avoid id clashes when using multiple camel contexts",
definition.getBeanClassName(), id);
parserContext.getRegistry().removeBeanDefinition(id);
}
}
private void registerEndpoint(Element childElement, ParserContext parserContext, String contextId) {
String id = childElement.getAttribute("id");
// must have an id to be registered
if (ObjectHelper.isNotEmpty(id)) {
// skip underscore as they are internal naming and should not be registered
if (id.startsWith("_")) {
LOG.debug("Skip registering endpoint starting with underscore: {}", id);
return;
}
BeanDefinition definition = endpointParser.parse(childElement, parserContext);
definition.getPropertyValues().addPropertyValue("camelContext", new RuntimeBeanReference(contextId));
// Need to add this dependency of CamelContext for Spring 3.0
try {
Method method = definition.getClass().getMethod("setDependsOn", String[].class);
method.invoke(definition, (Object) new String[] { contextId });
} catch (Exception e) {
// Do nothing here
}
parserContext.registerComponent(new BeanComponentDefinition(definition, id));
}
}
}
| CamelContextBeanDefinitionParser |
java | quarkusio__quarkus | extensions/hibernate-reactive/deployment/src/test/java/io/quarkus/hibernate/reactive/validation/ReactiveValidationModeMultipleTestCase.java | {
"start": 269,
"end": 1123
} | class ____ {
@RegisterExtension
static QuarkusUnitTest runner = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(MyEntity.class, ReactiveTestValidationResource.class)
.addAsResource("application-validation-mode-multiple.properties", "application.properties"));
@Test
public void testValidEntity() {
String entityName = "Post method should not persist an entity having a Size constraint of 50 on the name column if validation was enabled.";
RestAssured.given().body(entityName).when().post("/validation").then()
.body(is("entity name too long"));
}
@Test
public void testDDL() {
RestAssured.when().get("/validation").then()
.body(is("nullable: false"));
}
}
| ReactiveValidationModeMultipleTestCase |
java | google__guava | guava-gwt/src-super/com/google/common/collect/super/com/google/common/collect/ImmutableList.java | {
"start": 10060,
"end": 11381
} | class ____<E> extends ImmutableCollection.Builder<E> {
private final ArrayList<E> contents;
public Builder() {
contents = Lists.newArrayList();
}
Builder(int capacity) {
contents = Lists.newArrayListWithCapacity(capacity);
}
@CanIgnoreReturnValue
@Override
public Builder<E> add(E element) {
contents.add(checkNotNull(element));
return this;
}
@CanIgnoreReturnValue
@Override
public Builder<E> addAll(Iterable<? extends E> elements) {
super.addAll(elements);
return this;
}
@CanIgnoreReturnValue
@Override
public Builder<E> add(E... elements) {
checkNotNull(elements); // for GWT
super.add(elements);
return this;
}
@CanIgnoreReturnValue
@Override
public Builder<E> addAll(Iterator<? extends E> elements) {
super.addAll(elements);
return this;
}
@CanIgnoreReturnValue
Builder<E> combine(Builder<E> builder) {
checkNotNull(builder);
contents.addAll(builder.contents);
return this;
}
@Override
public ImmutableList<E> build() {
return copyOf(contents);
}
ImmutableList<E> buildSorted(Comparator<? super E> comparator) {
Collections.sort(contents, comparator);
return copyOf(contents);
}
}
}
| Builder |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/annotations/SchedulerSupport.java | {
"start": 2083,
"end": 2294
} | class ____ on RxJava's {@linkplain Schedulers#newThread() new thread scheduler}
* or takes timing information from it.
*/
String NEW_THREAD = "io.reactivex:new-thread";
/**
* The operator/ | runs |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxBooleanAggregatorFunctionTests.java | {
"start": 773,
"end": 1622
} | class ____ extends AggregatorFunctionTestCase {
@Override
protected SourceOperator simpleInput(BlockFactory blockFactory, int size) {
return new SequenceBooleanBlockSourceOperator(blockFactory, IntStream.range(0, size).mapToObj(l -> randomBoolean()).toList());
}
@Override
protected AggregatorFunctionSupplier aggregatorFunction() {
return new MaxBooleanAggregatorFunctionSupplier();
}
@Override
protected String expectedDescriptionOfAggregator() {
return "max of booleans";
}
@Override
public void assertSimpleOutput(List<Page> input, Block result) {
Boolean max = input.stream().flatMap(p -> allBooleans(p.getBlock(0))).max(Comparator.naturalOrder()).get();
assertThat(((BooleanBlock) result).getBoolean(0), equalTo(max));
}
}
| MaxBooleanAggregatorFunctionTests |
java | google__dagger | javatests/dagger/functional/membersinject/MembersInjectionOrdering.java | {
"start": 1310,
"end": 1362
} | class ____ {
@Inject First first;
}
static | Base |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleMetadata.java | {
"start": 1307,
"end": 6283
} | class ____ implements Metadata.ProjectCustom {
public static final String TYPE = "index_lifecycle";
public static final ParseField OPERATION_MODE_FIELD = new ParseField("operation_mode");
public static final ParseField POLICIES_FIELD = new ParseField("policies");
public static final IndexLifecycleMetadata EMPTY = new IndexLifecycleMetadata(Collections.emptySortedMap(), OperationMode.RUNNING);
@SuppressWarnings("unchecked")
public static final ConstructingObjectParser<IndexLifecycleMetadata, Void> PARSER = new ConstructingObjectParser<>(
TYPE,
a -> new IndexLifecycleMetadata(
((List<LifecyclePolicyMetadata>) a[0]).stream()
.collect(Collectors.toMap(LifecyclePolicyMetadata::getName, Function.identity())),
OperationMode.valueOf((String) a[1])
)
);
static {
PARSER.declareNamedObjects(ConstructingObjectParser.constructorArg(), (p, c, n) -> LifecyclePolicyMetadata.parse(p, n), v -> {
throw new IllegalArgumentException("ordered " + POLICIES_FIELD.getPreferredName() + " are not supported");
}, POLICIES_FIELD);
PARSER.declareString(ConstructingObjectParser.constructorArg(), OPERATION_MODE_FIELD);
}
private final Map<String, LifecyclePolicyMetadata> policyMetadatas;
private final OperationMode operationMode;
// a slightly different view of the policyMetadatas -- it's hot in a couple of places so we pre-calculate it
private final Map<String, LifecyclePolicy> policies;
private static Map<String, LifecyclePolicy> policiesMap(final Map<String, LifecyclePolicyMetadata> policyMetadatas) {
final Map<String, LifecyclePolicy> policies = new HashMap<>(policyMetadatas.size());
for (LifecyclePolicyMetadata policyMetadata : policyMetadatas.values()) {
LifecyclePolicy policy = policyMetadata.getPolicy();
policies.put(policy.getName(), policy);
}
return Collections.unmodifiableMap(policies);
}
public IndexLifecycleMetadata(Map<String, LifecyclePolicyMetadata> policies, OperationMode operationMode) {
this.policyMetadatas = Collections.unmodifiableMap(policies);
this.operationMode = operationMode;
this.policies = policiesMap(policyMetadatas);
}
public IndexLifecycleMetadata(StreamInput in) throws IOException {
int size = in.readVInt();
TreeMap<String, LifecyclePolicyMetadata> policies = new TreeMap<>();
for (int i = 0; i < size; i++) {
policies.put(in.readString(), new LifecyclePolicyMetadata(in));
}
this.policyMetadatas = policies;
this.operationMode = in.readEnum(OperationMode.class);
this.policies = policiesMap(policyMetadatas);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeMap(policyMetadatas, StreamOutput::writeWriteable);
out.writeEnum(operationMode);
}
public Map<String, LifecyclePolicyMetadata> getPolicyMetadatas() {
return policyMetadatas;
}
/**
* @deprecated use {@link LifecycleOperationMetadata#getILMOperationMode()} instead. This may be incorrect.
*/
@Deprecated(since = "8.7.0")
public OperationMode getOperationMode() {
return operationMode;
}
public Map<String, LifecyclePolicy> getPolicies() {
return policies;
}
@Override
public Diff<Metadata.ProjectCustom> diff(Metadata.ProjectCustom previousState) {
return new IndexLifecycleMetadataDiff((IndexLifecycleMetadata) previousState, this);
}
@Override
public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params ignored) {
return Iterators.concat(
ChunkedToXContentHelper.xContentObjectFields(POLICIES_FIELD.getPreferredName(), policyMetadatas),
Iterators.single((builder, params) -> builder.field(OPERATION_MODE_FIELD.getPreferredName(), operationMode))
);
}
@Override
public TransportVersion getMinimalSupportedVersion() {
return TransportVersion.minimumCompatible();
}
@Override
public String getWriteableName() {
return TYPE;
}
@Override
public EnumSet<Metadata.XContentContext> context() {
return Metadata.ALL_CONTEXTS;
}
@Override
public int hashCode() {
return Objects.hash(policyMetadatas, operationMode);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (obj.getClass() != getClass()) {
return false;
}
IndexLifecycleMetadata other = (IndexLifecycleMetadata) obj;
return Objects.equals(policyMetadatas, other.policyMetadatas) && Objects.equals(operationMode, other.operationMode);
}
@Override
public String toString() {
return Strings.toString(this, false, true);
}
public static | IndexLifecycleMetadata |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/LiteralsOnTheRight.java | {
"start": 541,
"end": 983
} | class ____ extends OptimizerRules.OptimizerExpressionRule<BinaryOperator<?, ?, ?, ?>> {
public LiteralsOnTheRight() {
super(OptimizerRules.TransformDirection.UP);
}
@Override
public BinaryOperator<?, ?, ?, ?> rule(BinaryOperator<?, ?, ?, ?> be, LogicalOptimizerContext ctx) {
return be.left() instanceof Literal && (be.right() instanceof Literal) == false ? be.swapLeftAndRight() : be;
}
}
| LiteralsOnTheRight |
java | micronaut-projects__micronaut-core | http-server-tck/src/main/java/io/micronaut/http/server/tck/CorsAssertion.java | {
"start": 1078,
"end": 3456
} | class ____ {
private final String vary;
private final String accessControlAllowCredentials;
private final String origin;
private final List<HttpMethod> allowMethods;
private final String maxAge;
private final Boolean allowPrivateNetwork;
private CorsAssertion(String vary,
String accessControlAllowCredentials,
String origin,
List<HttpMethod> allowMethods,
String maxAge,
Boolean allowPrivateNetwork) {
this.vary = vary;
this.accessControlAllowCredentials = accessControlAllowCredentials;
this.origin = origin;
this.allowMethods = allowMethods;
this.maxAge = maxAge;
this.allowPrivateNetwork = allowPrivateNetwork;
}
/**
* Validate the CORS assertions.
* @param response HTTP Response to run CORS assertions against it.
*/
public void validate(HttpResponse<?> response) {
if (StringUtils.isNotEmpty(vary)) {
assertEquals(vary, response.getHeaders().get(HttpHeaders.VARY));
}
if (StringUtils.isNotEmpty(accessControlAllowCredentials)) {
assertEquals(accessControlAllowCredentials, response.getHeaders().get(HttpHeaders.ACCESS_CONTROL_ALLOW_CREDENTIALS));
}
if (StringUtils.isNotEmpty(origin)) {
assertEquals(origin, response.getHeaders().get(HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN));
}
if (CollectionUtils.isNotEmpty(allowMethods)) {
assertEquals(allowMethods.stream().map(HttpMethod::toString).collect(Collectors.joining(",")),
response.getHeaders().get(HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS));
}
if (StringUtils.isNotEmpty(maxAge)) {
assertEquals(maxAge, response.getHeaders().get(HttpHeaders.ACCESS_CONTROL_MAX_AGE));
}
if (allowPrivateNetwork != null && allowPrivateNetwork) {
assertEquals(allowPrivateNetwork, response.getHeaders().get(HttpHeaders.ACCESS_CONTROL_ALLOW_PRIVATE_NETWORK, Boolean.class).orElse(Boolean.FALSE));
}
}
/**
*
* @return a CORS Assertion Builder.
*/
public static Builder builder() {
return new Builder();
}
/**
* CORS Assertion Builder.
*/
public static | CorsAssertion |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/pool/exception/OracleExceptionSorterTest_closeConn_3.java | {
"start": 529,
"end": 2330
} | class ____ extends TestCase {
private DruidDataSource dataSource;
protected void setUp() throws Exception {
assertEquals(0, JdbcStatManager.getInstance().getSqlList().size());
dataSource = new DruidDataSource();
dataSource.setExceptionSorter(new OracleExceptionSorter());
dataSource.setDriver(new OracleMockDriver());
dataSource.setUrl("jdbc:mock:xxx");
dataSource.setPoolPreparedStatements(true);
dataSource.setMaxOpenPreparedStatements(100);
}
@Override
protected void tearDown() throws Exception {
JdbcUtils.close(dataSource);
}
public void test_connect() throws Exception {
String sql = "SELECT 1";
{
DruidPooledConnection conn = dataSource.getConnection();
PreparedStatement pstmt = conn.prepareStatement(sql);
pstmt.execute();
pstmt.close();
conn.close();
assertEquals(0, dataSource.getActiveCount());
assertEquals(1, dataSource.getPoolingCount());
assertEquals(1, dataSource.getCreateCount());
}
DruidPooledConnection conn = dataSource.getConnection();
MockConnection mockConn = conn.unwrap(MockConnection.class);
assertNotNull(mockConn);
conn.setAutoCommit(false);
conn.setReadOnly(false);
SQLException exception = new SQLException("xx", "xxx", 28);
mockConn.setError(exception);
conn.close();
{
Connection conn2 = dataSource.getConnection();
conn2.close();
}
assertEquals(0, dataSource.getActiveCount());
assertEquals(1, dataSource.getPoolingCount());
assertEquals(2, dataSource.getCreateCount());
}
}
| OracleExceptionSorterTest_closeConn_3 |
java | apache__dubbo | dubbo-plugin/dubbo-mcp/src/main/java/org/apache/dubbo/mcp/core/McpApplicationDeployListener.java | {
"start": 2551,
"end": 11369
} | class ____ implements ApplicationDeployListener {
private static final ErrorTypeAwareLogger logger =
LoggerFactory.getErrorTypeAwareLogger(McpApplicationDeployListener.class);
private DubboServiceToolRegistry toolRegistry;
private boolean mcpEnable = true;
private volatile ServiceConfig<McpSseService> sseServiceConfig;
private volatile ServiceConfig<McpStreamableService> streamableServiceConfig;
private static DubboMcpSseTransportProvider dubboMcpSseTransportProvider;
private static DubboMcpStreamableTransportProvider dubboMcpStreamableTransportProvider;
private McpAsyncServer mcpAsyncServer;
@Override
public void onInitialize(ApplicationModel scopeModel) {}
@Override
public void onStarting(ApplicationModel applicationModel) {}
public static DubboMcpSseTransportProvider getDubboMcpSseTransportProvider() {
return dubboMcpSseTransportProvider;
}
public static DubboMcpStreamableTransportProvider getDubboMcpStreamableTransportProvider() {
return dubboMcpStreamableTransportProvider;
}
@Override
public void onStarted(ApplicationModel applicationModel) {
Configuration globalConf = ConfigurationUtils.getGlobalConfiguration(applicationModel);
mcpEnable = globalConf.getBoolean(McpConstant.SETTINGS_MCP_ENABLE, false);
if (!mcpEnable) {
logger.info("MCP service is disabled, skipping initialization");
return;
}
try {
logger.info("Initializing MCP server and dynamic service registration");
// Initialize service filter
McpServiceFilter mcpServiceFilter = new McpServiceFilter(applicationModel);
String protocol = globalConf.getString(McpConstant.SETTINGS_MCP_PROTOCOL, "streamable");
McpSchema.ServerCapabilities.ToolCapabilities toolCapabilities =
new McpSchema.ServerCapabilities.ToolCapabilities(true);
McpSchema.ServerCapabilities serverCapabilities =
new McpSchema.ServerCapabilities(null, null, null, null, null, toolCapabilities);
Integer sessionTimeout =
globalConf.getInt(McpConstant.SETTINGS_MCP_SESSION_TIMEOUT, McpConstant.DEFAULT_SESSION_TIMEOUT);
if ("streamable".equals(protocol)) {
dubboMcpStreamableTransportProvider =
new DubboMcpStreamableTransportProvider(new ObjectMapper(), sessionTimeout);
mcpAsyncServer = McpServer.async(getDubboMcpStreamableTransportProvider())
.capabilities(serverCapabilities)
.build();
} else if ("sse".equals(protocol)) {
dubboMcpSseTransportProvider = new DubboMcpSseTransportProvider(new ObjectMapper(), sessionTimeout);
mcpAsyncServer = McpServer.async(getDubboMcpSseTransportProvider())
.capabilities(serverCapabilities)
.build();
} else {
logger.error(
LoggerCodeConstants.COMMON_UNEXPECTED_EXCEPTION, "", "", "not support protocol " + protocol);
}
FrameworkModel frameworkModel = applicationModel.getFrameworkModel();
DefaultOpenAPIService defaultOpenAPIService = new DefaultOpenAPIService(frameworkModel);
DubboOpenApiToolConverter toolConverter = new DubboOpenApiToolConverter(defaultOpenAPIService);
DubboMcpGenericCaller genericCaller = new DubboMcpGenericCaller(applicationModel);
toolRegistry = new DubboServiceToolRegistry(mcpAsyncServer, toolConverter, genericCaller, mcpServiceFilter);
applicationModel.getBeanFactory().registerBean(toolRegistry);
Collection<ProviderModel> providerModels =
applicationModel.getApplicationServiceRepository().allProviderModels();
int registeredCount = 0;
for (ProviderModel pm : providerModels) {
int serviceRegisteredCount = toolRegistry.registerService(pm);
registeredCount += serviceRegisteredCount;
}
if ("streamable".equals(protocol)) {
exportMcpStreamableService(applicationModel);
} else {
exportMcpSSEService(applicationModel);
}
logger.info(
"MCP server initialized successfully, {} existing tools registered, dynamic registration enabled",
registeredCount);
} catch (Exception e) {
logger.error(
LoggerCodeConstants.COMMON_UNEXPECTED_EXCEPTION,
"",
"",
"MCP service initialization failed: " + e.getMessage(),
e);
}
}
@Override
public void onStopping(ApplicationModel applicationModel) {
if (toolRegistry != null) {
logger.info("MCP server stopping, clearing tool registry");
toolRegistry.clearRegistry();
}
}
@Override
public void onStopped(ApplicationModel applicationModel) {
if (mcpEnable && mcpAsyncServer != null) {
mcpAsyncServer.close();
}
}
@Override
public void onFailure(ApplicationModel applicationModel, Throwable cause) {}
private void exportMcpSSEService(ApplicationModel applicationModel) {
McpSseServiceImpl mcpSseServiceImpl =
applicationModel.getBeanFactory().getOrRegisterBean(McpSseServiceImpl.class);
ExecutorService internalServiceExecutor = applicationModel
.getFrameworkModel()
.getBeanFactory()
.getBean(FrameworkExecutorRepository.class)
.getInternalServiceExecutor();
this.sseServiceConfig = InternalServiceConfigBuilder.<McpSseService>newBuilder(applicationModel)
.interfaceClass(McpSseService.class)
.protocol(CommonConstants.TRIPLE, McpConstant.MCP_SERVICE_PROTOCOL)
.port(getRegisterPort(), String.valueOf(McpConstant.MCP_SERVICE_PORT))
.registryId("internal-mcp-registry")
.executor(internalServiceExecutor)
.ref(mcpSseServiceImpl)
.version(V1)
.build();
sseServiceConfig.export();
logger.info("MCP service exported on: {}", sseServiceConfig.getExportedUrls());
}
private void exportMcpStreamableService(ApplicationModel applicationModel) {
McpStreamableServiceImpl mcpStreamableServiceImpl =
applicationModel.getBeanFactory().getOrRegisterBean(McpStreamableServiceImpl.class);
ExecutorService internalServiceExecutor = applicationModel
.getFrameworkModel()
.getBeanFactory()
.getBean(FrameworkExecutorRepository.class)
.getInternalServiceExecutor();
this.streamableServiceConfig = InternalServiceConfigBuilder.<McpStreamableService>newBuilder(applicationModel)
.interfaceClass(McpStreamableService.class)
.protocol(CommonConstants.TRIPLE, McpConstant.MCP_SERVICE_PROTOCOL)
.port(getRegisterPort(), String.valueOf(McpConstant.MCP_SERVICE_PORT))
.registryId("internal-mcp-registry")
.executor(internalServiceExecutor)
.ref(mcpStreamableServiceImpl)
.version(V1)
.build();
streamableServiceConfig.export();
logger.info("MCP service exported on: {}", streamableServiceConfig.getExportedUrls());
}
/**
* Get the Mcp service register port.
* First, try to get config from user configuration, if not found, get from protocol config.
* Second, try to get config from protocol config, if not found, get a random available port.
*/
private int getRegisterPort() {
Configuration globalConf = ConfigurationUtils.getGlobalConfiguration(ApplicationModel.defaultModel());
int mcpPort = globalConf.getInt(McpConstant.SETTINGS_MCP_PORT, -1);
if (mcpPort != -1) {
return mcpPort;
}
ApplicationModel applicationModel = ApplicationModel.defaultModel();
Collection<ProtocolConfig> protocolConfigs =
applicationModel.getApplicationConfigManager().getProtocols();
if (CollectionUtils.isNotEmpty(protocolConfigs)) {
for (ProtocolConfig protocolConfig : protocolConfigs) {
if (CommonConstants.TRIPLE.equals(protocolConfig.getName())) {
return protocolConfig.getPort();
}
}
}
return NetUtils.getAvailablePort();
}
}
| McpApplicationDeployListener |
java | spring-cloud__spring-cloud-gateway | spring-cloud-gateway-server-webmvc/src/main/java/org/springframework/cloud/gateway/server/mvc/handler/GatewayServerResponseBuilder.java | {
"start": 6436,
"end": 7086
} | class ____ extends AbstractGatewayServerResponse {
private final WriteFunction writeFunction;
WriteFunctionResponse(HttpStatusCode statusCode, HttpHeaders headers, MultiValueMap<String, Cookie> cookies,
WriteFunction writeFunction) {
super(statusCode, headers, cookies);
Objects.requireNonNull(writeFunction, "WriteFunction must not be null");
this.writeFunction = writeFunction;
}
@Override
protected @Nullable ModelAndView writeToInternal(HttpServletRequest request, HttpServletResponse response,
Context context) throws Exception {
return this.writeFunction.write(request, response);
}
}
}
| WriteFunctionResponse |
java | elastic__elasticsearch | x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestCancelConnectorSyncJobAction.java | {
"start": 1003,
"end": 2096
} | class ____ extends BaseRestHandler {
private static final String CONNECTOR_SYNC_JOB_ID_PARAM = CONNECTOR_SYNC_JOB_ID_FIELD.getPreferredName();
@Override
public String getName() {
return "connector_sync_job_cancel_action";
}
@Override
public List<Route> routes() {
return List.of(
new Route(
RestRequest.Method.PUT,
"/" + EnterpriseSearch.CONNECTOR_SYNC_JOB_API_ENDPOINT + "/{" + CONNECTOR_SYNC_JOB_ID_PARAM + "}/_cancel"
)
);
}
@Override
protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException {
CancelConnectorSyncJobAction.Request request = new CancelConnectorSyncJobAction.Request(
restRequest.param(CONNECTOR_SYNC_JOB_ID_PARAM)
);
return restChannel -> client.execute(
CancelConnectorSyncJobAction.INSTANCE,
request,
new RestToXContentListener<>(restChannel, ConnectorUpdateActionResponse::status)
);
}
}
| RestCancelConnectorSyncJobAction |
java | quarkusio__quarkus | extensions/qute/deployment/src/test/java/io/quarkus/qute/deployment/i18n/MessageParamTest.java | {
"start": 391,
"end": 788
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(MyBundle.class));
@Test
public void testValidation() {
Assertions.assertEquals("Hello there!", MessageBundles.get(MyBundle.class).hello("there", "!"));
}
@MessageBundle
public | MessageParamTest |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/filter/FilterEventAdapter.java | {
"start": 816,
"end": 19821
} | class ____ extends FilterAdapter {
public FilterEventAdapter() {
}
public ConnectionProxy connection_connect(FilterChain chain, Properties info) throws SQLException {
connection_connectBefore(chain, info);
ConnectionProxy connection = super.connection_connect(chain, info);
connection_connectAfter(connection);
return connection;
}
public void connection_connectBefore(FilterChain chain, Properties info) {
}
public void connection_connectAfter(ConnectionProxy connection) {
}
@Override
public StatementProxy connection_createStatement(FilterChain chain,
ConnectionProxy connection) throws SQLException {
StatementProxy statement = super.connection_createStatement(chain, connection);
statementCreateAfter(statement);
return statement;
}
@Override
public StatementProxy connection_createStatement(FilterChain chain, ConnectionProxy connection, int resultSetType,
int resultSetConcurrency) throws SQLException {
StatementProxy statement = super.connection_createStatement(chain, connection, resultSetType,
resultSetConcurrency);
statementCreateAfter(statement);
return statement;
}
@Override
public StatementProxy connection_createStatement(FilterChain chain, ConnectionProxy connection, int resultSetType,
int resultSetConcurrency, int resultSetHoldability)
throws SQLException {
StatementProxy statement = super.connection_createStatement(chain, connection, resultSetType,
resultSetConcurrency, resultSetHoldability);
statementCreateAfter(statement);
return statement;
}
@Override
public CallableStatementProxy connection_prepareCall(FilterChain chain, ConnectionProxy connection, String sql)
throws SQLException {
CallableStatementProxy statement = super.connection_prepareCall(chain, connection, sql);
statementPrepareCallAfter(statement);
return statement;
}
@Override
public CallableStatementProxy connection_prepareCall(FilterChain chain, ConnectionProxy connection, String sql,
int resultSetType, int resultSetConcurrency)
throws SQLException {
CallableStatementProxy statement = super.connection_prepareCall(chain, connection, sql, resultSetType,
resultSetConcurrency);
statementPrepareCallAfter(statement);
return statement;
}
@Override
public CallableStatementProxy connection_prepareCall(FilterChain chain, ConnectionProxy connection, String sql,
int resultSetType, int resultSetConcurrency,
int resultSetHoldability) throws SQLException {
CallableStatementProxy statement = super.connection_prepareCall(chain, connection, sql, resultSetType,
resultSetConcurrency, resultSetHoldability);
statementPrepareCallAfter(statement);
return statement;
}
@Override
public PreparedStatementProxy connection_prepareStatement(FilterChain chain, ConnectionProxy connection, String sql)
throws SQLException {
PreparedStatementProxy statement = super.connection_prepareStatement(chain, connection, sql);
statementPrepareAfter(statement);
return statement;
}
@Override
public PreparedStatementProxy connection_prepareStatement(FilterChain chain, ConnectionProxy connection,
String sql, int autoGeneratedKeys) throws SQLException {
PreparedStatementProxy statement = super.connection_prepareStatement(chain, connection, sql, autoGeneratedKeys);
statementPrepareAfter(statement);
return statement;
}
@Override
public PreparedStatementProxy connection_prepareStatement(FilterChain chain, ConnectionProxy connection,
String sql, int resultSetType, int resultSetConcurrency)
throws SQLException {
PreparedStatementProxy statement = super.connection_prepareStatement(chain, connection, sql, resultSetType,
resultSetConcurrency);
statementPrepareAfter(statement);
return statement;
}
@Override
public PreparedStatementProxy connection_prepareStatement(FilterChain chain, ConnectionProxy connection,
String sql, int resultSetType, int resultSetConcurrency,
int resultSetHoldability) throws SQLException {
PreparedStatementProxy statement = super.connection_prepareStatement(chain, connection, sql, resultSetType,
resultSetConcurrency, resultSetHoldability);
statementPrepareAfter(statement);
return statement;
}
@Override
public PreparedStatementProxy connection_prepareStatement(FilterChain chain, ConnectionProxy connection,
String sql, int[] columnIndexes) throws SQLException {
PreparedStatementProxy statement = super.connection_prepareStatement(chain, connection, sql, columnIndexes);
statementPrepareAfter(statement);
return statement;
}
@Override
public PreparedStatementProxy connection_prepareStatement(FilterChain chain, ConnectionProxy connection,
String sql, String[] columnNames) throws SQLException {
PreparedStatementProxy statement = super.connection_prepareStatement(chain, connection, sql, columnNames);
statementPrepareAfter(statement);
return statement;
}
@Override
public boolean statement_execute(FilterChain chain, StatementProxy statement, String sql) throws SQLException {
statementExecuteBefore(statement, sql);
try {
boolean firstResult = super.statement_execute(chain, statement, sql);
statementExecuteAfter(statement, sql, firstResult);
return firstResult;
} catch (SQLException error) {
statement_executeErrorAfter(statement, sql, error);
throw error;
} catch (RuntimeException error) {
statement_executeErrorAfter(statement, sql, error);
throw error;
} catch (Error error) {
statement_executeErrorAfter(statement, sql, error);
throw error;
}
}
@Override
public boolean statement_execute(FilterChain chain, StatementProxy statement, String sql, int autoGeneratedKeys)
throws SQLException {
statementExecuteBefore(statement, sql);
try {
boolean firstResult = super.statement_execute(chain, statement, sql, autoGeneratedKeys);
this.statementExecuteAfter(statement, sql, firstResult);
return firstResult;
} catch (SQLException error) {
statement_executeErrorAfter(statement, sql, error);
throw error;
} catch (RuntimeException error) {
statement_executeErrorAfter(statement, sql, error);
throw error;
} catch (Error error) {
statement_executeErrorAfter(statement, sql, error);
throw error;
}
}
@Override
public boolean statement_execute(FilterChain chain, StatementProxy statement, String sql, int[] columnIndexes)
throws SQLException {
statementExecuteBefore(statement, sql);
try {
boolean firstResult = super.statement_execute(chain, statement, sql, columnIndexes);
this.statementExecuteAfter(statement, sql, firstResult);
return firstResult;
} catch (SQLException error) {
statement_executeErrorAfter(statement, sql, error);
throw error;
} catch (RuntimeException error) {
statement_executeErrorAfter(statement, sql, error);
throw error;
} catch (Error error) {
statement_executeErrorAfter(statement, sql, error);
throw error;
}
}
@Override
public boolean statement_execute(FilterChain chain, StatementProxy statement, String sql, String[] columnNames)
throws SQLException {
statementExecuteBefore(statement, sql);
try {
boolean firstResult = super.statement_execute(chain, statement, sql, columnNames);
this.statementExecuteAfter(statement, sql, firstResult);
return firstResult;
} catch (SQLException error) {
statement_executeErrorAfter(statement, sql, error);
throw error;
} catch (RuntimeException error) {
statement_executeErrorAfter(statement, sql, error);
throw error;
} catch (Error error) {
statement_executeErrorAfter(statement, sql, error);
throw error;
}
}
@Override
public int[] statement_executeBatch(FilterChain chain, StatementProxy statement) throws SQLException {
statementExecuteBatchBefore(statement);
try {
int[] result = super.statement_executeBatch(chain, statement);
statementExecuteBatchAfter(statement, result);
return result;
} catch (SQLException error) {
statement_executeErrorAfter(statement, statement.getBatchSql(), error);
throw error;
} catch (RuntimeException error) {
statement_executeErrorAfter(statement, statement.getBatchSql(), error);
throw error;
} catch (Error error) {
statement_executeErrorAfter(statement, statement.getBatchSql(), error);
throw error;
}
}
@Override
public ResultSetProxy statement_executeQuery(FilterChain chain, StatementProxy statement, String sql)
throws SQLException {
statementExecuteQueryBefore(statement, sql);
try {
ResultSetProxy resultSet = super.statement_executeQuery(chain, statement, sql);
if (resultSet != null) {
statementExecuteQueryAfter(statement, sql, resultSet);
resultSetOpenAfter(resultSet);
}
return resultSet;
} catch (SQLException error) {
statement_executeErrorAfter(statement, sql, error);
throw error;
} catch (RuntimeException error) {
statement_executeErrorAfter(statement, sql, error);
throw error;
} catch (Error error) {
statement_executeErrorAfter(statement, sql, error);
throw error;
}
}
@Override
public int statement_executeUpdate(FilterChain chain, StatementProxy statement, String sql) throws SQLException {
statementExecuteUpdateBefore(statement, sql);
try {
int updateCount = super.statement_executeUpdate(chain, statement, sql);
statementExecuteUpdateAfter(statement, sql, updateCount);
return updateCount;
} catch (SQLException error) {
statement_executeErrorAfter(statement, sql, error);
throw error;
} catch (RuntimeException error) {
statement_executeErrorAfter(statement, sql, error);
throw error;
} catch (Error error) {
statement_executeErrorAfter(statement, sql, error);
throw error;
}
}
@Override
public int statement_executeUpdate(FilterChain chain, StatementProxy statement, String sql, int autoGeneratedKeys)
throws SQLException {
statementExecuteUpdateBefore(statement, sql);
try {
int updateCount = super.statement_executeUpdate(chain, statement, sql, autoGeneratedKeys);
statementExecuteUpdateAfter(statement, sql, updateCount);
return updateCount;
} catch (SQLException error) {
statement_executeErrorAfter(statement, sql, error);
throw error;
} catch (RuntimeException error) {
statement_executeErrorAfter(statement, sql, error);
throw error;
} catch (Error error) {
statement_executeErrorAfter(statement, sql, error);
throw error;
}
}
@Override
public int statement_executeUpdate(FilterChain chain, StatementProxy statement, String sql, int[] columnIndexes)
throws SQLException {
statementExecuteUpdateBefore(statement, sql);
try {
int updateCount = super.statement_executeUpdate(chain, statement, sql, columnIndexes);
statementExecuteUpdateAfter(statement, sql, updateCount);
return updateCount;
} catch (SQLException error) {
statement_executeErrorAfter(statement, sql, error);
throw error;
} catch (RuntimeException error) {
statement_executeErrorAfter(statement, sql, error);
throw error;
} catch (Error error) {
statement_executeErrorAfter(statement, sql, error);
throw error;
}
}
@Override
public int statement_executeUpdate(FilterChain chain, StatementProxy statement, String sql, String[] columnNames)
throws SQLException {
statementExecuteUpdateBefore(statement, sql);
try {
int updateCount = super.statement_executeUpdate(chain, statement, sql, columnNames);
statementExecuteUpdateAfter(statement, sql, updateCount);
return updateCount;
} catch (SQLException error) {
statement_executeErrorAfter(statement, sql, error);
throw error;
} catch (RuntimeException error) {
statement_executeErrorAfter(statement, sql, error);
throw error;
} catch (Error error) {
statement_executeErrorAfter(statement, sql, error);
throw error;
}
}
@Override
public ResultSetProxy statement_getGeneratedKeys(FilterChain chain, StatementProxy statement) throws SQLException {
ResultSetProxy resultSet = super.statement_getGeneratedKeys(chain, statement);
if (resultSet != null) {
resultSetOpenAfter(resultSet);
}
return resultSet;
}
@Override
public ResultSetProxy statement_getResultSet(FilterChain chain, StatementProxy statement) throws SQLException {
ResultSetProxy resultSet = super.statement_getResultSet(chain, statement);
if (resultSet != null) {
resultSetOpenAfter(resultSet);
}
return resultSet;
}
@Override
public boolean preparedStatement_execute(FilterChain chain, PreparedStatementProxy statement) throws SQLException {
try {
statementExecuteBefore(statement, statement.getSql());
boolean firstResult = chain.preparedStatement_execute(statement);
this.statementExecuteAfter(statement, statement.getSql(), firstResult);
return firstResult;
} catch (SQLException error) {
statement_executeErrorAfter(statement, statement.getSql(), error);
throw error;
} catch (RuntimeException error) {
statement_executeErrorAfter(statement, statement.getSql(), error);
throw error;
} catch (Error error) {
statement_executeErrorAfter(statement, statement.getSql(), error);
throw error;
}
}
@Override
public ResultSetProxy preparedStatement_executeQuery(FilterChain chain, PreparedStatementProxy statement)
throws SQLException {
try {
statementExecuteQueryBefore(statement, statement.getSql());
ResultSetProxy resultSet = chain.preparedStatement_executeQuery(statement);
if (resultSet != null) {
statementExecuteQueryAfter(statement, statement.getSql(), resultSet);
resultSetOpenAfter(resultSet);
}
return resultSet;
} catch (SQLException error) {
statement_executeErrorAfter(statement, statement.getSql(), error);
throw error;
} catch (RuntimeException error) {
statement_executeErrorAfter(statement, statement.getSql(), error);
throw error;
} catch (Error error) {
statement_executeErrorAfter(statement, statement.getSql(), error);
throw error;
}
}
@Override
public int preparedStatement_executeUpdate(FilterChain chain,
PreparedStatementProxy statement) throws SQLException {
try {
statementExecuteUpdateBefore(statement, statement.getSql());
int updateCount = super.preparedStatement_executeUpdate(chain, statement);
statementExecuteUpdateAfter(statement, statement.getSql(), updateCount);
return updateCount;
} catch (SQLException error) {
statement_executeErrorAfter(statement, statement.getSql(), error);
throw error;
} catch (RuntimeException error) {
statement_executeErrorAfter(statement, statement.getSql(), error);
throw error;
} catch (Error error) {
statement_executeErrorAfter(statement, statement.getSql(), error);
throw error;
}
}
protected void statementCreateAfter(StatementProxy statement) {
}
protected void statementPrepareAfter(PreparedStatementProxy statement) {
}
protected void statementPrepareCallAfter(CallableStatementProxy statement) {
}
protected void resultSetOpenAfter(ResultSetProxy resultSet) {
}
protected void statementExecuteUpdateBefore(StatementProxy statement, String sql) {
}
protected void statementExecuteUpdateAfter(StatementProxy statement, String sql, int updateCount) {
}
protected void statementExecuteQueryBefore(StatementProxy statement, String sql) {
}
protected void statementExecuteQueryAfter(StatementProxy statement, String sql, ResultSetProxy resultSet) {
}
protected void statementExecuteBefore(StatementProxy statement, String sql) {
}
protected void statementExecuteAfter(StatementProxy statement, String sql, boolean result) {
}
protected void statementExecuteBatchBefore(StatementProxy statement) {
}
protected void statementExecuteBatchAfter(StatementProxy statement, int[] result) {
}
protected void statement_executeErrorAfter(StatementProxy statement, String sql, Throwable error) {
}
}
| FilterEventAdapter |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/model/naming/NamingHelper.java | {
"start": 347,
"end": 7178
} | class ____ {
/**
* Singleton access
*/
public static final NamingHelper INSTANCE = new NamingHelper();
public static NamingHelper withCharset(String charset) {
return new NamingHelper( charset );
}
private final String charset;
public NamingHelper() {
this(null);
}
private NamingHelper(String charset) {
this.charset = charset;
}
/**
* If a foreign-key is not explicitly named, this is called to generate
* a unique hash using the table and column names.
*/
public String generateHashedFkName(
String prefix,
Identifier tableName,
Identifier referencedTableName,
List<Identifier> columnNames) {
return generateHashedFkName(
prefix,
tableName,
referencedTableName,
columnNames == null || columnNames.isEmpty()
? new Identifier[0]
: columnNames.toArray( new Identifier[0] )
);
}
/**
* If a foreign-key is not explicitly named, this is called to generate
* a unique hash using the table and column names.
*/
public String generateHashedFkName(
String prefix,
Identifier tableName,
Identifier referencedTableName,
Identifier... columnNames) {
// Use a concatenation that guarantees uniqueness, even if identical
// names exist between all table and column identifiers.
final StringBuilder text = new StringBuilder()
.append( "table`" ).append( tableName ).append( "`" )
.append( "references`" ).append( referencedTableName ).append( "`" );
// Ensure a consistent ordering of columns, regardless of the order
// they were bound.
// Clone the list, as sometimes a set of order-dependent Column
// bindings are given.
final Identifier[] alphabeticalColumns = columnNames.clone();
Arrays.sort( alphabeticalColumns, comparing( Identifier::getCanonicalName ) );
for ( Identifier columnName : alphabeticalColumns ) {
assert columnName != null;
text.append( "column`" ).append( columnName ).append( "`" );
}
return prefix + hashedName( text.toString() );
}
/**
* If a constraint is not explicitly named, this is called to generate
* a unique hash using the table and column names.
*
* @return String The generated name
*/
public String generateHashedConstraintName(
String prefix, Identifier tableName, Identifier... columnNames ) {
// Use a concatenation that guarantees uniqueness, even if identical
// names exist between all table and column identifiers.
final StringBuilder text = new StringBuilder( "table`" + tableName + "`" );
// Ensure a consistent ordering of columns, regardless of the order
// they were bound.
// Clone the list, as sometimes a set of order-dependent Column
// bindings are given.
final Identifier[] alphabeticalColumns = columnNames.clone();
Arrays.sort( alphabeticalColumns, comparing(Identifier::getCanonicalName) );
for ( Identifier columnName : alphabeticalColumns ) {
assert columnName != null;
text.append( "column`" ).append( columnName ).append( "`" );
}
return prefix + hashedName( text.toString() );
}
/**
* If a constraint is not explicitly named, this is called to generate
* a unique hash using the table and column names.
*
* @return String The generated name
*/
public String generateHashedConstraintName(
String prefix, Identifier tableName, List<Identifier> columnNames) {
final Identifier[] columnNamesArray = new Identifier[columnNames.size()];
for ( int i = 0; i < columnNames.size(); i++ ) {
columnNamesArray[i] = columnNames.get( i );
}
return generateHashedConstraintName( prefix, tableName, columnNamesArray );
}
/**
* Hash a constraint name using MD5. Convert the MD5 digest to base 35
* (full alphanumeric), guaranteeing that the length of the name will
* always be smaller than the 30 character identifier restriction
* enforced by some dialects.
*
* @param name The name to be hashed.
*
* @return String The hashed name.
*/
public String hashedName(String name) {
final byte[] bytes;
try {
bytes = charset == null
? name.getBytes()
: name.getBytes( charset );
}
catch (UnsupportedEncodingException uee) {
throw new IllegalArgumentException(uee);
}
final byte[] digest = hash( pad( bytes ) );
return new BigInteger( 1, digest ).toString( 35 );
}
// Constants for MD5
private static final int[] S = {
7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21
};
private static final int[] K = new int[64];
static {
for ( int i = 0; i < 64; i++ ) {
K[i] = (int)(long) ( (1L << 32) * Math.abs( Math.sin( i + 1 ) ) );
}
}
public static byte[] hash(byte[] message) {
int a0 = 0x67452301;
int b0 = 0xefcdab89;
int c0 = 0x98badcfe;
int d0 = 0x10325476;
for ( int i = 0; i < message.length / 64; i++ ) {
final int[] M = new int[16];
for (int j = 0; j < 16; j++) {
M[j] = ((message[i * 64 + j * 4] & 0xFF))
| ((message[i * 64 + j * 4 + 1] & 0xFF) << 8)
| ((message[i * 64 + j * 4 + 2] & 0xFF) << 16)
| ((message[i * 64 + j * 4 + 3] & 0xFF) << 24);
}
int A = a0, B = b0, C = c0, D = d0;
for (int j = 0; j < 64; j++) {
final int F, g;
if (j < 16) {
F = (B & C) | (~B & D);
g = j;
}
else if (j < 32) {
F = (D & B) | (~D & C);
g = (5 * j + 1) % 16;
}
else if (j < 48) {
F = B ^ C ^ D;
g = (3 * j + 5) % 16;
}
else {
F = C ^ (B | ~D);
g = (7 * j) % 16;
}
final int temp = D;
D = C;
C = B;
B = B + Integer.rotateLeft( A + F + K[j] + M[g], S[j] );
A = temp;
}
a0 += A;
b0 += B;
c0 += C;
d0 += D;
}
// Convert final state to byte array (little-endian)
final byte[] digest = new byte[16];
encodeInt( digest, 0, a0 );
encodeInt( digest, 4, b0 );
encodeInt( digest, 8, c0 );
encodeInt( digest, 12, d0 );
return digest;
}
private static void encodeInt(byte[] output, int offset, int value) {
output[offset] = (byte) (value & 0xFF);
output[offset + 1] = (byte) ((value >>> 8) & 0xFF);
output[offset + 2] = (byte) ((value >>> 16) & 0xFF);
output[offset + 3] = (byte) ((value >>> 24) & 0xFF);
}
private static byte[] pad(byte[] input) {
final int originalLength = input.length;
final int numPaddingBytes = ( 56 - (originalLength + 1) % 64 + 64 ) % 64;
final byte[] padded = new byte[originalLength + 1 + numPaddingBytes + 8];
System.arraycopy( input, 0, padded, 0, originalLength );
padded[originalLength] = (byte) 0x80;
long bitLength = (long) originalLength * 8;
for ( int i = 0; i < 8; i++ ) {
padded[padded.length - 8 + i] = (byte) ( ( bitLength >>> (8 * i) ) & 0xFF );
}
return padded;
}
}
| NamingHelper |
java | apache__camel | components/camel-bindy/src/main/java/org/apache/camel/dataformat/bindy/format/factories/DateFormatFactory.java | {
"start": 1686,
"end": 3843
} | class ____ implements PatternFormat<Date> {
private String pattern;
private Locale locale;
private TimeZone timezone;
DatePatternFormat(String pattern, String timezone, Locale locale) {
this.pattern = pattern;
this.locale = locale;
if (!timezone.isEmpty()) {
this.timezone = TimeZone.getTimeZone(timezone);
}
}
@Override
public String format(Date object) throws Exception {
ObjectHelper.notNull(this.pattern, "pattern");
return this.getDateFormat().format(object);
}
@Override
public Date parse(String string) throws Exception {
Date date;
DateFormat df = this.getDateFormat();
ObjectHelper.notNull(this.pattern, "pattern");
// Check length of the string with date pattern
// To avoid to parse a string date : 20090901-10:32:30 when
// the pattern is yyyyMMdd
if (string.length() <= this.pattern.length()) {
// Force the parser to be strict in the syntax of the date to be
// converted
df.setLenient(false);
date = df.parse(string);
return date;
} else {
throw new FormatException("Date provided does not fit the pattern defined");
}
}
protected java.text.DateFormat getDateFormat() {
SimpleDateFormat result;
if (locale != null) {
result = new SimpleDateFormat(pattern, locale);
} else {
result = new SimpleDateFormat(pattern);
}
if (timezone != null) {
result.setTimeZone(timezone);
}
return result;
}
@Override
public String getPattern() {
return pattern;
}
/**
* Sets the pattern
*
* @param pattern the pattern
*/
public void setPattern(String pattern) {
this.pattern = pattern;
}
}
}
| DatePatternFormat |
java | apache__avro | lang/java/avro/src/main/java/org/apache/avro/reflect/AvroEncode.java | {
"start": 1545,
"end": 1616
} | interface ____ {
Class<? extends CustomEncoding<?>> using();
}
| AvroEncode |
java | apache__logging-log4j2 | log4j-1.2-api/src/main/java/org/apache/log4j/helpers/PatternParser.java | {
"start": 1328,
"end": 1399
} | class ____ delegated to the PatternParser class.
*
* <p>
* It is this | is |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/stereotypes/StereotypeAlternativeTest.java | {
"start": 2482,
"end": 2629
} | class ____ {
public String getId() {
return "NOK";
}
}
@BeAlternativeWithPriority
static | NotAtAllAlternative |
java | apache__kafka | clients/src/main/java/org/apache/kafka/common/utils/Utils.java | {
"start": 15882,
"end": 16301
} | class ____ instantiate
* @param base A know baseclass of klass.
* @param <T> the type of the base class
* @throws ClassCastException If {@code klass} is not a subclass of {@code base}.
* @return the new instance.
*/
public static <T> T newInstance(Class<?> klass, Class<T> base) {
return Utils.newInstance(klass.asSubclass(base));
}
/**
* Construct a new object using a | to |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/component/properties/PropertiesComponentRegistryTest.java | {
"start": 1465,
"end": 4089
} | class ____ extends ContextTestSupport {
private MyFooBean foo;
private MyDummyBean bar;
@Override
public boolean isUseRouteBuilder() {
return false;
}
@Override
protected CamelContext createCamelContext() throws Exception {
DefaultCamelContext context = (DefaultCamelContext) super.createCamelContext();
foo = new MyFooBean();
bar = new MyDummyBean();
// re-create context
DefaultRegistry reg = new DefaultRegistry();
reg.bind("foo", foo);
reg.bind("bar", bar);
context.getCamelContextExtension().setRegistry(reg);
context.getPropertiesComponent().setLocation("classpath:org/apache/camel/component/properties/cheese.properties");
return context;
}
@Test
public void testPropertiesComponentRegistryPlain() {
context.start();
assertSame(foo, context.getRegistry().lookupByName("foo"));
assertSame(bar, context.getRegistry().lookupByName("bar"));
assertNull(context.getRegistry().lookupByName("unknown"));
}
@Test
public void testPropertiesComponentRegistryLookupName() {
context.start();
assertSame(foo, context.getRegistry().lookupByName("{{bean.foo}}"));
assertSame(bar, context.getRegistry().lookupByName("{{bean.bar}}"));
RuntimeCamelException e = assertThrows(RuntimeCamelException.class,
() -> context.getRegistry().lookupByName("{{bean.unknown}}"),
"Should have thrown exception");
IllegalArgumentException cause = assertIsInstanceOf(IllegalArgumentException.class, e.getCause());
assertEquals("Property with key [bean.unknown] not found in properties from text: {{bean.unknown}}",
cause.getMessage());
}
@Test
public void testPropertiesComponentRegistryLookupNameAndType() {
context.start();
assertSame(foo, context.getRegistry().lookupByNameAndType("{{bean.foo}}", MyFooBean.class));
assertSame(bar, context.getRegistry().lookupByNameAndType("{{bean.bar}}", MyDummyBean.class));
RuntimeCamelException e = assertThrows(RuntimeCamelException.class,
() -> context.getRegistry().lookupByNameAndType("{{bean.unknown}}", MyDummyBean.class),
"Should have thrown exception");
IllegalArgumentException cause = assertIsInstanceOf(IllegalArgumentException.class, e.getCause());
assertEquals("Property with key [bean.unknown] not found in properties from text: {{bean.unknown}}",
cause.getMessage());
}
}
| PropertiesComponentRegistryTest |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/cglib/core/SpringNamingPolicy.java | {
"start": 1919,
"end": 2329
} | class ____ is for a FastClass, the source is
// "org.springframework.cglib.reflect.FastClass".
boolean isFastClass = (source != null && source.endsWith(".FastClass"));
if (isFastClass && !prefix.contains(FAST_CLASS_SUFFIX)) {
base += FAST_CLASS_SUFFIX;
}
int index = 0;
String attempt = base + index;
while (names.evaluate(attempt)) {
attempt = base + index++;
}
return attempt;
}
}
| name |
java | quarkusio__quarkus | extensions/amazon-lambda/deployment/src/main/java/io/quarkus/amazon/lambda/deployment/ScriptGeneratorProcessor.java | {
"start": 324,
"end": 829
} | class ____ {
@BuildStep
public void buildScripts(OutputTargetBuildItem target,
Optional<ProvidedAmazonLambdaHandlerBuildItem> providedLambda,
BuildProducer<ArtifactResultBuildItem> artifactResultProducer) throws Exception {
if (providedLambda.isPresent())
return; // assume these will be generated elsewhere
LambdaUtil.generateScripts("io.quarkus.amazon.lambda.runtime.QuarkusStreamHandler::handleRequest", target);
}
}
| ScriptGeneratorProcessor |
java | spring-projects__spring-boot | module/spring-boot-http-client/src/test/java/org/springframework/boot/http/client/ReflectiveComponentsClientHttpRequestFactoryBuilderTests.java | {
"start": 9759,
"end": 10537
} | class ____ implements ClientHttpRequestFactory {
private int readTimeout;
private int connectTimeout;
private @Nullable Duration readTimeoutDuration;
private @Nullable Duration connectTimeoutDuration;
@Override
public ClientHttpRequest createRequest(URI uri, HttpMethod httpMethod) {
throw new UnsupportedOperationException();
}
public void setConnectTimeout(int timeout) {
this.connectTimeout = timeout;
}
public void setReadTimeout(int timeout) {
this.readTimeout = timeout;
}
public void setConnectTimeout(@Nullable Duration timeout) {
this.connectTimeoutDuration = timeout;
}
public void setReadTimeout(@Nullable Duration timeout) {
this.readTimeoutDuration = timeout;
}
}
}
| IntAndDurationTimeoutsClientHttpRequestFactory |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/Neo4jEndpointBuilderFactory.java | {
"start": 1559,
"end": 11194
} | interface ____
extends
EndpointProducerBuilder {
default AdvancedNeo4jEndpointBuilder advanced() {
return (AdvancedNeo4jEndpointBuilder) this;
}
/**
* Node alias.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param alias the value to set
* @return the dsl builder
*/
default Neo4jEndpointBuilder alias(String alias) {
doSetProperty("alias", alias);
return this;
}
/**
* Url for connecting to Neo database.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param databaseUrl the value to set
* @return the dsl builder
*/
default Neo4jEndpointBuilder databaseUrl(String databaseUrl) {
doSetProperty("databaseUrl", databaseUrl);
return this;
}
/**
* Detach a relationship - set true if you want to delete a node and
* detach its relationships to other nodes at same time.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param detachRelationship the value to set
* @return the dsl builder
*/
default Neo4jEndpointBuilder detachRelationship(boolean detachRelationship) {
doSetProperty("detachRelationship", detachRelationship);
return this;
}
/**
* Detach a relationship - set true if you want to delete a node and
* detach its relationships to other nodes at same time.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param detachRelationship the value to set
* @return the dsl builder
*/
default Neo4jEndpointBuilder detachRelationship(String detachRelationship) {
doSetProperty("detachRelationship", detachRelationship);
return this;
}
/**
* Dimension of Vector Index.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Group: producer
*
* @param dimension the value to set
* @return the dsl builder
*/
default Neo4jEndpointBuilder dimension(Integer dimension) {
doSetProperty("dimension", dimension);
return this;
}
/**
* Dimension of Vector Index.
*
* The option will be converted to a <code>java.lang.Integer</code>
* type.
*
* Group: producer
*
* @param dimension the value to set
* @return the dsl builder
*/
default Neo4jEndpointBuilder dimension(String dimension) {
doSetProperty("dimension", dimension);
return this;
}
/**
* Node Label.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param label the value to set
* @return the dsl builder
*/
default Neo4jEndpointBuilder label(String label) {
doSetProperty("label", label);
return this;
}
/**
* Maximum results for Vector Similarity search.
*
* The option is a: <code>int</code> type.
*
* Default: 3
* Group: producer
*
* @param maxResults the value to set
* @return the dsl builder
*/
default Neo4jEndpointBuilder maxResults(int maxResults) {
doSetProperty("maxResults", maxResults);
return this;
}
/**
* Maximum results for Vector Similarity search.
*
* The option will be converted to a <code>int</code> type.
*
* Default: 3
* Group: producer
*
* @param maxResults the value to set
* @return the dsl builder
*/
default Neo4jEndpointBuilder maxResults(String maxResults) {
doSetProperty("maxResults", maxResults);
return this;
}
/**
* Minimum score for Vector Similarity search.
*
* The option is a: <code>double</code> type.
*
* Default: 0.0
* Group: producer
*
* @param minScore the value to set
* @return the dsl builder
*/
default Neo4jEndpointBuilder minScore(double minScore) {
doSetProperty("minScore", minScore);
return this;
}
/**
* Minimum score for Vector Similarity search.
*
* The option will be converted to a <code>double</code> type.
*
* Default: 0.0
* Group: producer
*
* @param minScore the value to set
* @return the dsl builder
*/
default Neo4jEndpointBuilder minScore(String minScore) {
doSetProperty("minScore", minScore);
return this;
}
/**
* Cypher Query.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param query the value to set
* @return the dsl builder
*/
default Neo4jEndpointBuilder query(String query) {
doSetProperty("query", query);
return this;
}
/**
* Similarity Function of Vector Index.
*
* The option is a:
* <code>org.apache.camel.component.neo4j.Neo4jSimilarityFunction</code>
* type.
*
* Default: cosine
* Group: producer
*
* @param similarityFunction the value to set
* @return the dsl builder
*/
default Neo4jEndpointBuilder similarityFunction(org.apache.camel.component.neo4j.Neo4jSimilarityFunction similarityFunction) {
doSetProperty("similarityFunction", similarityFunction);
return this;
}
/**
* Similarity Function of Vector Index.
*
* The option will be converted to a
* <code>org.apache.camel.component.neo4j.Neo4jSimilarityFunction</code>
* type.
*
* Default: cosine
* Group: producer
*
* @param similarityFunction the value to set
* @return the dsl builder
*/
default Neo4jEndpointBuilder similarityFunction(String similarityFunction) {
doSetProperty("similarityFunction", similarityFunction);
return this;
}
/**
* Vector Index Name.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param vectorIndexName the value to set
* @return the dsl builder
*/
default Neo4jEndpointBuilder vectorIndexName(String vectorIndexName) {
doSetProperty("vectorIndexName", vectorIndexName);
return this;
}
/**
* Kerberos Authentication encoded base64 ticket.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param kerberosAuthTicket the value to set
* @return the dsl builder
*/
default Neo4jEndpointBuilder kerberosAuthTicket(String kerberosAuthTicket) {
doSetProperty("kerberosAuthTicket", kerberosAuthTicket);
return this;
}
/**
* Basic authentication database password.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param password the value to set
* @return the dsl builder
*/
default Neo4jEndpointBuilder password(String password) {
doSetProperty("password", password);
return this;
}
/**
* Basic authentication database realm.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param realm the value to set
* @return the dsl builder
*/
default Neo4jEndpointBuilder realm(String realm) {
doSetProperty("realm", realm);
return this;
}
/**
* Bearer authentication database realm.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param token the value to set
* @return the dsl builder
*/
default Neo4jEndpointBuilder token(String token) {
doSetProperty("token", token);
return this;
}
/**
* Basic authentication database user.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param username the value to set
* @return the dsl builder
*/
default Neo4jEndpointBuilder username(String username) {
doSetProperty("username", username);
return this;
}
}
/**
* Advanced builder for endpoint for the Neo4j component.
*/
public | Neo4jEndpointBuilder |
java | apache__avro | lang/java/mapred/src/main/java/org/apache/avro/hadoop/io/AvroSerializer.java | {
"start": 1798,
"end": 3789
} | class ____<T> implements Serializer<AvroWrapper<T>> {
/** An factory for creating Avro datum encoders. */
private static final EncoderFactory ENCODER_FACTORY = new EncoderFactory();
/** The writer schema for the data to serialize. */
private final Schema mWriterSchema;
/** The Avro datum writer for serializing. */
private final DatumWriter<T> mAvroDatumWriter;
/** The Avro encoder for serializing. */
private BinaryEncoder mAvroEncoder;
/** The output stream for serializing. */
private OutputStream mOutputStream;
/**
* Constructor.
*
* @param writerSchema The writer schema for the Avro data being serialized.
*/
public AvroSerializer(Schema writerSchema) {
if (null == writerSchema) {
throw new IllegalArgumentException("Writer schema may not be null");
}
mWriterSchema = writerSchema;
mAvroDatumWriter = new ReflectDatumWriter<>(writerSchema);
}
/**
* Constructor.
*
* @param writerSchema The writer schema for the Avro data being serialized.
* @param datumWriter The datum writer to use for serialization.
*/
public AvroSerializer(Schema writerSchema, DatumWriter<T> datumWriter) {
if (null == writerSchema) {
throw new IllegalArgumentException("Writer schema may not be null");
}
mWriterSchema = writerSchema;
mAvroDatumWriter = datumWriter;
}
/**
* Gets the writer schema being used for serialization.
*
* @return The writer schema.
*/
public Schema getWriterSchema() {
return mWriterSchema;
}
/** {@inheritDoc} */
@Override
public void open(OutputStream outputStream) throws IOException {
mOutputStream = outputStream;
mAvroEncoder = ENCODER_FACTORY.binaryEncoder(outputStream, mAvroEncoder);
}
/** {@inheritDoc} */
@Override
public void serialize(AvroWrapper<T> avroWrapper) throws IOException {
mAvroDatumWriter.write(avroWrapper.datum(), mAvroEncoder);
// This would be a lot faster if the Serializer | AvroSerializer |
java | elastic__elasticsearch | x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/LastValueFieldProducer.java | {
"start": 917,
"end": 1006
} | class ____ that field values are collected and sorted by descending order by time
*/
| assumes |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/onetomany/OneToManyTest.java | {
"start": 17812,
"end": 18192
} | class ____ {
@Id
@GeneratedValue
Long id;
@OneToMany(cascade = CascadeType.ALL)
@JoinColumn(name = "a_id")
@OnDelete(action = OnDeleteAction.CASCADE)
List<OnDeleteUnidirectionalOneToManyChild> children;
}
@Entity(name = "OnDeleteUnidirectionalOneToManyChild")
@jakarta.persistence.Table(name = "OneToManyChild")
public static | OnDeleteUnidirectionalOneToManyParent |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java | {
"start": 1335,
"end": 11576
} | class ____ extends BlockPlacementPolicyDefault {
@Override
protected int[] getMaxNodesPerRack(int numOfChosen, int numOfReplicas) {
int clusterSize = clusterMap.getNumOfLeaves();
int totalNumOfReplicas = numOfChosen + numOfReplicas;
if (totalNumOfReplicas > clusterSize) {
numOfReplicas -= (totalNumOfReplicas-clusterSize);
totalNumOfReplicas = clusterSize;
}
// No calculation needed when there is only one rack or picking one node.
int numOfRacks = clusterMap.getNumOfNonEmptyRacks();
// HDFS-14527 return default when numOfRacks = 0 to avoid
// ArithmeticException when calc maxNodesPerRack at following logic.
if (numOfRacks <= 1 || totalNumOfReplicas <= 1) {
return new int[] {numOfReplicas, totalNumOfReplicas};
}
// If more racks than replicas, put one replica per rack.
if (totalNumOfReplicas < numOfRacks) {
return new int[] {numOfReplicas, 1};
}
// If more replicas than racks, evenly spread the replicas.
// This calculation rounds up.
int maxNodesPerRack = (totalNumOfReplicas - 1) / numOfRacks + 1;
return new int[] {numOfReplicas, maxNodesPerRack};
}
/**
* Choose numOfReplicas in order:
* 1. If total replica expected is less than numOfRacks in cluster, it choose
* randomly.
* 2. If total replica expected is bigger than numOfRacks, it choose:
* 2a. Fill each rack exactly (maxNodesPerRack-1) replicas.
* 2b. For some random racks, place one more replica to each one of them,
* until numOfReplicas have been chosen. <br>
* 3. If after step 2, there are still replicas not placed (due to some
* racks have fewer datanodes than maxNodesPerRack), the rest of the replicas
* is placed evenly on the rest of the racks who have Datanodes that have
* not been placed a replica.
* 4. If after step 3, there are still replicas not placed. A
* {@link NotEnoughReplicasException} is thrown.
* <p>
* For normal setups, step 2 would suffice. So in the end, the difference
* of the numbers of replicas for each two racks is no more than 1.
* Either way it always prefer local storage.
* @return local node of writer
*/
@Override
protected Node chooseTargetInOrder(int numOfReplicas,
Node writer,
final Set<Node> excludedNodes,
final long blocksize,
final int maxNodesPerRack,
final List<DatanodeStorageInfo> results,
final boolean avoidStaleNodes,
final boolean newBlock,
EnumMap<StorageType, Integer> storageTypes)
throws NotEnoughReplicasException {
int totalReplicaExpected = results.size() + numOfReplicas;
int numOfRacks = clusterMap.getNumOfNonEmptyRacks();
try {
if (totalReplicaExpected < numOfRacks ||
totalReplicaExpected % numOfRacks == 0) {
writer = chooseOnce(numOfReplicas, writer, excludedNodes, blocksize,
maxNodesPerRack, results, avoidStaleNodes, storageTypes);
return writer;
}
assert totalReplicaExpected > (maxNodesPerRack -1) * numOfRacks;
// Calculate numOfReplicas for filling each rack exactly (maxNodesPerRack-1)
// replicas.
HashMap<String, Integer> rackCounts = new HashMap<>();
for (DatanodeStorageInfo dsInfo : results) {
String rack = dsInfo.getDatanodeDescriptor().getNetworkLocation();
Integer count = rackCounts.get(rack);
if (count != null) {
rackCounts.put(rack, count + 1);
} else {
rackCounts.put(rack, 1);
}
}
int excess = 0; // Sum of the above (maxNodesPerRack-1) part of nodes in results
for (int count : rackCounts.values()) {
if (count > maxNodesPerRack -1) {
excess += count - (maxNodesPerRack -1);
}
}
numOfReplicas = Math.min(totalReplicaExpected - results.size(),
(maxNodesPerRack -1) * numOfRacks - (results.size() - excess));
// Try to spread the replicas as evenly as possible across racks.
// This is done by first placing with (maxNodesPerRack-1), then spreading
// the remainder by calling again with maxNodesPerRack.
writer = chooseOnce(numOfReplicas, writer, new HashSet<>(excludedNodes),
blocksize, maxNodesPerRack - 1, results, avoidStaleNodes,
storageTypes);
// Exclude the chosen nodes
for (DatanodeStorageInfo resultStorage : results) {
addToExcludedNodes(resultStorage.getDatanodeDescriptor(),
excludedNodes);
}
LOG.trace("Chosen nodes: {}", results);
LOG.trace("Excluded nodes: {}", excludedNodes);
numOfReplicas = totalReplicaExpected - results.size();
chooseOnce(numOfReplicas, writer, excludedNodes, blocksize,
maxNodesPerRack, results, avoidStaleNodes, storageTypes);
} catch (NotEnoughReplicasException e) {
LOG.warn("Only able to place {} of total expected {}"
+ " (maxNodesPerRack={}, numOfReplicas={}) nodes "
+ "evenly across racks, falling back to evenly place on the "
+ "remaining racks. This may not guarantee rack-level fault "
+ "tolerance. Please check if the racks are configured properly.",
results.size(), totalReplicaExpected, maxNodesPerRack, numOfReplicas);
LOG.debug("Caught exception was:", e);
chooseEvenlyFromRemainingRacks(writer, excludedNodes, blocksize,
maxNodesPerRack, results, avoidStaleNodes, storageTypes,
totalReplicaExpected, e);
}
return writer;
}
/**
* Choose as evenly as possible from the racks which have available datanodes.
*/
private void chooseEvenlyFromRemainingRacks(Node writer,
Set<Node> excludedNodes, long blocksize, int maxNodesPerRack,
List<DatanodeStorageInfo> results, boolean avoidStaleNodes,
EnumMap<StorageType, Integer> storageTypes, int totalReplicaExpected,
NotEnoughReplicasException e) throws NotEnoughReplicasException {
int numResultsOflastChoose = 0;
NotEnoughReplicasException lastException = e;
int bestEffortMaxNodesPerRack = maxNodesPerRack;
while (results.size() != totalReplicaExpected &&
bestEffortMaxNodesPerRack < totalReplicaExpected) {
// Exclude the chosen nodes
final Set<Node> newExcludeNodes = new HashSet<>();
for (DatanodeStorageInfo resultStorage : results) {
addToExcludedNodes(resultStorage.getDatanodeDescriptor(),
newExcludeNodes);
}
LOG.trace("Chosen nodes: {}", results);
LOG.trace("Excluded nodes: {}", excludedNodes);
LOG.trace("New Excluded nodes: {}", newExcludeNodes);
final int numOfReplicas = totalReplicaExpected - results.size();
numResultsOflastChoose = results.size();
try {
chooseOnce(numOfReplicas, writer, newExcludeNodes, blocksize,
++bestEffortMaxNodesPerRack, results, avoidStaleNodes,
storageTypes);
} catch (NotEnoughReplicasException nere) {
lastException = nere;
} finally {
excludedNodes.addAll(newExcludeNodes);
}
// To improve performance, the maximum value of 'bestEffortMaxNodesPerRack'
// is calculated only when it is not possible to select a node.
if (numResultsOflastChoose == results.size()) {
Map<String, Integer> nodesPerRack = new HashMap<>();
for (DatanodeStorageInfo dsInfo : results) {
String rackName = dsInfo.getDatanodeDescriptor().getNetworkLocation();
nodesPerRack.merge(rackName, 1, Integer::sum);
}
bestEffortMaxNodesPerRack =
Math.max(bestEffortMaxNodesPerRack, Collections.max(nodesPerRack.values()));
}
}
if (results.size() != totalReplicaExpected) {
LOG.debug("Best effort placement failed: expecting {} replicas, only "
+ "chose {}.", totalReplicaExpected, results.size());
throw lastException;
}
}
/**
* Randomly choose <i>numOfReplicas</i> targets from the given <i>scope</i>.
* Except that 1st replica prefer local storage.
* @return local node of writer.
*/
private Node chooseOnce(int numOfReplicas,
Node writer,
final Set<Node> excludedNodes,
final long blocksize,
final int maxNodesPerRack,
final List<DatanodeStorageInfo> results,
final boolean avoidStaleNodes,
EnumMap<StorageType, Integer> storageTypes)
throws NotEnoughReplicasException {
if (numOfReplicas == 0) {
return writer;
}
writer = chooseLocalStorage(writer, excludedNodes, blocksize,
maxNodesPerRack, results, avoidStaleNodes, storageTypes, true)
.getDatanodeDescriptor();
if (--numOfReplicas == 0) {
return writer;
}
chooseRandom(numOfReplicas, NodeBase.ROOT, excludedNodes, blocksize,
maxNodesPerRack, results, avoidStaleNodes, storageTypes);
return writer;
}
@Override
public BlockPlacementStatus verifyBlockPlacement(DatanodeInfo[] locs,
int numberOfReplicas) {
if (locs == null)
locs = DatanodeDescriptor.EMPTY_ARRAY;
if (!clusterMap.hasClusterEverBeenMultiRack()) {
// only one rack
return new BlockPlacementStatusDefault(1, 1, 1);
}
// Count locations on different racks.
Set<String> racks = new HashSet<>();
for (DatanodeInfo dn : locs) {
racks.add(dn.getNetworkLocation());
}
return new BlockPlacementStatusDefault(racks.size(), numberOfReplicas,
clusterMap.getNumOfNonEmptyRacks());
}
@Override
protected Collection<DatanodeStorageInfo> pickupReplicaSet(
Collection<DatanodeStorageInfo> moreThanOne,
Collection<DatanodeStorageInfo> exactlyOne,
Map<String, List<DatanodeStorageInfo>> rackMap) {
return moreThanOne.isEmpty() ? exactlyOne : moreThanOne;
}
}
| BlockPlacementPolicyRackFaultTolerant |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/hint/JoinStrategy.java | {
"start": 987,
"end": 3629
} | enum ____ {
/**
* Instructs the optimizer to use broadcast hash join strategy. If both sides are specified in
* this hint, the side that is first written will be broadcast.
*/
BROADCAST("BROADCAST"),
/**
* Instructs the optimizer to use shuffle hash join strategy. If both sides are specified in
* this hint, the side that is first written will be treated as the build side.
*/
SHUFFLE_HASH("SHUFFLE_HASH"),
/**
* Instructs the optimizer to use shuffle sort merge join strategy. As long as one of the side
* is specified in this hint, it will be tried.
*/
SHUFFLE_MERGE("SHUFFLE_MERGE"),
/**
* Instructs the optimizer to use nest loop join strategy. If both sides are specified in this
* hint, the side that is first written will be treated as the build side.
*/
NEST_LOOP("NEST_LOOP"),
/** Instructs the optimizer to use lookup join strategy. Only accept key-value hint options. */
LOOKUP("LOOKUP"),
/**
* Instructs the optimizer to use multi-way join strategy for streaming queries. This hint
* allows specifying multiple tables to be joined together in a single {@link
* org.apache.flink.table.runtime.operators.join.stream.StreamingMultiJoinOperator}.
*/
MULTI_JOIN("MULTI_JOIN");
private final String joinHintName;
JoinStrategy(String joinHintName) {
this.joinHintName = joinHintName;
}
public static boolean isJoinStrategy(String hintName) {
try {
JoinStrategy.valueOf(hintName.toUpperCase(Locale.ROOT));
return true;
} catch (Exception e) {
return false;
}
}
public String getJoinHintName() {
return joinHintName;
}
public static boolean validOptions(String hintName, List<String> options) {
if (!isJoinStrategy(hintName)) {
return false;
}
JoinStrategy strategy = JoinStrategy.valueOf(hintName);
switch (strategy) {
case SHUFFLE_HASH:
case SHUFFLE_MERGE:
case BROADCAST:
case NEST_LOOP:
return options.size() > 0;
case LOOKUP:
return null == options || options.size() == 0;
case MULTI_JOIN:
return options.size() > 0;
}
return false;
}
public static boolean isLookupHint(String hintName) {
String formalizedHintName = hintName.toUpperCase(Locale.ROOT);
return isJoinStrategy(formalizedHintName)
&& JoinStrategy.valueOf(formalizedHintName) == LOOKUP;
}
}
| JoinStrategy |
java | apache__hadoop | hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlFileSystemContractLive.java | {
"start": 1215,
"end": 1946
} | class ____ extends FileSystemContractBaseTest {
private FileSystem adlStore;
@BeforeEach
public void setUp() throws Exception {
skipTestCheck();
adlStore = AdlStorageConfiguration.createStorageConnector();
if (AdlStorageConfiguration.isContractTestEnabled()) {
fs = adlStore;
}
assumeTrue(fs != null);
}
@AfterEach
public void tearDown() throws Exception {
if (AdlStorageConfiguration.isContractTestEnabled()) {
cleanup();
}
super.tearDown();
}
private void cleanup() throws IOException {
adlStore.delete(new Path("/test"), true);
}
private void skipTestCheck() {
assumeTrue(AdlStorageConfiguration.isContractTestEnabled());
}
} | TestAdlFileSystemContractLive |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/context/annotation/ImportAwareTests.java | {
"start": 10495,
"end": 10623
} | interface ____ {
String value() default "";
}
@Configuration(proxyBeanMethods = false)
public static | EnableLiteConfiguration |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/time/JavaInstantGetSecondsGetNanoTest.java | {
"start": 9685,
"end": 10349
} | class ____ {
private static final Instant INSTANT = Instant.EPOCH;
public void foo() {
doSomething(() -> INSTANT.getEpochSecond());
// BUG: Diagnostic contains: JavaInstantGetSecondsGetNano
int nanos = INSTANT.getNano();
}
public void doSomething(Supplier<Long> supplier) {}
}
""")
.doTest();
}
@Test
public void getNanoInLambda() {
compilationHelper
.addSourceLines(
"test/TestCase.java",
"""
package test;
import java.time.Instant;
public | TestCase |
java | spring-projects__spring-boot | module/spring-boot-activemq/src/main/java/org/springframework/boot/activemq/autoconfigure/ActiveMQProperties.java | {
"start": 3974,
"end": 4268
} | class ____ {
/**
* Whether to enable embedded mode if the ActiveMQ Broker is available.
*/
private boolean enabled = true;
public boolean isEnabled() {
return this.enabled;
}
public void setEnabled(boolean enabled) {
this.enabled = enabled;
}
}
public static | Embedded |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/configurers/NamespaceHttpCustomFilterTests.java | {
"start": 4838,
"end": 5193
} | class ____ {
@Bean
SecurityFilterChain filterChain(HttpSecurity http) throws Exception {
// @formatter:off
http
.addFilterAfter(new CustomFilter(), UsernamePasswordAuthenticationFilter.class)
.formLogin(withDefaults());
return http.build();
// @formatter:on
}
}
@Configuration
@EnableWebSecurity
static | CustomFilterAfterConfig |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/stream/JSONReaderTest_0.java | {
"start": 267,
"end": 585
} | class ____ extends TestCase {
public void test_read() throws Exception {
JSONReader reader = new JSONReader(new StringReader("{}"));
reader.config(Feature.AllowArbitraryCommas, true);
JSONObject object = (JSONObject) reader.readObject();
Assert.assertNotNull(object);
reader.close();
}
}
| JSONReaderTest_0 |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/BoundedBlockingSubpartitionReader.java | {
"start": 1314,
"end": 4493
} | class ____ implements ResultSubpartitionView {
/** The result subpartition that we read. */
private final BoundedBlockingSubpartition parent;
/**
* The listener that is notified when there are available buffers for this subpartition view.
*/
private final BufferAvailabilityListener availabilityListener;
/** The next buffer (look ahead). Null once the data is depleted or reader is disposed. */
@Nullable private Buffer nextBuffer;
/**
* The reader/decoder to the memory mapped region with the data we currently read from. Null
* once the reader empty or disposed.
*/
@Nullable private BoundedData.Reader dataReader;
/** The remaining number of data buffers (not events) in the result. */
private int dataBufferBacklog;
/** Flag whether this reader is released. Atomic, to avoid double release. */
private boolean isReleased;
private int sequenceNumber;
/** Convenience constructor that takes a single buffer. */
BoundedBlockingSubpartitionReader(
BoundedBlockingSubpartition parent,
BoundedData data,
int numDataBuffers,
BufferAvailabilityListener availabilityListener)
throws IOException {
this.parent = checkNotNull(parent);
checkNotNull(data);
this.dataReader = data.createReader(this);
this.nextBuffer = dataReader.nextBuffer();
checkArgument(numDataBuffers >= 0);
this.dataBufferBacklog = numDataBuffers;
this.availabilityListener = checkNotNull(availabilityListener);
}
@Nullable
@Override
public BufferAndBacklog getNextBuffer() throws IOException {
final Buffer current = nextBuffer; // copy reference to stack
if (current == null) {
// as per contract, we must return null when the reader is empty,
// but also in case the reader is disposed (rather than throwing an exception)
return null;
}
if (current.isBuffer()) {
dataBufferBacklog--;
}
assert dataReader != null;
nextBuffer = dataReader.nextBuffer();
Buffer.DataType nextDataType =
nextBuffer != null ? nextBuffer.getDataType() : Buffer.DataType.NONE;
return BufferAndBacklog.fromBufferAndLookahead(
current, nextDataType, dataBufferBacklog, sequenceNumber++);
}
/**
* This method is actually only meaningful for the {@link BoundedBlockingSubpartitionType#FILE}.
*
* <p>For the other types the {@link #nextBuffer} can not be ever set to null, so it is no need
* to notify available via this method. But the implementation is also compatible with other
* types even though called by mistake.
*/
@Override
public void notifyDataAvailable() {
if (nextBuffer == null) {
assert dataReader != null;
try {
nextBuffer = dataReader.nextBuffer();
} catch (IOException ex) {
// this exception wrapper is only for avoiding throwing IOException explicitly
// in relevant | BoundedBlockingSubpartitionReader |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/arch/Processor.java | {
"start": 2074,
"end": 2353
} | enum ____ types of a microprocessor.
* The following types are defined:
* <ul>
* <li>AArch64</li>
* <li>x86</li>
* <li>ia64</li>
* <li>PPC</li>
* <li>RISCV</li>
* <li>Unknown</li>
* </ul>
*/
public | defines |
java | apache__flink | flink-table/flink-table-runtime/src/test/java/org/apache/flink/table/data/DataStructureConvertersTest.java | {
"start": 32817,
"end": 33625
} | class ____ {
public final int age;
public final String name;
public PojoWithImmutableFields(int age, String name) {
this.age = age;
this.name = name;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
PojoWithImmutableFields that = (PojoWithImmutableFields) o;
return age == that.age && Objects.equals(name, that.name);
}
@Override
public int hashCode() {
return Objects.hash(age, name);
}
}
/** POJO with default constructor and private fields. */
public static | PojoWithImmutableFields |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.