language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java
|
{
"start": 7180,
"end": 13633
}
|
/**
 * Carries the sub-aggregations used as the prototype when the histogram must
 * materialize empty buckets. Serializable over the transport wire.
 */
class ____ {
// sub-aggregations cloned into empty buckets
final InternalAggregations subAggregations;
EmptyBucketInfo(InternalAggregations subAggregations) {
this.subAggregations = subAggregations;
}
/** Reads the prototype sub-aggregations from the stream. */
EmptyBucketInfo(StreamInput in) throws IOException {
this(InternalAggregations.readFrom(in));
}
/** Writes the prototype sub-aggregations to the stream. */
public void writeTo(StreamOutput out) throws IOException {
subAggregations.writeTo(out);
}
@Override
public boolean equals(Object obj) {
// identity short-circuit: standard equals() idiom, avoids the field
// comparison when comparing an instance against itself
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
EmptyBucketInfo that = (EmptyBucketInfo) obj;
return Objects.equals(subAggregations, that.subAggregations);
}
@Override
public int hashCode() {
// getClass() is part of the hash because equals() discriminates on it
return Objects.hash(getClass(), subAggregations);
}
}
private final List<Bucket> buckets;
private final DocValueFormat format;
private final int targetNumBuckets;
final EmptyBucketInfo emptyBucketInfo;
/**
 * Creates a variable-width histogram result.
 *
 * @param name             aggregation name
 * @param buckets          computed buckets; callers are expected to pass them
 *                         sorted by centroid (see the stream constructor)
 * @param emptyBucketInfo  sub-aggregation prototype for empty buckets
 * @param targetNumBuckets requested maximum number of buckets
 * @param formatter        format used to render bucket keys
 * @param metaData         arbitrary aggregation metadata
 */
InternalVariableWidthHistogram(
String name,
List<Bucket> buckets,
EmptyBucketInfo emptyBucketInfo,
int targetNumBuckets,
DocValueFormat formatter,
Map<String, Object> metaData
) {
super(name, metaData);
this.buckets = buckets;
this.emptyBucketInfo = emptyBucketInfo;
this.format = formatter;
this.targetNumBuckets = targetNumBuckets;
}
/**
 * Reads from a stream (wire deserialization).
 *
 * @throws IOException if the stream cannot be read
 */
public InternalVariableWidthHistogram(StreamInput in) throws IOException {
super(in);
emptyBucketInfo = new EmptyBucketInfo(in);
format = in.readNamedWriteable(DocValueFormat.class);
buckets = in.readCollectionAsList(stream -> Bucket.readFrom(stream, format));
targetNumBuckets = in.readVInt();
// we changed the order format in 8.13 for partial reduce, therefore we need to order them to perform merge sort
if (in.getTransportVersion().between(TransportVersions.V_8_13_0, TransportVersions.V_8_14_0)) {
// list is mutable by #readCollectionAsList contract
buckets.sort(Comparator.comparingDouble(b -> b.centroid));
}
}
@Override
protected void doWriteTo(StreamOutput out) throws IOException {
// write order must mirror the read order in the stream constructor
emptyBucketInfo.writeTo(out);
out.writeNamedWriteable(format);
out.writeCollection(buckets);
out.writeVInt(targetNumBuckets);
}
@Override
public String getWriteableName() {
// registered wire name of this aggregation type
return VariableWidthHistogramAggregationBuilder.NAME;
}
@Override
public List<Bucket> getBuckets() {
// unmodifiable view: callers must not mutate internal state
return Collections.unmodifiableList(buckets);
}
/** Returns the requested maximum number of buckets. */
public int getTargetBuckets() {
return targetNumBuckets;
}
/** Returns the sub-aggregation prototype used for empty buckets. */
public EmptyBucketInfo getEmptyBucketInfo() {
return emptyBucketInfo;
}
@Override
public InternalVariableWidthHistogram create(List<Bucket> buckets) {
// copy-with-new-buckets factory; all other state is carried over
return new InternalVariableWidthHistogram(name, buckets, emptyBucketInfo, targetNumBuckets, format, metadata);
}
@Override
public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) {
// clone the prototype bucket, swapping in the given sub-aggregations
return new Bucket(prototype.centroid, prototype.bounds, prototype.docCount, prototype.format, aggregations);
}
@Override
public Bucket createBucket(Number key, long docCount, InternalAggregations aggregations) {
// a bucket created from a single key is a degenerate interval [key, key]
return new Bucket(key.doubleValue(), new Bucket.BucketBounds(key.doubleValue(), key.doubleValue()), docCount, format, aggregations);
}
@Override
public Number getKey(MultiBucketsAggregation.Bucket bucket) {
// the centroid is the logical key of a variable-width bucket
return ((Bucket) bucket).centroid;
}
/**
 * Reduces several same-key buckets (from different shards) into one: bounds
 * become the union of the inputs, the centroid becomes the doc-count-weighted
 * mean, and the sub-aggregations are reduced via {@link BucketReducer}.
 * The input list must be non-empty.
 */
private Bucket reduceBucket(List<Bucket> buckets, AggregationReduceContext context) {
assert buckets.isEmpty() == false;
double min = Double.POSITIVE_INFINITY;
double max = Double.NEGATIVE_INFINITY;
double sum = 0;
try (BucketReducer<Bucket> reducer = new BucketReducer<>(buckets.get(0), context, buckets.size())) {
for (Bucket bucket : buckets) {
min = Math.min(min, bucket.bounds.min);
max = Math.max(max, bucket.bounds.max);
// accumulate docCount-weighted centroid numerator
sum += bucket.docCount * bucket.centroid;
reducer.accept(bucket);
}
// NOTE(review): division by total docCount; presumably reduced buckets
// always carry at least one doc — confirm, otherwise this yields NaN
final double centroid = sum / reducer.getDocCount();
final Bucket.BucketBounds bounds = new Bucket.BucketBounds(min, max);
return new Bucket(centroid, bounds, reducer.getDocCount(), format, reducer.getAggregations());
}
}
/**
 * Merge-sorts per-shard bucket streams (each sorted by centroid, enforced by
 * the assertion below) into one list, reducing runs of buckets that share the
 * same centroid into a single bucket, then merges the result down toward
 * {@code targetNumBuckets}.
 *
 * @param pq            priority queue of per-shard bucket iterators, ordered
 *                      so that the smallest current centroid is on top
 * @param reduceContext reduce context, also used for circuit-breaker accounting
 * @return the reduced (and possibly merged) bucket list
 */
public List<Bucket> reduceBuckets(PriorityQueue<IteratorAndCurrent<Bucket>> pq, AggregationReduceContext reduceContext) {
List<Bucket> reducedBuckets = new ArrayList<>();
if (pq.size() > 0) {
double key = pq.top().current().centroid();
// list of buckets coming from different shards that have the same key
final List<Bucket> currentBuckets = new ArrayList<>();
do {
IteratorAndCurrent<Bucket> top = pq.top();
if (Double.compare(top.current().centroid(), key) != 0) {
// The key changes, reduce what we already buffered and reset the buffer for current buckets.
final Bucket reduced = reduceBucket(currentBuckets, reduceContext);
reduceContext.consumeBucketsAndMaybeBreak(1);
reducedBuckets.add(reduced);
currentBuckets.clear();
key = top.current().centroid();
}
currentBuckets.add(top.current());
if (top.hasNext()) {
Bucket prev = top.current();
top.next();
assert top.current().compareKey(prev) >= 0 : "shards must return data sorted by centroid";
// re-heapify since the top iterator advanced
pq.updateTop();
} else {
// iterator exhausted: drop it from the queue
pq.pop();
}
} while (pq.size() > 0);
// flush the final buffered run (the loop only reduces on key change)
if (currentBuckets.isEmpty() == false) {
final Bucket reduced = reduceBucket(currentBuckets, reduceContext);
reduceContext.consumeBucketsAndMaybeBreak(1);
reducedBuckets.add(reduced);
}
}
mergeBucketsIfNeeded(reducedBuckets, targetNumBuckets, reduceContext);
return reducedBuckets;
}
static
|
EmptyBucketInfo
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgDoubleEvaluator.java
|
{
"start": 927,
"end": 3243
}
|
class ____ extends AbstractMultivalueFunction.AbstractEvaluator {
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(MvAvgDoubleEvaluator.class);
// NOTE: generated code (lives under src/main/generated) — do not hand-edit logic.
public MvAvgDoubleEvaluator(EvalOperator.ExpressionEvaluator field, DriverContext driverContext) {
super(driverContext, field);
}
@Override
public String name() {
// function name used in profiling / diagnostics output
return "MvAvg";
}
/**
 * Evaluate blocks containing at least one multivalued field.
 * Nullable variant: positions with zero values emit null.
 */
@Override
public Block evalNullable(Block fieldVal) {
DoubleBlock v = (DoubleBlock) fieldVal;
int positionCount = v.getPositionCount();
try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) {
// scratch accumulator reused across positions (Kahan-style compensated sum)
CompensatedSum work = new CompensatedSum();
for (int p = 0; p < positionCount; p++) {
int valueCount = v.getValueCount(p);
if (valueCount == 0) {
// no values at this position -> null result
builder.appendNull();
continue;
}
int first = v.getFirstValueIndex(p);
int end = first + valueCount;
for (int i = first; i < end; i++) {
double value = v.getDouble(i);
MvAvg.process(work, value);
}
// finish resets/consumes the accumulator and divides by valueCount
double result = MvAvg.finish(work, valueCount);
builder.appendDouble(result);
}
return builder.build();
}
}
/**
 * Evaluate blocks containing at least one multivalued field.
 * Not-nullable variant: writes into a fixed-size vector builder.
 * NOTE(review): unlike evalNullable there is no valueCount == 0 branch —
 * presumably the not-nullable contract guarantees every position has at
 * least one value; confirm against the block API.
 */
@Override
public Block evalNotNullable(Block fieldVal) {
DoubleBlock v = (DoubleBlock) fieldVal;
int positionCount = v.getPositionCount();
try (DoubleVector.FixedBuilder builder = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) {
// scratch accumulator reused across positions (compensated summation)
CompensatedSum work = new CompensatedSum();
for (int p = 0; p < positionCount; p++) {
int valueCount = v.getValueCount(p);
int first = v.getFirstValueIndex(p);
int end = first + valueCount;
for (int i = first; i < end; i++) {
double value = v.getDouble(i);
MvAvg.process(work, value);
}
double result = MvAvg.finish(work, valueCount);
builder.appendDouble(result);
}
return builder.build().asBlock();
}
}
@Override
public long baseRamBytesUsed() {
// shallow size of this evaluator plus whatever the wrapped field evaluator reports
return BASE_RAM_BYTES_USED + field.baseRamBytesUsed();
}
public static
|
MvAvgDoubleEvaluator
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/processor/interceptor/AuditInterceptorDelegateIssueTest.java
|
{
"start": 3144,
"end": 3762
}
|
/**
 * Test intercept strategy that wraps every processor in a delegate and
 * records (via the {@code invoked} flag) whether the delegate was exercised.
 */
class ____ implements InterceptStrategy {
// volatile: set on the exchange-processing thread, read from the test thread
private volatile boolean invoked;
@Override
public Processor wrapProcessorInInterceptors(
CamelContext context, NamedNode definition, Processor target, Processor nextTarget) {
return new DelegateProcessor(target) {
protected void processNext(Exchange exchange) throws Exception {
// flag before delegating so the test can assert the wrapper ran
invoked = true;
super.processNext(exchange);
}
};
}
/** Returns true once the wrapped processor has handled at least one exchange. */
public boolean isInvoked() {
return invoked;
}
}
}
|
MyIntercepStrategy
|
java
|
apache__maven
|
impl/maven-cli/src/main/java/org/apache/maven/cling/invoker/mvn/CommonsCliMavenOptions.java
|
{
"start": 7547,
"end": 15117
}
|
/**
 * Maven CLI option manager: declares the Maven-specific command-line options
 * on top of the common ones contributed by the superclass. Option letters and
 * long names are part of the public CLI surface and must not change.
 */
class ____ extends CommonsCliOptions.CLIManager {
// short-option constants, one per CLI flag declared in prepareOptions()
public static final String ALTERNATE_POM_FILE = "f";
public static final String NON_RECURSIVE = "N";
public static final String UPDATE_SNAPSHOTS = "U";
public static final String ACTIVATE_PROFILES = "P";
public static final String SUPPRESS_SNAPSHOT_UPDATES = "nsu";
public static final String CHECKSUM_FAILURE_POLICY = "C";
public static final String CHECKSUM_WARNING_POLICY = "c";
public static final String FAIL_FAST = "ff";
public static final String FAIL_AT_END = "fae";
public static final String FAIL_NEVER = "fn";
public static final String RESUME = "r";
public static final String RESUME_FROM = "rf";
public static final String PROJECT_LIST = "pl";
public static final String ALSO_MAKE = "am";
public static final String ALSO_MAKE_DEPENDENTS = "amd";
public static final String THREADS = "T";
public static final String BUILDER = "b";
public static final String NO_TRANSFER_PROGRESS = "ntp";
public static final String CACHE_ARTIFACT_NOT_FOUND = "canf";
public static final String STRICT_ARTIFACT_DESCRIPTOR_POLICY = "sadp";
public static final String IGNORE_TRANSITIVE_REPOSITORIES = "itr";
public static final String AT_FILE = "af";
/**
 * Registers Maven's options; calls super first so the common options are
 * present, then adds each Maven-specific option with its long alias and help
 * text. Option builders use .get() (commons-cli 1.7+ replacement for build()).
 */
@Override
protected void prepareOptions(org.apache.commons.cli.Options options) {
super.prepareOptions(options);
// project selection / POM location
options.addOption(Option.builder(ALTERNATE_POM_FILE)
.longOpt("file")
.hasArg()
.desc("Force the use of an alternate POM file (or directory with pom.xml)")
.get());
options.addOption(Option.builder(NON_RECURSIVE)
.longOpt("non-recursive")
.desc(
"Do not recurse into sub-projects. When used together with -pl, do not recurse into sub-projects of selected aggregators")
.get());
// snapshot / checksum policies
options.addOption(Option.builder(UPDATE_SNAPSHOTS)
.longOpt("update-snapshots")
.desc("Forces a check for missing releases and updated snapshots on remote repositories")
.get());
options.addOption(Option.builder(ACTIVATE_PROFILES)
.longOpt("activate-profiles")
.desc(
"Comma-delimited list of profiles to activate. Don't use spaces between commas or double quote the full list. Prefixing a profile with ! excludes it, and ? marks it as optional.")
.hasArg()
.get());
options.addOption(Option.builder(SUPPRESS_SNAPSHOT_UPDATES)
.longOpt("no-snapshot-updates")
.desc("Suppress SNAPSHOT updates")
.get());
options.addOption(Option.builder(CHECKSUM_FAILURE_POLICY)
.longOpt("strict-checksums")
.desc("Fail the build if checksums don't match")
.get());
options.addOption(Option.builder(CHECKSUM_WARNING_POLICY)
.longOpt("lax-checksums")
.desc("Warn if checksums don't match")
.get());
// reactor failure behavior
options.addOption(Option.builder(FAIL_FAST)
.longOpt("fail-fast")
.desc("Stop at first failure in reactorized builds")
.get());
options.addOption(Option.builder(FAIL_AT_END)
.longOpt("fail-at-end")
.desc("Only fail the build afterwards; allow all non-impacted builds to continue")
.get());
options.addOption(Option.builder(FAIL_NEVER)
.longOpt("fail-never")
.desc("NEVER fail the build, regardless of project result")
.get());
// resume / reactor subset selection
options.addOption(Option.builder(RESUME)
.longOpt("resume")
.desc(
"Resume reactor from the last failed project, using the resume.properties file in the build directory")
.get());
options.addOption(Option.builder(RESUME_FROM)
.longOpt("resume-from")
.hasArg()
.desc("Resume reactor from specified project")
.get());
options.addOption(Option.builder(PROJECT_LIST)
.longOpt("projects")
.desc(
"Comma-delimited list of specified reactor projects to build instead of all projects. Don't use spaces between commas or double quote the full list. A project can be specified by [groupId]:artifactId or by its relative path. Prefixing a project with ! excludes it, and ? marks it as optional.")
.hasArg()
.get());
options.addOption(Option.builder(ALSO_MAKE)
.longOpt("also-make")
.desc("If project list is specified, also build projects required by the list")
.get());
options.addOption(Option.builder(ALSO_MAKE_DEPENDENTS)
.longOpt("also-make-dependents")
.desc("If project list is specified, also build projects that depend on projects on the list")
.get());
// execution tuning
options.addOption(Option.builder(THREADS)
.longOpt("threads")
.hasArg()
.desc("Thread count, for instance 4 (int) or 2C/2.5C (int/float) where C is core multiplied")
.get());
options.addOption(Option.builder(BUILDER)
.longOpt("builder")
.hasArg()
.desc("The id of the build strategy to use")
.get());
options.addOption(Option.builder(NO_TRANSFER_PROGRESS)
.longOpt("no-transfer-progress")
.desc("Do not display transfer progress when downloading or uploading")
.get());
// resolver behavior
options.addOption(Option.builder(CACHE_ARTIFACT_NOT_FOUND)
.longOpt("cache-artifact-not-found")
.hasArg()
.desc(
"Defines caching behaviour for 'not found' artifacts. Supported values are 'true' (default), 'false'.")
.get());
options.addOption(Option.builder(STRICT_ARTIFACT_DESCRIPTOR_POLICY)
.longOpt("strict-artifact-descriptor-policy")
.hasArg()
.desc(
"Defines 'strict' artifact descriptor policy. Supported values are 'true', 'false' (default).")
.get());
options.addOption(Option.builder(IGNORE_TRANSITIVE_REPOSITORIES)
.longOpt("ignore-transitive-repositories")
.desc("If set, Maven will ignore remote repositories introduced by transitive dependencies.")
.get());
options.addOption(Option.builder(AT_FILE)
.longOpt("at-file")
.hasArg()
.desc(
"If set, Maven will load command line options from the specified file and merge with CLI specified ones.")
.get());
}
}
}
|
CLIManager
|
java
|
apache__kafka
|
connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedConfig.java
|
{
"start": 2440,
"end": 13343
}
|
class ____ extends WorkerConfig {
private static final Logger log = LoggerFactory.getLogger(DistributedConfig.class);
/*
* NOTE: DO NOT CHANGE EITHER CONFIG STRINGS OR THEIR JAVA VARIABLE NAMES AS
* THESE ARE PART OF THE PUBLIC API AND CHANGE WILL BREAK USER CODE.
*/
/**
* <code>group.id</code>
*/
public static final String GROUP_ID_CONFIG = CommonClientConfigs.GROUP_ID_CONFIG;
private static final String GROUP_ID_DOC = "A unique string that identifies the Connect cluster group this worker belongs to.";
/**
* <code>session.timeout.ms</code>
*/
public static final String SESSION_TIMEOUT_MS_CONFIG = CommonClientConfigs.SESSION_TIMEOUT_MS_CONFIG;
private static final String SESSION_TIMEOUT_MS_DOC = "The timeout used to detect worker failures. " +
"The worker sends periodic heartbeats to indicate its liveness to the broker. If no heartbeats are " +
"received by the broker before the expiration of this session timeout, then the broker will remove the " +
"worker from the group and initiate a rebalance. Note that the value must be in the allowable range as " +
"configured in the broker configuration by <code>group.min.session.timeout.ms</code> " +
"and <code>group.max.session.timeout.ms</code>.";
/**
* <code>heartbeat.interval.ms</code>
*/
public static final String HEARTBEAT_INTERVAL_MS_CONFIG = CommonClientConfigs.HEARTBEAT_INTERVAL_MS_CONFIG;
private static final String HEARTBEAT_INTERVAL_MS_DOC = "The expected time between heartbeats to the group " +
"coordinator when using Kafka's group management facilities. Heartbeats are used to ensure that the " +
"worker's session stays active and to facilitate rebalancing when new members join or leave the group. " +
"The value must be set lower than <code>session.timeout.ms</code>, but typically should be set no higher " +
"than 1/3 of that value. It can be adjusted even lower to control the expected time for normal rebalances.";
/**
* <code>rebalance.timeout.ms</code>
*/
public static final String REBALANCE_TIMEOUT_MS_CONFIG = CommonClientConfigs.REBALANCE_TIMEOUT_MS_CONFIG;
private static final String REBALANCE_TIMEOUT_MS_DOC = CommonClientConfigs.REBALANCE_TIMEOUT_MS_DOC;
public static final String METADATA_RECOVERY_STRATEGY_CONFIG = CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG;
private static final String METADATA_RECOVERY_STRATEGY_DOC = CommonClientConfigs.METADATA_RECOVERY_STRATEGY_DOC;
public static final String DEFAULT_METADATA_RECOVERY_STRATEGY = CommonClientConfigs.DEFAULT_METADATA_RECOVERY_STRATEGY;
public static final String METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_CONFIG = CommonClientConfigs.METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_CONFIG;
private static final String METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_DOC = CommonClientConfigs.METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_DOC;
public static final long DEFAULT_METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS = CommonClientConfigs.DEFAULT_METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS;
/**
* <code>worker.sync.timeout.ms</code>
*/
public static final String WORKER_SYNC_TIMEOUT_MS_CONFIG = "worker.sync.timeout.ms";
private static final String WORKER_SYNC_TIMEOUT_MS_DOC = "When the worker is out of sync with other workers and needs" +
" to resynchronize configurations, wait up to this amount of time before giving up, leaving the group, and" +
" waiting a backoff period before rejoining.";
/**
* <code>group.unsync.timeout.ms</code>
*/
public static final String WORKER_UNSYNC_BACKOFF_MS_CONFIG = "worker.unsync.backoff.ms";
private static final String WORKER_UNSYNC_BACKOFF_MS_DOC = "When the worker is out of sync with other workers and " +
" fails to catch up within the <code>worker.sync.timeout.ms</code>, leave the Connect cluster for this long before rejoining.";
public static final int WORKER_UNSYNC_BACKOFF_MS_DEFAULT = 5 * 60 * 1000;
public static final String CONFIG_STORAGE_PREFIX = "config.storage.";
public static final String OFFSET_STORAGE_PREFIX = "offset.storage.";
public static final String STATUS_STORAGE_PREFIX = "status.storage.";
public static final String TOPIC_SUFFIX = "topic";
public static final String PARTITIONS_SUFFIX = "partitions";
public static final String REPLICATION_FACTOR_SUFFIX = "replication.factor";
/**
* <code>offset.storage.topic</code>
*/
public static final String OFFSET_STORAGE_TOPIC_CONFIG = OFFSET_STORAGE_PREFIX + TOPIC_SUFFIX;
private static final String OFFSET_STORAGE_TOPIC_CONFIG_DOC = "The name of the Kafka topic where source connector offsets are stored";
/**
* <code>offset.storage.partitions</code>
*/
public static final String OFFSET_STORAGE_PARTITIONS_CONFIG = OFFSET_STORAGE_PREFIX + PARTITIONS_SUFFIX;
private static final String OFFSET_STORAGE_PARTITIONS_CONFIG_DOC = "The number of partitions used when creating the offset storage topic";
/**
* <code>offset.storage.replication.factor</code>
*/
public static final String OFFSET_STORAGE_REPLICATION_FACTOR_CONFIG = OFFSET_STORAGE_PREFIX + REPLICATION_FACTOR_SUFFIX;
private static final String OFFSET_STORAGE_REPLICATION_FACTOR_CONFIG_DOC = "Replication factor used when creating the offset storage topic";
/**
* <code>config.storage.topic</code>
*/
public static final String CONFIG_TOPIC_CONFIG = CONFIG_STORAGE_PREFIX + TOPIC_SUFFIX;
private static final String CONFIG_TOPIC_CONFIG_DOC = "The name of the Kafka topic where connector configurations are stored";
/**
* <code>config.storage.replication.factor</code>
*/
public static final String CONFIG_STORAGE_REPLICATION_FACTOR_CONFIG = CONFIG_STORAGE_PREFIX + REPLICATION_FACTOR_SUFFIX;
private static final String CONFIG_STORAGE_REPLICATION_FACTOR_CONFIG_DOC = "Replication factor used when creating the configuration storage topic";
/**
* <code>status.storage.topic</code>
*/
public static final String STATUS_STORAGE_TOPIC_CONFIG = STATUS_STORAGE_PREFIX + TOPIC_SUFFIX;
public static final String STATUS_STORAGE_TOPIC_CONFIG_DOC = "The name of the Kafka topic where connector and task status are stored";
/**
* <code>status.storage.partitions</code>
*/
public static final String STATUS_STORAGE_PARTITIONS_CONFIG = STATUS_STORAGE_PREFIX + PARTITIONS_SUFFIX;
private static final String STATUS_STORAGE_PARTITIONS_CONFIG_DOC = "The number of partitions used when creating the status storage topic";
/**
* <code>status.storage.replication.factor</code>
*/
public static final String STATUS_STORAGE_REPLICATION_FACTOR_CONFIG = STATUS_STORAGE_PREFIX + REPLICATION_FACTOR_SUFFIX;
private static final String STATUS_STORAGE_REPLICATION_FACTOR_CONFIG_DOC = "Replication factor used when creating the status storage topic";
/**
* <code>connect.protocol</code>
*/
public static final String CONNECT_PROTOCOL_CONFIG = "connect.protocol";
public static final String CONNECT_PROTOCOL_DOC = "Compatibility mode for Kafka Connect Protocol";
public static final String CONNECT_PROTOCOL_DEFAULT = ConnectProtocolCompatibility.SESSIONED.toString();
/**
* <code>scheduled.rebalance.max.delay.ms</code>
*/
public static final String SCHEDULED_REBALANCE_MAX_DELAY_MS_CONFIG = "scheduled.rebalance.max.delay.ms";
public static final String SCHEDULED_REBALANCE_MAX_DELAY_MS_DOC = "The maximum delay that is "
+ "scheduled in order to wait for the return of one or more departed workers before "
+ "rebalancing and reassigning their connectors and tasks to the group. During this "
+ "period the connectors and tasks of the departed workers remain unassigned";
public static final int SCHEDULED_REBALANCE_MAX_DELAY_MS_DEFAULT = Math.toIntExact(TimeUnit.SECONDS.toMillis(300));
public static final String INTER_WORKER_KEY_GENERATION_ALGORITHM_CONFIG = "inter.worker.key.generation.algorithm";
public static final String INTER_WORKER_KEY_GENERATION_ALGORITHM_DEFAULT = "HmacSHA256";
public static final String INTER_WORKER_KEY_GENERATION_ALGORITHM_DOC = "The algorithm to use for generating internal request keys. "
+ "The algorithm '" + INTER_WORKER_KEY_GENERATION_ALGORITHM_DEFAULT + "' will be used as a default on JVMs that support it; "
+ "on other JVMs, no default is used and a value for this property must be manually specified in the worker config.";
public static final String INTER_WORKER_KEY_SIZE_CONFIG = "inter.worker.key.size";
public static final String INTER_WORKER_KEY_SIZE_DOC = "The size of the key to use for signing internal requests, in bits. "
+ "If null, the default key size for the key generation algorithm will be used.";
public static final Long INTER_WORKER_KEY_SIZE_DEFAULT = null;
public static final String INTER_WORKER_KEY_TTL_MS_CONFIG = "inter.worker.key.ttl.ms";
public static final String INTER_WORKER_KEY_TTL_MS_DOC = "The TTL of generated session keys used for "
+ "internal request validation (in milliseconds)";
public static final int INTER_WORKER_KEY_TTL_MS_DEFAULT = Math.toIntExact(TimeUnit.HOURS.toMillis(1));
public static final String INTER_WORKER_SIGNATURE_ALGORITHM_CONFIG = "inter.worker.signature.algorithm";
public static final String INTER_WORKER_SIGNATURE_ALGORITHM_DEFAULT = "HmacSHA256";
public static final String INTER_WORKER_SIGNATURE_ALGORITHM_DOC = "The algorithm used to sign internal requests. "
+ "The algorithm '" + INTER_WORKER_SIGNATURE_ALGORITHM_DEFAULT + "' will be used as a default on JVMs that support it; "
+ "on other JVMs, no default is used and a value for this property must be manually specified in the worker config.";
public static final String INTER_WORKER_VERIFICATION_ALGORITHMS_CONFIG = "inter.worker.verification.algorithms";
public static final List<String> INTER_WORKER_VERIFICATION_ALGORITHMS_DEFAULT = List.of(INTER_WORKER_SIGNATURE_ALGORITHM_DEFAULT);
public static final String INTER_WORKER_VERIFICATION_ALGORITHMS_DOC = "A list of permitted algorithms for verifying internal requests, "
+ "which must include the algorithm used for the <code>" + INTER_WORKER_SIGNATURE_ALGORITHM_CONFIG + "</code> property. "
+ "The algorithm(s) '" + INTER_WORKER_VERIFICATION_ALGORITHMS_DEFAULT + "' will be used as a default on JVMs that provide them; "
+ "on other JVMs, no default is used and a value for this property must be manually specified in the worker config.";
private final Crypto crypto;
public
|
DistributedConfig
|
java
|
apache__commons-lang
|
src/main/java/org/apache/commons/lang3/SystemUtils.java
|
{
"start": 13162,
"end": 13773
}
|
class ____ loaded, the value will be out of sync with that System property.
* </p>
*
* @see SystemProperties#getJavaRuntimeName()
* @since 2.0
* @since Java 1.3
*/
public static final String JAVA_RUNTIME_NAME = SystemProperties.getJavaRuntimeName();
/**
* A constant for the System Property {@code java.runtime.version}. Java Runtime Environment version.
*
* <p>
* Defaults to {@code null} if the runtime does not have security access to read this property or the property does not exist.
* </p>
* <p>
* This value is initialized when the
|
is
|
java
|
greenrobot__EventBus
|
EventBusTestJava/src/main/java/org/greenrobot/eventbus/EventBusInheritanceDisabledTest.java
|
{
"start": 5018,
"end": 5712
}
|
/**
 * Sticky-event subscriber used to count deliveries per event type.
 * NOTE(review): the count* fields are declared outside this visible span —
 * presumably instance counters on the enclosing test; verify in the full file.
 */
class ____ {
@Subscribe(sticky = true)
public void onEvent(Object event) {
// catch-all subscription: counts every delivered event object
countObjectEvent++;
}
@Subscribe(sticky = true)
public void onEvent(MyEvent event) {
countMyEvent++;
}
@Subscribe(sticky = true)
public void onEvent(MyEventExtended event) {
countMyEventExtended++;
}
@Subscribe(sticky = true)
public void onEvent(MyEventInterface event) {
countMyEventInterface++;
}
@Subscribe(sticky = true)
public void onEvent(MyEventInterfaceExtended event) {
countMyEventInterfaceExtended++;
}
}
}
|
StickySubscriber
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapReduceLazyOutput.java
|
{
"start": 1971,
"end": 2324
}
|
class ____ {
private static final int NUM_HADOOP_WORKERS = 3;
private static final int NUM_MAPS_PER_NODE = 2;
private static final Path INPUTPATH = new Path("/testlazy/input");
private static final List<String> INPUTLIST =
Arrays.asList("All","Roads","Lead","To","Hadoop");
/**
* Test mapper.
*/
public static
|
TestMapReduceLazyOutput
|
java
|
apache__camel
|
components/camel-huawei/camel-huaweicloud-frs/src/test/java/org/apache/camel/component/huaweicloud/frs/mock/FaceVerificationWithImageFileAndMockClientTest.java
|
{
"start": 1475,
"end": 3826
}
|
/**
 * Verifies the hwcloud-frs faceVerification operation against a mocked FRS
 * client, feeding both face images via file-path exchange properties.
 */
class ____ extends CamelTestSupport {
TestConfiguration testConfiguration = new TestConfiguration();
// mocked FRS client bound into the registry and referenced from the endpoint URI
@BindToRegistry("frsClient")
FrsClientMock frsClient = new FrsClientMock(null);
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from("direct:trigger_route")
.setProperty(FaceRecognitionProperties.FACE_IMAGE_FILE_PATH,
constant(testConfiguration.getProperty("imageFilePath")))
.setProperty(FaceRecognitionProperties.ANOTHER_FACE_IMAGE_FILE_PATH,
constant(testConfiguration.getProperty("anotherImageFilePath")))
.to("hwcloud-frs:faceVerification?"
+ "accessKey=" + testConfiguration.getProperty("accessKey")
+ "&secretKey=" + testConfiguration.getProperty("secretKey")
+ "&projectId=" + testConfiguration.getProperty("projectId")
// FIX: was "®ion=" — mojibake of "&region=" (the "&reg"
// sequence was rendered as the (R) entity), which silently
// dropped the region URI parameter
+ "&region=" + testConfiguration.getProperty("region")
+ "&ignoreSslVerification=true"
+ "&frsClient=#frsClient")
.log("perform faceVerification successfully")
.to("mock:perform_face_verification_result");
}
};
}
/**
 * use imageFilePath to perform faceVerification
 *
 * @throws Exception
 */
@Test
public void testFaceVerification() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:perform_face_verification_result");
mock.expectedMinimumMessageCount(1);
template.sendBody("direct:trigger_route", "");
Exchange responseExchange = mock.getExchanges().get(0);
mock.assertIsSatisfied();
// response carries the mocked comparison result for both faces
assertTrue(responseExchange.getIn().getBody() instanceof CompareFaceByFileResponse);
CompareFaceByFileResponse response = (CompareFaceByFileResponse) responseExchange.getIn().getBody();
assertEquals(MockResult.getCompareFaceResult(), response.getImage1Face());
assertEquals(MockResult.getCompareFaceResult(), response.getImage2Face());
assertEquals(1.0, response.getSimilarity());
}
}
|
FaceVerificationWithImageFileAndMockClientTest
|
java
|
apache__kafka
|
connect/api/src/test/java/org/apache/kafka/connect/source/SourceRecordTest.java
|
{
"start": 1617,
"end": 6080
}
|
/**
 * Unit tests for SourceRecord: construction with/without headers, duplication
 * via newRecord (header cloning vs. replacement), and header mutation.
 */
class ____ {
private static final Map<String, ?> SOURCE_PARTITION = Map.of("src", "abc");
private static final Map<String, ?> SOURCE_OFFSET = Map.of("offset", "1");
private static final String TOPIC_NAME = "myTopic";
private static final Integer PARTITION_NUMBER = 0;
private static final Long KAFKA_TIMESTAMP = 0L;
// fresh record per test, built with null headers (empty header set)
private SourceRecord record;
@BeforeEach
public void beforeEach() {
record = new SourceRecord(SOURCE_PARTITION, SOURCE_OFFSET, TOPIC_NAME, PARTITION_NUMBER, Schema.STRING_SCHEMA, "key",
Schema.BOOLEAN_SCHEMA, false, KAFKA_TIMESTAMP, null);
}
// explicitly supplied Headers instance is kept by reference, not copied
@Test
public void shouldCreateSinkRecordWithHeaders() {
Headers headers = new ConnectHeaders().addString("h1", "hv1").addBoolean("h2", true);
record = new SourceRecord(SOURCE_PARTITION, SOURCE_OFFSET, TOPIC_NAME, PARTITION_NUMBER, Schema.STRING_SCHEMA, "key",
Schema.BOOLEAN_SCHEMA, false, KAFKA_TIMESTAMP, headers);
assertNotNull(record.headers());
assertSame(headers, record.headers());
assertFalse(record.headers().isEmpty());
}
// null headers argument results in a non-null, empty header collection
@Test
public void shouldCreateSinkRecordWithEmptyHeaders() {
assertEquals(SOURCE_PARTITION, record.sourcePartition());
assertEquals(SOURCE_OFFSET, record.sourceOffset());
assertEquals(TOPIC_NAME, record.topic());
assertEquals(PARTITION_NUMBER, record.kafkaPartition());
assertEquals(Schema.STRING_SCHEMA, record.keySchema());
assertEquals("key", record.key());
assertEquals(Schema.BOOLEAN_SCHEMA, record.valueSchema());
assertEquals(false, record.value());
assertEquals(KAFKA_TIMESTAMP, record.timestamp());
assertNotNull(record.headers());
assertTrue(record.headers().isEmpty());
}
// newRecord without headers clones the originals: equal content, distinct instance
@Test
public void shouldDuplicateRecordAndCloneHeaders() {
SourceRecord duplicate = record.newRecord(TOPIC_NAME, PARTITION_NUMBER, Schema.STRING_SCHEMA, "key", Schema.BOOLEAN_SCHEMA, false,
KAFKA_TIMESTAMP);
assertEquals(SOURCE_PARTITION, duplicate.sourcePartition());
assertEquals(SOURCE_OFFSET, duplicate.sourceOffset());
assertEquals(TOPIC_NAME, duplicate.topic());
assertEquals(PARTITION_NUMBER, duplicate.kafkaPartition());
assertEquals(Schema.STRING_SCHEMA, duplicate.keySchema());
assertEquals("key", duplicate.key());
assertEquals(Schema.BOOLEAN_SCHEMA, duplicate.valueSchema());
assertEquals(false, duplicate.value());
assertEquals(KAFKA_TIMESTAMP, duplicate.timestamp());
assertNotNull(duplicate.headers());
assertTrue(duplicate.headers().isEmpty());
assertNotSame(record.headers(), duplicate.headers());
assertEquals(record.headers(), duplicate.headers());
}
// newRecord with explicit headers adopts the new instance by reference
@Test
public void shouldDuplicateRecordUsingNewHeaders() {
Headers newHeaders = new ConnectHeaders().addString("h3", "hv3");
SourceRecord duplicate = record.newRecord(TOPIC_NAME, PARTITION_NUMBER, Schema.STRING_SCHEMA, "key", Schema.BOOLEAN_SCHEMA, false,
KAFKA_TIMESTAMP, newHeaders);
assertEquals(SOURCE_PARTITION, duplicate.sourcePartition());
assertEquals(SOURCE_OFFSET, duplicate.sourceOffset());
assertEquals(TOPIC_NAME, duplicate.topic());
assertEquals(PARTITION_NUMBER, duplicate.kafkaPartition());
assertEquals(Schema.STRING_SCHEMA, duplicate.keySchema());
assertEquals("key", duplicate.key());
assertEquals(Schema.BOOLEAN_SCHEMA, duplicate.valueSchema());
assertEquals(false, duplicate.value());
assertEquals(KAFKA_TIMESTAMP, duplicate.timestamp());
assertNotNull(duplicate.headers());
assertEquals(newHeaders, duplicate.headers());
assertSame(newHeaders, duplicate.headers());
assertNotSame(record.headers(), duplicate.headers());
assertNotEquals(record.headers(), duplicate.headers());
}
// headers() exposes a live, mutable collection
@Test
public void shouldModifyRecordHeader() {
assertTrue(record.headers().isEmpty());
record.headers().addInt("intHeader", 100);
assertEquals(1, record.headers().size());
Header header = record.headers().lastWithName("intHeader");
assertEquals(100, (int) Values.convertToInteger(header.schema(), header.value()));
}
}
|
SourceRecordTest
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/context/annotation/PropertySourceAnnotationTests.java
|
{
"start": 15543,
"end": 15683
}
|
class ____ {
}
@Configuration
@MyPropertySource("classpath:org/springframework/context/annotation/p2.properties")
static
|
WithCustomFactory
|
java
|
ReactiveX__RxJava
|
src/test/java/io/reactivex/rxjava3/internal/schedulers/IoSchedulerInternalTest.java
|
{
"start": 995,
"end": 3073
}
|
class ____ extends RxJavaTest {
@Test
public void expiredQueueEmpty() {
ConcurrentLinkedQueue<ThreadWorker> expire = new ConcurrentLinkedQueue<>();
CompositeDisposable cd = new CompositeDisposable();
CachedWorkerPool.evictExpiredWorkers(expire, cd);
}
@Test
public void expiredWorkerRemoved() {
ConcurrentLinkedQueue<ThreadWorker> expire = new ConcurrentLinkedQueue<>();
CompositeDisposable cd = new CompositeDisposable();
ThreadWorker tw = new ThreadWorker(new RxThreadFactory("IoExpiryTest"));
try {
expire.add(tw);
cd.add(tw);
CachedWorkerPool.evictExpiredWorkers(expire, cd);
assertTrue(tw.isDisposed());
assertTrue(expire.isEmpty());
} finally {
tw.dispose();
}
}
@Test
public void noExpiredWorker() {
ConcurrentLinkedQueue<ThreadWorker> expire = new ConcurrentLinkedQueue<>();
CompositeDisposable cd = new CompositeDisposable();
ThreadWorker tw = new ThreadWorker(new RxThreadFactory("IoExpiryTest"));
tw.setExpirationTime(System.nanoTime() + 10_000_000_000L);
try {
expire.add(tw);
cd.add(tw);
CachedWorkerPool.evictExpiredWorkers(expire, cd);
assertFalse(tw.isDisposed());
assertFalse(expire.isEmpty());
} finally {
tw.dispose();
}
}
@Test
public void expireReuseRace() {
ConcurrentLinkedQueue<ThreadWorker> expire = new ConcurrentLinkedQueue<>();
CompositeDisposable cd = new CompositeDisposable();
ThreadWorker tw = new ThreadWorker(new RxThreadFactory("IoExpiryTest"));
tw.dispose();
for (int i = 0; i < TestHelper.RACE_DEFAULT_LOOPS; i++) {
expire.add(tw);
cd.add(tw);
TestHelper.race(
() -> CachedWorkerPool.evictExpiredWorkers(expire, cd),
() -> expire.remove(tw)
);
}
}
}
|
IoSchedulerInternalTest
|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/internal/jdk8/FlowableMapOptional.java
|
{
"start": 3699,
"end": 5584
}
|
class ____<T, R> extends BasicFuseableConditionalSubscriber<T, R> {
final Function<? super T, Optional<? extends R>> mapper;
MapOptionalConditionalSubscriber(ConditionalSubscriber<? super R> downstream, Function<? super T, Optional<? extends R>> mapper) {
super(downstream);
this.mapper = mapper;
}
@Override
public void onNext(T t) {
if (!tryOnNext(t)) {
upstream.request(1);
}
}
@Override
public boolean tryOnNext(T t) {
if (done) {
return true;
}
if (sourceMode != NONE) {
downstream.onNext(null);
return true;
}
Optional<? extends R> result;
try {
result = Objects.requireNonNull(mapper.apply(t), "The mapper returned a null Optional");
} catch (Throwable ex) {
fail(ex);
return true;
}
if (result.isPresent()) {
return downstream.tryOnNext(result.get());
}
return false;
}
@Override
public int requestFusion(int mode) {
return transitiveBoundaryFusion(mode);
}
@Override
public R poll() throws Throwable {
for (;;) {
T item = qs.poll();
if (item == null) {
return null;
}
Optional<? extends R> result = Objects.requireNonNull(mapper.apply(item), "The mapper returned a null Optional");
if (result.isPresent()) {
return result.get();
}
if (sourceMode == ASYNC) {
qs.request(1);
}
}
}
}
}
|
MapOptionalConditionalSubscriber
|
java
|
apache__commons-lang
|
src/main/java/org/apache/commons/lang3/function/FailableToLongFunction.java
|
{
"start": 1149,
"end": 1892
}
|
interface ____<T, E extends Throwable> {
/** NOP singleton */
@SuppressWarnings("rawtypes")
FailableToLongFunction NOP = t -> 0L;
/**
* Gets the NOP singleton.
*
* @param <T> the type of the argument to the function
* @param <E> The kind of thrown exception or error.
* @return The NOP singleton.
*/
@SuppressWarnings("unchecked")
static <T, E extends Throwable> FailableToLongFunction<T, E> nop() {
return NOP;
}
/**
* Applies this function to the given arguments.
*
* @param t the first function argument
* @return the function result
* @throws E Thrown when the function fails.
*/
long applyAsLong(T t) throws E;
}
|
FailableToLongFunction
|
java
|
square__moshi
|
moshi/src/test/java/com/squareup/moshi/internal/ClassJsonAdapterTest.java
|
{
"start": 10656,
"end": 11752
}
|
class ____ {}
@Test
public void nonStaticNestedClassNotSupported() throws Exception {
try {
ClassJsonAdapter.Factory.create(NonStatic.class, NO_ANNOTATIONS, moshi);
fail();
} catch (IllegalArgumentException expected) {
assertThat(expected)
.hasMessageThat()
.isEqualTo(
"Cannot serialize non-static nested class "
+ "com.squareup.moshi.internal.ClassJsonAdapterTest$NonStatic");
}
}
@Test
public void anonymousClassNotSupported() throws Exception {
Comparator<Object> c =
new Comparator<Object>() {
@Override
public int compare(Object a, Object b) {
return 0;
}
};
try {
ClassJsonAdapter.Factory.create(c.getClass(), NO_ANNOTATIONS, moshi);
fail();
} catch (IllegalArgumentException expected) {
assertThat(expected)
.hasMessageThat()
.isEqualTo("Cannot serialize anonymous class " + c.getClass().getName());
}
}
@Test
public void localClassNotSupported() throws Exception {
|
NonStatic
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobsInfo.java
|
{
"start": 1116,
"end": 1371
}
|
class ____ {
protected ArrayList<JobInfo> job = new ArrayList<JobInfo>();
public JobsInfo() {
} // JAXB needs this
public void add(JobInfo jobInfo) {
job.add(jobInfo);
}
public ArrayList<JobInfo> getJobs() {
return job;
}
}
|
JobsInfo
|
java
|
junit-team__junit5
|
jupiter-tests/src/test/java/org/junit/jupiter/engine/TestClassInheritanceTests.java
|
{
"start": 1052,
"end": 1131
}
|
class ____ support in the {@link JupiterTestEngine}.
*
* @since 5.0
*/
|
hierarchy
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/metrics/util/TestingMetricRegistry.java
|
{
"start": 3919,
"end": 6469
}
|
class ____ {
private char delimiter = '.';
private int numberReporters = 0;
private TriConsumer<Metric, String, AbstractMetricGroup<?>> registerConsumer =
(ignoreMetric, ignoreMetricName, ignoreGroup) -> {};
private TriConsumer<Metric, String, AbstractMetricGroup<?>> unregisterConsumer =
(ignoreMetric, ignoreMetricName, ignoreGroup) -> {};
private BiConsumer<Span, AbstractMetricGroup<?>> spanConsumer = (span, group) -> {};
private BiConsumer<Event, AbstractMetricGroup<?>> eventConsumer = (event, group) -> {};
private ScopeFormats scopeFormats = ScopeFormats.fromConfig(new Configuration());
private TestingMetricRegistryBuilder() {}
public TestingMetricRegistryBuilder setDelimiter(char delimiter) {
this.delimiter = delimiter;
return this;
}
public TestingMetricRegistryBuilder setNumberReporters(int numberReporters) {
this.numberReporters = numberReporters;
return this;
}
public TestingMetricRegistryBuilder setRegisterConsumer(
TriConsumer<Metric, String, AbstractMetricGroup<?>> registerConsumer) {
this.registerConsumer = registerConsumer;
return this;
}
public TestingMetricRegistryBuilder setUnregisterConsumer(
TriConsumer<Metric, String, AbstractMetricGroup<?>> unregisterConsumer) {
this.unregisterConsumer = unregisterConsumer;
return this;
}
public TestingMetricRegistryBuilder setSpanConsumer(
BiConsumer<Span, AbstractMetricGroup<?>> spanConsumer) {
this.spanConsumer = spanConsumer;
return this;
}
public TestingMetricRegistryBuilder setEventConsumer(
BiConsumer<Event, AbstractMetricGroup<?>> eventConsumer) {
this.eventConsumer = eventConsumer;
return this;
}
public TestingMetricRegistryBuilder setScopeFormats(ScopeFormats scopeFormats) {
this.scopeFormats = scopeFormats;
return this;
}
public TestingMetricRegistry build() {
return new TestingMetricRegistry(
delimiter,
numberReporters,
registerConsumer,
unregisterConsumer,
spanConsumer,
eventConsumer,
scopeFormats);
}
}
}
|
TestingMetricRegistryBuilder
|
java
|
hibernate__hibernate-orm
|
tooling/metamodel-generator/src/jakartaData/java/org/hibernate/processor/test/data/eg/Bookshop.java
|
{
"start": 636,
"end": 1591
}
|
interface ____ extends CrudRepository<Book,String> {
@Find
@Transactional(REQUIRES_NEW)
List<Book> byPublisher(@Size(min=2,max=100) String publisher_name);
@Find
List<Book> byTitle(@Nonnull String title);
@Query("select isbn where title like ?1 order by isbn")
String[] ssns(@NotBlank String title);
@Query("select count(this) where title like ?1 order by isbn")
long count1(@NotNull String title);
@Query("select count(this) where this.title like ?1 order by this.isbn")
long count2(String title);
@Query("select length(text) where title = ?1")
int length(@Nonnull String title);
@Query("select count(this)")
long countAll();
@Query("where isbn in :isbns and type = Book")
List<Book> books(List<String> isbns);
@Query("delete from Book where type = org.hibernate.processor.test.data.eg.Type.Book")
long deleteAllBooks();
@Query("delete from Book where type = Book and isbn in ?1")
int deleteBooks(List<String> isbns);
}
|
Bookshop
|
java
|
spring-projects__spring-framework
|
spring-beans/src/testFixtures/java/org/springframework/beans/testfixture/beans/NestedTestBean.java
|
{
"start": 867,
"end": 1543
}
|
class ____ implements INestedTestBean {
private String company = "";
public NestedTestBean() {
}
public NestedTestBean(String company) {
setCompany(company);
}
public void setCompany(String company) {
this.company = (company != null ? company : "");
}
@Override
public String getCompany() {
return company;
}
@Override
public boolean equals(@Nullable Object obj) {
if (!(obj instanceof NestedTestBean ntb)) {
return false;
}
return this.company.equals(ntb.company);
}
@Override
public int hashCode() {
return this.company.hashCode();
}
@Override
public String toString() {
return "NestedTestBean: " + this.company;
}
}
|
NestedTestBean
|
java
|
quarkusio__quarkus
|
extensions/mongodb-client/runtime/src/main/java/io/quarkus/mongodb/reactive/ReactiveMongoDatabase.java
|
{
"start": 2409,
"end": 2838
}
|
class ____ use instead of {@code Document}.
* @return a {@link Uni} emitting the command result once completed
*/
<T> Uni<T> runCommand(Bson command, Class<T> clazz);
/**
* Executes command in the context of the current database.
*
* @param command the command to be run
* @param readPreference the {@link ReadPreference} to be used when executing the command
* @param clazz the default
|
to
|
java
|
micronaut-projects__micronaut-core
|
core/src/main/java/io/micronaut/core/io/value/Base64ResourceLoader.java
|
{
"start": 873,
"end": 1307
}
|
class ____ extends ValueResourceLoader {
private static final Base64ResourceLoader INSTANCE = new Base64ResourceLoader();
private Base64ResourceLoader() {
super("base64:");
}
public static ResourceLoader getInstance() {
return INSTANCE;
}
@Override
protected Optional<byte[]> extract(String value) {
return Optional.of(Base64.getDecoder().decode(value));
}
}
|
Base64ResourceLoader
|
java
|
grpc__grpc-java
|
core/src/main/java/io/grpc/internal/ManagedChannelImplBuilder.java
|
{
"start": 7459,
"end": 7680
}
|
interface ____ {
ClientTransportFactory buildClientTransportFactory();
}
/**
* Convenience ClientTransportFactoryBuilder, throws UnsupportedOperationException().
*/
public static
|
ClientTransportFactoryBuilder
|
java
|
apache__flink
|
flink-tests/src/test/java/org/apache/flink/test/util/NumberSequenceSourceWithWaitForCheckpoint.java
|
{
"start": 3195,
"end": 4719
}
|
class ____<
SplitT extends IteratorSourceSplit<?, ?>>
extends IteratorSourceEnumerator<SplitT> {
private final Queue<Integer> pendingRequests = new ArrayDeque<>();
private final SplitEnumeratorContext<?> context;
public AssignAfterCheckpointEnumerator(
SplitEnumeratorContext<SplitT> context, Collection<SplitT> splits) {
super(context, splits);
this.context = context;
}
@Override
public void handleSplitRequest(int subtaskId, @Nullable String requesterHostname) {
pendingRequests.add(subtaskId);
}
@Override
public Collection<SplitT> snapshotState(long checkpointId) throws Exception {
// this will be enqueued in the enumerator thread, so it will actually run after this
// method (the snapshot operation) is complete!
context.runInCoordinatorThread(this::fullFillPendingRequests);
return super.snapshotState(checkpointId);
}
private void fullFillPendingRequests() {
for (int subtask : pendingRequests) {
// respond only to requests for which we still have registered readers
if (!context.registeredReaders().containsKey(subtask)) {
continue;
}
super.handleSplitRequest(subtask, null);
}
pendingRequests.clear();
}
}
private static
|
AssignAfterCheckpointEnumerator
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/annotations/generics/EmbeddedIdGenericsSuperclassTest.java
|
{
"start": 5577,
"end": 5767
}
|
class ____ extends DomainEntityId {
private int someDomainField;
public CustomerId() {
super();
this.someDomainField = 1;
}
}
@Entity(name = "Customer")
public static
|
CustomerId
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/generator/command/pipe/TimeSeriesStatsGenerator.java
|
{
"start": 878,
"end": 4660
}
|
class ____ implements CommandGenerator {
public static final String STATS = "stats";
public static final CommandGenerator INSTANCE = new TimeSeriesStatsGenerator();
@Override
public CommandDescription generate(
List<CommandDescription> previousCommands,
List<Column> previousOutput,
QuerySchema schema,
QueryExecutor executor
) {
// generates stats in the form of:
// `STATS some_aggregation(some_field) by optional_grouping_field, non_optional = bucket(time_field, 5minute)`
// where `some_aggregation` can be a time series aggregation in the form of agg1(agg2_over_time(some_field)),
// or a regular aggregation.
// There is a variable number of aggregations per pipe
List<Column> nonNull = previousOutput.stream()
.filter(EsqlQueryGenerator::fieldCanBeUsed)
.filter(x -> x.type().equals("null") == false)
.collect(Collectors.toList());
if (nonNull.isEmpty()) {
return EMPTY_DESCRIPTION;
}
String timestamp = randomDateField(nonNull);
// if there's no timestamp field left, there's nothing to bucket on
if (timestamp == null) {
return EMPTY_DESCRIPTION;
}
// TODO: Switch back to using nonNull as possible arguments for aggregations. Using the timestamp field in both the bucket as well
// as as an argument in an aggregation causes all sorts of bizarre errors with confusing messages.
List<Column> acceptableFields = nonNull.stream()
.filter(c -> c.type().equals("datetime") == false && c.type().equals("date_nanos") == false)
.filter(c -> c.name().equals("@timestamp") == false)
.toList();
StringBuilder cmd = new StringBuilder(" | stats ");
// TODO: increase range max to 5
int nStats = randomIntBetween(1, 2);
for (int i = 0; i < nStats; i++) {
String name;
if (randomBoolean()) {
name = EsqlQueryGenerator.randomIdentifier();
} else {
name = EsqlQueryGenerator.randomName(acceptableFields);
if (name == null) {
name = EsqlQueryGenerator.randomIdentifier();
}
}
// generate the aggregation
String expression = randomBoolean()
? EsqlQueryGenerator.metricsAgg(acceptableFields)
: EsqlQueryGenerator.agg(acceptableFields);
if (i > 0) {
cmd.append(",");
}
cmd.append(" ");
cmd.append(name);
cmd.append(" = ");
cmd.append(expression);
}
cmd.append(" by ");
if (randomBoolean()) {
var col = EsqlQueryGenerator.randomGroupableName(acceptableFields);
if (col != null) {
cmd.append(col + ", ");
}
}
// TODO: add alternative time buckets
// TODO: replace name of bucket with half chance of being EsqlQueryGenerator.randomName(previousOutput) if
// is fixed https://github.com/elastic/elasticsearch/issues/134796
cmd.append(EsqlQueryGenerator.randomIdentifier() + " = bucket(" + timestamp + ",1hour)");
return new CommandDescription(STATS, this, cmd.toString(), Map.of());
}
@Override
public ValidationResult validateOutput(
List<CommandDescription> previousCommands,
CommandDescription commandDescription,
List<Column> previousColumns,
List<List<Object>> previousOutput,
List<Column> columns,
List<List<Object>> output
) {
// TODO validate columns
return VALIDATION_OK;
}
}
|
TimeSeriesStatsGenerator
|
java
|
quarkusio__quarkus
|
extensions/container-image/container-image-jib/deployment/src/main/java/io/quarkus/container/image/jib/deployment/JibProcessor.java
|
{
"start": 4452,
"end": 50278
}
|
class ____ {
private static final Logger log = Logger.getLogger(JibProcessor.class);
public static final String JIB = "jib";
private static final IsClassPredicate IS_CLASS_PREDICATE = new IsClassPredicate();
private static final String BINARY_NAME_IN_CONTAINER = "application";
// The source for this can be found at https://github.com/jboss-container-images/openjdk/blob/ubi9/modules/run/artifacts/opt/jboss/container/java/run/run-java.sh
// A list of env vars that affect this script can be found at https://rh-openjdk.github.io/redhat-openjdk-containers/ubi9/ubi9-openjdk-17.html
private static final String RUN_JAVA_PATH = "/opt/jboss/container/java/run/run-java.sh";
private static final String DEFAULT_BASE_IMAGE_USER = "185";
private static final String OPENTELEMETRY_CONTEXT_CONTEXT_STORAGE_PROVIDER_SYS_PROP = "io.opentelemetry.context.contextStorageProvider";
private static final FilePermissions REMOTE_DEV_FOLDER_PERMISSIONS = FilePermissions.fromOctalString("777");
private static final FilePermissions REMOTE_DEV_FILE_PERMISSIONS = FilePermissions.fromOctalString("666");
private static final FilePermissionsProvider REMOTE_DEV_FOLDER_PERMISSIONS_PROVIDER = (sourcePath,
destinationPath) -> Files.isDirectory(sourcePath)
? REMOTE_DEV_FOLDER_PERMISSIONS
: REMOTE_DEV_FILE_PERMISSIONS;
private static final OwnershipProvider REMOTE_DEV_OWNERSHIP_PROVIDER = (sourcePath,
destinationPath) -> DEFAULT_BASE_IMAGE_USER;
@BuildStep
public AvailableContainerImageExtensionBuildItem availability() {
return new AvailableContainerImageExtensionBuildItem(JIB);
}
// when AppCDS are enabled and a container image build via Jib has been requested,
// we want the AppCDS generation process to use the same JVM as the base image
// in order to make the AppCDS usable by the runtime JVM
@BuildStep(onlyIf = JibBuild.class)
public void jvmStartupOptimizerArchive(ContainerImageConfig containerImageConfig,
CompiledJavaVersionBuildItem compiledJavaVersion,
ContainerImageJibConfig jibConfig,
BuildProducer<JvmStartupOptimizerArchiveContainerImageBuildItem> producer) {
if (!containerImageConfig.isBuildExplicitlyEnabled() && !containerImageConfig.isPushExplicitlyEnabled()) {
return;
}
producer.produce(
new JvmStartupOptimizerArchiveContainerImageBuildItem(determineBaseJvmImage(jibConfig, compiledJavaVersion)));
}
private String determineBaseJvmImage(ContainerImageJibConfig jibConfig, CompiledJavaVersionBuildItem compiledJavaVersion) {
if (jibConfig.baseJvmImage().isPresent()) {
return jibConfig.baseJvmImage().get();
}
return ContainerImages.getDefaultJvmImage(compiledJavaVersion.getJavaVersion());
}
@SuppressWarnings("deprecation") // legacy JAR
@BuildStep(onlyIf = { IsProduction.class, JibBuild.class }, onlyIfNot = NativeBuild.class)
public void buildFromJar(ContainerImageConfig containerImageConfig, ContainerImageJibConfig jibConfig,
PackageConfig packageConfig,
ContainerImageInfoBuildItem containerImage,
JarBuildItem sourceJar,
MainClassBuildItem mainClass,
OutputTargetBuildItem outputTarget,
CurateOutcomeBuildItem curateOutcome,
CompiledJavaVersionBuildItem compiledJavaVersion,
Optional<ContainerImageBuildRequestBuildItem> buildRequest,
Optional<ContainerImagePushRequestBuildItem> pushRequest,
List<ContainerImageLabelBuildItem> containerImageLabels,
Optional<JvmStartupOptimizerArchiveResultBuildItem> jvmStartupOptimizerArchiveResult,
BuildProducer<ArtifactResultBuildItem> artifactResultProducer,
BuildProducer<ContainerImageBuilderBuildItem> containerImageBuilder) {
boolean buildContainerImage = buildContainerImageNeeded(containerImageConfig, buildRequest);
boolean pushContainerImage = pushContainerImageNeeded(containerImageConfig, pushRequest);
if (!buildContainerImage && !pushContainerImage) {
return;
}
JibContainerBuilder jibContainerBuilder;
PackageConfig.JarConfig.JarType jarType = packageConfig.jar().type();
jibContainerBuilder = switch (jarType) {
case LEGACY_JAR, UBER_JAR ->
createContainerBuilderFromLegacyJar(determineBaseJvmImage(jibConfig, compiledJavaVersion),
jibConfig, containerImageConfig,
sourceJar, outputTarget, mainClass, containerImageLabels);
case FAST_JAR, MUTABLE_JAR ->
createContainerBuilderFromFastJar(determineBaseJvmImage(jibConfig, compiledJavaVersion),
jibConfig, containerImageConfig, sourceJar, curateOutcome,
containerImageLabels,
jvmStartupOptimizerArchiveResult, jarType == MUTABLE_JAR);
};
setUser(jibConfig, jibContainerBuilder);
setPlatforms(jibConfig, jibContainerBuilder);
handleExtraFiles(outputTarget, jibContainerBuilder);
log.info("Starting (local) container image build for jar using jib.");
JibContainer container = containerize(containerImageConfig, jibConfig, containerImage, jibContainerBuilder,
pushRequest.isPresent());
writeOutputFiles(container, jibConfig, outputTarget);
artifactResultProducer.produce(new ArtifactResultBuildItem(null, "jar-container",
Map.of("container-image", container.getTargetImage().toString(), "pull-required",
Boolean.toString(pushContainerImage))));
containerImageBuilder.produce(new ContainerImageBuilderBuildItem(JIB));
}
@BuildStep(onlyIf = { IsProduction.class, JibBuild.class, NativeBuild.class })
public void buildFromNative(ContainerImageConfig containerImageConfig, ContainerImageJibConfig jibConfig,
ContainerImageInfoBuildItem containerImage,
NativeImageBuildItem nativeImage,
OutputTargetBuildItem outputTarget,
Optional<ContainerImageBuildRequestBuildItem> buildRequest,
Optional<ContainerImagePushRequestBuildItem> pushRequest,
List<ContainerImageLabelBuildItem> containerImageLabels,
Optional<UpxCompressedBuildItem> upxCompressed, // used to ensure that we work with the compressed native binary if compression was enabled
BuildProducer<ArtifactResultBuildItem> artifactResultProducer,
BuildProducer<ContainerImageBuilderBuildItem> containerImageBuilder) {
boolean buildContainerImage = buildContainerImageNeeded(containerImageConfig, buildRequest);
boolean pushContainerImage = pushContainerImageNeeded(containerImageConfig, pushRequest);
if (!buildContainerImage && !pushContainerImage) {
return;
}
if (!NativeBinaryUtil.nativeIsLinuxBinary(nativeImage)) {
throw new RuntimeException(
"The native binary produced by the build is not a Linux binary and therefore cannot be used in a Linux container image. Consider adding \"quarkus.native.container-build=true\" to your configuration");
}
JibContainerBuilder jibContainerBuilder = createContainerBuilderFromNative(jibConfig, containerImageConfig,
nativeImage, containerImageLabels);
setUser(jibConfig, jibContainerBuilder);
setPlatforms(jibConfig, jibContainerBuilder);
handleExtraFiles(outputTarget, jibContainerBuilder);
log.info("Starting (local) container image build for native binary using jib.");
JibContainer container = containerize(containerImageConfig, jibConfig, containerImage, jibContainerBuilder,
pushContainerImage);
writeOutputFiles(container, jibConfig, outputTarget);
artifactResultProducer.produce(new ArtifactResultBuildItem(null, "native-container",
Map.of("container-image", container.getTargetImage().toString(), "pull-required",
"" + pushContainerImage)));
containerImageBuilder.produce(new ContainerImageBuilderBuildItem(JIB));
}
private JibContainer containerize(ContainerImageConfig containerImageConfig,
ContainerImageJibConfig jibConfig, ContainerImageInfoBuildItem containerImage,
JibContainerBuilder jibContainerBuilder,
boolean pushRequested) {
Containerizer containerizer = createContainerizer(containerImageConfig, jibConfig, containerImage, pushRequested);
for (String additionalTag : containerImage.getAdditionalTags()) {
containerizer.withAdditionalTag(additionalTag);
}
// Jib uses the Google HTTP Client under the hood which attempts to record traces via OpenCensus which is wired
// to delegate to OpenTelemetry.
// This can lead to problems with the Quarkus OpenTelemetry extension which expects Vert.x to be running,
// something that is not the case at build time, see https://github.com/quarkusio/quarkus/issues/22864.
try (var resettableSystemProperties = ResettableSystemProperties
.of(OPENTELEMETRY_CONTEXT_CONTEXT_STORAGE_PROVIDER_SYS_PROP, "default")) {
JibContainer container = containerizeUnderLock(jibContainerBuilder, containerizer);
log.infof("%s container image %s (%s)\n",
containerImageConfig.isPushExplicitlyEnabled() ? "Pushed" : "Created",
container.getTargetImage(),
container.getDigest());
return container;
} catch (Exception e) {
throw new RuntimeException("Unable to create container image", e);
}
}
private Containerizer createContainerizer(ContainerImageConfig containerImageConfig,
ContainerImageJibConfig jibConfig, ContainerImageInfoBuildItem containerImageInfo,
boolean pushRequested) {
Containerizer containerizer;
ImageReference imageReference = ImageReference.of(containerImageInfo.getRegistry().orElse(null),
containerImageInfo.getRepository(), containerImageInfo.getTag());
if (pushRequested || containerImageConfig.isPushExplicitlyEnabled()) {
if (imageReference.getRegistry() == null) {
log.info("No container image registry was set, so 'docker.io' will be used");
}
RegistryImage registryImage = toRegistryImage(imageReference, containerImageConfig.username(),
containerImageConfig.password());
containerizer = Containerizer.to(registryImage);
} else {
DockerDaemonImage dockerDaemonImage = DockerDaemonImage.named(imageReference);
Optional<String> dockerConfigExecutableName = ConfigProvider.getConfig()
.getOptionalValue("quarkus.docker.executable-name", String.class);
Optional<String> jibConfigExecutableName = jibConfig.dockerExecutableName();
if (jibConfigExecutableName.isPresent()) {
dockerDaemonImage.setDockerExecutable(Paths.get(jibConfigExecutableName.get()));
} else if (dockerConfigExecutableName.isPresent()) {
dockerDaemonImage.setDockerExecutable(Paths.get(dockerConfigExecutableName.get()));
} else {
// detect the container runtime instead of falling back to 'docker' as the default
ContainerRuntimeUtil.ContainerRuntime detectedContainerRuntime = ContainerRuntimeUtil.detectContainerRuntime();
log.infof("Using %s to run the native image builder", detectedContainerRuntime.getExecutableName());
dockerDaemonImage.setDockerExecutable(Paths.get(detectedContainerRuntime.getExecutableName()));
}
dockerDaemonImage.setDockerEnvironment(jibConfig.dockerEnvironment());
containerizer = Containerizer.to(dockerDaemonImage);
}
containerizer.setToolName("Quarkus");
containerizer.setToolVersion(Version.getVersion());
containerizer.addEventHandler(LogEvent.class, e -> {
if (!e.getMessage().isEmpty()) {
log.log(toJBossLoggingLevel(e.getLevel()), e.getMessage());
}
});
containerizer.setAllowInsecureRegistries(containerImageConfig.insecure());
containerizer.setAlwaysCacheBaseImage(jibConfig.alwaysCacheBaseImage());
containerizer.setOfflineMode(jibConfig.offlineMode());
jibConfig.baseImageLayersCache().ifPresent(cacheDir -> containerizer.setBaseImageLayersCache(Paths.get(cacheDir)));
jibConfig.applicationLayersCache().ifPresent(cacheDir -> containerizer.setApplicationLayersCache(Paths.get(cacheDir)));
return containerizer;
}
/**
* Wraps the containerize invocation in a synchronized block to avoid OverlappingFileLockException when running parallel jib
* builds (e.g. mvn -T2 ...).
* Each build thread uses its own augmentation CL (which is why the OverlappingFileLockException prevention in jib doesn't
* work here), so the lock object
* has to be loaded via the parent classloader so that all build threads lock the same object.
* QuarkusAugmentor was chosen semi-randomly (note: quarkus-core-deployment is visible to that parent CL, this jib extension
* is not!).
*/
private JibContainer containerizeUnderLock(JibContainerBuilder jibContainerBuilder, Containerizer containerizer)
throws InterruptedException, RegistryException, IOException, CacheDirectoryCreationException, ExecutionException {
Class<?> lockObj = getClass();
ClassLoader parentCL = getClass().getClassLoader().getParent();
try {
lockObj = parentCL.loadClass("io.quarkus.deployment.QuarkusAugmentor");
} catch (ClassNotFoundException e) {
log.warnf("Could not load io.quarkus.deployment.QuarkusAugmentor with parent classloader: %s", parentCL);
}
synchronized (lockObj) {
return jibContainerBuilder.containerize(containerizer);
}
}
private void writeOutputFiles(JibContainer jibContainer, ContainerImageJibConfig jibConfig,
OutputTargetBuildItem outputTarget) {
doWriteOutputFile(outputTarget, Paths.get(jibConfig.imageDigestFile()), jibContainer.getDigest().toString());
doWriteOutputFile(outputTarget, Paths.get(jibConfig.imageIdFile()), jibContainer.getImageId().toString());
}
private void doWriteOutputFile(OutputTargetBuildItem outputTarget, Path configPath, String output) {
if (!configPath.isAbsolute()) {
configPath = outputTarget.getOutputDirectory().resolve(configPath);
}
try {
Files.writeString(configPath, output);
} catch (IOException e) {
log.errorf(e, "Unable to write file '%s'.", configPath.toAbsolutePath().toString());
}
}
private JibContainerBuilder toJibContainerBuilder(String baseImage, ContainerImageJibConfig jibConfig)
throws InvalidImageReferenceException {
if (baseImage.startsWith(Jib.TAR_IMAGE_PREFIX) || baseImage.startsWith(Jib.DOCKER_DAEMON_IMAGE_PREFIX)) {
return Jib.from(baseImage);
}
return Jib.from(toRegistryImage(ImageReference.parse(baseImage), jibConfig.baseRegistryUsername(),
jibConfig.baseRegistryPassword()));
}
private RegistryImage toRegistryImage(ImageReference imageReference, Optional<String> username, Optional<String> password) {
CredentialRetrieverFactory credentialRetrieverFactory = CredentialRetrieverFactory.forImage(imageReference,
log::info);
RegistryImage registryImage = RegistryImage.named(imageReference);
if (username.isPresent() && password.isPresent()) {
registryImage.addCredential(username.get(), password.get());
} else {
registryImage.addCredentialRetriever(credentialRetrieverFactory.wellKnownCredentialHelpers());
registryImage.addCredentialRetriever(credentialRetrieverFactory.dockerConfig());
// podman credentials: https://docs.podman.io/en/latest/markdown/podman-login.1.html
// podman for Windows and macOS
registryImage.addCredentialRetriever(credentialRetrieverFactory
.dockerConfig(Paths.get(System.getProperty("user.home"), ".config", "containers", "auth.json")));
String xdgRuntimeDir = System.getenv("XDG_RUNTIME_DIR");
if ((xdgRuntimeDir != null) && !xdgRuntimeDir.isEmpty()) {
registryImage.addCredentialRetriever(
credentialRetrieverFactory.dockerConfig(Paths.get(xdgRuntimeDir, "containers", "auth.json")));
}
String dockerConfigEnv = System.getenv().get("DOCKER_CONFIG");
if (dockerConfigEnv != null) {
Path dockerConfigPath = Path.of(dockerConfigEnv);
if (Files.isDirectory(dockerConfigPath)) {
// this matches jib's behaviour,
// see https://github.com/GoogleContainerTools/jib/blob/master/jib-maven-plugin/README.md#authentication-methods
dockerConfigPath = dockerConfigPath.resolve("config.json");
}
registryImage.addCredentialRetriever(credentialRetrieverFactory.dockerConfig(dockerConfigPath));
}
registryImage.addCredentialRetriever(credentialRetrieverFactory.googleApplicationDefaultCredentials());
}
return registryImage;
}
private Logger.Level toJBossLoggingLevel(LogEvent.Level level) {
return switch (level) {
case ERROR -> Logger.Level.ERROR;
case WARN -> Logger.Level.WARN;
case LIFECYCLE -> Logger.Level.INFO;
default -> Logger.Level.DEBUG;
};
}
/**
 * We don't use Jib's JavaContainerBuilder here because we need to support the custom fast-jar format
 * We create the following layers (least likely to change to most likely to change):
 *
 * <ul>
 * <li>lib</li>
 * <li>boot-lib</li>
 * <li>quarkus-run.jar</li>
 * <li>quarkus</li>
 * <li>app</li>
 * </ul>
 */
private JibContainerBuilder createContainerBuilderFromFastJar(String baseJvmImage, ContainerImageJibConfig jibConfig,
        ContainerImageConfig containerImageConfig,
        JarBuildItem sourceJarBuildItem,
        CurateOutcomeBuildItem curateOutcome, List<ContainerImageLabelBuildItem> containerImageLabels,
        Optional<JvmStartupOptimizerArchiveResultBuildItem> maybeJvmStartupOptimizerArchiveResult,
        boolean isMutableJar) {
    Path componentsPath = sourceJarBuildItem.getPath().getParent();
    Path appLibDir = componentsPath.resolve(FastJarFormat.LIB).resolve(FastJarFormat.MAIN);
    AbsoluteUnixPath workDirInContainer = AbsoluteUnixPath.get(jibConfig.workingDirectory());
    Map<String, String> envVars = createEnvironmentVariables(jibConfig);
    // Entrypoint selection: explicit config wins, then run-java.sh (when the base image is known
    // to provide it and no startup-optimizer archive is used), otherwise plain 'java ... -jar'.
    List<String> entrypoint;
    if (jibConfig.jvmEntrypoint().isPresent()) {
        entrypoint = Collections.unmodifiableList(jibConfig.jvmEntrypoint().get());
    } else if (containsRunJava(baseJvmImage) && maybeJvmStartupOptimizerArchiveResult.isEmpty()) {
        // we want to use run-java.sh by default. However, if AppCDS are being used, run-java.sh cannot be used because it would lead to using different JVM args
        // which would mean AppCDS would not be taken into account at all
        entrypoint = List.of(RUN_JAVA_PATH);
        envVars.put("JAVA_APP_JAR", workDirInContainer + "/" + FastJarFormat.QUARKUS_RUN_JAR);
        envVars.put("JAVA_APP_DIR", workDirInContainer.toString());
        envVars.put("JAVA_OPTS_APPEND",
                String.join(" ",
                        determineEffectiveJvmArguments(jibConfig, maybeJvmStartupOptimizerArchiveResult, isMutableJar)));
    } else {
        List<String> effectiveJvmArguments = determineEffectiveJvmArguments(jibConfig,
                maybeJvmStartupOptimizerArchiveResult, isMutableJar);
        List<String> argsList = new ArrayList<>(3 + effectiveJvmArguments.size());
        argsList.add("java");
        argsList.addAll(effectiveJvmArguments);
        argsList.add("-jar");
        argsList.add(FastJarFormat.QUARKUS_RUN_JAR);
        entrypoint = Collections.unmodifiableList(argsList);
    }
    // Collect the "fast changing" dependencies (workspace modules and SNAPSHOT versions) so they
    // can be put into a later image layer than the stable dependencies.
    List<ResolvedDependency> fastChangingLibs = new ArrayList<>();
    Collection<ResolvedDependency> userDependencies = curateOutcome.getApplicationModel().getRuntimeDependencies();
    for (ResolvedDependency artifact : userDependencies) {
        if (artifact == null) {
            continue;
        }
        if (artifact.isWorkspaceModule()) {
            fastChangingLibs.add(artifact);
            continue;
        }
        String artifactVersion = artifact.getVersion();
        if ((artifactVersion == null) || artifactVersion.isEmpty()) {
            continue;
        }
        if (artifactVersion.toLowerCase().contains("snapshot")) {
            fastChangingLibs.add(artifact);
        }
    }
    // Map the fast-changing dependencies back to the actual jar files in lib/main and split the
    // directory contents into fast-changing vs. stable paths.
    Set<Path> fastChangingLibPaths = Collections.emptySet();
    List<Path> nonFastChangingLibPaths = null;
    if (!fastChangingLibs.isEmpty()) {
        fastChangingLibPaths = new HashSet<>(fastChangingLibs.size());
        Map<String, Path> libNameToPath = new HashMap<>();
        try (DirectoryStream<Path> allLibPaths = Files.newDirectoryStream(appLibDir)) {
            for (Path libPath : allLibPaths) {
                libNameToPath.put(libPath.getFileName().toString(), libPath);
            }
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
        List<String> libFileNames = new ArrayList<>(libNameToPath.keySet());
        for (ResolvedDependency appArtifact : fastChangingLibs) {
            String matchingLibDirFileName = null;
            for (Path appArtifactPath : appArtifact.getResolvedPaths()) {
                for (String libFileName : libFileNames) {
                    // NOTE(review): matching is by group-id + file-name containment — presumably
                    // sufficient for the fast-jar lib naming scheme; confirm against FastJarFormat
                    if (libFileName.contains(appArtifact.getGroupId())
                            && libFileName.contains(appArtifactPath.getFileName().toString())) {
                        matchingLibDirFileName = libFileName;
                        break;
                    }
                }
                if (matchingLibDirFileName != null) {
                    break;
                }
            }
            if (matchingLibDirFileName != null) {
                fastChangingLibPaths.add(libNameToPath.get(matchingLibDirFileName));
            }
        }
        Collection<Path> allLibPaths = libNameToPath.values();
        nonFastChangingLibPaths = new ArrayList<>(allLibPaths.size() - fastChangingLibPaths.size());
        for (Path libPath : allLibPaths) {
            if (!fastChangingLibPaths.contains(libPath)) {
                nonFastChangingLibPaths.add(libPath);
            }
        }
    }
    try {
        Instant now = Instant.now();
        // Either stamp files with the current time, or force EPOCH for reproducible layers.
        boolean enforceModificationTime = !jibConfig.useCurrentTimestampFileModification();
        Instant modificationTime = jibConfig.useCurrentTimestampFileModification() ? now : Instant.EPOCH;
        JibContainerBuilder jibContainerBuilder = toJibContainerBuilder(baseJvmImage, jibConfig);
        if (fastChangingLibPaths.isEmpty()) {
            // just create a layer with the entire lib structure intact
            addLayer(jibContainerBuilder, Collections.singletonList(componentsPath.resolve(FastJarFormat.LIB)),
                    workDirInContainer, "fast-jar-lib", isMutableJar, enforceModificationTime, modificationTime);
        } else {
            // we need to manually create each layer
            // the idea here is that the fast changing libraries are created in a later layer, thus when they do change,
            // docker doesn't have to create an entire layer with all dependencies - only change the fast ones
            FileEntriesLayer.Builder bootLibsLayerBuilder = FileEntriesLayer.builder().setName("fast-jar-boot-libs");
            Path bootLibPath = componentsPath.resolve(FastJarFormat.LIB).resolve(FastJarFormat.BOOT_LIB);
            try (Stream<Path> bootLibPaths = Files.list(bootLibPath)) {
                bootLibPaths.forEach(lib -> {
                    try {
                        AbsoluteUnixPath libPathInContainer = workDirInContainer.resolve(FastJarFormat.LIB)
                                .resolve(FastJarFormat.BOOT_LIB)
                                .resolve(lib.getFileName());
                        Instant bootLibModificationTime;
                        if (maybeJvmStartupOptimizerArchiveResult.isPresent()) {
                            // the boot lib jars need to preserve the modification time because otherwise AppCDS won't work
                            bootLibModificationTime = Files.getLastModifiedTime(lib).toInstant();
                        } else {
                            bootLibModificationTime = modificationTime;
                        }
                        bootLibsLayerBuilder.addEntry(lib, libPathInContainer, bootLibModificationTime);
                    } catch (IOException e) {
                        throw new UncheckedIOException(e);
                    }
                });
            }
            jibContainerBuilder.addFileEntriesLayer(bootLibsLayerBuilder.build());
            if (isMutableJar) {
                // mutable (remote-dev capable) jars additionally ship the deployment libraries
                Path deploymentPath = componentsPath.resolve(FastJarFormat.LIB)
                        .resolve(FastJarFormat.DEPLOYMENT_LIB);
                addLayer(jibContainerBuilder, Collections.singletonList(deploymentPath),
                        workDirInContainer.resolve(FastJarFormat.LIB),
                        "fast-jar-deployment-libs", true, enforceModificationTime, modificationTime);
            }
            AbsoluteUnixPath libsMainPath = workDirInContainer.resolve(FastJarFormat.LIB)
                    .resolve(FastJarFormat.MAIN);
            // stable libs first, fast-changing libs in a later layer
            addLayer(jibContainerBuilder, nonFastChangingLibPaths, libsMainPath, "fast-jar-normal-libs",
                    isMutableJar, enforceModificationTime, modificationTime);
            addLayer(jibContainerBuilder, new ArrayList<>(fastChangingLibPaths), libsMainPath, "fast-jar-changing-libs",
                    isMutableJar, enforceModificationTime, modificationTime);
        }
        if (maybeJvmStartupOptimizerArchiveResult.isPresent()) {
            // the run jar keeps its real modification time here, otherwise the startup archive would be invalidated
            jibContainerBuilder.addFileEntriesLayer(FileEntriesLayer.builder().setName("app-cds").addEntry(
                    componentsPath.resolve(FastJarFormat.QUARKUS_RUN_JAR),
                    workDirInContainer.resolve(FastJarFormat.QUARKUS_RUN_JAR),
                    Files.getLastModifiedTime(componentsPath.resolve(FastJarFormat.QUARKUS_RUN_JAR)).toInstant())
                    .build());
            jibContainerBuilder
                    .addLayer(Collections.singletonList(maybeJvmStartupOptimizerArchiveResult.get().getArchive()),
                            workDirInContainer);
        } else {
            jibContainerBuilder.addFileEntriesLayer(FileEntriesLayer.builder()
                    .setName("fast-jar-run")
                    .addEntry(
                            componentsPath.resolve(FastJarFormat.QUARKUS_RUN_JAR),
                            workDirInContainer.resolve(FastJarFormat.QUARKUS_RUN_JAR),
                            isMutableJar ? REMOTE_DEV_FILE_PERMISSIONS : DEFAULT_FILE_PERMISSIONS,
                            modificationTime,
                            isMutableJar ? DEFAULT_BASE_IMAGE_USER : "")
                    .build());
        }
        addLayer(jibContainerBuilder, Collections.singletonList(componentsPath.resolve(FastJarFormat.APP)),
                workDirInContainer, "fast-jar-quarkus-app", isMutableJar, enforceModificationTime, modificationTime);
        addLayer(jibContainerBuilder, Collections.singletonList(componentsPath.resolve(FastJarFormat.QUARKUS)),
                workDirInContainer, "fast-jar-quarkus", isMutableJar, enforceModificationTime, modificationTime);
        if (ContainerImageJibConfig.DEFAULT_WORKING_DIR.equals(jibConfig.workingDirectory())) {
            // this layer ensures that the working directory is writeable
            // see https://github.com/GoogleContainerTools/jib/issues/1270
            // TODO: is this needed for all working directories?
            jibContainerBuilder.addFileEntriesLayer(FileEntriesLayer.builder().addEntry(
                    new FileEntry(
                            Files.createTempDirectory("jib"),
                            AbsoluteUnixPath.get(jibConfig.workingDirectory()),
                            FilePermissions.DEFAULT_FOLDER_PERMISSIONS,
                            modificationTime, DEFAULT_BASE_IMAGE_USER))
                    .build());
        }
        if (isMutableJar) {
            // this layer is needed for remote-dev
            jibContainerBuilder.addFileEntriesLayer(FileEntriesLayer.builder()
                    .addEntry(
                            new FileEntry(
                                    Files.createTempDirectory("jib"),
                                    workDirInContainer.resolve("dev"),
                                    REMOTE_DEV_FOLDER_PERMISSIONS,
                                    modificationTime, DEFAULT_BASE_IMAGE_USER))
                    .addEntry(
                            new FileEntry(
                                    componentsPath.resolve(FastJarFormat.QUARKUS_APP_DEPS),
                                    workDirInContainer.resolve(FastJarFormat.QUARKUS_APP_DEPS),
                                    REMOTE_DEV_FOLDER_PERMISSIONS,
                                    modificationTime, DEFAULT_BASE_IMAGE_USER))
                    .build());
        }
        jibContainerBuilder
                .setWorkingDirectory(workDirInContainer)
                .setEntrypoint(entrypoint)
                .setEnvironment(envVars)
                .setLabels(allLabels(jibConfig, containerImageConfig, containerImageLabels));
        // "INHERIT" entrypoint handling must run after setEntrypoint above
        mayInheritEntrypoint(jibContainerBuilder, entrypoint, jibConfig.jvmArguments());
        if (jibConfig.useCurrentTimestamp()) {
            jibContainerBuilder.setCreationTime(now);
        }
        for (int port : jibConfig.ports()) {
            jibContainerBuilder.addExposedPort(Port.tcp(port));
        }
        return jibContainerBuilder;
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    } catch (InvalidImageReferenceException e) {
        throw new RuntimeException(e);
    }
}
// TODO: this needs to be a lot more sophisticated
private boolean containsRunJava(String baseJvmImage) {
    // these UBI Java base images are the ones known to ship run-java.sh
    List<String> imagesWithRunJava = List.of(
            ContainerImages.UBI8_JAVA_17_IMAGE_NAME,
            ContainerImages.UBI8_JAVA_21_IMAGE_NAME,
            ContainerImages.UBI9_JAVA_17_IMAGE_NAME,
            ContainerImages.UBI9_JAVA_21_IMAGE_NAME);
    for (String knownImage : imagesWithRunJava) {
        if (baseJvmImage.startsWith(knownImage)) {
            return true;
        }
    }
    return false;
}
/**
 * Adds the given files (recursively) as a single named layer to the container being built.
 *
 * @param jibContainerBuilder the builder the layer is appended to
 * @param files local paths whose contents are added recursively
 * @param pathInContainer directory in the container the entries are placed under
 * @param name the layer name
 * @param isMutableJar whether remote-dev (mutable jar) permissions/ownership apply
 * @param enforceModificationTime when {@code true}, every entry gets {@code forcedModificationTime};
 *        otherwise each entry keeps its on-disk last-modified time
 * @param forcedModificationTime the timestamp used when enforcement is on
 */
public JibContainerBuilder addLayer(JibContainerBuilder jibContainerBuilder, List<Path> files,
        AbsoluteUnixPath pathInContainer, String name, boolean isMutableJar,
        boolean enforceModificationTime, Instant forcedModificationTime)
        throws IOException {
    FileEntriesLayer.Builder layerBuilder = FileEntriesLayer.builder().setName(name);
    for (Path path : files) {
        layerBuilder.addEntryRecursive(
                path, pathInContainer.resolve(path.getFileName()),
                isMutableJar ? REMOTE_DEV_FOLDER_PERMISSIONS_PROVIDER : DEFAULT_FILE_PERMISSIONS_PROVIDER,
                (sourcePath, destinationPath) -> {
                    if (!enforceModificationTime) {
                        try {
                            return Files.getLastModifiedTime(sourcePath).toInstant();
                        } catch (IOException e) {
                            throw new RuntimeException("Unable to get last modified time for " + sourcePath, e);
                        }
                    }
                    return forcedModificationTime;
                },
                isMutableJar ? REMOTE_DEV_OWNERSHIP_PROVIDER : DEFAULT_OWNERSHIP_PROVIDER);
    }
    return jibContainerBuilder.addFileEntriesLayer(layerBuilder.build());
}
/**
 * Supports the special "INHERIT" entrypoint value: when the configured entrypoint is exactly
 * {@code ["INHERIT"]}, the base image's entrypoint is kept (by clearing ours) and the given
 * arguments are set as program arguments instead.
 */
private void mayInheritEntrypoint(JibContainerBuilder jibContainerBuilder, List<String> entrypoint,
        List<String> arguments) {
    boolean inheritRequested = entrypoint.size() == 1 && "INHERIT".equals(entrypoint.get(0));
    if (inheritRequested) {
        jibContainerBuilder.setEntrypoint((List<String>) null);
        jibContainerBuilder.setProgramArguments(arguments);
    }
}
/**
 * Computes the JVM arguments to bake into the image: the configured arguments plus any
 * additional ones, the startup-optimizer (e.g. AppCDS) archive flag when an archive was
 * produced and the user did not already supply that flag, and the remote-dev output
 * directory override for mutable jars.
 */
private List<String> determineEffectiveJvmArguments(ContainerImageJibConfig jibConfig,
        Optional<JvmStartupOptimizerArchiveResultBuildItem> maybeJvmStartupOptimizerArchiveResult,
        boolean isMutableJar) {
    List<String> result = new ArrayList<>(jibConfig.jvmArguments());
    jibConfig.jvmAdditionalArguments().ifPresent(result::addAll);
    maybeJvmStartupOptimizerArchiveResult.ifPresent(archiveResult -> {
        String jvmFlag = archiveResult.getType().getJvmFlag();
        // don't add the archive flag twice if the user configured it themselves
        boolean alreadyConfigured = result.stream().anyMatch(arg -> arg.startsWith(jvmFlag));
        if (!alreadyConfigured) {
            result.add(jvmFlag + "=" + archiveResult.getArchive().getFileName().toString());
        }
    });
    if (isMutableJar) {
        // see https://github.com/quarkusio/quarkus/issues/41797
        result.add("-Dquarkus.package.output-directory=${PWD}");
    }
    return result;
}
/** Applies the configured container user, if any, to the builder. */
private void setUser(ContainerImageJibConfig jibConfig, JibContainerBuilder jibContainerBuilder) {
    if (jibConfig.user().isPresent()) {
        jibContainerBuilder.setUser(jibConfig.user().get());
    }
}
/** Applies the configured target platforms, if any, to the builder. */
private void setPlatforms(ContainerImageJibConfig jibConfig, JibContainerBuilder jibContainerBuilder) {
    jibConfig.platforms()
            .ifPresent(platforms -> jibContainerBuilder.setPlatforms(PlatformHelper.parse(platforms)));
}
/**
 * Builds the container for the legacy-jar packaging: the jar is unzipped and handed to Jib's
 * {@code JavaContainerBuilder}, which lays out classes, resources and dependencies in its
 * standard layers.
 */
private JibContainerBuilder createContainerBuilderFromLegacyJar(String baseJvmImage, ContainerImageJibConfig jibConfig,
        ContainerImageConfig containerImageConfig,
        JarBuildItem sourceJarBuildItem,
        OutputTargetBuildItem outputTargetBuildItem,
        MainClassBuildItem mainClassBuildItem,
        List<ContainerImageLabelBuildItem> containerImageLabels) {
    try {
        // not ideal since this has been previously zipped - we would like to just reuse it
        Path classesDir = outputTargetBuildItem.getOutputDirectory().resolve("jib");
        ZipUtils.unzip(sourceJarBuildItem.getPath(), classesDir);
        JavaContainerBuilder javaContainerBuilder;
        // tar and docker-daemon base image references don't go through a registry, so no credentials
        if (baseJvmImage.startsWith(Jib.TAR_IMAGE_PREFIX) || baseJvmImage.startsWith(Jib.DOCKER_DAEMON_IMAGE_PREFIX)) {
            javaContainerBuilder = JavaContainerBuilder.from(baseJvmImage);
        } else {
            javaContainerBuilder = JavaContainerBuilder
                    .from(toRegistryImage(ImageReference.parse(baseJvmImage), jibConfig.baseRegistryUsername(),
                            jibConfig.baseRegistryPassword()));
        }
        javaContainerBuilder = javaContainerBuilder
                .addResources(classesDir, IS_CLASS_PREDICATE.negate())
                .addClasses(classesDir, IS_CLASS_PREDICATE);
        // when there is no custom entry point, we just set everything up for a regular java run
        if (!jibConfig.jvmEntrypoint().isPresent()) {
            javaContainerBuilder
                    .addJvmFlags(determineEffectiveJvmArguments(jibConfig, Optional.empty(), false))
                    .setMainClass(mainClassBuildItem.getClassName());
        }
        if (sourceJarBuildItem.getLibraryDir() != null) {
            try (Stream<Path> dependenciesPaths = Files.list(sourceJarBuildItem.getLibraryDir())) {
                // only regular *.jar files; sorted so the layer contents are deterministic
                javaContainerBuilder
                        .addDependencies(
                                dependenciesPaths
                                        .filter(p -> Files.isRegularFile(p) && p.getFileName().toString().endsWith(".jar"))
                                        .sorted(Comparator.comparing(Path::getFileName))
                                        .collect(Collectors.toList()));
            }
        }
        JibContainerBuilder jibContainerBuilder = javaContainerBuilder.toContainerBuilder()
                .setEnvironment(createEnvironmentVariables(jibConfig))
                .setLabels(allLabels(jibConfig, containerImageConfig, containerImageLabels));
        if (jibConfig.useCurrentTimestamp()) {
            jibContainerBuilder.setCreationTime(Instant.now());
        }
        // a custom entrypoint overrides whatever JavaContainerBuilder set up
        if (jibConfig.jvmEntrypoint().isPresent()) {
            jibContainerBuilder.setEntrypoint(jibConfig.jvmEntrypoint().get());
            mayInheritEntrypoint(jibContainerBuilder, jibConfig.jvmEntrypoint().get(), jibConfig.jvmArguments());
        }
        return jibContainerBuilder;
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    } catch (InvalidImageReferenceException e) {
        throw new RuntimeException(e);
    }
}
/**
 * Builds the container for a native executable: the binary is copied into /work with 775
 * permissions and becomes the entrypoint, unless a custom native entrypoint was configured.
 */
private JibContainerBuilder createContainerBuilderFromNative(ContainerImageJibConfig jibConfig,
        ContainerImageConfig containerImageConfig,
        NativeImageBuildItem nativeImageBuildItem, List<ContainerImageLabelBuildItem> containerImageLabels) {
    // entrypoint: explicit config wins, otherwise run the binary with the configured arguments
    List<String> entrypoint;
    if (jibConfig.nativeEntrypoint().isPresent()) {
        entrypoint = jibConfig.nativeEntrypoint().get();
    } else {
        List<String> nativeArguments = jibConfig.nativeArguments().orElse(Collections.emptyList());
        entrypoint = new ArrayList<>(nativeArguments.size() + 1);
        entrypoint.add("./" + BINARY_NAME_IN_CONTAINER);
        entrypoint.addAll(nativeArguments);
    }
    try {
        AbsoluteUnixPath workDirInContainer = AbsoluteUnixPath.get("/work");
        JibContainerBuilder jibContainerBuilder = toJibContainerBuilder(jibConfig.baseNativeImage(), jibConfig)
                .addFileEntriesLayer(FileEntriesLayer.builder()
                        .addEntry(nativeImageBuildItem.getPath(), workDirInContainer.resolve(BINARY_NAME_IN_CONTAINER),
                                FilePermissions.fromOctalString("775"))
                        .build())
                .setWorkingDirectory(workDirInContainer)
                .setEntrypoint(entrypoint)
                .setEnvironment(createEnvironmentVariables(jibConfig))
                .setLabels(allLabels(jibConfig, containerImageConfig, containerImageLabels));
        // dynamically linked binaries may need *.so files shipped next to them
        includeSharedObjects(jibContainerBuilder, nativeImageBuildItem, workDirInContainer);
        mayInheritEntrypoint(jibContainerBuilder, entrypoint, jibConfig.nativeArguments().orElse(null));
        if (jibConfig.useCurrentTimestamp()) {
            jibContainerBuilder.setCreationTime(Instant.now());
        }
        for (int port : jibConfig.ports()) {
            jibContainerBuilder.addExposedPort(Port.tcp(port));
        }
        return jibContainerBuilder;
    } catch (InvalidImageReferenceException e) {
        throw new RuntimeException(e);
    }
}
/**
 * Adds any shared object files ({@code *.so}) found next to the native binary as an extra
 * "shared objects" layer, so that dynamically linked native executables can load them from
 * the working directory inside the container.
 */
private void includeSharedObjects(JibContainerBuilder jibContainerBuilder, NativeImageBuildItem nativeImageBuildItem,
        AbsoluteUnixPath workDirInContainer) {
    Path buildDir = nativeImageBuildItem.getPath().getParent();
    try (Stream<Path> paths = Files.list(buildDir)) {
        // Files.isRegularFile already excludes directories, so the previous
        // '!Files.isDirectory(p)' filter was redundant and has been dropped
        List<Path> sharedObjectFiles = paths.filter(Files::isRegularFile)
                .filter(p -> p.getFileName().toString().endsWith(".so")).toList();
        if (!sharedObjectFiles.isEmpty()) {
            FileEntriesLayer.Builder fileEntriesLayerBuilder = FileEntriesLayer.builder();
            sharedObjectFiles.forEach(sharedObjectFile -> {
                // same permissions as the native binary itself
                fileEntriesLayerBuilder.addEntry(sharedObjectFile,
                        workDirInContainer.resolve(sharedObjectFile.getFileName().toString()),
                        FilePermissions.fromOctalString("775"));
            });
            jibContainerBuilder.addFileEntriesLayer(fileEntriesLayerBuilder.setName("shared objects").build());
        }
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
/**
 * Normalizes the configured environment variable names to the conventional upper-case form:
 * the name is upper-cased and '-', '.' and '/' are replaced with '_'. Values are untouched.
 * Returns the original (empty) map when nothing was configured.
 */
private Map<String, String> createEnvironmentVariables(ContainerImageJibConfig jibConfig) {
    Map<String, String> original = jibConfig.environmentVariables();
    if (original.isEmpty()) {
        return original;
    }
    Map<String, String> converted = new HashMap<>(original.size());
    original.forEach((key, value) -> converted
            .put(key.toUpperCase().replace('-', '_').replace('.', '_').replace('/', '_'), value));
    return converted;
}
/**
 * Allow users to have custom files in {@code src/main/jib} that will be copied into the built container's file system
 * in same manner as the Jib Maven and Gradle plugins do.
 * For example, {@code src/main/jib/foo/bar} would add {@code /foo/bar} into the container filesystem.
 *
 * See: https://github.com/GoogleContainerTools/jib/blob/v0.15.0-core/docs/faq.md#can-i-add-a-custom-directory-to-the-image
 */
private void handleExtraFiles(OutputTargetBuildItem outputTarget, JibContainerBuilder jibContainerBuilder) {
    Path outputDirectory = outputTarget.getOutputDirectory();
    Map.Entry<Path, Path> mainSourcesRoot = findMainSourcesRoot(outputDirectory);
    if (mainSourcesRoot == null) { // this should never happen
        return;
    }
    Path jibFilesRoot = mainSourcesRoot.getKey().resolve("jib");
    if (!jibFilesRoot.toFile().exists()) {
        // nothing to do when the project has no src/main/jib directory
        return;
    }
    FileEntriesLayer extraFilesLayer;
    try {
        // delegate to Jib's own helper so behavior matches the Maven/Gradle plugins;
        // entries keep their on-disk last-modified times
        extraFilesLayer = ContainerBuilderHelper.extraDirectoryLayerConfiguration(
                jibFilesRoot,
                AbsoluteUnixPath.get("/"),
                Collections.emptyMap(),
                (localPath, ignored2) -> {
                    try {
                        return Files.getLastModifiedTime(localPath).toInstant();
                    } catch (IOException e) {
                        throw new UncheckedIOException(e);
                    }
                });
        jibContainerBuilder.addFileEntriesLayer(
                extraFilesLayer);
    } catch (IOException e) {
        throw new UncheckedIOException(
                "Unable to add extra files in '" + jibFilesRoot.toAbsolutePath().toString() + "' to the container", e);
    }
}
/**
 * Merges the user-configured labels with labels contributed by other extensions.
 * User-supplied labels take precedence: they are inserted first and never overwritten.
 */
private Map<String, String> allLabels(ContainerImageJibConfig jibConfig, ContainerImageConfig containerImageConfig,
        List<ContainerImageLabelBuildItem> containerImageLabels) {
    if (containerImageLabels.isEmpty() && containerImageConfig.labels().isEmpty()) {
        return Collections.emptyMap();
    }
    final Map<String, String> result = new HashMap<>(containerImageConfig.labels());
    // we want the user supplied labels to take precedence so the user can override labels generated from other extensions if desired
    containerImageLabels.forEach(label -> result.putIfAbsent(label.getName(), label.getValue()));
    return result;
}
// TODO: this predicate is rather simplistic since it results in creating the directory structure in both the resources and classes so it should probably be improved to remove empty directories
private static
|
JibProcessor
|
java
|
quarkusio__quarkus
|
extensions/scheduler/deployment/src/test/java/io/quarkus/scheduler/test/ConditionalExecutionTest.java
|
{
"start": 1771,
"end": 2366
}
|
class ____ {
static final CountDownLatch COUNTER = new CountDownLatch(1);
static final AtomicInteger OTHER_COUNT = new AtomicInteger(0);
@Scheduled(identity = "foo", every = "1s", skipExecutionIf = IsDisabled.class)
void doSomething() throws InterruptedException {
COUNTER.countDown();
}
@Scheduled(identity = "other-foo", every = "1s", skipExecutionIf = OtherIsDisabled.class)
void doSomethingElse() throws InterruptedException {
OTHER_COUNT.incrementAndGet();
}
}
@Singleton
public static
|
Jobs
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
|
{
"start": 33853,
"end": 34227
}
|
class ____ started and the event dispatcher is ready to use at
* this point.
*
* @throws Exception error occur.
*/
protected abstract void startInternal() throws Exception;
@Override
protected void serviceStop() throws Exception {
dispatcher.stop();
closeInternal();
}
/**
* Derived classes close themselves using this method.
* The base
|
is
|
java
|
apache__camel
|
components/camel-jms/src/test/java/org/apache/camel/component/jms/issues/JmsCustomJMSReplyToIssueTest.java
|
{
"start": 1749,
"end": 4392
}
|
class ____ extends AbstractJMSTest {
@Order(2)
@RegisterExtension
public static CamelContextExtension camelContextExtension = new DefaultCamelContextExtension();
protected CamelContext context;
protected ProducerTemplate template;
protected ConsumerTemplate consumer;
private JmsComponent amq;
@Test
public void testCustomJMSReplyTo() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("Bye World");
// start a inOnly route
template.sendBody("direct:start", "Hello World");
JmsTemplate jms = new JmsTemplate(amq.getConfiguration().getConnectionFactory());
TextMessage msg = (TextMessage) jms.receive("JmsCustomJMSReplyToIssueTest.in");
assertEquals("Hello World", msg.getText());
// there should be a JMSReplyTo - and it should be a Queue - so we know where to send the reply
Queue replyTo = (Queue) msg.getJMSReplyTo();
assertEquals("ActiveMQQueue[JmsCustomJMSReplyToIssueTest.reply]", replyTo.toString());
// send reply
template.sendBody("activemq:" + replyTo.getQueueName(), "Bye World");
MockEndpoint.assertIsSatisfied(context);
}
@Override
protected String getComponentName() {
return "activemq";
}
@Override
protected JmsComponent buildComponent(ConnectionFactory connectionFactory) {
amq = super.buildComponent(connectionFactory);
return amq;
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
// must preserve QoS so Camel will send JMSReplyTo even if message is inOnly
from("direct:start").process(exchange -> {
exchange.getMessage().setBody("Hello World");
// set the JMSReplyTo to force sending the reply here
exchange.getMessage().setHeader("JMSReplyTo", "JmsCustomJMSReplyToIssueTest.reply");
}).to("activemq:queue:JmsCustomJMSReplyToIssueTest.in?preserveMessageQos=true");
from("activemq:queue:JmsCustomJMSReplyToIssueTest.reply").to("mock:result");
}
};
}
@Override
public CamelContextExtension getCamelContextExtension() {
return camelContextExtension;
}
@BeforeEach
void setUpRequirements() {
context = camelContextExtension.getContext();
template = camelContextExtension.getProducerTemplate();
consumer = camelContextExtension.getConsumerTemplate();
}
}
|
JmsCustomJMSReplyToIssueTest
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/JUnit4SetUpNotRunTest.java
|
{
"start": 6381,
"end": 6463
}
|
class ____ {
public void setUp() {}
}
@RunWith(JUnit4.class)
|
J4SetUpWrongRunnerType
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/common/network/ThreadWatchdog.java
|
{
"start": 8092,
"end": 11851
}
|
class ____ extends AbstractRunnable {
private final ThreadPool threadPool;
private final TimeValue interval;
private final TimeValue quietTime;
private final Lifecycle lifecycle;
private final Logger logger;
Checker(ThreadPool threadPool, TimeValue interval, TimeValue quietTime, Lifecycle lifecycle, Logger logger) {
this.threadPool = threadPool;
this.interval = interval;
this.quietTime = quietTime.compareTo(interval) <= 0 ? interval : quietTime;
this.lifecycle = lifecycle;
this.logger = logger;
assert this.interval.millis() <= this.quietTime.millis();
}
@Override
protected void doRun() {
if (isRunning() == false) {
return;
}
boolean rescheduleImmediately = true;
try {
final var stuckThreadNames = getStuckThreadNames();
if (stuckThreadNames.isEmpty() == false) {
logger.warn(
"the following threads are active but did not make progress in the preceding [{}]: {}",
interval,
stuckThreadNames
);
rescheduleImmediately = false;
threadPool.generic().execute(threadDumper);
}
} finally {
if (rescheduleImmediately) {
scheduleNext(interval);
}
}
}
@Override
public boolean isForceExecution() {
return true;
}
private boolean isRunning() {
return 0 < interval.millis() && lifecycle.stoppedOrClosed() == false;
}
private void scheduleNext(TimeValue delay) {
if (isRunning()) {
threadPool.scheduleUnlessShuttingDown(delay, EsExecutors.DIRECT_EXECUTOR_SERVICE, Checker.this);
}
}
private final AbstractRunnable threadDumper = new AbstractRunnable() {
@Override
protected void doRun() {
assert ThreadPool.assertCurrentThreadPool(ThreadPool.Names.GENERIC);
if (isRunning()) {
HotThreads.logLocalHotThreads(
logger,
Level.WARN,
"hot threads dump due to active threads not making progress",
ReferenceDocs.NETWORK_THREADING_MODEL
);
}
}
@Override
public boolean isForceExecution() {
return true;
}
@Override
public void onFailure(Exception e) {
Checker.this.onFailure(e);
}
@Override
public void onRejection(Exception e) {
Checker.this.onRejection(e);
}
@Override
public void onAfter() {
scheduleNext(quietTime);
}
@Override
public String toString() {
return "ThreadWatchDog$Checker#threadDumper";
}
};
@Override
public void onFailure(Exception e) {
logger.error("exception in ThreadWatchDog$Checker", e);
assert false : e;
}
@Override
public void onRejection(Exception e) {
logger.debug("ThreadWatchDog$Checker execution rejected", e);
assert e instanceof EsRejectedExecutionException esre && esre.isExecutorShutdown() : e;
}
@Override
public String toString() {
return "ThreadWatchDog$Checker";
}
}
}
|
Checker
|
java
|
apache__camel
|
components/camel-kubernetes/src/test/java/org/apache/camel/component/kubernetes/consumer/integration/pods/KubernetesPodsConsumerNamespaceIT.java
|
{
"start": 1952,
"end": 2906
}
|
class ____ extends KubernetesConsumerTestSupport {
@Test
public void namespaceTest() {
createPod(ns1, "pod1", null);
createPod(ns2, "pod2", null);
Awaitility.await().atMost(5, TimeUnit.SECONDS).untilAsserted(() -> {
final List<String> list = result.getExchanges().stream().map(ex -> ex.getIn().getBody(String.class)).toList();
assertThat(list, allOf(
not(hasItem(containsString("pod1"))),
hasItem(containsString("pod2"))));
});
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
fromF("kubernetes-pods://%s?oauthToken=%s&namespace=%s", host, authToken, ns2)
.process(new KubernetesProcessor())
.to(result);
}
};
}
}
|
KubernetesPodsConsumerNamespaceIT
|
java
|
hibernate__hibernate-orm
|
hibernate-testing/src/main/java/org/hibernate/testing/bytecode/enhancement/BytecodeEnhancerRunner.java
|
{
"start": 1342,
"end": 7110
}
|
class ____ extends Suite {
private static final RunnerBuilder CUSTOM_RUNNER_BUILDER = new RunnerBuilder() {
@Override
public Runner runnerForClass(Class<?> testClass) throws Throwable {
return new CustomRunner( testClass );
}
};
public BytecodeEnhancerRunner(Class<?> klass) throws ClassNotFoundException, InitializationError {
super( CUSTOM_RUNNER_BUILDER, klass, enhanceTestClass( klass ) );
}
private static Class<?>[] enhanceTestClass(Class<?> klass) throws ClassNotFoundException {
String packageName = klass.getPackage().getName();
List<Class<?>> classList = new ArrayList<>();
try {
if ( klass.isAnnotationPresent( EnhancementOptions.class )
|| klass.isAnnotationPresent( ClassEnhancementSelector.class )
|| klass.isAnnotationPresent( ClassEnhancementSelectors.class )
|| klass.isAnnotationPresent( PackageEnhancementSelector.class )
|| klass.isAnnotationPresent( PackageEnhancementSelectors.class )
|| klass.isAnnotationPresent( ImplEnhancementSelector.class )
|| klass.isAnnotationPresent( ImplEnhancementSelectors.class ) ) {
classList.add( buildEnhancerClassLoader( klass ).loadClass( klass.getName() ) );
}
else if ( klass.isAnnotationPresent( CustomEnhancementContext.class ) ) {
for ( Class<? extends EnhancementContext> contextClass : klass.getAnnotation( CustomEnhancementContext.class ).value() ) {
EnhancementContext enhancementContextInstance = contextClass.getConstructor().newInstance();
classList.add( getEnhancerClassLoader( enhancementContextInstance, packageName ).loadClass( klass.getName() ) );
}
}
else {
classList.add( getEnhancerClassLoader( new EnhancerTestContext(), packageName ).loadClass( klass.getName() ) );
}
}
catch ( IllegalAccessException | InstantiationException | NoSuchMethodException | InvocationTargetException e ) {
// This is unlikely, but if happens throw runtime exception to fail the test
throw new RuntimeException( e );
}
return classList.toArray( new Class<?>[]{} );
}
// --- //
private static ClassLoader buildEnhancerClassLoader(Class<?> klass) {
final EnhancementOptions options = klass.getAnnotation( EnhancementOptions.class );
final EnhancementContext enhancerContext;
if ( options == null ) {
enhancerContext = new EnhancerTestContext();
}
else {
enhancerContext = new EnhancerTestContext() {
@Override
public boolean doBiDirectionalAssociationManagement(UnloadedField field) {
return options.biDirectionalAssociationManagement() && super.doBiDirectionalAssociationManagement( field );
}
@Override
public boolean doDirtyCheckingInline(UnloadedClass classDescriptor) {
return options.inlineDirtyChecking() && super.doDirtyCheckingInline( classDescriptor );
}
@Override
public boolean doExtendedEnhancement(UnloadedClass classDescriptor) {
return options.extendedEnhancement() && super.doExtendedEnhancement( classDescriptor );
}
@Override
public boolean hasLazyLoadableAttributes(UnloadedClass classDescriptor) {
return options.lazyLoading() && super.hasLazyLoadableAttributes( classDescriptor );
}
@Override
public boolean isLazyLoadable(UnloadedField field) {
return options.lazyLoading() && super.isLazyLoadable( field );
}
@Override
public UnsupportedEnhancementStrategy getUnsupportedEnhancementStrategy() {
final UnsupportedEnhancementStrategy strategy = options.unsupportedEnhancementStrategy();
return strategy != SKIP ? strategy : super.getUnsupportedEnhancementStrategy();
}
};
}
final List<EnhancementSelector> selectors = new ArrayList<>();
selectors.add( new PackageSelector( klass.getPackage().getName() ) );
applySelectors(
klass,
ClassEnhancementSelector.class,
ClassEnhancementSelectors.class,
selectorAnnotation -> selectors.add( new ClassSelector( selectorAnnotation.value().getName() ) )
);
applySelectors(
klass,
PackageEnhancementSelector.class,
PackageEnhancementSelectors.class,
selectorAnnotation -> selectors.add( new PackageSelector( selectorAnnotation.value() ) )
);
applySelectors(
klass,
ImplEnhancementSelector.class,
ImplEnhancementSelectors.class,
selectorAnnotation -> {
try {
selectors.add( selectorAnnotation.impl().newInstance() );
}
catch ( RuntimeException re ) {
throw re;
}
catch ( Exception e ) {
throw new RuntimeException( e );
}
}
);
return buildEnhancerClassLoader( enhancerContext, selectors );
}
private static <A extends Annotation> void applySelectors(
Class<?> klass,
Class<A> selectorAnnotationType,
Class<? extends Annotation> selectorsAnnotationType,
Consumer<A> action) {
final A selectorAnnotation = klass.getAnnotation( selectorAnnotationType );
final Annotation selectorsAnnotation = klass.getAnnotation( selectorsAnnotationType );
if ( selectorAnnotation != null ) {
action.accept( selectorAnnotation );
}
else if ( selectorsAnnotation != null ) {
try {
final Method valuesMethod = selectorsAnnotationType.getDeclaredMethods()[0];
//noinspection unchecked
final A[] selectorAnnotations = (A[]) valuesMethod.invoke( selectorsAnnotation );
for ( A groupedSelectorAnnotation : selectorAnnotations ) {
action.accept( groupedSelectorAnnotation );
}
}
catch (Exception e) {
throw new RuntimeException( e );
}
}
}
private static ClassLoader buildEnhancerClassLoader(
EnhancementContext enhancerContext,
List<EnhancementSelector> selectors) {
return new EnhancingClassLoader(
buildDefaultBytecodeProvider().getEnhancer( enhancerContext ),
selectors
);
}
private static
|
BytecodeEnhancerRunner
|
java
|
elastic__elasticsearch
|
test/framework/src/main/java/org/elasticsearch/index/mapper/TestBlock.java
|
{
"start": 25890,
"end": 28368
}
|
class ____ extends TestBlock.Builder implements BlockLoader.IntBuilder {
private IntBuilder(int expectedSize) {
super(expectedSize);
}
@Override
public BlockLoader.IntBuilder appendInt(int value) {
add(value);
return this;
}
}
@Override
public BlockLoader.DoubleBuilder min() {
return min;
}
@Override
public BlockLoader.DoubleBuilder max() {
return max;
}
@Override
public BlockLoader.DoubleBuilder sum() {
return sum;
}
@Override
public BlockLoader.IntBuilder count() {
return count;
}
@Override
public BlockLoader.Block build() {
var minBlock = min.build();
var maxBlock = max.build();
var sumBlock = sum.build();
var countBlock = count.build();
return parseAggMetricsToBlock(minBlock, maxBlock, sumBlock, countBlock);
}
public static TestBlock parseAggMetricsToBlock(TestBlock minBlock, TestBlock maxBlock, TestBlock sumBlock, TestBlock countBlock) {
assert minBlock.size() == maxBlock.size();
assert maxBlock.size() == sumBlock.size();
assert sumBlock.size() == countBlock.size();
var values = new ArrayList<>(minBlock.size());
for (int i = 0; i < minBlock.size(); i++) {
// we need to represent this complex block somehow
var value = new HashMap<String, Object>();
value.put("min", minBlock.values.get(i));
value.put("max", maxBlock.values.get(i));
value.put("sum", sumBlock.values.get(i));
value.put("value_count", countBlock.values.get(i));
values.add(value);
}
return new TestBlock(values);
}
@Override
public BlockLoader.Builder appendNull() {
throw new UnsupportedOperationException();
}
@Override
public BlockLoader.Builder beginPositionEntry() {
throw new UnsupportedOperationException();
}
@Override
public BlockLoader.Builder endPositionEntry() {
throw new UnsupportedOperationException();
}
@Override
public void close() {
}
}
private static
|
IntBuilder
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/processor/onexception/ContextScopedOnExceptionLoadBalancerStopRouteTest.java
|
{
"start": 1284,
"end": 4914
}
|
class ____ extends ContextTestSupport {
@Test
public void testOk() throws Exception {
getMockEndpoint("mock:error").expectedMessageCount(0);
getMockEndpoint("mock:start").expectedBodiesReceived("World");
getMockEndpoint("mock:result").expectedBodiesReceived("Bye World");
getMockEndpoint("mock:exception").expectedMessageCount(0);
template.sendBody("direct:start", "World");
assertMockEndpointsSatisfied();
}
@Test
public void testError() throws Exception {
getMockEndpoint("mock:error").expectedBodiesReceived("Kaboom");
getMockEndpoint("mock:start").expectedBodiesReceived("Kaboom");
getMockEndpoint("mock:result").expectedMessageCount(0);
getMockEndpoint("mock:exception").expectedBodiesReceived("Kaboom");
template.sendBody("direct:start", "Kaboom");
assertMockEndpointsSatisfied();
}
@Test
public void testErrorOk() throws Exception {
getMockEndpoint("mock:error").expectedBodiesReceived("Kaboom");
getMockEndpoint("mock:start").expectedBodiesReceived("Kaboom", "World");
getMockEndpoint("mock:result").expectedBodiesReceived("Bye World");
getMockEndpoint("mock:exception").expectedBodiesReceived("Kaboom");
template.sendBody("direct:start", "Kaboom");
template.sendBody("direct:start", "World");
assertMockEndpointsSatisfied();
}
@Test
public void testErrorOkError() throws Exception {
getMockEndpoint("mock:error").expectedBodiesReceived("Kaboom");
getMockEndpoint("mock:start").expectedBodiesReceived("Kaboom", "World", "Kaboom");
getMockEndpoint("mock:result").expectedBodiesReceived("Bye World");
getMockEndpoint("mock:exception").expectedBodiesReceived("Kaboom", "Kaboom");
template.sendBody("direct:start", "Kaboom");
template.sendBody("direct:start", "World");
// give time for route to stop.
// this was originally 1 second, but the route does not always
// shut down that fast, so bumped it up for some cushion.
await().atMost(3, TimeUnit.SECONDS).untilAsserted(
() -> assertEquals(ServiceStatus.Stopped, context.getRouteController().getRouteStatus("errorRoute")));
template.sendBody("direct:start", "Kaboom");
assertMockEndpointsSatisfied();
// should be 1 on the seda queue
SedaEndpoint seda = getMandatoryEndpoint("seda:error", SedaEndpoint.class);
SedaEndpoint seda2 = getMandatoryEndpoint("seda:error2", SedaEndpoint.class);
int size = seda.getQueue().size();
int size2 = seda2.getQueue().size();
assertTrue(size == 1 || size2 == 1, "There should be 1 exchange on the seda or seda2 queue");
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
onException(Exception.class).handled(true).loadBalance().roundRobin().to("seda:error", "seda:error2").end()
.to("mock:exception");
from("direct:start").to("mock:start").choice().when(body().contains("Kaboom"))
.throwException(new IllegalArgumentException("Forced")).otherwise()
.transform(body().prepend("Bye ")).to("mock:result");
from("seda:error").routeId("errorRoute").to("controlbus:route?action=stop&routeId=errorRoute&async=true")
.to("mock:error");
}
};
}
}
|
ContextScopedOnExceptionLoadBalancerStopRouteTest
|
java
|
quarkusio__quarkus
|
extensions/hibernate-validator/deployment/src/test/java/io/quarkus/hibernate/validator/test/validatorfactory/HibernateValidatorFactoryCustomizerTest.java
|
{
"start": 521,
"end": 1107
}
|
class ____ {
@Inject
ValidatorFactory validatorFactory;
@RegisterExtension
static final QuarkusUnitTest test = new QuarkusUnitTest().setArchiveProducer(() -> ShrinkWrap
.create(JavaArchive.class)
.addClasses(MyMultipleHibernateValidatorFactoryCustomizer.class, MyEmailValidator.class,
MyNumValidator.class));
@Test
public void testOverrideConstraintValidatorConstraint() {
assertThat(validatorFactory.getValidator().validate(new TestBean())).hasSize(2);
}
static
|
HibernateValidatorFactoryCustomizerTest
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java
|
{
"start": 2167,
"end": 9308
}
|
class ____ {
static public String ViewFSTestDir = "/testDir";
/*
* return the ViewFS File context to be used for tests
*/
static public FileContext setupForViewFsLocalFs(FileContextTestHelper helper) throws Exception {
/**
* create the test root on local_fs - the mount table will point here
*/
FileContext fsTarget = FileContext.getLocalFSFileContext();
Path targetOfTests = helper.getTestRootPath(fsTarget);
// In case previous test was killed before cleanup
fsTarget.delete(targetOfTests, true);
fsTarget.mkdir(targetOfTests, FileContext.DEFAULT_PERM, true);
Configuration conf = new Configuration();
// Set up viewfs link for test dir as described above
String testDir = helper.getTestRootPath(fsTarget).toUri()
.getPath();
linkUpFirstComponents(conf, testDir, fsTarget, "test dir");
// Set up viewfs link for home dir as described above
setUpHomeDir(conf, fsTarget);
// the test path may be relative to working dir - we need to make that work:
// Set up viewfs link for wd as described above
String wdDir = fsTarget.getWorkingDirectory().toUri().getPath();
linkUpFirstComponents(conf, wdDir, fsTarget, "working dir");
FileContext fc = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
fc.setWorkingDirectory(new Path(wdDir)); // in case testdir relative to wd.
Log.getLog().info("Working dir is: " + fc.getWorkingDirectory());
//System.out.println("SRCOfTests = "+ getTestRootPath(fc, "test"));
//System.out.println("TargetOfTests = "+ targetOfTests.toUri());
return fc;
}
/**
*
* delete the test directory in the target local fs
*/
static public void tearDownForViewFsLocalFs(FileContextTestHelper helper) throws Exception {
FileContext fclocal = FileContext.getLocalFSFileContext();
Path targetOfTests = helper.getTestRootPath(fclocal);
fclocal.delete(targetOfTests, true);
}
static void setUpHomeDir(Configuration conf, FileContext fsTarget) {
String homeDir = fsTarget.getHomeDirectory().toUri().getPath();
int indexOf2ndSlash = homeDir.indexOf('/', 1);
if (indexOf2ndSlash >0) {
linkUpFirstComponents(conf, homeDir, fsTarget, "home dir");
} else { // home dir is at root. Just link the home dir itse
URI linkTarget = fsTarget.makeQualified(new Path(homeDir)).toUri();
ConfigUtil.addLink(conf, homeDir, linkTarget);
Log.getLog().info("Added link for home dir " + homeDir + "->" + linkTarget);
}
// Now set the root of the home dir for viewfs
String homeDirRoot = fsTarget.getHomeDirectory().getParent().toUri().getPath();
ConfigUtil.setHomeDirConf(conf, homeDirRoot);
Log.getLog().info("Home dir base for viewfs" + homeDirRoot);
}
/*
* Set up link in config for first component of path to the same
* in the target file system.
*/
static void linkUpFirstComponents(Configuration conf, String path,
FileContext fsTarget, String info) {
int indexOfEnd = path.indexOf('/', 1);
if (Shell.WINDOWS) {
indexOfEnd = path.indexOf('/', indexOfEnd + 1);
}
String firstComponent = path.substring(0, indexOfEnd);
URI linkTarget = fsTarget.makeQualified(new Path(firstComponent)).toUri();
ConfigUtil.addLink(conf, firstComponent, linkTarget);
Log.getLog().info("Added link for " + info + " "
+ firstComponent + "->" + linkTarget);
}
/**
* Adds the given mount links to the given Hadoop compatible file system path.
* Mount link mappings are in sources, targets at their respective index
* locations.
*/
static void addMountLinksToFile(String mountTable, String[] sources,
String[] targets, Path mountTableConfPath, Configuration conf)
throws IOException, URISyntaxException {
ChildFsGetter cfs = new ViewFileSystemOverloadScheme.ChildFsGetter(
mountTableConfPath.toUri().getScheme());
try (FileSystem fs = cfs.getNewInstance(mountTableConfPath.toUri(),
conf)) {
try (FSDataOutputStream out = fs.create(mountTableConfPath)) {
String prefix =
new StringBuilder(Constants.CONFIG_VIEWFS_PREFIX).append(".")
.append((mountTable == null
? ConfigUtil.getDefaultMountTableName(conf)
: mountTable))
.append(".").toString();
out.writeBytes("<configuration>");
for (int i = 0; i < sources.length; i++) {
String src = sources[i];
String target = targets[i];
boolean isNfly = src.startsWith(Constants.CONFIG_VIEWFS_LINK_NFLY);
out.writeBytes("<property><name>");
if (isNfly) {
String[] srcParts = src.split("[.]");
assertEquals(3, srcParts.length, "Invalid NFlyLink format");
String actualSrc = srcParts[srcParts.length - 1];
String params = srcParts[srcParts.length - 2];
out.writeBytes(prefix + Constants.CONFIG_VIEWFS_LINK_NFLY + "."
+ params + "." + actualSrc);
} else if (Constants.CONFIG_VIEWFS_LINK_FALLBACK.equals(src)) {
out.writeBytes(prefix + Constants.CONFIG_VIEWFS_LINK_FALLBACK);
} else if (Constants.CONFIG_VIEWFS_LINK_MERGE_SLASH.equals(src)) {
out.writeBytes(prefix + Constants.CONFIG_VIEWFS_LINK_MERGE_SLASH);
} else {
out.writeBytes(prefix + Constants.CONFIG_VIEWFS_LINK + "." + src);
}
out.writeBytes("</name>");
out.writeBytes("<value>");
out.writeBytes(target);
out.writeBytes("</value></property>");
out.flush();
}
out.writeBytes(("</configuration>"));
out.flush();
}
}
}
/**
* Adds the given mount links to the configuration. Mount link mappings are
* in sources, targets at their respective index locations.
*/
public static void addMountLinksToConf(String mountTable, String[] sources,
String[] targets, Configuration config) throws URISyntaxException {
for (int i = 0; i < sources.length; i++) {
String src = sources[i];
String target = targets[i];
String mountTableName = mountTable == null ?
Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE : mountTable;
boolean isNfly = src.startsWith(Constants.CONFIG_VIEWFS_LINK_NFLY);
if (isNfly) {
String[] srcParts = src.split("[.]");
assertEquals(3, srcParts.length, "Invalid NFlyLink format");
String actualSrc = srcParts[srcParts.length - 1];
String params = srcParts[srcParts.length - 2];
ConfigUtil.addLinkNfly(config, mountTableName, actualSrc, params,
target);
} else if (src.equals(Constants.CONFIG_VIEWFS_LINK_FALLBACK)) {
ConfigUtil.addLinkFallback(config, mountTableName, new URI(target));
} else if (src.equals(Constants.CONFIG_VIEWFS_LINK_MERGE_SLASH)) {
ConfigUtil.addLinkMergeSlash(config, mountTableName, new URI(target));
} else {
ConfigUtil.addLink(config, mountTableName, src, new URI(target));
}
}
}
}
|
ViewFsTestSetup
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocatorTests.java
|
{
"start": 73277,
"end": 73688
}
|
class ____ implements BalancingWeightsFactory {
private final Map<String, WeightFunction> prefixWeights;
PrefixBalancingWeightsFactory(Map<String, WeightFunction> prefixWeights) {
this.prefixWeights = prefixWeights;
}
@Override
public BalancingWeights create() {
return new PrefixBalancingWeights();
}
|
PrefixBalancingWeightsFactory
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/component/ref/RefComponentTest.java
|
{
"start": 1214,
"end": 2117
}
|
class ____ extends ContextTestSupport {
private void setupComponent() throws Exception {
Component comp = new DirectComponent();
comp.setCamelContext(context);
Endpoint slow = comp.createEndpoint("direct:somename");
Consumer consumer = slow.createConsumer(new Processor() {
public void process(Exchange exchange) {
template.send("mock:result", exchange);
}
});
consumer.start();
// bind our endpoint to the registry for ref to lookup
context.getRegistry().bind("foo", slow);
}
@Test
public void testRef() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("Hello World");
setupComponent();
template.sendBody("ref:foo", "Hello World");
assertMockEndpointsSatisfied();
}
}
|
RefComponentTest
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/rest/messages/ThreadDumpInfoTest.java
|
{
"start": 1370,
"end": 4235
}
|
class ____ extends RestResponseMarshallingTestBase<ThreadDumpInfo> {
@Override
protected Class<ThreadDumpInfo> getTestResponseClass() {
return ThreadDumpInfo.class;
}
@Override
protected ThreadDumpInfo getTestResponseInstance() throws Exception {
final Collection<ThreadDumpInfo.ThreadInfo> threadInfos =
Arrays.asList(
ThreadDumpInfo.ThreadInfo.create("foobar", "barfoo"),
ThreadDumpInfo.ThreadInfo.create("bar", "foo"));
return ThreadDumpInfo.create(threadInfos);
}
@Override
protected void assertOriginalEqualsToUnmarshalled(
ThreadDumpInfo expected, ThreadDumpInfo actual) {
assertThat(actual.getThreadInfos())
.isEqualTo(Arrays.asList(expected.getThreadInfos().toArray()));
}
@Test
void testComparedWithDefaultJDKImplemetation() {
ThreadMXBean threadMxBean = ManagementFactory.getThreadMXBean();
ThreadInfo threadInfo =
threadMxBean.getThreadInfo(Thread.currentThread().getId(), Integer.MAX_VALUE);
// JDK11 has increased the output info of threadInfo.daemon and threadInfo.priority compared
// to JDK8, hence only compare the output of stacktrace content for compatibility.
String[] threadInfoLines = threadInfo.toString().split("\n");
String[] expected = Arrays.copyOfRange(threadInfoLines, 1, threadInfoLines.length);
String stringifyThreadInfo = ThreadDumpInfo.stringifyThreadInfo(threadInfo, 8);
String[] stringifyThreadInfoLines = stringifyThreadInfo.split("\n");
String[] stringified =
Arrays.copyOfRange(stringifyThreadInfoLines, 1, stringifyThreadInfoLines.length);
assertThat(stringified).isEqualTo(expected);
}
@Test
void testStacktraceDepthLimitation() {
ThreadMXBean threadMxBean = ManagementFactory.getThreadMXBean();
ThreadInfo threadInfo =
threadMxBean.getThreadInfo(Thread.currentThread().getId(), Integer.MAX_VALUE);
int expectedStacktraceDepth = threadInfo.getStackTrace().length;
String stringifiedInfo = ThreadDumpInfo.stringifyThreadInfo(threadInfo, Integer.MAX_VALUE);
assertThat(getOutputDepth(stringifiedInfo)).isEqualTo(expectedStacktraceDepth);
String stringifiedInfoExceedMaxDepth =
ThreadDumpInfo.stringifyThreadInfo(threadInfo, expectedStacktraceDepth - 1);
assertThat(getOutputDepth(stringifiedInfoExceedMaxDepth))
.isEqualTo(expectedStacktraceDepth - 1);
assertThat(stringifiedInfoExceedMaxDepth.contains("\t...")).isTrue();
}
private long getOutputDepth(String stringifiedInfo) {
return Arrays.stream(stringifiedInfo.split("\n")).filter(x -> x.contains("\tat ")).count();
}
}
|
ThreadDumpInfoTest
|
java
|
apache__flink
|
flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/common/CommonExecExchange.java
|
{
"start": 1404,
"end": 1478
}
|
class ____ its functionality is replaced by ExecEdge.
*/
public abstract
|
once
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/index/mapper/MapperService.java
|
{
"start": 22520,
"end": 40178
}
|
class ____ implements XContentHelper.CustomMerge {
private static final XContentHelper.CustomMerge INSTANCE = new RawFieldMappingMerge();
private static final Set<String> MERGEABLE_OBJECT_TYPES = Set.of(ObjectMapper.CONTENT_TYPE, NestedObjectMapper.CONTENT_TYPE);
private RawFieldMappingMerge() {}
@SuppressWarnings("unchecked")
@Override
public Object merge(String parent, String key, Object oldValue, Object newValue) {
if (oldValue instanceof Map && newValue instanceof Map) {
if ("properties".equals(parent)) {
// merging two mappings of the same field, where "key" is the field name
Map<String, Object> baseMap = (Map<String, Object>) oldValue;
Map<String, Object> mapToMerge = (Map<String, Object>) newValue;
if (shouldMergeFieldMappings(baseMap, mapToMerge)) {
// if two field mappings are to be merged, we only want to keep some specific entries from the base mapping and
// let all others be overridden by the second mapping
Map<String, Object> mergedMappings = new HashMap<>();
// we must keep the "properties" node, otherwise our merge has no point
if (baseMap.containsKey("properties")) {
mergedMappings.put("properties", new HashMap<>((Map<String, Object>) baseMap.get("properties")));
}
// the "subobjects" setting affects an entire subtree and not only locally where it is configured
if (baseMap.containsKey("subobjects")) {
mergedMappings.put("subobjects", baseMap.get("subobjects"));
}
// Recursively merge these two field mappings.
// Since "key" is an arbitrary field name, for which we only need plain mapping subtrees merge, no need to pass it
// to the recursion as it shouldn't affect the merge logic. Specifically, passing a parent may cause merge
// failures of fields named "properties". See https://github.com/elastic/elasticsearch/issues/108866
XContentHelper.merge(mergedMappings, mapToMerge, INSTANCE);
return mergedMappings;
} else {
// non-mergeable types - replace the entire mapping subtree for this field
return mapToMerge;
}
}
// anything else (e.g. "_doc", "_meta", "properties") - no custom merge, rely on caller merge logic
// field mapping entries of Map type (like "fields" and "meta") are handled above and should never reach here
return null;
} else {
if (key.equals("required")) {
// we look for explicit `_routing.required` settings because we use them to detect contradictions of this setting
// that comes from mappings with such that comes from the optional `data_stream` configuration of composable index
// templates
if ("_routing".equals(parent) && oldValue != newValue) {
throw new MapperParsingException("contradicting `_routing.required` settings");
}
}
return newValue;
}
}
/**
* Normally, we don't want to merge raw field mappings, however there are cases where we do, for example - two
* "object" (or "nested") mappings.
*
* @param mappings1 first mapping of a field
* @param mappings2 second mapping of a field
* @return {@code true} if the second mapping should be merged into the first mapping
*/
private boolean shouldMergeFieldMappings(Map<String, Object> mappings1, Map<String, Object> mappings2) {
String type1 = (String) mappings1.get("type");
if (type1 == null && mappings1.get("properties") != null) {
type1 = ObjectMapper.CONTENT_TYPE;
}
String type2 = (String) mappings2.get("type");
if (type2 == null && mappings2.get("properties") != null) {
type2 = ObjectMapper.CONTENT_TYPE;
}
if (type1 == null || type2 == null) {
return false;
}
return MERGEABLE_OBJECT_TYPES.contains(type1) && MERGEABLE_OBJECT_TYPES.contains(type2);
}
}
public DocumentMapper merge(String type, CompressedXContent mappingSource, MergeReason reason) {
final DocumentMapper currentMapper = this.mapper;
if (currentMapper != null && currentMapper.mappingSource().equals(mappingSource)) {
return currentMapper;
}
Map<String, Object> mappingSourceAsMap = MappingParser.convertToMap(mappingSource);
return doMerge(type, reason, mappingSourceAsMap);
}
private DocumentMapper doMerge(String type, MergeReason reason, Map<String, Object> mappingSourceAsMap) {
Mapping incomingMapping = parseMapping(type, reason, mappingSourceAsMap);
// TODO: In many cases the source here is equal to mappingSource so we need not serialize again.
// We should identify these cases reliably and save expensive serialization here
if (reason == MergeReason.MAPPING_AUTO_UPDATE_PREFLIGHT) {
// only doing a merge without updating the actual #mapper field, no need to synchronize
Mapping mapping = mergeMappings(this.mapper, incomingMapping, MergeReason.MAPPING_AUTO_UPDATE_PREFLIGHT, this.indexSettings);
return newDocumentMapper(mapping, MergeReason.MAPPING_AUTO_UPDATE_PREFLIGHT, mapping.toCompressedXContent());
} else {
// synchronized concurrent mapper updates are guaranteed to set merged mappers derived from the mapper value previously read
// TODO: can we even have concurrent updates here?
synchronized (this) {
Mapping mapping = mergeMappings(this.mapper, incomingMapping, reason, this.indexSettings);
DocumentMapper newMapper = newDocumentMapper(mapping, reason, mapping.toCompressedXContent());
this.mapper = newMapper;
assert assertSerialization(newMapper, reason);
return newMapper;
}
}
}
private DocumentMapper newDocumentMapper(Mapping mapping, MergeReason reason, CompressedXContent mappingSource) {
DocumentMapper newMapper = new DocumentMapper(
documentParser,
mapping,
mappingSource,
indexVersionCreated,
mapperMetrics,
index().getName()
);
newMapper.validate(indexSettings, reason != MergeReason.MAPPING_RECOVERY);
return newMapper;
}
public Mapping parseMapping(String mappingType, MergeReason reason, CompressedXContent mappingSource) {
try {
return mappingParser.parse(mappingType, reason, mappingSource);
} catch (Exception e) {
throw new MapperParsingException("Failed to parse mapping: {}", e, e.getMessage());
}
}
/**
* A method to parse mapping from a source in a map form.
*
* @param mappingType the mapping type
* @param reason the merge reason to use when merging mappers while building the mapper
* @param mappingSource mapping source already converted to a map form, but not yet processed otherwise
* @return a parsed mapping
*/
public Mapping parseMapping(String mappingType, MergeReason reason, Map<String, Object> mappingSource) {
try {
return mappingParser.parse(mappingType, reason, mappingSource);
} catch (Exception e) {
throw new MapperParsingException("Failed to parse mapping: {}", e, e.getMessage());
}
}
public static Mapping mergeMappings(
DocumentMapper currentMapper,
Mapping incomingMapping,
MergeReason reason,
IndexSettings indexSettings
) {
return mergeMappings(currentMapper, incomingMapping, reason, getMaxFieldsToAddDuringMerge(currentMapper, indexSettings, reason));
}
private static long getMaxFieldsToAddDuringMerge(DocumentMapper currentMapper, IndexSettings indexSettings, MergeReason reason) {
if (reason.isAutoUpdate() && indexSettings.isIgnoreDynamicFieldsBeyondLimit()) {
// If the index setting ignore_dynamic_beyond_limit is enabled,
// data nodes only add new dynamic fields until the limit is reached while parsing documents to be ingested.
// However, if there are concurrent mapping updates,
// data nodes may add dynamic fields under an outdated assumption that enough capacity is still available.
// When data nodes send the dynamic mapping update request to the master node,
// it will only add as many fields as there's actually capacity for when merging mappings.
long totalFieldsLimit = indexSettings.getMappingTotalFieldsLimit();
return Optional.ofNullable(currentMapper)
.map(DocumentMapper::mappers)
.map(ml -> ml.remainingFieldsUntilLimit(totalFieldsLimit))
.orElse(totalFieldsLimit);
} else {
// Else, we're not limiting the number of fields so that the merged mapping fails validation if it exceeds total_fields.limit.
// This is the desired behavior when making an explicit mapping update, even if ignore_dynamic_beyond_limit is enabled.
// When ignore_dynamic_beyond_limit is disabled and a dynamic mapping update would exceed the field limit,
// the document will get rejected.
// Normally, this happens on the data node in DocumentParserContext.addDynamicMapper but if there's a race condition,
// data nodes may add dynamic fields under an outdated assumption that enough capacity is still available.
// In this case, the master node will reject mapping updates that would exceed the limit when handling the mapping update.
return Long.MAX_VALUE;
}
}
static Mapping mergeMappings(DocumentMapper currentMapper, Mapping incomingMapping, MergeReason reason, long newFieldsBudget) {
Mapping newMapping;
if (currentMapper == null) {
newMapping = incomingMapping.withFieldsBudget(newFieldsBudget);
} else {
newMapping = currentMapper.mapping().merge(incomingMapping, reason, newFieldsBudget);
}
return newMapping;
}
private boolean assertSerialization(DocumentMapper mapper, MergeReason reason) {
// capture the source now, it may change due to concurrent parsing
final CompressedXContent mappingSource = mapper.mappingSource();
Mapping newMapping = parseMapping(mapper.type(), reason, mappingSource);
if (newMapping.toCompressedXContent().equals(mappingSource) == false) {
throw new AssertionError(
"Mapping serialization result is different from source. \n--> Source ["
+ mappingSource
+ "]\n--> Result ["
+ newMapping.toCompressedXContent()
+ "]"
);
}
return true;
}
/**
* Return the document mapper, or {@code null} if no mapping has been put yet
* or no documents have been indexed in the current index yet (which triggers a dynamic mapping update)
*/
public DocumentMapper documentMapper() {
return mapper;
}
public long mappingVersion() {
return mappingVersion;
}
/**
* Returns {@code true} if the given {@code mappingSource} includes a type
* as a top-level object.
*/
public static boolean isMappingSourceTyped(String type, Map<String, Object> mapping) {
return mapping.size() == 1 && mapping.keySet().iterator().next().equals(type);
}
/**
* Resolves a type from a mapping-related request into the type that should be used when
* merging and updating mappings.
*
* If the special `_doc` type is provided, then we replace it with the actual type that is
* being used in the mappings. This allows typeless APIs such as 'index' or 'put mappings'
* to work against indices with a custom type name.
*/
private String resolveDocumentType(String type) {
if (MapperService.SINGLE_MAPPING_NAME.equals(type)) {
if (mapper != null) {
return mapper.type();
}
}
return type;
}
/**
* Given the full name of a field, returns its {@link MappedFieldType}.
*/
public MappedFieldType fieldType(String fullName) {
return mappingLookup().fieldTypesLookup().get(fullName);
}
/**
* Exposes a snapshot of the mappings for the current index.
* If no mappings have been registered for the current index, an empty {@link MappingLookup} instance is returned.
* An index does not have mappings only if it was created without providing mappings explicitly,
* and no documents have yet been indexed in it.
*/
public MappingLookup mappingLookup() {
DocumentMapper mapper = this.mapper;
return mapper == null ? MappingLookup.EMPTY : mapper.mappers();
}
/**
* Returns field types that have eager global ordinals.
*/
public Iterable<MappedFieldType> getEagerGlobalOrdinalsFields() {
DocumentMapper mapper = this.mapper;
if (mapper == null) {
return Collections.emptySet();
}
MappingLookup mappingLookup = mapper.mappers();
return mappingLookup.getMatchingFieldNames("*")
.stream()
.map(mappingLookup::getFieldType)
.filter(MappedFieldType::eagerGlobalOrdinals)
.toList();
}
/**
* Return the index-time analyzer associated with a particular field
* @param field the field name
* @param unindexedFieldAnalyzer a function to return an Analyzer for a field with no
* directly associated index-time analyzer
*/
public NamedAnalyzer indexAnalyzer(String field, Function<String, NamedAnalyzer> unindexedFieldAnalyzer) {
return mappingLookup().indexAnalyzer(field, unindexedFieldAnalyzer);
}
@Override
public void close() throws IOException {
indexAnalyzers.close();
}
/**
* @return Whether a field is a metadata field
* Deserialization of SearchHit objects sent from pre 7.8 nodes and GetResults objects sent from pre 7.3 nodes,
* uses this method to divide fields into meta and document fields.
* TODO: remove in v 9.0
* @deprecated Use an instance method isMetadataField instead
*/
@Deprecated
public static boolean isMetadataFieldStatic(String fieldName) {
if (IndicesModule.getBuiltInMetadataFields().contains(fieldName)) {
return true;
}
// if a node had Size Plugin installed, _size field should also be considered a meta-field
return fieldName.equals("_size");
}
/**
* @return Whether a field is a metadata field.
* this method considers all mapper plugins
*/
public boolean isMetadataField(String field) {
var mapper = mappingLookup().getMapper(field);
return mapper instanceof MetadataFieldMapper;
}
/**
* @return If this field is defined as a multifield of another field
*/
public boolean isMultiField(String field) {
return mappingLookup().isMultiField(field);
}
/**
* Reload any search analyzers that have reloadable components if resource is {@code null},
* otherwise only the provided resource is reloaded.
* @param registry the analysis registry
* @param resource the name of the reloadable resource or {@code null} if all resources should be reloaded.
* @param preview {@code false} applies analyzer reloading. {@code true} previews the reloading operation, so analyzers are not reloaded
* but the results retrieved. This is useful for understanding analyzers usage in the different indices.
* @return The names of reloaded resources (or resources that would be reloaded if {@code preview} is true).
* @throws IOException
*/
public synchronized List<String> reloadSearchAnalyzers(AnalysisRegistry registry, @Nullable String resource, boolean preview)
throws IOException {
logger.debug("reloading search analyzers for index [{}]", indexSettings.getIndex().getName());
// TODO this should bust the cache somehow. Tracked in https://github.com/elastic/elasticsearch/issues/66722
return indexAnalyzers.reload(registry, indexSettings, resource, preview);
}
/**
* @return Returns all dynamic templates defined in this mapping.
*/
public DynamicTemplate[] getAllDynamicTemplates() {
return documentMapper().mapping().getRoot().dynamicTemplates();
}
public MapperRegistry getMapperRegistry() {
return mapperRegistry;
}
public Function<Query, BitSetProducer> getBitSetProducer() {
return bitSetProducer;
}
public MapperMetrics getMapperMetrics() {
return mapperMetrics;
}
}
|
RawFieldMappingMerge
|
java
|
apache__flink
|
flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java
|
{
"start": 88032,
"end": 90024
}
|
class ____ extends AbstractHandler {
private final BucketTaggingConfiguration configuration = new BucketTaggingConfiguration();
private Map<String, String> currentTagSet;
private String currentTagKey;
private String currentTagValue;
public BucketTaggingConfiguration getConfiguration() {
return configuration;
}
@Override
protected void doStartElement(String uri, String name, String qName, Attributes attrs) {
if (in("Tagging")) {
if (name.equals("TagSet")) {
currentTagSet = new HashMap<String, String>();
}
}
}
@Override
protected void doEndElement(String uri, String name, String qName) {
if (in("Tagging")) {
if (name.equals("TagSet")) {
configuration.getAllTagSets().add(new TagSet(currentTagSet));
currentTagSet = null;
}
} else if (in("Tagging", "TagSet")) {
if (name.equals("Tag")) {
if (currentTagKey != null && currentTagValue != null) {
currentTagSet.put(currentTagKey, currentTagValue);
}
currentTagKey = null;
currentTagValue = null;
}
} else if (in("Tagging", "TagSet", "Tag")) {
if (name.equals("Key")) {
currentTagKey = getText();
} else if (name.equals("Value")) {
currentTagValue = getText();
}
}
}
}
/**
* Handler for unmarshalling the response from GET Object Tagging.
*
* <p><Tagging> <TagSet> <Tag> <Key>Foo</Key> <Value>1</Value> </Tag> <Tag> <Key>Bar</Key>
* <Value>2</Value> </Tag> <Tag> <Key>Baz</Key> <Value>3</Value> </Tag> </TagSet> </Tagging>
*/
public static
|
BucketTaggingConfigurationHandler
|
java
|
apache__maven
|
compat/maven-artifact/src/main/java/org/apache/maven/artifact/resolver/MultipleArtifactsNotFoundException.java
|
{
"start": 1162,
"end": 4516
}
|
class ____ extends ArtifactResolutionException {
private static final String LS = System.lineSeparator();
private final List<Artifact> resolvedArtifacts;
private final List<Artifact> missingArtifacts;
/**
* @param originatingArtifact the artifact that was being resolved
* @param missingArtifacts artifacts that could not be resolved
* @param remoteRepositories remote repositories where the missing artifacts were not found
* @deprecated use {@link #MultipleArtifactsNotFoundException(Artifact, List, List, List)}
*/
@Deprecated
public MultipleArtifactsNotFoundException(
Artifact originatingArtifact,
List<Artifact> missingArtifacts,
List<ArtifactRepository> remoteRepositories) {
this(originatingArtifact, new ArrayList<>(), missingArtifacts, remoteRepositories);
}
/**
* Create an instance of the exception with all required information.
*
* @param originatingArtifact the artifact that was being resolved
* @param resolvedArtifacts artifacts that could be resolved
* @param missingArtifacts artifacts that could not be resolved
* @param remoteRepositories remote repositories where the missing artifacts were not found
*/
public MultipleArtifactsNotFoundException(
Artifact originatingArtifact,
List<Artifact> resolvedArtifacts,
List<Artifact> missingArtifacts,
List<ArtifactRepository> remoteRepositories) {
super(constructMessage(missingArtifacts), originatingArtifact, remoteRepositories);
this.resolvedArtifacts = resolvedArtifacts;
this.missingArtifacts = missingArtifacts;
}
/**
* artifacts that could be resolved
*
* @return {@link List} of {@link Artifact}
*/
public List<Artifact> getResolvedArtifacts() {
return resolvedArtifacts;
}
/**
* artifacts that could NOT be resolved
*
* @return {@link List} of {@link Artifact}
*/
public List<Artifact> getMissingArtifacts() {
return missingArtifacts;
}
private static String constructMessage(List<Artifact> artifacts) {
StringBuilder buffer = new StringBuilder(256);
buffer.append("Missing:").append(LS);
buffer.append("----------").append(LS);
int counter = 0;
for (Artifact artifact : artifacts) {
String message = (++counter) + ") " + artifact.getId();
buffer.append(constructMissingArtifactMessage(
message,
" ",
artifact.getGroupId(),
artifact.getArtifactId(),
artifact.getVersion(),
artifact.getType(),
artifact.getClassifier(),
artifact.getDownloadUrl(),
artifact.getDependencyTrail()));
}
buffer.append("----------").append(LS);
int size = artifacts.size();
buffer.append(size).append(" required artifact");
if (size > 1) {
buffer.append("s are");
} else {
buffer.append(" is");
}
buffer.append(" missing.").append(LS).append(LS).append("for artifact: ");
return buffer.toString();
}
}
|
MultipleArtifactsNotFoundException
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/FloatState.java
|
{
"start": 525,
"end": 1384
}
|
class ____ implements AggregatorState {
private float value;
private boolean seen;
FloatState(float init) {
this.value = init;
}
float floatValue() {
return value;
}
void floatValue(float value) {
this.value = value;
}
boolean seen() {
return seen;
}
void seen(boolean seen) {
this.seen = seen;
}
/** Extracts an intermediate view of the contents of this state. */
@Override
public void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) {
assert blocks.length >= offset + 2;
blocks[offset + 0] = driverContext.blockFactory().newConstantFloatBlockWith(value, 1);
blocks[offset + 1] = driverContext.blockFactory().newConstantBooleanBlockWith(seen, 1);
}
@Override
public void close() {}
}
|
FloatState
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/action/TestQueryRulesetAction.java
|
{
"start": 1931,
"end": 5436
}
|
class ____ extends LegacyActionRequest implements ToXContentObject, IndicesRequest {
private final String rulesetId;
private final Map<String, Object> matchCriteria;
private static final ParseField RULESET_ID_FIELD = new ParseField("ruleset_id");
private static final ParseField MATCH_CRITERIA_FIELD = new ParseField("match_criteria");
public Request(StreamInput in) throws IOException {
super(in);
this.rulesetId = in.readString();
this.matchCriteria = in.readGenericMap();
}
public Request(String rulesetId, Map<String, Object> matchCriteria) {
this.rulesetId = rulesetId;
this.matchCriteria = matchCriteria;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (Strings.isNullOrEmpty(rulesetId)) {
validationException = addValidationError("ruleset_id missing", validationException);
}
return validationException;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(rulesetId);
out.writeGenericMap(matchCriteria);
}
public String rulesetId() {
return rulesetId;
}
public Map<String, Object> matchCriteria() {
return matchCriteria;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Request request = (Request) o;
return Objects.equals(rulesetId, request.rulesetId) && Objects.equals(matchCriteria, request.matchCriteria);
}
@Override
public int hashCode() {
return Objects.hash(rulesetId, matchCriteria);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(RULESET_ID_FIELD.getPreferredName(), rulesetId);
builder.startObject(MATCH_CRITERIA_FIELD.getPreferredName());
builder.mapContents(matchCriteria);
builder.endObject();
builder.endObject();
return builder;
}
private static final ConstructingObjectParser<Request, String> PARSER = new ConstructingObjectParser<>(
"test_query_ruleset_request",
false,
(p, name) -> {
@SuppressWarnings("unchecked")
Map<String, Object> matchCriteria = (Map<String, Object>) p[0];
return new Request(name, matchCriteria);
}
);
static {
PARSER.declareObject(constructorArg(), (p, c) -> p.map(), MATCH_CRITERIA_FIELD);
PARSER.declareString(optionalConstructorArg(), RULESET_ID_FIELD); // Required for parsing
}
public static Request parse(XContentParser parser, String name) {
return PARSER.apply(parser, name);
}
@Override
public String[] indices() {
return new String[] { QueryRulesIndexService.QUERY_RULES_ALIAS_NAME };
}
@Override
public IndicesOptions indicesOptions() {
return IndicesOptions.lenientExpandHidden();
}
}
public static
|
Request
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/api/collection/CollectionAssert_isUnmodifiable_Test.java
|
{
"start": 2412,
"end": 6090
}
|
class ____ {
@Test
void should_fail_if_actual_is_null() {
// GIVEN
Collection<?> actual = null;
// WHEN
var assertionError = expectAssertionError(() -> assertThat(actual).isUnmodifiable());
// THEN
then(assertionError).hasMessage(shouldNotBeNull().create());
}
@ParameterizedTest
@MethodSource("modifiableCollections")
void should_fail_if_actual_can_be_modified(Collection<?> actual, ErrorMessageFactory errorMessageFactory) {
// WHEN
var assertionError = expectAssertionError(() -> assertThat(actual).isUnmodifiable());
// THEN
then(assertionError).as(actual.getClass().getName())
.hasMessage(errorMessageFactory.create());
}
private static Stream<Arguments> modifiableCollections() {
return Stream.of(arguments(new ArrayList<>(), shouldBeUnmodifiable("Collection.add(null)")),
arguments(new LinkedHashSet<>(), shouldBeUnmodifiable("Collection.add(null)")),
arguments(new LinkedList<>(), shouldBeUnmodifiable("Collection.add(null)")),
arguments(new HashSet<>(), shouldBeUnmodifiable("Collection.add(null)")),
arguments(newArrayList(new Object()), shouldBeUnmodifiable("Collection.add(null)")),
arguments(newLinkedHashSet(new Object()), shouldBeUnmodifiable("Collection.add(null)")),
arguments(newTreeSet("element"), shouldBeUnmodifiable("Collection.add(null)", new NullPointerException())));
}
@ParameterizedTest
@MethodSource("unmodifiableCollections")
void should_pass(Collection<?> actual) {
// WHEN/THEN
assertThatNoException().as(actual.getClass().getName())
.isThrownBy(() -> assertThat(actual).isUnmodifiable());
}
private static Stream<Collection<?>> unmodifiableCollections() {
return Stream.of(Collections.emptyList(),
Collections.emptyNavigableSet(),
Collections.emptySet(),
Collections.emptySortedSet(),
Collections.singleton("element"),
Collections.singletonList("element"),
Collections.unmodifiableCollection(list(new Object())),
Collections.unmodifiableList(list(new Object())),
Collections.unmodifiableNavigableSet(newTreeSet("element")),
Collections.unmodifiableSet(set(new Object())),
Collections.unmodifiableSortedSet(newTreeSet("element")),
ImmutableList.of(new Object()),
ImmutableSet.of(new Object()),
ImmutableSortedSet.of("element"),
List.of(),
List.of("element"), // same implementation for 1 or 2 parameters
List.of("element", "element", "element"), // same implementation for 3+ parameters
Set.of(),
Set.of("element"), // same implementation for 1 or 2 parameters
Set.of("element1", "element2", "element3"), // same implementation for 3+ parameters
Sets.unmodifiableNavigableSet(newTreeSet("element")),
UnmodifiableCollection.unmodifiableCollection(list(new Object())),
UnmodifiableList.unmodifiableList(list(new Object())),
UnmodifiableNavigableSet.unmodifiableNavigableSet(newTreeSet("element")),
UnmodifiableSet.unmodifiableSet(set(new Object())),
UnmodifiableSortedSet.unmodifiableSortedSet(newTreeSet("element")));
}
}
|
CollectionAssert_isUnmodifiable_Test
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/boot/models/spi/ConversionRegistration.java
|
{
"start": 846,
"end": 2408
}
|
class ____ {
private final Class<?> explicitDomainType;
private final Class<? extends AttributeConverter<?,?>> converterType;
private final boolean autoApply;
private final AnnotationDescriptor<? extends Annotation> source;
public ConversionRegistration(
Class<?> explicitDomainType,
Class<? extends AttributeConverter<?,?>> converterType,
boolean autoApply,
AnnotationDescriptor<? extends Annotation> source) {
assert converterType != null;
this.explicitDomainType = explicitDomainType;
this.converterType = converterType;
this.autoApply = autoApply;
this.source = source;
}
@Override
public boolean equals(Object object) {
if ( this == object ) {
return true;
}
if ( !(object instanceof ConversionRegistration that) ) {
return false;
}
return autoApply == that.autoApply
&& Objects.equals( explicitDomainType, that.explicitDomainType )
&& converterType.equals( that.converterType );
}
@Override
public int hashCode() {
return Objects.hash( explicitDomainType, converterType );
}
public Class<?> getExplicitDomainType() {
return explicitDomainType;
}
public Class<? extends AttributeConverter<?,?>> getConverterType() {
return converterType;
}
public boolean isAutoApply() {
return autoApply;
}
public AnnotationDescriptor<? extends Annotation> getSource() {
return source;
}
@Override
public String toString() {
return "ConversionRegistration( " + converterType.getName() + ", " + source.getAnnotationType().getSimpleName() + ", " + autoApply + ")";
}
}
|
ConversionRegistration
|
java
|
elastic__elasticsearch
|
libs/entitlement/qa/entitlement-test-plugin/src/main/java/org/elasticsearch/entitlement/qa/test/DummyImplementations.java
|
{
"start": 23151,
"end": 24159
}
|
class ____ extends AbstractSelector {
protected DummySelector(SelectorProvider provider) {
super(provider);
}
@Override
protected void implCloseSelector() throws IOException {
}
@Override
protected SelectionKey register(AbstractSelectableChannel ch, int ops, Object att) {
return null;
}
@Override
public Set<SelectionKey> keys() {
return Set.of();
}
@Override
public Set<SelectionKey> selectedKeys() {
return Set.of();
}
@Override
public int selectNow() throws IOException {
return 0;
}
@Override
public int select(long timeout) throws IOException {
return 0;
}
@Override
public int select() throws IOException {
return 0;
}
@Override
public Selector wakeup() {
return null;
}
}
}
|
DummySelector
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/feature/FeaturesTest2.java
|
{
"start": 304,
"end": 900
}
|
class ____ extends TestCase {
public void test_0() throws Exception {
SerializeConfig config = new SerializeConfig();
config.setAsmEnable(false);
String text = JSON.toJSONString(new Entity(), config);
Assert.assertEquals("{\"value\":0}", text);
}
public void test_1() throws Exception {
SerializeConfig config = new SerializeConfig();
config.setAsmEnable(true);
String text = JSON.toJSONString(new Entity(), config);
Assert.assertEquals("{\"value\":0}", text);
}
private static
|
FeaturesTest2
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/state/StateSnapshotTransformerTest.java
|
{
"start": 6581,
"end": 8137
}
|
class ____<T>
implements StateSnapshotTransformFactory<T> {
private final SingleThreadAccessChecker singleThreadAccessChecker =
new SingleThreadAccessChecker();
static <T> StateSnapshotTransformFactory<T> create() {
return new SingleThreadAccessCheckingSnapshotTransformFactory<>();
}
@Override
public Optional<StateSnapshotTransformer<T>> createForDeserializedState() {
singleThreadAccessChecker.checkSingleThreadAccess();
return createStateSnapshotTransformer();
}
@Override
public Optional<StateSnapshotTransformer<byte[]>> createForSerializedState() {
singleThreadAccessChecker.checkSingleThreadAccess();
return createStateSnapshotTransformer();
}
private <T1> Optional<StateSnapshotTransformer<T1>> createStateSnapshotTransformer() {
return Optional.of(
new StateSnapshotTransformer<T1>() {
private final SingleThreadAccessChecker singleThreadAccessChecker =
new SingleThreadAccessChecker();
@Nullable
@Override
public T1 filterOrTransform(@Nullable T1 value) {
singleThreadAccessChecker.checkSingleThreadAccess();
return value;
}
});
}
}
private static
|
SingleThreadAccessCheckingSnapshotTransformFactory
|
java
|
lettuce-io__lettuce-core
|
src/test/java/io/lettuce/core/protocol/CommandArgsUnitTests.java
|
{
"start": 538,
"end": 6031
}
|
class ____ {
@Test
void getFirstIntegerShouldReturnNull() {
CommandArgs<String, String> args = new CommandArgs<>(StringCodec.UTF8).add("foo");
assertThat(CommandArgsAccessor.getFirstInteger(args)).isNull();
}
@Test
void getFirstIntegerShouldReturnFirstInteger() {
CommandArgs<String, String> args = new CommandArgs<>(StringCodec.UTF8).add(1L).add(127).add(128).add(129).add(0)
.add(-1);
assertThat(CommandArgsAccessor.getFirstInteger(args)).isEqualTo(1L);
}
@Test
void getFirstIntegerShouldReturnFirstNegativeInteger() {
CommandArgs<String, String> args = new CommandArgs<>(StringCodec.UTF8).add(-1L).add(-127).add(-128).add(-129);
assertThat(CommandArgsAccessor.getFirstInteger(args)).isEqualTo(-1L);
}
@Test
void getFirstIntegerShouldReturnFirstPositiveLong() {
CommandArgs<String, String> args = new CommandArgs<>(StringCodec.UTF8).add(Long.MAX_VALUE);
assertThat(CommandArgsAccessor.getFirstInteger(args)).isEqualTo(Long.MAX_VALUE);
}
@Test
void getFirstIntegerShouldReturnFirstNegativeLong() {
assertThat(CommandArgsAccessor.getFirstInteger(new CommandArgs<>(StringCodec.UTF8).add(Long.MIN_VALUE)))
.isEqualTo(Long.MIN_VALUE);
assertThat(CommandArgsAccessor.getFirstInteger(new CommandArgs<>(StringCodec.UTF8).add(Long.MIN_VALUE + 2)))
.isEqualTo(Long.MIN_VALUE + 2);
assertThat(CommandArgsAccessor.getFirstInteger(new CommandArgs<>(StringCodec.UTF8).add(Integer.MIN_VALUE)))
.isEqualTo(Integer.MIN_VALUE);
}
@Test
void getFirstStringShouldReturnNull() {
CommandArgs<String, String> args = new CommandArgs<>(StringCodec.UTF8).add(1);
assertThat(CommandArgsAccessor.getFirstString(args)).isNull();
}
@Test
void getFirstStringShouldReturnFirstString() {
CommandArgs<String, String> args = new CommandArgs<>(StringCodec.UTF8).add("one").add("two");
assertThat(CommandArgsAccessor.getFirstString(args)).isEqualTo("one");
}
@Test
void getFirstCharArrayShouldReturnCharArray() {
CommandArgs<String, String> args = new CommandArgs<>(StringCodec.UTF8).add(1L).add("two".toCharArray());
assertThat(CommandArgsAccessor.getFirstCharArray(args)).isEqualTo("two".toCharArray());
}
@Test
void getFirstCharArrayShouldReturnNull() {
CommandArgs<String, String> args = new CommandArgs<>(StringCodec.UTF8).add(1L);
assertThat(CommandArgsAccessor.getFirstCharArray(args)).isNull();
}
@Test
void getFirstEncodedKeyShouldReturnNull() {
CommandArgs<String, String> args = new CommandArgs<>(StringCodec.UTF8).add(1L);
assertThat(CommandArgsAccessor.getFirstString(args)).isNull();
}
@Test
void getFirstEncodedKeyShouldReturnFirstKey() {
CommandArgs<String, String> args = new CommandArgs<>(StringCodec.UTF8).addKey("one").addKey("two");
assertThat(CommandArgsAccessor.encodeFirstKey(args)).isEqualTo(ByteBuffer.wrap("one".getBytes()));
}
@Test
void addValues() {
CommandArgs<String, String> args = new CommandArgs<>(StringCodec.UTF8).addValues(Arrays.asList("1", "2"));
ByteBuf buffer = Unpooled.buffer();
args.encode(buffer);
ByteBuf expected = Unpooled.buffer();
expected.writeBytes(("$1\r\n" + "1\r\n" + "$1\r\n" + "2\r\n").getBytes());
assertThat(buffer.toString(StandardCharsets.US_ASCII)).isEqualTo(expected.toString(StandardCharsets.US_ASCII));
}
@Test
void addByte() {
CommandArgs<String, String> args = new CommandArgs<>(StringCodec.UTF8).add("one".getBytes());
ByteBuf buffer = Unpooled.buffer();
args.encode(buffer);
ByteBuf expected = Unpooled.buffer();
expected.writeBytes(("$3\r\n" + "one\r\n").getBytes());
assertThat(buffer.toString(StandardCharsets.US_ASCII)).isEqualTo(expected.toString(StandardCharsets.US_ASCII));
}
@Test
void addByteUsingByteCodec() {
CommandArgs<byte[], byte[]> args = new CommandArgs<>(ByteArrayCodec.INSTANCE).add("one".getBytes());
ByteBuf buffer = Unpooled.buffer();
args.encode(buffer);
ByteBuf expected = Unpooled.buffer();
expected.writeBytes(("$3\r\n" + "one\r\n").getBytes());
assertThat(buffer.toString(StandardCharsets.US_ASCII)).isEqualTo(expected.toString(StandardCharsets.US_ASCII));
}
@Test
void addValueUsingByteCodec() {
CommandArgs<byte[], byte[]> args = new CommandArgs<>(ByteArrayCodec.INSTANCE).addValue("one".getBytes());
ByteBuf buffer = Unpooled.buffer();
args.encode(buffer);
ByteBuf expected = Unpooled.buffer();
expected.writeBytes(("$3\r\n" + "one\r\n").getBytes());
assertThat(buffer.toString(StandardCharsets.US_ASCII)).isEqualTo(expected.toString(StandardCharsets.US_ASCII));
}
@Test
void addKeyUsingByteCodec() {
CommandArgs<byte[], byte[]> args = new CommandArgs<>(ByteArrayCodec.INSTANCE).addValue("one".getBytes());
ByteBuf buffer = Unpooled.buffer();
args.encode(buffer);
ByteBuf expected = Unpooled.buffer();
expected.writeBytes(("$3\r\n" + "one\r\n").getBytes());
assertThat(buffer.toString(StandardCharsets.US_ASCII)).isEqualTo(expected.toString(StandardCharsets.US_ASCII));
}
}
|
CommandArgsUnitTests
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/bindinggraphvalidation/SetMultibindingValidationTest.java
|
{
"start": 1520,
"end": 1774
}
|
interface ____ {}");
private static final Source FOO_IMPL =
CompilerTests.javaSource(
"test.FooImpl",
"package test;",
"",
"import javax.inject.Inject;",
"",
"public final
|
Foo
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/annotations/embeddables/EmbeddableWithGenericAndMappedSuperClassTest.java
|
{
"start": 6996,
"end": 7152
}
|
class ____ {
@Id
private Long id;
public Base() {
}
public Base(Long id) {
this.id = id;
}
}
@MappedSuperclass
public static abstract
|
Base
|
java
|
apache__kafka
|
server-common/src/test/java/org/apache/kafka/timeline/TimelineIntegerTest.java
|
{
"start": 1075,
"end": 3166
}
|
class ____ {
@Test
public void testModifyValue() {
SnapshotRegistry registry = new SnapshotRegistry(new LogContext());
TimelineInteger integer = new TimelineInteger(registry);
assertEquals(0, integer.get());
assertEquals(0, integer.get(Long.MAX_VALUE));
integer.set(1);
integer.set(2);
assertEquals(2, integer.get());
assertEquals(2, integer.get(Long.MAX_VALUE));
}
@Test
public void testToStringAndEquals() {
SnapshotRegistry registry = new SnapshotRegistry(new LogContext());
TimelineInteger integer = new TimelineInteger(registry);
assertEquals("0", integer.toString());
integer.set(1);
TimelineInteger integer2 = new TimelineInteger(registry);
integer2.set(1);
assertEquals("1", integer2.toString());
assertEquals(integer, integer2);
}
@Test
public void testSnapshot() {
SnapshotRegistry registry = new SnapshotRegistry(new LogContext());
TimelineInteger integer = new TimelineInteger(registry);
registry.getOrCreateSnapshot(2);
integer.set(1);
registry.getOrCreateSnapshot(3);
integer.set(2);
integer.increment();
integer.increment();
integer.decrement();
registry.getOrCreateSnapshot(4);
assertEquals(0, integer.get(2));
assertEquals(1, integer.get(3));
assertEquals(3, integer.get(4));
registry.revertToSnapshot(3);
assertEquals(1, integer.get());
registry.revertToSnapshot(2);
assertEquals(0, integer.get());
}
@Test
public void testReset() {
SnapshotRegistry registry = new SnapshotRegistry(new LogContext());
TimelineInteger value = new TimelineInteger(registry);
registry.getOrCreateSnapshot(2);
value.set(1);
registry.getOrCreateSnapshot(3);
value.set(2);
registry.reset();
assertEquals(List.of(), registry.epochsList());
assertEquals(TimelineInteger.INIT, value.get());
}
}
|
TimelineIntegerTest
|
java
|
micronaut-projects__micronaut-core
|
http-server-tck/src/main/java/io/micronaut/http/server/tck/tests/forms/FormUrlEncodedBodyInRequestFilterTest.java
|
{
"start": 4652,
"end": 5768
}
|
class ____ {
private final FilterBodyParser bodyParser;
CsrfFilter(FilterBodyParser bodyParser) {
this.bodyParser = bodyParser;
}
@ExecuteOn(TaskExecutors.BLOCKING)
@RequestFilter
@Nullable
public HttpResponse<?> csrfFilter(@NonNull HttpRequest<?> request) {
Map<String, Object> body = null;
try {
body = bodyParser.parseBody(request).get();
} catch (InterruptedException e) {
return HttpResponse.unauthorized();
} catch (ExecutionException e) {
return HttpResponse.unauthorized();
}
return body.containsKey("csrfToken") && body.get("csrfToken").equals("abcde") ? null : HttpResponse.unauthorized();
}
}
@Introspected
@ReflectiveAccess
record PasswordChange(
String username,
String password) {
}
@Introspected
@ReflectiveAccess
record PasswordChangeForm(
String username,
String password,
String csrfToken) {
}
}
|
CsrfFilter
|
java
|
quarkusio__quarkus
|
extensions/hibernate-validator/deployment/src/main/java/io/quarkus/hibernate/validator/deployment/SimpleMethodSignatureKey.java
|
{
"start": 189,
"end": 1202
}
|
class ____ {
private final String key;
SimpleMethodSignatureKey(MethodInfo method) {
// Notes:
// - MethodInfo.toString() is not usable here because it includes the declaring class
// - just parameters() for the second part would include annotations (see Type.toString())
key = method.name() + method.parameterTypes().stream()
.map(Type::name)
.map(DotName::toString)
.collect(Collectors.joining(", ", "(", ")"));
}
@Override
public int hashCode() {
return key.hashCode();
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
SimpleMethodSignatureKey other = (SimpleMethodSignatureKey) obj;
return key.equals(other.key);
}
@Override
public String toString() {
return key;
}
}
|
SimpleMethodSignatureKey
|
java
|
google__auto
|
value/src/test/java/com/google/auto/value/processor/AutoValueCompilationTest.java
|
{
"start": 40489,
"end": 41217
}
|
class ____<T extends Number> {",
" public abstract int anInt();",
" @SuppressWarnings(\"mutable\")",
" public abstract byte[] aByteArray();",
" @SuppressWarnings(\"mutable\")",
" @Nullable public abstract int[] aNullableIntArray();",
" public abstract List<T> aList();",
" public abstract ImmutableMap<T, String> anImmutableMap();",
" public abstract Optional<String> anOptionalString();",
" public abstract NestedAutoValue<T> aNestedAutoValue();",
"",
" public abstract Builder<T> toBuilder();",
"",
" @AutoValue.Builder",
" public abstract static
|
Baz
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/pool/exception/OracleExceptionSorterTest_stmt_getWarrnings.java
|
{
"start": 586,
"end": 2540
}
|
class ____ extends TestCase {
private DruidDataSource dataSource;
protected void setUp() throws Exception {
assertEquals(0, JdbcStatManager.getInstance().getSqlList().size());
dataSource = new DruidDataSource();
dataSource.setExceptionSorter(new OracleExceptionSorter());
dataSource.setDriver(new OracleMockDriver());
dataSource.setUrl("jdbc:mock:xxx");
dataSource.setPoolPreparedStatements(true);
dataSource.setMaxOpenPreparedStatements(100);
}
@Override
protected void tearDown() throws Exception {
JdbcUtils.close(dataSource);
assertEquals(0, DruidDataSourceStatManager.getInstance().getDataSourceList().size());
}
public void test_connect() throws Exception {
String sql = "SELECT 1";
{
DruidPooledConnection conn = dataSource.getConnection();
PreparedStatement pstmt = conn.prepareStatement(sql);
pstmt.execute();
pstmt.close();
conn.close();
}
DruidPooledConnection conn = dataSource.getConnection();
MockConnection mockConn = conn.unwrap(MockConnection.class);
assertNotNull(mockConn);
Statement stmt = conn.createStatement();
stmt.execute(sql);
SQLException exception = new SQLException("xx", "xxx", 28);
mockConn.setError(exception);
SQLException stmtErrror = null;
try {
stmt.getWarnings();
} catch (SQLException ex) {
stmtErrror = ex;
}
assertNotNull(stmtErrror);
assertSame(exception, stmtErrror);
SQLException commitError = null;
try {
conn.commit();
} catch (SQLException ex) {
commitError = ex;
}
assertNotNull(commitError);
assertSame(exception, commitError.getCause());
conn.close();
}
}
|
OracleExceptionSorterTest_stmt_getWarrnings
|
java
|
apache__flink
|
flink-core/src/main/java/org/apache/flink/configuration/ClusterOptions.java
|
{
"start": 11648,
"end": 12335
}
|
enum ____ implements DescribedEnum {
DISABLED(text("Flink is not monitoring or intercepting calls to System.exit()")),
LOG(text("Log exit attempt with stack trace but still allowing exit to be performed")),
THROW(text("Throw exception when exit is attempted disallowing JVM termination"));
private final InlineElement description;
UserSystemExitMode(InlineElement description) {
this.description = description;
}
@Override
public InlineElement getDescription() {
return description;
}
}
/**
* @see ClusterOptions#UNCAUGHT_EXCEPTION_HANDLING
*/
public
|
UserSystemExitMode
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/ConstantFieldTest.java
|
{
"start": 3920,
"end": 4180
}
|
class ____ {
Object CONSTANT_CASE = 42;
void f() {
System.err.println(CONSTANT_CASE);
}
}
""")
.addOutputLines(
"out/Test.java",
"""
|
Test
|
java
|
elastic__elasticsearch
|
client/rest/src/main/java/org/elasticsearch/client/RequestOptions.java
|
{
"start": 11063,
"end": 11733
}
|
class ____ extends BasicHeader {
ReqHeader(String name, String value) {
super(name, value);
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other instanceof ReqHeader) {
Header otherHeader = (Header) other;
return Objects.equals(getName(), otherHeader.getName()) && Objects.equals(getValue(), otherHeader.getValue());
}
return false;
}
@Override
public int hashCode() {
return Objects.hash(getName(), getValue());
}
}
}
|
ReqHeader
|
java
|
google__error-prone
|
core/src/main/java/com/google/errorprone/bugpatterns/threadsafety/GuardedByExpression.java
|
{
"start": 1933,
"end": 2019
}
|
class ____ (e.g. ClassName.fieldName).
*/
@AutoValue
public abstract static
|
literal
|
java
|
google__guava
|
android/guava-testlib/src/com/google/common/collect/testing/features/CollectionFeature.java
|
{
"start": 1225,
"end": 4133
}
|
enum ____ implements Feature<Collection> {
/**
* The collection must not throw {@code NullPointerException} on calls such as {@code
* contains(null)} or {@code remove(null)}, but instead must return a simple {@code false}.
*/
ALLOWS_NULL_QUERIES,
ALLOWS_NULL_VALUES(ALLOWS_NULL_QUERIES),
/**
* Indicates that a collection disallows certain elements (other than {@code null}, whose validity
* as an element is indicated by the presence or absence of {@link #ALLOWS_NULL_VALUES}). From the
* documentation for {@link Collection}:
*
* <blockquote>
*
* "Some collection implementations have restrictions on the elements that they may contain. For
* example, some implementations prohibit null elements, and some have restrictions on the types
* of their elements."
*
* </blockquote>
*/
RESTRICTS_ELEMENTS,
/**
* Indicates that a collection has a well-defined ordering of its elements. The ordering may
* depend on the element values, such as a {@link SortedSet}, or on the insertion ordering, such
* as a {@link LinkedHashSet}. All list tests and sorted-collection tests automatically specify
* this feature.
*/
KNOWN_ORDER,
/**
* Indicates that a collection has a different {@link Object#toString} representation than most
* collections. If not specified, the collection tests will examine the value returned by {@link
* Object#toString}.
*/
NON_STANDARD_TOSTRING,
/**
* Indicates that the constructor or factory method of a collection, usually an immutable set,
* throws an {@link IllegalArgumentException} when presented with duplicate elements instead of
* collapsing them to a single element or including duplicate instances in the collection.
*/
REJECTS_DUPLICATES_AT_CREATION,
SUPPORTS_ADD,
SUPPORTS_REMOVE,
SUPPORTS_ITERATOR_REMOVE,
FAILS_FAST_ON_CONCURRENT_MODIFICATION,
/**
* Features supported by general-purpose collections - everything but {@link #RESTRICTS_ELEMENTS}.
*
* @see java.util.Collection the definition of general-purpose collections.
*/
GENERAL_PURPOSE(SUPPORTS_ADD, SUPPORTS_REMOVE, SUPPORTS_ITERATOR_REMOVE),
/** Features supported by collections where only removal is allowed. */
REMOVE_OPERATIONS(SUPPORTS_REMOVE, SUPPORTS_ITERATOR_REMOVE),
SERIALIZABLE,
SERIALIZABLE_INCLUDING_VIEWS(SERIALIZABLE),
SUBSET_VIEW,
DESCENDING_VIEW,
/**
* For documenting collections that support no optional features, such as {@link
* java.util.Collections#emptySet}
*/
NONE;
private final Set<Feature<? super Collection>> implied;
CollectionFeature(Feature<? super Collection>... implied) {
this.implied = copyToSet(implied);
}
@Override
public Set<Feature<? super Collection>> getImpliedFeatures() {
return implied;
}
@Retention(RetentionPolicy.RUNTIME)
@Inherited
@TesterAnnotation
public @
|
CollectionFeature
|
java
|
apache__avro
|
lang/java/avro/src/test/java/org/apache/avro/reflect/TestReflect.java
|
{
"start": 12834,
"end": 13914
}
|
class ____ {
@Nullable
private String text;
@Override
public boolean equals(Object o) {
if (!(o instanceof R11))
return false;
R11 that = (R11) o;
if (this.text == null)
return that.text == null;
return this.text.equals(that.text);
}
}
@Test
void r11() throws Exception {
Schema r11Record = ReflectData.get().getSchema(R11.class);
assertEquals(Schema.Type.RECORD, r11Record.getType());
Field r11Field = r11Record.getField("text");
assertEquals(JsonProperties.NULL_VALUE, r11Field.defaultVal());
Schema r11FieldSchema = r11Field.schema();
assertEquals(Schema.Type.UNION, r11FieldSchema.getType());
assertEquals(Schema.Type.NULL, r11FieldSchema.getTypes().get(0).getType());
Schema r11String = r11FieldSchema.getTypes().get(1);
assertEquals(Schema.Type.STRING, r11String.getType());
R11 r11 = new R11();
checkReadWrite(r11, r11Record);
r11.text = "foo";
checkReadWrite(r11, r11Record);
}
// test nullable annotation on methods and parameters
public static
|
R11
|
java
|
quarkusio__quarkus
|
extensions/redis-client/runtime/src/main/java/io/quarkus/redis/runtime/client/ObservableRedis.java
|
{
"start": 411,
"end": 502
}
|
interface ____ tracks the duration of each operation for observability purpose.
*/
public
|
that
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/PreferredInterfaceTypeTest.java
|
{
"start": 12580,
"end": 13084
}
|
class ____ {
// BUG: Diagnostic contains: final ImmutableList<String> foo()
final List<String> foo() {
return ImmutableList.of();
}
}
""")
.doTest();
}
@Test
public void returnTypeList_singleReturnStatementArrayList_doesNotSuggestFix() {
testHelper
.addSourceLines(
"Test.java",
"""
import java.util.ArrayList;
import java.util.List;
|
Test
|
java
|
spring-projects__spring-framework
|
spring-web/src/test/java/org/springframework/web/method/HandlerMethodTests.java
|
{
"start": 8054,
"end": 8189
}
|
class ____ extends GenericAbstractSuperclass<String> {
@Override
public void processTwo(String value) {
}
}
}
|
GenericInterfaceImpl
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/ClassificationInferenceResults.java
|
{
"start": 927,
"end": 9976
}
|
class ____ extends SingleValueInferenceResults {
public static final String PREDICTION_PROBABILITY = "prediction_probability";
public static final String NAME = "classification";
public static final String PREDICTION_SCORE = "prediction_score";
private final String topNumClassesField;
// Accessed in sub-classes
protected final String resultsField;
private final String classificationLabel;
private final Double predictionProbability;
private final Double predictionScore;
private final List<TopClassEntry> topClasses;
private final List<ClassificationFeatureImportance> featureImportance;
private final PredictionFieldType predictionFieldType;
public ClassificationInferenceResults(
double value,
String classificationLabel,
List<TopClassEntry> topClasses,
List<ClassificationFeatureImportance> featureImportance,
InferenceConfig config,
Double predictionProbability,
Double predictionScore
) {
this(
value,
classificationLabel,
topClasses,
featureImportance,
(ClassificationConfig) config,
predictionProbability,
predictionScore
);
}
private ClassificationInferenceResults(
double value,
String classificationLabel,
List<TopClassEntry> topClasses,
List<ClassificationFeatureImportance> featureImportance,
ClassificationConfig classificationConfig,
Double predictionProbability,
Double predictionScore
) {
this(
value,
classificationLabel,
topClasses,
featureImportance,
classificationConfig.getTopClassesResultsField(),
classificationConfig.getResultsField(),
classificationConfig.getPredictionFieldType(),
classificationConfig.getNumTopFeatureImportanceValues(),
predictionProbability,
predictionScore
);
}
public ClassificationInferenceResults(
double value,
String classificationLabel,
List<TopClassEntry> topClasses,
List<ClassificationFeatureImportance> featureImportance,
String topNumClassesField,
String resultsField,
PredictionFieldType predictionFieldType,
int numTopFeatureImportanceValues,
Double predictionProbability,
Double predictionScore
) {
super(value);
this.classificationLabel = classificationLabel;
this.topClasses = topClasses == null ? Collections.emptyList() : Collections.unmodifiableList(topClasses);
this.topNumClassesField = topNumClassesField;
this.resultsField = resultsField;
this.predictionFieldType = predictionFieldType;
this.predictionProbability = predictionProbability;
this.predictionScore = predictionScore;
this.featureImportance = takeTopFeatureImportances(featureImportance, numTopFeatureImportanceValues);
}
static List<ClassificationFeatureImportance> takeTopFeatureImportances(
List<ClassificationFeatureImportance> featureImportances,
int numTopFeatures
) {
if (featureImportances == null || featureImportances.isEmpty()) {
return Collections.emptyList();
}
return featureImportances.stream()
.sorted((l, r) -> Double.compare(r.getTotalImportance(), l.getTotalImportance()))
.limit(numTopFeatures)
.toList();
}
public ClassificationInferenceResults(StreamInput in) throws IOException {
super(in);
this.featureImportance = in.readCollectionAsList(ClassificationFeatureImportance::new);
this.classificationLabel = in.readOptionalString();
this.topClasses = in.readCollectionAsImmutableList(TopClassEntry::new);
this.topNumClassesField = in.readString();
this.resultsField = in.readString();
this.predictionFieldType = in.readEnum(PredictionFieldType.class);
this.predictionProbability = in.readOptionalDouble();
this.predictionScore = in.readOptionalDouble();
}
public String getClassificationLabel() {
return classificationLabel;
}
public List<TopClassEntry> getTopClasses() {
return topClasses;
}
public PredictionFieldType getPredictionFieldType() {
return predictionFieldType;
}
public List<ClassificationFeatureImportance> getFeatureImportance() {
return featureImportance;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeCollection(featureImportance);
out.writeOptionalString(classificationLabel);
out.writeCollection(topClasses);
out.writeString(topNumClassesField);
out.writeString(resultsField);
out.writeEnum(predictionFieldType);
out.writeOptionalDouble(predictionProbability);
out.writeOptionalDouble(predictionScore);
}
@Override
public boolean equals(Object object) {
if (object == this) {
return true;
}
if (object == null || getClass() != object.getClass()) {
return false;
}
ClassificationInferenceResults that = (ClassificationInferenceResults) object;
return Objects.equals(value(), that.value())
&& Objects.equals(classificationLabel, that.classificationLabel)
&& Objects.equals(resultsField, that.resultsField)
&& Objects.equals(topNumClassesField, that.topNumClassesField)
&& Objects.equals(topClasses, that.topClasses)
&& Objects.equals(predictionFieldType, that.predictionFieldType)
&& Objects.equals(predictionProbability, that.predictionProbability)
&& Objects.equals(predictionScore, that.predictionScore)
&& Objects.equals(featureImportance, that.featureImportance);
}
@Override
public int hashCode() {
return Objects.hash(
value(),
classificationLabel,
topClasses,
resultsField,
topNumClassesField,
predictionProbability,
predictionScore,
featureImportance,
predictionFieldType
);
}
@Override
public String valueAsString() {
return classificationLabel == null ? super.valueAsString() : classificationLabel;
}
@Override
public Object predictedValue() {
return predictionFieldType.transformPredictedValue(value(), valueAsString());
}
public Double getPredictionProbability() {
return predictionProbability;
}
public Double getPredictionScore() {
return predictionScore;
}
@Override
public String getResultsField() {
return resultsField;
}
@Override
public Map<String, Object> asMap() {
Map<String, Object> map = new LinkedHashMap<>();
map.put(resultsField, predictionFieldType.transformPredictedValue(value(), valueAsString()));
addSupportingFieldsToMap(map);
return map;
}
@Override
public Map<String, Object> asMap(String outputField) {
Map<String, Object> map = new LinkedHashMap<>();
map.put(outputField, predictionFieldType.transformPredictedValue(value(), valueAsString()));
addSupportingFieldsToMap(map);
return map;
}
private void addSupportingFieldsToMap(Map<String, Object> map) {
if (topClasses.isEmpty() == false) {
map.put(topNumClassesField, topClasses.stream().map(TopClassEntry::asValueMap).collect(Collectors.toList()));
}
if (predictionProbability != null) {
map.put(PREDICTION_PROBABILITY, predictionProbability);
}
if (predictionScore != null) {
map.put(PREDICTION_SCORE, predictionScore);
}
if (featureImportance.isEmpty() == false) {
map.put(
FEATURE_IMPORTANCE,
featureImportance.stream().map(ClassificationFeatureImportance::toMap).collect(Collectors.toList())
);
}
}
@Override
public String getWriteableName() {
return NAME;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field(resultsField, predictionFieldType.transformPredictedValue(value(), valueAsString()));
if (topClasses.size() > 0) {
builder.field(topNumClassesField, topClasses);
}
if (predictionProbability != null) {
builder.field(PREDICTION_PROBABILITY, predictionProbability);
}
if (predictionScore != null) {
builder.field(PREDICTION_SCORE, predictionScore);
}
if (featureImportance.isEmpty() == false) {
builder.field(FEATURE_IMPORTANCE, featureImportance);
}
return builder;
}
}
|
ClassificationInferenceResults
|
java
|
elastic__elasticsearch
|
server/src/internalClusterTest/java/org/elasticsearch/document/ShardInfoIT.java
|
{
"start": 1476,
"end": 6018
}
|
class ____ extends ESIntegTestCase {
private int numCopies;
private int numNodes;
public void testIndexAndDelete() throws Exception {
prepareIndex(1);
DocWriteResponse indexResponse = prepareIndex("idx").setSource("{}", XContentType.JSON).get();
assertShardInfo(indexResponse);
DeleteResponse deleteResponse = client().prepareDelete("idx", indexResponse.getId()).get();
assertShardInfo(deleteResponse);
}
public void testUpdate() throws Exception {
prepareIndex(1);
UpdateResponse updateResponse = client().prepareUpdate("idx", "1").setDoc("{}", XContentType.JSON).setDocAsUpsert(true).get();
assertShardInfo(updateResponse);
}
public void testBulkWithIndexAndDeleteItems() throws Exception {
prepareIndex(1);
BulkRequestBuilder bulkRequestBuilder = client().prepareBulk();
for (int i = 0; i < 10; i++) {
bulkRequestBuilder.add(prepareIndex("idx").setSource("{}", XContentType.JSON));
}
BulkResponse bulkResponse = bulkRequestBuilder.get();
bulkRequestBuilder = client().prepareBulk();
for (BulkItemResponse item : bulkResponse) {
assertThat(item.isFailed(), equalTo(false));
assertShardInfo(item.getResponse());
bulkRequestBuilder.add(client().prepareDelete("idx", item.getId()));
}
bulkResponse = bulkRequestBuilder.get();
for (BulkItemResponse item : bulkResponse) {
assertThat(item.isFailed(), equalTo(false));
assertShardInfo(item.getResponse());
}
}
public void testBulkWithUpdateItems() throws Exception {
prepareIndex(1);
BulkRequestBuilder bulkRequestBuilder = client().prepareBulk();
for (int i = 0; i < 10; i++) {
bulkRequestBuilder.add(client().prepareUpdate("idx", Integer.toString(i)).setDoc("{}", XContentType.JSON).setDocAsUpsert(true));
}
BulkResponse bulkResponse = bulkRequestBuilder.get();
for (BulkItemResponse item : bulkResponse) {
assertThat(item.getFailure(), nullValue());
assertThat(item.isFailed(), equalTo(false));
assertShardInfo(item.getResponse());
}
}
private void prepareIndex(int numberOfPrimaryShards) throws Exception {
prepareIndex(numberOfPrimaryShards, false);
}
private void prepareIndex(int numberOfPrimaryShards, boolean routingRequired) throws Exception {
numNodes = cluster().numDataNodes();
logger.info("Number of nodes: {}", numNodes);
int maxNumberOfCopies = (numNodes * 2) - 1;
numCopies = randomIntBetween(numNodes, maxNumberOfCopies);
logger.info("Number of copies: {}", numCopies);
assertAcked(
prepareCreate("idx").setSettings(indexSettings(numberOfPrimaryShards, numCopies - 1))
.setMapping("_routing", "required=" + routingRequired)
);
for (int i = 0; i < numberOfPrimaryShards; i++) {
ensureActiveShardCopies(i, numNodes);
}
}
private void assertShardInfo(ReplicationResponse response) {
assertShardInfo(response, numCopies, numNodes);
}
private void assertShardInfo(ReplicationResponse response, int expectedTotal, int expectedSuccessful) {
assertThat(response.getShardInfo().getTotal(), greaterThanOrEqualTo(expectedTotal));
assertThat(response.getShardInfo().getSuccessful(), greaterThanOrEqualTo(expectedSuccessful));
}
private void ensureActiveShardCopies(final int shardId, final int copyCount) throws Exception {
assertBusy(() -> {
ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
assertThat(state.routingTable().index("idx"), not(nullValue()));
assertThat(state.routingTable().index("idx").shard(shardId), not(nullValue()));
assertThat(state.routingTable().index("idx").shard(shardId).activeShards().size(), equalTo(copyCount));
ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "idx")
.setWaitForNoRelocatingShards(true)
.get();
assertThat(healthResponse.isTimedOut(), equalTo(false));
RecoveryResponse recoveryResponse = indicesAdmin().prepareRecoveries("idx").setActiveOnly(true).get();
assertThat(recoveryResponse.shardRecoveryStates().get("idx").size(), equalTo(0));
});
}
}
|
ShardInfoIT
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/LangChain4jWebSearchEndpointBuilderFactory.java
|
{
"start": 12580,
"end": 16300
}
|
interface ____
extends
EndpointProducerBuilder {
default LangChain4jWebSearchEndpointBuilder basic() {
return (LangChain4jWebSearchEndpointBuilder) this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedLangChain4jWebSearchEndpointBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedLangChain4jWebSearchEndpointBuilder lazyStartProducer(String lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* The webSearchRequest is the custom WebSearchRequest - advanced.
*
* The option is a:
* <code>dev.langchain4j.web.search.WebSearchRequest</code> type.
*
* Group: advanced
*
* @param webSearchRequest the value to set
* @return the dsl builder
*/
default AdvancedLangChain4jWebSearchEndpointBuilder webSearchRequest(dev.langchain4j.web.search.WebSearchRequest webSearchRequest) {
doSetProperty("webSearchRequest", webSearchRequest);
return this;
}
/**
* The webSearchRequest is the custom WebSearchRequest - advanced.
*
* The option will be converted to a
* <code>dev.langchain4j.web.search.WebSearchRequest</code> type.
*
* Group: advanced
*
* @param webSearchRequest the value to set
* @return the dsl builder
*/
default AdvancedLangChain4jWebSearchEndpointBuilder webSearchRequest(String webSearchRequest) {
doSetProperty("webSearchRequest", webSearchRequest);
return this;
}
}
public
|
AdvancedLangChain4jWebSearchEndpointBuilder
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/api/DoublePredicateAssertBaseTest.java
|
{
"start": 923,
"end": 1454
}
|
class ____ extends BaseTestTemplate<DoublePredicateAssert, DoublePredicate> {
protected Iterables iterables;
protected Predicate<Double> wrapped;
@Override
protected DoublePredicateAssert create_assertions() {
return new DoublePredicateAssert(value -> value <= 2);
}
@Override
protected void inject_internal_objects() {
super.inject_internal_objects();
iterables = mock(Iterables.class);
assertions.iterables = iterables;
wrapped = assertions.primitivePredicate;
}
}
|
DoublePredicateAssertBaseTest
|
java
|
processing__processing4
|
core/examples/src/main/java/Issue931.java
|
{
"start": 94,
"end": 606
}
|
class ____ extends PApplet {
public void draw(){
background(frameCount % 256);
text("Hello World "+frameCount, 10, 10);
frameRate(9999);
surface.setSize(frameCount + 100, 100);
}
public static void main(String[] passedArgs) {
String[] appletArgs = new String[]{ Issue931.class.getName()};
if (passedArgs != null) {
PApplet.main(concat(appletArgs, passedArgs));
} else {
PApplet.main(appletArgs);
}
}
}
|
Issue931
|
java
|
apache__camel
|
components/camel-mybatis/src/main/java/org/apache/camel/component/mybatis/MyBatisProcessingStrategy.java
|
{
"start": 986,
"end": 2024
}
|
interface ____ {
/**
* Called when record is being queried.
*
* @param consumer the consumer
* @param endpoint the endpoint
* @return Results of the query as a {@link List}
* @throws Exception can be thrown in case of error
*/
List<?> poll(MyBatisConsumer consumer, MyBatisEndpoint endpoint) throws Exception;
/**
* Commit callback if there are a statements to be run after processing.
*
* @param endpoint the endpoint
* @param exchange The exchange after it has been processed
* @param data The original data delivered to the route
* @param consumeStatements Name of the statement(s) to run, will use SQL update. Use comma to provide multiple
* statements to run.
* @throws Exception can be thrown in case of error
*/
void commit(MyBatisEndpoint endpoint, Exchange exchange, Object data, String consumeStatements) throws Exception;
}
|
MyBatisProcessingStrategy
|
java
|
spring-projects__spring-boot
|
core/spring-boot/src/test/java/org/springframework/boot/context/properties/ConfigurationPropertiesBeanRegistrarTests.java
|
{
"start": 6459,
"end": 6593
}
|
class ____ {
}
@ConfigurationProperties("beancp")
@Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE)
static
|
BeanConfigurationProperties
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockSerializationTests.java
|
{
"start": 1420,
"end": 24648
}
|
class ____ extends SerializationTestCase {
private static final TransportVersion ESQL_AGGREGATE_METRIC_DOUBLE_BLOCK = TransportVersion.fromName(
"esql_aggregate_metric_double_block"
);
public void testConstantIntBlock() throws IOException {
assertConstantBlockImpl(blockFactory.newConstantIntBlockWith(randomInt(), randomIntBetween(1, 8192)));
}
public void testConstantLongBlockLong() throws IOException {
assertConstantBlockImpl(blockFactory.newConstantLongBlockWith(randomLong(), randomIntBetween(1, 8192)));
}
public void testConstantFloatBlock() throws IOException {
assertConstantBlockImpl(blockFactory.newConstantFloatBlockWith(randomFloat(), randomIntBetween(1, 8192)));
}
public void testConstantDoubleBlock() throws IOException {
assertConstantBlockImpl(blockFactory.newConstantDoubleBlockWith(randomDouble(), randomIntBetween(1, 8192)));
}
public void testConstantBytesRefBlock() throws IOException {
Block block = blockFactory.newConstantBytesRefBlockWith(
new BytesRef(((Integer) randomInt()).toString()),
randomIntBetween(1, 8192)
);
assertConstantBlockImpl(block);
}
private void assertConstantBlockImpl(Block origBlock) throws IOException {
assertThat(origBlock.asVector().isConstant(), is(true));
try (origBlock; Block deserBlock = serializeDeserializeBlock(origBlock)) {
EqualsHashCodeTestUtils.checkEqualsAndHashCode(origBlock, unused -> deserBlock);
assertThat(deserBlock.asVector().isConstant(), is(true));
}
}
public void testEmptyIntBlock() throws IOException {
assertEmptyBlock(blockFactory.newIntBlockBuilder(0).build());
try (IntBlock toFilter = blockFactory.newIntBlockBuilder(0).appendNull().build()) {
assertEmptyBlock(toFilter.filter());
}
assertEmptyBlock(blockFactory.newIntVectorBuilder(0).build().asBlock());
try (IntVector toFilter = blockFactory.newIntVectorBuilder(0).appendInt(randomInt()).build()) {
assertEmptyBlock(toFilter.filter().asBlock());
}
}
public void testEmptyLongBlock() throws IOException {
assertEmptyBlock(blockFactory.newLongBlockBuilder(0).build());
try (LongBlock toFilter = blockFactory.newLongBlockBuilder(0).appendNull().build()) {
assertEmptyBlock(toFilter.filter());
}
assertEmptyBlock(blockFactory.newLongVectorBuilder(0).build().asBlock());
try (LongVector toFilter = blockFactory.newLongVectorBuilder(0).appendLong(randomLong()).build()) {
assertEmptyBlock(toFilter.filter().asBlock());
}
}
public void testEmptyFloatBlock() throws IOException {
assertEmptyBlock(blockFactory.newFloatBlockBuilder(0).build());
try (FloatBlock toFilter = blockFactory.newFloatBlockBuilder(0).appendNull().build()) {
assertEmptyBlock(toFilter.filter());
}
assertEmptyBlock(blockFactory.newFloatVectorBuilder(0).build().asBlock());
try (FloatVector toFilter = blockFactory.newFloatVectorBuilder(0).appendFloat(randomFloat()).build()) {
assertEmptyBlock(toFilter.filter().asBlock());
}
}
public void testEmptyDoubleBlock() throws IOException {
assertEmptyBlock(blockFactory.newDoubleBlockBuilder(0).build());
try (DoubleBlock toFilter = blockFactory.newDoubleBlockBuilder(0).appendNull().build()) {
assertEmptyBlock(toFilter.filter());
}
assertEmptyBlock(blockFactory.newDoubleVectorBuilder(0).build().asBlock());
try (DoubleVector toFilter = blockFactory.newDoubleVectorBuilder(0).appendDouble(randomDouble()).build()) {
assertEmptyBlock(toFilter.filter().asBlock());
}
}
public void testEmptyBytesRefBlock() throws IOException {
assertEmptyBlock(blockFactory.newBytesRefBlockBuilder(0).build());
try (BytesRefBlock toFilter = blockFactory.newBytesRefBlockBuilder(0).appendNull().build()) {
assertEmptyBlock(toFilter.filter());
}
assertEmptyBlock(blockFactory.newBytesRefVectorBuilder(0).build().asBlock());
try (BytesRefVector toFilter = blockFactory.newBytesRefVectorBuilder(0).appendBytesRef(randomBytesRef()).build()) {
assertEmptyBlock(toFilter.filter().asBlock());
}
}
public void testEmptyAggregateMetricDoubleBlock() throws IOException {
assertEmptyBlock(blockFactory.newAggregateMetricDoubleBlockBuilder(0).build());
try (AggregateMetricDoubleBlock toFilter = blockFactory.newAggregateMetricDoubleBlockBuilder(0).appendNull().build()) {
assertEmptyBlock(toFilter.filter());
}
}
private void assertEmptyBlock(Block origBlock) throws IOException {
assertThat(origBlock.getPositionCount(), is(0));
try (origBlock; Block deserBlock = serializeDeserializeBlock(origBlock)) {
EqualsHashCodeTestUtils.checkEqualsAndHashCode(origBlock, unused -> deserBlock);
}
}
public void testFilterIntBlock() throws IOException {
try (IntBlock toFilter = blockFactory.newIntBlockBuilder(0).appendInt(1).appendInt(2).build()) {
assertFilterBlock(toFilter.filter(1));
}
try (IntBlock toFilter = blockFactory.newIntBlockBuilder(1).appendInt(randomInt()).appendNull().build()) {
assertFilterBlock(toFilter.filter(0));
}
try (IntVector toFilter = blockFactory.newIntVectorBuilder(1).appendInt(randomInt()).build()) {
assertFilterBlock(toFilter.filter(0).asBlock());
}
try (IntVector toFilter = blockFactory.newIntVectorBuilder(1).appendInt(randomInt()).appendInt(randomInt()).build()) {
assertFilterBlock(toFilter.filter(0).asBlock());
}
}
public void testFilterLongBlock() throws IOException {
try (LongBlock toFilter = blockFactory.newLongBlockBuilder(0).appendLong(1).appendLong(2).build()) {
assertFilterBlock(toFilter.filter(1));
}
try (LongBlock toFilter = blockFactory.newLongBlockBuilder(1).appendLong(randomLong()).appendNull().build()) {
assertFilterBlock(toFilter.filter(0));
}
try (LongVector toFilter = blockFactory.newLongVectorBuilder(1).appendLong(randomLong()).build()) {
assertFilterBlock(toFilter.filter(0).asBlock());
}
try (LongVector toFilter = blockFactory.newLongVectorBuilder(1).appendLong(randomLong()).appendLong(randomLong()).build()) {
assertFilterBlock(toFilter.filter(0).asBlock());
}
}
public void testFilterFloatBlock() throws IOException {
try (FloatBlock toFilter = blockFactory.newFloatBlockBuilder(0).appendFloat(1).appendFloat(2).build()) {
assertFilterBlock(toFilter.filter(1));
}
try (FloatBlock toFilter = blockFactory.newFloatBlockBuilder(1).appendFloat(randomFloat()).appendNull().build()) {
assertFilterBlock(toFilter.filter(0));
}
try (FloatVector toFilter = blockFactory.newFloatVectorBuilder(1).appendFloat(randomFloat()).build()) {
assertFilterBlock(toFilter.filter(0).asBlock());
}
try (FloatVector toFilter = blockFactory.newFloatVectorBuilder(1).appendFloat(randomFloat()).appendFloat(randomFloat()).build()) {
assertFilterBlock(toFilter.filter(0).asBlock());
}
}
public void testFilterDoubleBlock() throws IOException {
try (DoubleBlock toFilter = blockFactory.newDoubleBlockBuilder(0).appendDouble(1).appendDouble(2).build()) {
assertFilterBlock(toFilter.filter(1));
}
try (DoubleBlock toFilter = blockFactory.newDoubleBlockBuilder(1).appendDouble(randomDouble()).appendNull().build()) {
assertFilterBlock(toFilter.filter(0));
}
try (DoubleVector toFilter = blockFactory.newDoubleVectorBuilder(1).appendDouble(randomDouble()).build()) {
assertFilterBlock(toFilter.filter(0).asBlock());
}
try (
DoubleVector toFilter = blockFactory.newDoubleVectorBuilder(1).appendDouble(randomDouble()).appendDouble(randomDouble()).build()
) {
assertFilterBlock(toFilter.filter(0).asBlock());
}
}
public void testFilterBytesRefBlock() throws IOException {
try (
BytesRefBlock toFilter = blockFactory.newBytesRefBlockBuilder(0)
.appendBytesRef(randomBytesRef())
.appendBytesRef(randomBytesRef())
.build()
) {
assertFilterBlock(toFilter.filter(randomIntBetween(0, 1)));
}
try (BytesRefBlock toFilter = blockFactory.newBytesRefBlockBuilder(0).appendBytesRef(randomBytesRef()).appendNull().build()) {
assertFilterBlock(toFilter.filter(randomIntBetween(0, 1)));
}
try (BytesRefVector toFilter = blockFactory.newBytesRefVectorBuilder(0).appendBytesRef(randomBytesRef()).build()) {
assertFilterBlock(toFilter.asBlock().filter(0));
}
try (
BytesRefVector toFilter = blockFactory.newBytesRefVectorBuilder(0)
.appendBytesRef(randomBytesRef())
.appendBytesRef(randomBytesRef())
.build()
) {
assertFilterBlock(toFilter.asBlock().filter(randomIntBetween(0, 1)));
}
}
public void testFilterAggregateMetricDoubleBlock() throws IOException {
{
var builder = blockFactory.newAggregateMetricDoubleBlockBuilder(0);
builder.min().appendDouble(randomDouble());
builder.max().appendDouble(randomDouble());
builder.sum().appendDouble(randomDouble());
builder.count().appendInt(randomInt());
builder.min().appendDouble(randomDouble());
builder.max().appendDouble(randomDouble());
builder.sum().appendDouble(randomDouble());
builder.count().appendInt(randomInt());
try (AggregateMetricDoubleBlock toFilter = builder.build()) {
assertFilterBlock(toFilter.filter(randomIntBetween(0, 1)));
}
}
{
var builder = blockFactory.newAggregateMetricDoubleBlockBuilder(0);
builder.min().appendDouble(randomDouble());
builder.max().appendDouble(randomDouble());
builder.sum().appendDouble(randomDouble());
builder.count().appendInt(randomInt());
builder.appendNull();
try (AggregateMetricDoubleBlock toFilter = builder.build()) {
assertFilterBlock(toFilter.filter(randomIntBetween(0, 1)));
}
}
}
private void assertFilterBlock(Block origBlock) throws IOException {
assertThat(origBlock.getPositionCount(), is(1));
try (origBlock; Block deserBlock = serializeDeserializeBlock(origBlock)) {
EqualsHashCodeTestUtils.checkEqualsAndHashCode(origBlock, unused -> deserBlock);
assertThat(deserBlock.getPositionCount(), is(1));
}
}
public void testConstantNullBlock() throws IOException {
try (Block origBlock = blockFactory.newConstantNullBlock(randomIntBetween(1, 8192))) {
try (Block deserBlock = serializeDeserializeBlock(origBlock)) {
EqualsHashCodeTestUtils.checkEqualsAndHashCode(origBlock, unused -> deserBlock);
}
}
}
// TODO: more types, grouping, etc...
public void testSimulateAggs() {
DriverContext driverCtx = driverContext();
Page page = new Page(blockFactory.newLongArrayVector(new long[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }, 10).asBlock());
var function = SumLongAggregatorFunction.create(driverCtx, List.of(0));
try (BooleanVector noMasking = driverContext().blockFactory().newConstantBooleanVector(true, page.getPositionCount())) {
function.addRawInput(page, noMasking);
}
Block[] blocks = new Block[function.intermediateBlockCount()];
try {
function.evaluateIntermediate(blocks, 0, driverCtx);
Block[] deserBlocks = Arrays.stream(blocks).map(this::uncheckedSerializeDeserializeBlock).toArray(Block[]::new);
try {
IntStream.range(0, blocks.length)
.forEach(i -> EqualsHashCodeTestUtils.checkEqualsAndHashCode(blocks[i], unused -> deserBlocks[i]));
var inputChannels = IntStream.range(0, SumLongAggregatorFunction.intermediateStateDesc().size()).boxed().toList();
try (var finalAggregator = SumLongAggregatorFunction.create(driverCtx, inputChannels)) {
finalAggregator.addIntermediateInput(new Page(deserBlocks));
Block[] finalBlocks = new Block[1];
finalAggregator.evaluateFinal(finalBlocks, 0, driverCtx);
try (var finalBlock = (LongBlock) finalBlocks[0]) {
assertThat(finalBlock.getLong(0), is(55L));
}
}
} finally {
Releasables.close(deserBlocks);
}
} finally {
Releasables.close(blocks);
page.releaseBlocks();
}
}
public void testOrdinalVector() throws Exception {
int numValues = randomIntBetween(1, 1000);
BlockFactory blockFactory = driverContext().blockFactory();
BytesRef scratch = new BytesRef();
try (
BytesRefVector.Builder regular = blockFactory.newBytesRefVectorBuilder(between(1, numValues * 3));
BytesRefHash hash = new BytesRefHash(1, blockFactory.bigArrays());
IntVector.Builder ordinals = blockFactory.newIntVectorBuilder(between(1, numValues * 3));
BytesRefVector.Builder dictionary = blockFactory.newBytesRefVectorBuilder(between(1, numValues * 3));
) {
BytesRef v = new BytesRef("value-" + randomIntBetween(1, 20));
int ord = Math.toIntExact(hash.add(v));
ord = ord < 0 ? -1 - ord : ord;
ordinals.appendInt(ord);
regular.appendBytesRef(v);
for (long l = 0; l < hash.size(); l++) {
dictionary.appendBytesRef(hash.get(l, scratch));
}
try (BytesRefVector v1 = regular.build(); BytesRefVector v2 = new OrdinalBytesRefVector(ordinals.build(), dictionary.build())) {
BytesRefVector.equals(v1, v2);
for (BytesRefVector vector : List.of(v1, v2)) {
try (BytesRefBlock deserBlock = serializeDeserializeBlock(vector.asBlock())) {
EqualsHashCodeTestUtils.checkEqualsAndHashCode(deserBlock, unused -> deserBlock);
}
}
for (int p = 0; p < v1.getPositionCount(); p++) {
try (BytesRefVector f1 = v1.filter(p); BytesRefVector f2 = v2.filter(p)) {
BytesRefVector.equals(f1, f2);
for (BytesRefVector vector : List.of(f1, f2)) {
try (BytesRefBlock deserBlock = serializeDeserializeBlock(vector.asBlock())) {
EqualsHashCodeTestUtils.checkEqualsAndHashCode(deserBlock, unused -> deserBlock);
}
}
}
}
}
}
}
public void testOrdinalBlock() throws Exception {
int numValues = randomIntBetween(1, 1000);
BlockFactory blockFactory = driverContext().blockFactory();
BytesRef scratch = new BytesRef();
try (
BytesRefBlock.Builder regular = blockFactory.newBytesRefBlockBuilder(between(1, numValues * 3));
BytesRefHash hash = new BytesRefHash(1, blockFactory.bigArrays());
IntBlock.Builder ordinals = blockFactory.newIntBlockBuilder(between(1, numValues * 3));
BytesRefVector.Builder dictionary = blockFactory.newBytesRefVectorBuilder(between(1, numValues * 3));
) {
int valueCount = randomIntBetween(0, 3);
if (valueCount == 0) {
regular.appendNull();
ordinals.appendNull();
}
if (valueCount > 1) {
regular.beginPositionEntry();
ordinals.beginPositionEntry();
}
for (int v = 0; v < valueCount; v++) {
BytesRef bytes = new BytesRef("value-" + randomIntBetween(1, 20));
int ord = Math.toIntExact(hash.add(bytes));
ord = ord < 0 ? -1 - ord : ord;
ordinals.appendInt(ord);
regular.appendBytesRef(bytes);
}
if (valueCount > 1) {
regular.endPositionEntry();
ordinals.endPositionEntry();
}
for (long l = 0; l < hash.size(); l++) {
dictionary.appendBytesRef(hash.get(l, scratch));
}
try (BytesRefBlock b1 = regular.build(); BytesRefBlock b2 = new OrdinalBytesRefBlock(ordinals.build(), dictionary.build())) {
BytesRefBlock.equals(b1, b2);
for (BytesRefBlock block : List.of(b1, b2)) {
try (BytesRefBlock deserBlock = serializeDeserializeBlock(block)) {
EqualsHashCodeTestUtils.checkEqualsAndHashCode(deserBlock, unused -> deserBlock);
}
}
for (int p = 0; p < b1.getPositionCount(); p++) {
try (BytesRefBlock f1 = b1.filter(p); BytesRefBlock f2 = b2.filter(p)) {
BytesRefBlock.equals(f1, f2);
for (BytesRefBlock block : List.of(f1, f2)) {
try (BytesRefBlock deserBlock = serializeDeserializeBlock(block)) {
EqualsHashCodeTestUtils.checkEqualsAndHashCode(deserBlock, unused -> deserBlock);
}
}
}
}
try (BytesRefBlock e1 = b1.expand(); BytesRefBlock e2 = b2.expand()) {
BytesRefBlock.equals(e1, e2);
for (BytesRefBlock block : List.of(e1, e2)) {
try (BytesRefBlock deserBlock = serializeDeserializeBlock(block)) {
EqualsHashCodeTestUtils.checkEqualsAndHashCode(deserBlock, unused -> deserBlock);
}
}
}
}
}
}
public void testCompositeBlock() throws Exception {
final int numBlocks = randomIntBetween(1, 10);
final int positionCount = randomIntBetween(1, 1000);
final Block[] blocks = new Block[numBlocks];
for (int b = 0; b < numBlocks; b++) {
ElementType elementType = randomFrom(ElementType.LONG, ElementType.DOUBLE, ElementType.BOOLEAN, ElementType.NULL);
blocks[b] = RandomBlock.randomBlock(blockFactory, elementType, positionCount, true, 0, between(1, 2), 0, between(1, 2)).block();
}
try (CompositeBlock origBlock = new CompositeBlock(blocks)) {
assertThat(origBlock.getBlockCount(), equalTo(numBlocks));
for (int b = 0; b < numBlocks; b++) {
assertThat(origBlock.getBlock(b), equalTo(blocks[b]));
}
try (
CompositeBlock deserBlock = serializeDeserializeBlockWithVersion(
origBlock,
TransportVersionUtils.randomVersionBetween(random(), ESQL_AGGREGATE_METRIC_DOUBLE_BLOCK, TransportVersion.current())
)
) {
assertThat(deserBlock.getBlockCount(), equalTo(numBlocks));
for (int b = 0; b < numBlocks; b++) {
assertThat(deserBlock.getBlock(b), equalTo(origBlock.getBlock(b)));
}
EqualsHashCodeTestUtils.checkEqualsAndHashCode(deserBlock, unused -> deserBlock);
}
}
}
public void testAggregateMetricDouble() throws IOException {
final int positionCount = randomIntBetween(1, 1000);
DoubleBlock minBlock = (DoubleBlock) RandomBlock.randomBlock(
blockFactory,
randomFrom(ElementType.DOUBLE, ElementType.NULL),
positionCount,
true,
0,
1,
0,
0
).block();
DoubleBlock maxBlock = (DoubleBlock) RandomBlock.randomBlock(
blockFactory,
randomFrom(ElementType.DOUBLE, ElementType.NULL),
positionCount,
true,
0,
1,
0,
0
).block();
DoubleBlock suBlock = (DoubleBlock) RandomBlock.randomBlock(
blockFactory,
randomFrom(ElementType.DOUBLE, ElementType.NULL),
positionCount,
true,
0,
1,
0,
0
).block();
IntBlock countBlock = (IntBlock) RandomBlock.randomBlock(
blockFactory,
randomFrom(ElementType.INT, ElementType.NULL),
positionCount,
true,
0,
1,
0,
0
).block();
try (var origBlock = new AggregateMetricDoubleArrayBlock(minBlock, maxBlock, suBlock, countBlock)) {
try (
AggregateMetricDoubleBlock deserBlock = serializeDeserializeBlockWithVersion(
origBlock,
TransportVersionUtils.randomVersionBetween(
random(),
AggregateMetricDoubleArrayBlock.WRITE_TYPED_BLOCK,
TransportVersion.current()
)
)
) {
assertThat(deserBlock.minBlock(), equalTo(minBlock));
assertThat(deserBlock.minBlock(), equalTo(minBlock));
assertThat(deserBlock.minBlock(), equalTo(minBlock));
assertThat(deserBlock.minBlock(), equalTo(minBlock));
EqualsHashCodeTestUtils.checkEqualsAndHashCode(deserBlock, unused -> deserBlock);
}
}
}
static BytesRef randomBytesRef() {
return new BytesRef(randomAlphaOfLengthBetween(0, 10));
}
/**
* A {@link BigArrays} that won't throw {@link CircuitBreakingException}.
* <p>
* Rather than using the {@link NoneCircuitBreakerService} we use a
* very large limit so tests can call {@link CircuitBreaker#getUsed()}.
* </p>
*/
protected final BigArrays nonBreakingBigArrays() {
return new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, ByteSizeValue.ofBytes(Integer.MAX_VALUE)).withCircuitBreaking();
}
/**
* A {@link DriverContext} with a nonBreakingBigArrays.
*/
protected DriverContext driverContext() { // TODO make this final and return a breaking block factory
return new DriverContext(nonBreakingBigArrays(), TestBlockFactory.getNonBreakingInstance());
}
}
|
BlockSerializationTests
|
java
|
quarkusio__quarkus
|
integration-tests/cache/src/test/java/io/quarkus/it/cache/TreeTestCase.java
|
{
"start": 549,
"end": 1507
}
|
class ____ {
@Test
public void test() {
// First, let's check that the import.sql file was correctly loaded.
given().when().get("/trees").then().statusCode(200).body(containsString("Oak"), containsString("Chestnut"));
// Then, we get one specific tree. The call result is cached because of the @CacheResult annotation.
given().when().get("/trees/1").then().statusCode(200).body(containsString("Oak"));
// The same tree is deleted from the database.
given().when().delete("/trees/1").then().statusCode(204);
// Is it really gone? Let's check.
given().when().get("/trees").then().statusCode(200).body(not(containsString("Oak")), containsString("Chestnut"));
// If we try to get the same tree again, it is still returned because it was cached earlier.
given().when().get("/trees/1").then().statusCode(200).statusCode(200).body(containsString("Oak"));
}
}
|
TreeTestCase
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/format/EnumNumberFormatShape3580PojoTest.java
|
{
"start": 1242,
"end": 1473
}
|
class ____ {
public PojoStateNum3580 state;
public PojoNum3580() {}
public PojoNum3580(PojoStateNum3580 state) {this.state = state;}
}
@JsonFormat(shape = JsonFormat.Shape.NUMBER)
public
|
PojoNum3580
|
java
|
spring-projects__spring-security
|
config/src/test/java/org/springframework/security/config/annotation/web/reactive/ServerHttpSecurityConfigurationTests.java
|
{
"start": 18681,
"end": 18935
}
|
class ____ {
@Bean
RSocketMessageHandler messageHandler() {
return new RSocketMessageHandler();
}
}
@Configuration(proxyBeanMethods = false)
@EnableWebFlux
@EnableWebFluxSecurity
@Import(UserDetailsConfig.class)
static
|
RSocketSecurityConfig
|
java
|
spring-projects__spring-security
|
core/src/main/java/org/springframework/security/authorization/AuthorizationProxyFactory.java
|
{
"start": 968,
"end": 1438
}
|
interface ____ {
/**
* Wrap the given {@code object} in authorization-related advice.
*
* <p>
* Please check the implementation for which kinds of objects it supports.
* @param <T> the type of the object being proxied
* @param object the object to proxy
* @return the proxied object
* @throws org.springframework.aop.framework.AopConfigException if a proxy cannot be
* created
*/
<T> @Nullable T proxy(@Nullable T object);
}
|
AuthorizationProxyFactory
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/collection/original/Permission.java
|
{
"start": 185,
"end": 625
}
|
class ____ {
private String type;
Permission() {}
public String getType() {
return type;
}
public void setType(String type) {
this.type = type;
}
public Permission(String type) {
this.type = type;
}
public boolean equals(Object that) {
if ( !(that instanceof Permission) ) return false;
Permission p = (Permission) that;
return this.type.equals(p.type);
}
public int hashCode() {
return type.hashCode();
}
}
|
Permission
|
java
|
reactor__reactor-core
|
reactor-core/src/test/java/reactor/core/publisher/MonoHasElementsTest.java
|
{
"start": 1193,
"end": 8303
}
|
class ____ {
@Test
public void sourceNull() {
assertThatExceptionOfType(NullPointerException.class).isThrownBy(() -> {
new MonoHasElements<>(null);
});
}
@Test
public void emptySource() {
AssertSubscriber<Boolean> ts = AssertSubscriber.create();
Flux.empty().hasElements().subscribe(ts);
ts.assertValues(false)
.assertComplete()
.assertNoError();
}
@Test
public void emptyMonoSource() {
AssertSubscriber<Boolean> ts = AssertSubscriber.create();
Mono.empty().hasElement().subscribe(ts);
ts.assertValues(false)
.assertComplete()
.assertNoError();
}
@Test
public void emptySourceBackpressured() {
AssertSubscriber<Boolean> ts = AssertSubscriber.create(0);
Mono.empty().hasElement().subscribe(ts);
ts.assertNoValues()
.assertNotComplete()
.assertNoError();
ts.request(1);
ts.assertValues(false)
.assertComplete()
.assertNoError();
}
@Test
public void nonEmptySource() {
AssertSubscriber<Boolean> ts = AssertSubscriber.create();
Flux.range(1, 10).hasElements().subscribe(ts);
ts.assertValues(true)
.assertComplete()
.assertNoError();
}
@Test
public void nonEmptySourceBackpressured() {
AssertSubscriber<Boolean> ts = AssertSubscriber.create(0);
Flux.range(1, 10).hasElements().subscribe(ts);
ts.assertNoValues()
.assertNotComplete()
.assertNoError();
ts.request(1);
ts.assertValues(true)
.assertComplete()
.assertNoError();
}
@Test
public void fluxSourceIsCancelled() {
AtomicLong cancelCount = new AtomicLong();
StepVerifier.create(Flux.range(1, 10)
.doOnCancel(cancelCount::incrementAndGet)
.hasElements())
.expectNext(true)
.verifyComplete();
assertThat(cancelCount).hasValue(1);
}
@Test
public void monoSourceIsNotCancelled() {
AtomicLong cancelCount = new AtomicLong();
StepVerifier.create(Mono.just(1)
.doOnCancel(cancelCount::incrementAndGet)
.hasElement())
.expectNext(true)
.verifyComplete();
assertThat(cancelCount).hasValue(0);
}
@Test
public void testHasElementUpstream() {
AtomicReference<Subscription> sub = new AtomicReference<>();
Mono.just("foo").hide()
.hasElement()
.subscribeWith(new LambdaSubscriber<>(v -> {}, e -> {}, () -> {},
s -> {
sub.set(s);
s.request(Long.MAX_VALUE);
}));
assertThat(sub.get()).isInstanceOf(MonoHasElement.HasElementSubscriber.class);
assertThat(Scannable.from(sub.get()).scan(Scannable.Attr.PARENT).getClass()).isEqualTo(FluxHide.HideSubscriber.class);
}
@Test
public void testHasElementsUpstream() {
AtomicReference<Subscription> sub = new AtomicReference<>();
Flux.just("foo", "bar").hide()
.hasElements()
.subscribeWith(new LambdaSubscriber<>(v -> {}, e -> {}, () -> {},
s -> {
sub.set(s);
s.request(Long.MAX_VALUE);
}));
assertThat(sub.get()).isInstanceOf(MonoHasElements.HasElementsSubscriber.class);
Scannable.from(sub.get())
.parents()
.findFirst()
.ifPresent(s -> assertThat(s).isInstanceOf(FluxHide.HideSubscriber
.class));
}
@Test
public void hasElementCancel() {
AtomicBoolean cancelled = new AtomicBoolean();
Mono.just("foo").hide()
.doOnCancel(() -> cancelled.set(true))
.log()
.hasElement()
.subscribeWith(new LambdaSubscriber<>(v -> {}, e -> {}, () -> {},
Subscription::cancel));
assertThat(cancelled.get()).isTrue();
}
@Test
public void hasElementsCancel() {
AtomicBoolean cancelled = new AtomicBoolean();
Flux.just("foo", "bar").hide()
.doOnCancel(() -> cancelled.set(true))
.hasElements()
.subscribeWith(new LambdaSubscriber<>(v -> {}, e -> {}, () -> {},
Subscription::cancel));
assertThat(cancelled.get()).isTrue();
}
@Test
public void scanOperatorHasElement(){
MonoHasElement<Integer> test = new MonoHasElement<>(Mono.just(1));
assertThat(test.scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.SYNC);
}
@Test
public void scanOperatorHasElements(){
MonoHasElements<Integer> test = new MonoHasElements<>(Flux.just(1, 2, 3));
assertThat(test.scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.SYNC);
}
@Test
public void scanHasElements() {
CoreSubscriber<? super Boolean> actual = new LambdaMonoSubscriber<>(null, e -> {}, null, null);
MonoHasElements.HasElementsSubscriber<String> test = new MonoHasElements.HasElementsSubscriber<>(actual);
Subscription parent = Operators.emptySubscription();
test.onSubscribe(parent);
assertThat(test.scan(Scannable.Attr.PARENT)).isSameAs(parent);
assertThat(test.scan(Scannable.Attr.ACTUAL)).isSameAs(actual);
assertThat(test.scan(Scannable.Attr.PREFETCH)).isEqualTo(0);
assertThat(test.scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.SYNC);
assertThat(test.scan(Scannable.Attr.TERMINATED)).isFalse();
assertThat(test.scan(Scannable.Attr.CANCELLED)).isFalse();
test.onComplete();
assertThat(test.scan(Scannable.Attr.TERMINATED)).isTrue();
assertThat(test.scan(Scannable.Attr.CANCELLED)).isFalse();
}
@Test
public void scanHasElementsNoTerminatedOnError() {
CoreSubscriber<? super Boolean> actual = new LambdaMonoSubscriber<>(null, e -> {}, null, null);
MonoHasElements.HasElementsSubscriber<String> test = new MonoHasElements.HasElementsSubscriber<>(actual);
test.onError(new IllegalStateException("boom"));
assertThat(test.scan(Scannable.Attr.TERMINATED)).isFalse();
assertThat(test.scan(Scannable.Attr.CANCELLED)).isFalse();
}
@Test
public void scanHasElement() {
CoreSubscriber<? super Boolean> actual = new LambdaMonoSubscriber<>(null, e -> {}, null, null);
MonoHasElement.HasElementSubscriber<String> test = new MonoHasElement.HasElementSubscriber<>(actual);
Subscription parent = Operators.emptySubscription();
test.onSubscribe(parent);
assertThat(test.scan(Scannable.Attr.PARENT)).isSameAs(parent);
assertThat(test.scan(Scannable.Attr.ACTUAL)).isSameAs(actual);
assertThat(test.scan(Scannable.Attr.PREFETCH)).isEqualTo(0);
assertThat(test.scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.SYNC);
assertThat(test.scan(Scannable.Attr.TERMINATED)).isFalse();
assertThat(test.scan(Scannable.Attr.CANCELLED)).isFalse();
test.onComplete();
assertThat(test.scan(Scannable.Attr.TERMINATED)).isTrue();
assertThat(test.scan(Scannable.Attr.CANCELLED)).isFalse();
}
@Test
public void scanHasElementTerminatedOnError() {
CoreSubscriber<? super Boolean> actual = new LambdaMonoSubscriber<>(null, e -> {}, null, null);
MonoHasElement.HasElementSubscriber<String> test = new MonoHasElement.HasElementSubscriber<>(actual);
assertThat(test.scan(Scannable.Attr.TERMINATED)).isFalse();
assertThat(test.scan(Scannable.Attr.CANCELLED)).isFalse();
test.onError(new IllegalStateException("boom"));
assertThat(test.scan(Scannable.Attr.TERMINATED)).isTrue();
}
}
|
MonoHasElementsTest
|
java
|
elastic__elasticsearch
|
qa/smoke-test-http/src/internalClusterTest/java/org/elasticsearch/http/CorsNotSetIT.java
|
{
"start": 758,
"end": 1961
}
|
class ____ extends HttpSmokeTestCase {
public void testCorsSettingDefaultBehaviourDoesNotReturnAnything() throws IOException {
String corsValue = "http://localhost:9200";
Request request = new Request("GET", "/");
RequestOptions.Builder options = request.getOptions().toBuilder();
options.addHeader("User-Agent", "Mozilla Bar");
options.addHeader("Origin", corsValue);
request.setOptions(options);
Response response = getRestClient().performRequest(request);
assertThat(response.getStatusLine().getStatusCode(), is(200));
assertThat(response.getHeader("Access-Control-Allow-Origin"), nullValue());
assertThat(response.getHeader("Access-Control-Allow-Credentials"), nullValue());
}
public void testThatOmittingCorsHeaderDoesNotReturnAnything() throws IOException {
Response response = getRestClient().performRequest(new Request("GET", "/"));
assertThat(response.getStatusLine().getStatusCode(), is(200));
assertThat(response.getHeader("Access-Control-Allow-Origin"), nullValue());
assertThat(response.getHeader("Access-Control-Allow-Credentials"), nullValue());
}
}
|
CorsNotSetIT
|
java
|
apache__flink
|
flink-core/src/main/java/org/apache/flink/core/asyncprocessing/AsyncFutureImpl.java
|
{
"start": 1720,
"end": 1892
}
|
class ____ on hot path and very complex, please take care of the performance as well as the
* running thread of each block when you decide to touch it.
*/
@Internal
public
|
is
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/context/annotation/FactoryMethodResolutionTests.java
|
{
"start": 2289,
"end": 2428
}
|
class ____ {
@Bean
@ExampleAnnotation
public ExampleBean exampleBean() {
return new ExampleBean();
}
}
static
|
TestConfiguration
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/embeddable/EmbeddableWithToOneAssociationTest.java
|
{
"start": 3130,
"end": 3575
}
|
class ____ {
int officeNumber;
@OneToOne
ParkingSpot parkingSpot;
public LocationDetails() {
}
public LocationDetails(int officeNumber, ParkingSpot parkingSpot) {
this.officeNumber = officeNumber;
this.parkingSpot = parkingSpot;
}
public int getOfficeNumber() {
return officeNumber;
}
public ParkingSpot getParkingSpot() {
return parkingSpot;
}
}
@Entity(name = "ParkingSpot")
public static
|
LocationDetails
|
java
|
alibaba__druid
|
core/src/main/java/com/alibaba/druid/sql/dialect/impala/visitor/ImpalaASTVisitor.java
|
{
"start": 278,
"end": 623
}
|
interface ____ extends HiveASTVisitor {
default boolean visit(ImpalaCreateTableStatement x) {
return true;
}
default void endVisit(ImpalaCreateTableStatement x) {
}
default boolean visit(ImpalaInsertStatement x) {
return true;
}
default void endVisit(ImpalaInsertStatement x) {
}
}
|
ImpalaASTVisitor
|
java
|
reactor__reactor-core
|
reactor-test/src/main/java/reactor/test/publisher/ColdTestPublisher.java
|
{
"start": 5318,
"end": 13552
}
|
class ____<T> implements Subscription {
final Subscriber<? super T> actual;
final Fuseable.@Nullable ConditionalSubscriber<? super T> actualConditional;
final ColdTestPublisher<T> parent;
volatile boolean cancelled;
volatile long requested;
@SuppressWarnings("rawtypes")
static final AtomicLongFieldUpdater<ColdTestPublisherSubscription> REQUESTED =
AtomicLongFieldUpdater.newUpdater(ColdTestPublisherSubscription.class, "requested");
volatile long wip;
@SuppressWarnings("rawtypes")
static final AtomicLongFieldUpdater<ColdTestPublisherSubscription> WIP =
AtomicLongFieldUpdater.newUpdater(ColdTestPublisherSubscription.class, "wip");
/** Where in the {@link ColdTestPublisher#values} buffer this subscription is at. */
int index;
@SuppressWarnings("unchecked")
ColdTestPublisherSubscription(Subscriber<? super T> actual, ColdTestPublisher<T> parent) {
this.actual = actual;
if(actual instanceof Fuseable.ConditionalSubscriber){
this.actualConditional = (Fuseable.ConditionalSubscriber<? super T>) actual;
}
else {
this.actualConditional = null;
}
this.parent = parent;
}
@Override
public void request(long n) {
if (Operators.validate(n)) {
if (Operators.addCap(REQUESTED, this, n) == 0) {
parent.wasRequested = true;
}
drain();
}
}
@Override
public void cancel() {
if (!cancelled) {
ColdTestPublisher.CANCEL_COUNT.incrementAndGet(parent);
if (parent.violations.contains(DEFER_CANCELLATION) || parent.violations.contains(REQUEST_OVERFLOW)) {
return;
}
cancelled = true;
parent.remove(this);
}
}
private void drain() {
if (WIP.getAndIncrement(this) > 0) {
return;
}
for (; ; ) {
int i = index;
// Re-read the volatile 'requested' which could have grown via another thread
long r = requested;
int emitted = 0;
if (cancelled) {
return;
}
// This list can only grow while we're in drain(), so no risk in get(i) being out of bounds
int s = parent.values.size();
while (i < s) {
if (emitted == r && !parent.violations.contains(REQUEST_OVERFLOW)) {
break;
}
T t = parent.values.get(i);
if (t == null && !parent.violations.contains(ALLOW_NULL)) {
parent.remove(this);
actual.onError(new NullPointerException("The " + i + "th element was null"));
return;
}
//emit and increase count, potentially using conditional subscriber
if (actualConditional != null) {
//noinspection ConstantConditions
if (actualConditional.tryOnNext(t)) {
emitted++;
}
} else {
//noinspection ConstantConditions
actual.onNext(t);
emitted++;
}
i++;
if (cancelled) {
return;
}
}
index = i;
boolean hasMoreData = i < s;
boolean hasMoreRequest;
if (emitted > r) { //we did clearly overflow
assert parent.violations.contains(REQUEST_OVERFLOW);
parent.hasOverflown = true;
}
//let's update the REQUESTED unless we're in fastpath
if (r != Long.MAX_VALUE) {
hasMoreRequest = REQUESTED.addAndGet(this, -emitted) > 0;
}
else {
hasMoreRequest = true;
}
//let's exit early if we've transmitted the whole buffer and there's a terminal signal
if (i == s && emitTerminalSignalIfAny()) {
return;
}
//the only remaining early exit condition is if we're in slowpath and we've emitted
//all the requested amount but there's still values in the buffer...
//if the parent is configured to errorOnOverflow then we must terminate
if (hasMoreData && !hasMoreRequest && parent.errorOnOverflow) {
parent.remove(this);
actual.onError(Exceptions.failWithOverflow("Can't deliver value due to lack of requests"));
return;
}
//in all other cases, let's loop again in case of additional work, exit otherwise
if (WIP.decrementAndGet(this) == 0) {
return;
}
}
}
/**
* Attempt to terminate the subscriber if the publisher was marked as terminated.
* Note that if that is not the case, it is important to continue the drain loop
* since otherwise no downstream signal is going to be pushed yet we'd exit the loop early.
*
* @return true if the TestPublisher was terminated, false otherwise
*/
private boolean emitTerminalSignalIfAny() {
if (parent.done && this.parent.values.size() == index) {
parent.remove(this);
final Throwable t = parent.error;
if (t != null) {
actual.onError(parent.error);
}
else {
actual.onComplete();
}
return true;
}
return false;
}
}
@Override
public Flux<T> flux() {
return Flux.from(this);
}
@Override
public boolean wasSubscribed() {
return subscribeCount > 0;
}
@Override
public long subscribeCount() {
return subscribeCount;
}
@Override
public boolean wasCancelled() {
return cancelCount > 0;
}
@Override
public boolean wasRequested() {
return wasRequested;
}
@Override
public Mono<T> mono() {
return Mono.from(this);
}
@Override
public ColdTestPublisher<T> assertMinRequested(long n) {
ColdTestPublisherSubscription<T>[] subs = subscribers;
long minRequest = Stream.of(subs)
.mapToLong(s -> s.requested)
.min()
.orElse(0);
if (minRequest < n) {
throw new AssertionError("Expected smallest requested amount to be >= " + n + "; got " + minRequest);
}
return this;
}
@Override
public ColdTestPublisher<T> assertMaxRequested(long n) {
ColdTestPublisherSubscription<T>[] subs = subscribers;
long maxRequest = Stream.of(subs)
.mapToLong(s -> s.requested)
.max()
.orElse(0);
if (maxRequest > n) {
throw new AssertionError("Expected largest requested amount to be <= " + n + "; got " + maxRequest);
}
return this;
}
@Override
public ColdTestPublisher<T> assertSubscribers() {
ColdTestPublisherSubscription<T>[] s = subscribers;
if (s == EMPTY) {
throw new AssertionError("Expected subscribers");
}
return this;
}
@Override
public ColdTestPublisher<T> assertSubscribers(int n) {
int sl = subscribers.length;
if (sl != n) {
throw new AssertionError("Expected " + n + " subscribers, got " + sl);
}
return this;
}
@Override
public ColdTestPublisher<T> assertNoSubscribers() {
int sl = subscribers.length;
if (sl != 0) {
throw new AssertionError("Expected no subscribers, got " + sl);
}
return this;
}
@Override
public ColdTestPublisher<T> assertCancelled() {
if (cancelCount == 0) {
throw new AssertionError("Expected at least 1 cancellation");
}
return this;
}
@Override
public ColdTestPublisher<T> assertCancelled(int n) {
int cc = cancelCount;
if (cc != n) {
throw new AssertionError("Expected " + n + " cancellations, got " + cc);
}
return this;
}
@Override
public ColdTestPublisher<T> assertNotCancelled() {
if (cancelCount != 0) {
throw new AssertionError("Expected no cancellation");
}
return this;
}
@Override
public ColdTestPublisher<T> assertRequestOverflow() {
if (!hasOverflown) {
throw new AssertionError("Expected some request overflow");
}
return this;
}
@Override
public ColdTestPublisher<T> assertNoRequestOverflow() {
if (hasOverflown) {
throw new AssertionError("Unexpected request overflow");
}
return this;
}
@Override
public ColdTestPublisher<T> next(@Nullable T t) {
if (!violations.contains(ALLOW_NULL)) {
Objects.requireNonNull(t, "emitted values must be non-null");
}
values.add(t);
for (ColdTestPublisherSubscription<T> s : subscribers) {
s.drain();
}
return this;
}
@Override
public ColdTestPublisher<T> error(Throwable t) {
Objects.requireNonNull(t, "t");
error = t;
done = true;
final ColdTestPublisherSubscription[] subs = SUBSCRIBERS.getAndSet(this, TERMINATED);
for (ColdTestPublisherSubscription<?> s : subs) {
s.drain();
}
return this;
}
@Override
public ColdTestPublisher<T> complete() {
done = true;
error = null;
final ColdTestPublisherSubscription[] subs = SUBSCRIBERS.getAndSet(this, TERMINATED);
for (ColdTestPublisherSubscription<?> s : subs) {
s.drain();
}
return this;
}
}
|
ColdTestPublisherSubscription
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeMXBean.java
|
{
"start": 1138,
"end": 2234
}
|
interface ____ {
/**
* Get status information (e.g., whether formatted) of JournalNode's journals.
*
* @return A string presenting status for each journal
*/
String getJournalsStatus();
/**
* Get host and port of JournalNode.
*
* @return colon separated host and port.
*/
String getHostAndPort();
/**
* Get list of the clusters of JournalNode's journals
* as one JournalNode may support multiple clusters.
*
* @return list of clusters.
*/
List<String> getClusterIds();
/**
* Gets the version of Hadoop.
*
* @return the version of Hadoop.
*/
String getVersion();
/**
* Get the start time of the JournalNode.
*
* @return the start time of the JournalNode.
*/
long getJNStartedTimeInMillis();
/**
* Get the list of the storage infos of JournalNode's journals. Storage infos
* include layout version, namespace id, cluster id and creation time of the
* File system state.
*
* @return the list of storage infos associated with journals.
*/
List<String> getStorageInfos();
}
|
JournalNodeMXBean
|
java
|
bumptech__glide
|
library/src/main/java/com/bumptech/glide/load/resource/bytes/ByteBufferRewinder.java
|
{
"start": 722,
"end": 1037
}
|
class ____ implements DataRewinder.Factory<ByteBuffer> {
@NonNull
@Override
public DataRewinder<ByteBuffer> build(ByteBuffer data) {
return new ByteBufferRewinder(data);
}
@NonNull
@Override
public Class<ByteBuffer> getDataClass() {
return ByteBuffer.class;
}
}
}
|
Factory
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/inheritance/discriminator/CaseStatementWithTypeTest.java
|
{
"start": 7122,
"end": 7249
}
|
class ____ extends UnionParent {
public UnionChildB() {
}
public UnionChildB(Long id) {
super( id );
}
}
}
|
UnionChildB
|
java
|
spring-projects__spring-framework
|
spring-core/src/main/java/org/springframework/core/serializer/Deserializer.java
|
{
"start": 1025,
"end": 1859
}
|
interface ____<T> {
/**
* Read (assemble) an object of type T from the given InputStream.
* <p>Note: Implementations should not close the given InputStream
* (or any decorators of that InputStream) but rather leave this up
* to the caller.
* @param inputStream the input stream
* @return the deserialized object
* @throws IOException in case of errors reading from the stream
*/
T deserialize(InputStream inputStream) throws IOException;
/**
* Read (assemble) an object of type T from the given byte array.
* @param serialized the byte array
* @return the deserialized object
* @throws IOException in case of deserialization failure
* @since 5.2.7
*/
default T deserializeFromByteArray(byte[] serialized) throws IOException {
return deserialize(new ByteArrayInputStream(serialized));
}
}
|
Deserializer
|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/internal/operators/maybe/MaybeSubscribeOn.java
|
{
"start": 1973,
"end": 3146
}
|
class ____<T>
extends AtomicReference<Disposable>
implements MaybeObserver<T>, Disposable {
final SequentialDisposable task;
private static final long serialVersionUID = 8571289934935992137L;
final MaybeObserver<? super T> downstream;
SubscribeOnMaybeObserver(MaybeObserver<? super T> downstream) {
this.downstream = downstream;
this.task = new SequentialDisposable();
}
@Override
public void dispose() {
DisposableHelper.dispose(this);
task.dispose();
}
@Override
public boolean isDisposed() {
return DisposableHelper.isDisposed(get());
}
@Override
public void onSubscribe(Disposable d) {
DisposableHelper.setOnce(this, d);
}
@Override
public void onSuccess(T value) {
downstream.onSuccess(value);
}
@Override
public void onError(Throwable e) {
downstream.onError(e);
}
@Override
public void onComplete() {
downstream.onComplete();
}
}
}
|
SubscribeOnMaybeObserver
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/ThrowIfUncheckedKnownCheckedTest.java
|
{
"start": 872,
"end": 1663
}
|
class ____ {
private final CompilationTestHelper compilationHelper =
CompilationTestHelper.newInstance(ThrowIfUncheckedKnownChecked.class, getClass());
@Test
public void positiveCase() {
compilationHelper
.addSourceLines(
"ThrowIfUncheckedKnownCheckedTestPositiveCases.java",
"""
package com.google.errorprone.bugpatterns.testdata;
import static com.google.common.base.Throwables.propagateIfPossible;
import static com.google.common.base.Throwables.throwIfUnchecked;
import java.io.IOException;
import java.util.concurrent.ExecutionException;
/**
* @author cpovirk@google.com (Chris Povirk)
*/
public
|
ThrowIfUncheckedKnownCheckedTest
|
java
|
reactor__reactor-core
|
reactor-core/src/jcstress/java/reactor/core/publisher/FluxReplayStressTest.java
|
{
"start": 1509,
"end": 3999
}
|
class ____<T> {
final Flux<T> sharedSource;
final StressSubscriber<T> subscriber1 = new StressSubscriber<>();
final StressSubscriber<T> subscriber2 = new StressSubscriber<>();
public RefCntConcurrentSubscriptionBaseStressTest(Flux<T> sourceToShare) {
this(sourceToShare, null);
}
public RefCntConcurrentSubscriptionBaseStressTest(Flux<T> sourceToShare,
@Nullable Duration duration) {
this(sourceToShare, 2, duration);
}
public RefCntConcurrentSubscriptionBaseStressTest(Flux<T> sourceToShare,
int subscribersCnt,
@Nullable Duration duration) {
if (duration == null) {
this.sharedSource = sourceToShare.replay(1)
.refCount(subscribersCnt);
}
else {
this.sharedSource = sourceToShare.replay(1)
.refCount(subscribersCnt, duration);
}
}
}
@JCStressTest
@Outcome(id = {"[0,1], 1, [0,1], 1, 0, 0, [1,2]"}, expect = ACCEPTABLE, desc =
"Normal scenario when cancellation of the first subscriber has no effect on" +
" the second. The second subscription may still take place since" +
" the last subscriber can join the first subscription. However, due" +
" to natural concurrency, when the synchronization block is entered," +
" the connection is nulled. This will cause connect() to be called." +
" Current ConnectableFlux api does not allow any improvements on that" +
" front since it lacks coordination.")
@Outcome(id = {"[0,1], 1, [0,1], 0, 0, 1, [1,2]"}, expect = ACCEPTABLE_INTERESTING, desc =
"Expected corner case when the second subscriber still joins the first" +
" subscription, but due to natural concurrency, cancellation" +
" happens before onComplete is called. So the second subscriber gets the value" +
" and onError instead of onComplete. The second connect call may still" +
" happen, since ConnectableFlux.subscribe happens before the check of" +
" the current connection value in FluxRefCnt")
@Outcome(id = {"0, 0, 0, 0, 0, 1, [1,2]"}, expect = ACCEPTABLE_INTERESTING, desc =
"Expected corner case when the second subscriber still joins the first" +
" subscription, but due to natural concurrency, cancellation of the" +
" first subscriber happens before the value is delivered. In that case" +
" onError is delivered instead of any values. The second connect" +
" call may still happen.")
@State
public static
|
RefCntConcurrentSubscriptionBaseStressTest
|
java
|
spring-projects__spring-framework
|
spring-context/src/main/java/org/springframework/context/support/ApplicationContextAwareProcessor.java
|
{
"start": 2986,
"end": 4858
}
|
class ____ implements BeanPostProcessor {
private final ConfigurableApplicationContext applicationContext;
private final StringValueResolver embeddedValueResolver;
/**
* Create a new ApplicationContextAwareProcessor for the given context.
*/
public ApplicationContextAwareProcessor(ConfigurableApplicationContext applicationContext) {
this.applicationContext = applicationContext;
this.embeddedValueResolver = new EmbeddedValueResolver(applicationContext.getBeanFactory());
}
@Override
public @Nullable Object postProcessBeforeInitialization(Object bean, String beanName) throws BeansException {
if (bean instanceof Aware) {
invokeAwareInterfaces(bean);
}
return bean;
}
private void invokeAwareInterfaces(Object bean) {
if (bean instanceof EnvironmentAware environmentAware) {
environmentAware.setEnvironment(this.applicationContext.getEnvironment());
}
if (bean instanceof EmbeddedValueResolverAware embeddedValueResolverAware) {
embeddedValueResolverAware.setEmbeddedValueResolver(this.embeddedValueResolver);
}
if (bean instanceof ResourceLoaderAware resourceLoaderAware) {
resourceLoaderAware.setResourceLoader(this.applicationContext);
}
if (bean instanceof ApplicationEventPublisherAware applicationEventPublisherAware) {
applicationEventPublisherAware.setApplicationEventPublisher(this.applicationContext);
}
if (bean instanceof MessageSourceAware messageSourceAware) {
messageSourceAware.setMessageSource(this.applicationContext);
}
if (bean instanceof ApplicationStartupAware applicationStartupAware) {
applicationStartupAware.setApplicationStartup(this.applicationContext.getApplicationStartup());
}
if (bean instanceof ApplicationContextAware applicationContextAware) {
applicationContextAware.setApplicationContext(this.applicationContext);
}
}
}
|
ApplicationContextAwareProcessor
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/TestRouterMetrics.java
|
{
"start": 20951,
"end": 94537
}
|
class ____ {
public void getNewApplication(long duration) {
LOG.info("Mocked: successful getNewApplication call with duration {}",
duration);
metrics.succeededAppsCreated(duration);
}
public void submitApplication(long duration) {
LOG.info("Mocked: successful submitApplication call with duration {}",
duration);
metrics.succeededAppsSubmitted(duration);
}
public void forceKillApplication(long duration) {
LOG.info("Mocked: successful forceKillApplication call with duration {}",
duration);
metrics.succeededAppsKilled(duration);
}
public void getApplicationReport(long duration) {
LOG.info("Mocked: successful getApplicationReport call with duration {}",
duration);
metrics.succeededAppsRetrieved(duration);
}
public void getApplicationAttemptReport(long duration) {
LOG.info("Mocked: successful getApplicationAttemptReport call " +
"with duration {}", duration);
metrics.succeededAppAttemptReportRetrieved(duration);
}
public void getApplicationsReport(long duration) {
LOG.info("Mocked: successful getApplicationsReport call with duration {}",
duration);
metrics.succeededMultipleAppsRetrieved(duration);
}
public void getClusterMetrics(long duration){
LOG.info("Mocked: successful getClusterMetrics call with duration {}",
duration);
metrics.succeededGetClusterMetricsRetrieved(duration);
}
public void getClusterNodes(long duration) {
LOG.info("Mocked: successful getClusterNodes call with duration {}", duration);
metrics.succeededGetClusterNodesRetrieved(duration);
}
public void getNodeToLabels(long duration) {
LOG.info("Mocked: successful getNodeToLabels call with duration {}", duration);
metrics.succeededGetNodeToLabelsRetrieved(duration);
}
public void getLabelToNodes(long duration) {
LOG.info("Mocked: successful getLabelToNodes call with duration {}", duration);
metrics.succeededGetLabelsToNodesRetrieved(duration);
}
public void getClusterNodeLabels(long duration) {
LOG.info("Mocked: successful getClusterNodeLabels call with duration {}", duration);
metrics.succeededGetClusterNodeLabelsRetrieved(duration);
}
public void getQueueUserAcls(long duration) {
LOG.info("Mocked: successful getQueueUserAcls call with duration {}", duration);
metrics.succeededGetQueueUserAclsRetrieved(duration);
}
public void getListReservations(long duration) {
LOG.info("Mocked: successful listReservations call with duration {}", duration);
metrics.succeededListReservationsRetrieved(duration);
}
public void getApplicationAttempts(long duration) {
LOG.info("Mocked: successful getApplicationAttempts call with duration {}", duration);
metrics.succeededAppAttemptsRetrieved(duration);
}
public void getContainerReport(long duration) {
LOG.info("Mocked: successful getContainerReport call with duration {}", duration);
metrics.succeededGetContainerReportRetrieved(duration);
}
public void getContainers(long duration) {
LOG.info("Mocked: successful getContainer call with duration {}", duration);
metrics.succeededGetContainersRetrieved(duration);
}
public void getResourceTypeInfo(long duration) {
LOG.info("Mocked: successful getResourceTypeInfo call with duration {}", duration);
metrics.succeededGetResourceTypeInfoRetrieved(duration);
}
public void getFailApplicationAttempt(long duration) {
LOG.info("Mocked: successful failApplicationAttempt call with duration {}", duration);
metrics.succeededFailAppAttemptRetrieved(duration);
}
public void getUpdateApplicationPriority(long duration) {
LOG.info("Mocked: successful updateApplicationPriority call with duration {}", duration);
metrics.succeededUpdateAppPriorityRetrieved(duration);
}
public void getUpdateApplicationTimeouts(long duration) {
LOG.info("Mocked: successful updateApplicationTimeouts call with duration {}", duration);
metrics.succeededUpdateAppTimeoutsRetrieved(duration);
}
public void getSignalToContainerTimeouts(long duration) {
LOG.info("Mocked: successful signalToContainer call with duration {}", duration);
metrics.succeededSignalToContainerRetrieved(duration);
}
public void getQueueInfoRetrieved(long duration) {
LOG.info("Mocked: successful getQueueInfo call with duration {}", duration);
metrics.succeededGetQueueInfoRetrieved(duration);
}
public void moveApplicationAcrossQueuesRetrieved(long duration) {
LOG.info("Mocked: successful moveApplicationAcrossQueues call with duration {}", duration);
metrics.succeededMoveApplicationAcrossQueuesRetrieved(duration);
}
public void getResourceProfilesRetrieved(long duration) {
LOG.info("Mocked: successful getResourceProfiles call with duration {}", duration);
metrics.succeededGetResourceProfilesRetrieved(duration);
}
public void getResourceProfileRetrieved(long duration) {
LOG.info("Mocked: successful getResourceProfile call with duration {}", duration);
metrics.succeededGetResourceProfileRetrieved(duration);
}
public void getAttributesToNodesRetrieved(long duration) {
LOG.info("Mocked: successful getAttributesToNodes call with duration {}", duration);
metrics.succeededGetAttributesToNodesRetrieved(duration);
}
public void getClusterNodeAttributesRetrieved(long duration) {
LOG.info("Mocked: successful getClusterNodeAttributes call with duration {}", duration);
metrics.succeededGetClusterNodeAttributesRetrieved(duration);
}
public void getNodesToAttributesRetrieved(long duration) {
LOG.info("Mocked: successful getNodesToAttributes call with duration {}", duration);
metrics.succeededGetNodesToAttributesRetrieved(duration);
}
public void getNewReservationRetrieved(long duration) {
LOG.info("Mocked: successful getNewReservation call with duration {}", duration);
metrics.succeededGetNewReservationRetrieved(duration);
}
public void getSubmitReservationRetrieved(long duration) {
LOG.info("Mocked: successful getSubmitReservation call with duration {}", duration);
metrics.succeededSubmitReservationRetrieved(duration);
}
public void getUpdateReservationRetrieved(long duration) {
LOG.info("Mocked: successful getUpdateReservation call with duration {}", duration);
metrics.succeededUpdateReservationRetrieved(duration);
}
public void getDeleteReservationRetrieved(long duration) {
LOG.info("Mocked: successful getDeleteReservation call with duration {}", duration);
metrics.succeededDeleteReservationRetrieved(duration);
}
public void getListReservationRetrieved(long duration) {
LOG.info("Mocked: successful getListReservation call with duration {}", duration);
metrics.succeededListReservationRetrieved(duration);
}
public void getAppActivitiesRetrieved(long duration) {
LOG.info("Mocked: successful getAppActivities call with duration {}", duration);
metrics.succeededGetAppActivitiesRetrieved(duration);
}
public void getAppStatisticsRetrieved(long duration) {
LOG.info("Mocked: successful getAppStatistics call with duration {}", duration);
metrics.succeededGetAppStatisticsRetrieved(duration);
}
public void getAppPriorityRetrieved(long duration) {
LOG.info("Mocked: successful getAppPriority call with duration {}", duration);
metrics.succeededGetAppPriorityRetrieved(duration);
}
public void getAppQueueRetrieved(long duration) {
LOG.info("Mocked: successful getAppQueue call with duration {}", duration);
metrics.succeededGetAppQueueRetrieved(duration);
}
public void getUpdateQueueRetrieved(long duration) {
LOG.info("Mocked: successful getUpdateQueue call with duration {}", duration);
metrics.succeededUpdateAppQueueRetrieved(duration);
}
public void getAppTimeoutRetrieved(long duration) {
LOG.info("Mocked: successful getAppTimeout call with duration {}", duration);
metrics.succeededGetAppTimeoutRetrieved(duration);
}
public void getAppTimeoutsRetrieved(long duration) {
LOG.info("Mocked: successful getAppTimeouts call with duration {}", duration);
metrics.succeededGetAppTimeoutsRetrieved(duration);
}
public void getRMNodeLabelsRetrieved(long duration) {
LOG.info("Mocked: successful getRMNodeLabels call with duration {}", duration);
metrics.succeededGetRMNodeLabelsRetrieved(duration);
}
public void getCheckUserAccessToQueueRetrieved(long duration) {
LOG.info("Mocked: successful CheckUserAccessToQueue call with duration {}", duration);
metrics.succeededCheckUserAccessToQueueRetrieved(duration);
}
public void getGetDelegationTokenRetrieved(long duration) {
LOG.info("Mocked: successful GetDelegationToken call with duration {}", duration);
metrics.succeededGetDelegationTokenRetrieved(duration);
}
public void getRenewDelegationTokenRetrieved(long duration) {
LOG.info("Mocked: successful RenewDelegationToken call with duration {}", duration);
metrics.succeededRenewDelegationTokenRetrieved(duration);
}
public void getRefreshAdminAclsRetrieved(long duration) {
LOG.info("Mocked: successful RefreshAdminAcls call with duration {}", duration);
metrics.succeededRefreshAdminAclsRetrieved(duration);
}
public void getRefreshServiceAclsRetrieved(long duration) {
LOG.info("Mocked: successful RefreshServiceAcls call with duration {}", duration);
metrics.succeededRefreshServiceAclsRetrieved(duration);
}
public void getNumSucceededReplaceLabelsOnNodesRetrieved(long duration) {
LOG.info("Mocked: successful ReplaceLabelsOnNodes call with duration {}", duration);
metrics.succeededReplaceLabelsOnNodesRetrieved(duration);
}
public void getNumSucceededReplaceLabelsOnNodeRetrieved(long duration) {
LOG.info("Mocked: successful ReplaceLabelOnNode call with duration {}", duration);
metrics.succeededReplaceLabelsOnNodeRetrieved(duration);
}
public void getDumpSchedulerLogsRetrieved(long duration) {
LOG.info("Mocked: successful DumpSchedulerLogs call with duration {}", duration);
metrics.succeededDumpSchedulerLogsRetrieved(duration);
}
public void getActivitiesRetrieved(long duration) {
LOG.info("Mocked: successful GetActivities call with duration {}", duration);
metrics.succeededGetActivitiesLatencyRetrieved(duration);
}
public void getBulkActivitiesRetrieved(long duration) {
LOG.info("Mocked: successful GetBulkActivities call with duration {}", duration);
metrics.succeededGetBulkActivitiesRetrieved(duration);
}
public void getDeregisterSubClusterRetrieved(long duration) {
LOG.info("Mocked: successful DeregisterSubCluster call with duration {}", duration);
metrics.succeededDeregisterSubClusterRetrieved(duration);
}
public void addToClusterNodeLabelsRetrieved(long duration) {
LOG.info("Mocked: successful AddToClusterNodeLabels call with duration {}", duration);
metrics.succeededAddToClusterNodeLabelsRetrieved(duration);
}
public void getSchedulerConfigurationRetrieved(long duration) {
LOG.info("Mocked: successful GetSchedulerConfiguration call with duration {}", duration);
metrics.succeededGetSchedulerConfigurationRetrieved(duration);
}
public void getUpdateSchedulerConfigurationRetrieved(long duration) {
LOG.info("Mocked: successful UpdateSchedulerConfiguration call with duration {}", duration);
metrics.succeededUpdateSchedulerConfigurationRetrieved(duration);
}
public void getClusterInfoRetrieved(long duration) {
LOG.info("Mocked: successful GetClusterInfoRetrieved call with duration {}", duration);
metrics.succeededGetClusterInfoRetrieved(duration);
}
public void getClusterUserInfoRetrieved(long duration) {
LOG.info("Mocked: successful GetClusterUserInfoRetrieved call with duration {}", duration);
metrics.succeededGetClusterUserInfoRetrieved(duration);
}
public void getUpdateNodeResourceRetrieved(long duration) {
LOG.info("Mocked: successful UpdateNodeResourceRetrieved call with duration {}", duration);
metrics.succeededUpdateNodeResourceRetrieved(duration);
}
public void getRefreshNodesResourcesRetrieved(long duration) {
LOG.info("Mocked: successful RefreshNodesResourcesRetrieved call with duration {}", duration);
metrics.succeededRefreshNodesResourcesRetrieved(duration);
}
public void getCheckForDecommissioningNodesRetrieved(long duration) {
LOG.info("Mocked: successful CheckForDecommissioningNodesRetrieved call with duration {}",
duration);
metrics.succeededCheckForDecommissioningNodesRetrieved(duration);
}
public void getRefreshClusterMaxPriorityRetrieved(long duration) {
LOG.info("Mocked: successful RefreshClusterMaxPriority call with duration {}",
duration);
metrics.succeededRefreshClusterMaxPriorityRetrieved(duration);
}
public void getMapAttributesToNodesRetrieved(long duration) {
LOG.info("Mocked: successful MapAttributesToNodes call with duration {}",
duration);
metrics.succeededMapAttributesToNodesRetrieved(duration);
}
public void getGroupsForUsersRetrieved(long duration) {
LOG.info("Mocked: successful GetGroupsForUsers call with duration {}",
duration);
metrics.succeededGetGroupsForUsersRetrieved(duration);
}
public void getSaveFederationQueuePolicyRetrieved(long duration) {
LOG.info("Mocked: successful SaveFederationQueuePolicy call with duration {}",
duration);
metrics.succeededSaveFederationQueuePolicyRetrieved(duration);
}
public void getBatchSaveFederationQueuePoliciesRetrieved(long duration) {
LOG.info("Mocked: successful BatchSaveFederationQueuePoliciesRetrieved " +
" call with duration {}", duration);
metrics.succeededBatchSaveFederationQueuePoliciesRetrieved(duration);
}
public void getListFederationQueuePoliciesRetrieved(long duration) {
LOG.info("Mocked: successful ListFederationQueuePoliciesRetrieved " +
" call with duration {}", duration);
metrics.succeededListFederationQueuePoliciesRetrieved(duration);
}
public void getFederationSubClustersRetrieved(long duration) {
LOG.info("Mocked: successful GetFederationSubClustersRetrieved " +
" call with duration {}", duration);
metrics.succeededGetFederationSubClustersRetrieved(duration);
}
public void deleteFederationPoliciesByQueuesRetrieved(long duration) {
LOG.info("Mocked: successful DeleteFederationPoliciesByQueuesRetrieved " +
" call with duration {}", duration);
metrics.succeededDeleteFederationPoliciesByQueuesRetrieved(duration);
}
}
@Test
public void testSucceededGetClusterNodes() {
long totalGoodBefore = metrics.getNumSucceededGetClusterNodesRetrieved();
goodSubCluster.getClusterNodes(150);
assertEquals(totalGoodBefore + 1, metrics.getNumSucceededGetClusterNodesRetrieved());
assertEquals(150, metrics.getLatencySucceededGetClusterNodesRetrieved(),
ASSERT_DOUBLE_DELTA);
goodSubCluster.getClusterNodes(300);
assertEquals(totalGoodBefore + 2, metrics.getNumSucceededGetClusterNodesRetrieved());
assertEquals(225, metrics.getLatencySucceededGetClusterNodesRetrieved(),
ASSERT_DOUBLE_DELTA);
}
@Test
public void testGetClusterNodesFailed() {
long totalBadBefore = metrics.getClusterNodesFailedRetrieved();
badSubCluster.getClusterNodes();
assertEquals(totalBadBefore + 1, metrics.getClusterNodesFailedRetrieved());
}
@Test
public void testSucceededGetNodeToLabels() {
long totalGoodBefore = metrics.getNumSucceededGetNodeToLabelsRetrieved();
goodSubCluster.getNodeToLabels(150);
assertEquals(totalGoodBefore + 1, metrics.getNumSucceededGetNodeToLabelsRetrieved());
assertEquals(150, metrics.getLatencySucceededGetNodeToLabelsRetrieved(),
ASSERT_DOUBLE_DELTA);
goodSubCluster.getNodeToLabels(300);
assertEquals(totalGoodBefore + 2, metrics.getNumSucceededGetNodeToLabelsRetrieved());
assertEquals(225, metrics.getLatencySucceededGetNodeToLabelsRetrieved(),
ASSERT_DOUBLE_DELTA);
}
@Test
public void testGetNodeToLabelsFailed() {
long totalBadBefore = metrics.getNodeToLabelsFailedRetrieved();
badSubCluster.getNodeToLabels();
assertEquals(totalBadBefore + 1, metrics.getNodeToLabelsFailedRetrieved());
}
@Test
public void testSucceededLabelsToNodes() {
long totalGoodBefore = metrics.getNumSucceededGetLabelsToNodesRetrieved();
goodSubCluster.getLabelToNodes(150);
assertEquals(totalGoodBefore + 1, metrics.getNumSucceededGetLabelsToNodesRetrieved());
assertEquals(150, metrics.getLatencySucceededGetLabelsToNodesRetrieved(),
ASSERT_DOUBLE_DELTA);
goodSubCluster.getLabelToNodes(300);
assertEquals(totalGoodBefore + 2, metrics.getNumSucceededGetLabelsToNodesRetrieved());
assertEquals(225, metrics.getLatencySucceededGetLabelsToNodesRetrieved(),
ASSERT_DOUBLE_DELTA);
}
@Test
public void testGetLabelsToNodesFailed() {
long totalBadBefore = metrics.getLabelsToNodesFailedRetrieved();
badSubCluster.getLabelToNodes();
assertEquals(totalBadBefore + 1, metrics.getLabelsToNodesFailedRetrieved());
}
@Test
public void testSucceededClusterNodeLabels() {
long totalGoodBefore = metrics.getNumSucceededGetClusterNodeLabelsRetrieved();
goodSubCluster.getClusterNodeLabels(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededGetClusterNodeLabelsRetrieved());
assertEquals(150,
metrics.getLatencySucceededGetClusterNodeLabelsRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getClusterNodeLabels(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededGetClusterNodeLabelsRetrieved());
assertEquals(225, metrics.getLatencySucceededGetClusterNodeLabelsRetrieved(),
ASSERT_DOUBLE_DELTA);
}
@Test
public void testClusterNodeLabelsFailed() {
long totalBadBefore = metrics.getGetClusterNodeLabelsFailedRetrieved();
badSubCluster.getClusterNodeLabels();
assertEquals(totalBadBefore + 1, metrics.getGetClusterNodeLabelsFailedRetrieved());
}
@Test
public void testSucceededQueueUserAcls() {
long totalGoodBefore = metrics.getNumSucceededGetQueueUserAclsRetrieved();
goodSubCluster.getQueueUserAcls(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededGetQueueUserAclsRetrieved());
assertEquals(150,
metrics.getLatencySucceededGetQueueUserAclsRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getQueueUserAcls(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededGetQueueUserAclsRetrieved());
assertEquals(225, metrics.getLatencySucceededGetQueueUserAclsRetrieved(),
ASSERT_DOUBLE_DELTA);
}
@Test
public void testQueueUserAclsFailed() {
long totalBadBefore = metrics.getQueueUserAclsFailedRetrieved();
badSubCluster.getQueueUserAcls();
assertEquals(totalBadBefore + 1, metrics.getQueueUserAclsFailedRetrieved());
}
@Test
public void testSucceededListReservations() {
long totalGoodBefore = metrics.getNumSucceededListReservationsRetrieved();
goodSubCluster.getListReservations(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededListReservationsRetrieved());
assertEquals(150,
metrics.getLatencySucceededListReservationsRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getListReservations(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededListReservationsRetrieved());
assertEquals(225, metrics.getLatencySucceededListReservationsRetrieved(),
ASSERT_DOUBLE_DELTA);
}
@Test
public void testListReservationsFailed() {
long totalBadBefore = metrics.getListReservationsFailedRetrieved();
badSubCluster.getListReservations();
assertEquals(totalBadBefore + 1, metrics.getListReservationsFailedRetrieved());
}
@Test
public void testSucceededGetApplicationAttempts() {
long totalGoodBefore = metrics.getNumSucceededAppAttemptsRetrieved();
goodSubCluster.getApplicationAttempts(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededAppAttemptsRetrieved());
assertEquals(150,
metrics.getLatencySucceededAppAttemptRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getApplicationAttempts(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededAppAttemptsRetrieved());
assertEquals(225, metrics.getLatencySucceededAppAttemptRetrieved(),
ASSERT_DOUBLE_DELTA);
}
@Test
public void testGetApplicationAttemptsFailed() {
long totalBadBefore = metrics.getAppAttemptsFailedRetrieved();
badSubCluster.getApplicationAttempts();
assertEquals(totalBadBefore + 1, metrics.getAppAttemptsFailedRetrieved());
}
@Test
public void testSucceededGetContainerReport() {
long totalGoodBefore = metrics.getNumSucceededGetContainerReportRetrieved();
goodSubCluster.getContainerReport(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededGetContainerReportRetrieved());
assertEquals(150,
metrics.getLatencySucceededGetContainerReportRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getContainerReport(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededGetContainerReportRetrieved());
assertEquals(225, metrics.getLatencySucceededGetContainerReportRetrieved(),
ASSERT_DOUBLE_DELTA);
}
@Test
public void testGetContainerReportFailed() {
long totalBadBefore = metrics.getContainerReportFailedRetrieved();
badSubCluster.getContainerReport();
assertEquals(totalBadBefore + 1, metrics.getContainerReportFailedRetrieved());
}
@Test
public void testSucceededGetContainers() {
long totalGoodBefore = metrics.getNumSucceededGetContainersRetrieved();
goodSubCluster.getContainers(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededGetContainersRetrieved());
assertEquals(150,
metrics.getLatencySucceededGetContainersRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getContainers(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededGetContainersRetrieved());
assertEquals(225, metrics.getLatencySucceededGetContainersRetrieved(),
ASSERT_DOUBLE_DELTA);
}
@Test
public void testGetContainersFailed() {
long totalBadBefore = metrics.getContainersFailedRetrieved();
badSubCluster.getContainers();
assertEquals(totalBadBefore + 1, metrics.getContainersFailedRetrieved());
}
@Test
public void testSucceededGetResourceTypeInfo() {
long totalGoodBefore = metrics.getNumSucceededGetResourceTypeInfoRetrieved();
goodSubCluster.getResourceTypeInfo(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededGetResourceTypeInfoRetrieved());
assertEquals(150,
metrics.getLatencySucceededGetResourceTypeInfoRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getResourceTypeInfo(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededGetResourceTypeInfoRetrieved());
assertEquals(225,
metrics.getLatencySucceededGetResourceTypeInfoRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testGetResourceTypeInfoFailed() {
long totalBadBefore = metrics.getGetResourceTypeInfoRetrieved();
badSubCluster.getResourceTypeInfo();
assertEquals(totalBadBefore + 1, metrics.getGetResourceTypeInfoRetrieved());
}
@Test
public void testSucceededFailApplicationAttempt() {
long totalGoodBefore = metrics.getNumSucceededFailAppAttemptRetrieved();
goodSubCluster.getFailApplicationAttempt(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededFailAppAttemptRetrieved());
assertEquals(150,
metrics.getLatencySucceededFailAppAttemptRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getFailApplicationAttempt(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededFailAppAttemptRetrieved());
assertEquals(225,
metrics.getLatencySucceededFailAppAttemptRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testFailApplicationAttemptFailed() {
long totalBadBefore = metrics.getFailApplicationAttemptFailedRetrieved();
badSubCluster.getFailApplicationAttempt();
assertEquals(totalBadBefore + 1, metrics.getFailApplicationAttemptFailedRetrieved());
}
@Test
public void testSucceededUpdateApplicationPriority() {
long totalGoodBefore = metrics.getNumSucceededUpdateAppPriorityRetrieved();
goodSubCluster.getUpdateApplicationPriority(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededUpdateAppPriorityRetrieved());
assertEquals(150,
metrics.getLatencySucceededUpdateAppPriorityRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getUpdateApplicationPriority(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededUpdateAppPriorityRetrieved());
assertEquals(225,
metrics.getLatencySucceededUpdateAppPriorityRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testUpdateApplicationPriorityFailed() {
long totalBadBefore = metrics.getUpdateApplicationPriorityFailedRetrieved();
badSubCluster.getUpdateApplicationPriority();
assertEquals(totalBadBefore + 1,
metrics.getUpdateApplicationPriorityFailedRetrieved());
}
@Test
public void testSucceededUpdateAppTimeoutsRetrieved() {
long totalGoodBefore = metrics.getNumSucceededUpdateAppTimeoutsRetrieved();
goodSubCluster.getUpdateApplicationTimeouts(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededUpdateAppTimeoutsRetrieved());
assertEquals(150,
metrics.getLatencySucceededUpdateAppTimeoutsRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getUpdateApplicationTimeouts(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededUpdateAppTimeoutsRetrieved());
assertEquals(225,
metrics.getLatencySucceededUpdateAppTimeoutsRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testUpdateAppTimeoutsFailed() {
long totalBadBefore = metrics.getUpdateApplicationTimeoutsFailedRetrieved();
badSubCluster.getUpdateApplicationTimeouts();
assertEquals(totalBadBefore + 1,
metrics.getUpdateApplicationTimeoutsFailedRetrieved());
}
@Test
public void testSucceededSignalToContainerRetrieved() {
long totalGoodBefore = metrics.getNumSucceededSignalToContainerRetrieved();
goodSubCluster.getSignalToContainerTimeouts(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededSignalToContainerRetrieved());
assertEquals(150,
metrics.getLatencySucceededSignalToContainerRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getSignalToContainerTimeouts(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededSignalToContainerRetrieved());
assertEquals(225,
metrics.getLatencySucceededSignalToContainerRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testSignalToContainerFailed() {
long totalBadBefore = metrics.getSignalToContainerFailedRetrieved();
badSubCluster.getSignalContainer();
assertEquals(totalBadBefore + 1,
metrics.getSignalToContainerFailedRetrieved());
}
@Test
public void testSucceededGetQueueInfoRetrieved() {
long totalGoodBefore = metrics.getNumSucceededGetQueueInfoRetrieved();
goodSubCluster.getQueueInfoRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededGetQueueInfoRetrieved());
assertEquals(150,
metrics.getLatencySucceededGetQueueInfoRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getQueueInfoRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededGetQueueInfoRetrieved());
assertEquals(225,
metrics.getLatencySucceededGetQueueInfoRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testGetQueueInfoFailed() {
long totalBadBefore = metrics.getQueueInfoFailedRetrieved();
badSubCluster.getQueueInfo();
assertEquals(totalBadBefore + 1,
metrics.getQueueInfoFailedRetrieved());
}
@Test
public void testSucceededMoveApplicationAcrossQueuesRetrieved() {
long totalGoodBefore = metrics.getNumSucceededMoveApplicationAcrossQueuesRetrieved();
goodSubCluster.moveApplicationAcrossQueuesRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededMoveApplicationAcrossQueuesRetrieved());
assertEquals(150,
metrics.getLatencySucceededMoveApplicationAcrossQueuesRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.moveApplicationAcrossQueuesRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededMoveApplicationAcrossQueuesRetrieved());
assertEquals(225,
metrics.getLatencySucceededMoveApplicationAcrossQueuesRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testMoveApplicationAcrossQueuesRetrievedFailed() {
long totalBadBefore = metrics.getMoveApplicationAcrossQueuesFailedRetrieved();
badSubCluster.moveApplicationAcrossQueuesFailed();
assertEquals(totalBadBefore + 1,
metrics.getMoveApplicationAcrossQueuesFailedRetrieved());
}
@Test
public void testSucceededGetResourceProfilesRetrieved() {
long totalGoodBefore = metrics.getNumSucceededGetResourceProfilesRetrieved();
goodSubCluster.getResourceProfilesRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededGetResourceProfilesRetrieved());
assertEquals(150,
metrics.getLatencySucceededGetResourceProfilesRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getResourceProfilesRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededGetResourceProfilesRetrieved());
assertEquals(225,
metrics.getLatencySucceededGetResourceProfilesRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testGetResourceProfilesRetrievedFailed() {
long totalBadBefore = metrics.getResourceProfilesFailedRetrieved();
badSubCluster.getResourceProfilesFailed();
assertEquals(totalBadBefore + 1,
metrics.getResourceProfilesFailedRetrieved());
}
@Test
public void testSucceededGetResourceProfileRetrieved() {
long totalGoodBefore = metrics.getNumSucceededGetResourceProfileRetrieved();
goodSubCluster.getResourceProfileRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededGetResourceProfileRetrieved());
assertEquals(150,
metrics.getLatencySucceededGetResourceProfileRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getResourceProfileRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededGetResourceProfileRetrieved());
assertEquals(225,
metrics.getLatencySucceededGetResourceProfileRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testGetResourceProfileRetrievedFailed() {
long totalBadBefore = metrics.getResourceProfileFailedRetrieved();
badSubCluster.getResourceProfileFailed();
assertEquals(totalBadBefore + 1,
metrics.getResourceProfileFailedRetrieved());
}
@Test
public void testSucceededGetAttributesToNodesRetrieved() {
long totalGoodBefore = metrics.getNumSucceededGetAttributesToNodesRetrieved();
goodSubCluster.getAttributesToNodesRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededGetAttributesToNodesRetrieved());
assertEquals(150,
metrics.getLatencySucceededGetAttributesToNodesRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getAttributesToNodesRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededGetAttributesToNodesRetrieved());
assertEquals(225,
metrics.getLatencySucceededGetAttributesToNodesRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testGetAttributesToNodesRetrievedFailed() {
long totalBadBefore = metrics.getAttributesToNodesFailedRetrieved();
badSubCluster.getAttributesToNodesFailed();
assertEquals(totalBadBefore + 1,
metrics.getAttributesToNodesFailedRetrieved());
}
@Test
public void testGetClusterNodeAttributesRetrieved() {
long totalGoodBefore = metrics.getNumSucceededGetClusterNodeAttributesRetrieved();
goodSubCluster.getClusterNodeAttributesRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededGetClusterNodeAttributesRetrieved());
assertEquals(150,
metrics.getLatencySucceededGetClusterNodeAttributesRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getClusterNodeAttributesRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededGetClusterNodeAttributesRetrieved());
assertEquals(225,
metrics.getLatencySucceededGetClusterNodeAttributesRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testGetClusterNodeAttributesRetrievedFailed() {
long totalBadBefore = metrics.getClusterNodeAttributesFailedRetrieved();
badSubCluster.getClusterNodeAttributesFailed();
assertEquals(totalBadBefore + 1,
metrics.getClusterNodeAttributesFailedRetrieved());
}
@Test
public void testGetNodesToAttributesRetrieved() {
long totalGoodBefore = metrics.getNumSucceededGetNodesToAttributesRetrieved();
goodSubCluster.getNodesToAttributesRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededGetNodesToAttributesRetrieved());
assertEquals(150,
metrics.getLatencySucceededGetNodesToAttributesRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getNodesToAttributesRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededGetNodesToAttributesRetrieved());
assertEquals(225,
metrics.getLatencySucceededGetNodesToAttributesRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testGetNodesToAttributesRetrievedFailed() {
long totalBadBefore = metrics.getNodesToAttributesFailedRetrieved();
badSubCluster.getNodesToAttributesFailed();
assertEquals(totalBadBefore + 1,
metrics.getNodesToAttributesFailedRetrieved());
}
@Test
public void testGetNewReservationRetrieved() {
long totalGoodBefore = metrics.getNumSucceededGetNewReservationRetrieved();
goodSubCluster.getNewReservationRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededGetNewReservationRetrieved());
assertEquals(150,
metrics.getLatencySucceededGetNewReservationRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getNewReservationRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededGetNewReservationRetrieved());
assertEquals(225,
metrics.getLatencySucceededGetNewReservationRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testGetNewReservationRetrievedFailed() {
long totalBadBefore = metrics.getNewReservationFailedRetrieved();
badSubCluster.getNewReservationFailed();
assertEquals(totalBadBefore + 1,
metrics.getNewReservationFailedRetrieved());
}
@Test
public void testGetSubmitReservationRetrieved() {
long totalGoodBefore = metrics.getNumSucceededSubmitReservationRetrieved();
goodSubCluster.getSubmitReservationRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededSubmitReservationRetrieved());
assertEquals(150,
metrics.getLatencySucceededSubmitReservationRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getSubmitReservationRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededSubmitReservationRetrieved());
assertEquals(225,
metrics.getLatencySucceededSubmitReservationRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testGetSubmitReservationRetrievedFailed() {
long totalBadBefore = metrics.getSubmitReservationFailedRetrieved();
badSubCluster.getSubmitReservationFailed();
assertEquals(totalBadBefore + 1,
metrics.getSubmitReservationFailedRetrieved());
}
@Test
public void testGetUpdateReservationRetrieved() {
long totalGoodBefore = metrics.getNumSucceededUpdateReservationRetrieved();
goodSubCluster.getUpdateReservationRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededUpdateReservationRetrieved());
assertEquals(150,
metrics.getLatencySucceededUpdateReservationRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getUpdateReservationRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededUpdateReservationRetrieved());
assertEquals(225,
metrics.getLatencySucceededUpdateReservationRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testGetUpdateReservationRetrievedFailed() {
long totalBadBefore = metrics.getUpdateReservationFailedRetrieved();
badSubCluster.getUpdateReservationFailed();
assertEquals(totalBadBefore + 1,
metrics.getUpdateReservationFailedRetrieved());
}
@Test
public void testGetDeleteReservationRetrieved() {
long totalGoodBefore = metrics.getNumSucceededDeleteReservationRetrieved();
goodSubCluster.getDeleteReservationRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededDeleteReservationRetrieved());
assertEquals(150,
metrics.getLatencySucceededDeleteReservationRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getDeleteReservationRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededDeleteReservationRetrieved());
assertEquals(225,
metrics.getLatencySucceededDeleteReservationRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testGetDeleteReservationRetrievedFailed() {
long totalBadBefore = metrics.getDeleteReservationFailedRetrieved();
badSubCluster.getDeleteReservationFailed();
assertEquals(totalBadBefore + 1,
metrics.getDeleteReservationFailedRetrieved());
}
@Test
public void testGetListReservationRetrieved() {
long totalGoodBefore = metrics.getNumSucceededListReservationRetrieved();
goodSubCluster.getListReservationRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededListReservationRetrieved());
assertEquals(150,
metrics.getLatencySucceededListReservationRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getListReservationRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededListReservationRetrieved());
assertEquals(225,
metrics.getLatencySucceededListReservationRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testGetListReservationRetrievedFailed() {
long totalBadBefore = metrics.getListReservationFailedRetrieved();
badSubCluster.getListReservationFailed();
assertEquals(totalBadBefore + 1,
metrics.getListReservationFailedRetrieved());
}
@Test
public void testGetAppActivitiesRetrieved() {
long totalGoodBefore = metrics.getNumSucceededGetAppActivitiesRetrieved();
goodSubCluster.getAppActivitiesRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededGetAppActivitiesRetrieved());
assertEquals(150,
metrics.getLatencySucceededGetAppActivitiesRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getAppActivitiesRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededGetAppActivitiesRetrieved());
assertEquals(225,
metrics.getLatencySucceededGetAppActivitiesRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testGetAppActivitiesRetrievedFailed() {
long totalBadBefore = metrics.getAppActivitiesFailedRetrieved();
badSubCluster.getAppActivitiesFailed();
assertEquals(totalBadBefore + 1,
metrics.getAppActivitiesFailedRetrieved());
}
@Test
public void testGetAppStatisticsLatencyRetrieved() {
long totalGoodBefore = metrics.getNumSucceededGetAppStatisticsRetrieved();
goodSubCluster.getAppStatisticsRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededGetAppStatisticsRetrieved());
assertEquals(150,
metrics.getLatencySucceededGetAppStatisticsRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getAppStatisticsRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededGetAppStatisticsRetrieved());
assertEquals(225,
metrics.getLatencySucceededGetAppStatisticsRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testGetAppStatisticsRetrievedFailed() {
long totalBadBefore = metrics.getAppStatisticsFailedRetrieved();
badSubCluster.getAppStatisticsFailed();
assertEquals(totalBadBefore + 1,
metrics.getAppStatisticsFailedRetrieved());
}
@Test
public void testGetAppPriorityLatencyRetrieved() {
long totalGoodBefore = metrics.getNumSucceededGetAppPriorityRetrieved();
goodSubCluster.getAppPriorityRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededGetAppPriorityRetrieved());
assertEquals(150,
metrics.getLatencySucceededGetAppPriorityRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getAppPriorityRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededGetAppPriorityRetrieved());
assertEquals(225,
metrics.getLatencySucceededGetAppPriorityRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testGetAppPriorityRetrievedFailed() {
long totalBadBefore = metrics.getAppPriorityFailedRetrieved();
badSubCluster.getAppPriorityFailed();
assertEquals(totalBadBefore + 1,
metrics.getAppPriorityFailedRetrieved());
}
@Test
public void testGetAppQueueLatencyRetrieved() {
long totalGoodBefore = metrics.getNumSucceededGetAppQueueRetrieved();
goodSubCluster.getAppQueueRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededGetAppQueueRetrieved());
assertEquals(150,
metrics.getLatencySucceededGetAppQueueRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getAppQueueRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededGetAppQueueRetrieved());
assertEquals(225,
metrics.getLatencySucceededGetAppQueueRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testGetAppQueueRetrievedFailed() {
long totalBadBefore = metrics.getAppQueueFailedRetrieved();
badSubCluster.getAppQueueFailed();
assertEquals(totalBadBefore + 1,
metrics.getAppQueueFailedRetrieved());
}
@Test
public void testUpdateAppQueueLatencyRetrieved() {
long totalGoodBefore = metrics.getNumSucceededUpdateAppQueueRetrieved();
goodSubCluster.getUpdateQueueRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededUpdateAppQueueRetrieved());
assertEquals(150,
metrics.getLatencySucceededUpdateAppQueueRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getUpdateQueueRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededUpdateAppQueueRetrieved());
assertEquals(225,
metrics.getLatencySucceededUpdateAppQueueRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testUpdateAppQueueRetrievedFailed() {
long totalBadBefore = metrics.getUpdateAppQueueFailedRetrieved();
badSubCluster.getUpdateQueueFailed();
assertEquals(totalBadBefore + 1,
metrics.getUpdateAppQueueFailedRetrieved());
}
@Test
public void testGetAppTimeoutLatencyRetrieved() {
long totalGoodBefore = metrics.getNumSucceededGetAppTimeoutRetrieved();
goodSubCluster.getAppTimeoutRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededGetAppTimeoutRetrieved());
assertEquals(150,
metrics.getLatencySucceededGetAppTimeoutRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getAppTimeoutRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededGetAppTimeoutRetrieved());
assertEquals(225,
metrics.getLatencySucceededGetAppTimeoutRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testGetAppTimeoutRetrievedFailed() {
long totalBadBefore = metrics.getAppTimeoutFailedRetrieved();
badSubCluster.getAppTimeoutFailed();
assertEquals(totalBadBefore + 1,
metrics.getAppTimeoutFailedRetrieved());
}
@Test
public void testGetAppTimeoutsLatencyRetrieved() {
long totalGoodBefore = metrics.getNumSucceededGetAppTimeoutsRetrieved();
goodSubCluster.getAppTimeoutsRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededGetAppTimeoutsRetrieved());
assertEquals(150,
metrics.getLatencySucceededGetAppTimeoutsRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getAppTimeoutsRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededGetAppTimeoutsRetrieved());
assertEquals(225,
metrics.getLatencySucceededGetAppTimeoutsRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testGetAppTimeoutsRetrievedFailed() {
long totalBadBefore = metrics.getAppTimeoutsFailedRetrieved();
badSubCluster.getAppTimeoutsFailed();
assertEquals(totalBadBefore + 1,
metrics.getAppTimeoutsFailedRetrieved());
}
@Test
public void testGetRMNodeLabelsRetrieved() {
long totalGoodBefore = metrics.getNumSucceededGetRMNodeLabelsRetrieved();
goodSubCluster.getRMNodeLabelsRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededGetRMNodeLabelsRetrieved());
assertEquals(150,
metrics.getLatencySucceededGetRMNodeLabelsRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getRMNodeLabelsRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededGetRMNodeLabelsRetrieved());
assertEquals(225,
metrics.getLatencySucceededGetRMNodeLabelsRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testGetRMNodeLabelsRetrievedFailed() {
long totalBadBefore = metrics.getRMNodeLabelsFailedRetrieved();
badSubCluster.getRMNodeLabelsFailed();
assertEquals(totalBadBefore + 1,
metrics.getRMNodeLabelsFailedRetrieved());
}
@Test
public void testCheckUserAccessToQueueRetrieved() {
long totalGoodBefore = metrics.getNumSucceededCheckUserAccessToQueueRetrieved();
goodSubCluster.getCheckUserAccessToQueueRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededCheckUserAccessToQueueRetrieved());
assertEquals(150,
metrics.getLatencySucceededCheckUserAccessToQueueRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getCheckUserAccessToQueueRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededCheckUserAccessToQueueRetrieved());
assertEquals(225,
metrics.getLatencySucceededCheckUserAccessToQueueRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testCheckUserAccessToQueueRetrievedFailed() {
long totalBadBefore = metrics.getCheckUserAccessToQueueFailedRetrieved();
badSubCluster.getCheckUserAccessToQueueFailed();
assertEquals(totalBadBefore + 1,
metrics.getCheckUserAccessToQueueFailedRetrieved());
}
@Test
public void testGetDelegationTokenRetrieved() {
long totalGoodBefore = metrics.getNumSucceededGetDelegationTokenRetrieved();
goodSubCluster.getGetDelegationTokenRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededGetDelegationTokenRetrieved());
assertEquals(150,
metrics.getLatencySucceededGetDelegationTokenRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getGetDelegationTokenRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededGetDelegationTokenRetrieved());
assertEquals(225,
metrics.getLatencySucceededGetDelegationTokenRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testGetDelegationTokenRetrievedFailed() {
long totalBadBefore = metrics.getDelegationTokenFailedRetrieved();
badSubCluster.getDelegationTokenFailed();
assertEquals(totalBadBefore + 1,
metrics.getDelegationTokenFailedRetrieved());
}
@Test
public void testRenewDelegationTokenRetrieved() {
long totalGoodBefore = metrics.getNumSucceededRenewDelegationTokenRetrieved();
goodSubCluster.getRenewDelegationTokenRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededRenewDelegationTokenRetrieved());
assertEquals(150,
metrics.getLatencySucceededRenewDelegationTokenRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getRenewDelegationTokenRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededRenewDelegationTokenRetrieved());
assertEquals(225,
metrics.getLatencySucceededRenewDelegationTokenRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testRenewDelegationTokenRetrievedFailed() {
long totalBadBefore = metrics.getRenewDelegationTokenFailedRetrieved();
badSubCluster.getRenewDelegationTokenFailed();
assertEquals(totalBadBefore + 1,
metrics.getRenewDelegationTokenFailedRetrieved());
}
@Test
public void testRefreshAdminAclsRetrieved() {
long totalGoodBefore = metrics.getNumSucceededRefreshAdminAclsRetrieved();
goodSubCluster.getRefreshAdminAclsRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededRefreshAdminAclsRetrieved());
assertEquals(150,
metrics.getLatencySucceededRefreshAdminAclsRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getRefreshAdminAclsRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededRefreshAdminAclsRetrieved());
assertEquals(225,
metrics.getLatencySucceededRefreshAdminAclsRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testRefreshAdminAclsRetrievedFailed() {
long totalBadBefore = metrics.getNumRefreshAdminAclsFailedRetrieved();
badSubCluster.getRefreshAdminAclsFailedRetrieved();
assertEquals(totalBadBefore + 1,
metrics.getNumRefreshAdminAclsFailedRetrieved());
}
@Test
public void testRefreshServiceAclsRetrieved() {
long totalGoodBefore = metrics.getNumSucceededRefreshServiceAclsRetrieved();
goodSubCluster.getRefreshServiceAclsRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededRefreshServiceAclsRetrieved());
assertEquals(150,
metrics.getLatencySucceededRefreshServiceAclsRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getRefreshServiceAclsRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededRefreshServiceAclsRetrieved());
assertEquals(225,
metrics.getLatencySucceededRefreshServiceAclsRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testRefreshServiceAclsRetrievedFailed() {
long totalBadBefore = metrics.getNumRefreshServiceAclsFailedRetrieved();
badSubCluster.getRefreshServiceAclsFailedRetrieved();
assertEquals(totalBadBefore + 1,
metrics.getNumRefreshServiceAclsFailedRetrieved());
}
@Test
public void testReplaceLabelsOnNodesRetrieved() {
long totalGoodBefore = metrics.getNumSucceededReplaceLabelsOnNodesRetrieved();
goodSubCluster.getNumSucceededReplaceLabelsOnNodesRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededReplaceLabelsOnNodesRetrieved());
assertEquals(150,
metrics.getLatencySucceededReplaceLabelsOnNodesRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getNumSucceededReplaceLabelsOnNodesRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededReplaceLabelsOnNodesRetrieved());
assertEquals(225,
metrics.getLatencySucceededReplaceLabelsOnNodesRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testReplaceLabelsOnNodesRetrievedFailed() {
long totalBadBefore = metrics.getNumReplaceLabelsOnNodesFailedRetrieved();
badSubCluster.getReplaceLabelsOnNodesFailed();
assertEquals(totalBadBefore + 1,
metrics.getNumReplaceLabelsOnNodesFailedRetrieved());
}
@Test
public void testReplaceLabelsOnNodeRetrieved() {
long totalGoodBefore = metrics.getNumSucceededReplaceLabelsOnNodeRetrieved();
goodSubCluster.getNumSucceededReplaceLabelsOnNodeRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededReplaceLabelsOnNodeRetrieved());
assertEquals(150,
metrics.getLatencySucceededReplaceLabelsOnNodeRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getNumSucceededReplaceLabelsOnNodeRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededReplaceLabelsOnNodeRetrieved());
assertEquals(225,
metrics.getLatencySucceededReplaceLabelsOnNodeRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testReplaceLabelOnNodeRetrievedFailed() {
long totalBadBefore = metrics.getNumReplaceLabelsOnNodeFailedRetrieved();
badSubCluster.getReplaceLabelsOnNodeFailed();
assertEquals(totalBadBefore + 1,
metrics.getNumReplaceLabelsOnNodeFailedRetrieved());
}
@Test
public void testDumpSchedulerLogsRetrieved() {
long totalGoodBefore = metrics.getNumSucceededDumpSchedulerLogsRetrieved();
goodSubCluster.getDumpSchedulerLogsRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededDumpSchedulerLogsRetrieved());
assertEquals(150,
metrics.getLatencySucceededDumpSchedulerLogsRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getDumpSchedulerLogsRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededDumpSchedulerLogsRetrieved());
assertEquals(225,
metrics.getLatencySucceededDumpSchedulerLogsRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testDumpSchedulerLogsRetrievedFailed() {
long totalBadBefore = metrics.getDumpSchedulerLogsFailedRetrieved();
badSubCluster.getDumpSchedulerLogsFailed();
assertEquals(totalBadBefore + 1,
metrics.getDumpSchedulerLogsFailedRetrieved());
}
@Test
public void testGetActivitiesRetrieved() {
long totalGoodBefore = metrics.getNumSucceededGetActivitiesRetrieved();
goodSubCluster.getActivitiesRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededGetActivitiesRetrieved());
assertEquals(150,
metrics.getLatencySucceededGetActivitiesRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getActivitiesRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededGetActivitiesRetrieved());
assertEquals(225,
metrics.getLatencySucceededGetActivitiesRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testGetActivitiesRetrievedFailed() {
long totalBadBefore = metrics.getActivitiesFailedRetrieved();
badSubCluster.getActivitiesFailed();
assertEquals(totalBadBefore + 1,
metrics.getActivitiesFailedRetrieved());
}
@Test
public void testGetBulkActivitiesRetrieved() {
long totalGoodBefore = metrics.getNumSucceededGetBulkActivitiesRetrieved();
goodSubCluster.getBulkActivitiesRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededGetBulkActivitiesRetrieved());
assertEquals(150,
metrics.getLatencySucceededGetBulkActivitiesRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getBulkActivitiesRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededGetBulkActivitiesRetrieved());
assertEquals(225,
metrics.getLatencySucceededGetBulkActivitiesRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testGetBulkActivitiesRetrievedFailed() {
long totalBadBefore = metrics.getBulkActivitiesFailedRetrieved();
badSubCluster.getBulkActivitiesFailed();
assertEquals(totalBadBefore + 1,
metrics.getBulkActivitiesFailedRetrieved());
}
@Test
public void testDeregisterSubClusterRetrieved() {
long totalGoodBefore = metrics.getNumSucceededDeregisterSubClusterRetrieved();
goodSubCluster.getDeregisterSubClusterRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededDeregisterSubClusterRetrieved());
assertEquals(150,
metrics.getLatencySucceededDeregisterSubClusterRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getDeregisterSubClusterRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededDeregisterSubClusterRetrieved());
assertEquals(225,
metrics.getLatencySucceededDeregisterSubClusterRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testDeregisterSubClusterRetrievedFailed() {
long totalBadBefore = metrics.getDeregisterSubClusterFailedRetrieved();
badSubCluster.getDeregisterSubClusterFailed();
assertEquals(totalBadBefore + 1,
metrics.getDeregisterSubClusterFailedRetrieved());
}
@Test
public void testAddToClusterNodeLabelsRetrieved() {
long totalGoodBefore = metrics.getNumSucceededAddToClusterNodeLabelsRetrieved();
goodSubCluster.addToClusterNodeLabelsRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededAddToClusterNodeLabelsRetrieved());
assertEquals(150,
metrics.getLatencySucceededAddToClusterNodeLabelsRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.addToClusterNodeLabelsRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededAddToClusterNodeLabelsRetrieved());
assertEquals(225,
metrics.getLatencySucceededAddToClusterNodeLabelsRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testGetSchedulerConfigurationRetrievedFailed() {
long totalBadBefore = metrics.getSchedulerConfigurationFailedRetrieved();
badSubCluster.getSchedulerConfigurationFailed();
assertEquals(totalBadBefore + 1,
metrics.getSchedulerConfigurationFailedRetrieved());
}
@Test
public void testGetSchedulerConfigurationRetrieved() {
long totalGoodBefore = metrics.getNumSucceededGetSchedulerConfigurationRetrieved();
goodSubCluster.getSchedulerConfigurationRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededGetSchedulerConfigurationRetrieved());
assertEquals(150,
metrics.getLatencySucceededGetSchedulerConfigurationRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getSchedulerConfigurationRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededGetSchedulerConfigurationRetrieved());
assertEquals(225,
metrics.getLatencySucceededGetSchedulerConfigurationRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testUpdateSchedulerConfigurationRetrievedFailed() {
long totalBadBefore = metrics.getUpdateSchedulerConfigurationFailedRetrieved();
badSubCluster.updateSchedulerConfigurationFailedRetrieved();
assertEquals(totalBadBefore + 1,
metrics.getUpdateSchedulerConfigurationFailedRetrieved());
}
@Test
public void testUpdateSchedulerConfigurationRetrieved() {
long totalGoodBefore = metrics.getNumSucceededUpdateSchedulerConfigurationRetrieved();
goodSubCluster.getUpdateSchedulerConfigurationRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededUpdateSchedulerConfigurationRetrieved());
assertEquals(150,
metrics.getLatencySucceededUpdateSchedulerConfigurationRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getUpdateSchedulerConfigurationRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededUpdateSchedulerConfigurationRetrieved());
assertEquals(225,
metrics.getLatencySucceededUpdateSchedulerConfigurationRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testGetClusterInfoRetrievedFailed() {
long totalBadBefore = metrics.getClusterInfoFailedRetrieved();
badSubCluster.getClusterInfoFailed();
assertEquals(totalBadBefore + 1, metrics.getClusterInfoFailedRetrieved());
}
@Test
public void testGetClusterInfoRetrieved() {
long totalGoodBefore = metrics.getNumSucceededGetClusterInfoRetrieved();
goodSubCluster.getClusterInfoRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededGetClusterInfoRetrieved());
assertEquals(150,
metrics.getLatencySucceededGetClusterInfoRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getClusterInfoRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededGetClusterInfoRetrieved());
assertEquals(225,
metrics.getLatencySucceededGetClusterInfoRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testGetClusterUserInfoRetrievedFailed() {
long totalBadBefore = metrics.getClusterUserInfoFailedRetrieved();
badSubCluster.getClusterUserInfoFailed();
assertEquals(totalBadBefore + 1, metrics.getClusterUserInfoFailedRetrieved());
}
@Test
public void testGetClusterUserInfoRetrieved() {
long totalGoodBefore = metrics.getNumSucceededGetClusterUserInfoRetrieved();
goodSubCluster.getClusterUserInfoRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededGetClusterUserInfoRetrieved());
assertEquals(150,
metrics.getLatencySucceededGetClusterUserInfoRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getClusterUserInfoRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededGetClusterUserInfoRetrieved());
assertEquals(225,
metrics.getLatencySucceededGetClusterUserInfoRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testUpdateNodeResourceRetrievedFailed() {
long totalBadBefore = metrics.getUpdateNodeResourceFailedRetrieved();
badSubCluster.getUpdateNodeResourceFailed();
assertEquals(totalBadBefore + 1, metrics.getUpdateNodeResourceFailedRetrieved());
}
@Test
public void testUpdateNodeResourceRetrieved() {
long totalGoodBefore = metrics.getNumSucceededUpdateNodeResourceRetrieved();
goodSubCluster.getUpdateNodeResourceRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededUpdateNodeResourceRetrieved());
assertEquals(150,
metrics.getLatencySucceededUpdateNodeResourceRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getUpdateNodeResourceRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededUpdateNodeResourceRetrieved());
assertEquals(225,
metrics.getLatencySucceededUpdateNodeResourceRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testRefreshNodesResourcesRetrievedFailed() {
long totalBadBefore = metrics.getRefreshNodesResourcesFailedRetrieved();
badSubCluster.getRefreshNodesResourcesFailed();
assertEquals(totalBadBefore + 1, metrics.getRefreshNodesResourcesFailedRetrieved());
}
@Test
public void testRefreshNodesResourcesRetrieved() {
long totalGoodBefore = metrics.getNumSucceededRefreshNodesResourcesRetrieved();
goodSubCluster.getRefreshNodesResourcesRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededRefreshNodesResourcesRetrieved());
assertEquals(150,
metrics.getLatencySucceededRefreshNodesResourcesRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getRefreshNodesResourcesRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededRefreshNodesResourcesRetrieved());
assertEquals(225,
metrics.getLatencySucceededRefreshNodesResourcesRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testCheckForDecommissioningNodesFailedRetrieved() {
long totalBadBefore = metrics.getCheckForDecommissioningNodesFailedRetrieved();
badSubCluster.getCheckForDecommissioningNodesFailed();
assertEquals(totalBadBefore + 1,
metrics.getCheckForDecommissioningNodesFailedRetrieved());
}
@Test
public void testCheckForDecommissioningNodesRetrieved() {
long totalGoodBefore = metrics.getNumSucceededCheckForDecommissioningNodesRetrieved();
goodSubCluster.getCheckForDecommissioningNodesRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededCheckForDecommissioningNodesRetrieved());
assertEquals(150,
metrics.getLatencySucceededCheckForDecommissioningNodesRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getCheckForDecommissioningNodesRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededCheckForDecommissioningNodesRetrieved());
assertEquals(225,
metrics.getLatencySucceededCheckForDecommissioningNodesRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testRefreshClusterMaxPriorityFailedRetrieved() {
long totalBadBefore = metrics.getRefreshClusterMaxPriorityFailedRetrieved();
badSubCluster.getRefreshClusterMaxPriorityFailed();
assertEquals(totalBadBefore + 1, metrics.getRefreshClusterMaxPriorityFailedRetrieved());
}
@Test
public void testRefreshClusterMaxPriorityRetrieved() {
long totalGoodBefore = metrics.getNumSucceededRefreshClusterMaxPriorityRetrieved();
goodSubCluster.getRefreshClusterMaxPriorityRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededRefreshClusterMaxPriorityRetrieved());
assertEquals(150,
metrics.getLatencySucceededRefreshClusterMaxPriorityRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getRefreshClusterMaxPriorityRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededRefreshClusterMaxPriorityRetrieved());
assertEquals(225,
metrics.getLatencySucceededRefreshClusterMaxPriorityRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testGetMapAttributesToNodesFailedRetrieved() {
long totalBadBefore = metrics.getMapAttributesToNodesFailedRetrieved();
badSubCluster.getMapAttributesToNodesFailed();
assertEquals(totalBadBefore + 1, metrics.getMapAttributesToNodesFailedRetrieved());
}
@Test
public void testGetMapAttributesToNodesRetrieved() {
long totalGoodBefore = metrics.getNumSucceededMapAttributesToNodesRetrieved();
goodSubCluster.getMapAttributesToNodesRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededMapAttributesToNodesRetrieved());
assertEquals(150,
metrics.getLatencySucceededMapAttributesToNodesRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getMapAttributesToNodesRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededMapAttributesToNodesRetrieved());
assertEquals(225,
metrics.getLatencySucceededMapAttributesToNodesRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testGetGroupsForUserFailedRetrieved() {
long totalBadBefore = metrics.getGroupsForUserFailedRetrieved();
badSubCluster.getGroupsForUserFailed();
assertEquals(totalBadBefore + 1, metrics.getGroupsForUserFailedRetrieved());
}
@Test
public void testGetGroupsForUserRetrieved() {
long totalGoodBefore = metrics.getNumSucceededGetGroupsForUsersRetrieved();
goodSubCluster.getGroupsForUsersRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededGetGroupsForUsersRetrieved());
assertEquals(150,
metrics.getLatencySucceededGetGroupsForUsersRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getGroupsForUsersRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededGetGroupsForUsersRetrieved());
assertEquals(225,
metrics.getLatencySucceededGetGroupsForUsersRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testSaveFederationQueuePolicyFailedRetrieved() {
long totalBadBefore = metrics.getSaveFederationQueuePolicyFailedRetrieved();
badSubCluster.getSaveFederationQueuePolicyFailedRetrieved();
assertEquals(totalBadBefore + 1, metrics.getSaveFederationQueuePolicyFailedRetrieved());
}
@Test
public void testSaveFederationQueuePolicyRetrieved() {
long totalGoodBefore = metrics.getNumSucceededSaveFederationQueuePolicyRetrieved();
goodSubCluster.getSaveFederationQueuePolicyRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededSaveFederationQueuePolicyRetrieved());
assertEquals(150,
metrics.getLatencySucceededSaveFederationQueuePolicyRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getSaveFederationQueuePolicyRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededSaveFederationQueuePolicyRetrieved());
assertEquals(225,
metrics.getLatencySucceededSaveFederationQueuePolicyRetrieved(), ASSERT_DOUBLE_DELTA);
}
@Test
public void testGetBatchSaveFederationQueuePoliciesFailedRetrieved() {
long totalBadBefore = metrics.getBatchSaveFederationQueuePoliciesFailedRetrieved();
badSubCluster.getBatchSaveFederationQueuePoliciesFailedRetrieved();
assertEquals(totalBadBefore + 1,
metrics.getBatchSaveFederationQueuePoliciesFailedRetrieved());
}
@Test
public void testGetBatchSaveFederationQueuePoliciesRetrieved() {
long totalGoodBefore = metrics.getNumSucceededBatchSaveFederationQueuePoliciesRetrieved();
goodSubCluster.getBatchSaveFederationQueuePoliciesRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededBatchSaveFederationQueuePoliciesRetrieved());
assertEquals(150,
metrics.getLatencySucceededBatchSaveFederationQueuePoliciesRetrieved(),
ASSERT_DOUBLE_DELTA);
goodSubCluster.getBatchSaveFederationQueuePoliciesRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededBatchSaveFederationQueuePoliciesRetrieved());
assertEquals(225,
metrics.getLatencySucceededBatchSaveFederationQueuePoliciesRetrieved(),
ASSERT_DOUBLE_DELTA);
}
@Test
public void testListFederationQueuePoliciesFailedRetrieved() {
long totalBadBefore = metrics.getListFederationQueuePoliciesFailedRetrieved();
badSubCluster.getListFederationQueuePoliciesFailedRetrieved();
assertEquals(totalBadBefore + 1,
metrics.getListFederationQueuePoliciesFailedRetrieved());
}
  @Test
  public void testListFederationQueuePoliciesRetrieved() {
    // Two successful calls (latencies 150 and 300) should advance the
    // success counter by one each and leave the latency metric at their
    // average (225).
    // NOTE(review): the success-counter getter is named
    // getNumSucceededListFederationQueuePoliciesFailedRetrieved() -- the
    // "Failed" infix looks like a naming slip in the metrics API; confirm
    // against the metrics class before renaming anything here.
    long totalGoodBefore = metrics.getNumSucceededListFederationQueuePoliciesFailedRetrieved();
    goodSubCluster.getListFederationQueuePoliciesRetrieved(150);
    assertEquals(totalGoodBefore + 1,
        metrics.getNumSucceededListFederationQueuePoliciesFailedRetrieved());
    assertEquals(150,
        metrics.getLatencySucceededListFederationQueuePoliciesRetrieved(), ASSERT_DOUBLE_DELTA);
    goodSubCluster.getListFederationQueuePoliciesRetrieved(300);
    assertEquals(totalGoodBefore + 2,
        metrics.getNumSucceededListFederationQueuePoliciesFailedRetrieved());
    assertEquals(225,
        metrics.getLatencySucceededListFederationQueuePoliciesRetrieved(), ASSERT_DOUBLE_DELTA);
  }
@Test
public void testGetFederationSubClustersFailedRetrieved() {
long totalBadBefore = metrics.getFederationSubClustersFailedRetrieved();
badSubCluster.getFederationSubClustersFailedRetrieved();
assertEquals(totalBadBefore + 1,
metrics.getFederationSubClustersFailedRetrieved());
}
@Test
public void testGetFederationSubClustersRetrieved() {
long totalGoodBefore = metrics.getNumSucceededGetFederationSubClustersRetrieved();
goodSubCluster.getFederationSubClustersRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededGetFederationSubClustersRetrieved());
assertEquals(150,
metrics.getLatencySucceededGetFederationSubClustersRetrieved(), ASSERT_DOUBLE_DELTA);
goodSubCluster.getFederationSubClustersRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededGetFederationSubClustersRetrieved());
assertEquals(225,
metrics.getLatencySucceededGetFederationSubClustersRetrieved(), ASSERT_DOUBLE_DELTA);
}
  @Test
  public void testDeleteFederationPoliciesByQueuesFailedRetrieved() {
    // One failing call should advance the failure counter by exactly one.
    // NOTE(review): unlike the sibling "...Failed" tests, the counter is
    // read via getDeleteFederationPoliciesByQueuesRetrieved() (no "Failed"
    // infix). Confirm against the metrics class that this getter really is
    // the failure counter before changing it.
    long totalBadBefore = metrics.getDeleteFederationPoliciesByQueuesRetrieved();
    badSubCluster.getDeleteFederationPoliciesByQueuesFailedRetrieved();
    assertEquals(totalBadBefore + 1,
        metrics.getDeleteFederationPoliciesByQueuesRetrieved());
  }
@Test
public void testDeleteFederationPoliciesByQueuesRetrieved() {
long totalGoodBefore = metrics.getNumSucceededDeleteFederationPoliciesByQueuesRetrieved();
goodSubCluster.deleteFederationPoliciesByQueuesRetrieved(150);
assertEquals(totalGoodBefore + 1,
metrics.getNumSucceededDeleteFederationPoliciesByQueuesRetrieved());
assertEquals(150,
metrics.getLatencySucceededDeleteFederationPoliciesByQueuesRetrieved(),
ASSERT_DOUBLE_DELTA);
goodSubCluster.deleteFederationPoliciesByQueuesRetrieved(300);
assertEquals(totalGoodBefore + 2,
metrics.getNumSucceededDeleteFederationPoliciesByQueuesRetrieved());
assertEquals(225,
metrics.getLatencySucceededDeleteFederationPoliciesByQueuesRetrieved(),
ASSERT_DOUBLE_DELTA);
}
}
|
MockGoodSubCluster
|
java
|
hibernate__hibernate-orm
|
hibernate-testing/src/main/java/org/hibernate/testing/orm/domain/gambit/EntityWithLazyManyToOneSelfReference.java
|
{
"start": 446,
"end": 1920
}
|
class ____ {
private Integer id;
// alphabetical
private String name;
private EntityWithLazyManyToOneSelfReference other;
private Integer someInteger;
EntityWithLazyManyToOneSelfReference() {
}
public EntityWithLazyManyToOneSelfReference(Integer id, String name, Integer someInteger) {
this.id = id;
this.name = name;
this.someInteger = someInteger;
}
public EntityWithLazyManyToOneSelfReference(
Integer id,
String name,
Integer someInteger,
EntityWithLazyManyToOneSelfReference other) {
this.id = id;
this.name = name;
this.someInteger = someInteger;
this.other = other;
}
public EntityWithLazyManyToOneSelfReference(
Integer id,
String name,
EntityWithLazyManyToOneSelfReference other,
Integer someInteger) {
this.id = id;
this.name = name;
this.other = other;
this.someInteger = someInteger;
}
@Id
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
@ManyToOne(fetch = FetchType.LAZY)
@JoinColumn
public EntityWithLazyManyToOneSelfReference getOther() {
return other;
}
public void setOther(EntityWithLazyManyToOneSelfReference other) {
this.other = other;
}
public Integer getSomeInteger() {
return someInteger;
}
public void setSomeInteger(Integer someInteger) {
this.someInteger = someInteger;
}
}
|
EntityWithLazyManyToOneSelfReference
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/filter/wall/WallDropTest.java
|
{
"start": 842,
"end": 1559
}
|
/**
 * Verifies that the SQL wall filter rejects {@code DROP TABLE} statements
 * when {@code dropTableAllow} is disabled, and permits them with the
 * default configuration, for both the MySQL and Oracle validators.
 *
 * Defect fixed: the class name was left as the placeholder {@code ____};
 * the real name {@code WallDropTest} is restored so the file compiles.
 */
class WallDropTest extends TestCase {
    public void testMySql() throws Exception {
        WallConfig config = new WallConfig();
        config.setDropTableAllow(false);
        assertFalse(WallUtils.isValidateMySql("DROP TABLE T1", config));
    }

    public void testOracle() throws Exception {
        WallConfig config = new WallConfig();
        config.setDropTableAllow(false);
        assertFalse(WallUtils.isValidateOracle("DROP TABLE T1", config));
    }

    // Default config: DROP TABLE is allowed.
    public void testMySql_true() throws Exception {
        assertTrue(WallUtils.isValidateMySql("DROP TABLE T1"));
    }

    public void testOracle_true() throws Exception {
        assertTrue(WallUtils.isValidateOracle("DROP TABLE T1"));
    }
}
|
WallDropTest
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.