language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | elastic__elasticsearch | libs/tdigest/src/test/java/org/elasticsearch/tdigest/TDigestTestCase.java | {
"start": 1634,
"end": 1723
} | class ____ TDigest tests that require {@link TDigestArrays} instances.
* <p>
* This | for |
java | apache__flink | flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStConfigurableOptions.java | {
"start": 2934,
"end": 25970
} | class ____ implements Serializable {
// --------------------------------------------------------------------------
// Provided configurable DBOptions within Flink
// --------------------------------------------------------------------------
public static final ConfigOption<Integer> MAX_BACKGROUND_THREADS =
key("state.backend.forst.thread.num")
.intType()
.defaultValue(2)
.withDescription(
"The maximum number of concurrent background flush and compaction jobs (per stateful operator). "
+ "The default value is '2'.");
public static final ConfigOption<Integer> MAX_OPEN_FILES =
key("state.backend.forst.files.open")
.intType()
.defaultValue(-1)
.withDescription(
"The maximum number of open files (per stateful operator) that can be used by the ForSt, '-1' means no limit. "
+ "The default value is '-1'.");
public static final ConfigOption<MemorySize> LOG_MAX_FILE_SIZE =
key("state.backend.forst.log.max-file-size")
.memoryType()
.defaultValue(MemorySize.parse("25mb"))
.withDescription(
"The maximum size of ForSt's file used for information logging. "
+ "If the log files becomes larger than this, a new file will be created. "
+ "If 0, all logs will be written to one log file. "
+ "The default maximum file size is '25MB'. ");
public static final ConfigOption<Integer> LOG_FILE_NUM =
key("state.backend.forst.log.file-num")
.intType()
.defaultValue(4)
.withDescription(
"The maximum number of files ForSt should keep for information logging (Default setting: 4).");
public static final ConfigOption<String> LOG_DIR =
key("state.backend.forst.log.dir")
.stringType()
.noDefaultValue()
.withDescription(
"The directory for ForSt's information logging files. "
+ "If empty (Flink default setting), log files will be in the same directory as the Flink log. "
+ "If non-empty, this directory will be used and the data directory's absolute path will be used as the prefix of the log file name. "
+ "If setting this option as a non-existing location, e.g '/dev/null', ForSt will then create the log under its own database folder.");
public static final ConfigOption<InfoLogLevel> LOG_LEVEL =
key("state.backend.forst.log.level")
.enumType(InfoLogLevel.class)
.defaultValue(INFO_LEVEL)
.withDescription(
Description.builder()
.text(
"The specified information logging level for ForSt. "
+ "If unset, Flink will use %s.",
code(INFO_LEVEL.name()))
.linebreak()
.text(
"Note: ForSt info logs will not be written to the TaskManager logs and there "
+ "is no rolling strategy, unless you configure %s, %s, and %s accordingly. "
+ "Without a rolling strategy, long-running tasks may lead to uncontrolled "
+ "disk space usage if configured with increased log levels!",
code(LOG_DIR.key()),
code(LOG_MAX_FILE_SIZE.key()),
code(LOG_FILE_NUM.key()))
.linebreak()
.text(
"There is no need to modify the ForSt log level, unless for troubleshooting ForSt.")
.build());
// --------------------------------------------------------------------------
// Provided configurable ColumnFamilyOptions within Flink
// --------------------------------------------------------------------------
public static final ConfigOption<CompactionStyle> COMPACTION_STYLE =
key("state.backend.forst.compaction.style")
.enumType(CompactionStyle.class)
.defaultValue(LEVEL)
.withDescription(
String.format(
"The specified compaction style for DB. Candidate compaction style is %s, %s, %s or %s, "
+ "and Flink chooses '%s' as default style.",
LEVEL.name(),
FIFO.name(),
UNIVERSAL.name(),
NONE.name(),
LEVEL.name()));
public static final ConfigOption<Boolean> USE_DYNAMIC_LEVEL_SIZE =
key("state.backend.forst.compaction.level.use-dynamic-size")
.booleanType()
.defaultValue(false)
.withDescription(
Description.builder()
.text(
"If true, ForSt will pick target size of each level dynamically. From an empty key-value store, ")
.text(
"ForSt would make last level the base level, which means merging L0 data into the last level, ")
.text(
"until it exceeds max_bytes_for_level_base. And then repeat this process for second last level and so on. ")
.text("The default value is 'false'. ")
.text(
"For more information, please refer to %s",
link(
"https://github.com/facebook/rocksdb/wiki/Leveled-Compaction#level_compaction_dynamic_level_bytes-is-true",
"RocksDB's doc."))
.build());
public static final ConfigOption<List<CompressionType>> COMPRESSION_PER_LEVEL =
key("state.backend.forst.compression.per.level")
.enumType(CompressionType.class)
.asList()
.defaultValues(SNAPPY_COMPRESSION)
.withDescription(
Description.builder()
.text(
"A semicolon-separated list of Compression Type. Different levels can have different "
+ "compression policies. In many cases, lower levels use fast compression algorithms, "
+ "while higher levels with more data use slower but more effective compression algorithms. "
+ "The N th element in the List corresponds to the compression type of the level N-1. "
+ "When %s is true, compression_per_level[0] still determines L0, but other "
+ "elements are based on the base level and may not match the level seen in the info log.",
code(USE_DYNAMIC_LEVEL_SIZE.key()))
.linebreak()
.text(
"Note: If the List size is smaller than the level number, the undefined lower level uses the last Compression Type in the List.")
.linebreak()
.text(
"Some commonly used compression algorithms for candidates include %s ,%s and %s.",
code(NO_COMPRESSION.name()),
code(SNAPPY_COMPRESSION.name()),
code(LZ4_COMPRESSION.name()))
.linebreak()
.text(
"The default value is %s, which means that all data uses the Snappy compression algorithm. ",
code(SNAPPY_COMPRESSION.name()))
.text(
"Likewise, if set to %s , means that all data is not compressed, which will achieve faster speed but will bring some space amplification. ",
code(NO_COMPRESSION.name()))
.text(
"In addition, if we need to consider both spatial amplification and performance, we can also set it to '%s;%s;%s', which means that L0 and L1 data will not be compressed, and other data will be compressed using LZ4.",
code(NO_COMPRESSION.name()),
code(NO_COMPRESSION.name()),
code(LZ4_COMPRESSION.name()))
.build());
public static final ConfigOption<MemorySize> TARGET_FILE_SIZE_BASE =
key("state.backend.forst.compaction.level.target-file-size-base")
.memoryType()
.defaultValue(MemorySize.parse("64mb"))
.withDescription(
Description.builder()
.text(
"The target file size for compaction, which determines a level-1 file size. "
+ "The default value is '%s'.",
text(MemorySize.parse("64mb").toString()))
.build());
public static final ConfigOption<MemorySize> MAX_SIZE_LEVEL_BASE =
key("state.backend.forst.compaction.level.max-size-level-base")
.memoryType()
.defaultValue(MemorySize.parse("256mb"))
.withDescription(
Description.builder()
.text(
"The upper-bound of the total size of level base files in bytes. "
+ "The default value is '%s'.",
text(MemorySize.parse("256mb").toString()))
.build());
public static final ConfigOption<MemorySize> WRITE_BUFFER_SIZE =
key("state.backend.forst.writebuffer.size")
.memoryType()
.defaultValue(MemorySize.parse("64mb"))
.withDescription(
"The amount of data built up in memory (backed by an unsorted log on disk) "
+ "before converting to a sorted on-disk files. The default writebuffer size is '64MB'.");
public static final ConfigOption<Integer> MAX_WRITE_BUFFER_NUMBER =
key("state.backend.forst.writebuffer.count")
.intType()
.defaultValue(2)
.withDescription(
"The maximum number of write buffers that are built up in memory. "
+ "The default value is '2'.");
public static final ConfigOption<Integer> MIN_WRITE_BUFFER_NUMBER_TO_MERGE =
key("state.backend.forst.writebuffer.number-to-merge")
.intType()
.defaultValue(1)
.withDescription(
"The minimum number of write buffers that will be merged together before writing to storage. "
+ "The default value is '1'.");
public static final ConfigOption<MemorySize> BLOCK_SIZE =
key("state.backend.forst.block.blocksize")
.memoryType()
.defaultValue(MemorySize.parse("4kb"))
.withDescription(
"The approximate size (in bytes) of user data packed per block. "
+ "The default blocksize is '4KB'.");
public static final ConfigOption<MemorySize> METADATA_BLOCK_SIZE =
key("state.backend.forst.block.metadata-blocksize")
.memoryType()
.defaultValue(MemorySize.parse("4kb"))
.withDescription(
"Approximate size of partitioned metadata packed per block. "
+ "Currently applied to indexes block when partitioned index/filters option is enabled. "
+ "The default blocksize is '4KB'.");
public static final ConfigOption<MemorySize> BLOCK_CACHE_SIZE =
key("state.backend.forst.block.cache-size")
.memoryType()
.defaultValue(MemorySize.parse("8mb"))
.withDescription(
"The amount of the cache for data blocks in ForSt. "
+ "The default block-cache size is '8MB'.");
public static final ConfigOption<MemorySize> WRITE_BATCH_SIZE =
key("state.backend.forst.write-batch-size")
.memoryType()
.defaultValue(MemorySize.parse("2mb"))
.withDescription(
"The max size of the consumed memory for ForSt batch write, "
+ "will flush just based on item count if this config set to 0.");
public static final ConfigOption<Boolean> USE_BLOOM_FILTER =
key("state.backend.forst.use-bloom-filter")
.booleanType()
.defaultValue(false)
.withDescription(
"Whether every newly created SST file will contain a Bloom filter. Default 'false'.");
public static final ConfigOption<Double> BLOOM_FILTER_BITS_PER_KEY =
key("state.backend.forst.bloom-filter.bits-per-key")
.doubleType()
.defaultValue(10.0)
.withDescription(
"Bits per key that the bloom filter will use, this only takes effect when the bloom filter is used. "
+ "The default value is 10.0.");
public static final ConfigOption<Boolean> BLOOM_FILTER_BLOCK_BASED_MODE =
key("state.backend.forst.bloom-filter.block-based-mode")
.booleanType()
.defaultValue(false)
.withDescription(
"If set 'true', ForSt will use block-based filter instead of full filter, this only takes effect when bloom filter is used. "
+ "The default value is 'false'.");
public static final ConfigOption<Long> COMPACT_FILTER_QUERY_TIME_AFTER_NUM_ENTRIES =
key("state.backend.forst.compaction.filter.query-time-after-num-entries")
.longType()
.defaultValue(1000L)
.withDescription(
"Number of state entries to process by compaction filter before updating current timestamp. "
+ "Updating the timestamp more often can improve cleanup speed, "
+ "but it decreases compaction performance because it uses JNI calls from native code.The default value is '1000L'.");
public static final ConfigOption<Duration> COMPACT_FILTER_PERIODIC_COMPACTION_TIME =
key("state.backend.forst.compaction.filter.periodic-compaction-time")
.durationType()
.defaultValue(Duration.ofDays(30))
.withDescription(
"Periodic compaction could speed up expired state entries cleanup, especially for state"
+ " entries rarely accessed. Files older than this value will be picked up for compaction,"
+ " and re-written to the same level as they were before. It makes sure a file goes through"
+ " compaction filters periodically. 0 means turning off periodic compaction.The default value is '30 d' (30 days).");
public static final ConfigOption<Double> RESTORE_OVERLAP_FRACTION_THRESHOLD =
key("state.backend.forst.restore-overlap-fraction-threshold")
.doubleType()
.defaultValue(0.0)
.withDescription(
"The threshold of overlap fraction between the state handle's key-group range and target key-group range. "
+ "When restore base DB, only the handle which overlap fraction greater than or equal to threshold "
+ "has a chance to be an initial handle. "
+ "The default value is 0.0, there is always a handle will be selected for initialization. ");
public static final ConfigOption<Boolean> USE_INGEST_DB_RESTORE_MODE =
key("state.backend.forst.use-ingest-db-restore-mode")
.booleanType()
.defaultValue(Boolean.FALSE)
.withDescription(
"A recovery mode that directly clips and ingests multiple DBs during state recovery if the keys"
+ " in the SST files does not exceed the declared key-group range.");
public static final ConfigOption<Boolean> USE_DELETE_FILES_IN_RANGE_DURING_RESCALING =
key("state.backend.forst.rescaling.use-delete-files-in-range")
.booleanType()
.defaultValue(Boolean.FALSE)
.withDescription(
"If true, during rescaling, the deleteFilesInRange API will be invoked "
+ "to clean up the useless key-values so that primary storage space can be reclaimed more promptly.");
static final ConfigOption<?>[] CANDIDATE_CONFIGS =
new ConfigOption<?>[] {
// cache
CACHE_DIRECTORY,
CACHE_SIZE_BASE_LIMIT,
CACHE_RESERVED_SIZE,
// configurable forst executor
EXECUTOR_COORDINATOR_INLINE,
EXECUTOR_WRITE_IO_INLINE,
EXECUTOR_WRITE_IO_PARALLELISM,
EXECUTOR_READ_IO_PARALLELISM,
// configurable DBOptions
MAX_BACKGROUND_THREADS,
MAX_OPEN_FILES,
LOG_LEVEL,
LOG_MAX_FILE_SIZE,
LOG_FILE_NUM,
LOG_DIR,
// configurable ColumnFamilyOptions
COMPACTION_STYLE,
COMPRESSION_PER_LEVEL,
USE_DYNAMIC_LEVEL_SIZE,
TARGET_FILE_SIZE_BASE,
MAX_SIZE_LEVEL_BASE,
WRITE_BUFFER_SIZE,
MAX_WRITE_BUFFER_NUMBER,
MIN_WRITE_BUFFER_NUMBER_TO_MERGE,
BLOCK_SIZE,
METADATA_BLOCK_SIZE,
BLOCK_CACHE_SIZE,
USE_BLOOM_FILTER,
BLOOM_FILTER_BITS_PER_KEY,
BLOOM_FILTER_BLOCK_BASED_MODE,
RESTORE_OVERLAP_FRACTION_THRESHOLD,
USE_INGEST_DB_RESTORE_MODE,
USE_DELETE_FILES_IN_RANGE_DURING_RESCALING,
COMPACT_FILTER_QUERY_TIME_AFTER_NUM_ENTRIES,
COMPACT_FILTER_PERIODIC_COMPACTION_TIME
};
private static final Set<ConfigOption<?>> POSITIVE_INT_CONFIG_SET =
new HashSet<>(
Arrays.asList(
MAX_BACKGROUND_THREADS,
LOG_FILE_NUM,
MAX_WRITE_BUFFER_NUMBER,
MIN_WRITE_BUFFER_NUMBER_TO_MERGE));
private static final Set<ConfigOption<?>> SIZE_CONFIG_SET =
new HashSet<>(
Arrays.asList(
TARGET_FILE_SIZE_BASE,
MAX_SIZE_LEVEL_BASE,
WRITE_BUFFER_SIZE,
BLOCK_SIZE,
METADATA_BLOCK_SIZE,
BLOCK_CACHE_SIZE));
/**
* Helper method to check whether the (key,value) is valid through given configuration and
* returns the formatted value.
*
* @param option The configuration key which is configurable in {@link
* ForStConfigurableOptions}.
* @param value The value within given configuration.
*/
static void checkArgumentValid(ConfigOption<?> option, Object value) {
final String key = option.key();
if (POSITIVE_INT_CONFIG_SET.contains(option)) {
Preconditions.checkArgument(
(Integer) value > 0,
"Configured value for key: " + key + " must be larger than 0.");
} else if (SIZE_CONFIG_SET.contains(option)) {
Preconditions.checkArgument(
((MemorySize) value).getBytes() > 0,
"Configured size for key" + key + " must be larger than 0.");
} else if (LOG_MAX_FILE_SIZE.equals(option)) {
Preconditions.checkArgument(
((MemorySize) value).getBytes() >= 0,
"Configured size for key " + key + " must be larger than or equal to 0.");
} else if (LOG_DIR.equals(option)) {
Preconditions.checkArgument(
new File((String) value).isAbsolute(),
"Configured path for key " + key + " is not absolute.");
}
}
}
| ForStConfigurableOptions |
java | google__gson | gson/src/main/java/com/google/gson/reflect/TypeToken.java | {
"start": 13833,
"end": 16691
} | class ____ no type arguments are provided, this method
* simply delegates to {@link #get(Class)} and creates a {@code TypeToken(Class)}.
*
* @throws IllegalArgumentException If {@code rawType} is not of type {@code Class}, or if the
* type arguments are invalid for the raw type
*/
public static TypeToken<?> getParameterized(Type rawType, Type... typeArguments) {
Objects.requireNonNull(rawType);
Objects.requireNonNull(typeArguments);
// Perform basic validation here because this is the only public API where users
// can create malformed parameterized types
if (!(rawType instanceof Class)) {
// See also https://bugs.openjdk.org/browse/JDK-8250659
throw new IllegalArgumentException("rawType must be of type Class, but was " + rawType);
}
Class<?> rawClass = (Class<?>) rawType;
TypeVariable<?>[] typeVariables = rawClass.getTypeParameters();
int expectedArgsCount = typeVariables.length;
int actualArgsCount = typeArguments.length;
if (actualArgsCount != expectedArgsCount) {
throw new IllegalArgumentException(
rawClass.getName()
+ " requires "
+ expectedArgsCount
+ " type arguments, but got "
+ actualArgsCount);
}
// For legacy reasons create a TypeToken(Class) if the type is not generic
if (typeArguments.length == 0) {
return get(rawClass);
}
// Check for this here to avoid misleading exception thrown by ParameterizedTypeImpl
if (GsonTypes.requiresOwnerType(rawType)) {
throw new IllegalArgumentException(
"Raw type "
+ rawClass.getName()
+ " is not supported because it requires specifying an owner type");
}
for (int i = 0; i < expectedArgsCount; i++) {
Type typeArgument =
Objects.requireNonNull(typeArguments[i], "Type argument must not be null");
Class<?> rawTypeArgument = GsonTypes.getRawType(typeArgument);
TypeVariable<?> typeVariable = typeVariables[i];
for (Type bound : typeVariable.getBounds()) {
Class<?> rawBound = GsonTypes.getRawType(bound);
if (!rawBound.isAssignableFrom(rawTypeArgument)) {
throw new IllegalArgumentException(
"Type argument "
+ typeArgument
+ " does not satisfy bounds for type variable "
+ typeVariable
+ " declared by "
+ rawType);
}
}
}
return new TypeToken<>(GsonTypes.newParameterizedTypeWithOwner(null, rawClass, typeArguments));
}
/**
* Gets type literal for the array type whose elements are all instances of {@code componentType}.
*/
public static TypeToken<?> getArray(Type componentType) {
return new TypeToken<>(GsonTypes.arrayOf(componentType));
}
}
| and |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/test/scheduling/JMFailoverITCase.java | {
"start": 23534,
"end": 25536
} | class ____ extends AbstractStreamOperator<Tuple2<Integer, Integer>>
implements OneInputStreamOperator<Long, Tuple2<Integer, Integer>> {
public static Map<Integer, Boolean> subtaskBlocked = new ConcurrentHashMap<>();
public static Map<Integer, Integer> attemptIds = new ConcurrentHashMap<>();
@Override
protected void setup(
StreamTask<?, ?> containingTask,
StreamConfig config,
Output<StreamRecord<Tuple2<Integer, Integer>>> output) {
super.setup(containingTask, config, output);
int subIdx = getRuntimeContext().getTaskInfo().getIndexOfThisSubtask();
// attempt id ++
attemptIds.compute(
subIdx,
(ignored, value) -> {
if (value == null) {
value = 0;
} else {
value += 1;
}
return value;
});
// wait until unblocked.
if (subtaskBlocked.containsKey(subIdx) && subtaskBlocked.get(subIdx)) {
tryWaitUntilCondition(() -> !subtaskBlocked.get(subIdx));
}
}
@Override
public void processElement(StreamRecord<Long> streamRecord) throws Exception {
int number = streamRecord.getValue().intValue();
output.collect(new StreamRecord<>(new Tuple2<>(number % NUMBER_KEYS, 1)));
}
public static void clear() {
subtaskBlocked.clear();
attemptIds.clear();
}
public static void blockSubTasks(Integer... subIndices) {
setSubtaskBlocked(Arrays.asList(subIndices), true, subtaskBlocked);
}
public static void unblockSubTasks(Integer... subIndices) {
setSubtaskBlocked(Arrays.asList(subIndices), false, subtaskBlocked);
}
}
private static | StubMapFunction |
java | junit-team__junit5 | junit-platform-launcher/src/main/java/org/junit/platform/launcher/core/DiscoveryIssueException.java | {
"start": 730,
"end": 932
} | class ____ extends JUnitException {
@Serial
private static final long serialVersionUID = 1L;
DiscoveryIssueException(String message) {
super(message, null, false, false);
}
}
| DiscoveryIssueException |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/LongAssertBaseTest.java | {
"start": 742,
"end": 831
} | class ____ {@link LongAssert} tests.
*
* @author Olivier Michallat
*/
public abstract | for |
java | spring-projects__spring-framework | spring-core/src/main/java24/org/springframework/core/type/classreading/ClassFileClassMetadata.java | {
"start": 1756,
"end": 6392
} | class ____ implements AnnotationMetadata {
private final String className;
private final AccessFlags accessFlags;
private final @Nullable String enclosingClassName;
private final @Nullable String superClassName;
private final boolean independentInnerClass;
private final Set<String> interfaceNames;
private final Set<String> memberClassNames;
private final Set<MethodMetadata> declaredMethods;
private final MergedAnnotations mergedAnnotations;
private @Nullable Set<String> annotationTypes;
ClassFileClassMetadata(String className, AccessFlags accessFlags, @Nullable String enclosingClassName,
@Nullable String superClassName, boolean independentInnerClass, Set<String> interfaceNames,
Set<String> memberClassNames, Set<MethodMetadata> declaredMethods, MergedAnnotations mergedAnnotations) {
this.className = className;
this.accessFlags = accessFlags;
this.enclosingClassName = enclosingClassName;
this.superClassName = (!className.endsWith(".package-info")) ? superClassName : null;
this.independentInnerClass = independentInnerClass;
this.interfaceNames = interfaceNames;
this.memberClassNames = memberClassNames;
this.declaredMethods = declaredMethods;
this.mergedAnnotations = mergedAnnotations;
}
@Override
public String getClassName() {
return this.className;
}
@Override
public boolean isInterface() {
return this.accessFlags.has(AccessFlag.INTERFACE);
}
@Override
public boolean isAnnotation() {
return this.accessFlags.has(AccessFlag.ANNOTATION);
}
@Override
public boolean isAbstract() {
return this.accessFlags.has(AccessFlag.ABSTRACT);
}
@Override
public boolean isFinal() {
return this.accessFlags.has(AccessFlag.FINAL);
}
@Override
public boolean isIndependent() {
return (this.enclosingClassName == null || this.independentInnerClass);
}
@Override
public @Nullable String getEnclosingClassName() {
return this.enclosingClassName;
}
@Override
public @Nullable String getSuperClassName() {
return this.superClassName;
}
@Override
public String[] getInterfaceNames() {
return StringUtils.toStringArray(this.interfaceNames);
}
@Override
public String[] getMemberClassNames() {
return StringUtils.toStringArray(this.memberClassNames);
}
@Override
public MergedAnnotations getAnnotations() {
return this.mergedAnnotations;
}
@Override
public Set<String> getAnnotationTypes() {
Set<String> annotationTypes = this.annotationTypes;
if (annotationTypes == null) {
annotationTypes = Collections.unmodifiableSet(
AnnotationMetadata.super.getAnnotationTypes());
this.annotationTypes = annotationTypes;
}
return annotationTypes;
}
@Override
public Set<MethodMetadata> getAnnotatedMethods(String annotationName) {
Set<MethodMetadata> result = new LinkedHashSet<>(4);
for (MethodMetadata annotatedMethod : this.declaredMethods) {
if (annotatedMethod.isAnnotated(annotationName)) {
result.add(annotatedMethod);
}
}
return Collections.unmodifiableSet(result);
}
@Override
public Set<MethodMetadata> getDeclaredMethods() {
return Collections.unmodifiableSet(this.declaredMethods);
}
@Override
public boolean equals(@Nullable Object other) {
return (this == other || (other instanceof ClassFileClassMetadata that && this.className.equals(that.className)));
}
@Override
public int hashCode() {
return this.className.hashCode();
}
@Override
public String toString() {
return this.className;
}
static ClassFileClassMetadata of(ClassModel classModel, @Nullable ClassLoader classLoader) {
Builder builder = new Builder(classLoader);
builder.classEntry(classModel.thisClass());
String currentClassName = classModel.thisClass().name().stringValue();
classModel.elementStream().forEach(classElement -> {
switch (classElement) {
case AccessFlags flags -> {
builder.accessFlags(flags);
}
case NestHostAttribute _ -> {
builder.enclosingClass(classModel.thisClass());
}
case InnerClassesAttribute innerClasses -> {
builder.nestMembers(currentClassName, innerClasses);
}
case RuntimeVisibleAnnotationsAttribute annotationsAttribute -> {
builder.mergedAnnotations(ClassFileAnnotationMetadata.createMergedAnnotations(
ClassUtils.convertResourcePathToClassName(currentClassName), annotationsAttribute, classLoader));
}
case Superclass superclass -> {
builder.superClass(superclass);
}
case Interfaces interfaces -> {
builder.interfaces(interfaces);
}
case MethodModel method -> {
builder.method(method);
}
default -> {
// ignore | ClassFileClassMetadata |
java | hibernate__hibernate-orm | hibernate-spatial/src/test/java/org/hibernate/spatial/testing/domain/GeomEntity.java | {
"start": 661,
"end": 907
} | class ____ raw Geometries, because in test classes a wide variety of SRIDs and
* coordinate spaces are mixed. (This creates notable problems for Oracle, which is very, very strict in what it accepts)
*/
@Entity
@Table(name = "geomtest")
public | uses |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/api/Abstract2DArrayAssert.java | {
"start": 660,
"end": 989
} | class ____ all two-dimensional array assertions.
*
* @param <SELF> the "self" type of this assertion class.
* @param <ACTUAL> the type of the "actual" value which is a two-dimensional Array of ELEMENT.
* @param <ELEMENT> the type of the "actual" array element.
*
* @author Maciej Wajcht
* @since 3.17.0
*/
public abstract | for |
java | google__guice | core/src/com/google/inject/internal/aop/AnonymousClassDefiner.java | {
"start": 1042,
"end": 2139
} | class ____'d during initialization and should not be used. */
static final boolean HAS_ERROR;
static {
sun.misc.Unsafe theUnsafe;
Method anonymousDefineMethod;
try {
theUnsafe = UnsafeGetter.getUnsafe();
// defineAnonymousClass was removed in JDK17, so we must refer to it reflectively.
anonymousDefineMethod =
sun.misc.Unsafe.class.getMethod(
"defineAnonymousClass", Class.class, byte[].class, Object[].class);
} catch (ReflectiveOperationException e) {
theUnsafe = null;
anonymousDefineMethod = null;
}
THE_UNSAFE = theUnsafe;
ANONYMOUS_DEFINE_METHOD = anonymousDefineMethod;
HAS_ERROR = theUnsafe == null;
}
@Override
public Class<?> define(Class<?> hostClass, byte[] bytecode) throws Exception {
if (HAS_ERROR) {
throw new IllegalStateException(
"Should not be called. An earlier error occurred during AnonymousClassDefiner static"
+ " initialization.");
}
return (Class<?>) ANONYMOUS_DEFINE_METHOD.invoke(THE_UNSAFE, hostClass, bytecode, null);
}
}
| err |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/diagnostics/analyzer/BindValidationFailureAnalyzerTests.java | {
"start": 6367,
"end": 6626
} | class ____ {
@NotNull
@SuppressWarnings("NullAway.Init")
private String bar;
String getBar() {
return this.bar;
}
void setBar(String bar) {
this.bar = bar;
}
}
}
@ConfigurationProperties("foo.bar")
@Validated
static | Nested |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/monitor/process/ProcessStats.java | {
"start": 4579,
"end": 5540
} | class ____ implements Writeable {
private final short percent;
private final long total;
public Cpu(short percent, long total) {
this.percent = percent;
this.total = total;
}
public Cpu(StreamInput in) throws IOException {
percent = in.readShort();
total = in.readLong();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeShort(percent);
out.writeLong(total);
}
/**
* Get the Process cpu usage.
* <p>
* Supported Platforms: All.
*/
public short getPercent() {
return percent;
}
/**
* Get the Process cpu time (sum of User and Sys).
* <p>
* Supported Platforms: All.
*/
public TimeValue getTotal() {
return new TimeValue(total);
}
}
}
| Cpu |
java | spring-projects__spring-boot | module/spring-boot-mustache/src/test/java/org/springframework/boot/mustache/autoconfigure/MustacheAutoConfigurationReactiveIntegrationTests.java | {
"start": 2793,
"end": 3623
} | class ____ {
@Autowired
private WebTestClient client;
@Test
void testHomePage() {
String result = this.client.get()
.uri("/")
.exchange()
.expectStatus()
.isOk()
.expectBody(String.class)
.returnResult()
.getResponseBody();
assertThat(result).contains("Hello App").contains("Hello World");
}
@Test
void testPartialPage() {
String result = this.client.get()
.uri("/partial")
.exchange()
.expectStatus()
.isOk()
.expectBody(String.class)
.returnResult()
.getResponseBody();
assertThat(result).contains("Hello App").contains("Hello World");
}
@Configuration(proxyBeanMethods = false)
@Import({ NettyReactiveWebServerAutoConfiguration.class, PropertyPlaceholderAutoConfiguration.class })
@Controller
@EnableWebFlux
static | MustacheAutoConfigurationReactiveIntegrationTests |
java | apache__dubbo | dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/rest/mapping/condition/PathParser.java | {
"start": 1680,
"end": 18786
} | class ____ {
private static final PathSegment SLASH = new PathSegment(Type.SLASH, RestConstants.SLASH);
private final List<PathSegment> segments = new LinkedList<>();
private final StringBuilder buf = new StringBuilder();
/**
* Ensure that the path is normalized using {@link PathUtils#normalize(String)} before parsing.
*/
static PathSegment[] parse(String path) {
if (path == null || path.isEmpty() || RestConstants.SLASH.equals(path)) {
return new PathSegment[] {PathSegment.literal(RestConstants.SLASH)};
}
if (PathUtils.isDirectPath(path)) {
return new PathSegment[] {PathSegment.literal(path)};
}
List<PathSegment> segments = new PathParser().doParse(path);
return segments.toArray(new PathSegment[0]);
}
private List<PathSegment> doParse(String path) {
parseSegments(path);
transformSegments(segments, path);
for (PathSegment segment : segments) {
try {
segment.initPattern();
} catch (Exception e) {
throw new PathParserException(Messages.REGEX_PATTERN_INVALID, segment.getValue(), path, e);
}
}
return segments;
}
private void parseSegments(String path) {
int state = State.INITIAL;
boolean regexBraceStart = false;
boolean regexMulti = false;
String variableName = null;
int len = path.length();
for (int i = 0; i < len; i++) {
char c = path.charAt(i);
switch (c) {
case '/':
switch (state) {
case State.INITIAL:
case State.SEGMENT_END:
continue;
case State.LITERAL_START:
if (buf.length() > 0) {
appendSegment(Type.LITERAL);
}
break;
case State.WILDCARD_START:
appendSegment(Type.WILDCARD);
break;
case State.REGEX_VARIABLE_START:
if (path.charAt(i - 1) != '^' || path.charAt(i - 2) != '[') {
regexMulti = true;
}
buf.append(c);
continue;
case State.VARIABLE_START:
case State.WILDCARD_VARIABLE_START:
throw new PathParserException(Messages.MISSING_CLOSE_CAPTURE, path, i);
default:
}
segments.add(SLASH);
state = State.SEGMENT_END;
continue;
case '?':
switch (state) {
case State.INITIAL:
case State.LITERAL_START:
case State.SEGMENT_END:
state = State.WILDCARD_START;
break;
default:
}
break;
case '*':
switch (state) {
case State.INITIAL:
case State.LITERAL_START:
case State.SEGMENT_END:
state = State.WILDCARD_START;
break;
case State.VARIABLE_START:
if (path.charAt(i - 1) == '{') {
state = State.WILDCARD_VARIABLE_START;
continue;
}
break;
default:
}
break;
case '.':
if (state == State.REGEX_VARIABLE_START) {
if (path.charAt(i - 1) != '\\') {
regexMulti = true;
}
}
break;
case 'S':
case 'W':
if (state == State.REGEX_VARIABLE_START) {
if (path.charAt(i - 1) == '\\') {
regexMulti = true;
}
}
break;
case ':':
if (state == State.VARIABLE_START) {
state = State.REGEX_VARIABLE_START;
variableName = buf.toString();
buf.setLength(0);
continue;
}
break;
case '{':
switch (state) {
case State.INITIAL:
case State.SEGMENT_END:
state = State.VARIABLE_START;
continue;
case State.LITERAL_START:
if (buf.length() > 0) {
appendSegment(Type.LITERAL);
}
state = State.VARIABLE_START;
continue;
case State.VARIABLE_START:
case State.WILDCARD_VARIABLE_START:
throw new PathParserException(Messages.ILLEGAL_NESTED_CAPTURE, path, i);
case State.REGEX_VARIABLE_START:
if (path.charAt(i - 1) != '\\') {
regexBraceStart = true;
}
break;
default:
}
break;
case '}':
switch (state) {
case State.INITIAL:
case State.LITERAL_START:
case State.SEGMENT_END:
throw new PathParserException(Messages.MISSING_OPEN_CAPTURE, path);
case State.VARIABLE_START:
appendSegment(Type.VARIABLE, buf.toString());
state = State.LITERAL_START;
continue;
case State.REGEX_VARIABLE_START:
if (regexBraceStart) {
regexBraceStart = false;
} else {
if (buf.length() == 0) {
throw new PathParserException(Messages.MISSING_REGEX_CONSTRAINT, path, i);
}
appendSegment(regexMulti ? Type.PATTERN_MULTI : Type.PATTERN, variableName);
regexMulti = false;
state = State.LITERAL_START;
continue;
}
break;
case State.WILDCARD_VARIABLE_START:
appendSegment(Type.WILDCARD_TAIL, buf.toString());
state = State.END;
continue;
default:
}
break;
default:
if (state == State.INITIAL || state == State.SEGMENT_END) {
state = State.LITERAL_START;
}
break;
}
if (state == State.END) {
throw new PathParserException(Messages.NO_MORE_DATA_ALLOWED, path, i);
}
buf.append(c);
}
if (buf.length() > 0) {
switch (state) {
case State.LITERAL_START:
appendSegment(Type.LITERAL);
break;
case State.WILDCARD_START:
appendSegment(Type.WILDCARD);
break;
case State.VARIABLE_START:
case State.REGEX_VARIABLE_START:
case State.WILDCARD_VARIABLE_START:
throw new PathParserException(Messages.MISSING_CLOSE_CAPTURE, path, len - 1);
default:
}
}
}
private void appendSegment(Type type) {
segments.add(new PathSegment(type, buf.toString()));
buf.setLength(0);
}
private void appendSegment(Type type, String name) {
segments.add(new PathSegment(type, buf.toString().trim(), name.trim()));
buf.setLength(0);
}
private static void transformSegments(List<PathSegment> segments, String path) {
ListIterator<PathSegment> iterator = segments.listIterator();
PathSegment curr, prev = null;
while (iterator.hasNext()) {
curr = iterator.next();
String value = curr.getValue();
Type type = curr.getType();
switch (type) {
case SLASH:
if (prev != null) {
switch (prev.getType()) {
case LITERAL:
case VARIABLE:
case PATTERN:
prev = curr;
break;
case PATTERN_MULTI:
if (!".*".equals(prev.getValue())) {
prev.setValue(prev.getValue() + '/');
}
break;
default:
}
}
iterator.remove();
continue;
case WILDCARD:
if ("*".equals(value)) {
type = Type.VARIABLE;
value = StringUtils.EMPTY_STRING;
} else if ("**".equals(value)) {
if (!iterator.hasNext()) {
type = Type.WILDCARD_TAIL;
value = StringUtils.EMPTY_STRING;
} else {
type = Type.PATTERN_MULTI;
value = ".*";
}
} else {
type = Type.PATTERN;
value = toRegex(value);
}
curr.setType(type);
curr.setValue(value);
break;
case WILDCARD_TAIL:
break;
case PATTERN:
case PATTERN_MULTI:
curr.setValue("(?<" + curr.getVariable() + '>' + value + ')');
break;
default:
}
if (prev == null) {
prev = curr;
continue;
}
String pValue = prev.getValue();
switch (prev.getType()) {
case LITERAL:
switch (type) {
case VARIABLE:
prev.setType(Type.PATTERN);
prev.setValue(quoteRegex(pValue) + "(?<" + curr.getVariable() + ">[^/]+)");
prev.setVariables(curr.getVariables());
iterator.remove();
continue;
case PATTERN:
case PATTERN_MULTI:
prev.setType(type);
prev.setValue(quoteRegex(pValue) + "(?<" + curr.getVariable() + '>' + value + ')');
prev.setVariables(curr.getVariables());
iterator.remove();
continue;
default:
}
break;
case VARIABLE:
switch (type) {
case LITERAL:
prev.setType(Type.PATTERN);
prev.setValue("(?<" + prev.getVariable() + ">[^/]+)" + quoteRegex(value));
iterator.remove();
continue;
case VARIABLE:
throw new PathParserException(Messages.ILLEGAL_DOUBLE_CAPTURE, path);
case PATTERN:
case PATTERN_MULTI:
String var = curr.getVariable();
prev.addVariable(var);
prev.setType(type);
prev.setValue("(?<" + prev.getVariable() + ">[^/]+)(?<" + var + '>' + value + ')');
iterator.remove();
continue;
default:
}
break;
case PATTERN:
case PATTERN_MULTI:
switch (type) {
case LITERAL:
prev.setValue(pValue + quoteRegex(value));
iterator.remove();
continue;
case WILDCARD_TAIL:
if (curr.getVariables() == null) {
prev.setValue(pValue + ".*");
} else {
prev.addVariable(curr.getVariable());
prev.setValue(pValue + "(?<" + curr.getVariable() + ">.*)");
}
prev.setType(Type.PATTERN_MULTI);
iterator.remove();
continue;
case VARIABLE:
if (value.isEmpty()) {
prev.setValue(pValue + "[^/]+");
iterator.remove();
continue;
}
prev.addVariable(curr.getVariable());
prev.setValue(pValue + "(?<" + curr.getVariable() + ">[^/]+)");
iterator.remove();
continue;
case PATTERN_MULTI:
prev.setType(Type.PATTERN_MULTI);
case PATTERN:
if (curr.getVariables() == null) {
prev.setValue(pValue + value);
} else {
prev.addVariable(curr.getVariable());
prev.setValue(pValue + "(?<" + curr.getVariable() + '>' + value + ')');
}
iterator.remove();
continue;
default:
}
break;
default:
}
prev = curr;
}
}
private static String quoteRegex(String regex) {
for (int i = 0, len = regex.length(); i < len; i++) {
switch (regex.charAt(i)) {
case '(':
case ')':
case '[':
case ']':
case '$':
case '^':
case '.':
case '{':
case '}':
case '|':
case '\\':
return "\\Q" + regex + "\\E";
default:
}
}
return regex;
}
private static String toRegex(String wildcard) {
int len = wildcard.length();
StringBuilder sb = new StringBuilder(len + 8);
for (int i = 0; i < len; i++) {
char c = wildcard.charAt(i);
switch (c) {
case '*':
if (i > 0) {
char prev = wildcard.charAt(i - 1);
if (prev == '*') {
continue;
}
if (prev == '?') {
sb.append("*");
continue;
}
}
sb.append("[^/]*");
break;
case '?':
if (i > 0 && wildcard.charAt(i - 1) == '*') {
continue;
}
sb.append("[^/]");
break;
case '(':
case ')':
case '$':
case '.':
case '{':
case '}':
case '|':
case '\\':
sb.append('\\');
sb.append(c);
break;
default:
sb.append(c);
break;
}
}
return sb.toString();
}
private | PathParser |
java | spring-projects__spring-boot | module/spring-boot-freemarker/src/main/java/org/springframework/boot/freemarker/autoconfigure/AbstractFreeMarkerConfiguration.java | {
"start": 930,
"end": 1040
} | class ____ shared FreeMarker configuration.
*
* @author Brian Clozel
* @author Stephane Nicoll
*/
abstract | for |
java | google__guava | android/guava/src/com/google/common/collect/AbstractMapBasedMultimap.java | {
"start": 29983,
"end": 31098
} | class ____ extends KeySet implements SortedSet<K> {
SortedKeySet(SortedMap<K, Collection<V>> subMap) {
super(subMap);
}
SortedMap<K, Collection<V>> sortedMap() {
return (SortedMap<K, Collection<V>>) super.map();
}
@Override
public @Nullable Comparator<? super K> comparator() {
return sortedMap().comparator();
}
@Override
@ParametricNullness
public K first() {
return sortedMap().firstKey();
}
@Override
public SortedSet<K> headSet(@ParametricNullness K toElement) {
return new SortedKeySet(sortedMap().headMap(toElement));
}
@Override
@ParametricNullness
public K last() {
return sortedMap().lastKey();
}
@Override
public SortedSet<K> subSet(@ParametricNullness K fromElement, @ParametricNullness K toElement) {
return new SortedKeySet(sortedMap().subMap(fromElement, toElement));
}
@Override
public SortedSet<K> tailSet(@ParametricNullness K fromElement) {
return new SortedKeySet(sortedMap().tailMap(fromElement));
}
}
@WeakOuter
private final | SortedKeySet |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/ext/jdk8/StreamSerializerTest.java | {
"start": 484,
"end": 5244
} | class ____
{
public int foo;
public String bar;
@JsonCreator
public TestBean(@JsonProperty("foo") int foo, @JsonProperty("bar") String bar)
{
this.foo = foo;
this.bar = bar;
}
@Override
public boolean equals(Object obj)
{
if (obj.getClass() != getClass()) {
return false;
}
TestBean castObj = (TestBean) obj;
return castObj.foo == foo && Objects.equals(castObj.bar, bar);
}
@Override
public int hashCode() {
return foo ^ bar.hashCode();
}
}
TestBean[] empty = {};
TestBean testBean1 = new TestBean(1, "one");
TestBean testBean2 = new TestBean(2, "two");
TestBean[] single = { testBean1 };
TestBean[] multipleValues = { testBean1, testBean2 };
@Test
public void testEmptyStream() throws Exception {
assertArrayEquals(empty, this.roundTrip(Stream.empty(), TestBean[].class));
}
@Test
public void testNestedStreamEmptyElement() throws Exception {
final List<NestedStream<String,List<String>>> expected = Arrays.asList(new NestedStream<>(new ArrayList<>()));
final Collection<NestedStream<String, List<String>>> actual = roundTrip(expected.stream(), new TypeReference<Collection<NestedStream<String,List<String>>>>() {});
assertEquals(expected,actual);
}
@Test
public void testSingleElement() throws Exception {
assertArrayEquals(single, roundTrip(Stream.of(single), TestBean[].class));
}
@Test
public void testNestedStreamSingleElement() throws Exception {
final List<NestedStream<String,List<String>>> nestedStream = Arrays.asList(new NestedStream<>(Arrays.asList("foo")));
final Collection<NestedStream<String, List<String>>> roundTrip = roundTrip(nestedStream.stream(), new TypeReference<Collection<NestedStream<String,List<String>>>>() {});
assertEquals(roundTrip,nestedStream);
}
@Test
public void testMultiElements() throws Exception {
assertArrayEquals(multipleValues, roundTrip(Stream.of(multipleValues), TestBean[].class));
}
@Test
public void testNestedStreamMultiElements() throws Exception {
final List<NestedStream<String,List<String>>> expected = Arrays.asList(new NestedStream<>(Arrays.asList("foo")),new NestedStream<>(Arrays.asList("bar")));
final Collection<NestedStream<String, List<String>>> actual = roundTrip(expected.stream(), new TypeReference<Collection<NestedStream<String,List<String>>>>() {});
assertEquals(expected,actual);
}
@Test
public void testStreamCloses() throws Exception {
assertClosesOnSuccess(Stream.of(multipleValues), stream -> roundTrip(stream, TestBean[].class));
}
// 10-Jan-2025, tatu: I hate these kinds of obscure lambda-ridden tests.
// They were accidentally disabled and now fail for... some reason. WTF.
// (came from `jackson-modules-java8`, disabled due to JUnit 4->5 migration)
/*
@Test
public void testStreamClosesOnRuntimeException() throws Exception {
String exceptionMessage = "Stream peek threw";
assertClosesOnRuntimeException(exceptionMessage, stream -> roundTrip(stream, TestBean[].class),
Stream.of(multipleValues)
.peek(e -> {
throw new RuntimeException(exceptionMessage);
}));
}
@Test
public void testStreamClosesOnSneakyIOException() throws Exception {
String exceptionMessage = "Stream peek threw";
assertClosesOnIoException(exceptionMessage, stream -> roundTrip(stream, TestBean[].class),
Stream.of(multipleValues)
.peek(e -> {
sneakyThrow(new IOException(exceptionMessage));
}));
}
@Test
public void testStreamClosesOnWrappedIoException() throws Exception {
final String exceptionMessage = "Stream peek threw";
assertClosesOnWrappedIoException(exceptionMessage, stream -> roundTrip(stream, TestBean[].class),
Stream.of(multipleValues)
.peek(e -> {
throw new UncheckedIOException(new IOException(exceptionMessage));
}));
}
*/
private <T, R> R[] roundTrip(Stream<T> stream, Class<R[]> clazz) {
String json = objectMapper.writeValueAsString(stream);
return objectMapper.readValue(json, clazz);
}
private <T, R> R roundTrip(Stream<T> stream, TypeReference<R> tr) {
return objectMapper.readValue(objectMapper.writeValueAsString(stream), tr);
}
/**
* Test | TestBean |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestCompositeGroupMapping.java | {
"start": 4401,
"end": 7296
} | class ____ extends GroupMappingProviderBase {
@Override
public List<String> getGroups(String user) throws IOException {
return toList(getGroupsInternal(user));
}
@Override
public Set<String> getGroupsSet(String user) throws IOException {
return toSet(getGroupsInternal(user));
}
private String getGroupsInternal(String user) throws IOException {
checkTestConf(PROVIDER_SPECIFIC_CONF_VALUE_FOR_CLUSTER);
String group = null;
if (user.equals(hdfs.name)) {
group = hdfs.group;
} else if (user.equals(jack.name)) { // jack has another group from clusterProvider
group = jack.group2;
}
return group;
}
}
static {
conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
CompositeGroupsMapping.class, GroupMappingServiceProvider.class);
conf.set(CompositeGroupsMapping.MAPPING_PROVIDERS_CONFIG_KEY, "userProvider,clusterProvider");
conf.setClass(CompositeGroupsMapping.MAPPING_PROVIDER_CONFIG_PREFIX + ".userProvider",
UserProvider.class, GroupMappingServiceProvider.class);
conf.setClass(CompositeGroupsMapping.MAPPING_PROVIDER_CONFIG_PREFIX + ".clusterProvider",
ClusterProvider.class, GroupMappingServiceProvider.class);
conf.set(CompositeGroupsMapping.MAPPING_PROVIDER_CONFIG_PREFIX +
".clusterProvider" + PROVIDER_SPECIFIC_CONF, PROVIDER_SPECIFIC_CONF_VALUE_FOR_CLUSTER);
conf.set(CompositeGroupsMapping.MAPPING_PROVIDER_CONFIG_PREFIX +
".userProvider" + PROVIDER_SPECIFIC_CONF, PROVIDER_SPECIFIC_CONF_VALUE_FOR_USER);
}
@Test
public void TestMultipleGroupsMapping() throws Exception {
Groups groups = new Groups(conf);
assertTrue(groups.getGroups(john.name).get(0).equals(john.group));
assertTrue(groups.getGroups(hdfs.name).get(0).equals(hdfs.group));
}
@Test
public void TestMultipleGroupsMappingWithCombined() throws Exception {
conf.set(CompositeGroupsMapping.MAPPING_PROVIDERS_COMBINED_CONFIG_KEY, "true");
Groups groups = new Groups(conf);
assertTrue(groups.getGroups(jack.name).size() == 2);
// the configured providers list in order is "userProvider,clusterProvider"
// group -> userProvider, group2 -> clusterProvider
assertTrue(groups.getGroups(jack.name).contains(jack.group));
assertTrue(groups.getGroups(jack.name).contains(jack.group2));
}
@Test
public void TestMultipleGroupsMappingWithoutCombined() throws Exception {
conf.set(CompositeGroupsMapping.MAPPING_PROVIDERS_COMBINED_CONFIG_KEY, "false");
Groups groups = new Groups(conf);
// the configured providers list in order is "userProvider,clusterProvider"
// group -> userProvider, group2 -> clusterProvider
assertTrue(groups.getGroups(jack.name).size() == 1);
assertTrue(groups.getGroups(jack.name).get(0).equals(jack.group));
}
}
| ClusterProvider |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/TestRouterHttpDelegationToken.java | {
"start": 3723,
"end": 8958
} | class ____ extends AuthenticationFilter {
@Override
protected Properties getConfiguration(String configPrefix,
FilterConfig filterConfig) throws ServletException {
Properties props = new Properties();
Enumeration<?> names = filterConfig.getInitParameterNames();
while (names.hasMoreElements()) {
String name = (String) names.nextElement();
if (name.startsWith(configPrefix)) {
String value = filterConfig.getInitParameter(name);
props.put(name.substring(configPrefix.length()), value);
}
}
props.put(AuthenticationFilter.AUTH_TYPE, "simple");
props.put(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "true");
return props;
}
}
@BeforeEach
public void setup() throws Exception {
Configuration conf = SecurityConfUtil.initSecurity();
conf.set(RBFConfigKeys.DFS_ROUTER_HTTP_ADDRESS_KEY, "0.0.0.0:0");
conf.set(RBFConfigKeys.DFS_ROUTER_HTTPS_ADDRESS_KEY, "0.0.0.0:0");
conf.set(RBFConfigKeys.DFS_ROUTER_RPC_ADDRESS_KEY, "0.0.0.0:0");
conf.set(FILTER_INITIALIZER_PROPERTY,
NoAuthFilterInitializer.class.getName());
conf.set(HADOOP_HTTP_AUTHENTICATION_TYPE, "simple");
// Start routers with an RPC and HTTP service only
Configuration routerConf = new RouterConfigBuilder()
.rpc()
.http()
.build();
conf.addResource(routerConf);
router = new Router();
router.init(conf);
router.start();
InetSocketAddress webAddress = router.getHttpServerAddress();
URI webURI = new URI(SWebHdfs.SCHEME, null,
webAddress.getHostName(), webAddress.getPort(), null, null, null);
fs = (WebHdfsFileSystem)FileSystem.get(webURI, conf);
}
@AfterEach
public void cleanup() throws Exception {
if (router != null) {
router.stop();
router.close();
}
SecurityConfUtil.destroy();
}
@Test
public void testGetDelegationToken() throws Exception {
final String renewer = "renewer0";
Token<?> token = getDelegationToken(fs, renewer);
assertNotNull(token);
DelegationTokenIdentifier tokenId =
getTokenIdentifier(token.getIdentifier());
assertEquals("router", tokenId.getOwner().toString());
assertEquals(renewer, tokenId.getRenewer().toString());
assertEquals("", tokenId.getRealUser().toString());
assertEquals("SWEBHDFS delegation", token.getKind().toString());
assertNotNull(token.getPassword());
}
@Test
public void testRenewDelegationToken() throws Exception {
Token<?> token = getDelegationToken(fs, "router");
DelegationTokenIdentifier tokenId =
getTokenIdentifier(token.getIdentifier());
long t = renewDelegationToken(fs, token);
assertTrue(t <= tokenId.getMaxDate(), t + " should not be larger than " + tokenId.getMaxDate());
}
@Test
public void testCancelDelegationToken() throws Exception {
Token<?> token = getDelegationToken(fs, "router");
cancelDelegationToken(fs, token);
LambdaTestUtils.intercept(IOException.class,
"Server returned HTTP response code: 403 ",
() -> renewDelegationToken(fs, token));
}
private Token<DelegationTokenIdentifier> getDelegationToken(
WebHdfsFileSystem webHdfs, String renewer) throws IOException {
Map<?, ?> json = sendHttpRequest(webHdfs, GetOpParam.Op.GETDELEGATIONTOKEN,
new RenewerParam(renewer));
return WebHdfsTestUtil.convertJsonToDelegationToken(json);
}
private long renewDelegationToken(WebHdfsFileSystem webHdfs, Token<?> token)
throws IOException {
Map<?, ?> json =
sendHttpRequest(webHdfs, PutOpParam.Op.RENEWDELEGATIONTOKEN,
new TokenArgumentParam(token.encodeToUrlString()));
return ((Number) json.get("long")).longValue();
}
private void cancelDelegationToken(WebHdfsFileSystem webHdfs, Token<?> token)
throws IOException {
sendHttpRequest(webHdfs, PutOpParam.Op.CANCELDELEGATIONTOKEN,
new TokenArgumentParam(token.encodeToUrlString()));
}
private Map<?, ?> sendHttpRequest(WebHdfsFileSystem webHdfs,
final HttpOpParam.Op op, final Param<?, ?>... parameters)
throws IOException {
String user = SecurityConfUtil.getRouterUserName();
// process parameters, add user.name
List<Param<?, ?>> pList = new ArrayList<>();
pList.add(new UserParam(user));
pList.addAll(Arrays.asList(parameters));
// build request url
final URL url = WebHdfsTestUtil.toUrl(webHdfs, op, null,
pList.toArray(new Param<?, ?>[pList.size()]));
// open connection and send request
HttpURLConnection conn =
WebHdfsTestUtil.openConnection(url, webHdfs.getConf());
conn.setRequestMethod(op.getType().toString());
WebHdfsTestUtil.sendRequest(conn);
final Map<?, ?> json = WebHdfsTestUtil.getAndParseResponse(conn);
conn.disconnect();
return json;
}
private DelegationTokenIdentifier getTokenIdentifier(byte[] id)
throws IOException {
DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
ByteArrayInputStream bais = new ByteArrayInputStream(id);
DataInputStream dais = new DataInputStream(bais);
identifier.readFields(dais);
return identifier;
}
}
| NoAuthFilter |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/query/SqlResultSetMappingDescriptor.java | {
"start": 6082,
"end": 8616
} | class ____ {
private final JpaColumnResultDescriptor argumentResultDescriptor;
private ArgumentDescriptor(JpaColumnResultDescriptor argumentResultDescriptor) {
this.argumentResultDescriptor = argumentResultDescriptor;
}
ArgumentMemento resolve(ResultSetMappingResolutionContext resolutionContext) {
return new ArgumentMemento( argumentResultDescriptor.resolve( resolutionContext ) );
}
}
private final String mappingName;
private final Class<?> targetJavaType;
private final List<ArgumentDescriptor> argumentResultDescriptors;
public ConstructorResultDescriptor(
ConstructorResult constructorResult,
SqlResultSetMapping mappingAnnotation) {
this.mappingName = mappingAnnotation.name();
this.targetJavaType = constructorResult.targetClass();
argumentResultDescriptors = interpretArguments( constructorResult, mappingAnnotation );
}
private static List<ArgumentDescriptor> interpretArguments(
ConstructorResult constructorResult,
SqlResultSetMapping mappingAnnotation) {
final ColumnResult[] columnResults = constructorResult.columns();
if ( ArrayHelper.isEmpty( columnResults ) ) {
throw new IllegalArgumentException( "ConstructorResult did not define any ColumnResults" );
}
final List<ArgumentDescriptor> argumentResultDescriptors = arrayList( columnResults.length );
for ( ColumnResult columnResult : columnResults ) {
final JpaColumnResultDescriptor argumentResultDescriptor = new JpaColumnResultDescriptor(
columnResult,
mappingAnnotation
);
argumentResultDescriptors.add(new ArgumentDescriptor(argumentResultDescriptor));
}
return argumentResultDescriptors;
}
@Override
public ResultMemento resolve(ResultSetMappingResolutionContext resolutionContext) {
BOOT_QUERY_LOGGER.tracef(
"Generating InstantiationResultMappingMemento for JPA ConstructorResult(%s) for ResultSet mapping `%s`",
targetJavaType.getName(),
mappingName
);
final List<ArgumentMemento> argumentResultMementos = new ArrayList<>( argumentResultDescriptors.size() );
argumentResultDescriptors.forEach(
(mapping) -> argumentResultMementos.add( mapping.resolve( resolutionContext ) )
);
final var targetJtd =
resolutionContext.getTypeConfiguration().getJavaTypeRegistry()
.resolveDescriptor( targetJavaType );
return new ResultMementoInstantiationStandard( targetJtd, argumentResultMementos );
}
}
/**
* @see jakarta.persistence.EntityResult
*/
public static | ArgumentDescriptor |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/id/entities/Dog.java | {
"start": 722,
"end": 1058
} | class ____ {
private Integer id;
private String name;
@Id
@GeneratedValue(strategy = GenerationType.TABLE, generator = "DogGen")
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
| Dog |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/convert/ApplicationConversionServiceTests.java | {
"start": 16473,
"end": 16634
} | class ____ implements Converter<String, Integer> {
@Override
public @Nullable Integer convert(String source) {
return null;
}
}
static | ExampleConverter |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/submitted/unmatched_prop_type/User.java | {
"start": 744,
"end": 1164
} | class ____ {
private final Integer id;
private final String name;
private Birthday dob;
public User(String id, String name) {
this.id = Integer.valueOf(id);
this.name = name;
}
public Integer getId() {
return id;
}
public String getName() {
return name;
}
public Birthday getDob() {
return dob;
}
public void setDob(String dob) {
this.dob = new Birthday(dob);
}
| User |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDisableConnCache.java | {
"start": 1296,
"end": 2489
} | class ____ {
static final Logger LOG = LoggerFactory.getLogger(TestDisableConnCache.class);
static final int BLOCK_SIZE = 4096;
static final int FILE_SIZE = 3 * BLOCK_SIZE;
/**
* Test that the socket cache can be disabled by setting the capacity to
* 0. Regression test for HDFS-3365.
* @throws Exception
*/
@Test
public void testDisableCache() throws Exception {
HdfsConfiguration confWithoutCache = new HdfsConfiguration();
// Configure a new instance with no peer caching, ensure that it doesn't
// cache anything
confWithoutCache.setInt(
HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 0);
BlockReaderTestUtil util = new BlockReaderTestUtil(1, confWithoutCache);
final Path testFile = new Path("/testConnCache.dat");
util.writeFile(testFile, FILE_SIZE / 1024);
FileSystem fsWithoutCache = FileSystem.newInstance(util.getConf());
try {
DFSTestUtil.readFile(fsWithoutCache, testFile);
assertEquals(0, ((DistributedFileSystem)fsWithoutCache).
dfs.getClientContext().getPeerCache().size());
} finally {
fsWithoutCache.close();
util.shutdown();
}
}
} | TestDisableConnCache |
java | spring-projects__spring-boot | core/spring-boot/src/main/java/org/springframework/boot/web/servlet/DelegatingFilterProxyRegistrationBean.java | {
"start": 2347,
"end": 4165
} | class ____ extends AbstractFilterRegistrationBean<DelegatingFilterProxy>
implements ApplicationContextAware {
@SuppressWarnings("NullAway.Init")
private ApplicationContext applicationContext;
private final String targetBeanName;
/**
* Create a new {@link DelegatingFilterProxyRegistrationBean} instance to be
* registered with the specified {@link ServletRegistrationBean}s.
* @param targetBeanName name of the target filter bean to look up in the Spring
* application context (must not be {@code null}).
* @param servletRegistrationBeans associate {@link ServletRegistrationBean}s
*/
public DelegatingFilterProxyRegistrationBean(String targetBeanName,
ServletRegistrationBean<?>... servletRegistrationBeans) {
super(servletRegistrationBeans);
Assert.hasLength(targetBeanName, "'targetBeanName' must not be empty");
this.targetBeanName = targetBeanName;
setName(targetBeanName);
}
@Override
public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
this.applicationContext = applicationContext;
}
protected String getTargetBeanName() {
return this.targetBeanName;
}
@Override
public DelegatingFilterProxy getFilter() {
return new DelegatingFilterProxy(this.targetBeanName, getWebApplicationContext()) {
@Override
protected void initFilterBean() throws ServletException {
// Don't initialize filter bean on init()
}
};
}
private WebApplicationContext getWebApplicationContext() {
Assert.state(this.applicationContext != null, "ApplicationContext has not been injected");
Assert.state(this.applicationContext instanceof WebApplicationContext,
"Injected ApplicationContext is not a WebApplicationContext");
return (WebApplicationContext) this.applicationContext;
}
}
| DelegatingFilterProxyRegistrationBean |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/AssertionFailureIgnoredTest.java | {
"start": 7615,
"end": 8656
} | class ____ {
void f() {
AssertionError t =
assertThrows(
AssertionError.class,
() -> {
System.err.println();
System.err.println();
});
assertThat(t).isInstanceOf(AssertionError.class);
assertThrows(
AssertionError.class,
() -> {
System.err.println();
System.err.println();
});
assertThrows(
AssertionError.class,
() -> {
if (true) throw new AssertionError();
});
}
}
""")
.doTest();
}
@Test
public void union() {
testHelper
.addSourceLines(
"Test.java",
"""
import org.junit.Assert;
import java.io.IOError;
| Test |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/criteria/components/ComponentInWhereClauseTest.java | {
"start": 7439,
"end": 7656
} | class ____ {
@Id
@GeneratedValue(strategy = GenerationType.AUTO)
protected Long id;
public Long getId() {
return id;
}
}
@Entity(name = "Employee")
@Table(name = "EMPLOYEE")
public static | AbstractEntity |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/connector/source/ReaderOutput.java | {
"start": 2115,
"end": 5195
} | interface ____<T> extends SourceOutput<T> {
/**
* Emit a record without a timestamp.
*
* <p>Use this method if the source system does not have a notion of records with timestamps.
*
* <p>The events later pass through a {@link TimestampAssigner}, which attaches a timestamp to
* the event based on the event's contents. For example a file source with JSON records would
* not have a generic timestamp from the file reading and JSON parsing process, and thus use
* this method to produce initially a record without a timestamp. The {@code TimestampAssigner}
* in the next step would be used to extract timestamp from a field of the JSON object.
*
* @param record the record to emit.
*/
@Override
void collect(T record);
/**
* Emit a record with a timestamp.
*
* <p>Use this method if the source system has timestamps attached to records. Typical examples
* would be Logs, PubSubs, or Message Queues, like Kafka or Kinesis, which store a timestamp
* with each event.
*
* <p>The events typically still pass through a {@link TimestampAssigner}, which may decide to
* either use this source-provided timestamp, or replace it with a timestamp stored within the
* event (for example if the event was a JSON object one could configure aTimestampAssigner that
* extracts one of the object's fields and uses that as a timestamp).
*
* @param record the record to emit.
* @param timestamp the timestamp of the record.
*/
@Override
void collect(T record, long timestamp);
/**
* Emits the given watermark.
*
* <p>Emitting a watermark also implicitly marks the stream as <i>active</i>, ending previously
* marked idleness.
*/
@Override
void emitWatermark(Watermark watermark);
/**
* Marks this output as idle, meaning that downstream operations do not wait for watermarks from
* this output.
*
* <p>An output becomes active again as soon as the next watermark is emitted.
*/
@Override
void markIdle();
/**
* Creates a {@code SourceOutput} for a specific Source Split. Use these outputs if you want to
* run split-local logic, like watermark generation.
*
* <p>If a split-local output was already created for this split-ID, the method will return that
* instance, so that only one split-local output exists per split-ID.
*
* <p><b>IMPORTANT:</b> After the split has been finished, it is crucial to release the created
* output again. Otherwise it will continue to contribute to the watermark generation like a
* perpetually stalling source split, and may hold back the watermark indefinitely.
*
* @see #releaseOutputForSplit(String)
*/
SourceOutput<T> createOutputForSplit(String splitId);
/**
* Releases the {@code SourceOutput} created for the split with the given ID.
*
* @see #createOutputForSplit(String)
*/
void releaseOutputForSplit(String splitId);
}
| ReaderOutput |
java | apache__flink | flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/catalog/GenericInMemoryCatalog.java | {
"start": 2691,
"end": 33943
} | class ____ extends AbstractCatalog {
public static final String DEFAULT_DB = "default";
private final Map<String, CatalogDatabase> databases;
private final Map<ObjectPath, CatalogBaseTable> tables;
private final Map<ObjectPath, CatalogModel> models;
private final Map<ObjectPath, CatalogFunction> functions;
private final Map<ObjectPath, Map<CatalogPartitionSpec, CatalogPartition>> partitions;
private final Map<ObjectPath, CatalogTableStatistics> tableStats;
private final Map<ObjectPath, CatalogColumnStatistics> tableColumnStats;
private final Map<ObjectPath, Map<CatalogPartitionSpec, CatalogTableStatistics>> partitionStats;
private final Map<ObjectPath, Map<CatalogPartitionSpec, CatalogColumnStatistics>>
partitionColumnStats;
public GenericInMemoryCatalog(String name) {
this(name, DEFAULT_DB);
}
public GenericInMemoryCatalog(String name, String defaultDatabase) {
super(name, defaultDatabase);
this.databases = new LinkedHashMap<>();
this.databases.put(defaultDatabase, new CatalogDatabaseImpl(new HashMap<>(), null));
this.tables = new LinkedHashMap<>();
this.models = new LinkedHashMap<>();
this.functions = new LinkedHashMap<>();
this.partitions = new LinkedHashMap<>();
this.tableStats = new LinkedHashMap<>();
this.tableColumnStats = new LinkedHashMap<>();
this.partitionStats = new LinkedHashMap<>();
this.partitionColumnStats = new LinkedHashMap<>();
}
@Override
public void open() {}
@Override
public void close() {}
// ------ databases ------
@Override
public void createDatabase(String databaseName, CatalogDatabase db, boolean ignoreIfExists)
throws DatabaseAlreadyExistException {
checkArgument(!StringUtils.isNullOrWhitespaceOnly(databaseName));
checkNotNull(db);
if (databaseExists(databaseName)) {
if (!ignoreIfExists) {
throw new DatabaseAlreadyExistException(getName(), databaseName);
}
} else {
databases.put(databaseName, db.copy());
}
}
@Override
public void dropDatabase(String databaseName, boolean ignoreIfNotExists, boolean cascade)
throws DatabaseNotExistException, DatabaseNotEmptyException {
checkArgument(!StringUtils.isNullOrWhitespaceOnly(databaseName));
if (databases.containsKey(databaseName)) {
// Make sure the database is empty
if (isDatabaseEmpty(databaseName)) {
databases.remove(databaseName);
} else if (cascade) {
// delete all tables and functions in this database and then delete the database.
List<ObjectPath> deleteTablePaths =
tables.keySet().stream()
.filter(op -> op.getDatabaseName().equals(databaseName))
.collect(Collectors.toList());
deleteTablePaths.forEach(
objectPath -> {
try {
dropTable(objectPath, true);
} catch (TableNotExistException e) {
// ignore
}
});
List<ObjectPath> deleteFunctionPaths =
functions.keySet().stream()
.filter(op -> op.getDatabaseName().equals(databaseName))
.collect(Collectors.toList());
deleteFunctionPaths.forEach(
objectPath -> {
try {
dropFunction(objectPath, true);
} catch (FunctionNotExistException e) {
// ignore
}
});
databases.remove(databaseName);
} else {
throw new DatabaseNotEmptyException(getName(), databaseName);
}
} else if (!ignoreIfNotExists) {
throw new DatabaseNotExistException(getName(), databaseName);
}
}
private boolean isDatabaseEmpty(String databaseName) {
checkArgument(!StringUtils.isNullOrWhitespaceOnly(databaseName));
return tables.keySet().stream().noneMatch(op -> op.getDatabaseName().equals(databaseName))
&& functions.keySet().stream()
.noneMatch(op -> op.getDatabaseName().equals(databaseName));
}
@Override
public void alterDatabase(
String databaseName, CatalogDatabase newDatabase, boolean ignoreIfNotExists)
throws DatabaseNotExistException {
checkArgument(!StringUtils.isNullOrWhitespaceOnly(databaseName));
checkNotNull(newDatabase);
CatalogDatabase existingDatabase = databases.get(databaseName);
if (existingDatabase != null) {
if (existingDatabase.getClass() != newDatabase.getClass()) {
throw new CatalogException(
String.format(
"Database types don't match. Existing database is '%s' and new database is '%s'.",
existingDatabase.getClass().getName(),
newDatabase.getClass().getName()));
}
databases.put(databaseName, newDatabase.copy());
} else if (!ignoreIfNotExists) {
throw new DatabaseNotExistException(getName(), databaseName);
}
}
    /** Returns a snapshot of all database names; the returned list is safe to mutate. */
    @Override
    public List<String> listDatabases() {
        return new ArrayList<>(databases.keySet());
    }
@Override
public CatalogDatabase getDatabase(String databaseName) throws DatabaseNotExistException {
checkArgument(!StringUtils.isNullOrWhitespaceOnly(databaseName));
if (!databaseExists(databaseName)) {
throw new DatabaseNotExistException(getName(), databaseName);
} else {
return databases.get(databaseName).copy();
}
}
    /** Returns whether a database of the given (non-blank) name is registered. */
    @Override
    public boolean databaseExists(String databaseName) {
        checkArgument(!StringUtils.isNullOrWhitespaceOnly(databaseName));

        return databases.containsKey(databaseName);
    }
// ------ tables ------
@Override
public void createTable(ObjectPath tablePath, CatalogBaseTable table, boolean ignoreIfExists)
throws TableAlreadyExistException, DatabaseNotExistException {
checkNotNull(tablePath);
checkNotNull(table);
if (!databaseExists(tablePath.getDatabaseName())) {
throw new DatabaseNotExistException(getName(), tablePath.getDatabaseName());
}
if (tableExists(tablePath)) {
if (!ignoreIfExists) {
throw new TableAlreadyExistException(getName(), tablePath);
}
} else {
tables.put(tablePath, table.copy());
if (isPartitionedTable(tablePath)) {
partitions.put(tablePath, new LinkedHashMap<>());
partitionStats.put(tablePath, new LinkedHashMap<>());
partitionColumnStats.put(tablePath, new LinkedHashMap<>());
}
}
}
@Override
public void alterTable(
ObjectPath tablePath, CatalogBaseTable newTable, boolean ignoreIfNotExists)
throws TableNotExistException {
checkNotNull(tablePath);
checkNotNull(newTable);
CatalogBaseTable existingTable = tables.get(tablePath);
if (existingTable != null) {
if (existingTable.getTableKind() != newTable.getTableKind()) {
throw new CatalogException(
String.format(
"Table types don't match. Existing table is '%s' and new table is '%s'.",
existingTable.getTableKind(), newTable.getTableKind()));
}
tables.put(tablePath, newTable.copy());
} else if (!ignoreIfNotExists) {
throw new TableNotExistException(getName(), tablePath);
}
}
// ------ tables and views ------
@Override
public void dropTable(ObjectPath tablePath, boolean ignoreIfNotExists)
throws TableNotExistException {
checkNotNull(tablePath);
if (tableExists(tablePath)) {
tables.remove(tablePath);
tableStats.remove(tablePath);
tableColumnStats.remove(tablePath);
partitions.remove(tablePath);
partitionStats.remove(tablePath);
partitionColumnStats.remove(tablePath);
} else if (!ignoreIfNotExists) {
throw new TableNotExistException(getName(), tablePath);
}
}
    /**
     * Renames a table within its database, re-keying the table and all associated bookkeeping
     * (statistics and partition maps) under the new path. The stored objects themselves are
     * reused unchanged.
     *
     * @param tablePath current path of the table
     * @param newTableName non-blank new table name (same database)
     * @param ignoreIfNotExists if true, a missing table is silently ignored
     * @throws TableNotExistException if the table is missing and ignoreIfNotExists is false
     * @throws TableAlreadyExistException if a table already exists at the new path
     */
    @Override
    public void renameTable(ObjectPath tablePath, String newTableName, boolean ignoreIfNotExists)
            throws TableNotExistException, TableAlreadyExistException {
        checkNotNull(tablePath);
        checkArgument(!StringUtils.isNullOrWhitespaceOnly(newTableName));

        if (tableExists(tablePath)) {
            ObjectPath newPath = new ObjectPath(tablePath.getDatabaseName(), newTableName);

            if (tableExists(newPath)) {
                throw new TableAlreadyExistException(getName(), newPath);
            } else {
                tables.put(newPath, tables.remove(tablePath));

                // table statistics
                if (tableStats.containsKey(tablePath)) {
                    tableStats.put(newPath, tableStats.remove(tablePath));
                }

                // table column statistics
                if (tableColumnStats.containsKey(tablePath)) {
                    tableColumnStats.put(newPath, tableColumnStats.remove(tablePath));
                }

                // partitions
                if (partitions.containsKey(tablePath)) {
                    partitions.put(newPath, partitions.remove(tablePath));
                }

                // partition statistics
                if (partitionStats.containsKey(tablePath)) {
                    partitionStats.put(newPath, partitionStats.remove(tablePath));
                }

                // partition column statistics
                if (partitionColumnStats.containsKey(tablePath)) {
                    partitionColumnStats.put(newPath, partitionColumnStats.remove(tablePath));
                }
            }
        } else if (!ignoreIfNotExists) {
            throw new TableNotExistException(getName(), tablePath);
        }
    }
    /** Lists the names of all tables and views in the given database. */
    @Override
    public List<String> listTables(String databaseName) throws DatabaseNotExistException {
        return listObjectsUnderDatabase(tables, databaseName, objectPath -> true);
    }
    /** Lists the names of only the views (entries stored as {@link CatalogView}) in a database. */
    @Override
    public List<String> listViews(String databaseName) throws DatabaseNotExistException {
        return listObjectsUnderDatabase(
                tables, databaseName, k -> (tables.get(k) instanceof CatalogView));
    }
    /** Lists the names of only the materialized tables in a database. */
    @Override
    public List<String> listMaterializedTables(String databaseName)
            throws DatabaseNotExistException {
        return listObjectsUnderDatabase(
                tables, databaseName, k -> (tables.get(k) instanceof CatalogMaterializedTable));
    }
    /**
     * Looks up a table or view by path.
     *
     * @return a copy of the stored definition, so callers cannot mutate catalog state
     * @throws TableNotExistException if the table (or its database) does not exist
     */
    @Override
    public CatalogBaseTable getTable(ObjectPath tablePath) throws TableNotExistException {
        checkNotNull(tablePath);

        if (!tableExists(tablePath)) {
            throw new TableNotExistException(getName(), tablePath);
        } else {
            return tables.get(tablePath).copy();
        }
    }
    /** Returns whether the path's database exists and contains a table of that name. */
    @Override
    public boolean tableExists(ObjectPath tablePath) {
        checkNotNull(tablePath);

        return databaseExists(tablePath.getDatabaseName()) && tables.containsKey(tablePath);
    }
    /** Guard helper: throws {@link TableNotExistException} unless the table exists. */
    private void ensureTableExists(ObjectPath tablePath) throws TableNotExistException {
        if (!tableExists(tablePath)) {
            throw new TableNotExistException(getName(), tablePath);
        }
    }
// ------ models ------
@Override
public void createModel(ObjectPath modelPath, CatalogModel model, boolean ignoreIfExists)
throws ModelAlreadyExistException, DatabaseNotExistException {
checkNotNull(modelPath);
checkNotNull(model);
if (!databaseExists(modelPath.getDatabaseName())) {
throw new DatabaseNotExistException(getName(), modelPath.getDatabaseName());
}
if (modelExists(modelPath)) {
if (!ignoreIfExists) {
throw new ModelAlreadyExistException(getName(), modelPath);
}
} else {
models.put(modelPath, model.copy());
}
}
@Override
public void alterModel(ObjectPath modelPath, CatalogModel newModel, boolean ignoreIfNotExists)
throws ModelNotExistException {
checkNotNull(modelPath);
CatalogModel existingModel = models.get(modelPath);
if (existingModel == null || newModel == null) {
if (ignoreIfNotExists) {
return;
}
throw new ModelNotExistException(getName(), modelPath);
}
models.put(modelPath, newModel.copy());
}
@Override
public void dropModel(ObjectPath modelPath, boolean ignoreIfNotExists)
throws ModelNotExistException {
checkNotNull(modelPath);
if (modelExists(modelPath)) {
models.remove(modelPath);
} else if (!ignoreIfNotExists) {
throw new ModelNotExistException(getName(), modelPath);
}
}
@Override
public void renameModel(ObjectPath modelPath, String newModelName, boolean ignoreIfNotExists)
throws ModelNotExistException, ModelAlreadyExistException {
checkNotNull(modelPath);
checkArgument(!StringUtils.isNullOrWhitespaceOnly(newModelName));
if (modelExists(modelPath)) {
ObjectPath newPath = new ObjectPath(modelPath.getDatabaseName(), newModelName);
if (modelExists(newPath)) {
throw new ModelAlreadyExistException(getName(), newPath);
} else {
models.put(newPath, models.remove(modelPath));
}
} else if (!ignoreIfNotExists) {
throw new ModelNotExistException(getName(), modelPath);
}
}
    /** Lists the names of all models in the given database. */
    @Override
    public List<String> listModels(String databaseName) throws DatabaseNotExistException {
        return listObjectsUnderDatabase(models, databaseName, k -> true);
    }
    /**
     * Looks up a model by path.
     *
     * @return a copy of the stored definition, so callers cannot mutate catalog state
     * @throws ModelNotExistException if the model (or its database) does not exist
     */
    @Override
    public CatalogModel getModel(ObjectPath modelPath) throws ModelNotExistException {
        checkNotNull(modelPath);
        if (!modelExists(modelPath)) {
            throw new ModelNotExistException(getName(), modelPath);
        } else {
            return models.get(modelPath).copy();
        }
    }
    /** Returns whether the path's database exists and contains a model of that name. */
    @Override
    public boolean modelExists(ObjectPath modelPath) {
        checkNotNull(modelPath);
        return databaseExists(modelPath.getDatabaseName()) && models.containsKey(modelPath);
    }
// ------ functions ------
@Override
public void createFunction(ObjectPath path, CatalogFunction function, boolean ignoreIfExists)
throws FunctionAlreadyExistException, DatabaseNotExistException {
checkNotNull(path);
checkNotNull(function);
ObjectPath functionPath = normalize(path);
if (!databaseExists(functionPath.getDatabaseName())) {
throw new DatabaseNotExistException(getName(), functionPath.getDatabaseName());
}
if (functionExists(functionPath)) {
if (!ignoreIfExists) {
throw new FunctionAlreadyExistException(getName(), functionPath);
}
} else {
functions.put(functionPath, function.copy());
}
}
    /**
     * Replaces an existing function definition with a new one of the same concrete class.
     *
     * @param path path of the function (name is normalized before lookup)
     * @param newFunction the replacement definition
     * @param ignoreIfNotExists if true, a missing function is silently ignored
     * @throws FunctionNotExistException if the function is missing and ignoreIfNotExists is false
     * @throws CatalogException if the new definition's class differs from the existing one's
     */
    @Override
    public void alterFunction(
            ObjectPath path, CatalogFunction newFunction, boolean ignoreIfNotExists)
            throws FunctionNotExistException {
        checkNotNull(path);
        checkNotNull(newFunction);

        ObjectPath functionPath = normalize(path);

        CatalogFunction existingFunction = functions.get(functionPath);

        if (existingFunction != null) {
            // Swapping in a function of a different implementation class is not allowed.
            if (existingFunction.getClass() != newFunction.getClass()) {
                throw new CatalogException(
                        String.format(
                                "Function types don't match. Existing function is '%s' and new function is '%s'.",
                                existingFunction.getClass().getName(),
                                newFunction.getClass().getName()));
            }

            functions.put(functionPath, newFunction.copy());
        } else if (!ignoreIfNotExists) {
            throw new FunctionNotExistException(getName(), functionPath);
        }
    }
@Override
public void dropFunction(ObjectPath path, boolean ignoreIfNotExists)
throws FunctionNotExistException {
checkNotNull(path);
ObjectPath functionPath = normalize(path);
if (functionExists(functionPath)) {
functions.remove(functionPath);
} else if (!ignoreIfNotExists) {
throw new FunctionNotExistException(getName(), functionPath);
}
}
    /** Lists the (normalized) names of all functions in the given database. */
    @Override
    public List<String> listFunctions(String databaseName) throws DatabaseNotExistException {
        return listObjectsUnderDatabase(functions, databaseName, k -> true);
    }
@Override
public CatalogFunction getFunction(ObjectPath path) throws FunctionNotExistException {
checkNotNull(path);
ObjectPath functionPath = normalize(path);
if (!(databaseExists(functionPath.getDatabaseName())
&& functions.containsKey(functionPath))) {
throw new FunctionNotExistException(getName(), functionPath);
} else {
return functions.get(functionPath).copy();
}
}
@Override
public boolean functionExists(ObjectPath path) {
checkNotNull(path);
try {
getFunction(path);
return true;
} catch (Exception e) {
return false;
}
}
    /** Normalizes the object (function) name of a path via FunctionIdentifier.normalizeName. */
    private ObjectPath normalize(ObjectPath path) {
        return new ObjectPath(
                path.getDatabaseName(), FunctionIdentifier.normalizeName(path.getObjectName()));
    }
// ------ partitions ------
@Override
public void createPartition(
ObjectPath tablePath,
CatalogPartitionSpec partitionSpec,
CatalogPartition partition,
boolean ignoreIfExists)
throws TableNotExistException,
TableNotPartitionedException,
PartitionSpecInvalidException,
PartitionAlreadyExistsException,
CatalogException {
checkNotNull(tablePath);
checkNotNull(partitionSpec);
checkNotNull(partition);
ensureTableExists(tablePath);
ensurePartitionedTable(tablePath);
ensureFullPartitionSpec(tablePath, partitionSpec);
if (partitionExists(tablePath, partitionSpec)) {
if (!ignoreIfExists) {
throw new PartitionAlreadyExistsException(getName(), tablePath, partitionSpec);
}
}
partitions.get(tablePath).put(partitionSpec, partition.copy());
}
@Override
public void dropPartition(
ObjectPath tablePath, CatalogPartitionSpec partitionSpec, boolean ignoreIfNotExists)
throws PartitionNotExistException, CatalogException {
checkNotNull(tablePath);
checkNotNull(partitionSpec);
if (partitionExists(tablePath, partitionSpec)) {
partitions.get(tablePath).remove(partitionSpec);
partitionStats.get(tablePath).remove(partitionSpec);
partitionColumnStats.get(tablePath).remove(partitionSpec);
} else if (!ignoreIfNotExists) {
throw new PartitionNotExistException(getName(), tablePath, partitionSpec);
}
}
    /**
     * Replaces an existing partition with a new one of the same concrete class.
     *
     * @param tablePath path of the owning table
     * @param partitionSpec spec identifying the partition to alter
     * @param newPartition the replacement partition
     * @param ignoreIfNotExists if true, a missing partition is silently ignored
     * @throws PartitionNotExistException if the partition is missing and ignoreIfNotExists is
     *     false
     * @throws CatalogException if the new partition's class differs from the existing one's
     */
    @Override
    public void alterPartition(
            ObjectPath tablePath,
            CatalogPartitionSpec partitionSpec,
            CatalogPartition newPartition,
            boolean ignoreIfNotExists)
            throws PartitionNotExistException, CatalogException {
        checkNotNull(tablePath);
        checkNotNull(partitionSpec);
        checkNotNull(newPartition);

        if (partitionExists(tablePath, partitionSpec)) {
            CatalogPartition existingPartition = partitions.get(tablePath).get(partitionSpec);

            // Swapping in a partition of a different implementation class is not allowed.
            if (existingPartition.getClass() != newPartition.getClass()) {
                throw new CatalogException(
                        String.format(
                                "Partition types don't match. Existing partition is '%s' and new partition is '%s'.",
                                existingPartition.getClass().getName(),
                                newPartition.getClass().getName()));
            }

            partitions.get(tablePath).put(partitionSpec, newPartition.copy());
        } else if (!ignoreIfNotExists) {
            throw new PartitionNotExistException(getName(), tablePath, partitionSpec);
        }
    }
    /**
     * Lists all partition specs of an existing partitioned table.
     *
     * @throws TableNotExistException if the table does not exist
     * @throws TableNotPartitionedException if the table is not partitioned
     */
    @Override
    public List<CatalogPartitionSpec> listPartitions(ObjectPath tablePath)
            throws TableNotExistException, TableNotPartitionedException, CatalogException {
        checkNotNull(tablePath);

        ensureTableExists(tablePath);
        ensurePartitionedTable(tablePath);

        return new ArrayList<>(partitions.get(tablePath).keySet());
    }
@Override
public List<CatalogPartitionSpec> listPartitions(
ObjectPath tablePath, CatalogPartitionSpec partitionSpec)
throws TableNotExistException,
TableNotPartitionedException,
PartitionSpecInvalidException,
CatalogException {
checkNotNull(tablePath);
checkNotNull(partitionSpec);
ensurePartitionedTable(tablePath);
CatalogTable catalogTable = (CatalogTable) getTable(tablePath);
List<String> partKeys = catalogTable.getPartitionKeys();
Map<String, String> spec = partitionSpec.getPartitionSpec();
if (!partKeys.containsAll(spec.keySet())) {
return new ArrayList<>();
}
return partitions.get(tablePath).keySet().stream()
.filter(
ps ->
ps.getPartitionSpec()
.entrySet()
.containsAll(partitionSpec.getPartitionSpec().entrySet()))
.collect(Collectors.toList());
}
    /** Filter-expression-based partition listing is not supported by this in-memory catalog. */
    @Override
    public List<CatalogPartitionSpec> listPartitionsByFilter(
            ObjectPath tablePath, List<Expression> filters)
            throws TableNotExistException, TableNotPartitionedException, CatalogException {
        throw new UnsupportedOperationException();
    }
    /**
     * Looks up a partition.
     *
     * @return a copy of the stored partition, so callers cannot mutate catalog state
     * @throws PartitionNotExistException if the partition does not exist
     */
    @Override
    public CatalogPartition getPartition(ObjectPath tablePath, CatalogPartitionSpec partitionSpec)
            throws PartitionNotExistException, CatalogException {
        checkNotNull(tablePath);
        checkNotNull(partitionSpec);

        if (!partitionExists(tablePath, partitionSpec)) {
            throw new PartitionNotExistException(getName(), tablePath, partitionSpec);
        }

        return partitions.get(tablePath).get(partitionSpec).copy();
    }
    /** Returns whether the table has a partition map containing the given spec. */
    @Override
    public boolean partitionExists(ObjectPath tablePath, CatalogPartitionSpec partitionSpec)
            throws CatalogException {
        checkNotNull(tablePath);
        checkNotNull(partitionSpec);

        return partitions.containsKey(tablePath)
                && partitions.get(tablePath).containsKey(partitionSpec);
    }
    /**
     * Guard helper: throws {@link PartitionSpecInvalidException} unless the spec covers exactly
     * the table's partition keys.
     */
    private void ensureFullPartitionSpec(ObjectPath tablePath, CatalogPartitionSpec partitionSpec)
            throws TableNotExistException, PartitionSpecInvalidException {
        if (!isFullPartitionSpec(tablePath, partitionSpec)) {
            throw new PartitionSpecInvalidException(
                    getName(),
                    ((CatalogTable) getTable(tablePath)).getPartitionKeys(),
                    tablePath,
                    partitionSpec);
        }
    }
    /**
     * Check if the given partitionSpec is a full partition spec for the given table, i.e. it
     * provides a value for every partition key and nothing else. Returns false for tables that
     * are not {@link CatalogTable}s (e.g. views).
     */
    private boolean isFullPartitionSpec(ObjectPath tablePath, CatalogPartitionSpec partitionSpec)
            throws TableNotExistException {
        CatalogBaseTable baseTable = getTable(tablePath);

        if (!(baseTable instanceof CatalogTable)) {
            return false;
        }

        CatalogTable table = (CatalogTable) baseTable;
        List<String> partitionKeys = table.getPartitionKeys();
        Map<String, String> spec = partitionSpec.getPartitionSpec();

        // Full spec: same number of entries as partition keys, and every key is covered.
        return partitionKeys.size() == spec.size() && spec.keySet().containsAll(partitionKeys);
    }
    /** Guard helper: throws {@link TableNotPartitionedException} unless the table is partitioned. */
    private void ensurePartitionedTable(ObjectPath tablePath) throws TableNotPartitionedException {
        if (!isPartitionedTable(tablePath)) {
            throw new TableNotPartitionedException(getName(), tablePath);
        }
    }
/**
* Check if the given table is a partitioned table. Note that "false" is returned if the table
* doesn't exists.
*/
private boolean isPartitionedTable(ObjectPath tablePath) {
CatalogBaseTable table = null;
try {
table = getTable(tablePath);
} catch (TableNotExistException e) {
return false;
}
return (table instanceof CatalogTable) && ((CatalogTable) table).isPartitioned();
}
// ------ statistics ------
@Override
public CatalogTableStatistics getTableStatistics(ObjectPath tablePath)
throws TableNotExistException {
checkNotNull(tablePath);
if (!tableExists(tablePath)) {
throw new TableNotExistException(getName(), tablePath);
}
if (!isPartitionedTable(tablePath)) {
CatalogTableStatistics result = tableStats.get(tablePath);
return result != null ? result.copy() : CatalogTableStatistics.UNKNOWN;
} else {
return CatalogTableStatistics.UNKNOWN;
}
}
@Override
public CatalogColumnStatistics getTableColumnStatistics(ObjectPath tablePath)
throws TableNotExistException {
checkNotNull(tablePath);
if (!tableExists(tablePath)) {
throw new TableNotExistException(getName(), tablePath);
}
CatalogColumnStatistics result = tableColumnStats.get(tablePath);
return result != null ? result.copy() : CatalogColumnStatistics.UNKNOWN;
}
    /**
     * Returns statistics of a single partition, or {@code CatalogTableStatistics.UNKNOWN} when
     * none are recorded.
     *
     * @throws PartitionNotExistException if the partition does not exist
     */
    @Override
    public CatalogTableStatistics getPartitionStatistics(
            ObjectPath tablePath, CatalogPartitionSpec partitionSpec)
            throws PartitionNotExistException {
        checkNotNull(tablePath);
        checkNotNull(partitionSpec);

        if (!partitionExists(tablePath, partitionSpec)) {
            throw new PartitionNotExistException(getName(), tablePath, partitionSpec);
        }

        CatalogTableStatistics result = partitionStats.get(tablePath).get(partitionSpec);
        return result != null ? result.copy() : CatalogTableStatistics.UNKNOWN;
    }
    /**
     * Returns column statistics of a single partition, or
     * {@code CatalogColumnStatistics.UNKNOWN} when none are recorded.
     *
     * @throws PartitionNotExistException if the partition does not exist
     */
    @Override
    public CatalogColumnStatistics getPartitionColumnStatistics(
            ObjectPath tablePath, CatalogPartitionSpec partitionSpec)
            throws PartitionNotExistException {
        checkNotNull(tablePath);
        checkNotNull(partitionSpec);

        if (!partitionExists(tablePath, partitionSpec)) {
            throw new PartitionNotExistException(getName(), tablePath, partitionSpec);
        }

        CatalogColumnStatistics result = partitionColumnStats.get(tablePath).get(partitionSpec);
        return result != null ? result.copy() : CatalogColumnStatistics.UNKNOWN;
    }
@Override
public void alterTableStatistics(
ObjectPath tablePath, CatalogTableStatistics tableStatistics, boolean ignoreIfNotExists)
throws TableNotExistException {
checkNotNull(tablePath);
checkNotNull(tableStatistics);
if (tableExists(tablePath)) {
tableStats.put(tablePath, tableStatistics.copy());
} else if (!ignoreIfNotExists) {
throw new TableNotExistException(getName(), tablePath);
}
}
    /**
     * Records (replaces) column statistics for a table.
     *
     * @param tablePath path of the table
     * @param columnStatistics statistics to store (a copy is kept)
     * @param ignoreIfNotExists if true, a missing table is silently ignored
     * @throws TableNotExistException if the table is missing and ignoreIfNotExists is false
     */
    @Override
    public void alterTableColumnStatistics(
            ObjectPath tablePath,
            CatalogColumnStatistics columnStatistics,
            boolean ignoreIfNotExists)
            throws TableNotExistException {
        checkNotNull(tablePath);
        checkNotNull(columnStatistics);

        if (tableExists(tablePath)) {
            tableColumnStats.put(tablePath, columnStatistics.copy());
        } else if (!ignoreIfNotExists) {
            throw new TableNotExistException(getName(), tablePath);
        }
    }
    /**
     * Records (replaces) statistics for a single partition.
     *
     * @param tablePath path of the owning table
     * @param partitionSpec spec identifying the partition
     * @param partitionStatistics statistics to store (a copy is kept)
     * @param ignoreIfNotExists if true, a missing partition is silently ignored
     * @throws PartitionNotExistException if the partition is missing and ignoreIfNotExists is
     *     false
     */
    @Override
    public void alterPartitionStatistics(
            ObjectPath tablePath,
            CatalogPartitionSpec partitionSpec,
            CatalogTableStatistics partitionStatistics,
            boolean ignoreIfNotExists)
            throws PartitionNotExistException {
        checkNotNull(tablePath);
        checkNotNull(partitionSpec);
        checkNotNull(partitionStatistics);

        if (partitionExists(tablePath, partitionSpec)) {
            partitionStats.get(tablePath).put(partitionSpec, partitionStatistics.copy());
        } else if (!ignoreIfNotExists) {
            throw new PartitionNotExistException(getName(), tablePath, partitionSpec);
        }
    }
    /**
     * Records (replaces) column statistics for a single partition.
     *
     * @param tablePath path of the owning table
     * @param partitionSpec spec identifying the partition
     * @param columnStatistics statistics to store (a copy is kept)
     * @param ignoreIfNotExists if true, a missing partition is silently ignored
     * @throws PartitionNotExistException if the partition is missing and ignoreIfNotExists is
     *     false
     */
    @Override
    public void alterPartitionColumnStatistics(
            ObjectPath tablePath,
            CatalogPartitionSpec partitionSpec,
            CatalogColumnStatistics columnStatistics,
            boolean ignoreIfNotExists)
            throws PartitionNotExistException {
        checkNotNull(tablePath);
        checkNotNull(partitionSpec);
        checkNotNull(columnStatistics);

        if (partitionExists(tablePath, partitionSpec)) {
            partitionColumnStats.get(tablePath).put(partitionSpec, columnStatistics.copy());
        } else if (!ignoreIfNotExists) {
            throw new PartitionNotExistException(getName(), tablePath, partitionSpec);
        }
    }
private List<String> listObjectsUnderDatabase(
Map<ObjectPath, ?> map, String databaseName, Predicate<ObjectPath> filter)
throws DatabaseNotExistException {
checkArgument(
!StringUtils.isNullOrWhitespaceOnly(databaseName),
"databaseName cannot be null or empty");
if (!databaseExists(databaseName)) {
throw new DatabaseNotExistException(getName(), databaseName);
}
return map.keySet().stream()
.filter(k -> k.getDatabaseName().equals(databaseName))
.filter(filter)
.map(ObjectPath::getObjectName)
.collect(Collectors.toList());
}
}
| GenericInMemoryCatalog |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/internal/operators/maybe/MaybeToFutureTest.java | {
"start": 952,
"end": 2278
} | class ____ extends RxJavaTest {
@Test
public void success() throws Exception {
assertEquals((Integer)1, Maybe.just(1)
.subscribeOn(Schedulers.computation())
.toFuture()
.get());
}
@Test
public void empty() throws Exception {
assertNull(Maybe.empty()
.subscribeOn(Schedulers.computation())
.toFuture()
.get());
}
@Test
public void error() throws InterruptedException {
try {
Maybe.error(new TestException())
.subscribeOn(Schedulers.computation())
.toFuture()
.get();
fail("Should have thrown!");
} catch (ExecutionException ex) {
assertTrue("" + ex.getCause(), ex.getCause() instanceof TestException);
}
}
@Test
public void cancel() {
MaybeSubject<Integer> ms = MaybeSubject.create();
Future<Integer> f = ms.toFuture();
assertTrue(ms.hasObservers());
f.cancel(true);
assertFalse(ms.hasObservers());
}
@Test
public void cancel2() {
MaybeSubject<Integer> ms = MaybeSubject.create();
Future<Integer> f = ms.toFuture();
assertTrue(ms.hasObservers());
f.cancel(false);
assertFalse(ms.hasObservers());
}
}
| MaybeToFutureTest |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/LongKeyedBucketOrdsTests.java | {
"start": 1141,
"end": 16885
} | class ____ extends ESTestCase {
private final MockBigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
public void testExplicitCollectsFromSingleBucket() {
collectsFromSingleBucketCase(LongKeyedBucketOrds.build(bigArrays, CardinalityUpperBound.ONE));
}
public void testSurpriseCollectsFromSingleBucket() {
collectsFromSingleBucketCase(LongKeyedBucketOrds.build(bigArrays, CardinalityUpperBound.MANY));
}
public void testCollectsFromManyBuckets() {
try (LongKeyedBucketOrds ords = LongKeyedBucketOrds.build(bigArrays, CardinalityUpperBound.MANY)) {
assertCollectsFromManyBuckets(ords, scaledRandomIntBetween(1, 10000), Long.MIN_VALUE, Long.MAX_VALUE);
}
}
public void testCollectsFromManyBucketsSmall() {
int owningBucketOrds = scaledRandomIntBetween(2, 10000);
long maxValue = randomLongBetween(10000 / owningBucketOrds, 2 << (16 * 3));
CardinalityUpperBound cardinality = CardinalityUpperBound.ONE.multiply(owningBucketOrds);
try (LongKeyedBucketOrds ords = LongKeyedBucketOrds.buildForValueRange(bigArrays, cardinality, 0, maxValue)) {
assertCollectsFromManyBuckets(ords, owningBucketOrds, 0, maxValue);
}
}
private void collectsFromSingleBucketCase(LongKeyedBucketOrds ords) {
try {
// Test a few explicit values
assertThat(ords.add(0, 0), equalTo(0L));
assertThat(ords.add(0, 1000), equalTo(1L));
assertThat(ords.add(0, 0), equalTo(-1L));
assertThat(ords.add(0, 1000), equalTo(-2L));
assertThat(ords.find(0, 0), equalTo(0L));
assertThat(ords.find(0, 1000), equalTo(1L));
// And some random values
Set<Long> seen = new HashSet<>();
seen.add(0L);
seen.add(1000L);
assertThat(ords.size(), equalTo(2L));
long[] values = new long[scaledRandomIntBetween(1, 10000)];
for (int i = 0; i < values.length; i++) {
values[i] = randomValueOtherThanMany(seen::contains, ESTestCase::randomLong);
seen.add(values[i]);
}
for (int i = 0; i < values.length; i++) {
assertThat(ords.find(0, values[i]), equalTo(-1L));
assertThat(ords.add(0, values[i]), equalTo(i + 2L));
assertThat(ords.find(0, values[i]), equalTo(i + 2L));
assertThat(ords.size(), equalTo(i + 3L));
if (randomBoolean()) {
assertThat(ords.add(0, 0), equalTo(-1L));
}
}
for (int i = 0; i < values.length; i++) {
assertThat(ords.add(0, values[i]), equalTo(-1 - (i + 2L)));
}
// And the explicit values are still ok
assertThat(ords.add(0, 0), equalTo(-1L));
assertThat(ords.add(0, 1000), equalTo(-2L));
// Check counting values
assertThat(ords.bucketsInOrd(0), equalTo(values.length + 2L));
// Check iteration
LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = ords.ordsEnum(0);
assertTrue(ordsEnum.next());
assertThat(ordsEnum.ord(), equalTo(0L));
assertThat(ordsEnum.value(), equalTo(0L));
assertTrue(ordsEnum.next());
assertThat(ordsEnum.ord(), equalTo(1L));
assertThat(ordsEnum.value(), equalTo(1000L));
for (int i = 0; i < values.length; i++) {
assertTrue(ordsEnum.next());
assertThat(ordsEnum.ord(), equalTo(i + 2L));
assertThat(ordsEnum.value(), equalTo(values[i]));
}
assertFalse(ordsEnum.next());
assertThat(ords.maxOwningBucketOrd(), equalTo(0L));
} finally {
ords.close();
}
}
private void assertCollectsFromManyBuckets(LongKeyedBucketOrds ords, int maxAllowedOwningBucketOrd, long minValue, long maxValue) {
// Test a few explicit values
assertThat(ords.add(0, 0), equalTo(0L));
assertThat(ords.add(1, 0), equalTo(1L));
assertThat(ords.add(0, 0), equalTo(-1L));
assertThat(ords.add(1, 0), equalTo(-2L));
assertThat(ords.size(), equalTo(2L));
assertThat(ords.find(0, 0), equalTo(0L));
assertThat(ords.find(1, 0), equalTo(1L));
// And some random values
Set<OwningBucketOrdAndValue> seen = new HashSet<>();
seen.add(new OwningBucketOrdAndValue(0, 0));
seen.add(new OwningBucketOrdAndValue(1, 0));
OwningBucketOrdAndValue[] values = new OwningBucketOrdAndValue[scaledRandomIntBetween(1, 10000)];
long maxOwningBucketOrd = Long.MIN_VALUE;
for (int i = 0; i < values.length; i++) {
values[i] = randomValueOtherThanMany(
seen::contains,
() -> new OwningBucketOrdAndValue(randomLongBetween(0, maxAllowedOwningBucketOrd), randomLongBetween(minValue, maxValue))
);
seen.add(values[i]);
maxOwningBucketOrd = Math.max(maxOwningBucketOrd, values[i].owningBucketOrd);
}
for (int i = 0; i < values.length; i++) {
assertThat(ords.find(values[i].owningBucketOrd, values[i].value), equalTo(-1L));
assertThat(ords.add(values[i].owningBucketOrd, values[i].value), equalTo(i + 2L));
assertThat(ords.find(values[i].owningBucketOrd, values[i].value), equalTo(i + 2L));
assertThat(ords.size(), equalTo(i + 3L));
if (randomBoolean()) {
assertThat(ords.add(0, 0), equalTo(-1L));
}
}
for (int i = 0; i < values.length; i++) {
assertThat(ords.add(values[i].owningBucketOrd, values[i].value), equalTo(-1 - (i + 2L)));
}
// And the explicit values are still ok
assertThat(ords.add(0, 0), equalTo(-1L));
assertThat(ords.add(1, 0), equalTo(-2L));
for (long owningBucketOrd = 0; owningBucketOrd <= maxAllowedOwningBucketOrd; owningBucketOrd++) {
long expectedCount = 0;
LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = ords.ordsEnum(owningBucketOrd);
if (owningBucketOrd <= 1) {
expectedCount++;
assertTrue(ordsEnum.next());
assertThat(ordsEnum.ord(), equalTo(owningBucketOrd));
assertThat(ordsEnum.value(), equalTo(0L));
}
for (int i = 0; i < values.length; i++) {
if (values[i].owningBucketOrd == owningBucketOrd) {
expectedCount++;
assertTrue(ordsEnum.next());
assertThat(ordsEnum.ord(), equalTo(i + 2L));
assertThat(ordsEnum.value(), equalTo(values[i].value));
}
}
assertFalse(ordsEnum.next());
assertThat(ords.bucketsInOrd(owningBucketOrd), equalTo(expectedCount));
}
assertFalse(ords.ordsEnum(randomLongBetween(maxOwningBucketOrd + 1, Long.MAX_VALUE)).next());
assertThat(ords.bucketsInOrd(randomLongBetween(maxOwningBucketOrd + 1, Long.MAX_VALUE)), equalTo(0L));
assertThat(ords.maxOwningBucketOrd(), equalTo(maxOwningBucketOrd));
}
public void testKeyIteratorSingleValue() {
try (LongKeyedBucketOrds ords = LongKeyedBucketOrds.build(bigArrays, CardinalityUpperBound.ONE)) {
// Test a few explicit values
assertThat(ords.add(0, 0), equalTo(0L));
assertThat(ords.add(0, 1000), equalTo(1L));
assertThat(ords.add(0, 0), equalTo(-1L));
assertThat(ords.add(0, 1000), equalTo(-2L));
assertThat(ords.find(0, 0), equalTo(0L));
assertThat(ords.find(0, 1000), equalTo(1L));
// And some random values
Set<Long> seen = new TreeSet<>();
seen.add(0L);
seen.add(1000L);
assertThat(ords.size(), equalTo(2L));
long[] values = new long[scaledRandomIntBetween(1, 10000)];
for (int i = 0; i < values.length; i++) {
values[i] = randomValueOtherThanMany(seen::contains, ESTestCase::randomLong);
seen.add(values[i]);
}
for (int i = 0; i < values.length; i++) {
assertThat(ords.find(0, values[i]), equalTo(-1L));
assertThat(ords.add(0, values[i]), equalTo(i + 2L));
assertThat(ords.find(0, values[i]), equalTo(i + 2L));
assertThat(ords.size(), equalTo(i + 3L));
}
// For the single value case, the sorted iterator should exactly equal the values tree set iterator
Iterator<Long> expected = seen.iterator();
Iterator<Long> actual = ords.keyOrderedIterator(0);
assertNotSame(expected, actual);
while (expected.hasNext()) {
assertThat(actual.hasNext(), is(true));
long actualNext = actual.next();
long expectedNext = expected.next();
assertThat(actualNext, equalTo(expectedNext));
}
assertThat(actual.hasNext(), is(false));
}
}
public void testKeyIteratormanyBuckets() {
long maxAllowedOwningBucketOrd = scaledRandomIntBetween(1, 10000);
long minValue = Long.MIN_VALUE;
long maxValue = Long.MAX_VALUE;
try (LongKeyedBucketOrds ords = LongKeyedBucketOrds.build(bigArrays, CardinalityUpperBound.MANY)) {
Map<Long, TreeSet<Long>> expected = new HashMap<>();
// Test a few explicit values
assertThat(ords.add(0, 0), equalTo(0L));
assertThat(ords.add(1, 0), equalTo(1L));
assertThat(ords.add(0, 0), equalTo(-1L));
assertThat(ords.add(1, 0), equalTo(-2L));
assertThat(ords.size(), equalTo(2L));
assertThat(ords.find(0, 0), equalTo(0L));
assertThat(ords.find(1, 0), equalTo(1L));
Set<OwningBucketOrdAndValue> seen = new HashSet<>();
seen.add(new OwningBucketOrdAndValue(0, 0));
seen.add(new OwningBucketOrdAndValue(1, 0));
expected.put(0L, new TreeSet<>());
expected.get(0L).add(0L);
expected.put(1L, new TreeSet<>());
expected.get(1L).add(0L);
OwningBucketOrdAndValue[] values = new OwningBucketOrdAndValue[scaledRandomIntBetween(1, 10000)];
for (int i = 0; i < values.length; i++) {
values[i] = randomValueOtherThanMany(
seen::contains,
() -> new OwningBucketOrdAndValue(
randomLongBetween(0, maxAllowedOwningBucketOrd),
randomLongBetween(minValue, maxValue)
)
);
seen.add(values[i]);
if (expected.containsKey(values[i].owningBucketOrd) == false) {
expected.put(values[i].owningBucketOrd, new TreeSet<>());
}
expected.get(values[i].owningBucketOrd).add(values[i].value);
}
for (int i = 0; i < values.length; i++) {
assertThat(ords.find(values[i].owningBucketOrd, values[i].value), equalTo(-1L));
assertThat(ords.add(values[i].owningBucketOrd, values[i].value), equalTo(i + 2L));
assertThat(ords.find(values[i].owningBucketOrd, values[i].value), equalTo(i + 2L));
assertThat(ords.size(), equalTo(i + 3L));
}
for (Long owningBucketOrd : expected.keySet()) {
Iterator<Long> expectedIterator = expected.get(owningBucketOrd).iterator();
Iterator<Long> actualIterator = ords.keyOrderedIterator(owningBucketOrd);
assertNotSame(expectedIterator, actualIterator);
while (expectedIterator.hasNext()) {
assertThat(actualIterator.hasNext(), is(true));
long actualNext = actualIterator.next();
long expectedNext = expectedIterator.next();
assertThat(actualNext, equalTo(expectedNext));
}
assertThat(actualIterator.hasNext(), is(false));
}
}
}
/**
 * Verifies {@code keyOrderedIterator} on the "many owning buckets, small value range"
 * specialization of {@link LongKeyedBucketOrds}.
 * <p>
 * First checks a few explicit add/find interactions (duplicate adds return the negated
 * existing ordinal minus one), then inserts a random batch of unique
 * (owningBucketOrd, value) pairs, mirroring them into per-bucket {@link TreeSet}s, and
 * finally asserts that the iterator for each owning bucket yields exactly the expected
 * values in ascending key order.
 */
public void testKeyIteratorManyBucketsSmall() {
    int maxAllowedOwningBucketOrd = scaledRandomIntBetween(2, 10000);
    long minValue = 0;
    // Keep the value range narrow enough that buildForValueRange picks the FromManySmall impl.
    long maxValue = randomLongBetween(10000 / maxAllowedOwningBucketOrd, 2 << (16 * 3));
    CardinalityUpperBound cardinality = CardinalityUpperBound.ONE.multiply(maxAllowedOwningBucketOrd);
    try (LongKeyedBucketOrds ords = LongKeyedBucketOrds.buildForValueRange(bigArrays, cardinality, minValue, maxValue)) {
        assertTrue(ords instanceof LongKeyedBucketOrds.FromManySmall);
        Map<Long, TreeSet<Long>> expected = new HashMap<>();
        // Test a few explicit values: first add assigns a fresh ordinal, re-adding an
        // existing key returns -1 - existingOrd.
        assertThat(ords.add(0, 0), equalTo(0L));
        assertThat(ords.add(1, 0), equalTo(1L));
        assertThat(ords.add(0, 0), equalTo(-1L));
        assertThat(ords.add(1, 0), equalTo(-2L));
        assertThat(ords.size(), equalTo(2L));
        assertThat(ords.find(0, 0), equalTo(0L));
        assertThat(ords.find(1, 0), equalTo(1L));
        Set<OwningBucketOrdAndValue> seen = new HashSet<>();
        seen.add(new OwningBucketOrdAndValue(0, 0));
        seen.add(new OwningBucketOrdAndValue(1, 0));
        expected.computeIfAbsent(0L, k -> new TreeSet<>()).add(0L);
        expected.computeIfAbsent(1L, k -> new TreeSet<>()).add(0L);
        // Generate a random batch of pairs not seen before, recording each value in the
        // per-bucket sorted set we will later compare the iterator against.
        OwningBucketOrdAndValue[] values = new OwningBucketOrdAndValue[scaledRandomIntBetween(1, 10000)];
        for (int i = 0; i < values.length; i++) {
            values[i] = randomValueOtherThanMany(
                seen::contains,
                () -> new OwningBucketOrdAndValue(
                    randomLongBetween(0, maxAllowedOwningBucketOrd),
                    randomLongBetween(minValue, maxValue)
                )
            );
            seen.add(values[i]);
            expected.computeIfAbsent(values[i].owningBucketOrd, k -> new TreeSet<>()).add(values[i].value);
        }
        // Each pair is absent before its add, gets ordinal i + 2 (two explicit adds above),
        // and is findable afterwards; size grows by one per add.
        for (int i = 0; i < values.length; i++) {
            assertThat(ords.find(values[i].owningBucketOrd, values[i].value), equalTo(-1L));
            assertThat(ords.add(values[i].owningBucketOrd, values[i].value), equalTo(i + 2L));
            assertThat(ords.find(values[i].owningBucketOrd, values[i].value), equalTo(i + 2L));
            assertThat(ords.size(), equalTo(i + 3L));
        }
        // The key-ordered iterator for each owning bucket must match the TreeSet's
        // ascending iteration exactly, element for element, and then be exhausted.
        for (Long owningBucketOrd : expected.keySet()) {
            Iterator<Long> expectedIterator = expected.get(owningBucketOrd).iterator();
            Iterator<Long> actualIterator = ords.keyOrderedIterator(owningBucketOrd);
            assertNotSame(expectedIterator, actualIterator);
            while (expectedIterator.hasNext()) {
                assertThat(actualIterator.hasNext(), is(true));
                long actualNext = actualIterator.next();
                long expectedNext = expectedIterator.next();
                assertThat(actualNext, equalTo(expectedNext));
            }
            assertThat(actualIterator.hasNext(), is(false));
        }
    }
}
private | LongKeyedBucketOrdsTests |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/web/filter/UrlHandlerFilter.java | {
"start": 4508,
"end": 5463
} | interface ____ {
/**
* Configure a request consumer to be called just before the handler
* is invoked when a URL with a trailing slash is matched.
*/
TrailingSlashSpec intercept(Consumer<HttpServletRequest> consumer);
/**
* Handle requests by sending a redirect to the same URL but the
* trailing slash trimmed.
* @param status the redirect status to use
* @return the top level {@link Builder}, which allows adding more
* handlers and then building the Filter instance.
*/
Builder redirect(HttpStatus status);
/**
* Handle the request by wrapping it in order to trim the trailing
* slash, and delegating to the rest of the filter chain.
* @return the top level {@link Builder}, which allows adding more
* handlers and then building the Filter instance.
*/
Builder wrapRequest();
}
}
/**
* Default {@link Builder} implementation.
*/
private static final | TrailingSlashSpec |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownThreadsHelper.java | {
"start": 1056,
"end": 1131
} | class ____ shutdown {@link Thread}s and {@link ExecutorService}s.
*/
public | to |
java | apache__kafka | connect/runtime/src/test/resources/test-plugins/subclass-of-classpath/test/plugins/SubclassOfClasspathConverter.java | {
"start": 1046,
"end": 1115
} | class ____ the classpath which is discovered reflectively
*/
public | from |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/component/bean/MockitoSpyForClassTest.java | {
"start": 1185,
"end": 2017
} | class ____ extends ContextTestSupport {
@Test
public void testCallingSpy() {
Object response = template.requestBody("direct:start", "anything");
assertEquals("mocked answer", response);
}
@Override
protected Registry createCamelRegistry() throws Exception {
MyService mockService = Mockito.spy(new MyService());
when(mockService.doSomething(any())).thenReturn("mocked answer");
Registry answer = super.createCamelRegistry();
answer.bind("myService", mockService);
return answer;
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from("direct:start").bean("bean:myService");
}
};
}
public static | MockitoSpyForClassTest |
java | apache__camel | components/camel-reactive-streams/src/test/java/org/apache/camel/component/reactive/streams/DirectClientAPITest.java | {
"start": 1571,
"end": 7983
} | class ____ extends ReactiveStreamsTestSupport {
@BindToRegistry("hello")
private SampleBean bean = new SampleBean();
@Test
public void testFromDirect() throws Exception {
Publisher<Integer> data = camel.from("direct:endpoint", Integer.class);
BlockingQueue<Integer> queue = new LinkedBlockingDeque<>();
Flowable.fromPublisher(data).map(i -> -i).doOnNext(queue::add).subscribe();
context.start();
template.sendBody("direct:endpoint", 1);
Integer res = queue.poll(1, TimeUnit.SECONDS);
assertNotNull(res);
assertEquals(-1, res.intValue());
}
@Test
public void testFromDirectOnHotContext() throws Exception {
context.start();
Thread.sleep(200);
Publisher<Integer> data = camel.from("direct:endpoint", Integer.class);
BlockingQueue<Integer> queue = new LinkedBlockingDeque<>();
Flowable.fromPublisher(data).map(i -> -i).doOnNext(queue::add).subscribe();
template.sendBody("direct:endpoint", 1);
Integer res = queue.poll(1, TimeUnit.SECONDS);
assertNotNull(res);
assertEquals(-1, res.intValue());
}
@Test
public void testDirectCall() throws Exception {
context.start();
BlockingQueue<String> queue = new LinkedBlockingDeque<>();
Flowable.just(1, 2, 3).flatMap(camel.to("bean:hello", String.class)::apply).doOnNext(queue::add).subscribe();
check3HelloInQueue(queue);
}
@Test
public void testDirectSendAndForget() throws Exception {
new RouteBuilder() {
@Override
public void configure() {
from("direct:data").to("mock:result");
}
}.addRoutesToCamelContext(context);
context.start();
Flowable.just(1, 2, 3).subscribe(camel.subscriber("direct:data", Integer.class));
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMessageCount(3);
mock.assertIsSatisfied();
int idx = 1;
for (Exchange ex : mock.getExchanges()) {
Integer num = ex.getIn().getBody(Integer.class);
assertEquals(Integer.valueOf(idx++), num);
}
}
@Test
public void testDirectCallOverload() throws Exception {
context.start();
BlockingQueue<String> queue = new LinkedBlockingDeque<>();
Flowable.just(1, 2, 3).flatMap(e -> camel.to("bean:hello", e, String.class)).doOnNext(queue::add).subscribe();
check3HelloInQueue(queue);
}
@Test
public void testDirectCallWithExchange() throws Exception {
context.start();
BlockingQueue<String> queue = new LinkedBlockingDeque<>();
Flowable.just(1, 2, 3).flatMap(camel.to("bean:hello")::apply).map(ex -> ex.getMessage().getBody(String.class))
.doOnNext(queue::add).subscribe();
check3HelloInQueue(queue);
}
private void check3HelloInQueue(BlockingQueue<String> queue) throws InterruptedException {
Set<String> res = new HashSet<>();
res.add(queue.poll(1, TimeUnit.SECONDS));
res.add(queue.poll(1, TimeUnit.SECONDS));
res.add(queue.poll(1, TimeUnit.SECONDS));
Assertions.assertThat(res).containsExactlyInAnyOrder("Hello 1", "Hello 2", "Hello 3");
}
@Test
public void testDirectCallWithExchangeOverload() throws Exception {
context.start();
BlockingQueue<String> queue = new LinkedBlockingDeque<>();
Flowable.just(1, 2, 3).flatMap(e -> camel.to("bean:hello", e)).map(ex -> ex.getMessage().getBody(String.class))
.doOnNext(queue::add).subscribe();
check3HelloInQueue(queue);
}
@Test
public void testProxiedDirectCall() throws Exception {
context.start();
new RouteBuilder() {
@Override
public void configure() {
from("direct:proxy").to("bean:hello").setBody().simple("proxy to ${body}");
}
}.addRoutesToCamelContext(context);
BlockingQueue<String> queue = new LinkedBlockingDeque<>();
Flowable.just(1, 2, 3).flatMap(camel.to("direct:proxy", String.class)::apply).doOnNext(queue::add).subscribe();
for (int i = 1; i <= 3; i++) {
String res = queue.poll(1, TimeUnit.SECONDS);
assertEquals("proxy to Hello " + i, res);
}
}
@Test
public void testDirectCallFromCamel() throws Exception {
new RouteBuilder() {
@Override
public void configure() {
from("direct:source").to("direct:stream").setBody().simple("after stream: ${body}").to("mock:dest");
}
}.addRoutesToCamelContext(context);
context.start();
camel.process("direct:stream", p -> Flowable.fromPublisher(p).map(exchange -> {
int val = exchange.getIn().getBody(Integer.class);
exchange.getMessage().setBody(-val);
return exchange;
}));
for (int i = 1; i <= 3; i++) {
template.sendBody("direct:source", i);
}
MockEndpoint mock = getMockEndpoint("mock:dest");
mock.expectedMessageCount(3);
mock.assertIsSatisfied();
int id = 1;
for (Exchange ex : mock.getExchanges()) {
String content = ex.getIn().getBody(String.class);
assertEquals("after stream: " + (-id++), content);
}
}
@Test
public void testDirectCallFromCamelWithConversion() throws Exception {
new RouteBuilder() {
@Override
public void configure() {
from("direct:source").to("direct:stream").setBody().simple("after stream: ${body}").to("mock:dest");
}
}.addRoutesToCamelContext(context);
context.start();
camel.process("direct:stream", Integer.class, p -> Flowable.fromPublisher(p).map(i -> -i));
for (int i = 1; i <= 3; i++) {
template.sendBody("direct:source", i);
}
MockEndpoint mock = getMockEndpoint("mock:dest");
mock.expectedMessageCount(3);
mock.assertIsSatisfied();
int id = 1;
for (Exchange ex : mock.getExchanges()) {
String content = ex.getIn().getBody(String.class);
assertEquals("after stream: " + (-id++), content);
}
}
public static | DirectClientAPITest |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/lib/aggregate/AggregatorTests.java | {
"start": 955,
"end": 2646
} | class ____ extends ValueAggregatorBaseDescriptor {
public ArrayList<Entry<Text, Text>> generateKeyValPairs(Object key, Object val) {
ArrayList<Entry<Text, Text>> retv = new ArrayList<Entry<Text, Text>>();
String [] words = val.toString().split(" ");
String countType;
String id;
Entry<Text, Text> e;
for (String word: words) {
long numVal = Long.parseLong(word);
countType = LONG_VALUE_SUM;
id = "count_" + word;
e = generateEntry(countType, id, ValueAggregatorDescriptor.ONE);
if (e != null) {
retv.add(e);
}
countType = LONG_VALUE_MAX;
id = "max";
e = generateEntry(countType, id, new Text(word));
if (e != null) {
retv.add(e);
}
countType = LONG_VALUE_MIN;
id = "min";
e = generateEntry(countType, id, new Text(word));
if (e != null) {
retv.add(e);
}
countType = STRING_VALUE_MAX;
id = "value_as_string_max";
e = generateEntry(countType, id, new Text(""+numVal));
if (e != null) {
retv.add(e);
}
countType = STRING_VALUE_MIN;
id = "value_as_string_min";
e = generateEntry(countType, id, new Text(""+numVal));
if (e != null) {
retv.add(e);
}
countType = UNIQ_VALUE_COUNT;
id = "uniq_count";
e = generateEntry(countType, id, new Text(word));
if (e != null) {
retv.add(e);
}
countType = VALUE_HISTOGRAM;
id = "histogram";
e = generateEntry(countType, id, new Text(word));
if (e != null) {
retv.add(e);
}
}
return retv;
}
}
| AggregatorTests |
java | apache__camel | components/camel-aws/camel-aws2-lambda/src/test/java/org/apache/camel/component/aws2/lambda/integration/LambdaAliasesIT.java | {
"start": 1749,
"end": 4959
} | class ____ extends Aws2LambdaBase {
@Test
public void createGetDeleteAndListAliasesShouldSucceed() {
Map<String, Object> headers = new HashMap<>();
headers.put(Lambda2Constants.RUNTIME, "nodejs16.x");
headers.put(Lambda2Constants.HANDLER, "GetHelloWithName.handler");
headers.put(Lambda2Constants.ROLE, "arn:aws:iam::643534317684:role/lambda-execution-role");
ClassLoader classLoader = getClass().getClassLoader();
InputStream body
= classLoader.getResourceAsStream("org/apache/camel/component/aws2/lambda/function/node/GetHelloWithName.zip");
CreateFunctionResponse functionCreated
= template.requestBodyAndHeaders("direct:createFunction", body, headers, CreateFunctionResponse.class);
assertEquals("GetHelloWithName", functionCreated.functionName());
headers = new HashMap<>();
headers.put(Lambda2Constants.FUNCTION_ALIAS_NAME, "GetHelloWithNameAlias");
headers.put(Lambda2Constants.FUNCTION_VERSION, "$LATEST");
CreateAliasResponse aliasCreated
= template.requestBodyAndHeaders("direct:createAlias", null, headers, CreateAliasResponse.class);
assertEquals("GetHelloWithNameAlias", aliasCreated.name());
assertEquals("$LATEST", aliasCreated.functionVersion());
headers = new HashMap<>();
headers.put(Lambda2Constants.FUNCTION_ALIAS_NAME, "GetHelloWithNameAlias");
GetAliasResponse aliasGot = template.requestBodyAndHeaders("direct:getAlias", null, headers, GetAliasResponse.class);
assertEquals("GetHelloWithNameAlias", aliasGot.name());
assertEquals("$LATEST", aliasGot.functionVersion());
ListAliasesResponse aliasesListed = template.requestBody("direct:listAliases", null, ListAliasesResponse.class);
assertNotNull(aliasesListed.aliases());
aliasesListed.aliases().stream().anyMatch(a -> "GetHelloWithNameAlias".equals(a.name()));
headers = new HashMap<>();
headers.put(Lambda2Constants.FUNCTION_ALIAS_NAME, "GetHelloWithNameAlias");
template.requestBodyAndHeaders("direct:deleteAlias", null, headers);
aliasesListed = template.requestBody("direct:listAliases", null, ListAliasesResponse.class);
assertNotNull(aliasesListed.aliases());
aliasesListed.aliases().stream().noneMatch(a -> "GetHelloWithNameAlias".equals(a.name()));
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
String endpointUriFormat = "aws2-lambda://GetHelloWithName?operation=%s";
from("direct:createFunction").toF(endpointUriFormat, Lambda2Operations.createFunction);
from("direct:createAlias").toF(endpointUriFormat, Lambda2Operations.createAlias);
from("direct:listAliases").toF(endpointUriFormat, Lambda2Operations.listAliases);
from("direct:getAlias").toF(endpointUriFormat, Lambda2Operations.getAlias);
from("direct:deleteAlias").toF(endpointUriFormat, Lambda2Operations.deleteAlias);
}
};
}
}
| LambdaAliasesIT |
java | apache__kafka | group-coordinator/src/test/java/org/apache/kafka/coordinator/group/metrics/GroupCoordinatorMetricsTest.java | {
"start": 3122,
"end": 16878
} | class ____ {
@Test
public void testMetricNames() {
MetricsRegistry registry = new MetricsRegistry();
Metrics metrics = new Metrics();
Set<org.apache.kafka.common.MetricName> expectedMetrics = Set.of(
metrics.metricName("offset-commit-rate", GroupCoordinatorMetrics.METRICS_GROUP),
metrics.metricName("offset-commit-count", GroupCoordinatorMetrics.METRICS_GROUP),
metrics.metricName("offset-expiration-rate", GroupCoordinatorMetrics.METRICS_GROUP),
metrics.metricName("offset-expiration-count", GroupCoordinatorMetrics.METRICS_GROUP),
metrics.metricName("offset-deletion-rate", GroupCoordinatorMetrics.METRICS_GROUP),
metrics.metricName("offset-deletion-count", GroupCoordinatorMetrics.METRICS_GROUP),
metrics.metricName("group-completed-rebalance-rate", GroupCoordinatorMetrics.METRICS_GROUP),
metrics.metricName("group-completed-rebalance-count", GroupCoordinatorMetrics.METRICS_GROUP),
metrics.metricName("consumer-group-rebalance-rate", GroupCoordinatorMetrics.METRICS_GROUP),
metrics.metricName("consumer-group-rebalance-count", GroupCoordinatorMetrics.METRICS_GROUP),
metrics.metricName(
"group-count",
GroupCoordinatorMetrics.METRICS_GROUP,
Map.of("protocol", "classic")),
metrics.metricName(
"group-count",
GroupCoordinatorMetrics.METRICS_GROUP,
Map.of("protocol", "consumer")),
metrics.metricName(
"consumer-group-count",
GroupCoordinatorMetrics.METRICS_GROUP,
Map.of("state", ConsumerGroupState.EMPTY.toString())),
metrics.metricName(
"consumer-group-count",
GroupCoordinatorMetrics.METRICS_GROUP,
Map.of("state", ConsumerGroupState.ASSIGNING.toString())),
metrics.metricName(
"consumer-group-count",
GroupCoordinatorMetrics.METRICS_GROUP,
Map.of("state", ConsumerGroupState.RECONCILING.toString())),
metrics.metricName(
"consumer-group-count",
GroupCoordinatorMetrics.METRICS_GROUP,
Map.of("state", ConsumerGroupState.STABLE.toString())),
metrics.metricName(
"consumer-group-count",
GroupCoordinatorMetrics.METRICS_GROUP,
Map.of("state", ConsumerGroupState.DEAD.toString())),
metrics.metricName(
"group-count",
GroupCoordinatorMetrics.METRICS_GROUP,
Map.of("protocol", Group.GroupType.SHARE.toString())),
metrics.metricName("share-group-rebalance-rate", GroupCoordinatorMetrics.METRICS_GROUP),
metrics.metricName("share-group-rebalance-count", GroupCoordinatorMetrics.METRICS_GROUP),
metrics.metricName(
"share-group-count",
GroupCoordinatorMetrics.METRICS_GROUP,
"The number of share groups in empty state.",
"state", GroupState.EMPTY.toString()),
metrics.metricName(
"share-group-count",
GroupCoordinatorMetrics.METRICS_GROUP,
"The number of share groups in stable state.",
"state", GroupState.STABLE.toString()),
metrics.metricName(
"share-group-count",
GroupCoordinatorMetrics.METRICS_GROUP,
"The number of share groups in dead state.",
"state", GroupState.DEAD.toString()),
metrics.metricName(
"group-count",
GroupCoordinatorMetrics.METRICS_GROUP,
Map.of("protocol", Group.GroupType.STREAMS.toString())),
metrics.metricName("streams-group-rebalance-rate", GroupCoordinatorMetrics.METRICS_GROUP),
metrics.metricName("streams-group-rebalance-count", GroupCoordinatorMetrics.METRICS_GROUP),
metrics.metricName(
"streams-group-count",
GroupCoordinatorMetrics.METRICS_GROUP,
Map.of("state", StreamsGroupState.EMPTY.toString())),
metrics.metricName(
"streams-group-count",
GroupCoordinatorMetrics.METRICS_GROUP,
Map.of("state", StreamsGroupState.ASSIGNING.toString())),
metrics.metricName(
"streams-group-count",
GroupCoordinatorMetrics.METRICS_GROUP,
Map.of("state", StreamsGroupState.RECONCILING.toString())),
metrics.metricName(
"streams-group-count",
GroupCoordinatorMetrics.METRICS_GROUP,
Map.of("state", StreamsGroupState.STABLE.toString())),
metrics.metricName(
"streams-group-count",
GroupCoordinatorMetrics.METRICS_GROUP,
Map.of("state", StreamsGroupState.DEAD.toString())),
metrics.metricName(
"streams-group-count",
GroupCoordinatorMetrics.METRICS_GROUP,
Map.of("state", StreamsGroupState.NOT_READY.toString()))
);
try {
try (GroupCoordinatorMetrics ignored = new GroupCoordinatorMetrics(registry, metrics)) {
Set<String> expectedRegistry = Set.of(
"kafka.coordinator.group:type=GroupMetadataManager,name=NumOffsets",
"kafka.coordinator.group:type=GroupMetadataManager,name=NumGroups",
"kafka.coordinator.group:type=GroupMetadataManager,name=NumGroupsPreparingRebalance",
"kafka.coordinator.group:type=GroupMetadataManager,name=NumGroupsCompletingRebalance",
"kafka.coordinator.group:type=GroupMetadataManager,name=NumGroupsStable",
"kafka.coordinator.group:type=GroupMetadataManager,name=NumGroupsDead",
"kafka.coordinator.group:type=GroupMetadataManager,name=NumGroupsEmpty"
);
assertMetricsForTypeEqual(registry, "kafka.coordinator.group", expectedRegistry);
expectedMetrics.forEach(metricName -> assertTrue(metrics.metrics().containsKey(metricName), metricName + " is missing"));
}
assertMetricsForTypeEqual(registry, "kafka.coordinator.group", Set.of());
expectedMetrics.forEach(metricName -> assertFalse(metrics.metrics().containsKey(metricName)));
} finally {
registry.shutdown();
}
}
@Test
public void aggregateShards() {
MetricsRegistry registry = new MetricsRegistry();
Metrics metrics = new Metrics();
GroupCoordinatorMetrics coordinatorMetrics = new GroupCoordinatorMetrics(registry, metrics);
SnapshotRegistry snapshotRegistry0 = new SnapshotRegistry(new LogContext());
SnapshotRegistry snapshotRegistry1 = new SnapshotRegistry(new LogContext());
TopicPartition tp0 = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, 0);
TopicPartition tp1 = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, 1);
GroupCoordinatorMetricsShard shard0 = coordinatorMetrics.newMetricsShard(snapshotRegistry0, tp0);
GroupCoordinatorMetricsShard shard1 = coordinatorMetrics.newMetricsShard(snapshotRegistry1, tp1);
coordinatorMetrics.activateMetricsShard(shard0);
coordinatorMetrics.activateMetricsShard(shard1);
shard0.setClassicGroupGauges(Utils.mkMap(
Utils.mkEntry(ClassicGroupState.PREPARING_REBALANCE, 1L),
Utils.mkEntry(ClassicGroupState.COMPLETING_REBALANCE, 1L),
Utils.mkEntry(ClassicGroupState.STABLE, 1L),
Utils.mkEntry(ClassicGroupState.EMPTY, 1L)
));
shard1.setClassicGroupGauges(Utils.mkMap(
Utils.mkEntry(ClassicGroupState.PREPARING_REBALANCE, 1L),
Utils.mkEntry(ClassicGroupState.COMPLETING_REBALANCE, 1L),
Utils.mkEntry(ClassicGroupState.STABLE, 1L),
Utils.mkEntry(ClassicGroupState.EMPTY, 1L),
Utils.mkEntry(ClassicGroupState.DEAD, 1L)
));
shard0.setConsumerGroupGauges(Map.of(ConsumerGroupState.ASSIGNING, 5L));
shard1.setConsumerGroupGauges(Map.of(
ConsumerGroupState.RECONCILING, 1L,
ConsumerGroupState.DEAD, 1L
));
shard0.setStreamsGroupGauges(Map.of(StreamsGroupState.ASSIGNING, 2L));
shard1.setStreamsGroupGauges(Map.of(
StreamsGroupState.RECONCILING, 1L,
StreamsGroupState.DEAD, 1L,
StreamsGroupState.NOT_READY, 1L
));
shard0.setShareGroupGauges(Map.of(ShareGroupState.STABLE, 2L));
shard1.setShareGroupGauges(Map.of(
ShareGroupState.EMPTY, 2L,
ShareGroupState.STABLE, 3L,
ShareGroupState.DEAD, 1L
));
IntStream.range(0, 6).forEach(__ -> shard0.incrementNumOffsets());
IntStream.range(0, 2).forEach(__ -> shard1.incrementNumOffsets());
IntStream.range(0, 1).forEach(__ -> shard1.decrementNumOffsets());
assertEquals(4, shard0.numClassicGroups());
assertEquals(5, shard1.numClassicGroups());
assertGaugeValue(registry, metricName("GroupMetadataManager", "NumGroups"), 9);
assertGaugeValue(
metrics,
metrics.metricName("group-count", METRICS_GROUP, Map.of("protocol", "classic")),
9
);
snapshotRegistry0.idempotentCreateSnapshot(1000);
snapshotRegistry1.idempotentCreateSnapshot(1500);
shard0.commitUpTo(1000);
shard1.commitUpTo(1500);
assertEquals(5, shard0.numConsumerGroups());
assertEquals(2, shard1.numConsumerGroups());
assertEquals(6, shard0.numOffsets());
assertEquals(1, shard1.numOffsets());
assertGaugeValue(
metrics,
metrics.metricName("group-count", METRICS_GROUP, Map.of("protocol", "consumer")),
7
);
assertGaugeValue(registry, metricName("GroupMetadataManager", "NumOffsets"), 7);
assertEquals(2, shard0.numShareGroups());
assertEquals(6, shard1.numShareGroups());
assertGaugeValue(
metrics,
metrics.metricName("group-count", METRICS_GROUP, Map.of("protocol", "share")),
8
);
assertEquals(2, shard0.numStreamsGroups());
assertEquals(3, shard1.numStreamsGroups());
assertGaugeValue(
metrics,
metrics.metricName("group-count", METRICS_GROUP, Map.of("protocol", "streams")),
5
);
}
@Test
public void testGlobalSensors() {
MetricsRegistry registry = new MetricsRegistry();
Time time = new MockTime();
Metrics metrics = new Metrics(time);
GroupCoordinatorMetrics coordinatorMetrics = new GroupCoordinatorMetrics(registry, metrics);
GroupCoordinatorMetricsShard shard = coordinatorMetrics.newMetricsShard(
new SnapshotRegistry(new LogContext()), new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, 0)
);
shard.record(CLASSIC_GROUP_COMPLETED_REBALANCES_SENSOR_NAME, 10);
assertMetricValue(metrics, metrics.metricName("group-completed-rebalance-rate", GroupCoordinatorMetrics.METRICS_GROUP), 1.0 / 3.0);
assertMetricValue(metrics, metrics.metricName("group-completed-rebalance-count", GroupCoordinatorMetrics.METRICS_GROUP), 10);
shard.record(OFFSET_COMMITS_SENSOR_NAME, 20);
assertMetricValue(metrics, metrics.metricName("offset-commit-rate", GroupCoordinatorMetrics.METRICS_GROUP), 2.0 / 3.0);
assertMetricValue(metrics, metrics.metricName("offset-commit-count", GroupCoordinatorMetrics.METRICS_GROUP), 20);
shard.record(OFFSET_EXPIRED_SENSOR_NAME, 30);
assertMetricValue(metrics, metrics.metricName("offset-expiration-rate", GroupCoordinatorMetrics.METRICS_GROUP), 1.0);
assertMetricValue(metrics, metrics.metricName("offset-expiration-count", GroupCoordinatorMetrics.METRICS_GROUP), 30);
shard.record(CONSUMER_GROUP_REBALANCES_SENSOR_NAME, 50);
assertMetricValue(metrics, metrics.metricName("consumer-group-rebalance-rate", GroupCoordinatorMetrics.METRICS_GROUP), 5.0 / 3.0);
assertMetricValue(metrics, metrics.metricName("consumer-group-rebalance-count", GroupCoordinatorMetrics.METRICS_GROUP), 50);
shard.record(SHARE_GROUP_REBALANCES_SENSOR_NAME, 50);
assertMetricValue(metrics, metrics.metricName(
"share-group-rebalance-rate",
GroupCoordinatorMetrics.METRICS_GROUP,
"The rate of share group rebalances"
), 5.0 / 3.0);
assertMetricValue(metrics, metrics.metricName(
"share-group-rebalance-count",
GroupCoordinatorMetrics.METRICS_GROUP,
"The total number of share group rebalances"
), 50);
shard.record(STREAMS_GROUP_REBALANCES_SENSOR_NAME, 50);
assertMetricValue(metrics, metrics.metricName(
"streams-group-rebalance-rate",
GroupCoordinatorMetrics.METRICS_GROUP,
"The rate of streams group rebalances"
), 5.0 / 3.0);
assertMetricValue(metrics, metrics.metricName(
"streams-group-rebalance-count",
GroupCoordinatorMetrics.METRICS_GROUP,
"The total number of streams group rebalances"
), 50);
}
private void assertMetricValue(Metrics metrics, MetricName metricName, double val) {
assertEquals(val, metrics.metric(metricName).metricValue());
}
}
| GroupCoordinatorMetricsTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/AnnotationPositionTest.java | {
"start": 18755,
"end": 19044
} | class ____ {
@TypeUse private List<?> x;
@EitherUse private List<?> y;
@NonTypeUse private List<?> z;
}
""")
.addOutputLines(
"Test.java",
"""
import java.util.List;
| Test |
java | spring-projects__spring-framework | spring-messaging/src/main/java/org/springframework/messaging/rsocket/DefaultRSocketStrategies.java | {
"start": 1924,
"end": 3407
} | class ____ implements RSocketStrategies {
private final List<Encoder<?>> encoders;
private final List<Decoder<?>> decoders;
private final RouteMatcher routeMatcher;
private final ReactiveAdapterRegistry adapterRegistry;
private final DataBufferFactory bufferFactory;
private final MetadataExtractor metadataExtractor;
private DefaultRSocketStrategies(List<Encoder<?>> encoders, List<Decoder<?>> decoders,
RouteMatcher routeMatcher, ReactiveAdapterRegistry adapterRegistry,
DataBufferFactory bufferFactory, MetadataExtractor metadataExtractor) {
this.encoders = Collections.unmodifiableList(encoders);
this.decoders = Collections.unmodifiableList(decoders);
this.routeMatcher = routeMatcher;
this.adapterRegistry = adapterRegistry;
this.bufferFactory = bufferFactory;
this.metadataExtractor = metadataExtractor;
}
@Override
public List<Encoder<?>> encoders() {
return this.encoders;
}
@Override
public List<Decoder<?>> decoders() {
return this.decoders;
}
@Override
public RouteMatcher routeMatcher() {
return this.routeMatcher;
}
@Override
public ReactiveAdapterRegistry reactiveAdapterRegistry() {
return this.adapterRegistry;
}
@Override
public DataBufferFactory dataBufferFactory() {
return this.bufferFactory;
}
@Override
public MetadataExtractor metadataExtractor() {
return this.metadataExtractor;
}
/**
* Default implementation of {@link RSocketStrategies.Builder}.
*/
static | DefaultRSocketStrategies |
java | spring-projects__spring-boot | build-plugin/spring-boot-gradle-plugin/src/main/java/org/springframework/boot/gradle/plugin/PluginApplicationAction.java | {
"start": 1004,
"end": 1177
} | class ____ the {@code Plugin} that, when applied, will trigger the execution of
* this action.
* @return the plugin class
* @throws ClassNotFoundException if the plugin | of |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/time/StronglyTypeTimeTest.java | {
"start": 6440,
"end": 7086
} | class ____ {
private final long FOO_MILLIS = 100;
private final long BAR_IN_MILLIS = 100;
private final long BAZ_MILLI = 100;
public Duration foo() {
return Duration.ofMillis(FOO_MILLIS);
}
public Duration bar() {
return Duration.ofMillis(BAR_IN_MILLIS);
}
public Duration baz() {
return Duration.ofMillis(BAZ_MILLI);
}
}
""")
.addOutputLines(
"Test.java",
"""
import java.time.Duration;
| Test |
java | apache__camel | components/camel-mina/src/test/java/org/apache/camel/component/mina/MinaVMCustomCodecTest.java | {
"start": 1748,
"end": 3759
} | class ____ extends BaseMinaTest {
@BindToRegistry("myCodec")
private final MyCodec codec1 = new MyCodec();
@Test
public void testMyCodec() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMessageCount(1);
mock.expectedBodiesReceived("Bye World");
Object out = template.requestBody(String.format("mina:vm://localhost:%1$s?sync=true&codec=#myCodec", getPort()),
"Hello World");
assertEquals("Bye World", out);
mock.assertIsSatisfied();
}
@Test
public void testTCPEncodeUTF8InputIsString() throws Exception {
final String myUri = String.format("mina:vm://localhost:%1$s?encoding=UTF-8&sync=false", getNextPort());
context.addRoutes(new RouteBuilder() {
public void configure() {
from(myUri).to("mock:result");
}
});
MockEndpoint endpoint = getMockEndpoint("mock:result");
// include a UTF-8 char in the text \u0E08 is a Thai elephant
String body = "Hello Thai Elephant \u0E08";
endpoint.expectedMessageCount(1);
endpoint.expectedBodiesReceived(body);
template.sendBody(myUri, body);
MockEndpoint.assertIsSatisfied(context);
}
@Test
public void testBadConfiguration() {
final String format = String.format("mina:vm://localhost:%1$s?sync=true&codec=#XXX", getPort());
assertThrows(ResolveEndpointFailedException.class, () -> template.sendBody(format, "Hello World"),
"Should have thrown a ResolveEndpointFailedException");
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from(String.format("mina:vm://localhost:%1$s?sync=true&codec=#myCodec", getPort()))
.transform(constant("Bye World")).to("mock:result");
}
};
}
private static | MinaVMCustomCodecTest |
java | google__guava | android/guava-tests/test/com/google/common/eventbus/DispatcherTest.java | {
"start": 4973,
"end": 5372
} | class ____ {
private final String name;
public IntegerSubscriber(String name) {
this.name = name;
}
@Subscribe
public void handleInteger(Integer integer) {
dispatchedSubscribers.add(this);
dispatcher.dispatch("hello", stringSubscribers.iterator());
}
@Override
public String toString() {
return name;
}
}
public final | IntegerSubscriber |
java | spring-projects__spring-security | ldap/src/test/java/org/springframework/security/ldap/userdetails/LdapUserDetailsImplTests.java | {
"start": 919,
"end": 1517
} | class ____ {
@Test
public void credentialsAreCleared() {
LdapUserDetailsImpl.Essence mutableLdapUserDetails = new LdapUserDetailsImpl.Essence();
mutableLdapUserDetails.setDn("uid=username1,ou=people,dc=example,dc=com");
mutableLdapUserDetails.setUsername("username1");
mutableLdapUserDetails.setPassword("password");
LdapUserDetails ldapUserDetails = mutableLdapUserDetails.createUserDetails();
assertThat(ldapUserDetails).isInstanceOf(CredentialsContainer.class);
ldapUserDetails.eraseCredentials();
assertThat(ldapUserDetails.getPassword()).isNull();
}
}
| LdapUserDetailsImplTests |
java | quarkusio__quarkus | core/deployment/src/main/java/io/quarkus/deployment/annotations/Consume.java | {
"start": 473,
"end": 780
} | interface ____ {
/**
* The build item type that this comes after.
*
* @return the build item
*/
Class<? extends BuildItem> value();
/**
* The repeatable holder for {@link Consume}.
*/
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.METHOD)
@ | Consume |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/plan/nodes/exec/stream/AsyncCorrelateTestPrograms.java | {
"start": 1945,
"end": 10818
} | class ____ {
static final Row[] BEFORE_DATA = {Row.of(1L, 1, "hi#there"), Row.of(2L, 2, "hello#world")};
static final Row[] AFTER_DATA = {
Row.of(4L, 4, "foo#bar"), Row.of(3L, 3, "bar#fiz"),
};
static final String[] SOURCE_SCHEMA = {"a BIGINT", "b INT NOT NULL", "c VARCHAR"};
public static final TableTestProgram CORRELATE_CATALOG_FUNC =
TableTestProgram.of(
"async-correlate-catalog-func",
"validate correlate with temporary catalog function")
.setupTemporaryCatalogFunction("func1", TableFunc1.class)
.setupTableSource(
SourceTestStep.newBuilder("source_t")
.addSchema(SOURCE_SCHEMA)
.producedBeforeRestore(BEFORE_DATA)
.producedAfterRestore(AFTER_DATA)
.build())
.setupTableSink(
SinkTestStep.newBuilder("sink_t")
.addSchema("a VARCHAR", "b VARCHAR")
.consumedBeforeRestore(
"+I[hi#there, $hi]",
"+I[hi#there, $there]",
"+I[hello#world, $hello]",
"+I[hello#world, $world]")
.consumedAfterRestore(
"+I[foo#bar, $foo]",
"+I[foo#bar, $bar]",
"+I[bar#fiz, $bar]",
"+I[bar#fiz, $fiz]")
.build())
.runSql(
"INSERT INTO sink_t SELECT c, s FROM source_t, LATERAL TABLE(func1(c, '$')) AS T(s)")
.build();
public static final TableTestProgram CORRELATE_SYSTEM_FUNC =
TableTestProgram.of(
"async-correlate-system-func",
"validate correlate with temporary system function")
.setupTemporarySystemFunction("STRING_SPLIT", AsyncStringSplit.class)
.setupTableSource(
SourceTestStep.newBuilder("source_t")
.addSchema(SOURCE_SCHEMA)
.producedBeforeRestore(BEFORE_DATA)
.producedAfterRestore(AFTER_DATA)
.build())
.setupTableSink(
SinkTestStep.newBuilder("sink_t")
.addSchema("a VARCHAR", "b VARCHAR")
.consumedBeforeRestore(
"+I[hi#there, hi]",
"+I[hi#there, there]",
"+I[hello#world, hello]",
"+I[hello#world, world]")
.consumedAfterRestore(
"+I[foo#bar, foo]",
"+I[foo#bar, bar]",
"+I[bar#fiz, bar]",
"+I[bar#fiz, fiz]")
.build())
.runSql(
"INSERT INTO sink_t SELECT c, s FROM source_t, LATERAL TABLE(STRING_SPLIT(c, '#')) AS T(s)")
.build();
public static final TableTestProgram CORRELATE_JOIN_FILTER =
TableTestProgram.of(
"async-correlate-join-filter",
"validate correlate with join and filter")
.setupTemporaryCatalogFunction("func1", TableFunc1.class)
.setupTableSource(
SourceTestStep.newBuilder("source_t")
.addSchema(SOURCE_SCHEMA)
.producedBeforeRestore(BEFORE_DATA)
.producedAfterRestore(AFTER_DATA)
.build())
.setupTableSink(
SinkTestStep.newBuilder("sink_t")
.addSchema("a VARCHAR", "b VARCHAR")
.consumedBeforeRestore(
"+I[hello#world, hello]", "+I[hello#world, world]")
.consumedAfterRestore("+I[bar#fiz, bar]", "+I[bar#fiz, fiz]")
.build())
.runSql(
"INSERT INTO sink_t SELECT * FROM (SELECT c, s FROM source_t, LATERAL TABLE(func1(c)) AS T(s)) AS T2 WHERE c LIKE '%hello%' OR c LIKE '%fiz%'")
.build();
public static final TableTestProgram CORRELATE_LEFT_JOIN =
TableTestProgram.of("async-correlate-left-join", "validate correlate with left join")
.setupTemporaryCatalogFunction("func1", TableFunc1.class)
.setupTableSource(
SourceTestStep.newBuilder("source_t")
.addSchema(SOURCE_SCHEMA)
.producedBeforeRestore(BEFORE_DATA)
.producedAfterRestore(AFTER_DATA)
.build())
.setupTableSink(
SinkTestStep.newBuilder("sink_t")
.addSchema("a VARCHAR", "b VARCHAR")
.consumedBeforeRestore(
"+I[hi#there, hi]",
"+I[hi#there, there]",
"+I[hello#world, hello]",
"+I[hello#world, world]")
.consumedAfterRestore(
"+I[foo#bar, foo]",
"+I[foo#bar, bar]",
"+I[bar#fiz, bar]",
"+I[bar#fiz, fiz]")
.build())
.runSql(
"INSERT INTO sink_t SELECT c, s FROM source_t LEFT JOIN LATERAL TABLE(func1(c)) AS T(s) ON TRUE")
.build();
public static final TableTestProgram CORRELATE_UDF_EXCEPTION =
TableTestProgram.of(
"async-correlate-exception",
"validates async calc node that fails some number of times and then recovers after restore")
.setupConfig(TABLE_EXEC_ASYNC_TABLE_RETRY_DELAY, Duration.ofMillis(3000))
.setupConfig(TABLE_EXEC_ASYNC_TABLE_MAX_RETRIES, 3)
.setupConfig(TABLE_EXEC_ASYNC_TABLE_MAX_CONCURRENT_OPERATIONS, 5)
.setupTemporaryCatalogFunction("func1", FailureThenSucceedSplit.class)
.setupTableSource(
SourceTestStep.newBuilder("source_t")
.addSchema(SOURCE_SCHEMA)
// Errors on "hello#world"
.producedBeforeRestore(BEFORE_DATA)
.producedAfterRestore(AFTER_DATA)
.build())
.setupTableSink(
SinkTestStep.newBuilder("sink_t")
.addSchema("a VARCHAR", "b VARCHAR")
.consumedBeforeRestore(
"+I[hi#there, $hi]", "+I[hi#there, $there]")
.consumedAfterRestore(
"+I[hello#world, $hello]",
"+I[hello#world, $world]",
"+I[foo#bar, $foo]",
"+I[foo#bar, $bar]",
"+I[bar#fiz, $bar]",
"+I[bar#fiz, $fiz]")
.build())
.runSql(
"INSERT INTO sink_t SELECT c, s FROM source_t, LATERAL TABLE(func1(c, '$')) AS T(s)")
.build();
/** Splitter functions. */
public static | AsyncCorrelateTestPrograms |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/bug/Bug_for_ruiyi.java | {
"start": 291,
"end": 3216
} | class ____ extends TestCase {
public void test_for_issue() throws Exception {
String sql = "insert into icshall_guide(id,gmt_create,gmt_modified,subject,content,cat_id)"
+ "values (8,now(),now(),\"Why my payment is deducted incorrectly?/ Why my payment is deducted twice?\","
+ "\"{\\\"id\\\":1,\\\"title\\\":\\\"Have you contacted your card issuer to double check instead of only checking online?\\\","
+ "\\\"type\\\":\\\"START\\\","
+ "\\\"currentLevel\\\":1,"
+ "\\\"name\\\":\\\"name1\\\","
+ "\\\"values\\\":[{\\\"id\\\":2,"
+ "\\\"title\\\":\\\"Yes\\\","
+ "\\\"type\\\":\\\"MIDWAY\\\","
+ "\\\"currentLevel\\\":2,"
+ "\\\"value\\\":1,"
+ "\\\"childs\\\":[{\\\"id\\\":3,"
+ "\\\"title\\\":\\\"If it is deducted twice, please contact the online service with the official bank statement.\\\","
+ "\\\"type\\\":\\\"END\\\"," + "\\\"currentLevel\\\":3}]}," + "{\\\"id\\\":4,"
+ "\\\"title\\\":\\\"No\\\"," + "\\\"type\\\":\\\"MIDWAY\\\"," + "\\\"currentLevel\\\":2,"
+ "\\\"value\\\":1," + "\\\"childs\\\":[{\\\"id\\\":5,"
+ "\\\"title\\\":\\\"Please contact your card issuer to double confirm.\\\","
+ "\\\"type\\\":\\\"END\\\"," + "\\\"currentLevel\\\":3}]}]}\",607)";
String expected = "INSERT INTO icshall_guide (id, gmt_create, gmt_modified, subject, content\n" +
"\t, cat_id)\n" +
"VALUES (8, now(), now(), 'Why my payment is deducted incorrectly?/ Why my payment is deducted twice?', '{\"id\":1,\"title\":\"Have you contacted your card issuer to double check instead of only checking online?\",\"type\":\"START\",\"currentLevel\":1,\"name\":\"name1\",\"values\":[{\"id\":2,\"title\":\"Yes\",\"type\":\"MIDWAY\",\"currentLevel\":2,\"value\":1,\"childs\":[{\"id\":3,\"title\":\"If it is deducted twice, please contact the online service with the official bank statement.\",\"type\":\"END\",\"currentLevel\":3}]},{\"id\":4,\"title\":\"No\",\"type\":\"MIDWAY\",\"currentLevel\":2,\"value\":1,\"childs\":[{\"id\":5,\"title\":\"Please contact your card issuer to double confirm.\",\"type\":\"END\",\"currentLevel\":3}]}]}'\n" +
"\t, 607);\n";
StringBuilder out = new StringBuilder();
MySqlOutputVisitor visitor = new MySqlOutputVisitor(out);
MySqlStatementParser parser = new MySqlStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
for (SQLStatement statement : statementList) {
statement.accept(visitor);
visitor.print(";");
visitor.println();
}
//System.out.println(out.toString());
assertEquals(expected, out.toString());
}
}
| Bug_for_ruiyi |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/erroneous/misbalancedbraces/Target.java | {
"start": 215,
"end": 278
} | class ____ {
//CHECKSTYLE:OFF
public boolean foo;
}
| Target |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StGeotileFromFieldDocValuesAndLiteralEvaluator.java | {
"start": 3472,
"end": 4279
} | class ____ implements EvalOperator.ExpressionEvaluator.Factory {
private final Source source;
private final EvalOperator.ExpressionEvaluator.Factory encoded;
private final int precision;
public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory encoded, int precision) {
this.source = source;
this.encoded = encoded;
this.precision = precision;
}
@Override
public StGeotileFromFieldDocValuesAndLiteralEvaluator get(DriverContext context) {
return new StGeotileFromFieldDocValuesAndLiteralEvaluator(source, encoded.get(context), precision, context);
}
@Override
public String toString() {
return "StGeotileFromFieldDocValuesAndLiteralEvaluator[" + "encoded=" + encoded + ", precision=" + precision + "]";
}
}
}
| Factory |
java | quarkusio__quarkus | integration-tests/main/src/test/java/io/quarkus/it/main/ConfigPropertiesITCase.java | {
"start": 114,
"end": 181
} | class ____ extends ConfigPropertiesTestCase {
}
| ConfigPropertiesITCase |
java | mapstruct__mapstruct | core/src/main/java/org/mapstruct/BeanMapping.java | {
"start": 1936,
"end": 2718
} | interface ____ {
/**
* Specifies the result type of the factory method to be used in case several factory methods qualify.
* <p>
* <b>NOTE</b>: This property is not inherited to generated mapping methods
*
* @return the resultType to select
*/
Class<?> resultType() default void.class;
/**
* A qualifier can be specified to aid the selection process of a suitable factory method or filtering applicable
* {@code @}{@link BeforeMapping} / {@code @}{@link AfterMapping} methods. This is useful in case multiple factory
* method (hand written of internal) qualify and result in an 'Ambiguous factory methods' error.
* <p>
* A qualifier is a custom annotation and can be placed on either a hand written mapper | BeanMapping |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteRepositoryAction.java | {
"start": 1232,
"end": 1988
} | class ____ extends BaseRestHandler {
@Override
public List<Route> routes() {
return List.of(new Route(DELETE, "/_snapshot/{repository}"));
}
@Override
public String getName() {
return "delete_repository_action";
}
@Override
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
final var deleteRepositoryRequest = new DeleteRepositoryRequest(
getMasterNodeTimeout(request),
getAckTimeout(request),
request.param("repository")
);
return channel -> client.admin().cluster().deleteRepository(deleteRepositoryRequest, new RestToXContentListener<>(channel));
}
}
| RestDeleteRepositoryAction |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/rest/messages/JobVertexTaskManagersInfo.java | {
"start": 4183,
"end": 8347
} | class ____ {
public static final String TASK_MANAGERS_FIELD_ENDPOINT = "endpoint";
public static final String TASK_MANAGERS_FIELD_STATUS = "status";
public static final String TASK_MANAGERS_FIELD_START_TIME = "start-time";
public static final String TASK_MANAGERS_FIELD_END_TIME = "end-time";
public static final String TASK_MANAGERS_FIELD_DURATION = "duration";
public static final String TASK_MANAGERS_FIELD_METRICS = "metrics";
public static final String TASK_MANAGERS_FIELD_STATUS_COUNTS = "status-counts";
public static final String TASK_MANAGERS_FIELD_TASKMANAGER_ID = "taskmanager-id";
public static final String TASK_MANAGERS_FIELD_AGGREGATED = "aggregated";
@JsonProperty(TASK_MANAGERS_FIELD_ENDPOINT)
private final String endpoint;
@JsonProperty(TASK_MANAGERS_FIELD_STATUS)
private final ExecutionState status;
@JsonProperty(TASK_MANAGERS_FIELD_START_TIME)
private final long startTime;
@JsonProperty(TASK_MANAGERS_FIELD_END_TIME)
private final long endTime;
@JsonProperty(TASK_MANAGERS_FIELD_DURATION)
private final long duration;
@JsonProperty(TASK_MANAGERS_FIELD_METRICS)
private final IOMetricsInfo metrics;
@JsonProperty(TASK_MANAGERS_FIELD_STATUS_COUNTS)
private final Map<ExecutionState, Integer> statusCounts;
@JsonProperty(TASK_MANAGERS_FIELD_TASKMANAGER_ID)
private final String taskmanagerId;
@JsonProperty(TASK_MANAGERS_FIELD_AGGREGATED)
private final AggregatedTaskDetailsInfo aggregated;
@JsonCreator
public TaskManagersInfo(
@JsonProperty(TASK_MANAGERS_FIELD_ENDPOINT) String endpoint,
@JsonProperty(TASK_MANAGERS_FIELD_STATUS) ExecutionState status,
@JsonProperty(TASK_MANAGERS_FIELD_START_TIME) long startTime,
@JsonProperty(TASK_MANAGERS_FIELD_END_TIME) long endTime,
@JsonProperty(TASK_MANAGERS_FIELD_DURATION) long duration,
@JsonProperty(TASK_MANAGERS_FIELD_METRICS) IOMetricsInfo metrics,
@JsonProperty(TASK_MANAGERS_FIELD_STATUS_COUNTS)
Map<ExecutionState, Integer> statusCounts,
@JsonProperty(TASK_MANAGERS_FIELD_TASKMANAGER_ID) String taskmanagerId,
@JsonProperty(TASK_MANAGERS_FIELD_AGGREGATED)
AggregatedTaskDetailsInfo aggregated) {
this.endpoint = checkNotNull(endpoint);
this.status = checkNotNull(status);
this.startTime = startTime;
this.endTime = endTime;
this.duration = duration;
this.metrics = checkNotNull(metrics);
this.statusCounts = checkNotNull(statusCounts);
this.taskmanagerId = taskmanagerId;
this.aggregated = aggregated;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
TaskManagersInfo that = (TaskManagersInfo) o;
return Objects.equals(endpoint, that.endpoint)
&& Objects.equals(status, that.status)
&& startTime == that.startTime
&& endTime == that.endTime
&& duration == that.duration
&& Objects.equals(metrics, that.metrics)
&& Objects.equals(statusCounts, that.statusCounts)
&& Objects.equals(taskmanagerId, that.taskmanagerId)
&& Objects.equals(aggregated, that.aggregated);
}
@Override
public int hashCode() {
return Objects.hash(
endpoint,
status,
startTime,
endTime,
duration,
metrics,
statusCounts,
taskmanagerId,
aggregated);
}
}
}
| TaskManagersInfo |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/action/search/ClearScrollRequestTests.java | {
"start": 1234,
"end": 4715
} | class ____ extends ESTestCase {
public void testFromXContent() throws Exception {
ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
if (randomBoolean()) {
// test that existing values get overridden
clearScrollRequest = createClearScrollRequest();
}
try (
XContentParser parser = createParser(
XContentFactory.jsonBuilder().startObject().array("scroll_id", "value_1", "value_2").endObject()
)
) {
clearScrollRequest.fromXContent(parser);
}
assertThat(clearScrollRequest.scrollIds(), contains("value_1", "value_2"));
}
public void testFromXContentWithoutArray() throws Exception {
ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
if (randomBoolean()) {
// test that existing values get overridden
clearScrollRequest = createClearScrollRequest();
}
try (XContentParser parser = createParser(XContentFactory.jsonBuilder().startObject().field("scroll_id", "value_1").endObject())) {
clearScrollRequest.fromXContent(parser);
}
assertThat(clearScrollRequest.scrollIds(), contains("value_1"));
}
public void testFromXContentWithUnknownParamThrowsException() throws Exception {
XContentParser invalidContent = createParser(
XContentFactory.jsonBuilder().startObject().array("scroll_id", "value_1", "value_2").field("unknown", "keyword").endObject()
);
ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
Exception e = expectThrows(IllegalArgumentException.class, () -> clearScrollRequest.fromXContent(invalidContent));
assertThat(e.getMessage(), startsWith("Unknown parameter [unknown]"));
}
public void testToXContent() throws IOException {
ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
clearScrollRequest.addScrollId("SCROLL_ID");
try (XContentBuilder builder = JsonXContent.contentBuilder()) {
clearScrollRequest.toXContent(builder, ToXContent.EMPTY_PARAMS);
assertEquals("{\"scroll_id\":[\"SCROLL_ID\"]}", Strings.toString(builder));
}
}
public void testFromAndToXContent() throws IOException {
XContentType xContentType = randomFrom(XContentType.values());
ClearScrollRequest originalRequest = createClearScrollRequest();
BytesReference originalBytes = toShuffledXContent(originalRequest, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean());
ClearScrollRequest parsedRequest = new ClearScrollRequest();
try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) {
parsedRequest.fromXContent(parser);
}
assertEquals(originalRequest.scrollIds(), parsedRequest.scrollIds());
BytesReference parsedBytes = XContentHelper.toXContent(parsedRequest, xContentType, randomBoolean());
assertToXContentEquivalent(originalBytes, parsedBytes, xContentType);
}
public static ClearScrollRequest createClearScrollRequest() {
ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
int numScrolls = randomIntBetween(1, 10);
for (int i = 0; i < numScrolls; i++) {
clearScrollRequest.addScrollId(randomAlphaOfLengthBetween(3, 10));
}
return clearScrollRequest;
}
}
| ClearScrollRequestTests |
java | apache__camel | components/camel-xmlsecurity/src/main/java/org/apache/camel/component/xmlsecurity/XmlSignerEndpoint.java | {
"start": 1879,
"end": 3307
} | class ____ extends DefaultEndpoint {
@UriPath
@Metadata(required = true)
private String name;
@UriParam
private XmlSignerConfiguration configuration;
public XmlSignerEndpoint(String uri, XmlSignerComponent component, XmlSignerConfiguration configuration) {
super(uri, component);
this.configuration = configuration;
}
@Override
public boolean isRemote() {
return false;
}
public String getName() {
return name;
}
/**
* The name part in the URI can be chosen by the user to distinguish between different signer endpoints within the
* camel context.
*/
public void setName(String name) {
this.name = name;
}
public XmlSignerConfiguration getConfiguration() {
return configuration;
}
/**
* Configuration
*/
public void setConfiguration(XmlSignerConfiguration configuration) {
this.configuration = configuration;
}
@Override
public Producer createProducer() throws Exception {
Processor processor = new XmlSignerProcessor(getCamelContext(), getConfiguration());
return new XmlSecurityProducer(this, processor);
}
@Override
public Consumer createConsumer(Processor processor) throws Exception {
throw new UnsupportedOperationException("XML Signature endpoints are not meant to be consumed from.");
}
}
| XmlSignerEndpoint |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/conversion/lossy/VerySpecialNumberMapper.java | {
"start": 267,
"end": 488
} | class ____ {
VerySpecialNumber fromFloat(float f) {
return new VerySpecialNumber();
}
BigInteger toBigInteger(VerySpecialNumber v) {
return new BigInteger( "10" );
}
}
| VerySpecialNumberMapper |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-jackson/deployment/src/test/java/io/quarkus/resteasy/reactive/jackson/deployment/test/EmptyInputTest.java | {
"start": 1931,
"end": 2491
} | class ____ {
@Path("blocking")
@POST
public String blocking(Greeting greeting) {
return createResult(greeting);
}
@Path("nonBlocking")
@NonBlocking
@POST
public String nonBlocking(Greeting greeting) {
return createResult(greeting);
}
private String createResult(Greeting greeting) {
if (greeting == null) {
return "null";
}
return greeting.getMessage();
}
}
public static | GreetingResource |
java | google__dagger | javatests/dagger/internal/codegen/DuplicateBindingsValidationTest.java | {
"start": 40626,
"end": 40764
} | interface ____ {",
" Child build();",
" }",
"",
" @Module",
" static | Builder |
java | mockito__mockito | mockito-core/src/test/java/org/mockitousage/session/MockitoSessionTest.java | {
"start": 1009,
"end": 4005
} | class ____ extends TestBase {
private JUnitCore junit = new JUnitCore();
@Test
public void session_without_any_configuration() {
// when
Result result = junit.run(MockitoSessionTest.SessionWithoutAnyConfiguration.class);
// expect
JUnitResultAssert.assertThat(result).succeeds(1);
}
@Test
public void session_without_init_mocks_configured() {
// when
Result result = junit.run(MockitoSessionTest.SessionWithoutInitMocksConfigured.class);
// expect
JUnitResultAssert.assertThat(result).succeeds(1);
}
@Test
public void session_without_strictness_configured() {
// when
Result result = junit.run(MockitoSessionTest.SessionWithoutStrictnessConfigured.class);
// expect
JUnitResultAssert.assertThat(result).succeeds(1);
}
@Test
public void session_with_incorrect_mockito_usage() {
// when
Result result = junit.run(MockitoSessionTest.SessionWithIncorrectMockitoUsage.class);
// expect
JUnitResultAssert.assertThat(result).fails(1, UnfinishedStubbingException.class);
}
@Test
public void reports_other_failure_and_incorrect_mockito_usage() {
// when
Result result =
junit.run(MockitoSessionTest.SessionWithTestFailureAndIncorrectMockitoUsage.class);
// expect
JUnitResultAssert.assertThat(result)
.failsExactly(AssertionError.class, UnfinishedStubbingException.class);
}
@Test
public void allows_initializing_mocks_manually() {
// when
Result result = junit.run(MockitoSessionTest.SessionWithManuallyInitializedMock.class);
// expect
JUnitResultAssert.assertThat(result).succeeds(1);
}
@Test
public void allows_updating_strictness() {
// when
Result result = junit.run(MockitoSessionTest.SessionWithUpdatedStrictness.class);
// expect
JUnitResultAssert.assertThat(result).succeeds(1);
}
@Test
public void allows_overriding_failure() {
// when
Result result = junit.run(MockitoSessionTest.SessionWithOverriddenFailure.class);
// expect
JUnitResultAssert.assertThat(result).isSuccessful();
// in order to demonstrate feature, we intentionally misuse Mockito and need to clean up
// state
resetState();
}
@Test
public void cleans_up_state_when_init_fails() {
// when
Result result = junit.run(MockitoSessionTest.SessionWithInitMocksFailure.class);
// expect that both failures are the same, indicating correct listener cleanup
// incorrect cleanup causes 1 failure to be InjectMocksException
// but the next test method would have failed with unuseful error that session was not
// cleaned up
JUnitResultAssert.assertThat(result).fails(2, InjectMocksException.class);
}
public static | MockitoSessionTest |
java | google__dagger | javatests/dagger/hilt/processor/internal/definecomponent/DefineComponentProcessorTest.java | {
"start": 2495,
"end": 2834
} | class ____ "
+ "information across multiple compilations.",
" */",
"@DefineComponentClasses(",
" component = \"test.FooComponent\"",
")",
"@Generated(\"dagger.hilt.processor.internal.definecomponent.DefineComponentProcessingStep\")",
"public | aggregates |
java | spring-projects__spring-security | oauth2/oauth2-authorization-server/src/test/java/org/springframework/security/oauth2/server/authorization/token/JwtEncodingContextTests.java | {
"start": 2365,
"end": 6136
} | class ____ {
@Test
public void withWhenJwsHeaderNullThenThrowIllegalArgumentException() {
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(() -> JwtEncodingContext.with(null, TestJwtClaimsSets.jwtClaimsSet()))
.withMessage("jwsHeaderBuilder cannot be null");
}
@Test
public void withWhenClaimsNullThenThrowIllegalArgumentException() {
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(() -> JwtEncodingContext.with(TestJwsHeaders.jwsHeader(), null))
.withMessage("claimsBuilder cannot be null");
}
@Test
public void setWhenValueNullThenThrowIllegalArgumentException() {
JwtEncodingContext.Builder builder = JwtEncodingContext.with(TestJwsHeaders.jwsHeader(),
TestJwtClaimsSets.jwtClaimsSet());
assertThatExceptionOfType(IllegalArgumentException.class).isThrownBy(() -> builder.registeredClient(null));
assertThatExceptionOfType(IllegalArgumentException.class).isThrownBy(() -> builder.principal(null));
assertThatExceptionOfType(IllegalArgumentException.class).isThrownBy(() -> builder.authorization(null));
assertThatExceptionOfType(IllegalArgumentException.class).isThrownBy(() -> builder.tokenType(null));
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(() -> builder.authorizationGrantType(null));
assertThatExceptionOfType(IllegalArgumentException.class).isThrownBy(() -> builder.authorizationGrant(null));
assertThatExceptionOfType(IllegalArgumentException.class).isThrownBy(() -> builder.put(null, ""));
}
@Test
public void buildWhenAllValuesProvidedThenAllValuesAreSet() {
JwsHeader.Builder headers = TestJwsHeaders.jwsHeader();
JwtClaimsSet.Builder claims = TestJwtClaimsSets.jwtClaimsSet();
RegisteredClient registeredClient = TestRegisteredClients.registeredClient().build();
TestingAuthenticationToken principal = new TestingAuthenticationToken("principal", "password");
OAuth2Authorization authorization = TestOAuth2Authorizations.authorization().build();
OAuth2ClientAuthenticationToken clientPrincipal = new OAuth2ClientAuthenticationToken(registeredClient,
ClientAuthenticationMethod.CLIENT_SECRET_BASIC, registeredClient.getClientSecret());
OAuth2AuthorizationRequest authorizationRequest = authorization
.getAttribute(OAuth2AuthorizationRequest.class.getName());
OAuth2AuthorizationCodeAuthenticationToken authorizationGrant = new OAuth2AuthorizationCodeAuthenticationToken(
"code", clientPrincipal, authorizationRequest.getRedirectUri(), null);
JwtEncodingContext context = JwtEncodingContext.with(headers, claims)
.registeredClient(registeredClient)
.principal(principal)
.authorization(authorization)
.tokenType(OAuth2TokenType.ACCESS_TOKEN)
.authorizationGrantType(AuthorizationGrantType.AUTHORIZATION_CODE)
.authorizationGrant(authorizationGrant)
.put("custom-key-1", "custom-value-1")
.context((ctx) -> ctx.put("custom-key-2", "custom-value-2"))
.build();
assertThat(context.getJwsHeader()).isEqualTo(headers);
assertThat(context.getClaims()).isEqualTo(claims);
assertThat(context.getRegisteredClient()).isEqualTo(registeredClient);
assertThat(context.<Authentication>getPrincipal()).isEqualTo(principal);
assertThat(context.getAuthorization()).isEqualTo(authorization);
assertThat(context.getTokenType()).isEqualTo(OAuth2TokenType.ACCESS_TOKEN);
assertThat(context.getAuthorizationGrantType()).isEqualTo(AuthorizationGrantType.AUTHORIZATION_CODE);
assertThat(context.<OAuth2AuthorizationGrantAuthenticationToken>getAuthorizationGrant())
.isEqualTo(authorizationGrant);
assertThat(context.<String>get("custom-key-1")).isEqualTo("custom-value-1");
assertThat(context.<String>get("custom-key-2")).isEqualTo("custom-value-2");
}
}
| JwtEncodingContextTests |
java | spring-projects__spring-framework | spring-beans/src/main/java/org/springframework/beans/ConversionNotSupportedException.java | {
"start": 948,
"end": 1947
} | class ____ extends TypeMismatchException {
/**
* Create a new ConversionNotSupportedException.
* @param propertyChangeEvent the PropertyChangeEvent that resulted in the problem
* @param requiredType the required target type (or {@code null} if not known)
* @param cause the root cause (may be {@code null})
*/
public ConversionNotSupportedException(PropertyChangeEvent propertyChangeEvent,
@Nullable Class<?> requiredType, @Nullable Throwable cause) {
super(propertyChangeEvent, requiredType, cause);
}
/**
* Create a new ConversionNotSupportedException.
* @param value the offending value that couldn't be converted (may be {@code null})
* @param requiredType the required target type (or {@code null} if not known)
* @param cause the root cause (may be {@code null})
*/
public ConversionNotSupportedException(@Nullable Object value, @Nullable Class<?> requiredType, @Nullable Throwable cause) {
super(value, requiredType, cause);
}
}
| ConversionNotSupportedException |
java | junit-team__junit5 | junit-platform-suite-engine/src/main/java/org/junit/platform/suite/engine/AdditionalDiscoverySelectors.java | {
"start": 1139,
"end": 3920
} | class ____ {
static List<UriSelector> selectUris(String... uris) {
Preconditions.notNull(uris, "URI list must not be null");
Preconditions.containsNoNullElements(uris, "Individual URIs must not be null");
// @formatter:off
return uniqueStreamOf(uris)
.filter(StringUtils::isNotBlank)
.map(DiscoverySelectors::selectUri)
.toList();
// @formatter:on
}
static List<DirectorySelector> selectDirectories(String... paths) {
Preconditions.notNull(paths, "Directory paths must not be null");
Preconditions.containsNoNullElements(paths, "Individual directory paths must not be null");
// @formatter:off
return uniqueStreamOf(paths)
.filter(StringUtils::isNotBlank)
.map(DiscoverySelectors::selectDirectory)
.toList();
// @formatter:on
}
static List<PackageSelector> selectPackages(String... packageNames) {
Preconditions.notNull(packageNames, "Package names must not be null");
Preconditions.containsNoNullElements(packageNames, "Individual package names must not be null");
// @formatter:off
return uniqueStreamOf(packageNames)
.map(DiscoverySelectors::selectPackage)
.toList();
// @formatter:on
}
static List<ModuleSelector> selectModules(String... moduleNames) {
Preconditions.notNull(moduleNames, "Module names must not be null");
Preconditions.containsNoNullElements(moduleNames, "Individual module names must not be null");
return DiscoverySelectors.selectModules(uniqueStreamOf(moduleNames).collect(Collectors.toSet()));
}
static FileSelector selectFile(String path, int line, int column) {
Preconditions.notBlank(path, "File path must not be null or blank");
if (line <= 0) {
return DiscoverySelectors.selectFile(path);
}
if (column <= 0) {
return DiscoverySelectors.selectFile(path, FilePosition.from(line));
}
return DiscoverySelectors.selectFile(path, FilePosition.from(line, column));
}
static ClasspathResourceSelector selectClasspathResource(String classpathResourceName, int line, int column) {
Preconditions.notBlank(classpathResourceName, "Classpath resource name must not be null or blank");
if (line <= 0) {
return DiscoverySelectors.selectClasspathResource(classpathResourceName);
}
if (column <= 0) {
return DiscoverySelectors.selectClasspathResource(classpathResourceName, FilePosition.from(line));
}
return DiscoverySelectors.selectClasspathResource(classpathResourceName, FilePosition.from(line, column));
}
static List<? extends DiscoverySelector> parseIdentifiers(String[] identifiers) {
return DiscoverySelectors.parseAll(identifiers).toList();
}
private static <T> Stream<T> uniqueStreamOf(T[] elements) {
return Arrays.stream(elements).distinct();
}
private AdditionalDiscoverySelectors() {
}
}
| AdditionalDiscoverySelectors |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java | {
"start": 14445,
"end": 14796
} | interface ____ {
boolean accept(HashMap<ShmId, RegisteredShm> segments,
HashMultimap<ExtendedBlockId, Slot> slots);
}
@VisibleForTesting
public synchronized boolean visit(Visitor visitor) {
return visitor.accept(segments, slots);
}
@VisibleForTesting
public int getShmNum() {
return segments.size();
}
}
| Visitor |
java | apache__commons-lang | src/test/java/org/apache/commons/lang3/reflect/MethodUtilsTest.java | {
"start": 5900,
"end": 6032
} | class ____ implements PackagePrivateEmptyInterface {
// empty
}
public static | PublicImpl1OfPackagePrivateEmptyInterface |
java | apache__kafka | coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntime.java | {
"start": 69054,
"end": 71334
} | class ____ implements CoordinatorEvent {
/**
* The topic partition that this internal event is applied to.
*/
final TopicPartition tp;
/**
* The operation name.
*/
final String name;
/**
* The internal operation to execute.
*/
final Runnable op;
/**
* The time this event was created.
*/
private final long createdTimeMs;
/**
* Constructor.
*
* @param name The operation name.
* @param tp The topic partition that the operation is applied to.
* @param op The operation.
*/
CoordinatorInternalEvent(
String name,
TopicPartition tp,
Runnable op
) {
this.tp = tp;
this.name = name;
this.op = op;
this.createdTimeMs = time.milliseconds();
}
/**
* @return The key used by the CoordinatorEventProcessor to ensure
* that events with the same key are not processed concurrently.
*/
@Override
public TopicPartition key() {
return tp;
}
/**
* Called by the CoordinatorEventProcessor when the event is executed.
*/
@Override
public void run() {
try {
op.run();
} catch (Throwable t) {
complete(t);
}
}
/**
* Logs any exceptions thrown while the event is executed.
*
* @param exception The exception.
*/
@Override
public void complete(Throwable exception) {
if (exception != null) {
log.error("Execution of {} failed due to {}.", name, exception.getMessage(), exception);
}
}
@Override
public long createdTimeMs() {
return this.createdTimeMs;
}
@Override
public String toString() {
return "InternalEvent(name=" + name + ")";
}
}
/**
* Partition listener to be notified when the high watermark of the partitions
* backing the coordinator are updated.
*/
| CoordinatorInternalEvent |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBVersionedStore.java | {
"start": 4792,
"end": 20526
} | class ____ implements VersionedKeyValueStore<Bytes, byte[]> {
private static final Logger LOG = LoggerFactory.getLogger(RocksDBVersionedStore.class);
// a marker to indicate that no record version has yet been found as part of an ongoing
// put() procedure. any value which is not a valid record timestamp will do.
private static final long SENTINEL_TIMESTAMP = Long.MIN_VALUE;
private final String name;
private final long historyRetention;
private final long gracePeriod;
private final RocksDBMetricsRecorder metricsRecorder;
private final LogicalKeyValueSegment latestValueStore; // implemented as a "reserved segment" of the segments store
private final LogicalKeyValueSegments segmentStores;
private final RocksDBVersionedStoreClient versionedStoreClient;
private final RocksDBVersionedStoreRestoreWriteBuffer restoreWriteBuffer;
private InternalProcessorContext<?, ?> internalProcessorContext;
private Sensor expiredRecordSensor;
private long observedStreamTime = ConsumerRecord.NO_TIMESTAMP;
private boolean consistencyEnabled = false;
private Position position;
private OffsetCheckpoint positionCheckpoint;
private volatile boolean open;
RocksDBVersionedStore(final String name, final String metricsScope, final long historyRetention, final long segmentInterval) {
this.name = name;
this.historyRetention = historyRetention;
// history retention doubles as grace period for now. could be nice to allow users to
// configure the two separately in the future. if/when we do, we should enforce that
// history retention >= grace period, for sound semantics.
this.gracePeriod = historyRetention;
this.metricsRecorder = new RocksDBMetricsRecorder(metricsScope, name);
// pass store name as segments name so state dir subdirectory uses the store name
this.segmentStores = new LogicalKeyValueSegments(name, DB_FILE_DIR, historyRetention, segmentInterval, metricsRecorder);
this.latestValueStore = this.segmentStores.createReservedSegment(-1L, latestValueStoreName(name));
this.versionedStoreClient = new RocksDBVersionedStoreClient();
this.restoreWriteBuffer = new RocksDBVersionedStoreRestoreWriteBuffer(versionedStoreClient);
}
@Override
public long put(final Bytes key, final byte[] value, final long timestamp) {
Objects.requireNonNull(key, "key cannot be null");
validateStoreOpen();
synchronized (position) {
if (timestamp < observedStreamTime - gracePeriod) {
expiredRecordSensor.record(1.0d, internalProcessorContext.currentSystemTimeMs());
LOG.warn("Skipping record for expired put.");
StoreQueryUtils.updatePosition(position, internalProcessorContext);
return PUT_RETURN_CODE_NOT_PUT;
}
observedStreamTime = Math.max(observedStreamTime, timestamp);
final long foundTs = doPut(
versionedStoreClient,
observedStreamTime,
key,
value,
timestamp
);
StoreQueryUtils.updatePosition(position, internalProcessorContext);
return foundTs;
}
}
@Override
public VersionedRecord<byte[]> delete(final Bytes key, final long timestamp) {
Objects.requireNonNull(key, "key cannot be null");
validateStoreOpen();
synchronized (position) {
if (timestamp < observedStreamTime - gracePeriod) {
expiredRecordSensor.record(1.0d, internalProcessorContext.currentSystemTimeMs());
LOG.warn("Skipping record for expired delete.");
return null;
}
final VersionedRecord<byte[]> existingRecord = get(key, timestamp);
observedStreamTime = Math.max(observedStreamTime, timestamp);
doPut(
versionedStoreClient,
observedStreamTime,
key,
null,
timestamp
);
StoreQueryUtils.updatePosition(position, internalProcessorContext);
return existingRecord;
}
}
@Override
public VersionedRecord<byte[]> get(final Bytes key) {
Objects.requireNonNull(key, "key cannot be null");
validateStoreOpen();
// latest value (if present) is guaranteed to be in the latest value store
final byte[] rawLatestValueAndTimestamp = latestValueStore.get(key);
if (rawLatestValueAndTimestamp != null) {
return new VersionedRecord<>(
LatestValueFormatter.value(rawLatestValueAndTimestamp),
LatestValueFormatter.timestamp(rawLatestValueAndTimestamp)
);
} else {
return null;
}
}
@Override
public VersionedRecord<byte[]> get(final Bytes key, final long asOfTimestamp) {
Objects.requireNonNull(key, "key cannot be null");
validateStoreOpen();
if (asOfTimestamp < observedStreamTime - historyRetention) {
// history retention exceeded. we still check the latest value store in case the
// latest record version satisfies the timestamp bound, in which case it should
// still be returned (i.e., the latest record version per key never expires).
final byte[] rawLatestValueAndTimestamp = latestValueStore.get(key);
if (rawLatestValueAndTimestamp != null) {
final long latestTimestamp = LatestValueFormatter.timestamp(rawLatestValueAndTimestamp);
if (latestTimestamp <= asOfTimestamp) {
// latest value satisfies timestamp bound
return new VersionedRecord<>(
LatestValueFormatter.value(rawLatestValueAndTimestamp),
latestTimestamp
);
}
}
// history retention has elapsed and the latest record version (if present) does
// not satisfy the timestamp bound. return null for predictability, even if data
// is still present in segments.
LOG.warn("Returning null for expired get.");
return null;
}
// first check the latest value store
final byte[] rawLatestValueAndTimestamp = latestValueStore.get(key);
if (rawLatestValueAndTimestamp != null) {
final long latestTimestamp = LatestValueFormatter.timestamp(rawLatestValueAndTimestamp);
if (latestTimestamp <= asOfTimestamp) {
return new VersionedRecord<>(LatestValueFormatter.value(rawLatestValueAndTimestamp), latestTimestamp);
}
}
// check segment stores
final List<LogicalKeyValueSegment> segments = segmentStores.segments(asOfTimestamp, Long.MAX_VALUE, false);
for (final LogicalKeyValueSegment segment : segments) {
final byte[] rawSegmentValue = segment.get(key);
if (rawSegmentValue != null) {
final long nextTs = RocksDBVersionedStoreSegmentValueFormatter.nextTimestamp(rawSegmentValue);
if (nextTs <= asOfTimestamp) {
// this segment contains no data for the queried timestamp, so earlier segments
// cannot either
return null;
}
if (RocksDBVersionedStoreSegmentValueFormatter.minTimestamp(rawSegmentValue) > asOfTimestamp) {
// the segment only contains data for after the queried timestamp. skip and
// continue the search to earlier segments. as an optimization, this code
// could be updated to skip forward to the segment containing the minTimestamp
// in the if-condition above.
continue;
}
// the desired result is contained in this segment
final SegmentSearchResult searchResult =
RocksDBVersionedStoreSegmentValueFormatter
.deserialize(rawSegmentValue)
.find(asOfTimestamp, true);
if (searchResult.value() != null) {
return new VersionedRecord<>(searchResult.value(), searchResult.validFrom(), searchResult.validTo());
} else {
return null;
}
}
}
// checked all segments and no results found
return null;
}
@SuppressWarnings("unchecked")
VersionedRecordIterator<byte[]> get(final Bytes key, final long fromTimestamp, final long toTimestamp, final ResultOrder order) {
validateStoreOpen();
if (toTimestamp < observedStreamTime - historyRetention) {
// history retention exceeded. we still check the latest value store in case the
// latest record version satisfies the timestamp bound, in which case it should
// still be returned (i.e., the latest record version per key never expires).
return new LogicalSegmentIterator(Collections.singletonList(latestValueStore).listIterator(), key, fromTimestamp, toTimestamp, order);
} else {
final List<LogicalKeyValueSegment> segments = new ArrayList<>();
// add segment stores
// consider the search lower bound as -INF (LONG.MIN_VALUE) to find the record that has been inserted before the {@code fromTimestamp}
// but is still valid in query specified time interval.
if (order.equals(ResultOrder.ASCENDING)) {
segments.addAll(segmentStores.segments(Long.MIN_VALUE, toTimestamp, true));
segments.add(latestValueStore);
} else {
segments.add(latestValueStore);
segments.addAll(segmentStores.segments(Long.MIN_VALUE, toTimestamp, false));
}
return new LogicalSegmentIterator(segments.listIterator(), key, fromTimestamp, toTimestamp, order);
}
}
@Override
public String name() {
return name;
}
@Override
public void flush() {
segmentStores.flush();
// flushing segments store includes flushing latest value store, since they share the
// same physical RocksDB instance
}
@Override
public void close() {
open = false;
segmentStores.close();
// closing segments store includes closing latest value store, since they share the
// same physical RocksDB instance
}
@Override
public <R> QueryResult<R> query(
final Query<R> query,
final PositionBound positionBound,
final QueryConfig config) {
return StoreQueryUtils.handleBasicQueries(
query,
positionBound,
config,
this,
position,
internalProcessorContext
);
}
@Override
public boolean persistent() {
return true;
}
@Override
public boolean isOpen() {
return open;
}
@Override
public Position getPosition() {
return position;
}
@Override
public void init(final StateStoreContext stateStoreContext, final StateStore root) {
this.internalProcessorContext = ProcessorContextUtils.asInternalProcessorContext(stateStoreContext);
final StreamsMetricsImpl metrics = ProcessorContextUtils.metricsImpl(stateStoreContext);
final String threadId = Thread.currentThread().getName();
final String taskName = stateStoreContext.taskId().toString();
expiredRecordSensor = TaskMetrics.droppedRecordsSensor(
threadId,
taskName,
metrics
);
metricsRecorder.init(ProcessorContextUtils.metricsImpl(stateStoreContext), stateStoreContext.taskId());
final File positionCheckpointFile = new File(stateStoreContext.stateDir(), name() + ".position");
positionCheckpoint = new OffsetCheckpoint(positionCheckpointFile);
position = StoreQueryUtils.readPositionFromCheckpoint(positionCheckpoint);
segmentStores.setPosition(position);
segmentStores.openExisting(internalProcessorContext, observedStreamTime);
// register and possibly restore the state from the logs
stateStoreContext.register(
root,
(RecordBatchingStateRestoreCallback) RocksDBVersionedStore.this::restoreBatch,
() -> StoreQueryUtils.checkpointPosition(positionCheckpoint, position)
);
open = true;
consistencyEnabled = StreamsConfig.InternalConfig.getBoolean(
stateStoreContext.appConfigs(),
IQ_CONSISTENCY_OFFSET_VECTOR_ENABLED,
false
);
}
// VisibleForTesting
void restoreBatch(final Collection<ConsumerRecord<byte[], byte[]>> records) {
// compute the observed stream time at the end of the restore batch, in order to speed up
// restore by not bothering to read from/write to segments which will have expired by the
// time the restoration process is complete.
long endOfBatchStreamTime = observedStreamTime;
for (final ConsumerRecord<byte[], byte[]> record : records) {
endOfBatchStreamTime = Math.max(endOfBatchStreamTime, record.timestamp());
}
final VersionedStoreClient<?> restoreClient = restoreWriteBuffer.getClient();
// note: there is increased risk for hitting an out-of-memory during this restore loop,
// compared to for non-versioned key-value stores, because this versioned store
// implementation stores multiple records (for the same key) together in a single RocksDB
// "segment" entry -- restoring a single changelog entry could require loading multiple
// records into memory. how high this memory amplification will be is very much dependent
// on the specific workload and the value of the "segment interval" parameter.
synchronized (position) {
for (final ConsumerRecord<byte[], byte[]> record : records) {
if (record.timestamp() < observedStreamTime - gracePeriod) {
// record is older than grace period and was therefore never written to the store
continue;
}
// advance observed stream time as usual, for use in deciding whether records have
// exceeded the store's grace period and should be dropped.
observedStreamTime = Math.max(observedStreamTime, record.timestamp());
ChangelogRecordDeserializationHelper.applyChecksAndUpdatePosition(
record,
consistencyEnabled,
position
);
// put records to write buffer
doPut(
restoreClient,
endOfBatchStreamTime,
new Bytes(record.key()),
record.value(),
record.timestamp()
);
}
try {
restoreWriteBuffer.flush();
} catch (final RocksDBException e) {
throw new ProcessorStateException("Error restoring batch to store " + name, e);
}
}
}
private void validateStoreOpen() {
if (!open) {
throw new InvalidStateStoreException("Store " + name + " is currently closed");
}
}
/**
* Generic | RocksDBVersionedStore |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/type/descriptor/java/AbstractArrayJavaType.java | {
"start": 971,
"end": 5095
} | class ____<T, E> extends AbstractClassJavaType<T>
implements BasicPluralJavaType<E> {
private final JavaType<E> componentJavaType;
public AbstractArrayJavaType(Class<T> clazz, JavaType<E> baseDescriptor, MutabilityPlan<T> mutabilityPlan) {
super( clazz, mutabilityPlan );
this.componentJavaType = baseDescriptor;
}
@Override
public JavaType<E> getElementJavaType() {
return componentJavaType;
}
@Override
public JdbcType getRecommendedJdbcType(JdbcTypeIndicators indicators) {
if ( componentJavaType instanceof UnknownBasicJavaType) {
throw new MappingException("Basic array has element type '"
+ componentJavaType.getTypeName()
+ "' which is not a known basic type"
+ " (attribute is not annotated '@ElementCollection', '@OneToMany', or '@ManyToMany')");
}
// Always determine the recommended type to make sure this is a valid basic java type
final var recommendedComponentJdbcType = componentJavaType.getRecommendedJdbcType( indicators );
final var typeConfiguration = indicators.getTypeConfiguration();
return typeConfiguration.getJdbcTypeRegistry()
.resolveTypeConstructorDescriptor(
indicators.getPreferredSqlTypeCodeForArray( recommendedComponentJdbcType.getDefaultSqlTypeCode() ),
typeConfiguration.getBasicTypeRegistry().resolve( componentJavaType, recommendedComponentJdbcType ),
ColumnTypeInformation.EMPTY
);
}
@Override
public boolean isWider(JavaType<?> javaType) {
// Support binding single element value
return this == javaType || componentJavaType == javaType;
}
@Override
public BasicType<?> resolveType(
TypeConfiguration typeConfiguration,
Dialect dialect,
BasicType<E> elementType,
ColumnTypeInformation columnTypeInformation,
JdbcTypeIndicators stdIndicators) {
final var elementJavaTypeClass = elementType.getJavaTypeDescriptor().getJavaTypeClass();
if ( elementType instanceof BasicPluralType<?, ?>
|| elementJavaTypeClass != null && elementJavaTypeClass.isArray() ) {
return null;
}
final var valueConverter = elementType.getValueConverter();
return valueConverter == null
? resolveType( typeConfiguration, this, elementType, columnTypeInformation, stdIndicators )
: createTypeUsingConverter( typeConfiguration, elementType, columnTypeInformation, stdIndicators, valueConverter );
}
private static JdbcType arrayJdbcType(
TypeConfiguration typeConfiguration,
BasicType<?> elementType,
ColumnTypeInformation columnTypeInformation,
JdbcTypeIndicators indicators) {
final int arrayTypeCode =
indicators.getPreferredSqlTypeCodeForArray( elementType.getJdbcType().getDefaultSqlTypeCode() );
return typeConfiguration.getJdbcTypeRegistry()
.resolveTypeConstructorDescriptor( arrayTypeCode, elementType, columnTypeInformation );
}
<F> BasicType<T> createTypeUsingConverter(
TypeConfiguration typeConfiguration,
BasicType<E> elementType,
ColumnTypeInformation columnTypeInformation,
JdbcTypeIndicators indicators,
BasicValueConverter<E, F> valueConverter) {
final var convertedElementClass = valueConverter.getRelationalJavaType().getJavaTypeClass();
final var convertedArrayClass = newInstance( convertedElementClass, 0 ).getClass();
final var relationalJavaType =
typeConfiguration.getJavaTypeRegistry()
.resolveDescriptor( convertedArrayClass );
return new ConvertedBasicArrayType<>(
elementType,
arrayJdbcType( typeConfiguration, elementType, columnTypeInformation, indicators ),
this,
new ArrayConverter<>( valueConverter, this, relationalJavaType )
);
}
BasicType<T> resolveType(
TypeConfiguration typeConfiguration,
AbstractArrayJavaType<T,E> arrayJavaType,
BasicType<E> elementType,
ColumnTypeInformation columnTypeInformation,
JdbcTypeIndicators indicators) {
final var arrayJdbcType =
arrayJdbcType( typeConfiguration, elementType, columnTypeInformation, indicators );
return typeConfiguration.getBasicTypeRegistry()
.resolve( arrayJavaType, arrayJdbcType,
() -> new BasicArrayType<>( elementType, arrayJdbcType, arrayJavaType ) );
}
}
| AbstractArrayJavaType |
java | mapstruct__mapstruct | core/src/main/java/org/mapstruct/control/MappingControl.java | {
"start": 2948,
"end": 4665
} | enum ____ {
/**
* Controls the mapping, allows for type conversion from source type to target type
* <p>
* Type conversions are typically supported directly in Java. The "toString()" is such an example,
* which allows for mapping for instance a {@link java.lang.Number} type to a {@link java.lang.String}.
* <p>
* Please refer to the MapStruct guide for more info.
*
* @since 1.4
*/
BUILT_IN_CONVERSION,
/**
* Controls the mapping from source to target type, allows mapping by calling:
* <ol>
* <li>A type conversion, passed into a mapping method</li>
* <li>A mapping method, passed into a type conversion</li>
* <li>A mapping method passed into another mapping method</li>
* </ol>
*
* @since 1.4
*/
COMPLEX_MAPPING,
/**
* Controls the mapping, allows for a direct mapping from source type to target type.
* <p>
* This means if source type and target type are of the same type, MapStruct will not perform
* any mappings anymore and assign the target to the source direct.
* <p>
* An exception are types from the package {@code java}, which will be mapped always directly.
*
* @since 1.4
*/
DIRECT,
/**
* Controls the mapping, allows for Direct Mapping from source type to target type.
* <p>
* The mapping method can be either a custom referred mapping method, or a MapStruct built in
* mapping method.
*
* @since 1.4
*/
MAPPING_METHOD
}
}
| Use |
java | elastic__elasticsearch | libs/x-content/src/main/java/org/elasticsearch/xcontent/spi/XContentProvider.java | {
"start": 2198,
"end": 2738
} | class ____ {
private Holder() {}
private static final String PROVIDER_NAME = "x-content";
private static final String PROVIDER_MODULE_NAME = "org.elasticsearch.xcontent.impl";
private static final Set<String> MISSING_MODULES = Set.of("com.fasterxml.jackson.databind");
private static final XContentProvider INSTANCE = (new ProviderLocator<>(
PROVIDER_NAME,
XContentProvider.class,
PROVIDER_MODULE_NAME,
MISSING_MODULES
)).get();
}
}
| Holder |
java | apache__spark | sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/AggregateHashMap.java | {
"start": 2007,
"end": 4145
} | class ____ {
private OnHeapColumnVector[] columnVectors;
private MutableColumnarRow aggBufferRow;
private int[] buckets;
private int numBuckets;
private int numRows = 0;
private int maxSteps = 3;
private static int DEFAULT_CAPACITY = 1 << 16;
private static double DEFAULT_LOAD_FACTOR = 0.25;
private static int DEFAULT_MAX_STEPS = 3;
public AggregateHashMap(StructType schema, int capacity, double loadFactor, int maxSteps) {
// We currently only support single key-value pair that are both longs
assert (schema.size() == 2 && schema.fields()[0].dataType() == LongType &&
schema.fields()[1].dataType() == LongType);
// capacity should be a power of 2
assert (capacity > 0 && ((capacity & (capacity - 1)) == 0));
this.maxSteps = maxSteps;
numBuckets = (int) (capacity / loadFactor);
columnVectors = OnHeapColumnVector.allocateColumns(capacity, schema);
aggBufferRow = new MutableColumnarRow(columnVectors);
buckets = new int[numBuckets];
Arrays.fill(buckets, -1);
}
public AggregateHashMap(StructType schema) {
this(schema, DEFAULT_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_MAX_STEPS);
}
public MutableColumnarRow findOrInsert(long key) {
int idx = find(key);
if (idx != -1 && buckets[idx] == -1) {
columnVectors[0].putLong(numRows, key);
columnVectors[1].putLong(numRows, 0);
buckets[idx] = numRows++;
}
aggBufferRow.rowId = buckets[idx];
return aggBufferRow;
}
@VisibleForTesting
public int find(long key) {
long h = hash(key);
int step = 0;
int idx = (int) h & (numBuckets - 1);
while (step < maxSteps) {
// Return bucket index if it's either an empty slot or already contains the key
if (buckets[idx] == -1) {
return idx;
} else if (equals(idx, key)) {
return idx;
}
idx = (idx + 1) & (numBuckets - 1);
step++;
}
// Didn't find it
return -1;
}
private long hash(long key) {
return key;
}
private boolean equals(int idx, long key1) {
return columnVectors[0].getLong(buckets[idx]) == key1;
}
}
| AggregateHashMap |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java | {
"start": 2431,
"end": 42163
} | class ____ extends ESTestCase {
public void testThreadedUpdatesToChildBreaker() throws Exception {
final int NUM_THREADS = scaledRandomIntBetween(3, 15);
final int BYTES_PER_THREAD = scaledRandomIntBetween(500, 4500);
final AtomicBoolean tripped = new AtomicBoolean(false);
final AtomicReference<Throwable> lastException = new AtomicReference<>(null);
final AtomicReference<ChildMemoryCircuitBreaker> breakerRef = new AtomicReference<>(null);
final CircuitBreakerService service = new HierarchyCircuitBreakerService(
CircuitBreakerMetrics.NOOP,
Settings.EMPTY,
Collections.emptyList(),
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
) {
@Override
public CircuitBreaker getBreaker(String name) {
return breakerRef.get();
}
@Override
public void checkParentLimit(long newBytesReserved, String label) throws CircuitBreakingException {
// never trip
}
};
final BreakerSettings settings = new BreakerSettings(CircuitBreaker.REQUEST, (BYTES_PER_THREAD * NUM_THREADS) - 1, 1.0);
final ChildMemoryCircuitBreaker breaker = new ChildMemoryCircuitBreaker(
CircuitBreakerMetrics.NOOP.getTripCount(),
settings,
logger,
(HierarchyCircuitBreakerService) service,
CircuitBreaker.REQUEST
);
breakerRef.set(breaker);
runInParallel(NUM_THREADS, i -> {
for (int j = 0; j < BYTES_PER_THREAD; j++) {
try {
breaker.addEstimateBytesAndMaybeBreak(1L, "test");
} catch (CircuitBreakingException e) {
if (tripped.get()) {
assertThat("tripped too many times", true, equalTo(false));
} else {
assertThat(tripped.compareAndSet(false, true), equalTo(true));
}
} catch (Exception e) {
lastException.set(e);
}
}
});
assertThat("no other exceptions were thrown", lastException.get(), equalTo(null));
assertThat("breaker was tripped", tripped.get(), equalTo(true));
assertThat("breaker was tripped at least once", breaker.getTrippedCount(), greaterThanOrEqualTo(1L));
}
public void testThreadedUpdatesToChildBreakerWithParentLimit() throws Exception {
final int NUM_THREADS = scaledRandomIntBetween(3, 15);
final int BYTES_PER_THREAD = scaledRandomIntBetween(500, 4500);
final int parentLimit = (BYTES_PER_THREAD * NUM_THREADS) - 2;
final int childLimit = parentLimit + 10;
final AtomicInteger tripped = new AtomicInteger(0);
final AtomicReference<Throwable> lastException = new AtomicReference<>(null);
final AtomicInteger parentTripped = new AtomicInteger(0);
final AtomicReference<ChildMemoryCircuitBreaker> breakerRef = new AtomicReference<>(null);
final CircuitBreakerService service = new HierarchyCircuitBreakerService(
CircuitBreakerMetrics.NOOP,
Settings.EMPTY,
Collections.emptyList(),
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
) {
@Override
public CircuitBreaker getBreaker(String name) {
return breakerRef.get();
}
@Override
public void checkParentLimit(long newBytesReserved, String label) throws CircuitBreakingException {
// Parent will trip right before regular breaker would trip
long requestBreakerUsed = getBreaker(CircuitBreaker.REQUEST).getUsed();
if (requestBreakerUsed > parentLimit) {
parentTripped.incrementAndGet();
logger.info("--> parent tripped");
throw new CircuitBreakingException(
"parent tripped",
requestBreakerUsed + newBytesReserved,
parentLimit,
CircuitBreaker.Durability.PERMANENT
);
}
}
};
final BreakerSettings settings = new BreakerSettings(CircuitBreaker.REQUEST, childLimit, 1.0);
final ChildMemoryCircuitBreaker breaker = new ChildMemoryCircuitBreaker(
CircuitBreakerMetrics.NOOP.getTripCount(),
settings,
logger,
(HierarchyCircuitBreakerService) service,
CircuitBreaker.REQUEST
);
breakerRef.set(breaker);
logger.info(
"--> NUM_THREADS: [{}], BYTES_PER_THREAD: [{}], TOTAL_BYTES: [{}], PARENT_LIMIT: [{}], CHILD_LIMIT: [{}]",
NUM_THREADS,
BYTES_PER_THREAD,
(BYTES_PER_THREAD * NUM_THREADS),
parentLimit,
childLimit
);
logger.info("--> starting threads...");
runInParallel(NUM_THREADS, i -> {
for (int j = 0; j < BYTES_PER_THREAD; j++) {
try {
breaker.addEstimateBytesAndMaybeBreak(1L, "test");
} catch (CircuitBreakingException e) {
tripped.incrementAndGet();
} catch (Exception e) {
lastException.set(e);
}
}
});
logger.info("--> child breaker: used: {}, limit: {}", breaker.getUsed(), breaker.getLimit());
logger.info("--> parent tripped: {}, total trip count: {} (expecting 1-2 for each)", parentTripped.get(), tripped.get());
assertThat("no other exceptions were thrown", lastException.get(), equalTo(null));
assertThat(
"breaker should be reset back to the parent limit after parent breaker trips",
breaker.getUsed(),
greaterThanOrEqualTo((long) parentLimit - NUM_THREADS)
);
assertThat("parent breaker was tripped at least once", parentTripped.get(), greaterThanOrEqualTo(1));
assertThat("total breaker was tripped at least once", tripped.get(), greaterThanOrEqualTo(1));
}
/**
* Test that a breaker correctly redistributes to a different breaker, in
* this case, the request breaker borrows space from the fielddata breaker
*/
public void testBorrowingSiblingBreakerMemory() {
Settings clusterSettings = Settings.builder()
.put(HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING.getKey(), false)
.put(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "200mb")
.put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "150mb")
.put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "150mb")
.build();
CircuitBreakerService service = new HierarchyCircuitBreakerService(
CircuitBreakerMetrics.NOOP,
clusterSettings,
Collections.emptyList(),
new ClusterSettings(clusterSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
);
CircuitBreaker requestCircuitBreaker = service.getBreaker(CircuitBreaker.REQUEST);
CircuitBreaker fieldDataCircuitBreaker = service.getBreaker(CircuitBreaker.FIELDDATA);
assertEquals(ByteSizeValue.of(200, ByteSizeUnit.MB).getBytes(), service.stats().getStats(CircuitBreaker.PARENT).getLimit());
assertEquals(ByteSizeValue.of(150, ByteSizeUnit.MB).getBytes(), requestCircuitBreaker.getLimit());
assertEquals(ByteSizeValue.of(150, ByteSizeUnit.MB).getBytes(), fieldDataCircuitBreaker.getLimit());
fieldDataCircuitBreaker.addEstimateBytesAndMaybeBreak(ByteSizeValue.of(50, ByteSizeUnit.MB).getBytes(), "should not break");
assertEquals(ByteSizeValue.of(50, ByteSizeUnit.MB).getBytes(), fieldDataCircuitBreaker.getUsed(), 0.0);
requestCircuitBreaker.addEstimateBytesAndMaybeBreak(ByteSizeValue.of(50, ByteSizeUnit.MB).getBytes(), "should not break");
assertEquals(ByteSizeValue.of(50, ByteSizeUnit.MB).getBytes(), requestCircuitBreaker.getUsed(), 0.0);
requestCircuitBreaker.addEstimateBytesAndMaybeBreak(ByteSizeValue.of(50, ByteSizeUnit.MB).getBytes(), "should not break");
assertEquals(ByteSizeValue.of(100, ByteSizeUnit.MB).getBytes(), requestCircuitBreaker.getUsed(), 0.0);
CircuitBreakingException exception = expectThrows(
CircuitBreakingException.class,
() -> requestCircuitBreaker.addEstimateBytesAndMaybeBreak(ByteSizeValue.of(50, ByteSizeUnit.MB).getBytes(), "should break")
);
assertThat(exception.getMessage(), containsString("[parent] Data too large, data for [should break] would be"));
assertThat(exception.getMessage(), containsString("which is larger than the limit of [209715200/200mb]"));
assertThat(exception.getMessage(), containsString("usages ["));
assertThat(exception.getMessage(), containsString("fielddata=54001664/51.5mb"));
assertThat(exception.getMessage(), containsString("inflight_requests=0/0b"));
assertThat(exception.getMessage(), containsString("request=157286400/150mb"));
assertThat(exception.getDurability(), equalTo(CircuitBreaker.Durability.TRANSIENT));
assertCircuitBreakerLimitWarning();
}
public void testParentBreaksOnRealMemoryUsage() {
Settings clusterSettings = Settings.builder()
.put(HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING.getKey(), Boolean.TRUE)
.put(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "200b")
.put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "350b")
.put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(), 2)
.build();
AtomicLong memoryUsage = new AtomicLong();
final CircuitBreakerService service = new HierarchyCircuitBreakerService(
CircuitBreakerMetrics.NOOP,
clusterSettings,
Collections.emptyList(),
new ClusterSettings(clusterSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
) {
@Override
long currentMemoryUsage() {
return memoryUsage.get();
}
};
final CircuitBreaker requestBreaker = service.getBreaker(CircuitBreaker.REQUEST);
// anything below 100 bytes should work (overhead) - current memory usage is zero
requestBreaker.addEstimateBytesAndMaybeBreak(randomLongBetween(0, 99), "request");
assertEquals(0, requestBreaker.getTrippedCount());
// assume memory usage has increased to 150 bytes
memoryUsage.set(150);
// a reservation that bumps memory usage to less than 200 (150 bytes used + reservation < 200)
requestBreaker.addEstimateBytesAndMaybeBreak(randomLongBetween(0, 24), "request");
assertEquals(0, requestBreaker.getTrippedCount());
memoryUsage.set(181);
long reservationInBytes = randomLongBetween(10, 50);
// anything >= 20 bytes (10 bytes * 2 overhead) reservation breaks the parent but it must be low enough to avoid
// breaking the child breaker.
CircuitBreakingException exception = expectThrows(
CircuitBreakingException.class,
() -> requestBreaker.addEstimateBytesAndMaybeBreak(reservationInBytes, "request")
);
// it was the parent that rejected the reservation
assertThat(exception.getMessage(), containsString("[parent] Data too large, data for [request] would be"));
assertThat(exception.getMessage(), containsString("which is larger than the limit of [200/200b]"));
assertThat(
exception.getMessage(),
containsString(
"real usage: [181/181b], new bytes reserved: ["
+ (reservationInBytes * 2)
+ "/"
+ ByteSizeValue.ofBytes(reservationInBytes * 2)
+ "]"
)
);
final long requestCircuitBreakerUsed = (requestBreaker.getUsed() + reservationInBytes) * 2;
assertThat(exception.getMessage(), containsString("usages ["));
assertThat(exception.getMessage(), containsString("fielddata=0/0b"));
assertThat(
exception.getMessage(),
containsString("request=" + requestCircuitBreakerUsed + "/" + ByteSizeValue.ofBytes(requestCircuitBreakerUsed))
);
assertThat(exception.getMessage(), containsString("inflight_requests=0/0b"));
assertThat(exception.getDurability(), equalTo(CircuitBreaker.Durability.TRANSIENT));
assertEquals(0, requestBreaker.getTrippedCount());
assertEquals(1, service.stats().getStats(CircuitBreaker.PARENT).getTrippedCount());
// lower memory usage again - the same reservation should succeed
memoryUsage.set(100);
requestBreaker.addEstimateBytesAndMaybeBreak(reservationInBytes, "request");
assertEquals(0, requestBreaker.getTrippedCount());
assertCircuitBreakerLimitWarning();
}
/**
* "Integration test" checking that we ask the G1 over limit check before parent breaking.
* Given that it depends on GC, the main assertion that we do not get a circuit breaking exception in the threads towards
* the end of the test is not enabled. The following tests checks this in more unit test style.
*/
    public void testParentTriggersG1GCBeforeBreaking() throws InterruptedException, TimeoutException, BrokenBarrierException {
        // The strategy under test is G1-specific and relies on humongous objects being reclaimable by a young GC.
        assumeTrue("Only G1GC can utilize the over limit check", JvmInfo.jvmInfo().useG1GC().equals("true"));
        long g1RegionSize = JvmInfo.jvmInfo().getG1RegionSize();
        assumeTrue("Must have region size", g1RegionSize > 0);
        // Track real memory usage with a 50% parent limit so that filling roughly half the heap trips the parent breaker.
        Settings clusterSettings = Settings.builder()
            .put(HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING.getKey(), Boolean.TRUE)
            .put(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "50%")
            .build();
        AtomicInteger leaderTriggerCount = new AtomicInteger();
        // Hook run from overLimitTriggered; swapped below to release the allocations once the first break was observed.
        AtomicReference<Consumer<Boolean>> onOverLimit = new AtomicReference<>(leader -> {});
        // Simulated clock (supplied via time::get) so the test controls the strategy's throttling interval.
        AtomicLong time = new AtomicLong(randomLongBetween(Long.MIN_VALUE / 2, Long.MAX_VALUE / 2));
        long interval = randomLongBetween(1, 1000);
        long fullGCInterval = randomLongBetween(500, 2000);
        final HierarchyCircuitBreakerService service = new HierarchyCircuitBreakerService(
            CircuitBreakerMetrics.NOOP,
            clusterSettings,
            Collections.emptyList(),
            new ClusterSettings(clusterSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
            trackRealMemoryUsage -> new HierarchyCircuitBreakerService.G1OverLimitStrategy(
                JvmInfo.jvmInfo(),
                HierarchyCircuitBreakerService::realMemoryUsage,
                HierarchyCircuitBreakerService.createYoungGcCountSupplier(),
                time::get,
                interval,
                fullGCInterval,
                TimeValue.timeValueSeconds(30),
                TimeValue.timeValueSeconds(30)
            ) {
                @Override
                void overLimitTriggered(boolean leader) {
                    // count only leader triggers (the caller actually performing the over-limit work), then run the hook
                    if (leader) {
                        leaderTriggerCount.incrementAndGet();
                    }
                    onOverLimit.get().accept(leader);
                }
            }
        );
        long maxHeap = JvmInfo.jvmInfo().getConfiguredMaxHeapSize();
        // number of half-region allocations needed to occupy roughly half the heap (rounded up to whole regions)
        int regionCount = Math.toIntExact((maxHeap / 2 + g1RegionSize - 1) / g1RegionSize);
        // First setup a host of large byte[]'s, must be Humongous objects since those are cleaned during a young phase (no concurrent cycle
        // necessary, which is hard to control in the test).
        List<byte[]> data = new ArrayList<>();
        for (int i = 0; i < regionCount; ++i) {
            data.add(new byte[(int) (JvmInfo.jvmInfo().getG1RegionSize() / 2)]);
        }
        // with the heap filled, the parent check must break; this is the first leader trigger
        try {
            service.checkParentLimit(0, "test");
            fail("must exceed memory limit");
        } catch (CircuitBreakingException e) {
            // OK
        }
        // advance the simulated clock past the throttle interval so the next over-limit can elect a leader again
        time.addAndGet(randomLongBetween(interval, interval + 10));
        // from now on the leader trigger frees the humongous allocations, letting the triggered young GC reclaim them
        onOverLimit.set(leader -> {
            if (leader) {
                data.clear();
            }
        });
        logger.trace("black hole [{}]", data.hashCode());
        int threadCount = randomIntBetween(1, 10);
        startInParallel(threadCount, i -> {
            try {
                service.checkParentLimit(0, "test-thread");
            } catch (CircuitBreakingException e) {
                // very rare
                logger.info("Thread got semi-unexpected circuit breaking exception", e);
            }
        });
        // exactly two leader triggers: the initial break above, plus one more after the interval elapsed
        assertThat(leaderTriggerCount.get(), equalTo(2));
    }
    public void testParentDoesOverLimitCheck() {
        long g1RegionSize = JvmInfo.jvmInfo().getG1RegionSize();
        // real-memory parent breaker at 50% of the heap; the over-limit strategy below is a stub we can observe
        Settings clusterSettings = Settings.builder()
            .put(HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING.getKey(), Boolean.TRUE)
            .put(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "50%")
            .build();
        // randomly either "save the day" (pretend the over-limit handling freed memory) or leave usage unchanged
        boolean saveTheDay = randomBoolean();
        AtomicBoolean overLimitTriggered = new AtomicBoolean();
        final HierarchyCircuitBreakerService service = new HierarchyCircuitBreakerService(
            CircuitBreakerMetrics.NOOP,
            clusterSettings,
            Collections.emptyList(),
            new ClusterSettings(clusterSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
            trackRealMemoryUsage -> memoryUsed -> {
                // the over-limit strategy must be consulted exactly once before the parent breaks
                assertTrue(overLimitTriggered.compareAndSet(false, true));
                if (saveTheDay) {
                    // report half the base usage as freed, which should keep the parent check from tripping
                    return new HierarchyCircuitBreakerService.MemoryUsage(
                        memoryUsed.baseUsage / 2,
                        memoryUsed.totalUsage - (memoryUsed.baseUsage / 2),
                        memoryUsed.transientChildUsage,
                        memoryUsed.permanentChildUsage
                    );
                } else {
                    // unchanged usage means the parent breaker must still trip
                    return memoryUsed;
                }
            }
        );
        // allocate half a G1 region per step when the region size is known, otherwise 1MB chunks
        int allocationSize = g1RegionSize > 0 ? (int) (g1RegionSize / 2) : 1024 * 1024;
        // enough allocations to exceed the entire configured heap if nothing intervened
        int allocationCount = (int) (JvmInfo.jvmInfo().getConfiguredMaxHeapSize() / allocationSize) + 1;
        List<byte[]> data = new ArrayList<>();
        try {
            // allocate until the stub strategy has been consulted once (or we run out of iterations)
            for (int i = 0; i < allocationCount && overLimitTriggered.get() == false; ++i) {
                data.add(new byte[allocationSize]);
                service.checkParentLimit(0, "test");
            }
            // no exception: only acceptable if the stub pretended to free memory
            assertTrue(saveTheDay);
        } catch (CircuitBreakingException e) {
            // breaker tripped: only acceptable if the stub left usage unchanged
            assertFalse(saveTheDay);
        }
        // keep the allocations reachable until here so the JIT/GC cannot elide them
        logger.trace("black hole [{}]", data.hashCode());
    }
public void testFallbackG1RegionSize() {
assumeTrue("Only G1GC can utilize the over limit check", JvmInfo.jvmInfo().useG1GC().equals("true"));
assumeTrue("Must have region size", JvmInfo.jvmInfo().getG1RegionSize() > 0);
assertThat(
HierarchyCircuitBreakerService.G1OverLimitStrategy.fallbackRegionSize(JvmInfo.jvmInfo()),
equalTo(JvmInfo.jvmInfo().getG1RegionSize())
);
}
    public void testG1OverLimitStrategyBreakOnMemory() {
        // simulated clock driving the strategy's leader-throttling interval
        AtomicLong time = new AtomicLong(randomLongBetween(Long.MIN_VALUE / 2, Long.MAX_VALUE / 2));
        AtomicInteger leaderTriggerCount = new AtomicInteger();
        AtomicInteger nonLeaderTriggerCount = new AtomicInteger();
        long interval = randomLongBetween(1, 1000);
        long fullGCInterval = randomLongBetween(500, 2000);
        // controllable "current memory usage" sampled by the strategy after triggering
        AtomicLong memoryUsage = new AtomicLong();
        HierarchyCircuitBreakerService.G1OverLimitStrategy strategy = new HierarchyCircuitBreakerService.G1OverLimitStrategy(
            JvmInfo.jvmInfo(),
            memoryUsage::get,
            () -> 0,
            time::get,
            interval,
            fullGCInterval,
            TimeValue.timeValueSeconds(30),
            TimeValue.timeValueSeconds(30)
        ) {
            @Override
            void overLimitTriggered(boolean leader) {
                // record whether this invocation was elected leader (does the over-limit work) or not
                if (leader) {
                    leaderTriggerCount.incrementAndGet();
                } else {
                    nonLeaderTriggerCount.incrementAndGet();
                }
            }
        };
        // current usage (100-110) is at or above the input base usage of 100 -> nothing was freed
        memoryUsage.set(randomLongBetween(100, 110));
        HierarchyCircuitBreakerService.MemoryUsage input = new HierarchyCircuitBreakerService.MemoryUsage(
            100,
            randomLongBetween(100, 110),
            randomLongBetween(0, 50),
            randomLongBetween(0, 50)
        );
        // no improvement observed: the input is returned as-is, and this caller acted as leader
        assertThat(strategy.overLimit(input), sameInstance(input));
        assertThat(leaderTriggerCount.get(), equalTo(1));
        // now usage drops below the base (memory was "freed"); still within the throttle interval -> non-leader
        memoryUsage.set(99);
        HierarchyCircuitBreakerService.MemoryUsage output = strategy.overLimit(input);
        assertThat(output, not(sameInstance(input)));
        // base usage is re-sampled; total usage is adjusted by the same delta; child usages pass through untouched
        assertThat(output.baseUsage, equalTo(memoryUsage.get()));
        assertThat(output.totalUsage, equalTo(99 + input.totalUsage - 100));
        assertThat(output.transientChildUsage, equalTo(input.transientChildUsage));
        assertThat(output.permanentChildUsage, equalTo(input.permanentChildUsage));
        assertThat(nonLeaderTriggerCount.get(), equalTo(1));
        // once the throttle interval has elapsed, the next over-limit call is elected leader again
        time.addAndGet(randomLongBetween(interval, interval * 2));
        output = strategy.overLimit(input);
        assertThat(output, not(sameInstance(input)));
        assertThat(output.baseUsage, equalTo(memoryUsage.get()));
        assertThat(output.totalUsage, equalTo(99 + input.totalUsage - 100));
        assertThat(output.transientChildUsage, equalTo(input.transientChildUsage));
        assertThat(output.permanentChildUsage, equalTo(input.permanentChildUsage));
        assertThat(leaderTriggerCount.get(), equalTo(2));
    }
    public void testG1OverLimitStrategyBreakOnGcCount() {
        AtomicLong time = new AtomicLong(randomLongBetween(Long.MIN_VALUE / 2, Long.MAX_VALUE / 2));
        AtomicInteger leaderTriggerCount = new AtomicInteger();
        AtomicInteger nonLeaderTriggerCount = new AtomicInteger();
        long interval = randomLongBetween(1, 1000);
        long fullGCInterval = randomLongBetween(500, 2000);
        // counts how often the strategy samples memory usage (asserted at the bottom)
        AtomicLong memoryUsageCounter = new AtomicLong();
        AtomicLong gcCounter = new AtomicLong();
        // always reports usage at or above the input base usage of 100, so the memory check alone never succeeds
        LongSupplier memoryUsageSupplier = () -> {
            memoryUsageCounter.incrementAndGet();
            return randomLongBetween(100, 110);
        };
        HierarchyCircuitBreakerService.G1OverLimitStrategy strategy = new HierarchyCircuitBreakerService.G1OverLimitStrategy(
            JvmInfo.jvmInfo(),
            memoryUsageSupplier,
            // the young-GC count supplier increments on every read, so the strategy always observes a GC having happened
            gcCounter::incrementAndGet,
            time::get,
            interval,
            fullGCInterval,
            TimeValue.timeValueSeconds(30),
            TimeValue.timeValueSeconds(30)
        ) {
            @Override
            void overLimitTriggered(boolean leader) {
                if (leader) {
                    leaderTriggerCount.incrementAndGet();
                } else {
                    nonLeaderTriggerCount.incrementAndGet();
                }
            }
        };
        HierarchyCircuitBreakerService.MemoryUsage input = new HierarchyCircuitBreakerService.MemoryUsage(
            100,
            randomLongBetween(100, 110),
            randomLongBetween(0, 50),
            randomLongBetween(0, 50)
        );
        // memory never improves, so the input is returned unchanged and this caller acted as leader
        assertThat(strategy.overLimit(input), sameInstance(input));
        assertThat(leaderTriggerCount.get(), equalTo(1));
        // GC count read before and after waiting for a young GC
        assertThat(gcCounter.get(), equalTo(2L));
        // 1 before gc count break, 1 for full GC check and 1 to get resulting memory usage.
        assertThat(memoryUsageCounter.get(), equalTo(3L));
    }
    public void testG1OverLimitStrategyThrottling() throws InterruptedException, BrokenBarrierException, TimeoutException {
        // simulated clock: only the main thread advances it, so at most one leader election per iteration is possible
        AtomicLong time = new AtomicLong(randomLongBetween(Long.MIN_VALUE / 2, Long.MAX_VALUE / 2));
        AtomicInteger leaderTriggerCount = new AtomicInteger();
        long interval = randomLongBetween(1, 1000);
        long fullGCInterval = randomLongBetween(500, 2000);
        AtomicLong memoryUsage = new AtomicLong();
        HierarchyCircuitBreakerService.G1OverLimitStrategy strategy = new HierarchyCircuitBreakerService.G1OverLimitStrategy(
            JvmInfo.jvmInfo(),
            memoryUsage::get,
            () -> 0,
            time::get,
            interval,
            fullGCInterval,
            TimeValue.timeValueSeconds(30),
            TimeValue.timeValueSeconds(30)
        ) {
            @Override
            void overLimitTriggered(boolean leader) {
                if (leader) {
                    leaderTriggerCount.incrementAndGet();
                }
            }
        };
        int threadCount = randomIntBetween(1, 10);
        // barrier releases all worker threads plus the main thread together at the start
        CyclicBarrier barrier = new CyclicBarrier(threadCount + 1);
        // per-iteration latch: the main thread waits until the workers have performed a batch of overLimit calls
        AtomicReference<CountDownLatch> countDown = new AtomicReference<>(new CountDownLatch(randomIntBetween(1, 20)));
        List<Thread> threads = IntStream.range(0, threadCount).mapToObj(i -> new Thread(() -> {
            safeAwait(barrier);
            do {
                HierarchyCircuitBreakerService.MemoryUsage input = new HierarchyCircuitBreakerService.MemoryUsage(
                    randomLongBetween(0, 100),
                    randomLongBetween(0, 100),
                    randomLongBetween(0, 100),
                    randomLongBetween(0, 100)
                );
                HierarchyCircuitBreakerService.MemoryUsage output = strategy.overLimit(input);
                // regardless of leadership, the output must be internally consistent with the input
                assertThat(output.totalUsage, equalTo(output.baseUsage + input.totalUsage - input.baseUsage));
                assertThat(output.transientChildUsage, equalTo(input.transientChildUsage));
                assertThat(output.permanentChildUsage, equalTo(input.permanentChildUsage));
                countDown.get().countDown();
            } while (Thread.interrupted() == false);
        })).toList();
        threads.forEach(Thread::start);
        int iterationCount = randomIntBetween(1, 5);
        int lastIterationTriggerCount = leaderTriggerCount.get();
        safeAwait(barrier);
        for (int i = 0; i < iterationCount; ++i) {
            memoryUsage.set(randomLongBetween(0, 100));
            // wait until the workers have executed this iteration's batch of overLimit calls
            safeAwait(countDown.get());
            // throttling invariant: at most one new leader per clock advance, and the count never goes down
            assertThat(leaderTriggerCount.get(), lessThanOrEqualTo(i + 1));
            assertThat(leaderTriggerCount.get(), greaterThanOrEqualTo(lastIterationTriggerCount));
            lastIterationTriggerCount = leaderTriggerCount.get();
            // advance past the throttle interval to allow the next leader election
            time.addAndGet(randomLongBetween(interval, interval * 2));
            countDown.set(new CountDownLatch(randomIntBetween(1, 20)));
        }
        // workers loop until interrupted; ensure they all terminate
        threads.forEach(Thread::interrupt);
        for (Thread thread : threads) {
            thread.join(10000);
        }
        threads.forEach(thread -> assertFalse(thread.isAlive()));
    }
public void testCreateOverLimitStrategy() {
assertThat(
HierarchyCircuitBreakerService.createOverLimitStrategy(false),
not(instanceOf(HierarchyCircuitBreakerService.G1OverLimitStrategy.class))
);
HierarchyCircuitBreakerService.OverLimitStrategy overLimitStrategy = HierarchyCircuitBreakerService.createOverLimitStrategy(true);
if (JvmInfo.jvmInfo().useG1GC().equals("true")) {
assertThat(overLimitStrategy, instanceOf(HierarchyCircuitBreakerService.G1OverLimitStrategy.class));
assertThat(
((HierarchyCircuitBreakerService.G1OverLimitStrategy) overLimitStrategy).getLockTimeout(),
equalTo(TimeValue.timeValueMillis(500))
);
} else {
assertThat(overLimitStrategy, not(instanceOf(HierarchyCircuitBreakerService.G1OverLimitStrategy.class)));
}
}
    public void testG1LockTimeout() throws Exception {
        // signals that the background thread has become leader and is holding the over-limit lock
        CountDownLatch startedBlocking = new CountDownLatch(1);
        // released only at the end; keeps the leader blocked inside overLimitTriggered for the duration of the test
        CountDownLatch blockingUntil = new CountDownLatch(1);
        AtomicLong gcCounter = new AtomicLong();
        // strategy with very small (0/5/10 ms) lock timeouts so a second caller gives up waiting quickly
        HierarchyCircuitBreakerService.G1OverLimitStrategy strategy = new HierarchyCircuitBreakerService.G1OverLimitStrategy(
            JvmInfo.jvmInfo(),
            () -> 100,
            gcCounter::incrementAndGet,
            () -> 0,
            1,
            1,
            TimeValue.timeValueMillis(randomFrom(0, 5, 10)),
            TimeValue.timeValueMillis(randomFrom(0, 5, 10))
        ) {
            @Override
            void overLimitTriggered(boolean leader) {
                if (leader) {
                    startedBlocking.countDown();
                    // this is the central assertion - the overLimit call below should complete in a timely manner.
                    safeAwait(blockingUntil);
                }
            }
        };
        HierarchyCircuitBreakerService.MemoryUsage input = new HierarchyCircuitBreakerService.MemoryUsage(100, 100, 0, 0);
        Thread blocker = new Thread(() -> { strategy.overLimit(input); });
        blocker.start();
        try {
            assertThat(startedBlocking.await(10, TimeUnit.SECONDS), is(true));
            // this should complete in a timely manner, verified by the assertion in the thread.
            assertThat(strategy.overLimit(input), sameInstance(input));
        } finally {
            // release the leader and make sure the background thread terminates
            blockingUntil.countDown();
            blocker.join(10000);
            assertThat(blocker.isAlive(), is(false));
        }
    }
public void testTrippedCircuitBreakerDurability() {
Settings clusterSettings = Settings.builder()
.put(HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING.getKey(), Boolean.FALSE)
.put(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "200mb")
.put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "150mb")
.put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "150mb")
.build();
CircuitBreakerService service = new HierarchyCircuitBreakerService(
CircuitBreakerMetrics.NOOP,
clusterSettings,
Collections.emptyList(),
new ClusterSettings(clusterSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
);
CircuitBreaker requestCircuitBreaker = service.getBreaker(CircuitBreaker.REQUEST);
CircuitBreaker fieldDataCircuitBreaker = service.getBreaker(CircuitBreaker.FIELDDATA);
CircuitBreaker.Durability expectedDurability;
if (randomBoolean()) {
fieldDataCircuitBreaker.addEstimateBytesAndMaybeBreak(mb(100), "should not break");
requestCircuitBreaker.addEstimateBytesAndMaybeBreak(mb(70), "should not break");
expectedDurability = CircuitBreaker.Durability.PERMANENT;
} else {
fieldDataCircuitBreaker.addEstimateBytesAndMaybeBreak(mb(70), "should not break");
requestCircuitBreaker.addEstimateBytesAndMaybeBreak(mb(120), "should not break");
expectedDurability = CircuitBreaker.Durability.TRANSIENT;
}
CircuitBreakingException exception = expectThrows(
CircuitBreakingException.class,
() -> fieldDataCircuitBreaker.addEstimateBytesAndMaybeBreak(mb(40), "should break")
);
assertThat(exception.getMessage(), containsString("[parent] Data too large, data for [should break] would be"));
assertThat(exception.getMessage(), containsString("which is larger than the limit of [209715200/200mb]"));
assertThat(
"Expected [" + expectedDurability + "] due to [" + exception.getMessage() + "]",
exception.getDurability(),
equalTo(expectedDurability)
);
assertCircuitBreakerLimitWarning();
}
public void testAllocationBucketsBreaker() {
Settings clusterSettings = Settings.builder()
.put(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "100b")
.put(HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING.getKey(), "false")
.build();
HierarchyCircuitBreakerService service = new HierarchyCircuitBreakerService(
CircuitBreakerMetrics.NOOP,
clusterSettings,
Collections.emptyList(),
new ClusterSettings(clusterSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
);
long parentLimitBytes = service.getParentLimit();
assertEquals(ByteSizeValue.of(100, ByteSizeUnit.BYTES).getBytes(), parentLimitBytes);
CircuitBreaker breaker = service.getBreaker(CircuitBreaker.REQUEST);
MultiBucketConsumerService.MultiBucketConsumer multiBucketConsumer = new MultiBucketConsumerService.MultiBucketConsumer(
10000,
breaker
);
// make sure used bytes is greater than the total circuit breaker limit
breaker.addWithoutBreaking(200);
// make sure that we check on the following call
for (int i = 0; i < 1023; i++) {
multiBucketConsumer.accept(0);
}
CircuitBreakingException exception = expectThrows(CircuitBreakingException.class, () -> multiBucketConsumer.accept(1024));
assertThat(exception.getMessage(), containsString("[parent] Data too large, data for [allocated_buckets] would be"));
assertThat(exception.getMessage(), containsString("which is larger than the limit of [100/100b]"));
assertCircuitBreakerLimitWarning();
}
public void testRegisterCustomCircuitBreakers_WithDuplicates() {
IllegalArgumentException iae = expectThrows(
IllegalArgumentException.class,
() -> new HierarchyCircuitBreakerService(
CircuitBreakerMetrics.NOOP,
Settings.EMPTY,
Collections.singletonList(new BreakerSettings(CircuitBreaker.FIELDDATA, 100, 1.2)),
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
)
);
assertThat(
iae.getMessage(),
containsString("More than one circuit breaker with the name [fielddata] exists. Circuit breaker names must be unique")
);
iae = expectThrows(
IllegalArgumentException.class,
() -> new HierarchyCircuitBreakerService(
CircuitBreakerMetrics.NOOP,
Settings.EMPTY,
Arrays.asList(new BreakerSettings("foo", 100, 1.2), new BreakerSettings("foo", 200, 0.1)),
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
)
);
assertThat(
iae.getMessage(),
containsString("More than one circuit breaker with the name [foo] exists. Circuit breaker names must be unique")
);
}
public void testCustomCircuitBreakers() {
CircuitBreakerService service = new HierarchyCircuitBreakerService(
CircuitBreakerMetrics.NOOP,
Settings.EMPTY,
Arrays.asList(new BreakerSettings("foo", 100, 1.2), new BreakerSettings("bar", 200, 0.1)),
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
);
assertThat(service.getBreaker("foo"), is(not(nullValue())));
assertThat(service.getBreaker("foo").getOverhead(), equalTo(1.2));
assertThat(service.getBreaker("foo").getLimit(), equalTo(100L));
assertThat(service.getBreaker("bar"), is(not(nullValue())));
assertThat(service.getBreaker("bar").getOverhead(), equalTo(0.1));
assertThat(service.getBreaker("bar").getLimit(), equalTo(200L));
}
private static long mb(long size) {
return ByteSizeValue.of(size, ByteSizeUnit.MB).getBytes();
}
public void testUpdatingUseRealMemory() {
HierarchyCircuitBreakerService service = new HierarchyCircuitBreakerService(
CircuitBreakerMetrics.NOOP,
Settings.EMPTY,
Collections.emptyList(),
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
);
// use real memory default true
assertTrue(service.isTrackRealMemoryUsage());
assertThat(service.getOverLimitStrategy(), instanceOf(HierarchyCircuitBreakerService.G1OverLimitStrategy.class));
// update use_real_memory to false
service.updateUseRealMemorySetting(false);
assertFalse(service.isTrackRealMemoryUsage());
assertThat(service.getOverLimitStrategy(), not(instanceOf(HierarchyCircuitBreakerService.G1OverLimitStrategy.class)));
// update use_real_memory to true
service.updateUseRealMemorySetting(true);
assertTrue(service.isTrackRealMemoryUsage());
assertThat(service.getOverLimitStrategy(), instanceOf(HierarchyCircuitBreakerService.G1OverLimitStrategy.class));
}
public void testApplySettingForUpdatingUseRealMemory() {
String useRealMemoryUsageSetting = HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING.getKey();
String totalCircuitBreakerLimitSetting = HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.getKey();
Settings initialSettings = Settings.builder().put(useRealMemoryUsageSetting, "true").build();
ClusterSettings clusterSettings = new ClusterSettings(initialSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
HierarchyCircuitBreakerService service = new HierarchyCircuitBreakerService(
CircuitBreakerMetrics.NOOP,
Settings.EMPTY,
Collections.emptyList(),
clusterSettings
);
// total.limit defaults to 95% of the JVM heap if use_real_memory is true
assertEquals(
MemorySizeValue.parseBytesSizeValueOrHeapRatio("95%", totalCircuitBreakerLimitSetting).getBytes(),
service.getParentLimit()
);
// total.limit defaults to 70% of the JVM heap if use_real_memory set to false
clusterSettings.applySettings(Settings.builder().put(useRealMemoryUsageSetting, false).build());
assertEquals(
MemorySizeValue.parseBytesSizeValueOrHeapRatio("70%", totalCircuitBreakerLimitSetting).getBytes(),
service.getParentLimit()
);
// total.limit defaults to 95% of the JVM heap if use_real_memory set to true
clusterSettings.applySettings(Settings.builder().put(useRealMemoryUsageSetting, true).build());
assertEquals(
MemorySizeValue.parseBytesSizeValueOrHeapRatio("95%", totalCircuitBreakerLimitSetting).getBytes(),
service.getParentLimit()
);
}
public void testSizeBelowMinimumWarning() {
ByteSizeValue sizeValue = MemorySizeValue.parseHeapRatioOrDeprecatedByteSizeValue(
"19%",
HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(),
20
);
assertWarnings("[indices.breaker.total.limit] setting of [19%] is below the recommended minimum of 20.0% of the heap");
}
public void testBuildParentTripMessage() {
| HierarchyCircuitBreakerServiceTests |
java | apache__camel | components/camel-http/src/test/java/org/apache/camel/component/http/HttpCompressionTest.java | {
"start": 6990,
"end": 7760
} | class ____ extends HttpEntityWrapper {
GzipCompressingEntity(final HttpEntity entity) {
super(entity);
}
@Override
public String getContentEncoding() {
return "gzip";
}
@Override
public void writeTo(OutputStream outStream) throws IOException {
try (GZIPOutputStream gzip = new GZIPOutputStream(outStream)) {
super.writeTo(gzip);
}
}
@Override
public long getContentLength() {
return -1;
}
@Override
public boolean isStreaming() {
return false;
}
}
}
}
| GzipCompressingEntity |
java | apache__camel | components/camel-jgroups-raft/src/generated/java/org/apache/camel/component/jgroups/raft/JGroupsRaftComponentConfigurer.java | {
"start": 739,
"end": 4033
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
JGroupsRaftComponent target = (JGroupsRaftComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": target.setAutowiredEnabled(property(camelContext, boolean.class, value)); return true;
case "bridgeerrorhandler":
case "bridgeErrorHandler": target.setBridgeErrorHandler(property(camelContext, boolean.class, value)); return true;
case "channelproperties":
case "channelProperties": target.setChannelProperties(property(camelContext, java.lang.String.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
case "rafthandle":
case "raftHandle": target.setRaftHandle(property(camelContext, org.jgroups.raft.RaftHandle.class, value)); return true;
case "raftid":
case "raftId": target.setRaftId(property(camelContext, java.lang.String.class, value)); return true;
case "statemachine":
case "stateMachine": target.setStateMachine(property(camelContext, org.jgroups.raft.StateMachine.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": return boolean.class;
case "bridgeerrorhandler":
case "bridgeErrorHandler": return boolean.class;
case "channelproperties":
case "channelProperties": return java.lang.String.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
case "rafthandle":
case "raftHandle": return org.jgroups.raft.RaftHandle.class;
case "raftid":
case "raftId": return java.lang.String.class;
case "statemachine":
case "stateMachine": return org.jgroups.raft.StateMachine.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
JGroupsRaftComponent target = (JGroupsRaftComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": return target.isAutowiredEnabled();
case "bridgeerrorhandler":
case "bridgeErrorHandler": return target.isBridgeErrorHandler();
case "channelproperties":
case "channelProperties": return target.getChannelProperties();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
case "rafthandle":
case "raftHandle": return target.getRaftHandle();
case "raftid":
case "raftId": return target.getRaftId();
case "statemachine":
case "stateMachine": return target.getStateMachine();
default: return null;
}
}
}
| JGroupsRaftComponentConfigurer |
java | bumptech__glide | library/test/src/test/java/com/bumptech/glide/request/RequestOptionsTest.java | {
"start": 19124,
"end": 19322
} | class ____ extends BaseRequestOptions<FakeOptions> {
@Override
public boolean equals(Object o) {
return o instanceof FakeOptions && super.equals(o);
}
// Our | FakeOptions |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/rest/messages/checkpoints/CheckpointingStatistics.java | {
"start": 4057,
"end": 7599
} | class ____ {
public static final String FIELD_NAME_RESTORED_CHECKPOINTS = "restored";
public static final String FIELD_NAME_TOTAL_CHECKPOINTS = "total";
public static final String FIELD_NAME_IN_PROGRESS_CHECKPOINTS = "in_progress";
public static final String FIELD_NAME_COMPLETED_CHECKPOINTS = "completed";
public static final String FIELD_NAME_FAILED_CHECKPOINTS = "failed";
@JsonProperty(FIELD_NAME_RESTORED_CHECKPOINTS)
private final long numberRestoredCheckpoints;
@JsonProperty(FIELD_NAME_TOTAL_CHECKPOINTS)
private final long totalNumberCheckpoints;
@JsonProperty(FIELD_NAME_IN_PROGRESS_CHECKPOINTS)
private final int numberInProgressCheckpoints;
@JsonProperty(FIELD_NAME_COMPLETED_CHECKPOINTS)
private final long numberCompletedCheckpoints;
@JsonProperty(FIELD_NAME_FAILED_CHECKPOINTS)
private final long numberFailedCheckpoints;
@JsonCreator
public Counts(
@JsonProperty(FIELD_NAME_RESTORED_CHECKPOINTS) long numberRestoredCheckpoints,
@JsonProperty(FIELD_NAME_TOTAL_CHECKPOINTS) long totalNumberCheckpoints,
@JsonProperty(FIELD_NAME_IN_PROGRESS_CHECKPOINTS) int numberInProgressCheckpoints,
@JsonProperty(FIELD_NAME_COMPLETED_CHECKPOINTS) long numberCompletedCheckpoints,
@JsonProperty(FIELD_NAME_FAILED_CHECKPOINTS) long numberFailedCheckpoints) {
this.numberRestoredCheckpoints = numberRestoredCheckpoints;
this.totalNumberCheckpoints = totalNumberCheckpoints;
this.numberInProgressCheckpoints = numberInProgressCheckpoints;
this.numberCompletedCheckpoints = numberCompletedCheckpoints;
this.numberFailedCheckpoints = numberFailedCheckpoints;
}
public long getNumberRestoredCheckpoints() {
return numberRestoredCheckpoints;
}
public long getTotalNumberCheckpoints() {
return totalNumberCheckpoints;
}
public int getNumberInProgressCheckpoints() {
return numberInProgressCheckpoints;
}
public long getNumberCompletedCheckpoints() {
return numberCompletedCheckpoints;
}
public long getNumberFailedCheckpoints() {
return numberFailedCheckpoints;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Counts counts = (Counts) o;
return numberRestoredCheckpoints == counts.numberRestoredCheckpoints
&& totalNumberCheckpoints == counts.totalNumberCheckpoints
&& numberInProgressCheckpoints == counts.numberInProgressCheckpoints
&& numberCompletedCheckpoints == counts.numberCompletedCheckpoints
&& numberFailedCheckpoints == counts.numberFailedCheckpoints;
}
@Override
public int hashCode() {
return Objects.hash(
numberRestoredCheckpoints,
totalNumberCheckpoints,
numberInProgressCheckpoints,
numberCompletedCheckpoints,
numberFailedCheckpoints);
}
}
/** Checkpoint summary. */
@Schema(name = "CheckpointStatisticsSummary")
public static final | Counts |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/types/inference/strategies/DecimalPlusTypeStrategy.java | {
"start": 1871,
"end": 3034
} | class ____ implements TypeStrategy {
@Override
public Optional<DataType> inferType(CallContext callContext) {
final List<DataType> argumentDataTypes = callContext.getArgumentDataTypes();
final LogicalType addend1 = argumentDataTypes.get(0).getLogicalType();
final LogicalType addend2 = argumentDataTypes.get(1).getLogicalType();
// a hack to make legacy types possible until we drop them
if (addend1 instanceof LegacyTypeInformationType) {
return Optional.of(argumentDataTypes.get(0));
}
if (addend2 instanceof LegacyTypeInformationType) {
return Optional.of(argumentDataTypes.get(1));
}
if (!isDecimalComputation(addend1, addend2)) {
return Optional.empty();
}
final DecimalType decimalType =
LogicalTypeMerging.findAdditionDecimalType(
getPrecision(addend1),
getScale(addend1),
getPrecision(addend2),
getScale(addend2));
return Optional.of(fromLogicalToDataType(decimalType));
}
}
| DecimalPlusTypeStrategy |
java | alibaba__nacos | api/src/main/java/com/alibaba/nacos/api/common/Constants.java | {
"start": 10676,
"end": 10855
} | class ____ {
public static final String INTERNAL_MODULE = "internal";
}
/**
* The constants in exception directory.
*/
public static | Remote |
java | apache__kafka | metadata/src/main/java/org/apache/kafka/metadata/ControllerRegistration.java | {
"start": 1747,
"end": 8775
} | class ____ {
private int id;
private Uuid incarnationId;
private boolean zkMigrationReady;
private Map<String, Endpoint> listeners;
private Map<String, VersionRange> supportedFeatures;
public Builder() {
this.id = 0;
this.incarnationId = null;
this.zkMigrationReady = false;
this.listeners = null;
this.supportedFeatures = null;
}
public Builder(RegisterControllerRecord record) {
this.id = record.controllerId();
this.incarnationId = record.incarnationId();
this.zkMigrationReady = record.zkMigrationReady();
Map<String, Endpoint> newListeners = new HashMap<>();
record.endPoints().forEach(endPoint -> {
SecurityProtocol protocol = SecurityProtocol.forId(endPoint.securityProtocol());
if (protocol == null) {
throw new RuntimeException("Unknown security protocol " +
(int) endPoint.securityProtocol());
}
newListeners.put(endPoint.name(), new Endpoint(endPoint.name(),
protocol,
endPoint.host(),
endPoint.port()));
});
this.listeners = Collections.unmodifiableMap(newListeners);
Map<String, VersionRange> newSupportedFeatures = new HashMap<>();
record.features().forEach(feature ->
newSupportedFeatures.put(feature.name(), VersionRange.of(
feature.minSupportedVersion(), feature.maxSupportedVersion()))
);
this.supportedFeatures = Collections.unmodifiableMap(newSupportedFeatures);
}
public Builder setId(int id) {
this.id = id;
return this;
}
public Builder setIncarnationId(Uuid incarnationId) {
this.incarnationId = incarnationId;
return this;
}
public Builder setZkMigrationReady(boolean zkMigrationReady) {
this.zkMigrationReady = zkMigrationReady;
return this;
}
public Builder setListeners(Map<String, Endpoint> listeners) {
this.listeners = listeners;
return this;
}
public Builder setSupportedFeatures(Map<String, VersionRange> supportedFeatures) {
this.supportedFeatures = supportedFeatures;
return this;
}
public ControllerRegistration build() {
if (incarnationId == null) throw new RuntimeException("You must set incarnationId.");
if (listeners == null) throw new RuntimeException("You must set listeners.");
if (supportedFeatures == null) {
supportedFeatures = new HashMap<>();
supportedFeatures.put(MetadataVersion.FEATURE_NAME, VersionRange.of(
MetadataVersion.MINIMUM_VERSION.featureLevel(),
MetadataVersion.latestProduction().featureLevel()));
}
return new ControllerRegistration(id,
incarnationId,
zkMigrationReady,
listeners,
supportedFeatures);
}
}
private final int id;
private final Uuid incarnationId;
private final boolean zkMigrationReady;
private final Map<String, Endpoint> listeners;
private final Map<String, VersionRange> supportedFeatures;
private ControllerRegistration(int id,
Uuid incarnationId,
boolean zkMigrationReady,
Map<String, Endpoint> listeners,
Map<String, VersionRange> supportedFeatures
) {
this.id = id;
this.incarnationId = incarnationId;
this.zkMigrationReady = zkMigrationReady;
this.listeners = listeners;
this.supportedFeatures = supportedFeatures;
}
public int id() {
return id;
}
public Uuid incarnationId() {
return incarnationId;
}
public boolean zkMigrationReady() {
return zkMigrationReady;
}
public Map<String, Endpoint> listeners() {
return listeners;
}
public Optional<Node> node(String listenerName) {
Endpoint endpoint = listeners().get(listenerName);
if (endpoint == null) {
return Optional.empty();
}
return Optional.of(new Node(id, endpoint.host(), endpoint.port(), null));
}
public Map<String, VersionRange> supportedFeatures() {
return supportedFeatures;
}
public ApiMessageAndVersion toRecord(ImageWriterOptions options) {
RegisterControllerRecord registrationRecord = new RegisterControllerRecord().
setControllerId(id).
setIncarnationId(incarnationId).
setZkMigrationReady(zkMigrationReady);
for (Entry<String, Endpoint> entry : listeners.entrySet()) {
Endpoint endpoint = entry.getValue();
registrationRecord.endPoints().add(new ControllerEndpoint().
setName(entry.getKey()).
setHost(endpoint.host()).
setPort(endpoint.port()).
setSecurityProtocol(endpoint.securityProtocol().id));
}
for (Entry<String, VersionRange> entry : supportedFeatures.entrySet()) {
registrationRecord.features().add(new ControllerFeature().
setName(entry.getKey()).
setMinSupportedVersion(entry.getValue().min()).
setMaxSupportedVersion(entry.getValue().max()));
}
return new ApiMessageAndVersion(registrationRecord,
options.metadataVersion().registerControllerRecordVersion());
}
@Override
public int hashCode() {
return Objects.hash(id,
incarnationId,
zkMigrationReady,
listeners,
supportedFeatures);
}
@Override
public boolean equals(Object o) {
if (!(o instanceof ControllerRegistration other)) return false;
return other.id == id &&
other.incarnationId.equals(incarnationId) &&
other.zkMigrationReady == zkMigrationReady &&
other.listeners.equals(listeners) &&
other.supportedFeatures.equals(supportedFeatures);
}
@Override
public String toString() {
return "ControllerRegistration(id=" + id +
", incarnationId=" + incarnationId +
", zkMigrationReady=" + zkMigrationReady +
", listeners=[" +
listeners.keySet().stream().sorted().
map(n -> listeners.get(n).toString()).
collect(Collectors.joining(", ")) +
"], supportedFeatures={" +
supportedFeatures.keySet().stream().sorted().
map(k -> k + ": " + supportedFeatures.get(k)).
collect(Collectors.joining(", ")) +
"}" +
")";
}
}
| Builder |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/api/ProxifyMethodChangingTheObjectUnderTest.java | {
"start": 10556,
"end": 10746
} | class ____
// *Assert classes define a constructor using interface (@see ListAssert for example).
// Instead we can read generic types from *Assert definition.
// Inspecting: | whereas |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/core/publisher/MonoIgnoreThen.java | {
"start": 2619,
"end": 11922
} | class ____<T> implements InnerOperator<T, T> {
final Publisher<?>[] ignoreMonos;
final Mono<T> lastMono;
final CoreSubscriber<? super T> actual;
@Nullable T value;
int index;
boolean done;
@Nullable Subscription activeSubscription;
volatile int state;
@SuppressWarnings("rawtypes")
private static final AtomicIntegerFieldUpdater<ThenIgnoreMain> STATE =
AtomicIntegerFieldUpdater.newUpdater(ThenIgnoreMain.class, "state");
// The following are to be used as bit masks, not as values per se.
static final int HAS_REQUEST = 0b00000010;
static final int HAS_SUBSCRIPTION = 0b00000100;
static final int HAS_VALUE = 0b00001000;
static final int HAS_COMPLETION = 0b00010000;
// The following are to be used as value (ie using == or !=).
static final int CANCELLED = 0b10000000;
ThenIgnoreMain(CoreSubscriber<? super T> subscriber,
Publisher<?>[] ignoreMonos, Mono<T> lastMono) {
this.actual = subscriber;
this.ignoreMonos = ignoreMonos;
this.lastMono = lastMono;
}
@Override
public @Nullable Object scanUnsafe(Attr key) {
if (key == Attr.PARENT) return this.activeSubscription;
if (key == Attr.CANCELLED) return isCancelled(this.state);
if (key == Attr.RUN_STYLE) return Attr.RunStyle.SYNC;
return InnerOperator.super.scanUnsafe(key);
}
@Override
public CoreSubscriber<? super T> actual() {
return this.actual;
}
@Override
public void onSubscribe(Subscription s) {
if (Operators.validate(this.activeSubscription, s)) {
this.activeSubscription = s;
final int previousState = markHasSubscription();
if (isCancelled(previousState)) {
s.cancel();
return;
}
s.request(Long.MAX_VALUE);
}
}
@Override
public void cancel() {
int previousState = markCancelled();
if (hasSubscription(previousState)) {
Subscription a = this.activeSubscription;
assert a != null : "activeSubscription cannot be null when hasSubscription(previousState) is true";
a.cancel();
}
}
@Override
public void request(long n) {
if (Operators.validate(n)) {
for (; ; ) {
final int state = this.state;
if (isCancelled(state)) {
return;
}
if (hasRequest(state)) {
return;
}
if (STATE.compareAndSet(this, state, state | HAS_REQUEST)) {
if (hasValue(state)) {
final CoreSubscriber<? super T> actual = this.actual;
final T v = this.value;
assert v != null : "v can not be null when hasValue(state) is true";
actual.onNext(v);
actual.onComplete();
}
return;
}
}
}
}
@Override
public void onNext(T t) {
if (this.done) {
Operators.onDiscard(t, currentContext());
return;
}
if (this.index != this.ignoreMonos.length) {
// ignored
Operators.onDiscard(t, currentContext());
return;
}
this.done = true;
complete(t);
}
@Override
public void onComplete() {
if (this.done) {
return;
}
if (this.index != this.ignoreMonos.length) {
final int previousState = markUnsubscribed();
if (isCancelled(previousState)) {
return;
}
this.activeSubscription = null;
this.index++;
subscribeNext();
return;
}
this.done = true;
this.actual.onComplete();
}
@SuppressWarnings({"unchecked", "rawtypes"})
void subscribeNext() {
final Publisher<?>[] a = this.ignoreMonos;
for (;;) {
final int i = this.index;
if (i == a.length) {
Mono<T> m = this.lastMono;
if (m instanceof Callable) {
if (isCancelled(this.state)) {
//NB: in the non-callable case, this is handled by activeSubscription.cancel()
return;
}
T v;
try {
v = ((Callable<@Nullable T>)m).call();
}
catch (Throwable ex) {
onError(Operators.onOperatorError(ex, currentContext()));
return;
}
if (v != null) {
onNext(v);
}
onComplete();
} else {
Operators.toFluxOrMono(m).subscribe(this);
}
return;
} else {
Publisher<?> p = a[i];
if (p instanceof Callable) {
if (isCancelled(this.state)) {
//NB: in the non-callable case, this is handled by activeSubscription.cancel()
return;
}
try {
Operators.onDiscard(((Callable<?>) p).call(), currentContext());
}
catch (Throwable ex) {
onError(Operators.onOperatorError(ex, currentContext()));
return;
}
this.index = i + 1;
continue;
}
p = Operators.toFluxOrMono(p);
p.subscribe((CoreSubscriber) this);
return;
}
}
}
@Override
public void onError(Throwable t) {
if (this.done) {
Operators.onErrorDropped(t, actual().currentContext());
return;
}
this.done = true;
this.actual.onError(t);
}
void complete(T value) {
for (; ; ) {
int s = this.state;
if (isCancelled(s)) {
Operators.onDiscard(value, this.actual.currentContext());
return;
}
if (hasRequest(s) && STATE.compareAndSet(this, s, s | (HAS_VALUE | HAS_COMPLETION))) {
final CoreSubscriber<? super T> actual = this.actual;
actual.onNext(value);
actual.onComplete();
return;
}
this.value = value;
if (STATE.compareAndSet(this, s, s | (HAS_VALUE | HAS_COMPLETION))) {
return;
}
}
}
int markHasSubscription() {
for (;;) {
final int state = this.state;
if (state == CANCELLED) {
return state;
}
if ((state & HAS_SUBSCRIPTION) == HAS_SUBSCRIPTION) {
return state;
}
if (STATE.compareAndSet(this, state, state | HAS_SUBSCRIPTION)) {
return state;
}
}
}
int markUnsubscribed() {
for (;;) {
final int state = this.state;
if (isCancelled(state)) {
return state;
}
if (!hasSubscription(state)) {
return state;
}
if (STATE.compareAndSet(this, state, state &~ HAS_SUBSCRIPTION)) {
return state;
}
}
}
int markCancelled() {
for (;;) {
final int state = this.state;
if (state == CANCELLED) {
return state;
}
if (STATE.compareAndSet(this, state, CANCELLED)) {
return state;
}
}
}
static boolean isCancelled(int s) {
return s == CANCELLED;
}
static boolean hasSubscription(int s) {
return (s & HAS_SUBSCRIPTION) == HAS_SUBSCRIPTION;
}
static boolean hasRequest(int s) {
return (s & HAS_REQUEST) == HAS_REQUEST;
}
static boolean hasValue(int s) {
return (s & HAS_VALUE) == HAS_VALUE;
}
}
}
| ThenIgnoreMain |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/bean/override/mockito/MockitoBeanOverrideProcessorTests.java | {
"start": 1347,
"end": 1494
} | class ____ {
private final MockitoBeanOverrideProcessor processor = new MockitoBeanOverrideProcessor();
@Nested
| MockitoBeanOverrideProcessorTests |
java | dropwizard__dropwizard | dropwizard-jersey/src/main/java/io/dropwizard/jersey/DropwizardResourceConfig.java | {
"start": 7469,
"end": 8079
} | class ____ {
private final String httpMethod;
private final String basePath;
private final Class<?> klass;
EndpointLogLine(String httpMethod, String basePath, Class<?> klass) {
this.basePath = basePath;
this.klass = klass;
this.httpMethod = httpMethod;
}
@Override
public String toString() {
final String method = httpMethod == null ? "UNKNOWN" : httpMethod;
return String.format(" %-7s %s (%s)", method, basePath, klass.getCanonicalName());
}
}
private static | EndpointLogLine |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/procedure/internal/SQLServerCallableStatementSupport.java | {
"start": 276,
"end": 783
} | class ____ extends StandardCallableStatementSupport {
public static final StandardCallableStatementSupport INSTANCE = new SQLServerCallableStatementSupport( );
private SQLServerCallableStatementSupport() {
super( false );
}
@Override
protected void appendNameParameter(
StringBuilder buffer,
ProcedureParameterImplementor<?> parameter,
JdbcCallParameterRegistration registration) {
buffer.append( '@' ).append( parameter.getName() ).append( " = ?" );
}
}
| SQLServerCallableStatementSupport |
java | quarkusio__quarkus | extensions/oidc-token-propagation/deployment/src/main/java/io/quarkus/oidc/token/propagation/deployment/OidcTokenPropagationBuildStep.java | {
"start": 1863,
"end": 5166
} | class ____ {
private static final DotName JWT_ACCESS_TOKEN_CREDENTIAL = DotName.createSimple(JsonWebToken.class.getName());
OidcTokenPropagationConfig config;
@BuildStep
void registerProvider(BuildProducer<AdditionalBeanBuildItem> additionalBeans,
BuildProducer<ReflectiveClassBuildItem> reflectiveClass,
BuildProducer<ResteasyJaxrsProviderBuildItem> jaxrsProviders,
BuildProducer<RestClientPredicateProviderBuildItem> providerPredicateProducer,
BuildProducer<GeneratedBeanBuildItem> generatedBeanProducer,
BuildProducer<UnremovableBeanBuildItem> unremovableBeanProducer,
List<AccessTokenInstanceBuildItem> accessTokenInstances,
BuildProducer<RestClientAnnotationProviderBuildItem> restAnnotationProvider) {
additionalBeans.produce(AdditionalBeanBuildItem.unremovableOf(AccessTokenRequestFilter.class));
additionalBeans.produce(AdditionalBeanBuildItem.unremovableOf(JsonWebTokenRequestFilter.class));
reflectiveClass
.produce(ReflectiveClassBuildItem.builder(AccessTokenRequestFilter.class, JsonWebTokenRequestFilter.class)
.reason(getClass().getName())
.methods().fields().build());
if (config.registerFilter()) {
Class<?> filterClass = config.jsonWebToken() ? JsonWebTokenRequestFilter.class : AccessTokenRequestFilter.class;
jaxrsProviders.produce(new ResteasyJaxrsProviderBuildItem(filterClass.getName()));
} else {
restAnnotationProvider.produce(new RestClientAnnotationProviderBuildItem(JWT_ACCESS_TOKEN_CREDENTIAL,
JsonWebTokenRequestFilter.class));
if (!accessTokenInstances.isEmpty()) {
var filterGenerator = new AccessTokenRequestFilterGenerator(unremovableBeanProducer, reflectiveClass,
generatedBeanProducer, AccessTokenRequestFilter.class);
for (AccessTokenInstanceBuildItem instance : accessTokenInstances) {
String providerClass = filterGenerator.generateClass(instance);
providerPredicateProducer.produce(new RestClientPredicateProviderBuildItem(providerClass,
ci -> instance.targetClass().equals(ci.name().toString())));
}
}
}
}
@BuildStep(onlyIf = IsEnabledDuringAuth.class)
SystemPropertyBuildItem activateTokenCredentialPropagationViaDuplicatedContext(Capabilities capabilities) {
if (capabilities.isPresent(Capability.OIDC)) {
return new SystemPropertyBuildItem(OIDC_PROPAGATE_TOKEN_CREDENTIAL, "true");
}
if (capabilities.isPresent(Capability.JWT)) {
return new SystemPropertyBuildItem(JWT_PROPAGATE_TOKEN_CREDENTIAL, "true");
}
throw new ConfigurationException(
"Configuration property 'quarkus.resteasy-client-oidc-token-propagation.enabled-during-authentication' is set to "
+
"'true', however this configuration property is only supported when either 'quarkus-oidc' or " +
"'quarkus-smallrye-jwt' extensions are present.");
}
public static | OidcTokenPropagationBuildStep |
java | apache__camel | components/camel-milo/src/main/java/org/apache/camel/component/milo/client/internal/SubscriptionManager.java | {
"start": 4933,
"end": 5659
} | class ____ implements OpcUaSubscription.SubscriptionListener {
@Override
public void onStatusChanged(final OpcUaSubscription subscription, final StatusCode status) {
LOG.info("Subscription status changed {} : {}", subscription.getSubscriptionId(), status);
}
@Override
public void onTransferFailed(final OpcUaSubscription subscription, final StatusCode statusCode) {
LOG.info("Transfer failed {} : {}", subscription.getSubscriptionId(), statusCode);
// we simply tear it down and build it up again
handleConnectionFailure(new RuntimeCamelException("Subscription failed to reconnect"));
}
}
public | SubscriptionListenerImpl |
java | netty__netty | transport-sctp/src/main/java/com/sun/nio/sctp/SctpStandardSocketOptions.java | {
"start": 725,
"end": 1630
} | class ____ {
static {
UnsupportedOperatingSystemException.raise();
}
public static final SctpSocketOption<Boolean> SCTP_DISABLE_FRAGMENTS = null;
public static final SctpSocketOption<Boolean> SCTP_EXPLICIT_COMPLETE = null;
public static final SctpSocketOption<Integer> SCTP_FRAGMENT_INTERLEAVE = null;
public static final SctpSocketOption<InitMaxStreams> SCTP_INIT_MAXSTREAMS = null;
public static final SctpSocketOption<Boolean> SCTP_NODELAY = null;
public static final SctpSocketOption<SocketAddress> SCTP_PRIMARY_ADDR = null;
public static final SctpSocketOption<SocketAddress> SCTP_SET_PEER_PRIMARY_ADDR = null;
public static final SctpSocketOption<Integer> SO_LINGER = null;
public static final SctpSocketOption<Integer> SO_RCVBUF = null;
public static final SctpSocketOption<Integer> SO_SNDBUF = null;
public static | SctpStandardSocketOptions |
java | apache__camel | core/camel-api/src/main/java/org/apache/camel/resume/ResumeActionAware.java | {
"start": 857,
"end": 1382
} | interface ____ adapters and other resume-related code to allow them to offer a way to set actions to be
* executed during the resume process. This is most likely to be used in situations where the resume adapter does not
* have the information required to resume because the resume logic is too broad (i.e.: a database component trying to
* resume operations cannot know in advance what is the SQL to be executed).
*
* This provides a way for integrations to inject that part of the logic into the resume API.
*/
public | for |
java | spring-projects__spring-boot | configuration-metadata/spring-boot-configuration-processor/src/test/java/org/springframework/boot/configurationsample/immutable/ImmutableCollectionProperties.java | {
"start": 954,
"end": 1378
} | class ____ {
private final List<String> names;
private final List<Boolean> flags;
private final List<Duration> durations;
public ImmutableCollectionProperties(List<String> names, @TestDefaultValue({ "true", "false" }) List<Boolean> flags,
@TestDefaultValue({ "10s", "1m", "1h" }) List<Duration> durations) {
this.names = names;
this.flags = flags;
this.durations = durations;
}
}
| ImmutableCollectionProperties |
java | elastic__elasticsearch | x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/generator/QueryExecutor.java | {
"start": 537,
"end": 1250
} | interface ____ {
/**
* Execute the given command, returning the results.
* The depth is used to avoid infinite loops when commands generate sub-queries that are executed.
* @param query The command to execute
* @param depth Represents the number of iterations executed in current generative test sequence.
* It does not always correspond to the number of commands in the query, because some
* command generators may generate more than one command at a time.
* This value has to be passed to the resulting QueryExecuted.
* @return The results of the execution
*/
QueryExecuted execute(String query, int depth);
}
| QueryExecutor |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/EnableNameserviceRequestPBImpl.java | {
"start": 1595,
"end": 2673
} | class ____ extends EnableNameserviceRequest
implements PBRecord {
private FederationProtocolPBTranslator<EnableNameserviceRequestProto,
Builder, EnableNameserviceRequestProtoOrBuilder> translator =
new FederationProtocolPBTranslator<>(
EnableNameserviceRequestProto.class);
public EnableNameserviceRequestPBImpl() {
}
public EnableNameserviceRequestPBImpl(EnableNameserviceRequestProto proto) {
this.translator.setProto(proto);
}
@Override
public EnableNameserviceRequestProto getProto() {
return translator.build();
}
@Override
public void setProto(Message proto) {
this.translator.setProto(proto);
}
@Override
public void readInstance(String base64String) throws IOException {
this.translator.readInstance(base64String);
}
@Override
public String getNameServiceId() {
return this.translator.getProtoOrBuilder().getNameServiceId();
}
@Override
public void setNameServiceId(String nsId) {
this.translator.getBuilder().setNameServiceId(nsId);
}
}
| EnableNameserviceRequestPBImpl |
java | spring-projects__spring-boot | core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/task/TaskExecutorConfigurations.java | {
"start": 9369,
"end": 9514
} | class ____ {
}
@ConditionalOnProperty(value = "spring.task.execution.mode", havingValue = "force")
private static final | ExecutorBeanCondition |
java | apache__camel | components/camel-snakeyaml/src/test/java/org/apache/camel/component/snakeyaml/SnakeYAMLTest.java | {
"start": 1666,
"end": 3321
} | class ____ extends CamelTestSupport {
@Parameter
private SnakeYAMLDataFormat format;
@Parameter(1)
private Object body;
@Parameter(2)
private String expected;
@Parameters
public static Collection yamlCases() {
return Arrays.asList(new Object[][] {
{
createDataFormat(null),
createTestMap(),
"{name: Camel}"
},
{
createDataFormat(TestPojo.class),
createTestPojo(),
"!!org.apache.camel.component.snakeyaml.model.TestPojo {name: Camel}"
},
{
createPrettyFlowDataFormat(TestPojo.class, true),
createTestPojo(),
"!!org.apache.camel.component.snakeyaml.model.TestPojo {\n name: Camel\n}"
}
});
}
@Test
public void testMarshalAndUnmarshal() throws Exception {
SnakeYAMLTestHelper.marshalAndUnmarshal(
context(),
body,
"mock:reverse",
"direct:in",
"direct:back",
expected);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:in")
.marshal(format);
from("direct:back")
.unmarshal(format)
.to("mock:reverse");
}
};
}
}
| SnakeYAMLTest |
java | apache__flink | flink-kubernetes/src/test/java/org/apache/flink/kubernetes/highavailability/KubernetesHighAvailabilityTestBase.java | {
"start": 3432,
"end": 8942
} | class ____ {
private final KubernetesTestFixture kubernetesTestFixture;
final String componentId;
final String leaderAddress;
final LeaderElectionDriver leaderElectionDriver;
final TestingLeaderElectionListener electionEventHandler;
final LeaderRetrievalDriver leaderRetrievalDriver;
final TestingLeaderRetrievalEventHandler retrievalEventHandler;
final FlinkKubeClient flinkKubeClient;
final Configuration configuration;
final CompletableFuture<Void> closeKubeClientFuture;
Context() throws Exception {
kubernetesTestFixture =
new KubernetesTestFixture(CLUSTER_ID, LEADER_CONFIGMAP_NAME, LOCK_IDENTITY);
final UUID randomTestID = UUID.randomUUID();
componentId = "random-component-id-" + randomTestID;
leaderAddress = "random-address-" + randomTestID;
flinkKubeClient = kubernetesTestFixture.getFlinkKubeClient();
configuration = kubernetesTestFixture.getConfiguration();
closeKubeClientFuture = kubernetesTestFixture.getCloseKubeClientFuture();
electionEventHandler = new TestingLeaderElectionListener();
leaderElectionDriver = createLeaderElectionDriver();
retrievalEventHandler = new TestingLeaderRetrievalEventHandler();
leaderRetrievalDriver = createLeaderRetrievalDriver();
}
void runTest(RunnableWithException testMethod) throws Exception {
try {
testMethod.run();
} finally {
leaderElectionDriver.close();
leaderRetrievalDriver.close();
kubernetesTestFixture.close();
electionEventHandler.failIfErrorEventHappened();
}
}
TestingFlinkKubeClient.Builder createFlinkKubeClientBuilder() {
return kubernetesTestFixture.createFlinkKubeClientBuilder();
}
String getClusterId() {
return CLUSTER_ID;
}
KubernetesConfigMap getLeaderConfigMap() {
return kubernetesTestFixture.getLeaderConfigMap();
}
String getLeaderInformationKey() {
return KubernetesUtils.createSingleLeaderKey(componentId);
}
Optional<LeaderInformation> getLeaderInformationFromConfigMap() {
return KubernetesUtils.parseLeaderInformationSafely(
getLeaderConfigMap().getData().get(getLeaderInformationKey()));
}
void putLeaderInformationIntoConfigMap(UUID leaderSessionID, String leaderAddress) {
getLeaderConfigMap()
.getData()
.put(
getLeaderInformationKey(),
KubernetesUtils.encodeLeaderInformation(
LeaderInformation.known(leaderSessionID, leaderAddress)));
}
void grantLeadership() throws Exception {
kubernetesTestFixture.leaderCallbackGrantLeadership();
}
// Use the leader callback to manually grant leadership
UUID leaderCallbackGrantLeadership() throws Exception {
grantLeadership();
electionEventHandler.await(LeaderElectionEvent.IsLeaderEvent.class);
final UUID leaderSessionID = UUID.randomUUID();
leaderElectionDriver.publishLeaderInformation(
componentId, LeaderInformation.known(leaderSessionID, leaderAddress));
return leaderSessionID;
}
FlinkKubeClient.WatchCallbackHandler<KubernetesConfigMap>
getLeaderElectionConfigMapCallback() throws Exception {
return kubernetesTestFixture.getLeaderElectionConfigMapCallback();
}
FlinkKubeClient.WatchCallbackHandler<KubernetesConfigMap>
getLeaderRetrievalConfigMapCallback() throws Exception {
return kubernetesTestFixture.getLeaderRetrievalConfigMapCallback();
}
KubernetesLeaderElector.LeaderCallbackHandler getLeaderCallback() throws Exception {
return kubernetesTestFixture.getLeaderCallback();
}
private LeaderElectionDriver createLeaderElectionDriver() throws Exception {
final KubernetesLeaderElectionConfiguration leaderConfig =
new KubernetesLeaderElectionConfiguration(
LEADER_CONFIGMAP_NAME, LOCK_IDENTITY, configuration);
final KubernetesLeaderElectionDriverFactory factory =
new KubernetesLeaderElectionDriverFactory(
flinkKubeClient,
leaderConfig,
kubernetesTestFixture.getConfigMapSharedWatcher(),
watchCallbackExecutorService);
return factory.create(electionEventHandler);
}
private LeaderRetrievalDriver createLeaderRetrievalDriver() {
final KubernetesLeaderRetrievalDriverFactory factory =
new KubernetesLeaderRetrievalDriverFactory(
kubernetesTestFixture.getConfigMapSharedWatcher(),
watchCallbackExecutorService,
LEADER_CONFIGMAP_NAME,
componentId);
return factory.createLeaderRetrievalDriver(
retrievalEventHandler, retrievalEventHandler::handleError);
}
}
}
| Context |
java | google__guava | android/guava-tests/test/com/google/common/collect/BenchmarkHelpers.java | {
"start": 5672,
"end": 7836
} | enum ____ implements MapsImplEnum {
HashMapImpl {
@Override
public <K extends Comparable<K>, V> Map<K, V> create(Map<K, V> map) {
return new HashMap<>(map);
}
},
LinkedHashMapImpl {
@Override
public <K extends Comparable<K>, V> Map<K, V> create(Map<K, V> map) {
return new LinkedHashMap<>(map);
}
},
ConcurrentHashMapImpl {
@Override
public <K extends Comparable<K>, V> Map<K, V> create(Map<K, V> map) {
return new ConcurrentHashMap<>(map);
}
},
ImmutableMapImpl {
@Override
public <K extends Comparable<K>, V> Map<K, V> create(Map<K, V> map) {
return ImmutableMap.copyOf(map);
}
},
MapMakerStrongKeysStrongValues {
@Override
public <K extends Comparable<K>, V> Map<K, V> create(Map<K, V> map) {
// We use a "custom" equivalence to force MapMaker to make a MapMakerInternalMap.
ConcurrentMap<K, V> newMap = new MapMaker().keyEquivalence(Equivalence.equals()).makeMap();
checkState(newMap instanceof MapMakerInternalMap);
newMap.putAll(map);
return newMap;
}
},
MapMakerStrongKeysWeakValues {
@Override
public <K extends Comparable<K>, V> Map<K, V> create(Map<K, V> map) {
ConcurrentMap<K, V> newMap = new MapMaker().weakValues().makeMap();
checkState(newMap instanceof MapMakerInternalMap);
newMap.putAll(map);
return newMap;
}
},
MapMakerWeakKeysStrongValues {
@Override
public <K extends Comparable<K>, V> Map<K, V> create(Map<K, V> map) {
ConcurrentMap<K, V> newMap = new MapMaker().weakKeys().makeMap();
checkState(newMap instanceof MapMakerInternalMap);
newMap.putAll(map);
return newMap;
}
},
MapMakerWeakKeysWeakValues {
@Override
public <K extends Comparable<K>, V> Map<K, V> create(Map<K, V> map) {
ConcurrentMap<K, V> newMap = new MapMaker().weakKeys().weakValues().makeMap();
checkState(newMap instanceof MapMakerInternalMap);
newMap.putAll(map);
return newMap;
}
};
}
| MapImpl |
java | hibernate__hibernate-orm | hibernate-testing/src/main/java/org/hibernate/testing/orm/junit/DialectFeatureChecks.java | {
"start": 26983,
"end": 27340
} | class ____ implements DialectFeatureCheck {
public boolean apply(Dialect dialect) {
try {
dialect.getAggregateSupport().requiresAggregateCustomWriteExpressionRenderer( SqlTypes.JSON );
return true;
}
catch (UnsupportedOperationException | IllegalArgumentException e) {
return false;
}
}
}
public static | SupportsJsonComponentUpdate |
java | apache__camel | components/camel-dataset/src/main/java/org/apache/camel/component/dataset/DataSetConstants.java | {
"start": 934,
"end": 1141
} | class ____ {
@Metadata(description = "The dataset index", javaType = "Long")
public static final String DATASET_INDEX = Exchange.DATASET_INDEX;
private DataSetConstants() {
}
}
| DataSetConstants |
java | qos-ch__slf4j | jcl-over-slf4j/src/main/java/org/apache/commons/logging/impl/SLF4JLogFactory.java | {
"start": 1706,
"end": 3783
} | class ____ extends LogFactory {
// ----------------------------------------------------------- Constructors
/**
* The {@link org.apache.commons.logging.Log}instances that have already been
* created, keyed by logger name.
*/
ConcurrentMap<String, Log> loggerMap;
/**
* Public no-arguments constructor required by the lookup mechanism.
*/
public SLF4JLogFactory() {
loggerMap = new ConcurrentHashMap<>();
}
// ----------------------------------------------------- Manifest Constants
/**
* The name of the system property identifying our {@link Log}implementation
* class.
*/
public static final String LOG_PROPERTY = "org.apache.commons.logging.Log";
// ----------------------------------------------------- Instance Variables
/**
* Configuration attributes.
*/
protected Hashtable attributes = new Hashtable();
// --------------------------------------------------------- Public Methods
/**
* Return the configuration attribute with the specified name (if any), or
* <code>null</code> if there is no such attribute.
*
* @param name
* Name of the attribute to return
*/
public Object getAttribute(String name) {
return (attributes.get(name));
}
/**
* Return an array containing the names of all currently defined configuration
* attributes. If there are no such attributes, a zero length array is
* returned.
*/
@SuppressWarnings("unchecked")
public String[] getAttributeNames() {
List<String> names = new ArrayList<>();
Enumeration<String> keys = attributes.keys();
while (keys.hasMoreElements()) {
names.add((String) keys.nextElement());
}
String[] results = new String[names.size()];
for (int i = 0; i < results.length; i++) {
results[i] = (String) names.get(i);
}
return (results);
}
/**
* Convenience method to derive a name from the specified | SLF4JLogFactory |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.