language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java
|
{
"start": 2909,
"end": 4264
}
|
class ____ implements Releasable {
/**
* This method will collect the doc and then either return itself or a new CollectionPhase
* It is responsible for determining when a phase is over and what phase will run next
*/
abstract CollectionPhase collectValue(LeafBucketCollector sub, int doc, double val) throws IOException;
/**
* @return the final number of buckets that will be used
* If this is not the final phase, then an instance of the next phase is created and it is asked for this answer.
*/
abstract int finalNumBuckets();
/**
* If this CollectionPhase is the final phase then this method will build and return the i'th bucket
* Otherwise, it will create an instance of the next phase and ask it for the i'th bucket (naturally, if that phase
* not the last phase then it will do the same and so on...)
*/
abstract InternalVariableWidthHistogram.Bucket buildBucket(int bucketOrd, InternalAggregations subAggregations) throws IOException;
}
/**
* Phase 1: Build up a buffer of docs (i.e. give each new doc its own bucket). No clustering decisions are made here.
* Building this buffer lets us analyze the distribution of the data before we begin clustering.
*/
private
|
CollectionPhase
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/common/util/LongArray.java
|
{
"start": 705,
"end": 1804
}
|
interface ____ extends BigArray, Writeable {
static LongArray readFrom(StreamInput in) throws IOException {
return new ReleasableLongArray(in);
}
/**
* Get an element given its index.
*/
long get(long index);
/**
* Set a value at the given index and return the previous value.
*/
long getAndSet(long index, long value);
/**
* Set a value at the given index.
*/
void set(long index, long value);
/**
* Increment value at the given index by <code>inc</code> and return the value.
*/
long increment(long index, long inc);
/**
* Fill slots between <code>fromIndex</code> inclusive to <code>toIndex</code> exclusive with <code>value</code>.
*/
void fill(long fromIndex, long toIndex, long value);
/**
* Alternative of {@link #readFrom(StreamInput)} where the written bytes are loaded into an existing {@link LongArray}
*/
void fillWith(StreamInput in) throws IOException;
/**
* Bulk set.
*/
void set(long index, byte[] buf, int offset, int len);
}
|
LongArray
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/web/servlet/samples/client/standalone/ExceptionHandlerTests.java
|
{
"start": 5923,
"end": 6207
}
|
class ____ {
@ExceptionHandler
Error handleException(Throwable exception) {
return new Error("globalPersonController - " + exception.getClass().getSimpleName());
}
}
@RestControllerAdvice
@Order(Ordered.LOWEST_PRECEDENCE)
private static
|
RestPersonControllerExceptionHandler
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/SysPropsForTestsLoader.java
|
{
"start": 1007,
"end": 2686
}
|
class ____ {
public static final String TEST_PROPERTIES_PROP = "test.properties";
static {
try {
String testFileName = System.getProperty(TEST_PROPERTIES_PROP, "test.properties");
File currentDir = new File(testFileName).getAbsoluteFile().getParentFile();
File testFile = new File(currentDir, testFileName);
while (currentDir != null && !testFile.exists()) {
testFile = new File(testFile.getAbsoluteFile().getParentFile().getParentFile(), testFileName);
currentDir = currentDir.getParentFile();
if (currentDir != null) {
testFile = new File(currentDir, testFileName);
}
}
if (testFile.exists()) {
System.out.println();
System.out.println(">>> " + TEST_PROPERTIES_PROP + " : " + testFile.getAbsolutePath());
Properties testProperties = new Properties();
testProperties.load(new FileReader(testFile));
for (Map.Entry entry : testProperties.entrySet()) {
if (!System.getProperties().containsKey(entry.getKey())) {
System.setProperty((String) entry.getKey(), (String) entry.getValue());
}
}
} else if (System.getProperty(TEST_PROPERTIES_PROP) != null) {
System.err.println(MessageFormat.format("Specified 'test.properties' file does not exist [{0}]",
System.getProperty(TEST_PROPERTIES_PROP)));
System.exit(-1);
} else {
System.out.println(">>> " + TEST_PROPERTIES_PROP + " : <NONE>");
}
} catch (IOException ex) {
throw new RuntimeException(ex);
}
}
public static void init() {
}
}
|
SysPropsForTestsLoader
|
java
|
google__error-prone
|
check_api/src/main/java/com/google/errorprone/bugpatterns/BugChecker.java
|
{
"start": 21115,
"end": 21237
}
|
interface ____ extends Suppressible {
Description matchTry(TryTree tree, VisitorState state);
}
public
|
TryTreeMatcher
|
java
|
apache__camel
|
components/camel-whatsapp/src/main/java/org/apache/camel/component/whatsapp/model/TextMessage.java
|
{
"start": 1023,
"end": 1468
}
|
class ____ {
@JsonProperty("preview_url")
private boolean previewUrl;
private String body;
public TextMessage() {
}
public boolean isPreviewUrl() {
return previewUrl;
}
public void setPreviewUrl(boolean previewUrl) {
this.previewUrl = previewUrl;
}
public String getBody() {
return body;
}
public void setBody(String body) {
this.body = body;
}
}
|
TextMessage
|
java
|
quarkusio__quarkus
|
independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/interceptors/bindings/conflicting/ConflictingStereotypeBindingOnBeanTest.java
|
{
"start": 2003,
"end": 2094
}
|
interface ____ {
}
@Stereotype1
@Stereotype2
@Dependent
static
|
Stereotype2
|
java
|
apache__kafka
|
streams/src/main/java/org/apache/kafka/streams/StreamsConfig.java
|
{
"start": 38090,
"end": 43832
}
|
class ____ that implements the <code>org.apache.kafka.streams.state.ProcessorWrapper</code> interface. "
+ "Must be passed in to the StreamsBuilder or Topology constructor in order to take effect";
/** {@code repartition.purge.interval.ms} */
@SuppressWarnings("WeakerAccess")
public static final String REPARTITION_PURGE_INTERVAL_MS_CONFIG = "repartition.purge.interval.ms";
private static final String REPARTITION_PURGE_INTERVAL_MS_DOC = "The frequency in milliseconds with which to delete fully consumed records from repartition topics." +
" Purging will occur after at least this value since the last purge, but may be delayed until later." +
" (Note, unlike <code>commit.interval.ms</code>, the default for this value remains unchanged when <code>processing.guarantee</code> is set to <code>" + EXACTLY_ONCE_V2 + "</code>).";
/** {@code receive.buffer.bytes} */
@SuppressWarnings("WeakerAccess")
public static final String RECEIVE_BUFFER_CONFIG = CommonClientConfigs.RECEIVE_BUFFER_CONFIG;
/** {@code rack.aware.assignment.non_overlap_cost} */
@SuppressWarnings("WeakerAccess")
public static final String RACK_AWARE_ASSIGNMENT_NON_OVERLAP_COST_CONFIG = "rack.aware.assignment.non_overlap_cost";
@Deprecated
public static final String RACK_AWARE_ASSIGNMENT_NON_OVERLAP_COST_DOC = "Cost associated with moving tasks from existing assignment. This config and <code>rack.aware.assignment.traffic_cost</code> controls whether the "
+ "optimization algorithm favors minimizing cross rack traffic or minimize the movement of tasks in existing assignment. If set a larger value <code>" + RackAwareTaskAssignor.class.getName() + "</code> will "
+ "optimize to maintain the existing assignment. The default value is null which means it will use default non_overlap cost values in different assignors.";
/** {@code rack.aware.assignment.strategy} */
@SuppressWarnings("WeakerAccess")
public static final String RACK_AWARE_ASSIGNMENT_STRATEGY_CONFIG = "rack.aware.assignment.strategy";
@Deprecated
public static final String RACK_AWARE_ASSIGNMENT_STRATEGY_DOC = "The strategy we use for rack aware assignment. Rack aware assignment will take <code>client.rack</code> and <code>racks</code> of <code>TopicPartition</code> into account when assigning"
+ " tasks to minimize cross rack traffic. Valid settings are : <code>" + RACK_AWARE_ASSIGNMENT_STRATEGY_NONE + "</code> (default), which will disable rack aware assignment; <code>" + RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC
+ "</code>, which will compute minimum cross rack traffic assignment; <code>" + RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY + "</code>, which will compute minimum cross rack traffic and try to balance the tasks of same subtopologies across different clients";
/** {@code rack.aware.assignment.tags} */
@SuppressWarnings("WeakerAccess")
public static final String RACK_AWARE_ASSIGNMENT_TAGS_CONFIG = "rack.aware.assignment.tags";
private static final String RACK_AWARE_ASSIGNMENT_TAGS_DOC = "List of client tag keys used to distribute standby replicas across Kafka Streams instances." +
" When configured, Kafka Streams will make a best-effort to distribute" +
" the standby tasks over each client tag dimension.";
/** {@code rack.aware.assignment.traffic_cost} */
@SuppressWarnings("WeakerAccess")
public static final String RACK_AWARE_ASSIGNMENT_TRAFFIC_COST_CONFIG = "rack.aware.assignment.traffic_cost";
@Deprecated
public static final String RACK_AWARE_ASSIGNMENT_TRAFFIC_COST_DOC = "Cost associated with cross rack traffic. This config and <code>rack.aware.assignment.non_overlap_cost</code> controls whether the "
+ "optimization algorithm favors minimizing cross rack traffic or minimize the movement of tasks in existing assignment. If set a larger value <code>" + RackAwareTaskAssignor.class.getName() + "</code> will "
+ "optimize for minimizing cross rack traffic. The default value is null which means it will use default traffic cost values in different assignors.";
/** {@code reconnect.backoff.ms} */
@SuppressWarnings("WeakerAccess")
public static final String RECONNECT_BACKOFF_MS_CONFIG = CommonClientConfigs.RECONNECT_BACKOFF_MS_CONFIG;
/** {@code reconnect.backoff.max} */
@SuppressWarnings("WeakerAccess")
public static final String RECONNECT_BACKOFF_MAX_MS_CONFIG = CommonClientConfigs.RECONNECT_BACKOFF_MAX_MS_CONFIG;
/** {@code replication.factor} */
@SuppressWarnings("WeakerAccess")
public static final String REPLICATION_FACTOR_CONFIG = "replication.factor";
private static final String REPLICATION_FACTOR_DOC = "The replication factor for change log topics and repartition topics created by the stream processing application."
+ " The default of <code>-1</code> (meaning: use broker default replication factor) requires broker version 2.4 or newer";
/** {@code request.timeout.ms} */
@SuppressWarnings("WeakerAccess")
public static final String REQUEST_TIMEOUT_MS_CONFIG = CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG;
/** {@code retry.backoff.ms} */
@SuppressWarnings("WeakerAccess")
public static final String RETRY_BACKOFF_MS_CONFIG = CommonClientConfigs.RETRY_BACKOFF_MS_CONFIG;
/** {@code rocksdb.config.setter} */
@SuppressWarnings("WeakerAccess")
public static final String ROCKSDB_CONFIG_SETTER_CLASS_CONFIG = "rocksdb.config.setter";
private static final String ROCKSDB_CONFIG_SETTER_CLASS_DOC = "A Rocks DB config setter
|
name
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/bugs/_2505/Issue2505Mapper.java
|
{
"start": 582,
"end": 804
}
|
class ____ {
private Status status;
public Status getStatus() {
return status;
}
public void setStatus(Status stat) {
this.status = stat;
}
}
|
Customer
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundToLongBinarySearchEvaluator.java
|
{
"start": 4044,
"end": 4757
}
|
class ____ implements EvalOperator.ExpressionEvaluator.Factory {
private final Source source;
private final EvalOperator.ExpressionEvaluator.Factory field;
private final long[] points;
public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory field, long[] points) {
this.source = source;
this.field = field;
this.points = points;
}
@Override
public RoundToLongBinarySearchEvaluator get(DriverContext context) {
return new RoundToLongBinarySearchEvaluator(source, field.get(context), points, context);
}
@Override
public String toString() {
return "RoundToLongBinarySearchEvaluator[" + "field=" + field + "]";
}
}
}
|
Factory
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/api/records/TestIds.java
|
{
"start": 1266,
"end": 5078
}
|
class ____ {
@Test
public void testJobId() {
long ts1 = 1315890136000l;
long ts2 = 1315890136001l;
JobId j1 = createJobId(ts1, 2);
JobId j2 = createJobId(ts1, 1);
JobId j3 = createJobId(ts2, 1);
JobId j4 = createJobId(ts1, 2);
assertEquals(j1, j4);
assertNotEquals(j1, j2);
assertNotEquals(j1, j3);
assertTrue(j1.compareTo(j4) == 0);
assertTrue(j1.compareTo(j2) > 0);
assertTrue(j1.compareTo(j3) < 0);
assertTrue(j1.hashCode() == j4.hashCode());
assertFalse(j1.hashCode() == j2.hashCode());
assertFalse(j1.hashCode() == j3.hashCode());
JobId j5 = createJobId(ts1, 231415);
assertEquals("job_" + ts1 + "_0002", j1.toString());
assertEquals("job_" + ts1 + "_231415", j5.toString());
}
@Test
public void testTaskId() {
long ts1 = 1315890136000l;
long ts2 = 1315890136001l;
TaskId t1 = createTaskId(ts1, 1, 2, TaskType.MAP);
TaskId t2 = createTaskId(ts1, 1, 2, TaskType.REDUCE);
TaskId t3 = createTaskId(ts1, 1, 1, TaskType.MAP);
TaskId t4 = createTaskId(ts1, 1, 2, TaskType.MAP);
TaskId t5 = createTaskId(ts2, 1, 1, TaskType.MAP);
assertEquals(t1, t4);
assertNotEquals(t1, t2);
assertNotEquals(t1, t3);
assertNotEquals(t1, t5);
assertTrue(t1.compareTo(t4) == 0);
assertTrue(t1.compareTo(t2) < 0);
assertTrue(t1.compareTo(t3) > 0);
assertTrue(t1.compareTo(t5) < 0);
assertTrue(t1.hashCode() == t4.hashCode());
assertFalse(t1.hashCode() == t2.hashCode());
assertFalse(t1.hashCode() == t3.hashCode());
assertFalse(t1.hashCode() == t5.hashCode());
TaskId t6 = createTaskId(ts1, 324151, 54643747, TaskType.REDUCE);
assertEquals("task_" + ts1 + "_0001_m_000002", t1.toString());
assertEquals("task_" + ts1 + "_324151_r_54643747", t6.toString());
}
@Test
public void testTaskAttemptId() {
long ts1 = 1315890136000l;
long ts2 = 1315890136001l;
TaskAttemptId t1 = createTaskAttemptId(ts1, 2, 2, TaskType.MAP, 2);
TaskAttemptId t2 = createTaskAttemptId(ts1, 2, 2, TaskType.REDUCE, 2);
TaskAttemptId t3 = createTaskAttemptId(ts1, 2, 2, TaskType.MAP, 3);
TaskAttemptId t4 = createTaskAttemptId(ts1, 2, 2, TaskType.MAP, 1);
TaskAttemptId t5 = createTaskAttemptId(ts1, 2, 1, TaskType.MAP, 3);
TaskAttemptId t6 = createTaskAttemptId(ts1, 2, 2, TaskType.MAP, 2);
assertEquals(t1, t6);
assertNotEquals(t1, t2);
assertNotEquals(t1, t3);
assertNotEquals(t1, t5);
assertTrue(t1.compareTo(t6) == 0);
assertTrue(t1.compareTo(t2) < 0);
assertTrue(t1.compareTo(t3) < 0);
assertTrue(t1.compareTo(t4) > 0);
assertTrue(t1.compareTo(t5) > 0);
assertTrue(t1.hashCode() == t6.hashCode());
assertFalse(t1.hashCode() == t2.hashCode());
assertFalse(t1.hashCode() == t3.hashCode());
assertFalse(t1.hashCode() == t5.hashCode());
TaskAttemptId t7 =
createTaskAttemptId(ts2, 5463346, 4326575, TaskType.REDUCE, 54375);
assertEquals("attempt_" + ts1 + "_0002_m_000002_2", t1.toString());
assertEquals("attempt_" + ts2 + "_5463346_r_4326575_54375", t7.toString());
}
private JobId createJobId(long clusterTimestamp, int idInt) {
return MRBuilderUtils.newJobId(
ApplicationId.newInstance(clusterTimestamp, idInt), idInt);
}
private TaskId createTaskId(long clusterTimestamp, int jobIdInt,
int taskIdInt, TaskType taskType) {
return MRBuilderUtils.newTaskId(createJobId(clusterTimestamp, jobIdInt),
taskIdInt, taskType);
}
private TaskAttemptId createTaskAttemptId(long clusterTimestamp,
int jobIdInt, int taskIdInt, TaskType taskType, int taskAttemptIdInt) {
return MRBuilderUtils.newTaskAttemptId(
createTaskId(clusterTimestamp, jobIdInt, taskIdInt, taskType),
taskAttemptIdInt);
}
}
|
TestIds
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/PushSorter.java
|
{
"start": 986,
"end": 1313
}
|
interface ____<E> extends Sorter<E> {
/** Writers a new record to the sorter. */
void writeRecord(E record) throws IOException, InterruptedException;
/**
* Finalizes the sorting. The method {@link #getIterator()} will not complete until this method
* is called.
*/
void finishReading();
}
|
PushSorter
|
java
|
quarkusio__quarkus
|
independent-projects/arc/runtime/src/main/java/io/quarkus/arc/impl/AbstractSharedContext.java
|
{
"start": 365,
"end": 4233
}
|
class ____ implements InjectableContext, InjectableContext.ContextState {
protected final ContextInstances instances;
public AbstractSharedContext() {
this(new ComputingCacheContextInstances());
}
public AbstractSharedContext(ContextInstances instances) {
this.instances = Objects.requireNonNull(instances);
}
@SuppressWarnings("unchecked")
@Override
public <T> T get(Contextual<T> contextual, CreationalContext<T> creationalContext) {
Objects.requireNonNull(contextual, "Contextual must not be null");
Objects.requireNonNull(creationalContext, "CreationalContext must not be null");
InjectableBean<T> bean = (InjectableBean<T>) contextual;
if (!Scopes.scopeMatches(this, bean)) {
throw Scopes.scopeDoesNotMatchException(this, bean);
}
return (T) instances.computeIfAbsent(bean.getIdentifier(), new Supplier<ContextInstanceHandle<?>>() {
@Override
public ContextInstanceHandle<?> get() {
return createInstanceHandle(bean, creationalContext);
}
}).get();
}
@SuppressWarnings("unchecked")
@Override
public <T> T get(Contextual<T> contextual) {
Objects.requireNonNull(contextual, "Contextual must not be null");
InjectableBean<T> bean = (InjectableBean<T>) contextual;
if (!Scopes.scopeMatches(this, bean)) {
throw Scopes.scopeDoesNotMatchException(this, bean);
}
ContextInstanceHandle<?> handle = instances.getIfPresent(bean.getIdentifier());
return handle != null ? (T) handle.get() : null;
}
@Override
public ContextState getState() {
return this;
}
@Override
public ContextState getStateIfActive() {
return this;
}
@Override
public Map<InjectableBean<?>, Object> getContextualInstances() {
return instances.getAllPresent().stream()
.collect(Collectors.toUnmodifiableMap(ContextInstanceHandle::getBean, ContextInstanceHandle::get));
}
@Override
public boolean isActive() {
return true;
}
@Override
public void destroy(Contextual<?> contextual) {
InjectableBean<?> bean = (InjectableBean<?>) contextual;
ContextInstanceHandle<?> handle = instances.remove(bean.getIdentifier());
if (handle != null) {
handle.destroy();
}
}
@Override
public synchronized void destroy() {
// Note that shared contexts are usually only destroyed when the app stops
// I.e. we don't need to use the optimized ContextInstances methods here
Set<ContextInstanceHandle<?>> values = instances.getAllPresent();
if (values.isEmpty()) {
return;
}
// Destroy the producers first
for (Iterator<ContextInstanceHandle<?>> it = values.iterator(); it.hasNext();) {
ContextInstanceHandle<?> instanceHandle = it.next();
if (instanceHandle.getBean().getDeclaringBean() != null) {
instanceHandle.destroy();
it.remove();
}
}
for (ContextInstanceHandle<?> instanceHandle : values) {
instanceHandle.destroy();
}
instances.removeEach(null);
}
@Override
public void destroy(ContextState state) {
if (state == this) {
destroy();
} else {
throw new IllegalArgumentException("Invalid state: " + state.getClass().getName());
}
}
@SuppressWarnings({ "unchecked", "rawtypes" })
private static <T> ContextInstanceHandle createInstanceHandle(InjectableBean<T> bean,
CreationalContext<T> creationalContext) {
return new ContextInstanceHandleImpl(bean, bean.create(creationalContext), creationalContext);
}
}
|
AbstractSharedContext
|
java
|
apache__flink
|
flink-state-backends/flink-statebackend-rocksdb/src/main/java/org/apache/flink/state/rocksdb/snapshot/RocksDBFullSnapshotResources.java
|
{
"start": 2540,
"end": 11453
}
|
class ____<K> implements FullSnapshotResources<K> {
private final List<StateMetaInfoSnapshot> stateMetaInfoSnapshots;
private final ResourceGuard.Lease lease;
private final Snapshot snapshot;
private final RocksDB db;
private final List<MetaData> metaData;
/** Number of bytes in the key-group prefix. */
@Nonnegative private final int keyGroupPrefixBytes;
private final KeyGroupRange keyGroupRange;
private final TypeSerializer<K> keySerializer;
private final StreamCompressionDecorator streamCompressionDecorator;
private final List<HeapPriorityQueueStateSnapshot<?>> heapPriorityQueuesSnapshots;
public RocksDBFullSnapshotResources(
ResourceGuard.Lease lease,
Snapshot snapshot,
List<RocksDBKeyedStateBackend.RocksDbKvStateInfo> metaDataCopy,
List<HeapPriorityQueueStateSnapshot<?>> heapPriorityQueuesSnapshots,
List<StateMetaInfoSnapshot> stateMetaInfoSnapshots,
RocksDB db,
int keyGroupPrefixBytes,
KeyGroupRange keyGroupRange,
TypeSerializer<K> keySerializer,
StreamCompressionDecorator streamCompressionDecorator) {
this.lease = lease;
this.snapshot = snapshot;
this.stateMetaInfoSnapshots = stateMetaInfoSnapshots;
this.heapPriorityQueuesSnapshots = heapPriorityQueuesSnapshots;
this.db = db;
this.keyGroupPrefixBytes = keyGroupPrefixBytes;
this.keyGroupRange = keyGroupRange;
this.keySerializer = keySerializer;
this.streamCompressionDecorator = streamCompressionDecorator;
// we need to do this in the constructor, i.e. in the synchronous part of the snapshot
// TODO: better yet, we can do it outside the constructor
this.metaData = fillMetaData(metaDataCopy);
}
public static <K> RocksDBFullSnapshotResources<K> create(
final LinkedHashMap<String, RocksDBKeyedStateBackend.RocksDbKvStateInfo>
kvStateInformation,
// TODO: was it important that this is a LinkedHashMap
final Map<String, HeapPriorityQueueSnapshotRestoreWrapper<?>> registeredPQStates,
final RocksDB db,
final ResourceGuard rocksDBResourceGuard,
final KeyGroupRange keyGroupRange,
final TypeSerializer<K> keySerializer,
final int keyGroupPrefixBytes,
final StreamCompressionDecorator keyGroupCompressionDecorator)
throws IOException {
final List<StateMetaInfoSnapshot> stateMetaInfoSnapshots =
new ArrayList<>(kvStateInformation.size());
final List<RocksDBKeyedStateBackend.RocksDbKvStateInfo> metaDataCopy =
new ArrayList<>(kvStateInformation.size());
for (RocksDBKeyedStateBackend.RocksDbKvStateInfo stateInfo : kvStateInformation.values()) {
// snapshot meta info
stateMetaInfoSnapshots.add(stateInfo.metaInfo.snapshot());
metaDataCopy.add(stateInfo);
}
List<HeapPriorityQueueStateSnapshot<?>> heapPriorityQueuesSnapshots =
new ArrayList<>(registeredPQStates.size());
for (HeapPriorityQueueSnapshotRestoreWrapper<?> stateInfo : registeredPQStates.values()) {
stateMetaInfoSnapshots.add(stateInfo.getMetaInfo().snapshot());
heapPriorityQueuesSnapshots.add(stateInfo.stateSnapshot());
}
final ResourceGuard.Lease lease = rocksDBResourceGuard.acquireResource();
final Snapshot snapshot = db.getSnapshot();
return new RocksDBFullSnapshotResources<>(
lease,
snapshot,
metaDataCopy,
heapPriorityQueuesSnapshots,
stateMetaInfoSnapshots,
db,
keyGroupPrefixBytes,
keyGroupRange,
keySerializer,
keyGroupCompressionDecorator);
}
private List<MetaData> fillMetaData(
List<RocksDBKeyedStateBackend.RocksDbKvStateInfo> metaDataCopy) {
List<MetaData> metaData = new ArrayList<>(metaDataCopy.size());
for (RocksDBKeyedStateBackend.RocksDbKvStateInfo rocksDbKvStateInfo : metaDataCopy) {
StateSnapshotTransformer<byte[]> stateSnapshotTransformer = null;
if (rocksDbKvStateInfo.metaInfo instanceof RegisteredKeyValueStateBackendMetaInfo) {
stateSnapshotTransformer =
((RegisteredKeyValueStateBackendMetaInfo<?, ?>) rocksDbKvStateInfo.metaInfo)
.getStateSnapshotTransformFactory()
.createForSerializedState()
.orElse(null);
}
metaData.add(new MetaData(rocksDbKvStateInfo, stateSnapshotTransformer));
}
return metaData;
}
@Override
public KeyValueStateIterator createKVStateIterator() throws IOException {
CloseableRegistry closeableRegistry = new CloseableRegistry();
try {
ReadOptions readOptions = new ReadOptions();
closeableRegistry.registerCloseable(readOptions::close);
readOptions.setSnapshot(snapshot);
List<Tuple2<RocksIteratorWrapper, Integer>> kvStateIterators =
createKVStateIterators(closeableRegistry, readOptions);
List<SingleStateIterator> heapPriorityQueueIterators =
createHeapPriorityQueueIterators();
// Here we transfer ownership of the required resources to the
// RocksStatesPerKeyGroupMergeIterator
return new RocksStatesPerKeyGroupMergeIterator(
closeableRegistry,
kvStateIterators,
heapPriorityQueueIterators,
keyGroupPrefixBytes);
} catch (Throwable t) {
// If anything goes wrong, clean up our stuff. If things went smoothly the
// merging iterator is now responsible for closing the resources
IOUtils.closeQuietly(closeableRegistry);
throw new IOException("Error creating merge iterator", t);
}
}
private List<SingleStateIterator> createHeapPriorityQueueIterators() {
int kvStateId = metaData.size();
List<SingleStateIterator> queuesIterators =
new ArrayList<>(heapPriorityQueuesSnapshots.size());
for (HeapPriorityQueueStateSnapshot<?> queuesSnapshot : heapPriorityQueuesSnapshots) {
queuesIterators.add(
new RocksQueueIterator(
queuesSnapshot, keyGroupRange, keyGroupPrefixBytes, kvStateId++));
}
return queuesIterators;
}
private List<Tuple2<RocksIteratorWrapper, Integer>> createKVStateIterators(
CloseableRegistry closeableRegistry, ReadOptions readOptions) throws IOException {
final List<Tuple2<RocksIteratorWrapper, Integer>> kvStateIterators =
new ArrayList<>(metaData.size());
int kvStateId = 0;
for (MetaData metaDataEntry : metaData) {
RocksIteratorWrapper rocksIteratorWrapper =
createRocksIteratorWrapper(
db,
metaDataEntry.rocksDbKvStateInfo.columnFamilyHandle,
metaDataEntry.stateSnapshotTransformer,
readOptions);
kvStateIterators.add(Tuple2.of(rocksIteratorWrapper, kvStateId));
closeableRegistry.registerCloseable(rocksIteratorWrapper);
++kvStateId;
}
return kvStateIterators;
}
private static RocksIteratorWrapper createRocksIteratorWrapper(
RocksDB db,
ColumnFamilyHandle columnFamilyHandle,
StateSnapshotTransformer<byte[]> stateSnapshotTransformer,
ReadOptions readOptions) {
RocksIterator rocksIterator = db.newIterator(columnFamilyHandle, readOptions);
return stateSnapshotTransformer == null
? new RocksIteratorWrapper(rocksIterator)
: new RocksTransformingIteratorWrapper(rocksIterator, stateSnapshotTransformer);
}
@Override
public List<StateMetaInfoSnapshot> getMetaInfoSnapshots() {
return stateMetaInfoSnapshots;
}
@Override
public KeyGroupRange getKeyGroupRange() {
return keyGroupRange;
}
@Override
public TypeSerializer<K> getKeySerializer() {
return keySerializer;
}
@Override
public StreamCompressionDecorator getStreamCompressionDecorator() {
return streamCompressionDecorator;
}
@Override
public void release() {
db.releaseSnapshot(snapshot);
IOUtils.closeQuietly(snapshot);
IOUtils.closeQuietly(lease);
}
private static
|
RocksDBFullSnapshotResources
|
java
|
apache__camel
|
components/camel-sql/src/test/java/org/apache/camel/component/sql/stored/ProducerTest.java
|
{
"start": 1475,
"end": 3227
}
|
class ____ extends CamelTestSupport {
EmbeddedDatabase db;
@Override
public void doPreSetup() throws Exception {
db = new EmbeddedDatabaseBuilder()
.setName(getClass().getSimpleName())
.setType(EmbeddedDatabaseType.DERBY)
.addScript("sql/storedProcedureTest.sql").build();
}
@Override
public void doPostTearDown() throws Exception {
if (db != null) {
db.shutdown();
}
}
@Test
public void shouldExecuteStoredProcedure() throws InterruptedException {
MockEndpoint mock = getMockEndpoint("mock:query");
mock.expectedMessageCount(1);
Map<String, Object> headers = new HashMap<>();
headers.put("num1", 1);
headers.put("num2", 2);
template.requestBodyAndHeaders("direct:query", null, headers);
MockEndpoint.assertIsSatisfied(context);
Exchange exchange = mock.getExchanges().get(0);
assertEquals(Integer.valueOf(-1), exchange.getIn().getBody(Map.class).get("resultofsub"));
assertNotNull(exchange.getIn().getHeader(SqlStoredConstants.SQL_STORED_UPDATE_COUNT));
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
// required for the sql component
getContext().getComponent("sql-stored", SqlStoredComponent.class).setDataSource(db);
from("direct:query").to("sql-stored:SUBNUMBERS(INTEGER ${headers.num1},INTEGER ${headers"
+ ".num2},OUT INTEGER resultofsub)")
.to("mock:query");
}
};
}
}
|
ProducerTest
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/checkpoint/CheckpointNamingService.java
|
{
"start": 929,
"end": 1080
}
|
interface ____ {
/**
* Generate a new checkpoint Name
* @return the checkpoint name
*/
public String getNewName();
}
|
CheckpointNamingService
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/common/util/concurrent/TaskExecutionTimeTrackingEsThreadPoolExecutorTests.java
|
{
"start": 1665,
"end": 20064
}
|
class ____ extends ESTestCase {
public void testExecutionEWMACalculation() throws Exception {
ThreadContext context = new ThreadContext(Settings.EMPTY);
TaskExecutionTimeTrackingEsThreadPoolExecutor executor = new TaskExecutionTimeTrackingEsThreadPoolExecutor(
"test-threadpool",
1,
1,
1000,
TimeUnit.MILLISECONDS,
ConcurrentCollections.newBlockingQueue(),
settableWrapper(TimeUnit.NANOSECONDS.toNanos(100)),
TestEsExecutors.testOnlyDaemonThreadFactory("queuetest"),
new EsAbortPolicy(),
context,
randomBoolean()
? EsExecutors.TaskTrackingConfig.builder()
.trackOngoingTasks()
.trackExecutionTime(DEFAULT_EXECUTION_TIME_EWMA_ALPHA_FOR_TEST)
.build()
: EsExecutors.TaskTrackingConfig.builder().trackExecutionTime(DEFAULT_EXECUTION_TIME_EWMA_ALPHA_FOR_TEST).build()
);
executor.prestartAllCoreThreads();
logger.info("--> executor: {}", executor);
assertThat((long) executor.getTaskExecutionEWMA(), equalTo(0L));
assertThat(executor.getTotalTaskExecutionTime(), equalTo(0L));
// Using the settableWrapper each task would take 100ns
executeTask(executor, 1);
assertBusy(() -> {
assertThat((long) executor.getTaskExecutionEWMA(), equalTo(30L));
assertThat(executor.getTotalTaskExecutionTime(), equalTo(100L));
});
executeTask(executor, 1);
assertBusy(() -> {
assertThat((long) executor.getTaskExecutionEWMA(), equalTo(51L));
assertThat(executor.getTotalTaskExecutionTime(), equalTo(200L));
});
executeTask(executor, 1);
assertBusy(() -> {
assertThat((long) executor.getTaskExecutionEWMA(), equalTo(65L));
assertThat(executor.getTotalTaskExecutionTime(), equalTo(300L));
});
executeTask(executor, 1);
assertBusy(() -> {
assertThat((long) executor.getTaskExecutionEWMA(), equalTo(75L));
assertThat(executor.getTotalTaskExecutionTime(), equalTo(400L));
});
executeTask(executor, 1);
assertBusy(() -> {
assertThat((long) executor.getTaskExecutionEWMA(), equalTo(83L));
assertThat(executor.getTotalTaskExecutionTime(), equalTo(500L));
});
assertThat(executor.getOngoingTasks().toString(), executor.getOngoingTasks().size(), equalTo(0));
ThreadPool.terminate(executor, 10, TimeUnit.SECONDS);
}
/**
 * Verifies that we can peek at the task in front of the task queue to fetch the duration that the oldest task has been queued.
 * Tests {@link TaskExecutionTimeTrackingEsThreadPoolExecutor#peekMaxQueueLatencyInQueueMillis}.
 */
public void testFrontOfQueueLatency() throws Exception {
ThreadContext context = new ThreadContext(Settings.EMPTY);
final var barrier = new CyclicBarrier(2);
// Replace all tasks submitted to the thread pool with a configurable task that supports configuring queue latency durations and
// waiting for task execution to begin via the supplied barrier.
var adjustableTimedRunnable = new AdjustableQueueTimeWithExecutionBarrierTimedRunnable(
barrier,
// This value won't actually be used, because it is reported after a task is taken off the queue, while this test peeks at
// tasks that are still queued.
TimeUnit.MILLISECONDS.toNanos(1)
);
TaskExecutionTimeTrackingEsThreadPoolExecutor executor = new TaskExecutionTimeTrackingEsThreadPoolExecutor(
"test-threadpool",
1,
1,
1_000,
TimeUnit.MILLISECONDS,
ConcurrentCollections.newBlockingQueue(),
(runnable) -> adjustableTimedRunnable,
TestEsExecutors.testOnlyDaemonThreadFactory("queue-latency-test"),
new EsAbortPolicy(),
context,
// Ongoing-task tracking is randomized: it should not affect queue-latency peeking either way.
randomBoolean()
? EsExecutors.TaskTrackingConfig.builder()
.trackOngoingTasks()
.trackMaxQueueLatency()
.trackExecutionTime(DEFAULT_EXECUTION_TIME_EWMA_ALPHA_FOR_TEST)
.build()
: EsExecutors.TaskTrackingConfig.builder()
.trackMaxQueueLatency()
.trackExecutionTime(DEFAULT_EXECUTION_TIME_EWMA_ALPHA_FOR_TEST)
.build()
);
try {
executor.prestartAllCoreThreads();
logger.info("--> executor: {}", executor);
// Peeking when nothing is queued should return zero.
assertEquals("Zero should be returned when there is no queue", 0, executor.peekMaxQueueLatencyInQueueMillis());
// Submit two tasks into the thread pool with a single worker thread. The second one will be queued (because the pool only has
// one thread) and can be peeked at.
executor.execute(() -> {});
executor.execute(() -> {});
waitForTimeToElapse();
var frontOfQueueDuration = executor.peekMaxQueueLatencyInQueueMillis();
assertThat("Expected a task to be queued", frontOfQueueDuration, greaterThan(0L));
waitForTimeToElapse();
// The same task is still at the front of the queue, so its observed latency should keep growing.
var updatedFrontOfQueueDuration = executor.peekMaxQueueLatencyInQueueMillis();
assertThat(
"Expected a second peek to report a longer duration",
updatedFrontOfQueueDuration,
greaterThan(frontOfQueueDuration)
);
// Release the first task that's running, and wait for the second to start -- then it is ensured that the queue will be empty.
safeAwait(barrier);
safeAwait(barrier);
assertEquals("Queue should be emptied", 0, executor.peekMaxQueueLatencyInQueueMillis());
} finally {
ThreadPool.terminate(executor, 10, TimeUnit.SECONDS);
}
}
/**
 * Verifies that tracking of the max queue latency (captured on task dequeue) is maintained.
 * Tests {@link TaskExecutionTimeTrackingEsThreadPoolExecutor#getMaxQueueLatencyMillisSinceLastPollAndReset()}.
 */
public void testMaxDequeuedQueueLatency() throws Exception {
    ThreadContext context = new ThreadContext(Settings.EMPTY);
    final var barrier = new CyclicBarrier(2);
    // Replace all tasks submitted to the thread pool with a configurable task that supports configuring queue latency durations and
    // waiting for task execution to begin via the supplied barrier.
    var adjustableTimedRunnable = new AdjustableQueueTimeWithExecutionBarrierTimedRunnable(
        barrier,
        // Until changed below, every task reports a queue latency of 1 millisecond.
        // (The original TimeUnit.NANOSECONDS.toNanos(...) call was an identity no-op; spell out the unit conversion instead.)
        TimeUnit.MILLISECONDS.toNanos(1)
    );
    TaskExecutionTimeTrackingEsThreadPoolExecutor executor = new TaskExecutionTimeTrackingEsThreadPoolExecutor(
        "test-threadpool",
        1,
        1,
        1000,
        TimeUnit.MILLISECONDS,
        ConcurrentCollections.newBlockingQueue(),
        (runnable) -> adjustableTimedRunnable,
        TestEsExecutors.testOnlyDaemonThreadFactory("queue-latency-test"),
        new EsAbortPolicy(),
        context,
        // Ongoing-task tracking is randomized: it should not affect max queue latency tracking either way.
        randomBoolean()
            ? EsExecutors.TaskTrackingConfig.builder()
                .trackOngoingTasks()
                .trackMaxQueueLatency()
                .trackExecutionTime(DEFAULT_EXECUTION_TIME_EWMA_ALPHA_FOR_TEST)
                .build()
            : EsExecutors.TaskTrackingConfig.builder()
                .trackMaxQueueLatency()
                .trackExecutionTime(DEFAULT_EXECUTION_TIME_EWMA_ALPHA_FOR_TEST)
                .build()
    );
    try {
        executor.prestartAllCoreThreads();
        logger.info("--> executor: {}", executor);
        // Check that the max is zero initially and after a reset.
        assertEquals("The queue latency should be initialized zero", 0, executor.getMaxQueueLatencyMillisSinceLastPollAndReset());
        executor.execute(() -> {});
        safeAwait(barrier); // Wait for the task to start, which implies it has finished the queuing stage.
        assertEquals("Ran one task of 1ms, should be the max", 1, executor.getMaxQueueLatencyMillisSinceLastPollAndReset());
        assertEquals("The max was just reset, should be zero", 0, executor.getMaxQueueLatencyMillisSinceLastPollAndReset());
        // Check that the max is kept across multiple calls, where the last is not the max.
        adjustableTimedRunnable.setQueuedTimeTakenNanos(TimeUnit.MILLISECONDS.toNanos(5));
        executeTask(executor, 1);
        safeAwait(barrier); // Wait for the task to start, which implies it has finished the queuing stage.
        adjustableTimedRunnable.setQueuedTimeTakenNanos(TimeUnit.MILLISECONDS.toNanos(1));
        executeTask(executor, 1);
        safeAwait(barrier);
        assertEquals("Max should not be the last task", 5, executor.getMaxQueueLatencyMillisSinceLastPollAndReset());
        assertEquals("The max was just reset, should be zero", 0, executor.getMaxQueueLatencyMillisSinceLastPollAndReset());
    } finally {
        ThreadPool.terminate(executor, 10, TimeUnit.SECONDS);
    }
}
/** Uses a runnable wrapper that simulates tasks failing with unknown errors. */
public void testExceptionThrowingTask() throws Exception {
    ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
    // Randomize whether ongoing-task tracking is enabled; failed tasks must be ignored by the EWMA either way.
    final EsExecutors.TaskTrackingConfig trackingConfig;
    if (randomBoolean()) {
        trackingConfig = EsExecutors.TaskTrackingConfig.builder()
            .trackOngoingTasks()
            .trackExecutionTime(DEFAULT_EXECUTION_TIME_EWMA_ALPHA_FOR_TEST)
            .build();
    } else {
        trackingConfig = EsExecutors.TaskTrackingConfig.builder()
            .trackExecutionTime(DEFAULT_EXECUTION_TIME_EWMA_ALPHA_FOR_TEST)
            .build();
    }
    TaskExecutionTimeTrackingEsThreadPoolExecutor executor = new TaskExecutionTimeTrackingEsThreadPoolExecutor(
        "test-threadpool",
        1,
        1,
        1000,
        TimeUnit.MILLISECONDS,
        ConcurrentCollections.newBlockingQueue(),
        exceptionalWrapper(),
        TestEsExecutors.testOnlyDaemonThreadFactory("queuetest"),
        new EsAbortPolicy(),
        threadContext,
        trackingConfig
    );
    executor.prestartAllCoreThreads();
    logger.info("--> executor: {}", executor);
    // Using the exceptionalWrapper each task's execution time is -1 to simulate unknown failures/rejections.
    assertThat((long) executor.getTaskExecutionEWMA(), equalTo(0L));
    final int numberOfTasks = randomIntBetween(1, 100);
    executeTask(executor, numberOfTasks);
    assertBusy(() -> assertThat(executor.getCompletedTaskCount(), equalTo((long) numberOfTasks)));
    // Failed/rejected tasks must not contribute to the EWMA or the accumulated execution time.
    assertThat((long) executor.getTaskExecutionEWMA(), equalTo(0L));
    assertThat(executor.getTotalTaskExecutionTime(), equalTo(0L));
    assertThat(executor.getActiveCount(), equalTo(0));
    assertThat(executor.getOngoingTasks().toString(), executor.getOngoingTasks().size(), equalTo(0));
    ThreadPool.terminate(executor, 10, TimeUnit.SECONDS);
}
public void testGetOngoingTasks() throws Exception {
    final long startNanos = System.nanoTime();
    ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
    var executor = new TaskExecutionTimeTrackingEsThreadPoolExecutor(
        "test-threadpool",
        1,
        1,
        1000,
        TimeUnit.MILLISECONDS,
        ConcurrentCollections.newBlockingQueue(),
        TimedRunnable::new,
        TestEsExecutors.testOnlyDaemonThreadFactory("queuetest"),
        new EsAbortPolicy(),
        threadContext,
        EsExecutors.TaskTrackingConfig.builder()
            .trackOngoingTasks()
            .trackExecutionTime(DEFAULT_EXECUTION_TIME_EWMA_ALPHA_FOR_TEST)
            .build()
    );
    final var startedLatch = new CountDownLatch(1);
    final var releaseLatch = new CountDownLatch(1);
    // Nothing has been submitted yet, so no tasks should be reported as ongoing.
    assertThat(executor.getOngoingTasks().toString(), executor.getOngoingTasks().size(), equalTo(0));
    executor.execute(() -> {
        startedLatch.countDown();
        safeAwait(releaseLatch);
    });
    safeAwait(startedLatch);
    // Exactly one task is executing, and its recorded start time must be no earlier than the start of the test.
    var ongoingTasks = executor.getOngoingTasks();
    assertThat(ongoingTasks.toString(), ongoingTasks.size(), equalTo(1));
    assertThat(ongoingTasks.values().iterator().next(), greaterThanOrEqualTo(startNanos));
    releaseLatch.countDown();
    assertBusy(() -> assertThat(executor.getOngoingTasks().toString(), executor.getOngoingTasks().size(), equalTo(0)));
    assertThat(executor.getTotalTaskExecutionTime(), greaterThan(0L));
    ThreadPool.terminate(executor, 10, TimeUnit.SECONDS);
}
public void testQueueLatencyHistogramMetrics() {
RecordingMeterRegistry meterRegistry = new RecordingMeterRegistry();
final var threadPoolName = randomIdentifier();
// Single-threaded pool so that one running task forces every subsequent submission to sit in the queue.
var executor = new TaskExecutionTimeTrackingEsThreadPoolExecutor(
threadPoolName,
1,
1,
1000,
TimeUnit.MILLISECONDS,
ConcurrentCollections.newBlockingQueue(),
TimedRunnable::new,
TestEsExecutors.testOnlyDaemonThreadFactory("queuetest"),
new EsAbortPolicy(),
new ThreadContext(Settings.EMPTY),
EsExecutors.TaskTrackingConfig.builder()
.trackOngoingTasks()
.trackExecutionTime(DEFAULT_EXECUTION_TIME_EWMA_ALPHA_FOR_TEST)
.build()
);
executor.setupMetrics(meterRegistry, threadPoolName);
try {
final var barrier = new CyclicBarrier(2);
// Records the synthetic delays imposed below, to compare against what the executor's metrics report.
final ExponentialBucketHistogram expectedHistogram = new ExponentialBucketHistogram(
TaskExecutionTimeTrackingEsThreadPoolExecutor.QUEUE_LATENCY_HISTOGRAM_BUCKETS
);
/*
* The thread pool has a single thread, so we submit a task that will occupy that thread
* and cause subsequent tasks to be queued
*/
Future<?> runningTask = executor.submit(() -> {
safeAwait(barrier);
safeAwait(barrier);
});
safeAwait(barrier); // wait till the first task starts
expectedHistogram.addObservation(0L); // the first task should not be delayed
/*
* On each iteration we submit a task - which will be queued because of the
* currently running task, pause for some random interval, then unblock the
* new task by releasing the currently running task. This gives us a lower
* bound for the real delays (the real delays will be greater than or equal
* to the synthetic delays we add, i.e. each percentile should be >= our
* expected values)
*/
for (int i = 0; i < 10; i++) {
Future<?> waitingTask = executor.submit(() -> {
safeAwait(barrier);
safeAwait(barrier);
});
final long delayTimeMs = randomLongBetween(1, 50);
safeSleep(delayTimeMs);
safeAwait(barrier); // let the running task complete
safeAwait(barrier); // wait for the next task to start
safeGet(runningTask); // ensure previous task is complete
expectedHistogram.addObservation(delayTimeMs);
runningTask = waitingTask;
}
safeAwait(barrier); // let the last task finish
safeGet(runningTask);
// Trigger metric collection, then read back the queue-time gauge measurements for this pool.
meterRegistry.getRecorder().collect();
List<Measurement> measurements = meterRegistry.getRecorder()
.getMeasurements(
InstrumentType.LONG_GAUGE,
ThreadPool.THREAD_POOL_METRIC_PREFIX + threadPoolName + ThreadPool.THREAD_POOL_METRIC_NAME_QUEUE_TIME
);
assertThat(measurements, hasSize(3)); // one gauge per reported percentile: "50", "90", "99"
// we have to use greater than or equal to because the actual delay might be higher than what we imposed
assertThat(getPercentile(measurements, "99"), greaterThanOrEqualTo(expectedHistogram.getPercentile(0.99f)));
assertThat(getPercentile(measurements, "90"), greaterThanOrEqualTo(expectedHistogram.getPercentile(0.9f)));
assertThat(getPercentile(measurements, "50"), greaterThanOrEqualTo(expectedHistogram.getPercentile(0.5f)));
} finally {
ThreadPool.terminate(executor, 10, TimeUnit.SECONDS);
}
}
/**
 * Extracts the value of the gauge measurement tagged with the given {@code percentile} attribute.
 *
 * @param measurements the collected gauge measurements, each expected to carry a "percentile" attribute
 * @param percentile   the percentile label to look up, e.g. "99"
 * @return the long value of the first matching measurement
 * @throws java.util.NoSuchElementException if no measurement carries the requested percentile
 */
private long getPercentile(List<Measurement> measurements, String percentile) {
    // Compare with the expected label on the left so a measurement lacking the attribute is skipped instead of throwing an NPE.
    return measurements.stream()
        .filter(m -> percentile.equals(m.attributes().get("percentile")))
        .mapToLong(Measurement::getLong)
        .findFirst()
        .getAsLong();
}
/**
 * The returned function outputs a WrappedRunnable that simulates the case
 * where {@link TimedRunnable#getTotalExecutionNanos()} always returns {@code timeTakenNanos}.
 *
 * @param timeTakenNanos the fixed execution time, in nanoseconds, that every wrapped task reports
 */
private Function<Runnable, WrappedRunnable> settableWrapper(long timeTakenNanos) {
return (runnable) -> new SettableTimedRunnable(timeTakenNanos, false);
}
/**
 * The returned function outputs a WrappedRunnable that simulates the case
 * where {@link TimedRunnable#getTotalExecutionNanos()} returns -1 because
 * the job failed or was rejected before it finished.
 */
private Function<Runnable, WrappedRunnable> exceptionalWrapper() {
    // -1 nanos marks the execution time as unknown (task failed or was rejected);
    // pass the sentinel directly rather than through the no-op TimeUnit.NANOSECONDS.toNanos(-1).
    return (runnable) -> new SettableTimedRunnable(-1, true);
}
/** Submits a no-op task to the executor {@code times} times. */
private void executeTask(TaskExecutionTimeTrackingEsThreadPoolExecutor executor, int times) {
    logger.info("--> executing a task [{}] times", times);
    int remaining = times;
    while (remaining > 0) {
        executor.execute(() -> {});
        remaining--;
    }
}
public
|
TaskExecutionTimeTrackingEsThreadPoolExecutorTests
|
java
|
mockito__mockito
|
mockito-core/src/main/java/org/mockito/internal/invocation/InvocationComparator.java
|
{
"start": 303,
"end": 523
}
|
class ____ implements Comparator<Invocation> {
@Override
public int compare(Invocation o1, Invocation o2) {
return Integer.compare(o1.getSequenceNumber(), o2.getSequenceNumber());
}
}
|
InvocationComparator
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/internal/CoreMessageLogger.java
|
{
"start": 4564,
"end": 25064
}
|
class ____ be instantiated by Interceptor)",
id = 182)
void noDefaultConstructor(String name);
@LogMessage(level = INFO)
@Message(value = "Loaded properties from resource hibernate.properties: %s", id = 205)
void propertiesLoaded(Properties maskOut);
@LogMessage(level = DEBUG)
@Message(value = "'hibernate.properties' not found", id = 206)
void propertiesNotFound();
@LogMessage(level = WARN)
@Message(
value = """
Recognized obsolete hibernate namespace %s.\
Use namespace %s instead. Refer to Hibernate 3.6 Migration Guide""",
id = 223)
void recognizedObsoleteHibernateNamespace(String oldHibernateNamespace, String hibernateNamespace);
@LogMessage(level = INFO)
@Message(value = "Running hbm2ddl schema export", id = 227)
void runningHbm2ddlSchemaExport();
@LogMessage(level = INFO)
@Message(value = "Running hbm2ddl schema update", id = 228)
void runningHbm2ddlSchemaUpdate();
@LogMessage(level = INFO)
@Message(value = "Running schema validator", id = 229)
void runningSchemaValidator();
// @LogMessage(level = WARN)
// @Message(value = "Scoping types to session factory %s after already scoped %s", id = 233)
// void scopingTypesToSessionFactoryAfterAlreadyScoped(
// SessionFactoryImplementor factory,
// SessionFactoryImplementor factory2);
// @LogMessage(level = WARN)
// @Message(value = "SQL Error: %s, SQLState: %s", id = 247)
// void sqlWarning(int errorCode, String sqlState);
@LogMessage(level = INFO)
@Message(value = "Start time: %s", id = 251)
void startTime(long startTime);
@LogMessage(level = INFO)
@Message(value = "Table not found: %s", id = 262)
void tableNotFound(String name);
@LogMessage(level = INFO)
@Message(value = "More than one table found: %s", id = 263)
void multipleTablesFound(String name);
@LogMessage(level = INFO)
@Message(value = "Transactions: %s", id = 266)
void transactions(long transactionCount);
@LogMessage(level = WARN)
@Message(value = "Type [%s] defined no registration keys; ignoring", id = 269)
void typeDefinedNoRegistrationKeys(Object type);
@LogMessage(level = WARN)
@Message(value = "Error accessing type info result set: %s", id = 273)
void unableToAccessTypeInfoResultSet(String string);
@LogMessage(level = WARN)
@Message(value = "Unable to cleanup temporary id table after use [%s]", id = 283)
void unableToCleanupTemporaryIdTable(Throwable t);
@LogMessage(level = INFO)
@Message(value = "Error closing InitialContext [%s]", id = 285)
void unableToCloseInitialContext(String string);
@LogMessage(level = WARN)
@Message(value = "Could not close input stream", id = 287)
void unableToCloseInputStream(@Cause IOException e);
@LogMessage(level = WARN)
@Message(value = "IOException occurred closing output stream", id = 292)
void unableToCloseOutputStream(@Cause IOException e);
@LogMessage(level = WARN)
@Message(value = "IOException occurred closing stream", id = 296)
void unableToCloseStream(@Cause IOException e);
@LogMessage(level = ERROR)
@Message(value = "Could not close stream on hibernate.properties: %s", id = 297)
void unableToCloseStreamError(IOException error);
@LogMessage(level = WARN)
@Message(value = "Could not copy system properties, system properties will be ignored", id = 304)
void unableToCopySystemProperties();
@LogMessage(level = WARN)
@Message(value = "Could not create proxy factory for:%s", id = 305)
void unableToCreateProxyFactory(String entityName, @Cause HibernateException e);
@LogMessage(level = ERROR)
@Message(value = "Error creating schema ", id = 306)
void unableToCreateSchema(@Cause Exception e);
@LogMessage(level = ERROR)
@Message(value = "Problem loading properties from hibernate.properties", id = 329)
void unableToLoadProperties();
@LogMessage(level = WARN)
@Message(value = "Unable to log SQLWarnings: %s", id = 335)
void unableToLogSqlWarnings(SQLException sqle);
@LogMessage(level = ERROR)
@Message(value = "Unable to mark for rollback on PersistenceException: ", id = 337)
void unableToMarkForRollbackOnPersistenceException(@Cause Exception e);
@LogMessage(level = ERROR)
@Message(value = "Unable to mark for rollback on TransientObjectException: ", id = 338)
void unableToMarkForRollbackOnTransientObjectException(@Cause Exception e);
@LogMessage(level = ERROR)
@Message(value = "Could not release a cache lock: %s", id = 353)
void unableToReleaseCacheLock(CacheException ce);
@LogMessage(level = WARN)
@Message(value = "Unable to release type info result set", id = 357)
void unableToReleaseTypeInfoResultSet();
@LogMessage(level = WARN)
@Message(value = "Unable to erase previously added bag join fetch", id = 358)
void unableToRemoveBagJoinFetch();
@LogMessage(level = WARN)
@Message(value = "Unable to retrieve type info result set: %s", id = 362)
void unableToRetrieveTypeInfoResultSet(String string);
@LogMessage(level = ERROR)
@Message(value = "Error running schema update", id = 366)
void unableToRunSchemaUpdate(@Cause Exception e);
@LogMessage(level = WARN)
@Message(
value = """
The %s.%s.%s version of H2 implements temporary table creation such that it commits current transaction;\
multi-table, bulk HQL/JPQL will not work properly""",
id = 393)
void unsupportedMultiTableBulkHqlJpaql(int majorVersion, int minorVersion, int buildId);
@LogMessage(level = ERROR)
@Message(value = "Don't use old DTDs, read the Hibernate 3.x Migration Guide", id = 404)
void usingOldDtd();
@LogMessage(level = WARN)
@Message(value = "Warnings creating temp table: %s", id = 413)
void warningsCreatingTempTable(SQLWarning warning);
@LogMessage(level = WARN)
@Message(value = "Write locks via update not supported for non-versioned entities [%s]", id = 416)
void writeLocksNotSupported(String entityName);
@LogMessage(level = WARN)
@Message(
value = """
Dialect [%s] limits the number of elements in an IN predicate to %s entries. \
However, the given parameter list [%s] contained %s entries, which will likely cause failures \
to execute the query in the database""",
id = 443
)
void tooManyInExpressions(String dialectName, int limit, String paramName, int size);
@LogMessage(level = WARN)
@Message(
value = """
Encountered request for locking however dialect reports that database prefers locking be done in a \
separate select (follow-on locking); results will be locked after initial query executes""",
id = 444
)
void usingFollowOnLocking();
@LogMessage(level = INFO)
@Message(value = "Cannot locate column information using identifier [%s]; ignoring index [%s]", id = 475 )
void logCannotLocateIndexColumnInformation(String columnIdentifierText, String indexIdentifierText);
@LogMessage(level = DEBUG)
@Message(value = "Executing script [%s]", id = 476)
void executingScript(String scriptName);
@LogMessage(level = DEBUG)
@Message(value = "Starting delayed evictData of schema as part of SessionFactory shut-down'", id = 477)
void startingDelayedSchemaDrop();
@LogMessage(level = ERROR)
@Message(value = "Unsuccessful: %s", id = 478)
void unsuccessfulSchemaManagementCommand(String command);
@LogMessage(level = DEBUG)
@Message( value = "Error performing delayed DROP command [%s]", id = 479 )
void unsuccessfulDelayedDropCommand(CommandAcceptanceException e);
@LogMessage(level = WARN)
@Message(
value = """
A ManagedEntity was associated with a stale PersistenceContext.\
A ManagedEntity may only be associated with one PersistenceContext at a time; %s""",
id = 480)
void stalePersistenceContextInEntityEntry(String msg);
@LogMessage(level = ERROR)
@Message(value = "Illegally attempted to associate a proxy for entity [%s] with id [%s] with two open sessions.", id = 485)
void attemptToAssociateProxyWithTwoOpenSessions(String entityName, Object id);
@LogMessage(level = WARN)
@Message(value = "The query [%s] updates an immutable entity: %s",
id = 487)
void immutableEntityUpdateQuery(@Nullable String sourceQuery, String querySpaces);
@LogMessage(level = DEBUG)
@Message(value = "The query [%s] updates an immutable entity: %s",
id = 488)
void immutableEntityUpdateQueryAllowed(@Nullable String sourceQuery, String querySpaces);
@LogMessage(level = INFO)
@Message(value = "No JTA platform available (set 'hibernate.transaction.jta.platform' to enable JTA platform integration)", id = 489)
void noJtaPlatform();
@LogMessage(level = INFO)
@Message(value = "Using JTA platform [%s]", id = 490)
void usingJtaPlatform(String jtaPlatformClassName);
@LogMessage(level = WARN)
@Message(value = "Attempt to merge an uninitialized collection with queued operations; queued operations will be ignored: %s", id = 494)
void ignoreQueuedOperationsOnMerge(String collectionInfoString);
@LogMessage(level = WARN)
@Message(value = "The [%s] property of the [%s] entity was modified, but it won't be updated because the property is immutable.", id = 502)
void ignoreImmutablePropertyModification(String propertyName, String entityName);
@LogMessage(level = WARN)
@Message(value = """
Multiple configuration properties defined to create schema.\
Choose at most one among 'jakarta.persistence.create-database-schemas' or 'hibernate.hbm2ddl.create_namespaces'.""",
id = 504)
void multipleSchemaCreationSettingsDefined();
@LogMessage(level = WARN)
@Message(value = "Multi-table insert is not available due to missing identity and window function support for: %s", id = 509)
void multiTableInsertNotAvailable(String entityName);
@LogMessage(level = WARN)
@Message(value = "Association with '@Fetch(JOIN)' and 'fetch=FetchType.LAZY' found. This is interpreted as lazy: %s", id = 510)
void fetchModeJoinWithLazyWarning(String role);
@LogMessage(level = WARN)
@Message(
value = """
The %2$s version for [%s] is no longer supported, hence certain features may not work properly.\
The minimum supported version is %3$s. Check the community dialects project for available legacy versions.""",
id = 511)
void unsupportedDatabaseVersion(String databaseName, String actualVersion, String minimumVersion);
@LogMessage(level = DEBUG)
@Message(value = "Unable to create the ReflectionOptimizer for [%s]: %s",
id = 513)
void unableToGenerateReflectionOptimizer(String className, String cause);
@LogMessage(level = DEBUG)
@Message(
id = 517,
value = "Encountered a MappedSuperclass [%s] not used in any entity hierarchy"
)
void unusedMappedSuperclass(String name);
@LogMessage(level = WARN)
@Message(
id = 519,
value = "Invalid JSON column type [%s], was expecting [%s]; for efficiency schema should be migrate to JSON DDL type"
)
void invalidJSONColumnType(String actual, String expected);
@LogMessage(level = ERROR)
@Message(
id = 5001,
value = "Illegal argument on static metamodel field injection: %s#%s; expected type: %s; encountered type: %s"
)
void illegalArgumentOnStaticMetamodelFieldInjection(
String name,
String name2,
String name3,
String name4);
@LogMessage(level = WARN)
@Message(
id = 5002,
value = "Unable to locate static metamodel field: %s#%s; this may or may not indicate a problem with the static metamodel"
)
void unableToLocateStaticMetamodelField(
String name,
String name2);
@LogMessage(level = DEBUG)
@Message( id = 6001, value = "Error creating temp table" )
void errorCreatingTempTable(@Cause Exception e);
@LogMessage(level = DEBUG)
@Message( id = 6002, value = "Unable to create temporary table [%s]: '%s' failed" )
void unableToCreateTempTable(String qualifiedTableName, String creationCommand, @Cause SQLException e);
@LogMessage(level = DEBUG)
@Message( id = 6003, value = "Error dropping temp table" )
void errorDroppingTempTable(@Cause Exception e);
@LogMessage(level = DEBUG)
@Message( id = 6004, value = "Unable to drop temporary table [%s]: '%s' failed" )
void unableToDropTempTable(String qualifiedTableName, String creationCommand, @Cause SQLException e);
@LogMessage(level = TRACE)
@Message( id = 6005, value = "Cascading %s to child entity '%s'" )
void cascading(CascadingAction<?> delete, String childEntityName);
@LogMessage(level = TRACE)
@Message( id = 6006, value = "Cascading %s to collection '%s'" )
void cascadingCollection(CascadingAction<?> delete, String collectionRole);
@LogMessage(level = TRACE)
@Message( id = 6007, value = "Done cascading %s to collection '%s'" )
void doneCascadingCollection(CascadingAction<?> delete, String collectionRole);
@LogMessage(level = TRACE)
@Message( id = 6008, value = "Processing cascade %s for entity '%s'" )
void processingCascade(CascadingAction<?> action, String entityName);
@LogMessage(level = TRACE)
@Message( id = 6009, value = "Processing cascade %s for entity '%s'" )
void doneProcessingCascade(CascadingAction<?> action, String entityName);
@LogMessage(level = TRACE)
@Message( id = 6011, value = "Deleting orphaned child entity instance of type '%s'" )
void deletingOrphanOfType(String entityName);
@LogMessage(level = TRACE)
@Message( id = 6012, value = "Deleting orphaned child entity instance: %s" )
void deletingOrphan(String info);
@LogMessage(level = TRACE)
@Message( id = 6013, value = "Deleting orphans for collection '%s'" )
void deletingOrphans(String role);
@LogMessage(level = TRACE)
@Message( id = 6014, value = "Done deleting orphans for collection '%s'" )
void doneDeletingOrphans(String role);
@LogMessage(level = TRACE)
@Message( id = 6021, value = "Collection dirty: %s" )
void collectionDirty(String info);
@LogMessage(level = TRACE)
@Message( id = 6022, value = "Reset storedSnapshot to %s for %s" )
void resetStoredSnapshot(Serializable storedSnapshot, CollectionEntry collectionEntry);
@LogMessage(level = TRACE)
@Message( id = 6041, value = "Building session factory using provided StandardServiceRegistry" )
void buildingFactoryWithProvidedRegistry();
@LogMessage(level = TRACE)
@Message( id = 6042, value = "Building session factory using internal StandardServiceRegistryBuilder" )
void buildingFactoryWithInternalRegistryBuilder();
@LogMessage(level = TRACE)
@Message( id = 6043, value = "Found collection with unloaded owner: %s" )
void collectionWithUnloadedOwner(String info);
@LogMessage(level = TRACE)
@Message( id = 6044, value = "Forcing collection initialization" )
void forcingCollectionInitialization();
@LogMessage(level = TRACE)
@Message( id = 6045, value = "Collection dereferenced: %s" )
void collectionDereferenced(String info);
@LogMessage(level = TRACE)
@Message( id = 6046, value = "Skipping uninitialized bytecode-lazy collection: %s" )
void skippingUninitializedBytecodeLazyCollection(String info);
@LogMessage(level = TRACE)
@Message( id = 6047, value = "Collection found: %s, was: %s (initialized)" )
void collectionFoundInitialized(String is, String was);
@LogMessage(level = TRACE)
@Message( id = 6048, value = "Collection found: %s, was: %s (uninitialized)" )
void collectionFoundUninitialized(String is, String was);
@LogMessage(level = TRACE)
@Message( id = 6049, value = "Created collection wrapper for: %s" )
void createdCollectionWrapper(String s);
@LogMessage(level = TRACE)
@Message( id = 6051, value = "Starting serialization of [%s] EntityEntry entries" )
void startingEntityEntrySerialization(int count);
@LogMessage(level = TRACE)
@Message( id = 6052, value = "Starting deserialization of [%s] EntityEntry entries" )
void startingEntityEntryDeserialization(int count);
@LogMessage(level = ERROR)
@Message( id = 6053, value = "Unable to deserialize [%s]" )
void unableToDeserialize(String entityEntryClassName);
@LogMessage(level = TRACE)
@Message( id = 6061, value = "Extracted generated values for entity %s - %s" )
void extractedGeneratedValues(String info, String results);
@LogMessage(level = WARN)
@Message( id = 6062, value = "Could not resolve type name [%s] as Java type" )
void couldNotResolveTypeName(String typeName, @Cause ClassLoadingException exception);
@LogMessage(level = DEBUG)
@Message( id = 6063, value = "Problem releasing GenerationTarget [%s]" )
void problemReleasingGenerationTarget(GenerationTarget target, @Cause Exception e);
@LogMessage(level = WARN)
@Message( id = 6064, value = "Unable to close temp session" )
void unableToCLoseTempSession();
// AbstractEntityPersister
@LogMessage(level = TRACE)
@Message( id = 6565, value = "Initializing lazy properties from datastore (triggered for '%s')" )
void initializingLazyPropertiesFromDatastore(String fieldName);
@LogMessage(level = TRACE)
@Message( id = 6566, value = "Initializing lazy properties from second-level cache" )
void initializingLazyPropertiesFromSecondLevelCache();
@LogMessage(level = TRACE)
@Message( id = 6567, value = "Done initializing lazy properties" )
void doneInitializingLazyProperties();
@LogMessage(level = TRACE)
@Message( id = 6568, value = "Resolving unique key [%s] to identifier for entity [%s]" )
void resolvingUniqueKeyToIdentifier(Object key, String entityName);
@LogMessage(level = TRACE)
@Message( id = 6569, value = "Reading entity version: %s" )
void readingEntityVersion(String info);
@LogMessage(level = TRACE)
@Message( id = 6570, value = "Fetching entity: %s" )
void fetchingEntity(String info);
@LogMessage(level = TRACE)
@Message( id = 6571, value = "%s is dirty" )
void propertyIsDirty(String qualifiedProperty);
@LogMessage(level = TRACE)
@Message( id = 6572, value = "Forcing version increment [%s]" )
void forcingVersionIncrement(String info);
@LogMessage(level = TRACE)
@Message( id = 6573, value = "Getting current natural-id snapshot state for `%s#%s" )
void gettingCurrentNaturalIdSnapshot(String entityName, Object id);
@LogMessage(level = TRACE)
@Message( id = 6574, value = "Initializing lazy properties of: %s, field access: %s" )
void initializingLazyPropertiesOf(String info, String fieldName);
// TransactionImpl
@LogMessage(level = DEBUG)
@Message(id = 6581, value = "TransactionImpl created on closed Session/EntityManager")
void transactionCreatedOnClosedSession();
@LogMessage(level = DEBUG)
@Message(id = 6582, value = "TransactionImpl created in JPA compliant mode")
void transactionCreatedInJpaCompliantMode();
@LogMessage(level = DEBUG)
@Message(id = 6583, value = "Beginning transaction")
void beginningTransaction();
@LogMessage(level = DEBUG)
@Message(id = 6584, value = "Committing transaction")
void committingTransaction();
@LogMessage(level = DEBUG)
@Message(id = 6585, value = "Rolling back transaction")
void rollingBackTransaction();
@LogMessage(level = DEBUG)
@Message(id = 6586, value = "rollback() called with inactive transaction")
void rollbackCalledOnInactiveTransaction();
@LogMessage(level = DEBUG)
@Message(id = 6587, value = "setRollbackOnly() called with inactive transaction")
void setRollbackOnlyCalledOnInactiveTransaction();
// session builders
@LogMessage(level = DEBUG)
@Message(id = 6588, value = "Opening session [tenant=%s]")
void openingSession(Object tenantIdentifier);
@LogMessage(level = DEBUG)
@Message(id = 6589, value = "Opening stateless session [tenant=%s]")
void openingStatelessSession(Object tenantIdentifier);
@LogMessage(level = TRACE)
@Message(id = 6591, value = "Identifier unsaved-value strategy %s")
void idUnsavedValueStrategy(String strategy);
@LogMessage(level = TRACE)
@Message(id = 6592, value = "Identifier unsaved-value [%s]")
void idUnsavedValue(@Nullable Object value);
@LogMessage(level = TRACE)
@Message(id = 6593, value = "Version unsaved-value strategy %s")
void versionUnsavedValueStrategy(String strategy);
@LogMessage(level = TRACE)
@Message(id = 6594, value = "Version unsaved-value [%s]")
void versionUnsavedValue(@Nullable Object value);
@LogMessage(level = TRACE)
@Message(id = 601, value = "Attempting to resolve script source setting: %s")
void attemptingToResolveScriptSourceSetting(String scriptSourceSettingString);
@LogMessage(level = DEBUG)
@Message(id = 602, value = "Attempting to create non-existent script target file: %s")
void attemptingToCreateScriptTarget(String absolutePath);
@LogMessage(level = DEBUG)
@Message(id = 603, value = "Could not create non-existent script target file")
void couldNotCreateScriptTarget(@Cause Exception e);
@LogMessage(level = DEBUG)
@Message(id = 604, value = "Attempting to resolve writer for URL: %s")
void attemptingToCreateWriter(URL url);
}
|
must
|
java
|
alibaba__nacos
|
core/src/main/java/com/alibaba/nacos/core/controller/compatibility/ApiCompatibilityConfig.java
|
{
"start": 866,
"end": 3323
}
|
class ____ extends AbstractDynamicConfig {
private static final String API_COMPATIBILITY = "ApiCompatibility";
private static final ApiCompatibilityConfig INSTANCE = new ApiCompatibilityConfig();
private static final String PREFIX = "nacos.core.api.compatibility";
public static final String CLIENT_API_COMPATIBILITY_KEY = PREFIX + ".client.enabled";
public static final String CONSOLE_API_COMPATIBILITY_KEY = PREFIX + ".console.enabled";
public static final String ADMIN_API_COMPATIBILITY_KEY = PREFIX + ".admin.enabled";
private boolean clientApiCompatibility;
private boolean consoleApiCompatibility;
private boolean adminApiCompatibility;
protected ApiCompatibilityConfig() {
super(API_COMPATIBILITY);
resetConfig();
}
public static ApiCompatibilityConfig getInstance() {
return INSTANCE;
}
@Override
protected void getConfigFromEnv() {
clientApiCompatibility = EnvUtil.getProperty(CLIENT_API_COMPATIBILITY_KEY, Boolean.class, true);
consoleApiCompatibility = EnvUtil.getProperty(CONSOLE_API_COMPATIBILITY_KEY, Boolean.class, false);
adminApiCompatibility = EnvUtil.getProperty(ADMIN_API_COMPATIBILITY_KEY, Boolean.class, false);
}
@Override
protected String printConfig() {
return toString();
}
@Override
public String toString() {
return "ApiCompatibilityConfig{" + "clientApiCompatibility=" + clientApiCompatibility
+ ", consoleApiCompatibility=" + consoleApiCompatibility + ", adminApiCompatibility="
+ adminApiCompatibility + '}';
}
public boolean isClientApiCompatibility() {
return clientApiCompatibility;
}
public void setClientApiCompatibility(boolean clientApiCompatibility) {
this.clientApiCompatibility = clientApiCompatibility;
}
public boolean isConsoleApiCompatibility() {
return consoleApiCompatibility;
}
public void setConsoleApiCompatibility(boolean consoleApiCompatibility) {
this.consoleApiCompatibility = consoleApiCompatibility;
}
public boolean isAdminApiCompatibility() {
return adminApiCompatibility;
}
public void setAdminApiCompatibility(boolean adminApiCompatibility) {
this.adminApiCompatibility = adminApiCompatibility;
}
}
|
ApiCompatibilityConfig
|
java
|
apache__maven
|
impl/maven-impl/src/main/java/org/apache/maven/impl/model/DefaultProfileActivationContext.java
|
{
"start": 1886,
"end": 2028
}
|
class ____ implements ProfileActivationContext {
record ExistRequest(String path, boolean enableGlob) {}
|
DefaultProfileActivationContext
|
java
|
apache__dubbo
|
dubbo-common/src/main/java/org/apache/dubbo/common/utils/DateUtils.java
|
{
"start": 1290,
"end": 9220
}
|
class ____ {
public static final ZoneId GMT = ZoneId.of("GMT");
public static final ZoneId UTC = ZoneId.of("UTC");
public static final String DATE = "yyyy-MM-dd";
public static final String DATE_MIN = "yyyy-MM-dd HH:mm";
public static final String DATE_TIME = "yyyy-MM-dd HH:mm:ss";
public static final String JDK_TIME = "EEE MMM dd HH:mm:ss zzz yyyy";
public static final String ASC_TIME = "EEE MMM d HH:mm:ss yyyy";
public static final String RFC1036 = "EEE, dd-MMM-yy HH:mm:ss zzz";
public static final DateTimeFormatter DATE_FORMAT = DateTimeFormatter.ofPattern(DATE);
public static final DateTimeFormatter DATE_MIN_FORMAT = DateTimeFormatter.ofPattern(DATE_MIN);
public static final DateTimeFormatter DATE_TIME_FORMAT = DateTimeFormatter.ofPattern(DATE_TIME);
public static final DateTimeFormatter JDK_TIME_FORMAT = DateTimeFormatter.ofPattern(JDK_TIME, Locale.US);
public static final DateTimeFormatter ASC_TIME_FORMAT = DateTimeFormatter.ofPattern(ASC_TIME, Locale.US);
public static final DateTimeFormatter RFC1036_FORMAT = DateTimeFormatter.ofPattern(RFC1036, Locale.US);
private static final Map<String, DateTimeFormatter> CACHE = new LRUCache<>(64);
private static final List<DateTimeFormatter> CUSTOM_FORMATTERS = new CopyOnWriteArrayList<>();
private DateUtils() {}
public static void registerFormatter(String pattern) {
CUSTOM_FORMATTERS.add(DateTimeFormatter.ofPattern(pattern));
}
public static void registerFormatter(DateTimeFormatter formatter) {
CUSTOM_FORMATTERS.add(formatter);
}
public static Date parse(String str, String pattern) {
if (DATE_TIME.equals(pattern)) {
return parse(str, DATE_TIME_FORMAT);
}
DateTimeFormatter formatter = getFormatter(pattern);
return parse(str, formatter);
}
public static Date parse(String str, DateTimeFormatter formatter) {
return toDate(formatter.parse(str));
}
public static String format(Date date) {
return format(date, DATE_TIME_FORMAT);
}
public static String format(Date date, String pattern) {
if (DATE_TIME.equals(pattern)) {
return format(date, DATE_TIME_FORMAT);
}
DateTimeFormatter formatter = getFormatter(pattern);
return format(date, formatter);
}
public static String format(Date date, DateTimeFormatter formatter) {
return formatter.format(ZonedDateTime.ofInstant(date.toInstant(), ZoneId.systemDefault()));
}
public static String format(Date date, DateTimeFormatter formatter, ZoneId zone) {
return formatter.format(ZonedDateTime.ofInstant(date.toInstant(), zone));
}
public static String formatGMT(Date date, DateTimeFormatter formatter) {
return formatter.format(ZonedDateTime.ofInstant(date.toInstant(), GMT));
}
public static String formatUTC(Date date, DateTimeFormatter formatter) {
return formatter.format(ZonedDateTime.ofInstant(date.toInstant(), UTC));
}
public static String formatHeader(Date date) {
return DateTimeFormatter.RFC_1123_DATE_TIME.format(ZonedDateTime.ofInstant(date.toInstant(), GMT));
}
private static DateTimeFormatter getFormatter(String pattern) {
return CACHE.computeIfAbsent(pattern, DateTimeFormatter::ofPattern);
}
public static Date parse(Object value) {
if (value == null) {
return null;
}
if (value instanceof Date) {
return (Date) value;
}
if (value instanceof Calendar) {
return ((Calendar) value).getTime();
}
if (value.getClass() == Instant.class) {
return Date.from((Instant) value);
}
if (value instanceof TemporalAccessor) {
return Date.from(Instant.from((TemporalAccessor) value));
}
if (value instanceof Number) {
return new Date(((Number) value).longValue());
}
if (value instanceof CharSequence) {
return parse(value.toString());
}
throw new IllegalArgumentException("Can not cast to Date, value : '" + value + "'");
}
public static Date parse(String value) {
if (value == null) {
return null;
}
String str = value.trim();
int len = str.length();
if (len == 0) {
return null;
}
boolean isIso = true;
boolean isNumeric = true;
boolean hasDate = false;
boolean hasTime = false;
for (int i = 0; i < len; i++) {
char c = str.charAt(i);
switch (c) {
case ' ':
isIso = false;
break;
case '-':
hasDate = true;
break;
case 'T':
case ':':
hasTime = true;
break;
case '0':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9':
continue;
default:
}
if (isNumeric) {
isNumeric = false;
}
}
DateTimeFormatter formatter = null;
if (isIso) {
if (hasDate) {
formatter = hasTime ? DateTimeFormatter.ISO_DATE_TIME : DateTimeFormatter.ISO_DATE;
} else if (hasTime) {
formatter = DateTimeFormatter.ISO_TIME;
}
}
if (isNumeric) {
long num = Long.parseLong(str);
if (num > 21000101 || num < 19700101) {
return new Date(num);
}
formatter = DateTimeFormatter.BASIC_ISO_DATE;
}
switch (len) {
case 10:
formatter = DATE_FORMAT;
break;
case 16:
formatter = DATE_MIN_FORMAT;
break;
case 19:
formatter = DATE_TIME_FORMAT;
break;
case 23:
case 24:
formatter = ASC_TIME_FORMAT;
break;
case 27:
formatter = RFC1036_FORMAT;
break;
case 28:
formatter = JDK_TIME_FORMAT;
break;
case 29:
formatter = DateTimeFormatter.RFC_1123_DATE_TIME;
break;
default:
}
if (formatter != null) {
try {
return toDate(formatter.parse(str));
} catch (Exception ignored) {
}
}
for (DateTimeFormatter dtf : CUSTOM_FORMATTERS) {
try {
return parse(str, dtf);
} catch (Exception ignored) {
}
}
throw new IllegalArgumentException("Can not cast to Date, value : '" + value + "'");
}
public static Date toDate(TemporalAccessor temporal) {
if (temporal instanceof Instant) {
return Date.from((Instant) temporal);
}
long timestamp;
if (temporal.isSupported(ChronoField.EPOCH_DAY)) {
timestamp = temporal.getLong(ChronoField.EPOCH_DAY) * 86400000;
} else {
timestamp = LocalDate.now().toEpochDay() * 86400000;
}
if (temporal.isSupported(ChronoField.MILLI_OF_DAY)) {
timestamp += temporal.getLong(ChronoField.MILLI_OF_DAY);
}
if (temporal.isSupported(ChronoField.OFFSET_SECONDS)) {
timestamp -= temporal.getLong(ChronoField.OFFSET_SECONDS) * 1000;
} else {
timestamp -= TimeZone.getDefault().getRawOffset();
}
return new Date(timestamp);
}
}
|
DateUtils
|
java
|
google__guava
|
android/guava/src/com/google/common/primitives/UnsignedBytes.java
|
{
"start": 2225,
"end": 10950
}
|
class ____ {
private UnsignedBytes() {}
/**
* The largest power of two that can be represented as an unsigned {@code byte}.
*
* @since 10.0
*/
public static final byte MAX_POWER_OF_TWO = (byte) 0x80;
/**
* The largest value that fits into an unsigned byte.
*
* @since 13.0
*/
public static final byte MAX_VALUE = (byte) 0xFF;
private static final int UNSIGNED_MASK = 0xFF;
/**
* Returns the value of the given byte as an integer, when treated as unsigned. That is, returns
* {@code value + 256} if {@code value} is negative; {@code value} itself otherwise.
*
* <p>Prefer {@link Byte#toUnsignedInt(byte)} instead.
*
* @since 6.0
*/
@InlineMe(replacement = "Byte.toUnsignedInt(value)")
public static int toInt(byte value) {
return Byte.toUnsignedInt(value);
}
/**
* Returns the {@code byte} value that, when treated as unsigned, is equal to {@code value}, if
* possible.
*
* @param value a value between 0 and 255 inclusive
* @return the {@code byte} value that, when treated as unsigned, equals {@code value}
* @throws IllegalArgumentException if {@code value} is negative or greater than 255
*/
@CanIgnoreReturnValue
public static byte checkedCast(long value) {
checkArgument(value >> Byte.SIZE == 0, "out of range: %s", value);
return (byte) value;
}
/**
* Returns the {@code byte} value that, when treated as unsigned, is nearest in value to {@code
* value}.
*
* @param value any {@code long} value
* @return {@code (byte) 255} if {@code value >= 255}, {@code (byte) 0} if {@code value <= 0}, and
* {@code value} cast to {@code byte} otherwise
*/
public static byte saturatedCast(long value) {
if (value > toUnsignedInt(MAX_VALUE)) {
return MAX_VALUE; // -1
}
if (value < 0) {
return (byte) 0;
}
return (byte) value;
}
/**
* Compares the two specified {@code byte} values, treating them as unsigned values between 0 and
* 255 inclusive. For example, {@code (byte) -127} is considered greater than {@code (byte) 127}
* because it is seen as having the value of positive {@code 129}.
*
* @param a the first {@code byte} to compare
* @param b the second {@code byte} to compare
* @return a negative value if {@code a} is less than {@code b}; a positive value if {@code a} is
* greater than {@code b}; or zero if they are equal
*/
public static int compare(byte a, byte b) {
return toUnsignedInt(a) - toUnsignedInt(b);
}
/**
* Returns the least value present in {@code array}, treating values as unsigned.
*
* @param array a <i>nonempty</i> array of {@code byte} values
* @return the value present in {@code array} that is less than or equal to every other value in
* the array according to {@link #compare}
* @throws IllegalArgumentException if {@code array} is empty
*/
public static byte min(byte... array) {
checkArgument(array.length > 0);
int min = toUnsignedInt(array[0]);
for (int i = 1; i < array.length; i++) {
int next = toUnsignedInt(array[i]);
if (next < min) {
min = next;
}
}
return (byte) min;
}
/**
* Returns the greatest value present in {@code array}, treating values as unsigned.
*
* @param array a <i>nonempty</i> array of {@code byte} values
* @return the value present in {@code array} that is greater than or equal to every other value
* in the array according to {@link #compare}
* @throws IllegalArgumentException if {@code array} is empty
*/
public static byte max(byte... array) {
checkArgument(array.length > 0);
int max = toUnsignedInt(array[0]);
for (int i = 1; i < array.length; i++) {
int next = toUnsignedInt(array[i]);
if (next > max) {
max = next;
}
}
return (byte) max;
}
/**
* Returns a string representation of x, where x is treated as unsigned.
*
* @since 13.0
*/
public static String toString(byte x) {
return toString(x, 10);
}
/**
* Returns a string representation of {@code x} for the given radix, where {@code x} is treated as
* unsigned.
*
* @param x the value to convert to a string.
* @param radix the radix to use while working with {@code x}
* @throws IllegalArgumentException if {@code radix} is not between {@link Character#MIN_RADIX}
* and {@link Character#MAX_RADIX}.
* @since 13.0
*/
public static String toString(byte x, int radix) {
checkArgument(
radix >= Character.MIN_RADIX && radix <= Character.MAX_RADIX,
"radix (%s) must be between Character.MIN_RADIX and Character.MAX_RADIX",
radix);
// Benchmarks indicate this is probably not worth optimizing.
return Integer.toString(toUnsignedInt(x), radix);
}
/**
* Returns the unsigned {@code byte} value represented by the given decimal string.
*
* @throws NumberFormatException if the string does not contain a valid unsigned {@code byte}
* value
* @throws NullPointerException if {@code string} is null (in contrast to {@link
* Byte#parseByte(String)})
* @since 13.0
*/
@CanIgnoreReturnValue
public static byte parseUnsignedByte(String string) {
return parseUnsignedByte(string, 10);
}
/**
* Returns the unsigned {@code byte} value represented by a string with the given radix.
*
* @param string the string containing the unsigned {@code byte} representation to be parsed.
* @param radix the radix to use while parsing {@code string}
* @throws NumberFormatException if the string does not contain a valid unsigned {@code byte} with
* the given radix, or if {@code radix} is not between {@link Character#MIN_RADIX} and {@link
* Character#MAX_RADIX}.
* @throws NullPointerException if {@code string} is null (in contrast to {@link
* Byte#parseByte(String)})
* @since 13.0
*/
@CanIgnoreReturnValue
public static byte parseUnsignedByte(String string, int radix) {
int parse = Integer.parseInt(checkNotNull(string), radix);
// We need to throw a NumberFormatException, so we have to duplicate checkedCast. =(
if (parse >> Byte.SIZE == 0) {
return (byte) parse;
} else {
throw new NumberFormatException("out of range: " + parse);
}
}
/**
* Returns a string containing the supplied {@code byte} values separated by {@code separator}.
* For example, {@code join(":", (byte) 1, (byte) 2, (byte) 255)} returns the string {@code
* "1:2:255"}.
*
* @param separator the text that should appear between consecutive values in the resulting string
* (but not at the start or end)
* @param array an array of {@code byte} values, possibly empty
*/
public static String join(String separator, byte... array) {
checkNotNull(separator);
if (array.length == 0) {
return "";
}
// For pre-sizing a builder, just get the right order of magnitude
StringBuilder builder = new StringBuilder(array.length * (3 + separator.length()));
builder.append(toUnsignedInt(array[0]));
for (int i = 1; i < array.length; i++) {
builder.append(separator).append(toString(array[i]));
}
return builder.toString();
}
/**
* Returns a comparator that compares two {@code byte} arrays <a
* href="http://en.wikipedia.org/wiki/Lexicographical_order">lexicographically</a>. That is, it
* compares, using {@link #compare(byte, byte)}), the first pair of values that follow any common
* prefix, or when one array is a prefix of the other, treats the shorter array as the lesser. For
* example, {@code [] < [0x01] < [0x01, 0x7F] < [0x01, 0x80] < [0x02]}. Values are treated as
* unsigned.
*
* <p>The returned comparator is inconsistent with {@link Object#equals(Object)} (since arrays
* support only identity equality), but it is consistent with {@link
* java.util.Arrays#equals(byte[], byte[])}.
*
* <p><b>Java 9+ users:</b> Use {@link Arrays#compareUnsigned(byte[], byte[])
* Arrays::compareUnsigned}.
*
* @since 2.0
*/
public static Comparator<byte[]> lexicographicalComparator() {
return LexicographicalComparatorHolder.BEST_COMPARATOR;
}
@VisibleForTesting
static Comparator<byte[]> lexicographicalComparatorJavaImpl() {
return LexicographicalComparatorHolder.PureJavaComparator.INSTANCE;
}
/**
* Provides a lexicographical comparator implementation; either a Java implementation or a faster
* implementation based on {@link Unsafe}.
*
* <p>Uses reflection to gracefully fall back to the Java implementation if {@code Unsafe} isn't
* available.
*/
@VisibleForTesting
static final
|
UnsignedBytes
|
java
|
redisson__redisson
|
redisson/src/main/java/org/redisson/api/search/index/TagIndex.java
|
{
"start": 726,
"end": 2243
}
|
interface ____ extends FieldIndex {
/**
* Defines the attribute associated to the field name
*
* @param as the associated attribute
* @return options object
*/
TagIndex as(String as);
/**
* Defines separator used for splitting the value of this attribute.
* The separator value must be a single character.
* <p>
* Default is <code>,</code>
*
* @param separator separator value
* @return options object
*/
TagIndex separator(String separator);
/**
* Defines sort mode applied to the value of this attribute
*
* @param sortMode sort mode
* @return options object
*/
TagIndex sortMode(SortMode sortMode);
/**
* Defines whether to keep the original letter cases of the tags.
* If not defined, the characters are converted to lowercase.
*
* @return options object
*/
TagIndex caseSensitive();
/**
* Defines to not index this attribute
*
* @return options object
*/
TagIndex noIndex();
/**
* Defines whether to keep a suffix trie with all terms which match the suffix.
*
* @return options object
*/
TagIndex withSuffixTrie();
/**
* Defines whether to index an empty value.
*
* @return options object
*/
TagIndex indexEmpty();
/**
* Defines to index documents that don't contain this attribute
*
* @return options object
*/
TagIndex indexMissing();
}
|
TagIndex
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/DuplicateBindingsValidationTest.java
|
{
"start": 10276,
"end": 11513
}
|
interface ____ {",
" Set<String> getStringSet();",
" }",
"}");
CompilerTests.daggerCompiler(component)
.withProcessingOptions(
ImmutableMap.<String, String>builder()
.putAll(fullBindingGraphValidationOption())
.buildOrThrow())
.compile(
subject -> {
String errorMessage =
message(
"Set<String> has incompatible bindings or declarations:",
" Set bindings and declarations:",
" @Binds @IntoSet String "
+ "Outer.TestModule1.bindStringSetElement(@Outer.SomeQualifier String)",
" @Provides @IntoSet String "
+ "Outer.TestModule1.stringSetElement()",
" Unique bindings and declarations:",
" @Provides Set<String> Outer.TestModule2.stringSet()");
if (fullBindingGraphValidation) {
subject.hasErrorCount(2);
subject.hasErrorContaining(errorMessage)
.onSource(component)
.onLineContaining("
|
TestComponent
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/SimpleAsyncExecutionController.java
|
{
"start": 3831,
"end": 6110
}
|
class ____<K> implements AsyncExecutor<RunnableTask<K, ?>> {
private final ExecutorService taskExecutorService;
private final boolean managedExecutor;
public TaskExecutor(ExecutorService taskExecutorService) {
this.taskExecutorService = taskExecutorService;
this.managedExecutor = false;
}
@Override
public CompletableFuture<Void> executeBatchRequests(
AsyncRequestContainer<RunnableTask<K, ?>> asyncRequestContainer) {
if (asyncRequestContainer.isEmpty()) {
return CompletableFuture.completedFuture(null);
}
RunnableContainer<K> container = (RunnableContainer<K>) asyncRequestContainer;
LinkedList<RunnableTask<K, ?>> requests = container.requests;
while (!requests.isEmpty()) {
RunnableTask<K, ?> request = requests.poll();
if (request != null) {
taskExecutorService.submit(
() -> {
try {
request.run();
} catch (Exception e) {
request.getFuture()
.completeExceptionally("Async task failed.", e);
}
});
}
}
return CompletableFuture.completedFuture(null);
}
@Override
public AsyncRequestContainer<RunnableTask<K, ?>> createRequestContainer() {
return new RunnableContainer<>();
}
@Override
public void executeRequestSync(RunnableTask<K, ?> asyncRequest) {
try {
asyncRequest.run();
} catch (Exception e) {
asyncRequest.getFuture().completeExceptionally("Task failed.", e);
}
}
@Override
public boolean fullyLoaded() {
// Implementation for checking if fully loaded
return false;
}
@Override
public void shutdown() {
if (managedExecutor) {
taskExecutorService.shutdown();
}
}
static
|
TaskExecutor
|
java
|
apache__camel
|
core/camel-base-engine/src/main/java/org/apache/camel/impl/engine/DefaultBackOffTimer.java
|
{
"start": 1387,
"end": 2919
}
|
class ____ extends ServiceSupport implements BackOffTimer {
private final CamelContext camelContext;
private final ScheduledExecutorService scheduler;
private final String name;
private final Set<Task> tasks = new CopyOnWriteArraySet<>();
public DefaultBackOffTimer(CamelContext camelContext, String name, ScheduledExecutorService scheduler) {
this.camelContext = camelContext;
this.scheduler = scheduler;
this.name = name;
}
@Override
public Task schedule(BackOff backOff, ThrowingFunction<Task, Boolean, Exception> function) {
final BackOffTimerTask task = new BackOffTimerTask(this, backOff, scheduler, function);
long delay = task.next();
if (delay != BackOff.NEVER) {
tasks.add(task);
scheduler.schedule(task, delay, TimeUnit.MILLISECONDS);
} else {
task.cancel();
}
return task;
}
@Override
public String getName() {
return name;
}
@Override
public void remove(Task task) {
tasks.remove(task);
}
@Override
public Set<Task> getTasks() {
return Collections.unmodifiableSet(tasks);
}
@Override
public int size() {
return tasks.size();
}
@Override
protected void doStart() throws Exception {
camelContext.addService(this);
}
@Override
protected void doStop() throws Exception {
tasks.clear();
camelContext.removeService(this);
}
}
|
DefaultBackOffTimer
|
java
|
bumptech__glide
|
instrumentation/src/main/java/com/bumptech/glide/test/GlideWithBeforeSuperOnCreateActivity.java
|
{
"start": 217,
"end": 564
}
|
class ____ extends FragmentActivity {
@Override
protected void onCreate(@Nullable Bundle savedInstanceState) {
Glide.with(this);
super.onCreate(savedInstanceState);
setContentView(new TextView(this));
}
@Override
protected void onResume() {
super.onResume();
Glide.with(this);
}
}
|
GlideWithBeforeSuperOnCreateActivity
|
java
|
netty__netty
|
codec-http2/src/main/java/io/netty/handler/codec/http2/Http2DataFrame.java
|
{
"start": 783,
"end": 1773
}
|
interface ____ extends Http2StreamFrame, ByteBufHolder {
/**
* Frame padding to use. Will be non-negative and less than 256.
*/
int padding();
/**
* Payload of DATA frame. Will not be {@code null}.
*/
@Override
ByteBuf content();
/**
* Returns the number of bytes that are flow-controlled initially, so even if the {@link #content()} is consumed
* this will not change.
*/
int initialFlowControlledBytes();
/**
* Returns {@code true} if the END_STREAM flag is set.
*/
boolean isEndStream();
@Override
Http2DataFrame copy();
@Override
Http2DataFrame duplicate();
@Override
Http2DataFrame retainedDuplicate();
@Override
Http2DataFrame replace(ByteBuf content);
@Override
Http2DataFrame retain();
@Override
Http2DataFrame retain(int increment);
@Override
Http2DataFrame touch();
@Override
Http2DataFrame touch(Object hint);
}
|
Http2DataFrame
|
java
|
elastic__elasticsearch
|
modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/RomanianAnalyzerProvider.java
|
{
"start": 1422,
"end": 3364
}
|
class ____ extends AbstractIndexAnalyzerProvider<StopwordAnalyzerBase> {
private final StopwordAnalyzerBase analyzer;
RomanianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(name);
CharArraySet stopwords = Analysis.parseStopWords(env, settings, RomanianAnalyzer.getDefaultStopSet());
CharArraySet stemExclusionSet = Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET);
if (indexSettings.getIndexVersionCreated().onOrAfter(IndexVersions.UPGRADE_TO_LUCENE_10_0_0)) {
// since Lucene 10, this analyzer a modern unicode form and normalizes cedilla forms to forms with commas
analyzer = new RomanianAnalyzer(stopwords, stemExclusionSet);
} else {
// for older index versions we need the old behaviour without normalization
analyzer = new StopwordAnalyzerBase(Analysis.parseStopWords(env, settings, RomanianAnalyzer.getDefaultStopSet())) {
protected Analyzer.TokenStreamComponents createComponents(String fieldName) {
final Tokenizer source = new StandardTokenizer();
TokenStream result = new LowerCaseFilter(source);
result = new StopFilter(result, stopwords);
if (stemExclusionSet.isEmpty() == false) {
result = new SetKeywordMarkerFilter(result, stemExclusionSet);
}
result = new SnowballFilter(result, new LegacyRomanianStemmer());
return new TokenStreamComponents(source, result);
}
protected TokenStream normalize(String fieldName, TokenStream in) {
return new LowerCaseFilter(in);
}
};
}
}
@Override
public StopwordAnalyzerBase get() {
return this.analyzer;
}
}
|
RomanianAnalyzerProvider
|
java
|
quarkusio__quarkus
|
integration-tests/test-extension/tests/src/main/java/io/quarkus/it/extension/EnvironmentVariableTestEndpoint.java
|
{
"start": 364,
"end": 709
}
|
class ____ extends HttpServlet {
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {
DummyMapping dummyMapping = Arc.container().select(DummyMapping.class).get();
resp.getWriter().write(dummyMapping.name() + "-" + dummyMapping.age());
}
}
|
EnvironmentVariableTestEndpoint
|
java
|
spring-projects__spring-framework
|
spring-context/src/main/java/org/springframework/stereotype/Indexed.java
|
{
"start": 2985,
"end": 3197
}
|
interface ____ is indexed, it will be
* automatically included with the {@code com.example.AdminService} stereotype.
* If there are more {@code @Indexed} interfaces and/or superclasses in the
* hierarchy, the
|
that
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongKeyedBucketOrds.java
|
{
"start": 6414,
"end": 7364
}
|
interface ____ {
/**
* Advance to the next value.
* @return {@code true} if there *is* a next value,
* {@code false} if there isn't
*/
boolean next();
/**
* The ordinal of the current value.
*/
long ord();
/**
* The current value.
*/
long value();
/**
* An {@linkplain BucketOrdsEnum} that is empty.
*/
BucketOrdsEnum EMPTY = new BucketOrdsEnum() {
@Override
public boolean next() {
return false;
}
@Override
public long ord() {
return 0;
}
@Override
public long value() {
return 0;
}
};
}
/**
* Implementation that only works if it is collecting from a single bucket.
*/
public static
|
BucketOrdsEnum
|
java
|
mybatis__mybatis-3
|
src/test/java/org/apache/ibatis/submitted/lazy_deserialize/LazyObjectFoo.java
|
{
"start": 812,
"end": 1271
}
|
class ____ implements Serializable {
private static final long serialVersionUID = 1L;
private Integer id;
private LazyObjectBar lazyObjectBar;
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public LazyObjectBar getLazyObjectBar() {
return this.lazyObjectBar;
}
public void setLazyObjectBar(final LazyObjectBar lazyObjectBar) {
this.lazyObjectBar = lazyObjectBar;
}
}
|
LazyObjectFoo
|
java
|
elastic__elasticsearch
|
x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/aggregations/metrics/AggregateMetricBackedAvgAggregator.java
|
{
"start": 1610,
"end": 5791
}
|
class ____ extends NumericMetricsAggregator.SingleValue {
final AggregateMetricsValuesSource.AggregateMetricDouble valuesSource;
LongArray counts;
DoubleArray sums;
DoubleArray compensations;
DocValueFormat format;
AggregateMetricBackedAvgAggregator(
String name,
ValuesSourceConfig valuesSourceConfig,
AggregationContext context,
Aggregator parent,
Map<String, Object> metadata
) throws IOException {
super(name, context, parent, metadata);
assert valuesSourceConfig.hasValues();
this.valuesSource = (AggregateMetricsValuesSource.AggregateMetricDouble) valuesSourceConfig.getValuesSource();
final BigArrays bigArrays = context.bigArrays();
counts = bigArrays.newLongArray(1, true);
sums = bigArrays.newDoubleArray(1, true);
compensations = bigArrays.newDoubleArray(1, true);
this.format = valuesSourceConfig.format();
}
@Override
public ScoreMode scoreMode() {
return valuesSource.needsScores() ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES;
}
@Override
public LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx, final LeafBucketCollector sub) throws IOException {
final BigArrays bigArrays = bigArrays();
// Retrieve aggregate values for metrics sum and value_count
final SortedNumericDoubleValues aggregateSums = valuesSource.getAggregateMetricValues(aggCtx.getLeafReaderContext(), Metric.sum);
final SortedNumericDoubleValues aggregateValueCounts = valuesSource.getAggregateMetricValues(
aggCtx.getLeafReaderContext(),
Metric.value_count
);
final CompensatedSum kahanSummation = new CompensatedSum(0, 0);
return new LeafBucketCollectorBase(sub, sums) {
@Override
public void collect(int doc, long bucket) throws IOException {
sums = bigArrays.grow(sums, bucket + 1);
compensations = bigArrays.grow(compensations, bucket + 1);
// Read aggregate values for sums
if (aggregateSums.advanceExact(doc)) {
// Compute the sum of double values with Kahan summation algorithm which is more
// accurate than naive summation.
double sum = sums.get(bucket);
double compensation = compensations.get(bucket);
kahanSummation.reset(sum, compensation);
for (int i = 0; i < aggregateSums.docValueCount(); i++) {
double value = aggregateSums.nextValue();
kahanSummation.add(value);
}
sums.set(bucket, kahanSummation.value());
compensations.set(bucket, kahanSummation.delta());
}
counts = bigArrays.grow(counts, bucket + 1);
// Read aggregate values for value_count
if (aggregateValueCounts.advanceExact(doc)) {
for (int i = 0; i < aggregateValueCounts.docValueCount(); i++) {
double d = aggregateValueCounts.nextValue();
long value = Double.valueOf(d).longValue();
counts.increment(bucket, value);
}
}
}
};
}
@Override
public double metric(long owningBucketOrd) {
if (owningBucketOrd >= sums.size()) {
return Double.NaN;
}
return sums.get(owningBucketOrd) / counts.get(owningBucketOrd);
}
@Override
public InternalAggregation buildAggregation(long bucket) {
if (bucket >= sums.size()) {
return buildEmptyAggregation();
}
return new InternalAvg(name, sums.get(bucket), counts.get(bucket), format, metadata());
}
@Override
public InternalAggregation buildEmptyAggregation() {
return InternalAvg.empty(name, format, metadata());
}
@Override
public void doClose() {
Releasables.close(counts, sums, compensations);
}
}
|
AggregateMetricBackedAvgAggregator
|
java
|
apache__flink
|
flink-state-backends/flink-statebackend-common/src/test/java/org/apache/flink/state/common/PeriodicMaterializationManagerTest.java
|
{
"start": 1504,
"end": 3168
}
|
class ____ {
void testInitialDelay() {
ManuallyTriggeredScheduledExecutorService scheduledExecutorService =
new ManuallyTriggeredScheduledExecutorService();
long periodicMaterializeDelay = 10_000L;
try (PeriodicMaterializationManager test =
new PeriodicMaterializationManager(
new SyncMailboxExecutor(),
newDirectExecutorService(),
"test",
(message, exception) -> {},
MaterializationTarget.NO_OP,
new ChangelogMaterializationMetricGroup(
UnregisteredMetricGroups.createUnregisteredOperatorMetricGroup()),
true,
periodicMaterializeDelay,
0,
"subtask-0",
scheduledExecutorService)) {
test.start();
assertThat(
getOnlyElement(
scheduledExecutorService
.getAllScheduledTasks()
.iterator())
.getDelay(MILLISECONDS))
.as(
String.format(
"task for initial materialization should be scheduled with a 0..%d delay",
periodicMaterializeDelay))
.isLessThanOrEqualTo(periodicMaterializeDelay);
}
}
}
|
PeriodicMaterializationManagerTest
|
java
|
spring-projects__spring-framework
|
spring-test/src/main/java/org/springframework/test/web/servlet/assertj/ModelAssert.java
|
{
"start": 5578,
"end": 5804
}
|
class ____ extends AbstractBindingResultAssert<BindingResultAssert> {
public BindingResultAssert(String name, BindingResult bindingResult) {
super(name, bindingResult, BindingResultAssert.class);
}
}
}
|
BindingResultAssert
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerResourceLocalizedEvent.java
|
{
"start": 1065,
"end": 1593
}
|
class ____ extends ContainerResourceEvent {
private final Path loc;
// > 0: downloaded
// < 0: cached
//
private long size;
public ContainerResourceLocalizedEvent(ContainerId container, LocalResourceRequest rsrc,
Path loc) {
super(container, ContainerEventType.RESOURCE_LOCALIZED, rsrc);
this.loc = loc;
}
public Path getLocation() {
return loc;
}
public long getSize() {
return size;
}
public void setSize(long size) {
this.size = size;
}
}
|
ContainerResourceLocalizedEvent
|
java
|
apache__camel
|
components/camel-whatsapp/src/main/java/org/apache/camel/component/whatsapp/model/Name.java
|
{
"start": 917,
"end": 2223
}
|
class ____ {
@JsonProperty("formatted_name")
private String formattedName;
@JsonProperty("first_name")
private String firstName;
@JsonProperty("last_name")
private String lastName;
@JsonProperty("middle_name")
private String middleName;
private String suffix;
private String prefix;
public Name() {
}
public String getFormattedName() {
return formattedName;
}
public void setFormattedName(String formattedName) {
this.formattedName = formattedName;
}
public String getFirstName() {
return firstName;
}
public void setFirstName(String firstName) {
this.firstName = firstName;
}
public String getLastName() {
return lastName;
}
public void setLastName(String lastName) {
this.lastName = lastName;
}
public String getMiddleName() {
return middleName;
}
public void setMiddleName(String middleName) {
this.middleName = middleName;
}
public String getSuffix() {
return suffix;
}
public void setSuffix(String suffix) {
this.suffix = suffix;
}
public String getPrefix() {
return prefix;
}
public void setPrefix(String prefix) {
this.prefix = prefix;
}
}
|
Name
|
java
|
eclipse-vertx__vert.x
|
vertx-core/src/main/java/io/vertx/core/http/impl/http2/Http2ServerStream.java
|
{
"start": 930,
"end": 2097
}
|
interface ____ extends Http2Stream {
static Http2ServerStream create(Http2ServerConnection connection,
HttpServerMetrics serverMetrics,
Object socketMetric,
ContextInternal context,
HttpRequestHeaders requestHeaders,
HttpMethod method,
String uri,
TracingPolicy tracingPolicy,
int promisedId) {
return new DefaultHttp2ServerStream(
connection,
serverMetrics,
socketMetric,
context,
requestHeaders,
method,
uri,
tracingPolicy,
promisedId);
}
static Http2ServerStream create(
Http2ServerConnection connection,
HttpServerMetrics<?, ?, ?> serverMetrics,
Object socketMetric,
ContextInternal context,
TracingPolicy tracingPolicy) {
return new DefaultHttp2ServerStream(connection, serverMetrics, socketMetric, context, tracingPolicy);
}
HttpHeaders headers();
HttpServerStream unwrap();
}
|
Http2ServerStream
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/ConnectorUpdateActionResponse.java
|
{
"start": 1100,
"end": 2422
}
|
class ____ extends ActionResponse implements ToXContentObject {
final DocWriteResponse.Result result;
public ConnectorUpdateActionResponse(StreamInput in) throws IOException {
result = DocWriteResponse.Result.readFrom(in);
}
public ConnectorUpdateActionResponse(DocWriteResponse.Result result) {
this.result = result;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
this.result.writeTo(out);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field("result", this.result.getLowercase());
builder.endObject();
return builder;
}
public RestStatus status() {
return switch (result) {
case NOT_FOUND -> RestStatus.NOT_FOUND;
default -> RestStatus.OK;
};
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ConnectorUpdateActionResponse that = (ConnectorUpdateActionResponse) o;
return Objects.equals(result, that.result);
}
@Override
public int hashCode() {
return Objects.hash(result);
}
}
|
ConnectorUpdateActionResponse
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocationfile/UserSettings.java
|
{
"start": 1062,
"end": 1756
}
|
class ____ {
private final String username;
private final Integer maxRunningApps;
UserSettings(Builder builder) {
this.username = builder.username;
this.maxRunningApps = builder.maxRunningApps;
}
public String render() {
StringWriter sw = new StringWriter();
PrintWriter pw = new PrintWriter(sw);
addStartTag(pw);
AllocationFileWriter.addIfPresent(pw, "maxRunningApps", maxRunningApps);
addEndTag(pw);
pw.close();
return sw.toString();
}
private void addStartTag(PrintWriter pw) {
pw.println("<user name=\"" + username + "\">");
}
private void addEndTag(PrintWriter pw) {
pw.println("</user>");
}
/**
* Builder
|
UserSettings
|
java
|
junit-team__junit5
|
junit-platform-engine/src/main/java/org/junit/platform/engine/discovery/ClassSelector.java
|
{
"start": 1901,
"end": 2684
}
|
class ____ implements DiscoverySelector {
private final @Nullable ClassLoader classLoader;
private final String className;
private @Nullable Class<?> javaClass;
ClassSelector(@Nullable ClassLoader classLoader, String className) {
this.className = className;
this.classLoader = classLoader;
}
ClassSelector(Class<?> javaClass) {
this.className = javaClass.getName();
this.classLoader = javaClass.getClassLoader();
this.javaClass = javaClass;
}
/**
* Get the {@link ClassLoader} used to load the selected class.
*
* @return the {@code ClassLoader}; potentially {@code null}
* @since 1.10
*/
@API(status = MAINTAINED, since = "1.13.3")
public @Nullable ClassLoader getClassLoader() {
return this.classLoader;
}
/**
* Get the selected
|
ClassSelector
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/streaming/api/functions/sink/legacy/RichSinkFunction.java
|
{
"start": 1266,
"end": 1416
}
|
class ____<IN> extends AbstractRichFunction
implements SinkFunction<IN> {
private static final long serialVersionUID = 1L;
}
|
RichSinkFunction
|
java
|
quarkusio__quarkus
|
extensions/tls-registry/deployment/src/test/java/io/quarkus/tls/DefaultKeyStoreProviderTest.java
|
{
"start": 1075,
"end": 2276
}
|
class ____ {
private static final String configuration = """
# no configuration by default
""";
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest().setArchiveProducer(
() -> ShrinkWrap.create(JavaArchive.class)
.add(new StringAsset(configuration), "application.properties"));
@Inject
TlsConfigurationRegistry certificates;
@Test
void test() throws KeyStoreException, CertificateParsingException {
TlsConfiguration def = certificates.getDefault().orElseThrow();
assertThat(def.getKeyStoreOptions()).isNotNull();
assertThat(def.getKeyStore()).isNotNull();
// dummy-entry-x is the alias of the certificate in the keystore generated by Vert.x.
X509Certificate certificate = (X509Certificate) def.getKeyStore().getCertificate("dummy-entry-0");
assertThat(certificate).isNotNull();
assertThat(certificate.getSubjectAlternativeNames()).anySatisfy(l -> {
assertThat(l.get(0)).isEqualTo(2);
assertThat(l.get(1)).isEqualTo("localhost");
});
}
@ApplicationScoped
static
|
DefaultKeyStoreProviderTest
|
java
|
junit-team__junit5
|
junit-jupiter-params/src/main/java/org/junit/jupiter/params/provider/FieldArgumentsProvider.java
|
{
"start": 1463,
"end": 6445
}
|
class ____ extends AnnotationBasedArgumentsProvider<FieldSource> {
@Override
protected Stream<? extends Arguments> provideArguments(ParameterDeclarations parameters, ExtensionContext context,
FieldSource fieldSource) {
Class<?> testClass = context.getRequiredTestClass();
Object testInstance = context.getTestInstance().orElse(null);
String[] fieldNames = fieldSource.value();
if (fieldNames.length == 0) {
Optional<Method> testMethod = context.getTestMethod();
Preconditions.condition(testMethod.isPresent(),
"You must specify a field name when using @FieldSource with @ParameterizedClass");
fieldNames = new String[] { testMethod.get().getName() };
}
// @formatter:off
return stream(fieldNames)
.map(fieldName -> findField(testClass, fieldName))
.map(field -> validateField(field, testInstance))
.map(field -> readField(field, testInstance))
.flatMap(fieldValue -> {
if (fieldValue instanceof Supplier<?> supplier) {
fieldValue = supplier.get();
}
return CollectionUtils.toStream(fieldValue);
})
.map(ArgumentsUtils::toArguments);
// @formatter:on
}
// package-private for testing
static Field findField(Class<?> testClass, String fieldName) {
Preconditions.notBlank(fieldName, "Field name must not be blank");
fieldName = fieldName.strip();
Class<?> clazz = testClass;
if (fieldName.contains("#") || fieldName.contains(".")) {
String[] fieldParts = ReflectionUtils.parseFullyQualifiedFieldName(fieldName);
String className = fieldParts[0];
fieldName = fieldParts[1];
ClassLoader classLoader = ClassLoaderUtils.getClassLoader(testClass);
clazz = ReflectionUtils.loadRequiredClass(className, classLoader);
}
Class<?> resolvedClass = clazz;
String resolvedFieldName = fieldName;
Predicate<Field> nameMatches = field -> field.getName().equals(resolvedFieldName);
Field field = ReflectionUtils.streamFields(resolvedClass, nameMatches, HierarchyTraversalMode.BOTTOM_UP)//
.findFirst()//
.orElse(null);
return Preconditions.notNull(field,
() -> "Could not find field named [%s] in class [%s]".formatted(resolvedFieldName,
resolvedClass.getName()));
}
private static Field validateField(Field field, @Nullable Object testInstance) {
Preconditions.condition(field.getDeclaringClass().isInstance(testInstance) || ModifierSupport.isStatic(field),
() -> """
Field '%s' must be static: local @FieldSource fields must be static \
unless the PER_CLASS @TestInstance lifecycle mode is used; \
external @FieldSource fields must always be static.""".formatted(field.toGenericString()));
return field;
}
private static Object readField(Field field, @Nullable Object testInstance) {
Object value = ReflectionSupport.tryToReadFieldValue(field, testInstance).getOrThrow(
cause -> new JUnitException("Could not read field [%s]".formatted(field.getName()), cause));
String fieldName = field.getName();
String declaringClass = field.getDeclaringClass().getName();
Preconditions.notNull(value,
() -> "The value of field [%s] in class [%s] must not be null".formatted(fieldName, declaringClass));
Preconditions.condition(!(value instanceof BaseStream),
() -> "The value of field [%s] in class [%s] must not be a stream".formatted(fieldName, declaringClass));
Preconditions.condition(!(value instanceof Iterator),
() -> "The value of field [%s] in class [%s] must not be an Iterator".formatted(fieldName, declaringClass));
Preconditions.condition(isConvertibleToStream(field, value),
() -> "The value of field [%s] in class [%s] must be convertible to a Stream".formatted(fieldName,
declaringClass));
return value;
}
/**
* Determine if the supplied value can be converted into a {@code Stream} or
* if the declared type of the supplied field is a {@link Supplier} of a type
* that can be converted into a {@code Stream}.
*/
private static boolean isConvertibleToStream(Field field, Object value) {
// Check actual value type.
if (CollectionUtils.isConvertibleToStream(value.getClass())) {
return true;
}
// Check declared type T of Supplier<T>.
if (Supplier.class.isAssignableFrom(field.getType())) {
Type genericType = field.getGenericType();
if (genericType instanceof ParameterizedType parameterizedType) {
Type[] typeArguments = parameterizedType.getActualTypeArguments();
if (typeArguments.length == 1) {
Type type = typeArguments[0];
// Handle cases such as Supplier<IntStream>
if (type instanceof Class<?> clazz) {
return CollectionUtils.isConvertibleToStream(clazz);
}
// Handle cases such as Supplier<Stream<String>>
if (type instanceof ParameterizedType innerParameterizedType) {
Type rawType = innerParameterizedType.getRawType();
if (rawType instanceof Class<?> clazz) {
return CollectionUtils.isConvertibleToStream(clazz);
}
}
}
}
}
return false;
}
}
|
FieldArgumentsProvider
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/index/shard/LiveDocsEstimationTests.java
|
{
"start": 876,
"end": 3727
}
|
class ____ extends IndexShardTestCase {
@Override
protected Settings nodeSettings() {
return Settings.builder().put(DiscoveryNode.STATELESS_ENABLED_SETTING_NAME, true).build();
}
public void testShardFieldStatsWithDeletes() throws IOException {
Settings settings = Settings.builder()
.put(MergePolicyConfig.INDEX_MERGE_ENABLED, false)
.put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.MINUS_ONE)
.build();
IndexShard shard = newShard(true, settings);
assertNull(shard.getShardFieldStats());
recoverShardFromStore(shard);
// index some documents
int numDocs = 10;
for (int i = 0; i < numDocs; i++) {
indexDoc(shard, "_doc", "first_" + i, """
{
"f1": "foo",
"f2": "bar"
}
""");
}
shard.refresh("test");
var stats = shard.getShardFieldStats();
assertThat(stats.numSegments(), equalTo(1));
assertThat(stats.liveDocsBytes(), equalTo(0L));
// delete a doc
deleteDoc(shard, "first_0");
// Refresh and fetch new stats:
shard.refresh("test");
stats = shard.getShardFieldStats();
// More segments because delete operation is stored in the new segment for replication purposes.
assertThat(stats.numSegments(), equalTo(2));
long expectedLiveDocsSize = 0;
// Delete op is stored in new segment, but marked as deleted. All segements have live docs:
expectedLiveDocsSize += new FixedBitSet(numDocs).ramBytesUsed();
// Second segment the delete operation that is marked as deleted:
expectedLiveDocsSize += new FixedBitSet(1).ramBytesUsed();
assertThat(stats.liveDocsBytes(), equalTo(expectedLiveDocsSize));
// delete another doc:
deleteDoc(shard, "first_1");
shard.getMinRetainedSeqNo();
// Refresh and fetch new stats:
shard.refresh("test");
stats = shard.getShardFieldStats();
// More segments because delete operation is stored in the new segment for replication purposes.
assertThat(stats.numSegments(), equalTo(3));
expectedLiveDocsSize = 0;
// Delete op is stored in new segment, but marked as deleted. All segements have live docs:
// First segment with deletes
expectedLiveDocsSize += new FixedBitSet(numDocs).ramBytesUsed();
// Second and third segments the delete operation that is marked as deleted:
expectedLiveDocsSize += new FixedBitSet(1).ramBytesUsed();
expectedLiveDocsSize += new FixedBitSet(1).ramBytesUsed();
assertThat(stats.liveDocsBytes(), equalTo(expectedLiveDocsSize));
closeShards(shard);
}
}
|
LiveDocsEstimationTests
|
java
|
apache__camel
|
components/camel-micrometer-observability/src/main/java/org/apache/camel/micrometer/observability/MicrometerObservabilityTracer.java
|
{
"start": 2200,
"end": 4990
}
|
class ____ extends org.apache.camel.telemetry.Tracer {
private static final Logger LOG = LoggerFactory.getLogger(MicrometerObservabilityTracer.class);
private Tracer tracer;
private ObservationRegistry observationRegistry;
private Propagator propagator;
public Tracer getTracer() {
return tracer;
}
public void setTracer(Tracer tracer) {
this.tracer = tracer;
}
public ObservationRegistry getObservationRegistry() {
return observationRegistry;
}
public void setObservationRegistry(ObservationRegistry observationRegistry) {
this.observationRegistry = observationRegistry;
}
public Propagator getPropagator() {
return propagator;
}
public void setPropagator(Propagator propagator) {
this.propagator = propagator;
}
@Override
protected void initTracer() {
if (tracer == null) {
tracer = CamelContextHelper.findSingleByType(getCamelContext(), Tracer.class);
}
if (tracer == null) {
tracer = new SimpleTracer();
LOG.warn("No tracer was provided. A default inmemory tracer is used. " +
"This can be useful for development only, avoid this in a production environment.");
}
if (observationRegistry == null) {
observationRegistry = CamelContextHelper.findSingleByType(getCamelContext(), ObservationRegistry.class);
}
if (observationRegistry == null) {
MeterRegistry meterRegistry = new SimpleMeterRegistry();
this.observationRegistry = ObservationRegistry.create();
this.observationRegistry.observationConfig().observationHandler(
new DefaultMeterObservationHandler(meterRegistry));
LOG.warn("No observation registry was provided. A default inmemory observation registry is used. " +
"This can be useful for development only, avoid this in a production environment.");
}
if (propagator == null) {
propagator = CamelContextHelper.findSingleByType(getCamelContext(), Propagator.class);
}
if (propagator == null) {
propagator = Propagator.NOOP;
LOG.warn("No propagator was provided. A NOOP implementation is used, you won't be able to trace " +
"upstream activity. " +
"This can be useful for development only, avoid this in a production environment.");
}
this.setSpanLifecycleManager(new MicrometerObservabilitySpanLifecycleManager());
}
@Override
protected void doStart() throws Exception {
super.doStart();
LOG.info("Micrometer Observability enabled");
}
private
|
MicrometerObservabilityTracer
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/ops/MergeExplicitInitialVersionTest.java
|
{
"start": 1590,
"end": 1687
}
|
class ____ {
@Id
long id = 5;
@Version
int version = 1;
String text;
}
@Entity
static
|
F
|
java
|
spring-projects__spring-framework
|
spring-expression/src/test/java/org/springframework/expression/spel/MethodInvocationTests.java
|
{
"start": 20754,
"end": 20907
}
|
class ____ {
public int doit(int i) {
return i;
}
@Anno
public String doit(double d) {
return "double "+d;
}
}
public static
|
TestObject
|
java
|
quarkusio__quarkus
|
core/deployment/src/main/java/io/quarkus/runner/bootstrap/ForkJoinClassLoading.java
|
{
"start": 204,
"end": 2427
}
|
class ____ {
private static final Logger log = Logger.getLogger(ForkJoinClassLoading.class.getName());
/**
* A yucky hack, basically attempt to make sure every thread in the common pool has
* the correct CL.
* <p>
* It's not perfect, but as this only affects test and dev mode and not production it is better
* than nothing.
* <p>
* Really we should just not use the common pool at all.
* <p>
* TODO: This no longer works in Java 25 because the `ForkJoinPool` now does
* <a href=
* "https://github.com/openjdk/jdk/blob/jdk-25%2B36/src/java.base/share/classes/java/util/concurrent/ForkJoinPool.java#L2085">
* this</a>, which ends up calling <a href=
* "https://github.com/openjdk/jdk/blob/jdk-25%2B36/src/java.base/share/classes/java/util/concurrent/ForkJoinWorkerThread.java#L280">this</a>.
* We need to figure out how to deal with this
*/
public static void setForkJoinClassLoader(ClassLoader classLoader) {
CountDownLatch allDone = new CountDownLatch(ForkJoinPool.getCommonPoolParallelism());
CountDownLatch taskRelease = new CountDownLatch(1);
for (int i = 0; i < ForkJoinPool.getCommonPoolParallelism(); ++i) {
ForkJoinPool.commonPool().execute(new Runnable() {
@Override
public void run() {
Thread.currentThread().setContextClassLoader(classLoader);
allDone.countDown();
try {
taskRelease.await();
} catch (InterruptedException e) {
log.error("Failed to set fork join ClassLoader", e);
}
}
});
}
try {
if (!allDone.await(1, TimeUnit.SECONDS)) {
log.error(
"Timed out trying to set fork join ClassLoader, this should never happen unless something has tied up a fork join thread before the app launched");
}
} catch (InterruptedException e) {
log.error("Failed to set fork join ClassLoader", e);
} finally {
taskRelease.countDown();
}
}
}
|
ForkJoinClassLoading
|
java
|
apache__flink
|
flink-table/flink-table-common/src/test/java/org/apache/flink/table/utils/TableSchemaUtilsTest.java
|
{
"start": 1803,
"end": 6444
}
|
class ____ {
@Test
void testBuilderWithGivenSchema() {
TableSchema oriSchema =
TableSchema.builder()
.field("a", DataTypes.INT().notNull())
.field("b", DataTypes.STRING())
.field("c", DataTypes.INT(), "a + 1")
.field("t", DataTypes.TIMESTAMP(3))
.primaryKey("ct1", new String[] {"a"})
.watermark("t", "t", DataTypes.TIMESTAMP(3))
.build();
TableSchema newSchema = TableSchemaUtils.builderWithGivenSchema(oriSchema).build();
assertThat(newSchema).isEqualTo(oriSchema);
}
@Test
void testDropConstraint() {
TableSchema originalSchema =
TableSchema.builder()
.field("a", DataTypes.INT().notNull())
.field("b", DataTypes.STRING())
.field("c", DataTypes.INT(), "a + 1")
.field("t", DataTypes.TIMESTAMP(3))
.primaryKey("ct1", new String[] {"a"})
.watermark("t", "t", DataTypes.TIMESTAMP(3))
.build();
TableSchema newSchema = TableSchemaUtils.dropConstraint(originalSchema, "ct1");
TableSchema expectedSchema =
TableSchema.builder()
.field("a", DataTypes.INT().notNull())
.field("b", DataTypes.STRING())
.field("c", DataTypes.INT(), "a + 1")
.field("t", DataTypes.TIMESTAMP(3))
.watermark("t", "t", DataTypes.TIMESTAMP(3))
.build();
assertThat(newSchema).isEqualTo(expectedSchema);
// Drop non-exist constraint.
assertThatThrownBy(() -> TableSchemaUtils.dropConstraint(originalSchema, "ct2"))
.isInstanceOf(ValidationException.class)
.hasMessage("Constraint ct2 to drop does not exist");
}
@Test
void testRemoveTimeAttribute() {
DataType rowTimeType =
DataTypeUtils.replaceLogicalType(
DataTypes.TIMESTAMP(3), new TimestampType(true, TimestampKind.ROWTIME, 3));
ResolvedSchema schema =
new ResolvedSchema(
Arrays.asList(
Column.physical("id", DataTypes.INT().notNull()),
Column.physical("t", rowTimeType),
Column.computed(
"date",
ResolvedExpressionMock.of(DataTypes.DATE(), "TO_DATE(t)")),
Column.metadata("metadata-1", DataTypes.INT(), "metadata", false)),
Collections.singletonList(
WatermarkSpec.of("t", ResolvedExpressionMock.of(rowTimeType, "t"))),
UniqueConstraint.primaryKey("test-pk", Collections.singletonList("id")),
Collections.singletonList(
DefaultIndex.newIndex("idx", Collections.singletonList("id"))));
assertThat(TableSchemaUtils.removeTimeAttributeFromResolvedSchema(schema))
.isEqualTo(
new ResolvedSchema(
Arrays.asList(
Column.physical("id", DataTypes.INT().notNull()),
Column.physical("t", DataTypes.TIMESTAMP(3)),
Column.computed(
"date",
new ResolvedExpressionMock(
DataTypes.DATE(), () -> "TO_DATE(t)")),
Column.metadata(
"metadata-1", DataTypes.INT(), "metadata", false)),
Collections.singletonList(
WatermarkSpec.of(
"t", ResolvedExpressionMock.of(rowTimeType, "t"))),
UniqueConstraint.primaryKey(
"test-pk", Collections.singletonList("id")),
Collections.singletonList(
DefaultIndex.newIndex(
"idx", Collections.singletonList("id")))));
}
}
|
TableSchemaUtilsTest
|
java
|
google__guice
|
core/test/com/google/inject/TypeListenerTest.java
|
{
"start": 21060,
"end": 24688
}
|
class ____.
*/
@Test
public void testTypesWithNoInjectableMembersAreNotified() {
final AtomicInteger notificationCount = new AtomicInteger();
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
bindListener(
onlyAbcd,
new TypeListener() {
@Override
public <I> void hear(TypeLiteral<I> type, TypeEncounter<I> encounter) {
notificationCount.incrementAndGet();
}
});
bind(C.class).toInstance(new C());
}
});
assertEquals(1, notificationCount.get());
}
@Test
public void testEncounterCannotBeUsedAfterHearReturns() {
final AtomicReference<TypeEncounter<?>> encounterReference =
new AtomicReference<TypeEncounter<?>>();
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
bindListener(
any(),
new TypeListener() {
@Override
public <I> void hear(TypeLiteral<I> type, TypeEncounter<I> encounter) {
encounterReference.set(encounter);
}
});
bind(C.class);
}
});
TypeEncounter<?> encounter = encounterReference.get();
try {
encounter.register(
new InjectionListener<Object>() {
@Override
public void afterInjection(Object injectee) {}
});
fail();
} catch (IllegalStateException expected) {
}
try {
encounter.bindInterceptor(
any(),
new MethodInterceptor() {
@Override
public Object invoke(MethodInvocation methodInvocation) throws Throwable {
return methodInvocation.proceed();
}
});
fail();
} catch (IllegalStateException expected) {
}
try {
encounter.addError(new Exception());
fail();
} catch (IllegalStateException expected) {
}
try {
encounter.getMembersInjector(A.class);
fail();
} catch (IllegalStateException expected) {
}
try {
encounter.getProvider(B.class);
fail();
} catch (IllegalStateException expected) {
}
}
@Test
public void testAddErrors() {
try {
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
requestInjection(new Object());
bindListener(
Matchers.any(),
new TypeListener() {
@Override
public <I> void hear(TypeLiteral<I> type, TypeEncounter<I> encounter) {
encounter.addError("There was an error on %s", type);
encounter.addError(new IllegalArgumentException("whoops!"));
encounter.addError(new Message("And another problem"));
encounter.addError(new IllegalStateException());
}
});
}
});
fail();
} catch (CreationException expected) {
assertContains(
expected.getMessage(),
"1) There was an error on Object",
"2) [Guice/ErrorInUserCode]: An exception was caught and reported. Message: whoops!",
"3) And another problem",
"4) [Guice/ErrorInUserCode]: An exception was caught and reported. Message: null",
"4 errors");
}
}
private static
|
constructor
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/StringConcatToTextBlockTest.java
|
{
"start": 2055,
"end": 2227
}
|
class ____ {
String s = "hello\\n" + "world\\n";
}
""")
.addOutputLines(
"Test.java",
"""
|
Test
|
java
|
apache__dubbo
|
dubbo-cluster/src/test/java/org/apache/dubbo/rpc/cluster/filter/DemoService.java
|
{
"start": 855,
"end": 944
}
|
interface ____ {
String sayHello(String name);
int plus(int a, int b);
}
|
DemoService
|
java
|
apache__camel
|
components/camel-twitter/src/main/java/org/apache/camel/component/twitter/timeline/MentionsConsumerHandler.java
|
{
"start": 1104,
"end": 1754
}
|
class ____ extends AbstractStatusConsumerHandler {
public MentionsConsumerHandler(TwitterEndpoint endpoint) {
super(endpoint);
}
@Override
protected List<Status> doPoll() throws TwitterException {
Paging paging = getLastIdPaging();
log.trace("doPoll.getMentionsTimeline(sinceId={})", paging.sinceId);
return getTwitter().v1().timelines().getMentionsTimeline(paging);
}
@Override
protected List<Status> doDirect() throws TwitterException {
log.trace("doDirect.getMentionsTimeline()");
return getTwitter().v1().timelines().getMentionsTimeline();
}
}
|
MentionsConsumerHandler
|
java
|
spring-projects__spring-framework
|
spring-core/src/main/java/org/springframework/util/ClassUtils.java
|
{
"start": 19325,
"end": 19595
}
|
class ____ a primitive, void, or
* a wrapper class
*/
public static boolean isPrimitiveOrWrapper(Class<?> clazz) {
Assert.notNull(clazz, "Class must not be null");
return (clazz.isPrimitive() || isPrimitiveWrapper(clazz));
}
/**
* Check if the given
|
represents
|
java
|
greenrobot__greendao
|
DaoCore/src/main/java/org/greenrobot/greendao/test/DbTest.java
|
{
"start": 1064,
"end": 1381
}
|
class ____ database related testing, which prepares an in-memory or an file-based DB (using the test {@link
* android.content.Context}). Also, offers some convenience methods to create new {@link Application} objects similar
* to {@link android.test.ApplicationTestCase}.
* <p/>
* Unlike ApplicationTestCase, this
|
for
|
java
|
quarkusio__quarkus
|
extensions/arc/runtime/src/main/java/io/quarkus/arc/lookup/LookupUnlessProperty.java
|
{
"start": 1682,
"end": 2304
}
|
interface ____ {
/**
* Name of the runtime time property to check
*/
String name();
/**
* Expected {@code String} value of the runtime time property (specified by {@code name}) if the bean should be skipped at
* runtime.
*/
String stringValue();
/**
* Determines if the bean should be looked up when the property name specified by {@code name} has not been specified at
* all
*/
boolean lookupIfMissing() default false;
@Retention(RetentionPolicy.RUNTIME)
@Target({ ElementType.METHOD, ElementType.TYPE, ElementType.FIELD })
@
|
LookupUnlessProperty
|
java
|
apache__camel
|
components/camel-workday/src/test/java/org/apache/camel/WorkdayCommonAPIProducerTest.java
|
{
"start": 1403,
"end": 8619
}
|
class ____ extends CamelTestSupport {
@Test
public void createProducerMinimalConfiguration() throws Exception {
WorkdayComponent workdayComponent = context.getComponent("workday", WorkdayComponent.class);
WorkdayEndpoint workdayEndpoint = (WorkdayEndpoint) workdayComponent
.createEndpoint("workday:commonAPI:/workers?" + "host=impl.workday.com" + "&tenant=camel"
+ "&clientId=f7014d38-99d2-4969-b740-b5b62db6b46a"
+ "&clientSecret=7dbaf280-3cea-11ea-b77f-2e728ce88125" + "&tokenRefresh=88689ab63cda"
+ "&reportFormat=json");
WorkdayConfiguration workdayConfiguration = workdayEndpoint.getWorkdayConfiguration();
assertEquals(WorkdayConfiguration.Entity.commonAPI, workdayConfiguration.getEntity());
assertEquals("/workers", workdayConfiguration.getPath());
assertEquals("impl.workday.com", workdayConfiguration.getHost());
assertEquals("camel", workdayConfiguration.getTenant());
assertEquals("f7014d38-99d2-4969-b740-b5b62db6b46a", workdayConfiguration.getClientId());
assertEquals("7dbaf280-3cea-11ea-b77f-2e728ce88125", workdayConfiguration.getClientSecret());
assertEquals("88689ab63cda", workdayConfiguration.getTokenRefresh());
}
@Test
public void createProducerNoHostConfiguration() {
WorkdayComponent workdayComponent = context.getComponent("workday", WorkdayComponent.class);
try {
WorkdayEndpoint workdayEndpoint = (WorkdayEndpoint) workdayComponent
.createEndpoint(
"workday:commonAPI:/workers?" + "tenant=camel" + "&clientId=f7014d38-99d2-4969-b740-b5b62db6b46a"
+ "&clientSecret=7dbaf280-3cea-11ea-b77f-2e728ce88125" + "&tokenRefresh=88689ab63cda"
+ "&format=json");
} catch (Exception exception) {
assertEquals(exception.getClass(), IllegalArgumentException.class);
assertEquals("Host must be specified", exception.getMessage());
return;
}
fail("Required parameters validation failed.");
}
@Test
public void createProducerInvalidAPIConfiguration() throws Exception {
WorkdayComponent workdayComponent = context.getComponent("workday", WorkdayComponent.class);
WorkdayEndpoint workdayEndpoint = (WorkdayEndpoint) workdayComponent
.createEndpoint("workday:commonAPI:/worker?" + "host=impl.workday.com" + "&tenant=camel"
+ "&clientId=f7014d38-99d2-4969-b740-b5b62db6b46a"
+ "&clientSecret=7dbaf280-3cea-11ea-b77f-2e728ce88125" + "&tokenRefresh=88689ab63cda"
+ "&format=json");
WorkdayCommonAPIProducer workdayProducer = new WorkdayCommonAPIProducer(workdayEndpoint);
try {
String workdayUri = workdayProducer.prepareUri(workdayEndpoint.getWorkdayConfiguration());
} catch (Exception exception) {
assertEquals(exception.getClass(), MalformedURLException.class);
assertEquals("An invalid Workday Common endpoint: '/worker' was provided.", exception.getMessage());
return;
}
fail("Required parameters validation failed.");
}
@Test
public void createProducerWorkersValidAPIConfiguration() throws Exception {
WorkdayComponent workdayComponent = context.getComponent("workday", WorkdayComponent.class);
WorkdayEndpoint workdayEndpoint = (WorkdayEndpoint) workdayComponent
.createEndpoint("workday:commonAPI:/workers?" + "host=impl.workday.com" + "&tenant=camel"
+ "&clientId=f7014d38-99d2-4969-b740-b5b62db6b46a"
+ "&clientSecret=7dbaf280-3cea-11ea-b77f-2e728ce88125" + "&tokenRefresh=88689ab63cda"
+ "&format=json");
WorkdayCommonAPIProducer workdayProducer = new WorkdayCommonAPIProducer(workdayEndpoint);
String workdayUri = workdayProducer.prepareUri(workdayEndpoint.getWorkdayConfiguration());
assertEquals("https://impl.workday.com/ccx/api/v1/camel/workers", workdayUri);
}
@Test
public void createProducerPayslipByIDValidAPIConfiguration() throws Exception {
WorkdayComponent workdayComponent = context.getComponent("workday", WorkdayComponent.class);
WorkdayEndpoint workdayEndpoint = (WorkdayEndpoint) workdayComponent
.createEndpoint(
"workday:commonAPI:/workers/4ab56f4b34c4b4a2be3e4f5a732c2343/paySlips/4ab56f4c39c4b4a2bf3e4f5a732c2343?"
+ "host=impl.workday.com" + "&tenant=camel" + "&clientId=f7014d38-99d2-4969-b740-b5b62db6b46a"
+ "&clientSecret=7dbaf280-3cea-11ea-b77f-2e728ce88125" + "&tokenRefresh=88689ab63cda"
+ "&format=json");
WorkdayCommonAPIProducer workdayProducer = new WorkdayCommonAPIProducer(workdayEndpoint);
String workdayUri = workdayProducer.prepareUri(workdayEndpoint.getWorkdayConfiguration());
assertEquals(
"https://impl.workday.com/ccx/api/v1/camel/workers/4ab56f4b34c4b4a2be3e4f5a732c2343/paySlips/4ab56f4c39c4b4a2bf3e4f5a732c2343",
workdayUri);
}
@Test
public void createProducerCurrenciesValidConfiguration() throws Exception {
WorkdayComponent workdayComponent = context.getComponent("workday", WorkdayComponent.class);
WorkdayEndpoint workdayEndpoint = (WorkdayEndpoint) workdayComponent
.createEndpoint("workday:commonAPI:/currencies?" + "host=impl.workday.com" + "&tenant=camel"
+ "&clientId=f7014d38-99d2-4969-b740-b5b62db6b46a"
+ "&clientSecret=7dbaf280-3cea-11ea-b77f-2e728ce88125" + "&tokenRefresh=88689ab63cda"
+ "&format=json");
WorkdayCommonAPIProducer workdayProducer = new WorkdayCommonAPIProducer(workdayEndpoint);
workdayProducer.prepareUri(workdayEndpoint.getWorkdayConfiguration());
}
@Test
public void createProducerCurrenciesInvalidIDConfiguration() throws Exception {
WorkdayComponent workdayComponent = context.getComponent("workday", WorkdayComponent.class);
WorkdayEndpoint workdayEndpoint = (WorkdayEndpoint) workdayComponent
.createEndpoint("workday:commonAPI:/currencies/4ab56f4b34c4b4a2be3g4f5a732c2343?" + "host=impl.workday.com"
+ "&tenant=camel" + "&clientId=f7014d38-99d2-4969-b740-b5b62db6b46a"
+ "&clientSecret=7dbaf280-3cea-11ea-b77f-2e728ce88125" + "&tokenRefresh=88689ab63cda"
+ "&format=json");
WorkdayCommonAPIProducer workdayProducer = new WorkdayCommonAPIProducer(workdayEndpoint);
assertThrows(MalformedURLException.class,
() -> workdayProducer.prepareUri(workdayEndpoint.getWorkdayConfiguration()));
}
}
|
WorkdayCommonAPIProducerTest
|
java
|
apache__dubbo
|
dubbo-common/src/main/java/org/apache/dubbo/common/resource/GlobalResourceInitializer.java
|
{
"start": 1072,
"end": 2144
}
|
class ____<T> extends CallableSafeInitializer<T> {
/**
* The Dispose action to be executed on shutdown.
*/
private Consumer<T> disposeAction;
private Disposable disposable;
public GlobalResourceInitializer(Callable<T> initializer) {
super(initializer);
}
public GlobalResourceInitializer(Callable<T> initializer, Consumer<T> disposeAction) {
super(initializer);
this.disposeAction = disposeAction;
}
public GlobalResourceInitializer(Callable<T> callable, Disposable disposable) {
super(callable);
this.disposable = disposable;
}
@Override
protected T initialize() {
T value = super.initialize();
// register disposable to release automatically
if (this.disposable != null) {
GlobalResourcesRepository.getInstance().registerDisposable(this.disposable);
} else {
GlobalResourcesRepository.getInstance().registerDisposable(() -> this.remove(disposeAction));
}
return value;
}
}
|
GlobalResourceInitializer
|
java
|
apache__kafka
|
jmh-benchmarks/src/main/java/org/apache/kafka/jmh/clients/Murmur2Benchmark.java
|
{
"start": 2855,
"end": 3426
}
|
class ____ {
@Param
public TestCase testCase;
}
@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
@Warmup(iterations = 5, time = 1000, timeUnit = MILLISECONDS)
@Measurement(iterations = 20, time = 1000, timeUnit = MILLISECONDS)
@Fork(value = 1, warmups = 0)
public void hashBytes(TestCaseState testCaseState, Blackhole blackhole) {
var data = testCaseState.testCase.data;
for (byte[] b : data) {
blackhole.consume(Utils.murmur2(b));
}
}
}
|
TestCaseState
|
java
|
apache__camel
|
components/camel-google/camel-google-sheets/src/generated/java/org/apache/camel/component/google/sheets/GoogleSheetsEndpointUriFactory.java
|
{
"start": 523,
"end": 4576
}
|
class ____ extends org.apache.camel.support.component.EndpointUriFactorySupport implements EndpointUriFactory {
private static final String BASE = ":apiName/methodName";
private static final Set<String> PROPERTY_NAMES;
private static final Set<String> SECRET_PROPERTY_NAMES;
private static final Map<String, String> MULTI_VALUE_PREFIXES;
static {
Set<String> props = new HashSet<>(54);
props.add("accessToken");
props.add("apiName");
props.add("applicationName");
props.add("backoffErrorThreshold");
props.add("backoffIdleThreshold");
props.add("backoffMultiplier");
props.add("batchClearValuesRequest");
props.add("batchGetValuesByDataFilterRequest");
props.add("batchUpdateSpreadsheetRequest");
props.add("batchUpdateValuesByDataFilterRequest");
props.add("batchUpdateValuesRequest");
props.add("bridgeErrorHandler");
props.add("clearValuesRequest");
props.add("clientId");
props.add("clientSecret");
props.add("content");
props.add("dateTimeRenderOption");
props.add("delay");
props.add("delegate");
props.add("exceptionHandler");
props.add("exchangePattern");
props.add("excludeTablesInBandedRanges");
props.add("getSpreadsheetByDataFilterRequest");
props.add("greedy");
props.add("inBody");
props.add("includeGridData");
props.add("includeValuesInResponse");
props.add("initialDelay");
props.add("insertDataOption");
props.add("lazyStartProducer");
props.add("majorDimension");
props.add("methodName");
props.add("pollStrategy");
props.add("range");
props.add("ranges");
props.add("refreshToken");
props.add("repeatCount");
props.add("responseDateTimeRenderOption");
props.add("responseValueRenderOption");
props.add("runLoggingLevel");
props.add("scheduledExecutorService");
props.add("scheduler");
props.add("schedulerProperties");
props.add("scopes");
props.add("sendEmptyMessageWhenIdle");
props.add("serviceAccountKey");
props.add("splitResult");
props.add("spreadsheetId");
props.add("startScheduler");
props.add("timeUnit");
props.add("useFixedDelay");
props.add("valueInputOption");
props.add("valueRenderOption");
props.add("values");
PROPERTY_NAMES = Collections.unmodifiableSet(props);
Set<String> secretProps = new HashSet<>(3);
secretProps.add("accessToken");
secretProps.add("clientSecret");
secretProps.add("refreshToken");
SECRET_PROPERTY_NAMES = Collections.unmodifiableSet(secretProps);
Map<String, String> prefixes = new HashMap<>(1);
prefixes.put("schedulerProperties", "scheduler.");
MULTI_VALUE_PREFIXES = Collections.unmodifiableMap(prefixes);
}
@Override
public boolean isEnabled(String scheme) {
return "google-sheets".equals(scheme);
}
@Override
public String buildUri(String scheme, Map<String, Object> properties, boolean encode) throws URISyntaxException {
String syntax = scheme + BASE;
String uri = syntax;
Map<String, Object> copy = new HashMap<>(properties);
uri = buildPathParameter(syntax, uri, "apiName", null, true, copy);
uri = buildPathParameter(syntax, uri, "methodName", null, true, copy);
uri = buildQueryParameters(uri, copy, encode);
return uri;
}
@Override
public Set<String> propertyNames() {
return PROPERTY_NAMES;
}
@Override
public Set<String> secretPropertyNames() {
return SECRET_PROPERTY_NAMES;
}
@Override
public Map<String, String> multiValuePrefixes() {
return MULTI_VALUE_PREFIXES;
}
@Override
public boolean isLenientProperties() {
return false;
}
}
|
GoogleSheetsEndpointUriFactory
|
java
|
elastic__elasticsearch
|
qa/smoke-test-http/src/internalClusterTest/java/org/elasticsearch/http/NodeStatsRestCancellationIT.java
|
{
"start": 682,
"end": 948
}
|
class ____ extends BlockedSearcherRestCancellationTestCase {
public void testNodeStatsRestCancellation() throws Exception {
runTest(new Request(HttpGet.METHOD_NAME, "/_nodes/stats"), TransportNodesStatsAction.TYPE.name());
}
}
|
NodeStatsRestCancellationIT
|
java
|
apache__camel
|
components/camel-mail/src/main/java/org/apache/camel/component/mail/MailUtils.java
|
{
"start": 1187,
"end": 7921
}
|
class ____ {
public static final int DEFAULT_PORT_SMTP = 25;
public static final int DEFAULT_PORT_SMTPS = 465;
public static final int DEFAULT_PORT_POP3 = 110;
public static final int DEFAULT_PORT_POP3S = 995;
public static final int DEFAULT_PORT_NNTP = 119;
public static final int DEFAULT_PORT_IMAP = 143;
public static final int DEFAULT_PORT_IMAPS = 993;
public static final String PROTOCOL_SMTP = "smtp";
public static final String PROTOCOL_SMTPS = "smtps";
public static final String PROTOCOL_POP3 = "pop3";
public static final String PROTOCOL_POP3S = "pop3s";
public static final String PROTOCOL_NNTP = "nntp";
public static final String PROTOCOL_IMAP = "imap";
public static final String PROTOCOL_IMAPS = "imaps";
private MailUtils() {
}
/**
* Returns the default port for a given protocol.
* <p>
* If a protocol could not successfully be determined the default port number for SMTP protocol is returned.
*
* @param protocol the protocol
* @return the default port
*/
public static int getDefaultPortForProtocol(final String protocol) {
int port = DEFAULT_PORT_SMTP;
if (protocol != null) {
if (protocol.equalsIgnoreCase(PROTOCOL_IMAP)) {
port = DEFAULT_PORT_IMAP;
} else if (protocol.equalsIgnoreCase(PROTOCOL_IMAPS)) {
port = DEFAULT_PORT_IMAPS;
} else if (protocol.equalsIgnoreCase(PROTOCOL_NNTP)) {
port = DEFAULT_PORT_NNTP;
} else if (protocol.equalsIgnoreCase(PROTOCOL_POP3)) {
port = DEFAULT_PORT_POP3;
} else if (protocol.equalsIgnoreCase(PROTOCOL_POP3S)) {
port = DEFAULT_PORT_POP3S;
} else if (protocol.equalsIgnoreCase(PROTOCOL_SMTP)) {
port = DEFAULT_PORT_SMTP;
} else if (protocol.equalsIgnoreCase(PROTOCOL_SMTPS)) {
port = DEFAULT_PORT_SMTPS;
} else {
port = DEFAULT_PORT_SMTP;
}
}
return port;
}
/**
* Gets a log dump of the given message that can be used for tracing etc.
*
* @param message the Mail message
* @return a log string with important fields dumped
*/
public static String dumpMessage(Message message) {
if (message == null) {
return "null";
}
try {
StringBuilder sb = new StringBuilder();
int number = message.getMessageNumber();
sb.append("messageNumber=[").append(number).append("]");
Address[] from = message.getFrom();
if (from != null) {
for (Address adr : from) {
sb.append(", from=[").append(adr).append("]");
}
}
Address[] to = message.getRecipients(Message.RecipientType.TO);
if (to != null) {
for (Address adr : to) {
sb.append(", to=[").append(adr).append("]");
}
}
String subject = message.getSubject();
if (subject != null) {
sb.append(", subject=[").append(subject).append("]");
}
Date sentDate = message.getSentDate();
if (sentDate != null) {
sb.append(", sentDate=[").append(DateFormat.getDateTimeInstance().format(sentDate)).append("]");
}
Date receivedDate = message.getReceivedDate();
if (receivedDate != null) {
sb.append(", receivedDate=[").append(DateFormat.getDateTimeInstance().format(receivedDate)).append("]");
}
return sb.toString();
} catch (MessagingException e) {
// ignore the error and just return tostring
return message.toString();
}
}
/**
* Pads the content-type so it has a space after semi colon that separate pairs.
* <p/>
* This is needed as some mail servers will choke otherwise
*
* @param contentType the content type
* @return the padded content type
*/
public static String padContentType(String contentType) {
StringBuilder sb = new StringBuilder();
String[] parts = contentType.split(";");
for (int i = 0; i < parts.length; i++) {
String part = parts[i];
if (ObjectHelper.isNotEmpty(part)) {
part = part.trim();
sb.append(part);
if (i < parts.length - 1) {
sb.append("; ");
}
}
}
return sb.toString();
}
/**
* Replaces the charset in the content-type
*
* @param contentType the content-type
* @param charset the charset to replace, can be <tt>null</tt> to remove charset
* @return the content-type with replaced charset
*/
public static String replaceCharSet(String contentType, String charset) {
boolean replaced = false;
StringBuilder sb = new StringBuilder();
String[] parts = contentType.split(";");
for (String part : parts) {
part = part.trim();
if (!part.startsWith("charset")) {
part = part.trim();
if (sb.length() > 0) {
sb.append("; ");
}
sb.append(part);
} else if (charset != null) {
// replace with new charset
if (sb.length() > 0) {
sb.append("; ");
}
sb.append("charset=");
sb.append(charset);
replaced = true;
}
}
// if we did not replace any existing charset, then append new charset at the end
if (!replaced && charset != null) {
if (sb.length() > 0) {
sb.append("; ");
}
sb.append("charset=");
sb.append(charset);
}
return sb.toString();
}
/**
* Gets the charset from the content-type
*
* @param contentType the content-type
* @return the charset, or <tt>null</tt> if no charset existed
*/
public static String getCharSetFromContentType(String contentType) {
if (contentType == null) {
return null;
}
String[] parts = contentType.split(";");
for (String part : parts) {
part = part.trim();
if (part.startsWith("charset")) {
return StringHelper.after(part, "charset=");
}
}
return null;
}
}
|
MailUtils
|
java
|
resilience4j__resilience4j
|
resilience4j-circularbuffer/src/main/java/io/github/resilience4j/circularbuffer/ConcurrentEvictingQueue.java
|
{
"start": 10816,
"end": 11976
}
|
class ____ implements Iterator<E> {
private int visitedCount = 0;
private int cursor;
private int expectedModificationsCount;
Iter(final int headIndex, final int modificationsCount) {
this.cursor = headIndex;
this.expectedModificationsCount = modificationsCount;
}
@Override
public boolean hasNext() {
return visitedCount < size;
}
@Override
@SuppressWarnings("unchecked")
public E next() {
Supplier<E> nextElement = () -> {
checkForModification();
if (visitedCount >= size) {
throw new NoSuchElementException();
}
E item = (E) ringBuffer[cursor];
cursor = nextIndex(cursor);
visitedCount++;
return item;
};
return readConcurrently(nextElement);
}
private void checkForModification() {
if (modificationsCount != expectedModificationsCount) {
throw new ConcurrentModificationException();
}
}
}
}
|
Iter
|
java
|
redisson__redisson
|
redisson/src/main/java/org/redisson/api/RLocalCachedMap.java
|
{
"start": 1036,
"end": 2786
}
|
interface ____<K, V> extends RMap<K, V> {
/**
* Pre-warm the cached entries. Not guaranteed to load ALL values, but statistically
* will preload approximately all (all if no concurrent mutating activity).
* Entries are loaded in a batch with size of 10 elements.
*/
void preloadCache();
/**
* Pre-warm the cached entries. Not guaranteed to load ALL values, but statistically
* will preload approximately all (all if no concurrent mutating activity)
* Entries are loaded in a batch. Batch size is defined by <code>count</code> param.
*
* @param count - size of batch
*/
void preloadCache(int count);
/**
* Clears local cache across all instances
*
* @return void
*/
RFuture<Void> clearLocalCacheAsync();
/**
* Clears local cache across all instances
*/
void clearLocalCache();
/**
* Returns all keys stored in local cache
*
* @return keys
*/
Set<K> cachedKeySet();
/**
* Returns all values stored in local cache
*
* @return values
*/
Collection<V> cachedValues();
/**
* Returns all map entries stored in local cache
*
* @return entries
*/
Set<Entry<K, V>> cachedEntrySet();
/**
* Returns state of local cache
*
* @return map
*/
Map<K, V> getCachedMap();
/**
* Adds local cache event listener
*
* @see org.redisson.api.listener.LocalCacheUpdateListener
* @see org.redisson.api.listener.LocalCacheInvalidateListener
*
* @param listener - local cache event listener
* @return listener id
*/
@Override
int addListener(ObjectListener listener);
}
|
RLocalCachedMap
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AboutBlock.java
|
{
"start": 1196,
"end": 1809
}
|
class ____ extends HtmlBlock {
@Inject
AboutBlock(View.ViewContext ctx) {
super(ctx);
}
@Override
protected void render(Block html) {
TimelineAbout tsInfo = TimelineUtils.createTimelineAbout(
"Timeline Server - Generic History Service UI");
info("Timeline Server Overview").
__("Timeline Server Version:", tsInfo.getTimelineServiceBuildVersion() +
" on " + tsInfo.getTimelineServiceVersionBuiltOn()).
__("Hadoop Version:", tsInfo.getHadoopBuildVersion() +
" on " + tsInfo.getHadoopVersionBuiltOn());
html.__(InfoBlock.class);
}
}
|
AboutBlock
|
java
|
apache__camel
|
components/camel-zookeeper-master/src/main/java/org/apache/camel/component/zookeepermaster/CamelNodeState.java
|
{
"start": 985,
"end": 1607
}
|
class ____ extends NodeState {
@JsonProperty
String consumer;
@JsonProperty
boolean started;
public CamelNodeState() {
}
public CamelNodeState(String id) {
super(id);
}
public CamelNodeState(String id, String container) {
super(id, container);
}
public String getConsumer() {
return consumer;
}
public void setConsumer(String consumer) {
this.consumer = consumer;
}
public boolean isStarted() {
return started;
}
public void setStarted(boolean started) {
this.started = started;
}
}
|
CamelNodeState
|
java
|
apache__kafka
|
clients/src/main/java/org/apache/kafka/common/telemetry/internals/TelemetryMetricNamingConvention.java
|
{
"start": 1109,
"end": 1386
}
|
class ____ naming and mapping conventions defined as part of
* <a href="https://cwiki.apache.org/confluence/display/KAFKA/KIP-714%3A+Client+metrics+and+observability#KIP714:Clientmetricsandobservability-Metricsnamingandformat">Metrics naming and format</a>
*/
public
|
encapsulates
|
java
|
google__guava
|
android/guava/src/com/google/common/base/Converter.java
|
{
"start": 3684,
"end": 5341
}
|
class ____ implement its {@link #doForward} and {@link #doBackward} methods.
* </ul>
*
* <p>Using a converter:
*
* <ul>
* <li>Convert one instance in the "forward" direction using {@code converter.convert(a)}.
* <li>Convert multiple instances "forward" using {@code converter.convertAll(as)}.
* <li>Convert in the "backward" direction using {@code converter.reverse().convert(b)} or {@code
* converter.reverse().convertAll(bs)}.
* <li>Use {@code converter} or {@code converter.reverse()} anywhere a {@link
* java.util.function.Function} is accepted (for example {@link java.util.stream.Stream#map
* Stream.map}).
* <li><b>Do not</b> call {@link #doForward} or {@link #doBackward} directly; these exist only to
* be overridden.
* </ul>
*
* <h3>Example</h3>
*
* {@snippet :
* return Converter.from(
* Integer::toHexString,
* s -> parseUnsignedInt(s, 16));
* }
*
* <p>An alternative using a subclass:
*
* {@snippet :
* return new Converter<Integer, String>() {
* @Override
* protected String doForward(Integer i) {
* return Integer.toHexString(i);
* }
*
* @Override
* protected Integer doBackward(String s) {
* return parseUnsignedInt(s, 16);
* }
* }
* }
*
* @author Mike Ward
* @author Kurt Alfred Kluever
* @author Gregory Kick
* @since 16.0
*/
@GwtCompatible
/*
* 1. The type parameter is <T> rather than <T extends @Nullable> so that we can use T in the
* doForward and doBackward methods to indicate that the parameter cannot be null. (We also take
* advantage of that for convertAll, as discussed on that method.)
*
* 2. The supertype of this
|
and
|
java
|
google__guice
|
core/src/com/google/inject/internal/BytecodeGen.java
|
{
"start": 1564,
"end": 1900
}
|
class ____ heavy use of {@link Function} and {@link BiFunction} types when interacting
* with generated fast-classes and enhanced proxies. This is a deliberate design decision to avoid
* using Guice-specific types in the generated classes. This means generated classes can be defined
* in the same {@link ClassLoader} as their host
|
makes
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/common/compress/Compressor.java
|
{
"start": 818,
"end": 3086
}
|
interface ____ {
boolean isCompressed(BytesReference bytes);
/**
* Same as {@link #threadLocalInputStream(InputStream)} but wraps the returned stream as a {@link StreamInput}.
*/
default StreamInput threadLocalStreamInput(InputStream in) throws IOException {
// wrap stream in buffer since InputStreamStreamInput doesn't do any buffering itself but does a lot of small reads
return new InputStreamStreamInput(new BufferedInputStream(threadLocalInputStream(in), DeflateCompressor.BUFFER_SIZE) {
@Override
public int read() throws IOException {
// override read to avoid synchronized single byte reads now that JEP374 removed biased locking
if (pos >= count) {
return super.read();
}
return buf[pos++] & 0xFF;
}
});
}
/**
* Creates a new input stream that decompresses the contents read from the provided input stream.
* Closing the returned {@link InputStream} will close the provided stream input.
* Note: The returned stream may only be used on the thread that created it as it might use thread-local resources and must be safely
* closed after use
*/
InputStream threadLocalInputStream(InputStream in) throws IOException;
/**
* Creates a new output stream that compresses the contents and writes to the provided output stream.
* Closing the returned {@link OutputStream} will close the provided output stream.
* Note: The returned stream may only be used on the thread that created it as it might use thread-local resources and must be safely
* closed after use
*/
OutputStream threadLocalOutputStream(OutputStream out) throws IOException;
/**
* Decompress bytes into a newly allocated buffer.
*
* @param bytesReference bytes to decompress
* @return decompressed bytes
*/
BytesReference uncompress(BytesReference bytesReference) throws IOException;
/**
* Compress bytes into a newly allocated buffer.
*
* @param bytesReference bytes to compress
* @return compressed bytes
*/
BytesReference compress(BytesReference bytesReference) throws IOException;
}
|
Compressor
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/filter/LineItem.java
|
{
"start": 201,
"end": 1237
}
|
class ____ {
private Long id;
private Order order;
private int sequence;
private Product product;
private long quantity;
/*package*/ LineItem() {}
public static LineItem generate(Order order, int sequence, Product product, long quantity) {
LineItem item = new LineItem();
item.order = order;
item.sequence = sequence;
item.product = product;
item.quantity = quantity;
item.order.getLineItems().add(sequence, item);
return item;
}
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public Order getOrder() {
return order;
}
public void setOrder(Order order) {
this.order = order;
}
public int getSequence() {
return sequence;
}
public void setSequence(int sequence) {
this.sequence = sequence;
}
public Product getProduct() {
return product;
}
public void setProduct(Product product) {
this.product = product;
}
public long getQuantity() {
return quantity;
}
public void setQuantity(long quantity) {
this.quantity = quantity;
}
}
|
LineItem
|
java
|
apache__flink
|
flink-core/src/main/java/org/apache/flink/api/common/functions/FlatJoinFunction.java
|
{
"start": 2402,
"end": 2972
}
|
interface ____<IN1, IN2, OUT> extends Function, Serializable {
/**
* The join method, called once per joined pair of elements.
*
* @param first The element from first input.
* @param second The element from second input.
* @param out The collector used to return zero, one, or more elements.
* @throws Exception This method may throw exceptions. Throwing an exception will cause the
* operation to fail and may trigger recovery.
*/
void join(IN1 first, IN2 second, Collector<OUT> out) throws Exception;
}
|
FlatJoinFunction
|
java
|
apache__kafka
|
connect/api/src/main/java/org/apache/kafka/connect/sink/SinkConnector.java
|
{
"start": 1391,
"end": 4509
}
|
class ____ extends Connector {
/**
* <p>
* Configuration key for the list of input topics for this connector.
* </p>
* <p>
* Usually this setting is only relevant to the Kafka Connect framework, but is provided here for
* the convenience of Connector developers if they also need to know the set of topics.
* </p>
*/
public static final String TOPICS_CONFIG = "topics";
@Override
protected SinkConnectorContext context() {
return (SinkConnectorContext) context;
}
/**
* Invoked when users request to manually alter/reset the offsets for this connector via the Connect worker's REST
* API. Connectors that manage offsets externally can propagate offset changes to their external system in this
* method. Connectors may also validate these offsets if, for example, an offset is out of range for what can be
* feasibly written to the external system.
* <p>
* Connectors that neither manage offsets externally nor require custom offset validation need not implement this
* method beyond simply returning {@code true}.
* <p>
* User requests to alter/reset offsets will be handled by the Connect runtime and will be reflected in the offsets
* for this connector's consumer group.
* <p>
* Note that altering / resetting offsets is expected to be an idempotent operation and this method should be able
* to handle being called more than once with the same arguments (which could occur if a user retries the request
* due to a failure in altering the consumer group offsets, for example).
* <p>
* Similar to {@link #validate(Map) validate}, this method may be called by the runtime before the
* {@link #start(Map) start} method is invoked.
*
* @param connectorConfig the configuration of the connector
* @param offsets a map from topic partition to offset, containing the offsets that the user has requested to
* alter/reset. For any topic partitions whose offsets are being reset instead of altered, their
* corresponding value in the map will be {@code null}. This map may be empty, but never null. An
* empty offsets map could indicate that the offsets were reset previously or that no offsets have
* been committed yet.
* @return whether this method has been overridden by the connector; the default implementation returns
* {@code false}, and all other implementations (that do not unconditionally throw exceptions) should return
* {@code true}
* @throws UnsupportedOperationException if it is impossible to alter/reset the offsets for this connector
* @throws org.apache.kafka.connect.errors.ConnectException if the offsets for this connector cannot be
* reset for any other reason (for example, they have failed custom validation logic specific to this connector)
* @since 3.6
*/
public boolean alterOffsets(Map<String, String> connectorConfig, Map<TopicPartition, Long> offsets) {
return false;
}
}
|
SinkConnector
|
java
|
google__guava
|
android/guava-tests/test/com/google/common/cache/LongAdderTest.java
|
{
"start": 829,
"end": 1522
}
|
class ____ extends TestCase {
/**
* No-op null-pointer test for {@link LongAdder} to override the {@link PackageSanityTests}
* version, which checks package-private methods that we don't want to have to annotate as {@code
* Nullable} because we don't want diffs from jsr166e.
*/
public void testNulls() {}
public void testOverflows() {
LongAdder longAdder = new LongAdder();
longAdder.add(Long.MAX_VALUE);
assertThat(longAdder.sum()).isEqualTo(Long.MAX_VALUE);
longAdder.add(1);
// silently overflows; is this a bug?
// See https://github.com/google/guava/issues/3503
assertThat(longAdder.sum()).isEqualTo(-9223372036854775808L);
}
}
|
LongAdderTest
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/tree/NodeInfo.java
|
{
"start": 16195,
"end": 18655
}
|
interface ____<P1, P2, P3, P4, P5, P6, P7, P8, P9, T> {
T apply(Source l, P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, P8 p8, P9 p9);
}
public static <T extends Node<?>, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10> NodeInfo<T> create(
T n,
NodeCtor10<P1, P2, P3, P4, P5, P6, P7, P8, P9, P10, T> ctor,
P1 p1,
P2 p2,
P3 p3,
P4 p4,
P5 p5,
P6 p6,
P7 p7,
P8 p8,
P9 p9,
P10 p10
) {
return new NodeInfo<T>(n) {
@Override
protected List<Object> innerProperties() {
return Arrays.asList(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10);
}
protected T innerTransform(Function<Object, Object> rule) {
boolean same = true;
@SuppressWarnings("unchecked")
P1 newP1 = (P1) rule.apply(p1);
same &= Objects.equals(p1, newP1);
@SuppressWarnings("unchecked")
P2 newP2 = (P2) rule.apply(p2);
same &= Objects.equals(p2, newP2);
@SuppressWarnings("unchecked")
P3 newP3 = (P3) rule.apply(p3);
same &= Objects.equals(p3, newP3);
@SuppressWarnings("unchecked")
P4 newP4 = (P4) rule.apply(p4);
same &= Objects.equals(p4, newP4);
@SuppressWarnings("unchecked")
P5 newP5 = (P5) rule.apply(p5);
same &= Objects.equals(p5, newP5);
@SuppressWarnings("unchecked")
P6 newP6 = (P6) rule.apply(p6);
same &= Objects.equals(p6, newP6);
@SuppressWarnings("unchecked")
P7 newP7 = (P7) rule.apply(p7);
same &= Objects.equals(p7, newP7);
@SuppressWarnings("unchecked")
P8 newP8 = (P8) rule.apply(p8);
same &= Objects.equals(p8, newP8);
@SuppressWarnings("unchecked")
P9 newP9 = (P9) rule.apply(p9);
same &= Objects.equals(p9, newP9);
@SuppressWarnings("unchecked")
P10 newP10 = (P10) rule.apply(p10);
same &= Objects.equals(p10, newP10);
return same ? node : ctor.apply(node.source(), newP1, newP2, newP3, newP4, newP5, newP6, newP7, newP8, newP9, newP10);
}
};
}
public
|
NodeCtor9
|
java
|
spring-projects__spring-boot
|
module/spring-boot-jms/src/test/java/org/springframework/boot/jms/autoconfigure/JmsAutoConfigurationTests.java
|
{
"start": 21393,
"end": 21729
}
|
class ____ {
@Bean
JmsMessagingTemplate jmsMessagingTemplate(JmsTemplate jmsTemplate) {
JmsMessagingTemplate messagingTemplate = new JmsMessagingTemplate(jmsTemplate);
messagingTemplate.setDefaultDestinationName("fooBar");
return messagingTemplate;
}
}
@Configuration(proxyBeanMethods = false)
static
|
TestConfiguration5
|
java
|
quarkusio__quarkus
|
integration-tests/kafka-snappy/src/test/java/io/quarkus/it/kafka/KafkaSnappyProducerITCase.java
|
{
"start": 115,
"end": 184
}
|
class ____ extends KafkaSnappyProducerTest {
}
|
KafkaSnappyProducerITCase
|
java
|
apache__dubbo
|
dubbo-registry/dubbo-registry-api/src/main/java/org/apache/dubbo/registry/client/metadata/SpringCloudMetadataServiceURLBuilder.java
|
{
"start": 1532,
"end": 2186
}
|
class ____ implements MetadataServiceURLBuilder {
public static final String NAME = "spring-cloud";
@Override
public List<URL> build(ServiceInstance serviceInstance) {
Map<String, String> metadata = serviceInstance.getMetadata();
String dubboUrlsForJson = metadata.get(METADATA_SERVICE_URLS_PROPERTY_NAME);
if (StringUtils.isBlank(dubboUrlsForJson)) {
return Collections.emptyList();
}
List<String> urlStrings = JsonUtils.toJavaList(dubboUrlsForJson, String.class);
return urlStrings.stream().map(URL::valueOf).collect(Collectors.toList());
}
}
|
SpringCloudMetadataServiceURLBuilder
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/conditional/propertyname/sourcepropertyname/ConditionalMethodInMapperWithAllExceptTarget.java
|
{
"start": 1011,
"end": 1856
}
|
class ____ {
Set<String> visited = new LinkedHashSet<>();
Set<String> visitedSources = new LinkedHashSet<>();
}
@Mapping(target = "country", source = "originCountry")
@Mapping(target = "addresses", source = "originAddresses")
Employee map(EmployeeDto employee, @Context PresenceUtils utils);
@Condition
default boolean isNotBlank(String value,
DomainModel source,
@SourcePropertyName String propName,
@Context PresenceUtils utils) {
utils.visited.add( propName );
utils.visitedSources.add( source.getClass().getSimpleName() );
if ( propName.equalsIgnoreCase( "firstName" ) ) {
return true;
}
return value != null && !value.trim().isEmpty();
}
}
|
PresenceUtils
|
java
|
grpc__grpc-java
|
core/src/test/java/io/grpc/internal/SpiffeUtilTest.java
|
{
"start": 4882,
"end": 8350
}
|
class ____ {
@Test
public void spiffeUriFormatTest() {
NullPointerException npe = assertThrows(NullPointerException.class, () ->
SpiffeUtil.parse(null));
assertEquals("uri", npe.getMessage());
IllegalArgumentException iae = assertThrows(IllegalArgumentException.class, () ->
SpiffeUtil.parse("https://example.com"));
assertEquals("Spiffe Id must start with spiffe://", iae.getMessage());
iae = assertThrows(IllegalArgumentException.class, () ->
SpiffeUtil.parse("spiffe://example.com/workload#1"));
assertEquals("Spiffe Id must not contain query fragments", iae.getMessage());
iae = assertThrows(IllegalArgumentException.class, () ->
SpiffeUtil.parse("spiffe://example.com/workload-1?t=1"));
assertEquals("Spiffe Id must not contain query parameters", iae.getMessage());
}
@Test
public void spiffeTrustDomainFormatTest() {
IllegalArgumentException iae = assertThrows(IllegalArgumentException.class, () ->
SpiffeUtil.parse("spiffe://"));
assertEquals("Trust Domain can't be empty", iae.getMessage());
iae = assertThrows(IllegalArgumentException.class, () ->
SpiffeUtil.parse("spiffe://eXample.com"));
assertEquals(
"Trust Domain must contain only letters, numbers, dots, dashes, and underscores "
+ "([a-z0-9.-_])",
iae.getMessage());
StringBuilder longTrustDomain = new StringBuilder("spiffe://pi.eu.");
for (int i = 0; i < 50; i++) {
longTrustDomain.append("pi.eu");
}
iae = assertThrows(IllegalArgumentException.class, () ->
SpiffeUtil.parse(longTrustDomain.toString()));
assertEquals("Trust Domain maximum length is 255 characters", iae.getMessage());
@SuppressWarnings("OrphanedFormatString")
StringBuilder longSpiffe = new StringBuilder("spiffe://mydomain%21com/");
for (int i = 0; i < 405; i++) {
longSpiffe.append("qwert");
}
iae = assertThrows(IllegalArgumentException.class, () ->
SpiffeUtil.parse(longSpiffe.toString()));
assertEquals("Spiffe Id maximum length is 2048 characters", iae.getMessage());
}
@Test
public void spiffePathFormatTest() {
IllegalArgumentException iae = assertThrows(IllegalArgumentException.class, () ->
SpiffeUtil.parse("spiffe://example.com//"));
assertEquals("Path must not include a trailing '/'", iae.getMessage());
iae = assertThrows(IllegalArgumentException.class, () ->
SpiffeUtil.parse("spiffe://example.com/"));
assertEquals("Path must not include a trailing '/'", iae.getMessage());
iae = assertThrows(IllegalArgumentException.class, () ->
SpiffeUtil.parse("spiffe://example.com/us//miami"));
assertEquals("Individual path segments must not be empty", iae.getMessage());
iae = assertThrows(IllegalArgumentException.class, () ->
SpiffeUtil.parse("spiffe://example.com/us/."));
assertEquals("Individual path segments must not be relative path modifiers (i.e. ., ..)",
iae.getMessage());
iae = assertThrows(IllegalArgumentException.class, () ->
SpiffeUtil.parse("spiffe://example.com/us!"));
assertEquals("Individual path segments must contain only letters, numbers, dots, dashes, and "
+ "underscores ([a-zA-Z0-9.-_])", iae.getMessage());
}
}
public static
|
ExceptionMessageTest
|
java
|
apache__flink
|
flink-core/src/main/java/org/apache/flink/api/common/io/compression/XZInputStreamFactory.java
|
{
"start": 1152,
"end": 1696
}
|
class ____ implements InflaterInputStreamFactory<XZCompressorInputStream> {
private static final XZInputStreamFactory INSTANCE = new XZInputStreamFactory();
public static XZInputStreamFactory getInstance() {
return INSTANCE;
}
@Override
public XZCompressorInputStream create(InputStream in) throws IOException {
return new XZCompressorInputStream(in, true);
}
@Override
public Collection<String> getCommonFileExtensions() {
return Collections.singleton("xz");
}
}
|
XZInputStreamFactory
|
java
|
redisson__redisson
|
redisson/src/main/java/org/redisson/api/map/MapLoader.java
|
{
"start": 858,
"end": 1223
}
|
interface ____<K, V> {
/**
* Loads map value by key.
*
* @param key - map key
* @return value or <code>null</code> if value doesn't exists
*/
V load(K key);
/**
* Loads all keys.
*
* @return Iterable object. It's helpful if all keys don't fit in memory.
*/
Iterable<K> loadAllKeys();
}
|
MapLoader
|
java
|
spring-projects__spring-data-jpa
|
spring-data-jpa/src/test/java/org/springframework/data/jpa/repository/config/RepositoriesJavaConfigTests.java
|
{
"start": 1643,
"end": 1966
}
|
class ____ {
@Autowired Repositories repositories;
@Test // DATAJPA-323
void foo() {
assertThat(repositories.hasRepositoryFor(User.class)).isTrue();
}
@Configuration
@EnableJpaRepositories(basePackageClasses = UserRepository.class)
@ImportResource("classpath:infrastructure.xml")
static
|
RepositoriesJavaConfigTests
|
java
|
apache__spark
|
sql/catalyst/src/test/java/org/apache/spark/sql/catalyst/JavaTypeInferenceBeans.java
|
{
"start": 2527,
"end": 2593
}
|
class ____ extends BarWrapper<String> {
}
static
|
StringBarWrapper
|
java
|
grpc__grpc-java
|
xds/src/main/java/io/grpc/xds/XdsServerWrapper.java
|
{
"start": 29880,
"end": 33698
}
|
class ____ implements ResourceWatcher<RdsUpdate> {
private final String resourceName;
private ImmutableList<VirtualHost> savedVirtualHosts;
private boolean isPending = true;
private RouteDiscoveryState(String resourceName) {
this.resourceName = checkNotNull(resourceName, "resourceName");
}
@Override
public void onChanged(final RdsUpdate update) {
syncContext.execute(new Runnable() {
@Override
public void run() {
if (!routeDiscoveryStates.containsKey(resourceName)) {
return;
}
if (savedVirtualHosts == null && !isPending) {
logger.log(Level.WARNING, "Received valid Rds {0} configuration.", resourceName);
}
savedVirtualHosts = ImmutableList.copyOf(update.virtualHosts);
updateRdsRoutingConfig();
maybeUpdateSelector();
}
});
}
@Override
public void onResourceDoesNotExist(final String resourceName) {
syncContext.execute(new Runnable() {
@Override
public void run() {
if (!routeDiscoveryStates.containsKey(resourceName)) {
return;
}
logger.log(Level.WARNING, "Rds {0} unavailable", resourceName);
savedVirtualHosts = null;
updateRdsRoutingConfig();
maybeUpdateSelector();
}
});
}
@Override
public void onError(final Status error) {
syncContext.execute(new Runnable() {
@Override
public void run() {
if (!routeDiscoveryStates.containsKey(resourceName)) {
return;
}
String description = error.getDescription() == null ? "" : error.getDescription() + " ";
Status errorWithNodeId = error.withDescription(
description + "xDS node ID: " + xdsClient.getBootstrapInfo().node().getId());
logger.log(Level.WARNING, "Error loading RDS resource {0} from XdsClient: {1}.",
new Object[]{resourceName, errorWithNodeId});
maybeUpdateSelector();
}
});
}
private void updateRdsRoutingConfig() {
for (FilterChain filterChain : savedRdsRoutingConfigRef.keySet()) {
HttpConnectionManager hcm = filterChain.httpConnectionManager();
if (!resourceName.equals(hcm.rdsName())) {
continue;
}
ServerRoutingConfig updatedRoutingConfig;
if (savedVirtualHosts == null) {
updatedRoutingConfig = ServerRoutingConfig.FAILING_ROUTING_CONFIG;
} else {
HashMap<String, Filter> chainFilters = activeFilters.get(filterChain.name());
ImmutableMap<Route, ServerInterceptor> interceptors = generatePerRouteInterceptors(
hcm.httpFilterConfigs(), savedVirtualHosts, chainFilters);
updatedRoutingConfig = ServerRoutingConfig.create(savedVirtualHosts, interceptors);
}
logger.log(Level.FINEST, "Updating filter chain {0} rds routing config: {1}",
new Object[]{filterChain.name(), updatedRoutingConfig});
savedRdsRoutingConfigRef.get(filterChain).set(updatedRoutingConfig);
}
}
// Update the selector to use the most recently updated configs only after all rds have been
// discovered for the first time. Later changes on rds will be applied through virtual host
// list atomic ref.
private void maybeUpdateSelector() {
isPending = false;
boolean isLastPending = pendingRds.remove(resourceName) && pendingRds.isEmpty();
if (isLastPending) {
updateSelector();
}
}
}
}
@VisibleForTesting
final
|
RouteDiscoveryState
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/aggregate/TestMapReduceAggregates.java
|
{
"start": 1644,
"end": 4867
}
|
class ____ {
private static NumberFormat idFormat = NumberFormat.getInstance();
static {
idFormat.setMinimumIntegerDigits(4);
idFormat.setGroupingUsed(false);
}
@Test
public void testAggregates() throws Exception {
launch();
}
public static void launch() throws Exception {
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
int numOfInputLines = 20;
String baseDir = System.getProperty("test.build.data", "build/test/data");
Path OUTPUT_DIR = new Path(baseDir + "/output_for_aggregates_test");
Path INPUT_DIR = new Path(baseDir + "/input_for_aggregates_test");
String inputFile = "input.txt";
fs.delete(INPUT_DIR, true);
fs.mkdirs(INPUT_DIR);
fs.delete(OUTPUT_DIR, true);
StringBuilder inputData = new StringBuilder();
StringBuilder expectedOutput = new StringBuilder();
expectedOutput.append("max\t19\n");
expectedOutput.append("min\t1\n");
FSDataOutputStream fileOut = fs.create(new Path(INPUT_DIR, inputFile));
for (int i = 1; i < numOfInputLines; i++) {
expectedOutput.append("count_").append(idFormat.format(i));
expectedOutput.append("\t").append(i).append("\n");
inputData.append(idFormat.format(i));
for (int j = 1; j < i; j++) {
inputData.append(" ").append(idFormat.format(i));
}
inputData.append("\n");
}
expectedOutput.append("value_as_string_max\t9\n");
expectedOutput.append("value_as_string_min\t1\n");
expectedOutput.append("uniq_count\t15\n");
fileOut.write(inputData.toString().getBytes(StandardCharsets.UTF_8));
fileOut.close();
System.out.println("inputData:");
System.out.println(inputData.toString());
conf.setInt(ValueAggregatorJobBase.DESCRIPTOR_NUM, 1);
conf.set(ValueAggregatorJobBase.DESCRIPTOR + ".0",
"UserDefined,org.apache.hadoop.mapreduce.lib.aggregate.AggregatorTests");
conf.setLong(UniqValueCount.MAX_NUM_UNIQUE_VALUES, 14);
Job job = Job.getInstance(conf);
FileInputFormat.setInputPaths(job, INPUT_DIR);
job.setInputFormatClass(TextInputFormat.class);
FileOutputFormat.setOutputPath(job, OUTPUT_DIR);
job.setOutputFormatClass(TextOutputFormat.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(Text.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
job.setNumReduceTasks(1);
job.setMapperClass(ValueAggregatorMapper.class);
job.setReducerClass(ValueAggregatorReducer.class);
job.setCombinerClass(ValueAggregatorCombiner.class);
job.waitForCompletion(true);
assertTrue(job.isSuccessful());
//
// Finally, we compare the reconstructed answer key with the
// original one. Remember, we need to ignore zero-count items
// in the original key.
//
String outdata = MapReduceTestUtil.readOutput(OUTPUT_DIR, conf);
System.out.println("full out data:");
System.out.println(outdata.toString());
outdata = outdata.substring(0, expectedOutput.toString().length());
assertEquals(expectedOutput.toString(),outdata);
fs.delete(OUTPUT_DIR, true);
fs.delete(INPUT_DIR, true);
}
}
|
TestMapReduceAggregates
|
java
|
processing__processing4
|
app/src/processing/app/contrib/UpdateListPanel.java
|
{
"start": 902,
"end": 3872
}
|
class ____ extends ListPanel {
Contribution.Filter contribFilter;
public UpdateListPanel(ContributionTab contributionTab,
Contribution.Filter contribFilter) {
super(contributionTab, contribFilter, true,
ContributionColumn.STATUS_NO_HEADER,
ContributionColumn.NAME,
ContributionColumn.AUTHOR,
ContributionColumn.INSTALLED_VERSION,
ContributionColumn.AVAILABLE_VERSION);
TableColumnModel tcm = table.getColumnModel();
tcm.getColumn(3).setMaxWidth(ManagerFrame.VERSION_WIDTH);
tcm.getColumn(4).setMaxWidth(ManagerFrame.VERSION_WIDTH);
this.contribFilter = contribFilter;
// This is apparently a hack to prevent rows from being sorted by
// clicking on the column headers, which makes a mess because this
// list has sub-headers for the categories mixed into the list.
// However, unfortunately it also breaks column resizing. [fry 220726]
table.getTableHeader().setEnabled(false);
}
/*
// Thread: EDT
@Override
public void contributionAdded(final Contribution contribution) {
// Ensures contributionAdded in ListPanel is only run on LocalContributions
if (contribFilter.matches(contribution)) {
super.contributionAdded(contribution);
// Enable the update button if contributions are available
((UpdateStatusPanel) contributionTab.statusPanel).setUpdateEnabled(anyRows());
}
}
// Thread: EDT
@Override
public void contributionRemoved(final Contribution contribution) {
if (contribFilter.matches(contribution)) {
super.contributionRemoved(contribution);
// Disable the update button if no contributions in the list
((UpdateStatusPanel) contributionTab.statusPanel).setUpdateEnabled(anyRows());
}
}
*/
// TODO This seems a little weird… Wasn't checking against the filter,
// and not entirely clear why it isn't just calling super().
// Also seems dangerous to do its own add/remove calls.
// However, if removed, the StatusDetail entries for the Updates
// panel are all f-ed up (NPE for progressBar). [fry 230119]
// Thread: EDT
@Override
public void contributionChanged(final Contribution oldContrib,
final Contribution newContrib) {
// TODO Matching against oldContrib brings back NPEs,
// but using newContrib seems to work. [fry 230119]
if (contribFilter.matches(newContrib)) {
StatusDetail detail = detailForContrib.get(oldContrib);
if (detail == null) {
contributionAdded(newContrib);
} else if (newContrib.isInstalled()) {
detailForContrib.remove(oldContrib);
}
// model.fireTableDataChanged();
}
}
@Override
protected void updateModel() {
super.updateModel();
boolean anyRows = sorter.getViewRowCount() > 0;
((UpdateStatusPanel) contributionTab.statusPanel).setUpdateEnabled(anyRows);
}
}
|
UpdateListPanel
|
java
|
resilience4j__resilience4j
|
resilience4j-spring/src/main/java/io/github/resilience4j/ratelimiter/configure/RateLimiterConfigurationProperties.java
|
{
"start": 707,
"end": 1616
}
|
class ____ extends
io.github.resilience4j.common.ratelimiter.configuration.CommonRateLimiterConfigurationProperties {
private int rateLimiterAspectOrder = Ordered.LOWEST_PRECEDENCE - 2;
/**
* As of release 0.16.0 as we set an implicit spring aspect order now which is retry then
* circuit breaker then rate limiter then bulkhead but user can override it still if he has
* different use case but bulkhead will be first aspect all the time due to the implicit order
* we have it for bulkhead
*/
public int getRateLimiterAspectOrder() {
return rateLimiterAspectOrder;
}
/**
* set rate limiter aspect order
*
* @param rateLimiterAspectOrder the aspect order
*/
public void setRateLimiterAspectOrder(int rateLimiterAspectOrder) {
this.rateLimiterAspectOrder = rateLimiterAspectOrder;
}
}
|
RateLimiterConfigurationProperties
|
java
|
google__error-prone
|
core/src/main/java/com/google/errorprone/bugpatterns/time/JavaInstantGetSecondsGetNano.java
|
{
"start": 2094,
"end": 2909
}
|
class ____ extends BugChecker
implements MethodInvocationTreeMatcher {
private static final Matcher<ExpressionTree> GET_EPOCH_SECOND =
instanceMethod().onExactClass("java.time.Instant").named("getEpochSecond");
private static final Matcher<ExpressionTree> GET_NANO =
allOf(
instanceMethod().onExactClass("java.time.Instant").named("getNano"),
Matchers.not(Matchers.packageStartsWith("java.")));
@Override
public Description matchMethodInvocation(MethodInvocationTree tree, VisitorState state) {
if (GET_NANO.matches(tree, state)) {
if (!containsCallToSameReceiverNearby(
tree, GET_EPOCH_SECOND, state, /* checkProtoChains= */ false)) {
return describeMatch(tree);
}
}
return Description.NO_MATCH;
}
}
|
JavaInstantGetSecondsGetNano
|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/internal/operators/maybe/MaybeContains.java
|
{
"start": 1643,
"end": 3004
}
|
class ____ implements MaybeObserver<Object>, Disposable {
final SingleObserver<? super Boolean> downstream;
final Object value;
Disposable upstream;
ContainsMaybeObserver(SingleObserver<? super Boolean> actual, Object value) {
this.downstream = actual;
this.value = value;
}
@Override
public void dispose() {
upstream.dispose();
upstream = DisposableHelper.DISPOSED;
}
@Override
public boolean isDisposed() {
return upstream.isDisposed();
}
@Override
public void onSubscribe(Disposable d) {
if (DisposableHelper.validate(this.upstream, d)) {
this.upstream = d;
downstream.onSubscribe(this);
}
}
@Override
public void onSuccess(Object value) {
upstream = DisposableHelper.DISPOSED;
downstream.onSuccess(Objects.equals(value, this.value));
}
@Override
public void onError(Throwable e) {
upstream = DisposableHelper.DISPOSED;
downstream.onError(e);
}
@Override
public void onComplete() {
upstream = DisposableHelper.DISPOSED;
downstream.onSuccess(false);
}
}
}
|
ContainsMaybeObserver
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/action/support/CancellableFanOut.java
|
{
"start": 1743,
"end": 8528
}
|
class ____<Item, ItemResponse, FinalResponse> {
private static final Logger logger = LogManager.getLogger(CancellableFanOut.class);
/**
* Run the fan-out action.
*
* @param task The task to watch for cancellations. If {@code null} or not a {@link CancellableTask} then the fan-out still
* works, just without any cancellation handling.
* @param itemsIterator The items over which to fan out. Iterated on the calling thread.
* @param listener A listener for the final response, which is completed after all the fanned-out actions have completed. It is not
* completed promptly on cancellation. Completed on the thread that handles the final per-item response (or
* the calling thread if there are no items).
*/
public final void run(@Nullable Task task, Iterator<Item> itemsIterator, ActionListener<FinalResponse> listener) {
final var cancellableTask = task instanceof CancellableTask ct ? ct : null;
// Captures the final result as soon as it's known (either on completion or on cancellation) without necessarily completing the
// outer listener, because we do not want to complete the outer listener until all sub-tasks are complete
final var resultListener = new SubscribableListener<FinalResponse>();
// Completes resultListener (either on completion or on cancellation). Captures a reference to 'this', but within an
// 'AtomicReference' which is cleared, releasing the reference promptly, when executed.
final var resultListenerCompleter = new AtomicReference<Runnable>(() -> {
if (cancellableTask != null && cancellableTask.notifyIfCancelled(resultListener)) {
return;
}
// It's important that we complete resultListener before returning, because otherwise there's a risk that a cancellation arrives
// later which might unexpectedly complete the final listener on a transport thread.
ActionListener.completeWith(resultListener, this::onCompletion);
});
// Collects the per-item listeners up so they can all be completed exceptionally on cancellation. Never completed successfully.
final var itemCancellationListener = new SubscribableListener<ItemResponse>();
if (cancellableTask != null) {
cancellableTask.addListener(() -> {
assert cancellableTask.isCancelled();
// probably on a transport thread and we don't know if any of the callbacks are slow so we must avoid running them by
// blocking the thread which might add a subscriber to resultListener until after we've completed it
final var semaphore = new Semaphore(0);
// resultListenerCompleter is currently either a no-op, or else it immediately completes resultListener with a cancellation
// while it has no subscribers, so either way this semaphore is not held for long
resultListenerCompleter.getAndSet(semaphore::acquireUninterruptibly).run();
semaphore.release();
// finally, release refs to all the per-item listeners (without calling onItemFailure, so this is also fast)
cancellableTask.notifyIfCancelled(itemCancellationListener);
});
}
try (var refs = new RefCountingRunnable(new SubtasksCompletionHandler<>(resultListenerCompleter, resultListener, listener))) {
while (itemsIterator.hasNext()) {
final var item = itemsIterator.next();
// Captures a reference to 'this', but within a 'notifyOnce' so it is released promptly when completed.
final ActionListener<ItemResponse> itemResponseListener = ActionListener.notifyOnce(new ActionListener<>() {
@Override
public void onResponse(ItemResponse itemResponse) {
try {
onItemResponse(item, itemResponse);
} catch (Exception e) {
logger.error(
() -> Strings.format(
"unexpected exception handling [%s] for item [%s] in [%s]",
itemResponse,
item,
CancellableFanOut.this
),
e
);
assert false : e;
}
}
@Override
public void onFailure(Exception e) {
if (cancellableTask != null && cancellableTask.isCancelled()) {
// Completed on cancellation so it is released promptly, but there's no need to handle the exception.
return;
}
onItemFailure(item, e); // must not throw, enforced by the ActionListener#notifyOnce wrapper
}
@Override
public String toString() {
return "[" + CancellableFanOut.this + "][" + listener + "][" + item + "]";
}
});
if (cancellableTask != null) {
if (cancellableTask.isCancelled()) {
return;
}
// Register this item's listener for prompt cancellation notification.
itemCancellationListener.addListener(itemResponseListener);
}
// Process the item, capturing a ref to make sure the outer listener is completed after this item is processed.
ActionListener.run(ActionListener.releaseAfter(itemResponseListener, refs.acquire()), l -> sendItemRequest(item, l));
}
} catch (Exception e) {
// NB the listener may have been completed already (by exiting this try block) so this exception may not be sent to the caller,
// but we cannot do anything else with it; an exception here is a bug anyway.
logger.error("unexpected failure in [" + this + "][" + listener + "]", e);
assert false : e;
throw e;
}
}
/**
* Run the action (typically by sending a transport request) for an individual item. Called in sequence on the thread that invoked
* {@link #run}. May not be called for every item if the task is cancelled during the iteration.
* <p>
* Note that it's easy to accidentally capture another reference to this
|
CancellableFanOut
|
java
|
mockito__mockito
|
mockito-extensions/mockito-junit-jupiter/src/test/java/org/mockitousage/GenericTypeMockTest.java
|
{
"start": 12663,
"end": 12998
}
|
class ____ implements BaseRepository<Map<Integer, String>, String> {
public Map<Integer, String> findById(String id) {
return Map.of();
}
public Map<Integer, String> save(Map<Integer, String> entity) {
return entity;
}
}
public
|
TwoRepository
|
java
|
apache__logging-log4j2
|
log4j-core/src/main/java/org/apache/logging/log4j/core/appender/rolling/action/PathCondition.java
|
{
"start": 1088,
"end": 2298
}
|
interface ____ {
/**
* The empty array.
*/
static final PathCondition[] EMPTY_ARRAY = {};
/**
* Copies the given input.
*
* @param source What to copy
* @return a copy, never null.
*/
static PathCondition[] copy(PathCondition... source) {
return source == null || source.length == 0 ? EMPTY_ARRAY : Arrays.copyOf(source, source.length);
}
/**
* Invoked before a new {@linkplain Files#walkFileTree(Path, java.util.Set, int, java.nio.file.FileVisitor) file
* tree walk} is started. Stateful PathConditions can reset their state when this method is called.
*/
void beforeFileTreeWalk();
/**
* Returns {@code true} if the specified candidate path should be deleted, {@code false} otherwise.
*
* @param baseDir the directory from where to start scanning for deletion candidate files
* @param relativePath the candidate for deletion. This path is relative to the baseDir.
* @param attrs attributes of the candidate path
* @return whether the candidate path should be deleted
*/
boolean accept(final Path baseDir, final Path relativePath, final BasicFileAttributes attrs);
}
|
PathCondition
|
java
|
elastic__elasticsearch
|
x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/embeddings/CohereEmbeddingsServiceSettingsTests.java
|
{
"start": 1786,
"end": 18048
}
|
class ____ extends AbstractWireSerializingTestCase<CohereEmbeddingsServiceSettings> {
public static CohereEmbeddingsServiceSettings createRandom() {
var commonSettings = CohereServiceSettingsTests.createRandom();
var embeddingType = randomFrom(CohereEmbeddingType.values());
return new CohereEmbeddingsServiceSettings(commonSettings, embeddingType);
}
public void testFromMap() {
var url = "https://www.abc.com";
var similarity = SimilarityMeasure.DOT_PRODUCT.toString();
var dims = 1536;
var maxInputTokens = 512;
var model = "model";
var serviceSettings = CohereEmbeddingsServiceSettings.fromMap(
new HashMap<>(
Map.of(
ServiceFields.URL,
url,
ServiceFields.SIMILARITY,
similarity,
ServiceFields.DIMENSIONS,
dims,
ServiceFields.MAX_INPUT_TOKENS,
maxInputTokens,
CohereServiceSettings.OLD_MODEL_ID_FIELD,
model,
CohereEmbeddingsServiceSettings.EMBEDDING_TYPE,
DenseVectorFieldMapper.ElementType.BYTE.toString()
)
),
ConfigurationParseContext.PERSISTENT
);
MatcherAssert.assertThat(
serviceSettings,
is(
new CohereEmbeddingsServiceSettings(
new CohereServiceSettings(
ServiceUtils.createUri(url),
SimilarityMeasure.DOT_PRODUCT,
dims,
maxInputTokens,
model,
null,
CohereServiceSettings.CohereApiVersion.V1
),
CohereEmbeddingType.BYTE
)
)
);
}
public void testFromMap_WithModelId() {
var url = "https://www.abc.com";
var similarity = SimilarityMeasure.DOT_PRODUCT.toString();
var dims = 1536;
var maxInputTokens = 512;
var model = "model";
var serviceSettings = CohereEmbeddingsServiceSettings.fromMap(
new HashMap<>(
Map.of(
ServiceFields.URL,
url,
ServiceFields.SIMILARITY,
similarity,
ServiceFields.DIMENSIONS,
dims,
ServiceFields.MAX_INPUT_TOKENS,
maxInputTokens,
CohereServiceSettings.OLD_MODEL_ID_FIELD,
model,
CohereEmbeddingsServiceSettings.EMBEDDING_TYPE,
CohereEmbeddingType.INT8.toString()
)
),
ConfigurationParseContext.REQUEST
);
MatcherAssert.assertThat(
serviceSettings,
is(
new CohereEmbeddingsServiceSettings(
new CohereServiceSettings(
ServiceUtils.createUri(url),
SimilarityMeasure.DOT_PRODUCT,
dims,
maxInputTokens,
model,
null,
CohereServiceSettings.CohereApiVersion.V2
),
CohereEmbeddingType.INT8
)
)
);
}
public void testFromMap_PrefersModelId_OverModel() {
var url = "https://www.abc.com";
var similarity = SimilarityMeasure.DOT_PRODUCT.toString();
var dims = 1536;
var maxInputTokens = 512;
var model = "model";
var serviceSettings = CohereEmbeddingsServiceSettings.fromMap(
new HashMap<>(
Map.of(
ServiceFields.URL,
url,
ServiceFields.SIMILARITY,
similarity,
ServiceFields.DIMENSIONS,
dims,
ServiceFields.MAX_INPUT_TOKENS,
maxInputTokens,
CohereServiceSettings.OLD_MODEL_ID_FIELD,
"old_model",
CohereServiceSettings.MODEL_ID,
model,
CohereEmbeddingsServiceSettings.EMBEDDING_TYPE,
CohereEmbeddingType.BYTE.toString(),
CohereServiceSettings.API_VERSION,
CohereServiceSettings.CohereApiVersion.V1.toString()
)
),
ConfigurationParseContext.PERSISTENT
);
MatcherAssert.assertThat(
serviceSettings,
is(
new CohereEmbeddingsServiceSettings(
new CohereServiceSettings(
ServiceUtils.createUri(url),
SimilarityMeasure.DOT_PRODUCT,
dims,
maxInputTokens,
model,
null,
CohereServiceSettings.CohereApiVersion.V1
),
CohereEmbeddingType.BYTE
)
)
);
}
public void testFromMap_MissingEmbeddingType_DefaultsToFloat() {
var serviceSettings = CohereEmbeddingsServiceSettings.fromMap(new HashMap<>(Map.of()), ConfigurationParseContext.PERSISTENT);
assertThat(serviceSettings.getEmbeddingType(), is(CohereEmbeddingType.FLOAT));
}
public void testFromMap_EmptyEmbeddingType_ThrowsError() {
var thrownException = expectThrows(
ValidationException.class,
() -> CohereEmbeddingsServiceSettings.fromMap(
new HashMap<>(Map.of(CohereEmbeddingsServiceSettings.EMBEDDING_TYPE, "", CohereServiceSettings.MODEL_ID, "model")),
ConfigurationParseContext.REQUEST
)
);
MatcherAssert.assertThat(
thrownException.getMessage(),
containsString(
Strings.format(
"Validation Failed: 1: [service_settings] Invalid value empty string. [%s] must be a non-empty string;",
CohereEmbeddingsServiceSettings.EMBEDDING_TYPE
)
)
);
}
public void testFromMap_InvalidEmbeddingType_ThrowsError_ForRequest() {
var thrownException = expectThrows(
ValidationException.class,
() -> CohereEmbeddingsServiceSettings.fromMap(
new HashMap<>(Map.of(CohereEmbeddingsServiceSettings.EMBEDDING_TYPE, "abc", CohereServiceSettings.MODEL_ID, "model")),
ConfigurationParseContext.REQUEST
)
);
MatcherAssert.assertThat(
thrownException.getMessage(),
is(
Strings.format(
"Validation Failed: 1: [service_settings] Invalid value [abc] received. "
+ "[embedding_type] must be one of [binary, bit, byte, float, int8];"
)
)
);
}
public void testFromMap_InvalidEmbeddingType_ThrowsError_ForPersistent() {
var thrownException = expectThrows(
ValidationException.class,
() -> CohereEmbeddingsServiceSettings.fromMap(
new HashMap<>(Map.of(CohereEmbeddingsServiceSettings.EMBEDDING_TYPE, "abc")),
ConfigurationParseContext.PERSISTENT
)
);
MatcherAssert.assertThat(
thrownException.getMessage(),
is(
Strings.format(
"Validation Failed: 1: [service_settings] Invalid value [abc] received. "
+ "[embedding_type] must be one of [bit, byte, float];"
)
)
);
}
public void testFromMap_ReturnsFailure_WhenEmbeddingTypesAreNotValid() {
var exception = expectThrows(
ValidationException.class,
() -> CohereEmbeddingsServiceSettings.fromMap(
new HashMap<>(Map.of(CohereEmbeddingsServiceSettings.EMBEDDING_TYPE, List.of("abc"))),
ConfigurationParseContext.PERSISTENT
)
);
MatcherAssert.assertThat(
exception.getMessage(),
containsString("field [embedding_type] is not of the expected type. The value [[abc]] cannot be converted to a [String]")
);
}
public void testFromMap_ConvertsElementTypeByte_ToCohereEmbeddingTypeByte() {
assertThat(
CohereEmbeddingsServiceSettings.fromMap(
new HashMap<>(Map.of(CohereEmbeddingsServiceSettings.EMBEDDING_TYPE, DenseVectorFieldMapper.ElementType.BYTE.toString())),
ConfigurationParseContext.PERSISTENT
),
is(
new CohereEmbeddingsServiceSettings(
new CohereServiceSettings(CohereServiceSettings.CohereApiVersion.V1),
CohereEmbeddingType.BYTE
)
)
);
}
public void testFromMap_ConvertsElementTypeFloat_ToCohereEmbeddingTypeFloat() {
assertThat(
CohereEmbeddingsServiceSettings.fromMap(
new HashMap<>(Map.of(CohereEmbeddingsServiceSettings.EMBEDDING_TYPE, DenseVectorFieldMapper.ElementType.FLOAT.toString())),
ConfigurationParseContext.PERSISTENT
),
is(
new CohereEmbeddingsServiceSettings(
new CohereServiceSettings(CohereServiceSettings.CohereApiVersion.V1),
CohereEmbeddingType.FLOAT
)
)
);
}
public void testFromMap_ConvertsInt8_ToCohereEmbeddingTypeInt8() {
assertThat(
CohereEmbeddingsServiceSettings.fromMap(
new HashMap<>(Map.of(CohereEmbeddingsServiceSettings.EMBEDDING_TYPE, CohereEmbeddingType.INT8.toString())),
ConfigurationParseContext.PERSISTENT
),
is(
new CohereEmbeddingsServiceSettings(
new CohereServiceSettings(CohereServiceSettings.CohereApiVersion.V1),
CohereEmbeddingType.INT8
)
)
);
}
public void testFromMap_ConvertsBit_ToCohereEmbeddingTypeBit() {
assertThat(
CohereEmbeddingsServiceSettings.fromMap(
new HashMap<>(
Map.of(
CohereEmbeddingsServiceSettings.EMBEDDING_TYPE,
CohereEmbeddingType.BIT.toString(),
CohereServiceSettings.MODEL_ID,
"model"
)
),
ConfigurationParseContext.REQUEST
),
is(
new CohereEmbeddingsServiceSettings(
new CohereServiceSettings((String) null, null, null, null, "model", null, CohereServiceSettings.CohereApiVersion.V2),
CohereEmbeddingType.BIT
)
)
);
}
public void testFromMap_PreservesEmbeddingTypeFloat() {
assertThat(
CohereEmbeddingsServiceSettings.fromMap(
new HashMap<>(
Map.of(
CohereEmbeddingsServiceSettings.EMBEDDING_TYPE,
CohereEmbeddingType.FLOAT.toString(),
CohereServiceSettings.MODEL_ID,
"model"
)
),
ConfigurationParseContext.REQUEST
),
is(
new CohereEmbeddingsServiceSettings(
new CohereServiceSettings((String) null, null, null, null, "model", null, CohereServiceSettings.CohereApiVersion.V2),
CohereEmbeddingType.FLOAT
)
)
);
}
public void testFromMap_PersistentReadsInt8() {
assertThat(
CohereEmbeddingsServiceSettings.fromMap(
new HashMap<>(Map.of(CohereEmbeddingsServiceSettings.EMBEDDING_TYPE, "int8")),
ConfigurationParseContext.PERSISTENT
),
is(
new CohereEmbeddingsServiceSettings(
new CohereServiceSettings(CohereServiceSettings.CohereApiVersion.V1),
CohereEmbeddingType.INT8
)
)
);
}
public void testFromCohereOrDenseVectorEnumValues() {
var validation = new ValidationException();
assertEquals(CohereEmbeddingType.BYTE, CohereEmbeddingsServiceSettings.fromCohereOrDenseVectorEnumValues("byte", validation));
assertEquals(CohereEmbeddingType.INT8, CohereEmbeddingsServiceSettings.fromCohereOrDenseVectorEnumValues("int8", validation));
assertEquals(CohereEmbeddingType.FLOAT, CohereEmbeddingsServiceSettings.fromCohereOrDenseVectorEnumValues("float", validation));
assertEquals(CohereEmbeddingType.BINARY, CohereEmbeddingsServiceSettings.fromCohereOrDenseVectorEnumValues("binary", validation));
assertEquals(CohereEmbeddingType.BIT, CohereEmbeddingsServiceSettings.fromCohereOrDenseVectorEnumValues("bit", validation));
assertTrue(validation.validationErrors().isEmpty());
}
public void testToXContent_WritesAllValues() throws IOException {
var serviceSettings = new CohereEmbeddingsServiceSettings(
new CohereServiceSettings(
"url",
SimilarityMeasure.COSINE,
5,
10,
"model_id",
new RateLimitSettings(3),
CohereServiceSettings.CohereApiVersion.V2
),
CohereEmbeddingType.INT8
);
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
serviceSettings.toXContent(builder, null);
String xContentResult = Strings.toString(builder);
assertThat(xContentResult, is("""
{"url":"url","similarity":"cosine","dimensions":5,"max_input_tokens":10,"model_id":"model_id",""" + """
"rate_limit":{"requests_per_minute":3},"api_version":"V2","embedding_type":"byte"}"""));
}
@Override
protected Writeable.Reader<CohereEmbeddingsServiceSettings> instanceReader() {
return CohereEmbeddingsServiceSettings::new;
}
@Override
protected CohereEmbeddingsServiceSettings createTestInstance() {
return createRandom();
}
@Override
protected CohereEmbeddingsServiceSettings mutateInstance(CohereEmbeddingsServiceSettings instance) throws IOException {
if (randomBoolean()) {
CohereServiceSettings commonSettings = randomValueOtherThan(
instance.getCommonSettings(),
CohereServiceSettingsTests::createRandom
);
return new CohereEmbeddingsServiceSettings(commonSettings, instance.getEmbeddingType());
} else {
CohereEmbeddingType embeddingType = randomValueOtherThan(
instance.getEmbeddingType(),
() -> randomFrom(CohereEmbeddingType.values())
);
return new CohereEmbeddingsServiceSettings(instance.getCommonSettings(), embeddingType);
}
}
@Override
protected NamedWriteableRegistry getNamedWriteableRegistry() {
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
entries.addAll(new MlInferenceNamedXContentProvider().getNamedWriteables());
entries.addAll(InferenceNamedWriteablesProvider.getNamedWriteables());
return new NamedWriteableRegistry(entries);
}
public static Map<String, Object> getServiceSettingsMap(@Nullable String url, @Nullable String model, @Nullable Enum<?> embeddingType) {
var map = new HashMap<>(CohereServiceSettingsTests.getServiceSettingsMap(url, model));
if (embeddingType != null) {
map.put(CohereEmbeddingsServiceSettings.EMBEDDING_TYPE, embeddingType.toString());
}
return map;
}
}
|
CohereEmbeddingsServiceSettingsTests
|
java
|
quarkusio__quarkus
|
extensions/opentelemetry/runtime/src/main/java/io/quarkus/opentelemetry/runtime/tracing/intrumentation/vertx/RedisClientInstrumenterVertxTracer.java
|
{
"start": 8342,
"end": 8977
}
|
enum ____ implements AttributesExtractor<CommandTrace, Object> {
INSTANCE;
@Override
public void onStart(AttributesBuilder attributes, io.opentelemetry.context.Context parentContext,
CommandTrace request) {
AttributesExtractorUtil.internalSet(attributes, DB_NAMESPACE, request.dbIndex());
}
@Override
public void onEnd(AttributesBuilder attributes,
io.opentelemetry.context.Context context,
CommandTrace request,
Object response,
Throwable error) {
}
}
}
|
RedisClientAttributesExtractor
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/api/atomic/referencearray/AtomicReferenceArrayAssert_isSubsetOf_with_Array_Test.java
|
{
"start": 1074,
"end": 1659
}
|
class ____ extends AtomicReferenceArrayAssertBaseTest {
private final Object[] values = array("Yoda", "Luke");
@Override
protected AtomicReferenceArrayAssert<Object> invoke_api_method() {
return assertions.isSubsetOf(values);
}
@Override
protected void verify_internal_effects() {
verify(arrays).assertIsSubsetOf(info(), internalArray(), asList(values));
}
@Test
void invoke_api_like_user() {
assertThat(new AtomicReferenceArray<>(array("Luke", "Yoda"))).isSubsetOf("Yoda", "Luke", "Chewbacca");
}
}
|
AtomicReferenceArrayAssert_isSubsetOf_with_Array_Test
|
java
|
spring-projects__spring-security
|
oauth2/oauth2-authorization-server/src/test/java/org/springframework/security/oauth2/server/authorization/web/authentication/OAuth2DeviceAuthorizationConsentAuthenticationConverterTests.java
|
{
"start": 1932,
"end": 14159
}
|
class ____ {
private static final String VERIFICATION_URI = "/oauth2/device_verification";
private static final String USER_CODE = "BCDF-GHJK";
private static final String CLIENT_ID = "client-1";
private static final String STATE = "abc123";
private OAuth2DeviceAuthorizationConsentAuthenticationConverter converter;
@BeforeEach
public void setUp() {
this.converter = new OAuth2DeviceAuthorizationConsentAuthenticationConverter();
}
@AfterEach
public void tearDown() {
SecurityContextHolder.clearContext();
}
@Test
public void convertWhenGetThenReturnNull() {
MockHttpServletRequest request = createRequest();
request.setMethod(HttpMethod.GET.name());
assertThat(this.converter.convert(request)).isNull();
}
@Test
public void convertWhenMissingStateThenReturnNull() {
MockHttpServletRequest request = createRequest();
assertThat(this.converter.convert(request)).isNull();
}
@Test
public void convertWhenMissingClientIdThenInvalidRequestError() {
MockHttpServletRequest request = createRequest();
request.addParameter(OAuth2ParameterNames.STATE, STATE);
// @formatter:off
assertThatExceptionOfType(OAuth2AuthenticationException.class)
.isThrownBy(() -> this.converter.convert(request))
.withMessageContaining(OAuth2ParameterNames.CLIENT_ID)
.extracting(OAuth2AuthenticationException::getError)
.extracting(OAuth2Error::getErrorCode)
.isEqualTo(OAuth2ErrorCodes.INVALID_REQUEST);
// @formatter:on
}
@Test
public void convertWhenEmptyClientIdThenInvalidRequestError() {
MockHttpServletRequest request = createRequest();
request.addParameter(OAuth2ParameterNames.STATE, STATE);
request.addParameter(OAuth2ParameterNames.CLIENT_ID, "");
// @formatter:off
assertThatExceptionOfType(OAuth2AuthenticationException.class)
.isThrownBy(() -> this.converter.convert(request))
.withMessageContaining(OAuth2ParameterNames.CLIENT_ID)
.extracting(OAuth2AuthenticationException::getError)
.extracting(OAuth2Error::getErrorCode)
.isEqualTo(OAuth2ErrorCodes.INVALID_REQUEST);
// @formatter:on
}
@Test
public void convertWhenMultipleClientIdParametersThenInvalidRequestError() {
MockHttpServletRequest request = createRequest();
request.addParameter(OAuth2ParameterNames.STATE, STATE);
request.addParameter(OAuth2ParameterNames.CLIENT_ID, CLIENT_ID);
request.addParameter(OAuth2ParameterNames.CLIENT_ID, "another");
// @formatter:off
assertThatExceptionOfType(OAuth2AuthenticationException.class)
.isThrownBy(() -> this.converter.convert(request))
.withMessageContaining(OAuth2ParameterNames.CLIENT_ID)
.extracting(OAuth2AuthenticationException::getError)
.extracting(OAuth2Error::getErrorCode)
.isEqualTo(OAuth2ErrorCodes.INVALID_REQUEST);
// @formatter:on
}
@Test
public void convertWhenMissingUserCodeThenInvalidRequestError() {
MockHttpServletRequest request = createRequest();
request.addParameter(OAuth2ParameterNames.STATE, STATE);
request.addParameter(OAuth2ParameterNames.CLIENT_ID, CLIENT_ID);
// @formatter:off
assertThatExceptionOfType(OAuth2AuthenticationException.class)
.isThrownBy(() -> this.converter.convert(request))
.withMessageContaining(OAuth2ParameterNames.USER_CODE)
.extracting(OAuth2AuthenticationException::getError)
.extracting(OAuth2Error::getErrorCode)
.isEqualTo(OAuth2ErrorCodes.INVALID_REQUEST);
// @formatter:on
}
@Test
public void convertWhenEmptyUserCodeThenInvalidRequestError() {
MockHttpServletRequest request = createRequest();
request.addParameter(OAuth2ParameterNames.STATE, STATE);
request.addParameter(OAuth2ParameterNames.CLIENT_ID, CLIENT_ID);
request.addParameter(OAuth2ParameterNames.USER_CODE, "");
// @formatter:off
assertThatExceptionOfType(OAuth2AuthenticationException.class)
.isThrownBy(() -> this.converter.convert(request))
.withMessageContaining(OAuth2ParameterNames.USER_CODE)
.extracting(OAuth2AuthenticationException::getError)
.extracting(OAuth2Error::getErrorCode)
.isEqualTo(OAuth2ErrorCodes.INVALID_REQUEST);
// @formatter:on
}
@Test
public void convertWhenInvalidUserCodeThenInvalidRequestError() {
MockHttpServletRequest request = createRequest();
request.addParameter(OAuth2ParameterNames.STATE, STATE);
request.addParameter(OAuth2ParameterNames.CLIENT_ID, CLIENT_ID);
request.addParameter(OAuth2ParameterNames.USER_CODE, "LONG-USER-CODE");
// @formatter:off
assertThatExceptionOfType(OAuth2AuthenticationException.class)
.isThrownBy(() -> this.converter.convert(request))
.withMessageContaining(OAuth2ParameterNames.USER_CODE)
.extracting(OAuth2AuthenticationException::getError)
.extracting(OAuth2Error::getErrorCode)
.isEqualTo(OAuth2ErrorCodes.INVALID_REQUEST);
// @formatter:on
}
@Test
public void convertWhenMultipleUserCodeParametersThenInvalidRequestError() {
MockHttpServletRequest request = createRequest();
request.addParameter(OAuth2ParameterNames.STATE, STATE);
request.addParameter(OAuth2ParameterNames.CLIENT_ID, CLIENT_ID);
request.addParameter(OAuth2ParameterNames.USER_CODE, USER_CODE);
request.addParameter(OAuth2ParameterNames.USER_CODE, "another");
// @formatter:off
assertThatExceptionOfType(OAuth2AuthenticationException.class)
.isThrownBy(() -> this.converter.convert(request))
.withMessageContaining(OAuth2ParameterNames.USER_CODE)
.extracting(OAuth2AuthenticationException::getError)
.extracting(OAuth2Error::getErrorCode)
.isEqualTo(OAuth2ErrorCodes.INVALID_REQUEST);
// @formatter:on
}
@Test
public void convertWhenEmptyStateParameterThenInvalidRequestError() {
MockHttpServletRequest request = createRequest();
request.addParameter(OAuth2ParameterNames.STATE, "");
request.addParameter(OAuth2ParameterNames.CLIENT_ID, CLIENT_ID);
request.addParameter(OAuth2ParameterNames.USER_CODE, USER_CODE);
// @formatter:off
assertThatExceptionOfType(OAuth2AuthenticationException.class)
.isThrownBy(() -> this.converter.convert(request))
.withMessageContaining(OAuth2ParameterNames.STATE)
.extracting(OAuth2AuthenticationException::getError)
.extracting(OAuth2Error::getErrorCode)
.isEqualTo(OAuth2ErrorCodes.INVALID_REQUEST);
// @formatter:on
}
@Test
public void convertWhenMultipleStateParametersThenInvalidRequestError() {
MockHttpServletRequest request = createRequest();
request.addParameter(OAuth2ParameterNames.STATE, STATE);
request.addParameter(OAuth2ParameterNames.STATE, "another");
request.addParameter(OAuth2ParameterNames.CLIENT_ID, CLIENT_ID);
request.addParameter(OAuth2ParameterNames.USER_CODE, USER_CODE);
// @formatter:off
assertThatExceptionOfType(OAuth2AuthenticationException.class)
.isThrownBy(() -> this.converter.convert(request))
.withMessageContaining(OAuth2ParameterNames.STATE)
.extracting(OAuth2AuthenticationException::getError)
.extracting(OAuth2Error::getErrorCode)
.isEqualTo(OAuth2ErrorCodes.INVALID_REQUEST);
// @formatter:on
}
@Test
public void convertWhenMissingPrincipalThenReturnDeviceAuthorizationConsentAuthentication() {
MockHttpServletRequest request = createRequest();
request.addParameter(OAuth2ParameterNames.STATE, STATE);
request.addParameter(OAuth2ParameterNames.CLIENT_ID, CLIENT_ID);
request.addParameter(OAuth2ParameterNames.USER_CODE, USER_CODE);
OAuth2DeviceAuthorizationConsentAuthenticationToken authentication = (OAuth2DeviceAuthorizationConsentAuthenticationToken) this.converter
.convert(request);
assertThat(authentication).isNotNull();
assertThat(authentication.getAuthorizationUri()).endsWith(VERIFICATION_URI);
assertThat(authentication.getClientId()).isEqualTo(CLIENT_ID);
assertThat(authentication.getPrincipal()).isInstanceOf(AnonymousAuthenticationToken.class);
assertThat(authentication.getUserCode()).isEqualTo(USER_CODE);
assertThat(authentication.getScopes()).isEmpty();
assertThat(authentication.getAdditionalParameters()).isEmpty();
}
@Test
public void convertWhenMissingScopeThenReturnDeviceAuthorizationConsentAuthentication() {
MockHttpServletRequest request = createRequest();
request.addParameter(OAuth2ParameterNames.STATE, STATE);
request.addParameter(OAuth2ParameterNames.CLIENT_ID, CLIENT_ID);
request.addParameter(OAuth2ParameterNames.USER_CODE, USER_CODE);
SecurityContextImpl securityContext = new SecurityContextImpl();
securityContext.setAuthentication(new TestingAuthenticationToken("user", null));
SecurityContextHolder.setContext(securityContext);
OAuth2DeviceAuthorizationConsentAuthenticationToken authentication = (OAuth2DeviceAuthorizationConsentAuthenticationToken) this.converter
.convert(request);
assertThat(authentication).isNotNull();
assertThat(authentication.getAuthorizationUri()).endsWith(VERIFICATION_URI);
assertThat(authentication.getClientId()).isEqualTo(CLIENT_ID);
assertThat(authentication.getPrincipal()).isInstanceOf(TestingAuthenticationToken.class);
assertThat(authentication.getUserCode()).isEqualTo(USER_CODE);
assertThat(authentication.getScopes()).isEmpty();
assertThat(authentication.getAdditionalParameters()).isEmpty();
}
@Test
public void convertWhenAllParametersThenReturnDeviceAuthorizationConsentAuthentication() {
MockHttpServletRequest request = createRequest();
request.addParameter(OAuth2ParameterNames.CLIENT_ID, CLIENT_ID);
request.addParameter(OAuth2ParameterNames.STATE, STATE);
request.addParameter(OAuth2ParameterNames.USER_CODE, USER_CODE);
request.addParameter(OAuth2ParameterNames.SCOPE, "message.read");
request.addParameter(OAuth2ParameterNames.SCOPE, "message.write");
request.addParameter("param-1", "value-1");
request.addParameter("param-2", "value-1", "value-2");
SecurityContextImpl securityContext = new SecurityContextImpl();
securityContext.setAuthentication(new TestingAuthenticationToken("user", null));
SecurityContextHolder.setContext(securityContext);
OAuth2DeviceAuthorizationConsentAuthenticationToken authentication = (OAuth2DeviceAuthorizationConsentAuthenticationToken) this.converter
.convert(request);
assertThat(authentication).isNotNull();
assertThat(authentication.getAuthorizationUri()).endsWith(VERIFICATION_URI);
assertThat(authentication.getClientId()).isEqualTo(CLIENT_ID);
assertThat(authentication.getPrincipal()).isInstanceOf(TestingAuthenticationToken.class);
assertThat(authentication.getUserCode()).isEqualTo(USER_CODE);
assertThat(authentication.getScopes()).containsExactly("message.read", "message.write");
assertThat(authentication.getAdditionalParameters()).containsExactly(Map.entry("param-1", "value-1"),
Map.entry("param-2", new String[] { "value-1", "value-2" }));
}
@Test
public void convertWhenNonNormalizedUserCodeThenReturnDeviceAuthorizationConsentAuthentication() {
MockHttpServletRequest request = createRequest();
request.addParameter(OAuth2ParameterNames.CLIENT_ID, CLIENT_ID);
request.addParameter(OAuth2ParameterNames.STATE, STATE);
request.addParameter(OAuth2ParameterNames.USER_CODE, USER_CODE.toLowerCase().replace("-", " . "));
SecurityContextImpl securityContext = new SecurityContextImpl();
securityContext.setAuthentication(new TestingAuthenticationToken("user", null));
SecurityContextHolder.setContext(securityContext);
OAuth2DeviceAuthorizationConsentAuthenticationToken authentication = (OAuth2DeviceAuthorizationConsentAuthenticationToken) this.converter
.convert(request);
assertThat(authentication).isNotNull();
assertThat(authentication.getAuthorizationUri()).endsWith(VERIFICATION_URI);
assertThat(authentication.getClientId()).isEqualTo(CLIENT_ID);
assertThat(authentication.getPrincipal()).isInstanceOf(TestingAuthenticationToken.class);
assertThat(authentication.getUserCode()).isEqualTo(USER_CODE);
assertThat(authentication.getScopes()).isEmpty();
assertThat(authentication.getAdditionalParameters()).isEmpty();
}
private static MockHttpServletRequest createRequest() {
MockHttpServletRequest request = new MockHttpServletRequest();
request.setMethod(HttpMethod.POST.name());
request.setRequestURI(VERIFICATION_URI);
return request;
}
}
|
OAuth2DeviceAuthorizationConsentAuthenticationConverterTests
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.