language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
apache__flink
|
flink-core/src/test/java/org/apache/flink/api/java/typeutils/PojoTypeExtractionTest.java
|
{
"start": 4944,
"end": 5162
}
|
class ____ {
private int isPrivate;
public int getIsPrivate() {
return isPrivate;
}
// setter is missing (intentional)
}
// correct pojo
public static
|
IncorrectPojo
|
java
|
google__guava
|
android/guava-tests/test/com/google/common/hash/ChecksumHashFunctionTest.java
|
{
"start": 966,
"end": 3283
}
|
class ____ extends TestCase {
public void testCrc32_equalsChecksumValue() throws Exception {
assertChecksum(CRC_32, "");
assertChecksum(CRC_32, "Z");
assertChecksum(CRC_32, "foobar");
}
public void testAdler32_equalsChecksumValue() throws Exception {
assertChecksum(ADLER_32, "");
assertChecksum(ADLER_32, "Z");
assertChecksum(ADLER_32, "foobar");
}
public void testCrc32_knownValues() throws Exception {
assertHash32(0x1C8600E3, CRC_32, "hell");
assertHash32(0x3610A686, CRC_32, "hello");
assertHash32(0xED81F9F6, CRC_32, "hello ");
assertHash32(0x4850DDC2, CRC_32, "hello w");
assertHash32(0x7A2D6005, CRC_32, "hello wo");
assertHash32(0x1C192672, CRC_32, "hello wor");
assertHash32(0x414FA339, CRC_32, "The quick brown fox jumps over the lazy dog");
assertHash32(0x4400B5BC, CRC_32, "The quick brown fox jumps over the lazy cog");
}
public void testAdler32_knownValues() throws Exception {
assertHash32(0x041701A6, ADLER_32, "hell");
assertHash32(0x062C0215, ADLER_32, "hello");
assertHash32(0x08610235, ADLER_32, "hello ");
assertHash32(0x0B0D02AC, ADLER_32, "hello w");
assertHash32(0x0E28031B, ADLER_32, "hello wo");
assertHash32(0x11B5038D, ADLER_32, "hello wor");
assertHash32(0x5BDC0FDA, ADLER_32, "The quick brown fox jumps over the lazy dog");
assertHash32(0x5BD90FD9, ADLER_32, "The quick brown fox jumps over the lazy cog");
}
private static void assertChecksum(ImmutableSupplier<Checksum> supplier, String input) {
byte[] bytes = HashTestUtils.ascii(input);
Checksum checksum = supplier.get();
checksum.update(bytes, 0, bytes.length);
long value = checksum.getValue();
String toString = "name";
HashFunction func = new ChecksumHashFunction(supplier, 32, toString);
assertEquals(toString, func.toString());
assertEquals(value, func.hashBytes(bytes).padToLong());
}
private static void assertHash32(
int expected, ImmutableSupplier<Checksum> supplier, String input) {
byte[] bytes = HashTestUtils.ascii(input);
String toString = "name";
HashFunction func = new ChecksumHashFunction(supplier, 32, toString);
assertEquals(expected, func.hashBytes(bytes).asInt());
assertEquals(toString, func.toString());
}
}
|
ChecksumHashFunctionTest
|
java
|
google__guava
|
guava/src/com/google/common/collect/CompactHashSet.java
|
{
"start": 3646,
"end": 25326
}
|
class ____<E extends @Nullable Object> extends AbstractSet<E> implements Serializable {
// TODO(user): cache all field accesses in local vars
/** Creates an empty {@code CompactHashSet} instance. */
public static <E extends @Nullable Object> CompactHashSet<E> create() {
return new CompactHashSet<>();
}
/**
* Creates a <i>mutable</i> {@code CompactHashSet} instance containing the elements of the given
* collection in unspecified order.
*
* @param collection the elements that the set should contain
* @return a new {@code CompactHashSet} containing those elements (minus duplicates)
*/
public static <E extends @Nullable Object> CompactHashSet<E> create(
Collection<? extends E> collection) {
CompactHashSet<E> set = createWithExpectedSize(collection.size());
set.addAll(collection);
return set;
}
/**
* Creates a <i>mutable</i> {@code CompactHashSet} instance containing the given elements in
* unspecified order.
*
* @param elements the elements that the set should contain
* @return a new {@code CompactHashSet} containing those elements (minus duplicates)
*/
@SafeVarargs
public static <E extends @Nullable Object> CompactHashSet<E> create(E... elements) {
CompactHashSet<E> set = createWithExpectedSize(elements.length);
Collections.addAll(set, elements);
return set;
}
/**
* Creates a {@code CompactHashSet} instance, with a high enough "initial capacity" that it
* <i>should</i> hold {@code expectedSize} elements without growth.
*
* @param expectedSize the number of elements you expect to add to the returned set
* @return a new, empty {@code CompactHashSet} with enough capacity to hold {@code expectedSize}
* elements without resizing
* @throws IllegalArgumentException if {@code expectedSize} is negative
*/
public static <E extends @Nullable Object> CompactHashSet<E> createWithExpectedSize(
int expectedSize) {
return new CompactHashSet<>(expectedSize);
}
/**
* Maximum allowed false positive probability of detecting a hash flooding attack given random
* input.
*/
@VisibleForTesting(
)
static final double HASH_FLOODING_FPP = 0.001;
/**
* Maximum allowed length of a hash table bucket before falling back to a j.u.LinkedHashSet based
* implementation. Experimentally determined.
*/
private static final int MAX_HASH_BUCKET_LENGTH = 9;
// See CompactHashMap for a detailed description of how the following fields work. That
// description talks about `keys`, `values`, and `entries`; here the `keys` and `values` arrays
// are replaced by a single `elements` array but everything else works similarly.
/**
* The hashtable object. This can be either:
*
* <ul>
* <li>a byte[], short[], or int[], with size a power of two, created by
* CompactHashing.createTable, whose values are either
* <ul>
* <li>UNSET, meaning "null pointer"
* <li>one plus an index into the entries and elements array
* </ul>
* <li>another java.util.Set delegate implementation. In most modern JDKs, normal java.util hash
* collections intelligently fall back to a binary search tree if hash table collisions are
* detected. Rather than going to all the trouble of reimplementing this ourselves, we
* simply switch over to use the JDK implementation wholesale if probable hash flooding is
* detected, sacrificing the compactness guarantee in very rare cases in exchange for much
* more reliable worst-case behavior.
* <li>null, if no entries have yet been added to the map
* </ul>
*/
private transient @Nullable Object table;
/**
* Contains the logical entries, in the range of [0, size()). The high bits of each int are the
* part of the smeared hash of the element not covered by the hashtable mask, whereas the low bits
* are the "next" pointer (pointing to the next entry in the bucket chain), which will always be
* less than or equal to the hashtable mask.
*
* <pre>
* hash = aaaaaaaa
* mask = 00000fff
* next = 00000bbb
* entry = aaaaabbb
* </pre>
*
* <p>The pointers in [size(), entries.length) are all "null" (UNSET).
*/
private transient int @Nullable [] entries;
/**
* The elements contained in the set, in the range of [0, size()). The elements in [size(),
* elements.length) are all {@code null}.
*/
@VisibleForTesting transient @Nullable Object @Nullable [] elements;
/**
* Keeps track of metadata like the number of hash table bits and modifications of this data
* structure (to make it possible to throw ConcurrentModificationException in the iterator). Note
* that we choose not to make this volatile, so we do less of a "best effort" to track such
* errors, for better performance.
*/
private transient int metadata;
/** The number of elements contained in the set. */
private transient int size;
/** Constructs a new empty instance of {@code CompactHashSet}. */
CompactHashSet() {
init(CompactHashing.DEFAULT_SIZE);
}
/**
* Constructs a new instance of {@code CompactHashSet} with the specified capacity.
*
* @param expectedSize the initial capacity of this {@code CompactHashSet}.
*/
CompactHashSet(int expectedSize) {
init(expectedSize);
}
/** Pseudoconstructor for serialization support. */
void init(int expectedSize) {
Preconditions.checkArgument(expectedSize >= 0, "Expected size must be >= 0");
// Save expectedSize for use in allocArrays()
this.metadata = Ints.constrainToRange(expectedSize, 1, CompactHashing.MAX_SIZE);
}
/** Returns whether arrays need to be allocated. */
boolean needsAllocArrays() {
return table == null;
}
/** Handle lazy allocation of arrays. */
@CanIgnoreReturnValue
int allocArrays() {
Preconditions.checkState(needsAllocArrays(), "Arrays already allocated");
int expectedSize = metadata;
int buckets = CompactHashing.tableSize(expectedSize);
this.table = CompactHashing.createTable(buckets);
setHashTableMask(buckets - 1);
this.entries = new int[expectedSize];
this.elements = new Object[expectedSize];
return expectedSize;
}
@SuppressWarnings("unchecked")
@VisibleForTesting
@Nullable Set<E> delegateOrNull() {
if (table instanceof Set) {
return (Set<E>) table;
}
return null;
}
private Set<E> createHashFloodingResistantDelegate(int tableSize) {
return new LinkedHashSet<>(tableSize, 1.0f);
}
@CanIgnoreReturnValue
Set<E> convertToHashFloodingResistantImplementation() {
Set<E> newDelegate = createHashFloodingResistantDelegate(hashTableMask() + 1);
for (int i = firstEntryIndex(); i >= 0; i = getSuccessor(i)) {
newDelegate.add(element(i));
}
this.table = newDelegate;
this.entries = null;
this.elements = null;
incrementModCount();
return newDelegate;
}
@VisibleForTesting
boolean isUsingHashFloodingResistance() {
return delegateOrNull() != null;
}
/** Stores the hash table mask as the number of bits needed to represent an index. */
private void setHashTableMask(int mask) {
int hashTableBits = Integer.SIZE - Integer.numberOfLeadingZeros(mask);
metadata =
CompactHashing.maskCombine(metadata, hashTableBits, CompactHashing.HASH_TABLE_BITS_MASK);
}
/** Gets the hash table mask using the stored number of hash table bits. */
private int hashTableMask() {
return (1 << (metadata & CompactHashing.HASH_TABLE_BITS_MASK)) - 1;
}
void incrementModCount() {
metadata += CompactHashing.MODIFICATION_COUNT_INCREMENT;
}
@CanIgnoreReturnValue
@Override
public boolean add(@ParametricNullness E object) {
if (needsAllocArrays()) {
allocArrays();
}
Set<E> delegate = delegateOrNull();
if (delegate != null) {
return delegate.add(object);
}
int[] entries = requireEntries();
@Nullable Object[] elements = requireElements();
int newEntryIndex = this.size; // current size, and pointer to the entry to be appended
int newSize = newEntryIndex + 1;
int hash = smearedHash(object);
int mask = hashTableMask();
int tableIndex = hash & mask;
int next = CompactHashing.tableGet(requireTable(), tableIndex);
if (next == UNSET) { // uninitialized bucket
if (newSize > mask) {
// Resize and add new entry
mask = resizeTable(mask, CompactHashing.newCapacity(mask), hash, newEntryIndex);
} else {
CompactHashing.tableSet(requireTable(), tableIndex, newEntryIndex + 1);
}
} else {
int entryIndex;
int entry;
int hashPrefix = CompactHashing.getHashPrefix(hash, mask);
int bucketLength = 0;
do {
entryIndex = next - 1;
entry = entries[entryIndex];
if (CompactHashing.getHashPrefix(entry, mask) == hashPrefix
&& Objects.equals(object, elements[entryIndex])) {
return false;
}
next = CompactHashing.getNext(entry, mask);
bucketLength++;
} while (next != UNSET);
if (bucketLength >= MAX_HASH_BUCKET_LENGTH) {
return convertToHashFloodingResistantImplementation().add(object);
}
if (newSize > mask) {
// Resize and add new entry
mask = resizeTable(mask, CompactHashing.newCapacity(mask), hash, newEntryIndex);
} else {
entries[entryIndex] = CompactHashing.maskCombine(entry, newEntryIndex + 1, mask);
}
}
resizeMeMaybe(newSize);
insertEntry(newEntryIndex, object, hash, mask);
this.size = newSize;
incrementModCount();
return true;
}
/**
* Creates a fresh entry with the specified object at the specified position in the entry arrays.
*/
void insertEntry(int entryIndex, @ParametricNullness E object, int hash, int mask) {
setEntry(entryIndex, CompactHashing.maskCombine(hash, UNSET, mask));
setElement(entryIndex, object);
}
/** Resizes the entries storage if necessary. */
private void resizeMeMaybe(int newSize) {
int entriesSize = requireEntries().length;
if (newSize > entriesSize) {
// 1.5x but round up to nearest odd (this is optimal for memory consumption on Android)
int newCapacity = min(CompactHashing.MAX_SIZE, (entriesSize + max(1, entriesSize >>> 1)) | 1);
if (newCapacity != entriesSize) {
resizeEntries(newCapacity);
}
}
}
/**
* Resizes the internal entries array to the specified capacity, which may be greater or less than
* the current capacity.
*/
void resizeEntries(int newCapacity) {
this.entries = Arrays.copyOf(requireEntries(), newCapacity);
this.elements = Arrays.copyOf(requireElements(), newCapacity);
}
@CanIgnoreReturnValue
private int resizeTable(int oldMask, int newCapacity, int targetHash, int targetEntryIndex) {
Object newTable = CompactHashing.createTable(newCapacity);
int newMask = newCapacity - 1;
if (targetEntryIndex != UNSET) {
// Add target first; it must be last in the chain because its entry hasn't yet been created
CompactHashing.tableSet(newTable, targetHash & newMask, targetEntryIndex + 1);
}
Object oldTable = requireTable();
int[] entries = requireEntries();
// Loop over current hashtable
for (int oldTableIndex = 0; oldTableIndex <= oldMask; oldTableIndex++) {
int oldNext = CompactHashing.tableGet(oldTable, oldTableIndex);
while (oldNext != UNSET) {
int entryIndex = oldNext - 1;
int oldEntry = entries[entryIndex];
// Rebuild hash using entry hashPrefix and tableIndex ("hashSuffix")
int hash = CompactHashing.getHashPrefix(oldEntry, oldMask) | oldTableIndex;
int newTableIndex = hash & newMask;
int newNext = CompactHashing.tableGet(newTable, newTableIndex);
CompactHashing.tableSet(newTable, newTableIndex, oldNext);
entries[entryIndex] = CompactHashing.maskCombine(hash, newNext, newMask);
oldNext = CompactHashing.getNext(oldEntry, oldMask);
}
}
this.table = newTable;
setHashTableMask(newMask);
return newMask;
}
@Override
public boolean contains(@Nullable Object object) {
if (needsAllocArrays()) {
return false;
}
Set<E> delegate = delegateOrNull();
if (delegate != null) {
return delegate.contains(object);
}
int hash = smearedHash(object);
int mask = hashTableMask();
int next = CompactHashing.tableGet(requireTable(), hash & mask);
if (next == UNSET) {
return false;
}
int hashPrefix = CompactHashing.getHashPrefix(hash, mask);
do {
int entryIndex = next - 1;
int entry = entry(entryIndex);
if (CompactHashing.getHashPrefix(entry, mask) == hashPrefix
&& Objects.equals(object, element(entryIndex))) {
return true;
}
next = CompactHashing.getNext(entry, mask);
} while (next != UNSET);
return false;
}
@CanIgnoreReturnValue
@Override
public boolean remove(@Nullable Object object) {
if (needsAllocArrays()) {
return false;
}
Set<E> delegate = delegateOrNull();
if (delegate != null) {
return delegate.remove(object);
}
int mask = hashTableMask();
int index =
CompactHashing.remove(
object,
/* value= */ null,
mask,
requireTable(),
requireEntries(),
requireElements(),
/* values= */ null);
if (index == -1) {
return false;
}
moveLastEntry(index, mask);
size--;
incrementModCount();
return true;
}
/**
* Moves the last entry in the entry array into {@code dstIndex}, and nulls out its old position.
*/
void moveLastEntry(int dstIndex, int mask) {
Object table = requireTable();
int[] entries = requireEntries();
@Nullable Object[] elements = requireElements();
int srcIndex = size() - 1;
if (dstIndex < srcIndex) {
// move last entry to deleted spot
Object object = elements[srcIndex];
elements[dstIndex] = object;
elements[srcIndex] = null;
// move the last entry to the removed spot, just like we moved the element
entries[dstIndex] = entries[srcIndex];
entries[srcIndex] = 0;
// also need to update whoever's "next" pointer was pointing to the last entry place
int tableIndex = smearedHash(object) & mask;
int next = CompactHashing.tableGet(table, tableIndex);
int srcNext = srcIndex + 1;
if (next == srcNext) {
// we need to update the root pointer
CompactHashing.tableSet(table, tableIndex, dstIndex + 1);
} else {
// we need to update a pointer in an entry
int entryIndex;
int entry;
do {
entryIndex = next - 1;
entry = entries[entryIndex];
next = CompactHashing.getNext(entry, mask);
} while (next != srcNext);
// here, entries[entryIndex] points to the old entry location; update it
entries[entryIndex] = CompactHashing.maskCombine(entry, dstIndex + 1, mask);
}
} else {
elements[dstIndex] = null;
entries[dstIndex] = 0;
}
}
int firstEntryIndex() {
return isEmpty() ? -1 : 0;
}
int getSuccessor(int entryIndex) {
return (entryIndex + 1 < size) ? entryIndex + 1 : -1;
}
/**
* Updates the index an iterator is pointing to after a call to remove: returns the index of the
* entry that should be looked at after a removal on indexRemoved, with indexBeforeRemove as the
* index that *was* the next entry that would be looked at.
*/
int adjustAfterRemove(int indexBeforeRemove, @SuppressWarnings("unused") int indexRemoved) {
return indexBeforeRemove - 1;
}
@Override
public Iterator<E> iterator() {
Set<E> delegate = delegateOrNull();
if (delegate != null) {
return delegate.iterator();
}
return new Iterator<E>() {
int expectedMetadata = metadata;
int currentIndex = firstEntryIndex();
int indexToRemove = -1;
@Override
public boolean hasNext() {
return currentIndex >= 0;
}
@Override
@ParametricNullness
public E next() {
checkForConcurrentModification();
if (!hasNext()) {
throw new NoSuchElementException();
}
indexToRemove = currentIndex;
E result = element(currentIndex);
currentIndex = getSuccessor(currentIndex);
return result;
}
@Override
public void remove() {
checkForConcurrentModification();
checkRemove(indexToRemove >= 0);
incrementExpectedModCount();
CompactHashSet.this.remove(element(indexToRemove));
currentIndex = adjustAfterRemove(currentIndex, indexToRemove);
indexToRemove = -1;
}
void incrementExpectedModCount() {
expectedMetadata += CompactHashing.MODIFICATION_COUNT_INCREMENT;
}
private void checkForConcurrentModification() {
if (metadata != expectedMetadata) {
throw new ConcurrentModificationException();
}
}
};
}
@Override
public Spliterator<E> spliterator() {
if (needsAllocArrays()) {
return Spliterators.spliterator(new Object[0], Spliterator.DISTINCT | Spliterator.ORDERED);
}
Set<E> delegate = delegateOrNull();
return (delegate != null)
? delegate.spliterator()
: Spliterators.spliterator(
requireElements(), 0, size, Spliterator.DISTINCT | Spliterator.ORDERED);
}
@Override
public void forEach(Consumer<? super E> action) {
checkNotNull(action);
Set<E> delegate = delegateOrNull();
if (delegate != null) {
delegate.forEach(action);
} else {
for (int i = firstEntryIndex(); i >= 0; i = getSuccessor(i)) {
action.accept(element(i));
}
}
}
@Override
public int size() {
Set<E> delegate = delegateOrNull();
return (delegate != null) ? delegate.size() : size;
}
@Override
public boolean isEmpty() {
return size() == 0;
}
@Override
public @Nullable Object[] toArray() {
if (needsAllocArrays()) {
return new Object[0];
}
Set<E> delegate = delegateOrNull();
return (delegate != null) ? delegate.toArray() : Arrays.copyOf(requireElements(), size);
}
@CanIgnoreReturnValue
@Override
@SuppressWarnings("nullness") // b/192354773 in our checker affects toArray declarations
public <T extends @Nullable Object> T[] toArray(T[] a) {
if (needsAllocArrays()) {
if (a.length > 0) {
a[0] = null;
}
return a;
}
Set<E> delegate = delegateOrNull();
return (delegate != null)
? delegate.toArray(a)
: ObjectArrays.toArrayImpl(requireElements(), 0, size, a);
}
/**
* Ensures that this {@code CompactHashSet} has the smallest representation in memory, given its
* current size.
*/
public void trimToSize() {
if (needsAllocArrays()) {
return;
}
Set<E> delegate = delegateOrNull();
if (delegate != null) {
Set<E> newDelegate = createHashFloodingResistantDelegate(size());
newDelegate.addAll(delegate);
this.table = newDelegate;
return;
}
int size = this.size;
if (size < requireEntries().length) {
resizeEntries(size);
}
int minimumTableSize = CompactHashing.tableSize(size);
int mask = hashTableMask();
if (minimumTableSize < mask) { // smaller table size will always be less than current mask
resizeTable(mask, minimumTableSize, UNSET, UNSET);
}
}
@Override
public void clear() {
if (needsAllocArrays()) {
return;
}
incrementModCount();
Set<E> delegate = delegateOrNull();
if (delegate != null) {
metadata =
Ints.constrainToRange(size(), CompactHashing.DEFAULT_SIZE, CompactHashing.MAX_SIZE);
delegate.clear(); // invalidate any iterators left over!
table = null;
size = 0;
} else {
Arrays.fill(requireElements(), 0, size, null);
CompactHashing.tableClear(requireTable());
Arrays.fill(requireEntries(), 0, size, 0);
this.size = 0;
}
}
@J2ktIncompatible
private void writeObject(ObjectOutputStream stream) throws IOException {
stream.defaultWriteObject();
stream.writeInt(size());
for (E e : this) {
stream.writeObject(e);
}
}
@SuppressWarnings("unchecked")
@J2ktIncompatible
private void readObject(ObjectInputStream stream) throws IOException, ClassNotFoundException {
stream.defaultReadObject();
int elementCount = stream.readInt();
if (elementCount < 0) {
throw new InvalidObjectException("Invalid size: " + elementCount);
}
init(elementCount);
for (int i = 0; i < elementCount; i++) {
E element = (E) stream.readObject();
add(element);
}
}
/*
* For discussion of the safety of the following methods, see the comments near the end of
* CompactHashMap.
*/
private Object requireTable() {
return requireNonNull(table);
}
private int[] requireEntries() {
return requireNonNull(entries);
}
private @Nullable Object[] requireElements() {
return requireNonNull(elements);
}
@SuppressWarnings("unchecked")
private E element(int i) {
return (E) requireElements()[i];
}
private int entry(int i) {
return requireEntries()[i];
}
private void setElement(int i, E value) {
requireElements()[i] = value;
}
private void setEntry(int i, int value) {
requireEntries()[i] = value;
}
}
|
CompactHashSet
|
java
|
elastic__elasticsearch
|
server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java
|
{
"start": 3223,
"end": 3791
}
|
class ____ extends ESIntegTestCase {
private static final String SINGLE_VALUED_FIELD_NAME = "l_value";
private static final String MULTI_VALUED_FIELD_NAME = "l_values";
static int numDocs;
static int interval;
static int numValueBuckets, numValuesBuckets;
static long[] valueCounts, valuesCounts;
static Map<Long, Map<String, Object>> expectedMultiSortBuckets;
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return Collections.singleton(CustomScriptPlugin.class);
}
public static
|
HistogramIT
|
java
|
apache__thrift
|
lib/java/src/test/java/org/apache/thrift/transport/TestTByteBuffer.java
|
{
"start": 382,
"end": 2723
}
|
class ____ {
@Test
public void testReadWrite() throws Exception {
final TByteBuffer byteBuffer = new TByteBuffer(ByteBuffer.allocate(16));
byteBuffer.write("Hello World".getBytes(StandardCharsets.UTF_8));
assertEquals(
"Hello World", new String(byteBuffer.flip().toByteArray(), StandardCharsets.UTF_8));
}
@Test
public void testReuseReadWrite() throws Exception {
final TByteBuffer byteBuffer = new TByteBuffer(ByteBuffer.allocate(16));
byteBuffer.write("Hello World".getBytes(StandardCharsets.UTF_8));
assertEquals(
"Hello World", new String(byteBuffer.flip().toByteArray(), StandardCharsets.UTF_8));
byteBuffer.clear();
byteBuffer.write("Goodbye Horses".getBytes(StandardCharsets.UTF_8));
assertEquals(
"Goodbye Horses", new String(byteBuffer.flip().toByteArray(), StandardCharsets.UTF_8));
}
@Test
public void testOverflow() throws Exception {
final TByteBuffer byteBuffer = new TByteBuffer(ByteBuffer.allocate(4));
TTransportException e =
assertThrows(
TTransportException.class,
() -> byteBuffer.write("Hello World".getBytes(StandardCharsets.UTF_8)));
assertEquals("Not enough room in output buffer", e.getMessage());
}
@Test
public void testSmallTConfiguration() throws Exception {
// Test that TByteBuffer init fail with small max message size.
final TConfiguration configSmall =
new TConfiguration(
4, TConfiguration.DEFAULT_MAX_FRAME_SIZE, TConfiguration.DEFAULT_RECURSION_DEPTH);
TTransportException e =
assertThrows(
TTransportException.class,
() -> new TByteBuffer(configSmall, ByteBuffer.allocate(100)));
assertEquals(TTransportException.MESSAGE_SIZE_LIMIT, e.getType());
}
@Test
public void testLargeTConfiguration() throws Exception {
// Test that TByteBuffer init pass with large max message size beyond
// TConfiguration.DEFAULT_MAX_MESSAGE_SIZE.
int maxSize = 101 * 1024 * 1024;
int bufferSize = (100 * 1024 + 512) * 1024;
final TConfiguration configLarge =
new TConfiguration(
maxSize, TConfiguration.DEFAULT_MAX_FRAME_SIZE, TConfiguration.DEFAULT_RECURSION_DEPTH);
assertDoesNotThrow(() -> new TByteBuffer(configLarge, ByteBuffer.allocate(bufferSize)));
}
}
|
TestTByteBuffer
|
java
|
apache__maven
|
impl/maven-impl/src/main/java/org/apache/maven/impl/resolver/scopes/Maven3ScopeManagerConfiguration.java
|
{
"start": 2087,
"end": 7826
}
|
class ____ implements ScopeManagerConfiguration {
public static final Maven3ScopeManagerConfiguration INSTANCE = new Maven3ScopeManagerConfiguration();
public static final String RS_NONE = "none";
public static final String RS_MAIN_COMPILE = "main-compile";
public static final String RS_MAIN_COMPILE_PLUS_RUNTIME = "main-compilePlusRuntime";
public static final String RS_MAIN_RUNTIME = "main-runtime";
public static final String RS_MAIN_RUNTIME_PLUS_SYSTEM = "main-runtimePlusSystem";
public static final String RS_TEST_COMPILE = "test-compile";
public static final String RS_TEST_RUNTIME = "test-runtime";
private Maven3ScopeManagerConfiguration() {}
@Override
public String getId() {
return "Maven3";
}
@Override
public boolean isStrictDependencyScopes() {
return false;
}
@Override
public boolean isStrictResolutionScopes() {
return false;
}
@Override
public BuildScopeSource getBuildScopeSource() {
return new BuildScopeMatrixSource(
Collections.singletonList(CommonBuilds.PROJECT_PATH_MAIN),
Arrays.asList(CommonBuilds.BUILD_PATH_COMPILE, CommonBuilds.BUILD_PATH_RUNTIME),
CommonBuilds.MAVEN_TEST_BUILD_SCOPE);
}
@Override
public Collection<org.eclipse.aether.scope.DependencyScope> buildDependencyScopes(
InternalScopeManager internalScopeManager) {
ArrayList<org.eclipse.aether.scope.DependencyScope> result = new ArrayList<>();
result.add(internalScopeManager.createDependencyScope(DependencyScope.COMPILE.id(), true, all()));
result.add(internalScopeManager.createDependencyScope(
DependencyScope.RUNTIME.id(), true, byBuildPath(CommonBuilds.BUILD_PATH_RUNTIME)));
result.add(internalScopeManager.createDependencyScope(
DependencyScope.PROVIDED.id(),
false,
union(
byBuildPath(CommonBuilds.BUILD_PATH_COMPILE),
select(CommonBuilds.PROJECT_PATH_TEST, CommonBuilds.BUILD_PATH_RUNTIME))));
result.add(internalScopeManager.createDependencyScope(
DependencyScope.TEST.id(), false, byProjectPath(CommonBuilds.PROJECT_PATH_TEST)));
result.add(internalScopeManager.createSystemDependencyScope(
DependencyScope.SYSTEM.id(), false, all(), MavenArtifactProperties.LOCAL_PATH));
return result;
}
@Override
public Collection<org.eclipse.aether.scope.ResolutionScope> buildResolutionScopes(
InternalScopeManager internalScopeManager) {
Collection<org.eclipse.aether.scope.DependencyScope> allDependencyScopes =
internalScopeManager.getDependencyScopeUniverse();
Collection<org.eclipse.aether.scope.DependencyScope> nonTransitiveDependencyScopes =
allDependencyScopes.stream().filter(s -> !s.isTransitive()).collect(Collectors.toSet());
org.eclipse.aether.scope.DependencyScope system = internalScopeManager
.getDependencyScope(DependencyScope.SYSTEM.id())
.orElse(null);
ArrayList<org.eclipse.aether.scope.ResolutionScope> result = new ArrayList<>();
result.add(internalScopeManager.createResolutionScope(
RS_NONE,
InternalScopeManager.Mode.REMOVE,
Collections.emptySet(),
Collections.emptySet(),
allDependencyScopes));
result.add(internalScopeManager.createResolutionScope(
RS_MAIN_COMPILE,
InternalScopeManager.Mode.ELIMINATE,
singleton(CommonBuilds.PROJECT_PATH_MAIN, CommonBuilds.BUILD_PATH_COMPILE),
Collections.singletonList(system),
nonTransitiveDependencyScopes));
result.add(internalScopeManager.createResolutionScope(
RS_MAIN_COMPILE_PLUS_RUNTIME,
InternalScopeManager.Mode.ELIMINATE,
byProjectPath(CommonBuilds.PROJECT_PATH_MAIN),
Collections.singletonList(system),
nonTransitiveDependencyScopes));
result.add(internalScopeManager.createResolutionScope(
RS_MAIN_RUNTIME,
InternalScopeManager.Mode.ELIMINATE,
singleton(CommonBuilds.PROJECT_PATH_MAIN, CommonBuilds.BUILD_PATH_RUNTIME),
Collections.emptySet(),
nonTransitiveDependencyScopes));
result.add(internalScopeManager.createResolutionScope(
RS_MAIN_RUNTIME_PLUS_SYSTEM,
InternalScopeManager.Mode.ELIMINATE,
singleton(CommonBuilds.PROJECT_PATH_MAIN, CommonBuilds.BUILD_PATH_RUNTIME),
Collections.singletonList(system),
nonTransitiveDependencyScopes));
result.add(internalScopeManager.createResolutionScope(
RS_TEST_COMPILE,
InternalScopeManager.Mode.ELIMINATE,
select(CommonBuilds.PROJECT_PATH_TEST, CommonBuilds.BUILD_PATH_COMPILE),
Collections.singletonList(system),
nonTransitiveDependencyScopes));
result.add(internalScopeManager.createResolutionScope(
RS_TEST_RUNTIME,
InternalScopeManager.Mode.ELIMINATE,
select(CommonBuilds.PROJECT_PATH_TEST, CommonBuilds.BUILD_PATH_RUNTIME),
Collections.singletonList(system),
nonTransitiveDependencyScopes));
return result;
}
// ===
public static void main(String... args) {
ScopeManagerDump.dump(Maven3ScopeManagerConfiguration.INSTANCE);
}
}
|
Maven3ScopeManagerConfiguration
|
java
|
alibaba__druid
|
core/src/main/java/com/alibaba/druid/sql/ast/statement/SQLShowErrorsStatement.java
|
{
"start": 773,
"end": 992
}
|
class ____ extends SQLStatementImpl implements SQLShowStatement {
@Override
protected void accept0(SQLASTVisitor visitor) {
visitor.visit(this);
visitor.endVisit(this);
}
}
|
SQLShowErrorsStatement
|
java
|
grpc__grpc-java
|
binder/src/main/java/io/grpc/binder/BindServiceFlags.java
|
{
"start": 2663,
"end": 7785
}
|
class ____ {
private int flags;
private Builder(int flags) {
this.flags = flags;
}
/**
* Sets or clears the {@link android.content.Context#BIND_ABOVE_CLIENT} flag.
*
* <p>This flag has no additional meaning at the gRPC layer. See the Android docs for more.
*
* @return this, for fluent construction
*/
public Builder setAboveClient(boolean newValue) {
return setFlag(BIND_ABOVE_CLIENT, newValue);
}
/**
* Sets or clears the {@link android.content.Context#BIND_ADJUST_WITH_ACTIVITY} flag.
*
* <p>This flag has no additional meaning at the gRPC layer. See the Android docs for more.
*
* @return this, for fluent construction
*/
public Builder setAdjustWithActivity(boolean newValue) {
return setFlag(BIND_ADJUST_WITH_ACTIVITY, newValue);
}
/**
* Sets or clears the {@code android.content.Context#BIND_ALLOW_ACTIVITY_STARTS} flag.
*
* <p>This flag has no additional meaning at the gRPC layer. See the Android docs for more.
*
* @return this, for fluent construction
*/
@RequiresApi(Build.VERSION_CODES.UPSIDE_DOWN_CAKE)
public Builder setAllowActivityStarts(boolean newValue) {
return setFlag(BIND_ALLOW_ACTIVITY_STARTS, newValue);
}
/**
* Sets or clears the {@link android.content.Context#BIND_ALLOW_OOM_MANAGEMENT} flag.
*
* <p>This flag has no additional meaning at the gRPC layer. See the Android docs for more.
*
* @return this, for fluent construction
*/
public Builder setAllowOomManagement(boolean newValue) {
return setFlag(BIND_ALLOW_OOM_MANAGEMENT, newValue);
}
/**
* Controls whether sending a call over the associated {@link io.grpc.Channel} will cause the
* target {@link android.app.Service} to be created and whether in-flight calls will keep it in
* existence absent any other binding in the system.
*
* <p>If false, RPCs will not succeed until the remote Service comes into existence for some
* other reason (if ever). See also {@link io.grpc.CallOptions#withWaitForReady()}.
*
* <p>See {@link android.content.Context#BIND_AUTO_CREATE} for more.
*
* @return this, for fluent construction
*/
public Builder setAutoCreate(boolean newValue) {
return setFlag(BIND_AUTO_CREATE, newValue);
}
/**
* Sets or clears the {@link android.content.Context#BIND_IMPORTANT} flag.
*
* <p>This flag has no additional meaning at the gRPC layer. See the Android docs for more.
*
* @return this, for fluent construction
*/
public Builder setImportant(boolean newValue) {
return setFlag(BIND_IMPORTANT, newValue);
}
/**
* Sets or clears the {@link android.content.Context#BIND_INCLUDE_CAPABILITIES} flag.
*
* <p>This flag has no additional meaning at the gRPC layer. See the Android docs for more.
*
* @return this, for fluent construction
*/
@RequiresApi(api = 29)
public Builder setIncludeCapabilities(boolean newValue) {
return setFlag(BIND_INCLUDE_CAPABILITIES, newValue);
}
/**
* Sets or clears the {@link android.content.Context#BIND_NOT_FOREGROUND} flag.
*
* <p>This flag has no additional meaning at the gRPC layer. See the Android docs for more.
*
* @return this, for fluent construction
*/
public Builder setNotForeground(boolean newValue) {
return setFlag(BIND_NOT_FOREGROUND, newValue);
}
/**
* Sets or clears the {@link android.content.Context#BIND_NOT_PERCEPTIBLE} flag.
*
* <p>This flag has no additional meaning at the gRPC layer. See the Android docs for more.
*
* @return this, for fluent construction
*/
@RequiresApi(api = 29)
public Builder setNotPerceptible(boolean newValue) {
return setFlag(BIND_NOT_PERCEPTIBLE, newValue);
}
/**
* Sets or clears the {@link android.content.Context#BIND_WAIVE_PRIORITY} flag.
*
* <p>This flag has no additional meaning at the gRPC layer. See the Android docs for more.
*
* @return this, for fluent construction
*/
public Builder setWaivePriority(boolean newValue) {
return setFlag(BIND_WAIVE_PRIORITY, newValue);
}
/**
* Returns a new instance of {@link BindServiceFlags} that reflects the state of this builder.
*/
public BindServiceFlags build() {
return new BindServiceFlags(flags);
}
private Builder setFlag(int flag, boolean newValue) {
if (newValue) {
flags |= flag;
} else {
flags &= ~flag;
}
return this;
}
}
@Override
public String toString() {
return "BindServiceFlags{" + toHexString(flags) + "}";
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
BindServiceFlags that = (BindServiceFlags) o;
return flags == that.flags;
}
@Override
public int hashCode() {
return flags;
}
}
|
Builder
|
java
|
apache__kafka
|
connect/transforms/src/main/java/org/apache/kafka/connect/transforms/util/RegexValidator.java
|
{
"start": 995,
"end": 1401
}
|
class ____ implements ConfigDef.Validator {
@Override
public void ensureValid(String name, Object value) {
try {
Pattern.compile((String) value);
} catch (Exception e) {
throw new ConfigException(name, value, "Invalid regex: " + e.getMessage());
}
}
@Override
public String toString() {
return "valid regex";
}
}
|
RegexValidator
|
java
|
apache__flink
|
flink-clients/src/main/java/org/apache/flink/client/deployment/application/FromClasspathEntryClassInformationProvider.java
|
{
"start": 7506,
"end": 7671
}
|
class ____.",
e);
}
}
private static boolean isJarFilename(String filename) {
return filename.endsWith(".jar");
}
}
|
name
|
java
|
hibernate__hibernate-orm
|
hibernate-envers/src/main/java/org/hibernate/envers/query/order/internal/PropertyAuditOrder.java
|
{
"start": 473,
"end": 1157
}
|
class ____ implements AuditOrder {
private final String alias;
private final PropertyNameGetter propertyNameGetter;
private final boolean asc;
private NullPrecedence nullPrecedence;
public PropertyAuditOrder(String alias, PropertyNameGetter propertyNameGetter, boolean asc) {
this.alias = alias;
this.propertyNameGetter = propertyNameGetter;
this.asc = asc;
}
@Override
public AuditOrder nulls(NullPrecedence nullPrecedence) {
this.nullPrecedence = nullPrecedence;
return this;
}
@Override
public OrderData getData(Configuration configuration) {
return new OrderData( alias, propertyNameGetter.get( configuration ), asc, nullPrecedence );
}
}
|
PropertyAuditOrder
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/record/BinaryRecordOutput.java
|
{
"start": 1293,
"end": 3950
}
|
class ____ implements RecordOutput {
private DataOutput out;
private BinaryRecordOutput() {}
private void setDataOutput(DataOutput out) {
this.out = out;
}
private static final ThreadLocal<BinaryRecordOutput> B_OUT =
new ThreadLocal<BinaryRecordOutput>() {
@Override
protected BinaryRecordOutput initialValue() {
return new BinaryRecordOutput();
}
};
/**
* Get a thread-local record output for the supplied DataOutput.
* @param out data output stream
* @return binary record output corresponding to the supplied DataOutput.
*/
public static BinaryRecordOutput get(DataOutput out) {
BinaryRecordOutput bout = B_OUT.get();
bout.setDataOutput(out);
return bout;
}
/** Creates a new instance of BinaryRecordOutput */
public BinaryRecordOutput(OutputStream out) {
this.out = new DataOutputStream(out);
}
/** Creates a new instance of BinaryRecordOutput */
public BinaryRecordOutput(DataOutput out) {
this.out = out;
}
@Override
public void writeByte(byte b, String tag) throws IOException {
out.writeByte(b);
}
@Override
public void writeBool(boolean b, String tag) throws IOException {
out.writeBoolean(b);
}
@Override
public void writeInt(int i, String tag) throws IOException {
Utils.writeVInt(out, i);
}
@Override
public void writeLong(long l, String tag) throws IOException {
Utils.writeVLong(out, l);
}
@Override
public void writeFloat(float f, String tag) throws IOException {
out.writeFloat(f);
}
@Override
public void writeDouble(double d, String tag) throws IOException {
out.writeDouble(d);
}
@Override
public void writeString(String s, String tag) throws IOException {
Utils.toBinaryString(out, s);
}
@Override
public void writeBuffer(Buffer buf, String tag)
throws IOException {
byte[] barr = buf.get();
int len = buf.getCount();
Utils.writeVInt(out, len);
out.write(barr, 0, len);
}
@Override
public void startRecord(Record r, String tag) throws IOException {}
@Override
public void endRecord(Record r, String tag) throws IOException {}
@Override
public void startVector(ArrayList v, String tag) throws IOException {
writeInt(v.size(), tag);
}
@Override
public void endVector(ArrayList v, String tag) throws IOException {}
@Override
public void startMap(TreeMap v, String tag) throws IOException {
writeInt(v.size(), tag);
}
@Override
public void endMap(TreeMap v, String tag) throws IOException {}
}
|
BinaryRecordOutput
|
java
|
apache__flink
|
flink-dstl/flink-dstl-dfs/src/main/java/org/apache/flink/changelog/fs/FsStateChangelogStorageForRecovery.java
|
{
"start": 1459,
"end": 2270
}
|
class ____
implements StateChangelogStorageView<ChangelogStateHandleStreamImpl> {
private final ChangelogStreamHandleReader changelogStreamHandleReader;
public FsStateChangelogStorageForRecovery(
ChangelogStreamHandleReader changelogStreamHandleReader) {
this.changelogStreamHandleReader = changelogStreamHandleReader;
}
@Override
public StateChangelogHandleReader<ChangelogStateHandleStreamImpl> createReader() {
return new StateChangelogHandleStreamHandleReader(
new StateChangeIteratorImpl(changelogStreamHandleReader));
}
@Override
public void close() throws Exception {
if (changelogStreamHandleReader != null) {
changelogStreamHandleReader.close();
}
}
}
|
FsStateChangelogStorageForRecovery
|
java
|
quarkusio__quarkus
|
extensions/redis-client/runtime/src/main/java/io/quarkus/redis/datasource/transactions/ReactiveTransactionalRedisDataSource.java
|
{
"start": 4713,
"end": 12221
}
|
class ____ the members
* @param <V> the type of the member
* @return the object to execute geo commands.
*/
default <V> ReactiveTransactionalGeoCommands<String, V> geo(Class<V> memberType) {
return geo(String.class, memberType);
}
/**
* Gets the object to execute commands manipulating keys and expiration times.
*
* @param redisKeyType the type of the keys
* @param <K> the type of the key
* @return the object to execute commands manipulating keys.
*/
<K> ReactiveTransactionalKeyCommands<K> key(Class<K> redisKeyType);
/**
* Gets the object to execute commands manipulating keys and expiration times.
*
* @return the object to execute commands manipulating keys.
*/
default ReactiveTransactionalKeyCommands<String> key() {
return key(String.class);
}
/**
* Gets the object to execute commands manipulating sorted sets.
*
* @param redisKeyType the type of the keys
* @param valueType the type of the value sorted in the sorted sets
* @param <K> the type of the key
* @param <V> the type of the value
* @return the object to manipulate sorted sets.
*/
<K, V> ReactiveTransactionalSortedSetCommands<K, V> sortedSet(Class<K> redisKeyType, Class<V> valueType);
/**
* Gets the object to execute commands manipulating sorted sets.
*
* @param valueType the type of the value sorted in the sorted sets
* @param <V> the type of the value
* @return the object to manipulate sorted sets.
*/
default <V> ReactiveTransactionalSortedSetCommands<String, V> sortedSet(Class<V> valueType) {
return sortedSet(String.class, valueType);
}
/**
* Gets the object to execute commands manipulating stored strings.
*
* <p>
* <strong>NOTE:</strong> Instead of {@code string}, this group is named {@code value} to avoid the confusion with the
* Java String type. Indeed, Redis strings can be strings, numbers, byte arrays...
*
* @param redisKeyType the type of the keys
* @param valueType the type of the value, often String, or the value are encoded/decoded using codecs.
* @param <K> the type of the key
* @param <V> the type of the value
* @return the object to manipulate stored strings.
*/
<K, V> ReactiveTransactionalValueCommands<K, V> value(Class<K> redisKeyType, Class<V> valueType);
/**
* Gets the object to execute commands manipulating stored strings.
*
* <p>
* <strong>NOTE:</strong> Instead of {@code string}, this group is named {@code value} to avoid the confusion with the
* Java String type. Indeed, Redis strings can be strings, numbers, byte arrays...
*
* @param valueType the type of the value, often String, or the value are encoded/decoded using codecs.
* @param <V> the type of the value
* @return the object to manipulate stored strings.
*/
default <V> ReactiveTransactionalValueCommands<String, V> value(Class<V> valueType) {
return value(String.class, valueType);
}
/**
* Gets the object to execute commands manipulating stored strings.
*
* @param redisKeyType the type of the keys
* @param valueType the type of the value, often String, or the value are encoded/decoded using codecs.
* @param <K> the type of the key
* @param <V> the type of the value
* @return the object to manipulate stored strings.
* @deprecated Use {@link #value(Class, Class)} instead
*/
@Deprecated
<K, V> ReactiveTransactionalStringCommands<K, V> string(Class<K> redisKeyType, Class<V> valueType);
/**
* Gets the object to execute commands manipulating stored strings.
*
* @param valueType the type of the value, often String, or the value are encoded/decoded using codecs.
* @param <V> the type of the value
* @return the object to manipulate stored strings.
* @deprecated Use {@link #value(Class)} instead
*/
@Deprecated
default <V> ReactiveTransactionalStringCommands<String, V> string(Class<V> valueType) {
return string(String.class, valueType);
}
/**
* Gets the object to execute commands manipulating sets.
*
* @param redisKeyType the type of the keys
* @param memberType the type of the member stored in each set
* @param <K> the type of the key
* @param <V> the type of the member
* @return the object to manipulate sets.
*/
<K, V> ReactiveTransactionalSetCommands<K, V> set(Class<K> redisKeyType, Class<V> memberType);
/**
* Gets the object to execute commands manipulating sets.
*
* @param memberType the type of the member stored in each set
* @param <V> the type of the member
* @return the object to manipulate sets.
*/
default <V> ReactiveTransactionalSetCommands<String, V> set(Class<V> memberType) {
return set(String.class, memberType);
}
/**
* Gets the object to execute commands manipulating lists.
*
* @param redisKeyType the type of the keys
* @param memberType the type of the member stored in each list
* @param <K> the type of the key
* @param <V> the type of the member
* @return the object to manipulate sets.
*/
<K, V> ReactiveTransactionalListCommands<K, V> list(Class<K> redisKeyType, Class<V> memberType);
/**
* Gets the object to execute commands manipulating lists.
*
* @param memberType the type of the member stored in each list
* @param <V> the type of the member
* @return the object to manipulate sets.
*/
default <V> ReactiveTransactionalListCommands<String, V> list(Class<V> memberType) {
return list(String.class, memberType);
}
/**
* Gets the object to execute commands manipulating hyperloglog data structures.
*
* @param redisKeyType the type of the keys
* @param memberType the type of the member stored in the data structure
* @param <K> the type of the key
* @param <V> the type of the member
* @return the object to manipulate hyper log log data structures.
*/
<K, V> ReactiveTransactionalHyperLogLogCommands<K, V> hyperloglog(Class<K> redisKeyType, Class<V> memberType);
/**
* Gets the object to execute commands manipulating hyperloglog data structures.
*
* @param memberType the type of the member stored in the data structure
* @param <V> the type of the member
* @return the object to manipulate hyper log log data structures.
*/
default <V> ReactiveTransactionalHyperLogLogCommands<String, V> hyperloglog(Class<V> memberType) {
return hyperloglog(String.class, memberType);
}
/**
* Gets the object to execute commands manipulating bitmap data structures.
*
* @param redisKeyType the type of the keys
* @param <K> the type of the key
* @return the object to manipulate bitmap data structures.
*/
<K> ReactiveTransactionalBitMapCommands<K> bitmap(Class<K> redisKeyType);
/**
* Gets the object to execute commands manipulating bitmap data structures.
*
* @return the object to manipulate bitmap data structures.
*/
default ReactiveTransactionalBitMapCommands<String> bitmap() {
return bitmap(String.class);
}
/**
* Gets the object to execute commands manipulating streams.
* <p>
*
* @param redisKeyType the
|
of
|
java
|
apache__camel
|
core/camel-api/src/main/java/org/apache/camel/Suspendable.java
|
{
"start": 901,
"end": 1232
}
|
interface ____ indicate a custom component has custom implementation for suspending the
* {@link SuspendableService} service. <br/>
* This is needed to let Camel know if there is special code happening during a suspension.
* <p/>
* The {@link ServiceSupport} implementation that most Camel components / endpoints etc use as base
|
to
|
java
|
quarkusio__quarkus
|
extensions/redis-client/deployment/src/test/java/io/quarkus/redis/deployment/client/patterns/CacheTest.java
|
{
"start": 2075,
"end": 2524
}
|
class ____ {
private final ValueCommands<String, BusinessObject> commands;
public MyRedisCache(RedisDataSource ds) {
commands = ds.value(BusinessObject.class);
}
public BusinessObject get(String key) {
return commands.get(key);
}
public void set(String key, BusinessObject bo) {
commands.setex(key, 1, bo); // Expires after 1 second
}
}
}
|
MyRedisCache
|
java
|
apache__maven
|
api/maven-api-core/src/test/java/org/apache/maven/api/feature/FeaturesTest.java
|
{
"start": 1198,
"end": 7344
}
|
class ____ {
@Test
void testDeployBuildPomDefaultValue() {
// Test that deployBuildPom returns true by default (when property is not set)
Map<String, Object> emptyProperties = Map.of();
assertTrue(Features.deployBuildPom(emptyProperties));
// Test with null properties
assertTrue(Features.deployBuildPom(null));
}
@Test
void testDeployBuildPomWithStringTrue() {
// Test with string "true"
Map<String, Object> properties = Map.of(Constants.MAVEN_DEPLOY_BUILD_POM, "true");
assertTrue(Features.deployBuildPom(properties));
}
@Test
void testDeployBuildPomWithStringFalse() {
// Test with string "false"
Map<String, Object> properties = Map.of(Constants.MAVEN_DEPLOY_BUILD_POM, "false");
assertFalse(Features.deployBuildPom(properties));
}
@Test
void testDeployBuildPomWithBooleanTrue() {
// Test with Boolean.TRUE
Map<String, Object> properties = Map.of(Constants.MAVEN_DEPLOY_BUILD_POM, Boolean.TRUE);
assertTrue(Features.deployBuildPom(properties));
}
@Test
void testDeployBuildPomWithBooleanFalse() {
// Test with Boolean.FALSE
Map<String, Object> properties = Map.of(Constants.MAVEN_DEPLOY_BUILD_POM, Boolean.FALSE);
assertFalse(Features.deployBuildPom(properties));
}
@Test
void testDeployBuildPomWithStringTrueUpperCase() {
// Test case-insensitive string parsing - TRUE
Map<String, Object> properties = Map.of(Constants.MAVEN_DEPLOY_BUILD_POM, "TRUE");
assertTrue(Features.deployBuildPom(properties));
}
@Test
void testDeployBuildPomWithStringFalseUpperCase() {
// Test case-insensitive string parsing - FALSE
Map<String, Object> properties = Map.of(Constants.MAVEN_DEPLOY_BUILD_POM, "FALSE");
assertFalse(Features.deployBuildPom(properties));
}
@Test
void testDeployBuildPomWithStringTrueMixedCase() {
// Test case-insensitive string parsing - True
Map<String, Object> properties = Map.of(Constants.MAVEN_DEPLOY_BUILD_POM, "True");
assertTrue(Features.deployBuildPom(properties));
}
@Test
void testDeployBuildPomWithStringFalseMixedCase() {
// Test case-insensitive string parsing - False
Map<String, Object> properties = Map.of(Constants.MAVEN_DEPLOY_BUILD_POM, "False");
assertFalse(Features.deployBuildPom(properties));
}
@Test
void testDeployBuildPomWithInvalidStringValue() {
// Test that invalid string values default to false (Boolean.parseBoolean behavior)
Map<String, Object> properties = Map.of(Constants.MAVEN_DEPLOY_BUILD_POM, "invalid");
assertFalse(Features.deployBuildPom(properties));
}
@Test
void testDeployBuildPomWithEmptyString() {
// Test that empty string defaults to false
Map<String, Object> properties = Map.of(Constants.MAVEN_DEPLOY_BUILD_POM, "");
assertFalse(Features.deployBuildPom(properties));
}
@Test
void testDeployBuildPomWithYesString() {
// Test that "yes" string defaults to false (not a valid boolean)
Map<String, Object> properties = Map.of(Constants.MAVEN_DEPLOY_BUILD_POM, "yes");
assertFalse(Features.deployBuildPom(properties));
}
@Test
void testDeployBuildPomWithNumericString() {
// Test that numeric string defaults to false
Map<String, Object> properties = Map.of(Constants.MAVEN_DEPLOY_BUILD_POM, "1");
assertFalse(Features.deployBuildPom(properties));
}
@Test
void testDeployBuildPomWithIntegerOne() {
// Test with integer 1 (should use toString() and then parseBoolean)
Map<String, Object> properties = Map.of(Constants.MAVEN_DEPLOY_BUILD_POM, 1);
assertFalse(Features.deployBuildPom(properties)); // "1".parseBoolean() = false
}
@Test
void testDeployBuildPomWithIntegerZero() {
// Test with integer 0 (should use toString() and then parseBoolean)
Map<String, Object> properties = Map.of(Constants.MAVEN_DEPLOY_BUILD_POM, 0);
assertFalse(Features.deployBuildPom(properties)); // "0".parseBoolean() = false
}
@Test
void testDeployBuildPomWithMutableMap() {
// Test with a mutable map to ensure the method doesn't modify the input
Map<String, Object> properties = new HashMap<>();
properties.put(Constants.MAVEN_DEPLOY_BUILD_POM, "false");
assertFalse(Features.deployBuildPom(properties));
// Verify the map wasn't modified
assertEquals(1, properties.size());
assertEquals("false", properties.get(Constants.MAVEN_DEPLOY_BUILD_POM));
}
@Test
void testDeployBuildPomWithOtherProperties() {
// Test that other properties don't interfere
Map<String, Object> properties = Map.of(
Constants.MAVEN_CONSUMER_POM,
"false",
Constants.MAVEN_MAVEN3_PERSONALITY,
"true",
"some.other.property",
"value",
Constants.MAVEN_DEPLOY_BUILD_POM,
"false");
assertFalse(Features.deployBuildPom(properties));
}
@Test
void testConsistencyWithOtherFeatureMethodsFalse() {
// Test that deployBuildPom behaves consistently with other feature methods when false
Map<String, Object> properties = Map.of(
Constants.MAVEN_DEPLOY_BUILD_POM, "false",
Constants.MAVEN_CONSUMER_POM, "false");
assertFalse(Features.deployBuildPom(properties));
assertFalse(Features.consumerPom(properties));
}
@Test
void testConsistencyWithOtherFeatureMethodsTrue() {
// Test that deployBuildPom behaves consistently with other feature methods when true
Map<String, Object> properties = Map.of(
Constants.MAVEN_DEPLOY_BUILD_POM, "true",
Constants.MAVEN_CONSUMER_POM, "true");
assertTrue(Features.deployBuildPom(properties));
assertTrue(Features.consumerPom(properties));
}
}
|
FeaturesTest
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/SpanContext.java
|
{
"start": 963,
"end": 1012
}
|
class ____ for better separation.
*/
public
|
directly
|
java
|
junit-team__junit5
|
junit-platform-commons/src/main/java/org/junit/platform/commons/support/ReflectionSupport.java
|
{
"start": 22003,
"end": 23246
}
|
class ____ instantiate; never {@code null}
* @param args the arguments to pass to the constructor, none of which may
* be {@code null}
* @return the new instance; never {@code null}
* @see ExceptionUtils#throwAsUncheckedException(Throwable)
*/
public static <T> T newInstance(Class<T> clazz, Object... args) {
return ReflectionUtils.newInstance(clazz, args);
}
/**
* Invoke the supplied method, making it accessible if necessary and
* {@linkplain ExceptionUtils#throwAsUncheckedException masking} any
* checked exception as an unchecked exception.
*
* @param method the method to invoke; never {@code null}
* @param target the object on which to invoke the method; may be
* {@code null} if the method is {@code static}
* @param args the arguments to pass to the method; never {@code null}
* @return the value returned by the method invocation or {@code null}
* if the return type is {@code void}
* @see ExceptionUtils#throwAsUncheckedException(Throwable)
*/
public static @Nullable Object invokeMethod(Method method, @Nullable Object target, @Nullable Object... args) {
return ReflectionUtils.invokeMethod(method, target, args);
}
/**
* Find all distinct {@linkplain Field fields} of the supplied
|
to
|
java
|
apache__camel
|
components/camel-aws/camel-aws2-ec2/src/test/java/org/apache/camel/component/aws2/ec2/AWS2EC2ProducerHealthCheckProfileCredsTest.java
|
{
"start": 1451,
"end": 3979
}
|
class ____ extends CamelTestSupport {
CamelContext context;
@Override
protected CamelContext createCamelContext() throws Exception {
context = super.createCamelContext();
context.getPropertiesComponent().setLocation("ref:prop");
// install health check manually (yes a bit cumbersome)
HealthCheckRegistry registry = new DefaultHealthCheckRegistry();
registry.setCamelContext(context);
Object hc = registry.resolveById("context");
registry.register(hc);
hc = registry.resolveById("routes");
registry.register(hc);
hc = registry.resolveById("consumers");
registry.register(hc);
HealthCheckRepository hcr = (HealthCheckRepository) registry.resolveById("producers");
hcr.setEnabled(true);
registry.register(hcr);
context.getCamelContextExtension().addContextPlugin(HealthCheckRegistry.class, registry);
return context;
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:listClusters")
.to("aws2-ec2://TestDomain?operation=describeInstances®ion=l&useDefaultCredentialsProvider=true");
}
};
}
@Test
public void testConnectivity() {
Collection<HealthCheck.Result> res = HealthCheckHelper.invokeLiveness(context);
boolean up = res.stream().allMatch(r -> r.getState().equals(HealthCheck.State.UP));
Assertions.assertTrue(up, "liveness check");
// health-check readiness should be down
await().atMost(20, TimeUnit.SECONDS).untilAsserted(() -> {
Collection<HealthCheck.Result> res2 = HealthCheckHelper.invokeReadiness(context);
boolean down = res2.stream().allMatch(r -> r.getState().equals(HealthCheck.State.DOWN));
boolean containsAws2AthenaHealthCheck = res2.stream()
.anyMatch(result -> result.getCheck().getId().startsWith("producer:aws2-ec2"));
boolean hasRegionMessage = res2.stream()
.anyMatch(r -> r.getMessage().stream().anyMatch(msg -> msg.contains("region")));
Assertions.assertTrue(down, "liveness check");
Assertions.assertTrue(containsAws2AthenaHealthCheck, "aws2-ec2 check");
Assertions.assertTrue(hasRegionMessage, "aws2-ec2 check error message");
});
}
}
|
AWS2EC2ProducerHealthCheckProfileCredsTest
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestListActionTaker.java
|
{
"start": 1944,
"end": 6053
}
|
class ____ extends AbstractAbfsTestWithTimeout {
public TestListActionTaker() throws Exception {
}
/**
* This test method verifies the behavior of the producer-consumer pattern implemented in the ListActionTaker class.
* The producer (ListActionTaker) should only resume producing (listing and enqueuing blobs) when the consumer lag becomes tolerable.
* The test method mocks the necessary components and checks the behavior of the ListActionTaker under these conditions.
*
* @throws IOException if an I/O error occurs
*/
@Test
public void testProducerResumeOnlyOnConsumerLagBecomesTolerable() throws
IOException {
Path path = new Path("test");
AbfsConfiguration abfsConfiguration = Mockito.mock(AbfsConfiguration.class);
AbfsBlobClient client = Mockito.mock(AbfsBlobClient.class);
Mockito.doReturn(abfsConfiguration).when(client).getAbfsConfiguration();
Mockito.doReturn(DEFAULT_AZURE_LIST_MAX_RESULTS)
.when(abfsConfiguration)
.getListingMaxConsumptionLag();
Mockito.doReturn(DEFAULT_FS_AZURE_PRODUCER_QUEUE_MAX_SIZE)
.when(abfsConfiguration)
.getProducerQueueMaxSize();
ListResponseData listResponseData = Mockito.mock(ListResponseData.class);
AbfsRestOperation op = Mockito.mock(AbfsRestOperation.class);
AbfsHttpOperation httpOperation = Mockito.mock(AbfsHttpOperation.class);
Mockito.doReturn(httpOperation).when(op).getResult();
Mockito.doReturn(op).when(listResponseData).getOp();
BlobListResultSchema listResultSchema = Mockito.mock(
BlobListResultSchema.class);
Mockito.doReturn(listResultSchema)
.when(httpOperation)
.getListResultSchema();
Mockito.doReturn("a")
.doReturn("b")
.doReturn("c")
.doReturn(null)
.when(listResultSchema).getNextMarker();
TracingContext tracingContext = Mockito.mock(TracingContext.class);
ListActionTaker listActionTaker = new ListActionTaker(path, client,
tracingContext) {
private ListBlobQueue listBlobQueue;
private boolean isListAndEnqueueInProgress;
private boolean completed;
@Override
protected ListBlobQueue createListBlobQueue(final AbfsConfiguration configuration)
throws InvalidConfigurationValueException {
listBlobQueue = super.createListBlobQueue(configuration);
return listBlobQueue;
}
@Override
int getMaxConsumptionParallelism() {
return DEFAULT_FS_AZURE_LISTING_ACTION_THREADS;
}
@Override
boolean takeAction(final Path path) throws AzureBlobFileSystemException {
while (!isListAndEnqueueInProgress
&& listBlobQueue.size() < DEFAULT_AZURE_LIST_MAX_RESULTS
&& !completed) {
// wait for the producer to produce more items
}
return true;
}
@Override
protected String listAndEnqueue(final ListBlobQueue listBlobQueue,
final String continuationToken) throws AzureBlobFileSystemException {
isListAndEnqueueInProgress = true;
String contToken = super.listAndEnqueue(listBlobQueue,
continuationToken);
isListAndEnqueueInProgress = false;
if (contToken == null) {
completed = true;
}
return contToken;
}
@Override
protected void addPaths(final List<Path> paths,
final ListResultSchema retrievedSchema) {
for (int i = 0; i < DEFAULT_AZURE_LIST_MAX_RESULTS; i++) {
paths.add(new Path("test" + i));
}
}
};
final int[] occurrences = {0};
Mockito.doAnswer(answer -> {
occurrences[0]++;
Assertions.assertThat((int) answer.getArgument(2))
.isEqualTo(DEFAULT_AZURE_LIST_MAX_RESULTS);
return listResponseData;
}).when(client)
.listPath(Mockito.anyString(), Mockito.anyBoolean(), Mockito.anyInt(),
Mockito.nullable(String.class), Mockito.any(TracingContext.class), Mockito.nullable(URI.class));
listActionTaker.listRecursiveAndTakeAction();
}
}
|
TestListActionTaker
|
java
|
apache__flink
|
flink-core/src/test/java/org/apache/flink/types/parser/ByteParserTest.java
|
{
"start": 847,
"end": 2093
}
|
class ____ extends ParserTestBase<Byte> {
@Override
public String[] getValidTestValues() {
return new String[] {
"0",
"1",
"76",
"-66",
String.valueOf(Byte.MAX_VALUE),
String.valueOf(Byte.MIN_VALUE),
"19"
};
}
@Override
public Byte[] getValidTestResults() {
return new Byte[] {
(byte) 0, (byte) 1, (byte) 76, (byte) -66, Byte.MAX_VALUE, Byte.MIN_VALUE, (byte) 19
};
}
@Override
public String[] getInvalidTestValues() {
return new String[] {
"a",
"9a",
"-57-6",
"7-88",
String.valueOf(Byte.MAX_VALUE) + "0",
String.valueOf(Short.MIN_VALUE),
String.valueOf(Byte.MAX_VALUE + 1),
String.valueOf(Byte.MIN_VALUE - 1),
" 1",
"2 ",
" ",
"\t"
};
}
@Override
public boolean allowsEmptyField() {
return false;
}
@Override
public FieldParser<Byte> getParser() {
return new ByteParser();
}
@Override
public Class<Byte> getTypeClass() {
return Byte.class;
}
}
|
ByteParserTest
|
java
|
reactor__reactor-core
|
reactor-core/src/main/java/reactor/core/publisher/FluxWindowTimeout.java
|
{
"start": 2817,
"end": 25926
}
|
class ____<T>
implements InnerOperator<T, Flux<T>> {
final @Nullable StateLogger logger;
final CoreSubscriber<? super Flux<T>> actual;
final long timespan;
final TimeUnit unit;
final Scheduler scheduler;
final int maxSize;
final Scheduler.Worker worker;
final int limit;
volatile long requested;
@SuppressWarnings("rawtypes")
static final AtomicLongFieldUpdater<WindowTimeoutWithBackpressureSubscriber> REQUESTED =
AtomicLongFieldUpdater.newUpdater(WindowTimeoutWithBackpressureSubscriber.class, "requested");
volatile long state;
@SuppressWarnings("rawtypes")
static final AtomicLongFieldUpdater<WindowTimeoutWithBackpressureSubscriber> STATE =
AtomicLongFieldUpdater.newUpdater(WindowTimeoutWithBackpressureSubscriber.class, "state");
static final long CANCELLED_FLAG =
0b1000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000L;
static final long TERMINATED_FLAG =
0b0100_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000L;
static final long HAS_UNSENT_WINDOW =
0b0010_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000L;
static final long HAS_WORK_IN_PROGRESS =
0b0001_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000L;
static final long REQUEST_INDEX_MASK =
0b0000_1111_1111_1111_1111_1111_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000L;
static final long ACTIVE_WINDOW_INDEX_MASK =
0b0000_0000_0000_0000_0000_0000_1111_1111_1111_1111_1111_0000_0000_0000_0000_0000L;
static final long NEXT_WINDOW_INDEX_MASK =
0b0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_1111_1111_1111_1111_1111L;
static final int ACTIVE_WINDOW_INDEX_SHIFT = 20;
static final int REQUEST_INDEX_SHIFT = 40;
boolean done;
@Nullable Throwable error;
@SuppressWarnings("NotNullFieldNotInitialized") // s initialized in onSubscribe
Subscription s;
@Nullable InnerWindow<T> window;
WindowTimeoutWithBackpressureSubscriber(CoreSubscriber<? super Flux<T>> actual,
int maxSize,
long timespan,
TimeUnit unit,
Scheduler scheduler,
@Nullable StateLogger logger) {
this.actual = actual;
this.timespan = timespan;
this.unit = unit;
this.scheduler = scheduler;
this.maxSize = maxSize;
this.limit = Operators.unboundedOrLimit(maxSize);
this.worker = scheduler.createWorker();
this.logger = logger;
STATE.lazySet(this, 1);
}
@Override
public CoreSubscriber<? super Flux<T>> actual() {
return this.actual;
}
@Override
public void onSubscribe(Subscription s) {
if (Operators.validate(this.s, s)) {
this.s = s;
this.actual.onSubscribe(this);
}
}
@Override
public void onNext(T t) {
if (this.done) {
Operators.onNextDropped(t, this.actual.currentContext());
return;
}
while(true) {
if (isCancelled(this.state)) {
Operators.onDiscard(t, this.actual.currentContext());
return;
}
final InnerWindow<T> window = this.window;
assert window != null : "window can not be null";
if (window.sendNext(t)) {
return;
}
}
}
/**
 * Propagates an upstream error to the active window and, when safe, directly
 * to the downstream subscriber. Delivery is deferred to the drain loop when
 * there is an unsent window or work already in progress.
 */
@Override
public void onError(Throwable t) {
    if (this.done) {
        Operators.onErrorDropped(t, this.actual.currentContext());
        return;
    }
    // Publish error before the state transition so the drain loop sees it
    // once TERMINATED_FLAG is visible.
    this.error = t;
    this.done = true;

    final long previousState = markTerminated(this);
    if (isCancelled(previousState) || isTerminated(previousState)) {
        // another party already terminated/cancelled; nothing more to do
        return;
    }

    final InnerWindow<T> window = this.window;
    if (window != null) {
        window.sendError(wrapSource(t));
        if (hasUnsentWindow(previousState)) {
            // downstream terminal signal is deferred until the unsent window
            // is delivered by the drain loop
            return;
        }
    }

    if (hasWorkInProgress(previousState)) {
        // the active drain loop will observe TERMINATED_FLAG and emit the error
        return;
    }

    this.actual.onError(t);
}
/**
 * Propagates upstream completion to the active window and, when safe, directly
 * to the downstream subscriber. Mirrors {@link #onError(Throwable)} but with a
 * completion signal; delivery is deferred to the drain loop when there is an
 * unsent window or work already in progress.
 */
@Override
public void onComplete() {
    if (this.done) {
        return;
    }
    this.done = true;

    final long previousState = markTerminated(this);
    if (isCancelled(previousState) || isTerminated(previousState)) {
        // already terminated/cancelled elsewhere
        return;
    }

    InnerWindow<T> window = this.window;
    if (window != null) {
        window.sendComplete();
        if (hasUnsentWindow(previousState)) {
            // downstream onComplete is deferred until the unsent window is delivered
            return;
        }
    }

    if (hasWorkInProgress(previousState)) {
        // the active drain loop will observe TERMINATED_FLAG and complete downstream
        return;
    }

    this.actual.onComplete();
}
/**
 * Accumulates downstream demand and, if no drain is currently running, enters
 * the drain loop to deliver pending (unsent) windows.
 *
 * <p>If another thread holds the work-in-progress lock, this method only bumps
 * the request index (so the active drainer re-reads demand) and exits.
 */
@Override
public void request(long n) {
    final long previousRequested = Operators.addCap(REQUESTED, this, n);
    if (previousRequested == Long.MAX_VALUE) {
        // already in unbounded mode; nothing to coordinate
        return;
    }

    long previousState;
    long expectedState;
    boolean hasUnsentWindow;
    for (;;) {
        previousState = this.state;

        if (isCancelled(previousState)) {
            return;
        }

        if (hasWorkInProgress(previousState)) {
            // A drain is active: record the extra request by bumping the
            // request index so the drainer notices new demand, then leave.
            long nextState = (previousState & ~REQUEST_INDEX_MASK) | incrementRequestIndex(previousState);
            if (STATE.compareAndSet(this, previousState, nextState)) {
                if (this.logger != null) {
                    this.logger.log(this.toString(), "mre", previousState, nextState);
                }
                return;
            }
            continue;
        }

        hasUnsentWindow = hasUnsentWindow(previousState);

        // No deliverable work: no unsent window and either terminated or no
        // pending window rotation (active index caught up with next index).
        if (!hasUnsentWindow && (isTerminated(previousState) || activeWindowIndex(previousState) == nextWindowIndex(previousState))) {
            return;
        }

        // Acquire the work-in-progress lock (and clear the unsent flag, which
        // is re-established by the drain loop if the window stays unsent).
        expectedState =
                (previousState &~ HAS_UNSENT_WINDOW) |
                        HAS_WORK_IN_PROGRESS;

        if (STATE.compareAndSet(this, previousState, expectedState)) {
            if (this.logger != null) {
                this.logger.log(this.toString(), "mre", previousState, expectedState);
            }
            break;
        }
    }

    drain(previousState, expectedState);
}
/**
 * Called when window {@code windowIndex} should be rotated (size reached or
 * timeout fired). Bumps the next-window index and, if no drain is already
 * running, enters the drain loop to perform the actual rotation.
 *
 * @param windowIndex index of the window requesting rotation; stale requests
 *                    (index no longer current) are ignored
 */
void tryCreateNextWindow(int windowIndex) {
    long previousState;
    long expectedState;
    for (;;) {
        previousState = this.state;

        if (isCancelled(previousState)) {
            return;
        }

        // Stale rotation request from an already-rotated window; ignore.
        if (nextWindowIndex(previousState) != windowIndex) {
            return;
        }

        boolean hasWorkInProgress = hasWorkInProgress(previousState);
        // Fully terminated with nothing pending: no new window needed.
        if (!hasWorkInProgress && isTerminated(previousState) && !hasUnsentWindow(previousState)) {
            return;
        }

        expectedState = (previousState &~ NEXT_WINDOW_INDEX_MASK) | incrementNextWindowIndex(previousState) | HAS_WORK_IN_PROGRESS;

        if (STATE.compareAndSet(this, previousState, expectedState)) {
            if (hasWorkInProgress) {
                // another thread owns the drain loop; it will pick up the new index
                return;
            }
            break;
        }
    }

    drain(previousState, expectedState);
}
/**
 * Serialized drain loop performing all window delivery and rotation.
 *
 * <p>Exactly one thread runs this loop at a time (guarded by
 * {@link #HAS_WORK_IN_PROGRESS}); concurrent {@code request}/rotation calls
 * only mutate the packed state and rely on the owner to loop again. The loop
 * handles four situations per iteration:
 * <ol>
 *   <li>nothing to do (no rotation pending, no unsent window) — release lock;</li>
 *   <li>demand present and an unsent window exists — deliver it, then rotate
 *       if the window already ended;</li>
 *   <li>demand present, rotation pending — create and emit the next window;</li>
 *   <li>no demand, rotation pending — create the next window but keep it
 *       unsent ({@link #HAS_UNSENT_WINDOW}).</li>
 * </ol>
 *
 * @param previousState state observed before the work-in-progress flag was set
 * @param expectedState state after the work-in-progress flag was set
 */
void drain(long previousState, long expectedState) {
    for (;;) {
        long n = this.requested;

        if (this.logger != null) {
            this.logger.log(this.toString(), "dr"+n, previousState,
                    expectedState);
        }

        final boolean hasUnsentWindow = hasUnsentWindow(previousState);
        final int activeWindowIndex = activeWindowIndex(expectedState);
        final int nextWindowIndex = nextWindowIndex(expectedState);

        // short path to exit from loop if there is active window which was not
        // terminated yet and this window is delivered so we dont have to
        // perform any work (we just need to remove lock and exit)
        if (activeWindowIndex == nextWindowIndex && !hasUnsentWindow) {
            expectedState = markWorkDone(this, expectedState);
            previousState = expectedState | HAS_WORK_IN_PROGRESS;

            if (isCancelled(expectedState)) {
                return;
            }

            if (isTerminated(expectedState)) {
                // upstream terminated while we held the lock; emit terminal signal
                final Throwable e = this.error;
                if (e != null) {
                    this.actual.onError(e);
                }
                else {
                    this.actual.onComplete();
                }
                return;
            }

            if (!hasWorkInProgress(expectedState)) {
                return;
            }

            // more work was recorded concurrently; loop again
            continue;
        }

        if (n > 0) {
            // here we are if we entered from the request() method
            if (hasUnsentWindow) {
                final InnerWindow<T> currentUnsentWindow = this.window;
                assert currentUnsentWindow != null : "currentUnsentWindow can not be null";

                // Delivers current unsent window
                this.actual.onNext(currentUnsentWindow);

                if (n != Long.MAX_VALUE) {
                    n = REQUESTED.decrementAndGet(this);
                    if (this.logger != null) {
                        this.logger.log(this.toString(), "dec", n, n);
                    }
                }

                // Marks as sent current unsent window. Also, delivers
                // onComplete to the window subscriber if it was not delivered
                final long previousInnerWindowState = currentUnsentWindow.sendSent();

                if (isTerminated(expectedState)) {
                    // forward the stored terminal signal into the window (if
                    // it has not terminated itself) and then downstream
                    Throwable e = this.error;
                    if (e != null) {
                        if (!isTerminated(previousInnerWindowState)) {
                            currentUnsentWindow.sendError(e);
                        }
                        this.actual.onError(e);
                    }
                    else {
                        if (!isTerminated(previousInnerWindowState)) {
                            currentUnsentWindow.sendComplete();
                        }
                        this.actual.onComplete();
                    }
                    return;
                }

                // we should create next window if we see that
                // currentUnsentWindow is terminated and nextWindowIndex is
                // greater than currentWindowIndex
                if (nextWindowIndex > activeWindowIndex && (InnerWindow.isTimeout(previousInnerWindowState) || InnerWindow.isTerminated(previousInnerWindowState))) {
                    // if the just-consumed demand was the last unit, the new
                    // window must again be kept back as "unsent"
                    final boolean shouldBeUnsent = n == 0;
                    final InnerWindow<T> nextWindow =
                            new InnerWindow<>(this.maxSize, this,
                                    nextWindowIndex, shouldBeUnsent, logger);

                    this.window = nextWindow;

                    if (!shouldBeUnsent) {
                        this.actual.onNext(nextWindow);
                        if (n != Long.MAX_VALUE) {
                            REQUESTED.decrementAndGet(this);
                        }
                    }

                    previousState = commitWork(this, expectedState, shouldBeUnsent);
                    expectedState =
                            (((previousState &~ACTIVE_WINDOW_INDEX_MASK) &~ HAS_UNSENT_WINDOW) ^ (expectedState == previousState ? HAS_WORK_IN_PROGRESS : 0)) |
                                    incrementActiveWindowIndex(previousState) |
                                    (shouldBeUnsent ? HAS_UNSENT_WINDOW : 0);
                    // we need to put unsent flag here because we check at the
                    // beginning of the loop the presence for unsent flag from
                    // the previousState
                    previousState = (previousState &~ HAS_UNSENT_WINDOW) | (shouldBeUnsent ? HAS_UNSENT_WINDOW : 0);

                    if (isCancelled(expectedState)) {
                        nextWindow.sendCancel();
                        if (shouldBeUnsent) {
                            nextWindow.cancel();
                        }
                        return;
                    }

                    if (isTerminated(expectedState) && !shouldBeUnsent) {
                        final Throwable e = this.error;
                        if (e != null) {
                            nextWindow.sendError(e);
                            this.actual.onError(e);
                        }
                        else {
                            nextWindow.sendComplete();
                            this.actual.onComplete();
                        }
                        return;
                    }

                    try {
                        nextWindow.scheduleTimeout();
                    }
                    catch (Exception e) {
                        // scheduling may fail if the scheduler was disposed;
                        // route the failure through the appropriate terminal path
                        if (hasWorkInProgress(expectedState)) {
                            this.actual.onError(Operators.onOperatorError(this.s, e, this.actual.currentContext()));
                        }
                        else {
                            this.onError(Operators.onOperatorError(this.s, e, this.actual.currentContext()));
                        }
                        return;
                    }

                    // replenish upstream by the number of items the previous
                    // window actually received
                    final long nextRequest = InnerWindow.received(previousInnerWindowState);
                    if (nextRequest > 0) {
                        this.s.request(nextRequest);
                    }

                    if (!hasWorkInProgress(expectedState)) {
                        return;
                    }
                } else {
                    // window delivered but still active: just commit the sent fact
                    previousState = commitSent(this, expectedState);
                    expectedState = (previousState &~ HAS_UNSENT_WINDOW) ^ (expectedState == previousState ? HAS_WORK_IN_PROGRESS : 0);
                    previousState &= ~HAS_UNSENT_WINDOW;

                    if (isCancelled(expectedState)) {
                        return;
                    }

                    if (isTerminated(expectedState)) {
                        final Throwable e = this.error;
                        if (e != null) {
                            this.actual.onError(e);
                        }
                        else {
                            this.actual.onComplete();
                        }
                        return;
                    }

                    if (!hasWorkInProgress(expectedState)) {
                        return;
                    }
                }
            } else {
                // demand present and a rotation is pending: create, emit and
                // activate the next window
                final InnerWindow<T> nextWindow =
                        new InnerWindow<>(this.maxSize, this, nextWindowIndex,
                                false, logger);
                final InnerWindow<T> previousWindow = this.window;

                this.window = nextWindow;

                this.actual.onNext(nextWindow);
                if (n != Long.MAX_VALUE) {
                    REQUESTED.decrementAndGet(this);
                }

                previousState = commitWork(this, expectedState, false);
                expectedState =
                        (((previousState &~ACTIVE_WINDOW_INDEX_MASK) &~ HAS_UNSENT_WINDOW) ^ (expectedState == previousState ? HAS_WORK_IN_PROGRESS : 0)) |
                                incrementActiveWindowIndex(previousState);

                if (isCancelled(expectedState)) {
                    assert previousWindow != null : "previousWindow can not be null";
                    previousWindow.sendCancel();
                    nextWindow.sendCancel();
                    return;
                }

                if (isTerminated(expectedState)) {
                    assert previousWindow != null : "previousWindow can not be null";
                    final Throwable e = this.error;
                    if (e != null) {
                        previousWindow.sendError(e);
                        nextWindow.sendError(e);
                        this.actual.onError(e);
                    }
                    else {
                        previousWindow.sendComplete();
                        nextWindow.sendComplete();
                        this.actual.onComplete();
                    }
                    return;
                }

                try {
                    nextWindow.scheduleTimeout();
                }
                catch (Exception e) {
                    if (hasWorkInProgress(expectedState)) {
                        this.actual.onError(Operators.onOperatorError(this.s, e, this.actual.currentContext()));
                    }
                    else {
                        this.onError(Operators.onOperatorError(this.s, e, this.actual.currentContext()));
                    }
                    return;
                }

                final long nextRequest;
                if (previousWindow == null) {
                    // possible at the very beginning
                    nextRequest = this.maxSize;
                }
                else {
                    long previousActiveWindowState = previousWindow.sendComplete();
                    nextRequest = InnerWindow.received(previousActiveWindowState);
                }

                if (nextRequest > 0) {
                    this.s.request(nextRequest);
                }

                if (!hasWorkInProgress(expectedState)) {
                    return;
                }
            }
        }
        else if (n == 0 && !hasUnsentWindow) {
            // rotation pending but no demand: create the next window and keep
            // it back as "unsent" until demand arrives
            final InnerWindow<T> nextWindow =
                    new InnerWindow<>(this.maxSize, this, nextWindowIndex, true, logger);
            final InnerWindow<T> previousWindow = this.window;
            assert previousWindow != null : "previousWindow can not be null";

            this.window = nextWindow;
            // doesn't propagate through onNext since window is unsent

            previousState = commitWork(this, expectedState, true);
            expectedState =
                    (((previousState &~ACTIVE_WINDOW_INDEX_MASK) &~ HAS_UNSENT_WINDOW) ^ (expectedState == previousState ? HAS_WORK_IN_PROGRESS : 0)) |
                            incrementActiveWindowIndex(previousState) |
                            HAS_UNSENT_WINDOW;
            previousState |= HAS_UNSENT_WINDOW;

            if (isCancelled(expectedState)) {
                previousWindow.sendCancel();
                nextWindow.sendCancel();
                nextWindow.cancel();
                return;
            }

            // window is deliberately unsent, we can not deliver it since no
            // demand even if it is terminated

            try {
                nextWindow.scheduleTimeout();
            }
            catch (Exception e) {
                if (hasWorkInProgress(expectedState)) {
                    this.actual.onError(Operators.onOperatorError(this.s, e, this.actual.currentContext()));
                }
                else {
                    this.onError(Operators.onOperatorError(this.s, e, this.actual.currentContext()));
                }
                return;
            }

            long previousActiveWindowState = previousWindow.sendComplete();
            final long nextRequest = InnerWindow.received(previousActiveWindowState);
            if (nextRequest > 0) {
                this.s.request(nextRequest);
            }

            if (!hasWorkInProgress(expectedState)) {
                return;
            }
        }
        else {
            // no deliverable work in this pass: try to release the lock
            expectedState = markWorkDone(this, expectedState);
            previousState = expectedState | HAS_WORK_IN_PROGRESS;

            if (isCancelled(expectedState)) {
                final InnerWindow<T> currentWindow = this.window;
                assert currentWindow != null : "currentWindow can not be null";
                final long previousWindowState = currentWindow.sendCancel();
                if (!InnerWindow.isSent(previousWindowState)) {
                    currentWindow.cancel();
                }
                return;
            }

            if (isTerminated(expectedState) && !hasUnsentWindow(expectedState)) {
                final Throwable e = this.error;
                if (e != null) {
                    this.actual.onError(e);
                }
                else {
                    this.actual.onComplete();
                }
                return;
            }

            if (!hasWorkInProgress(expectedState)) {
                return;
            }
        }
    }
}
/**
 * Cancels the upstream subscription and the active window.
 *
 * <p>If a drain is in progress, the actual window cleanup is left to the
 * drain-loop owner, which observes {@link #CANCELLED_FLAG}.
 */
@Override
public void cancel() {
    final long previousState = markCancelled(this);
    // markCancelled is a no-op (returns unchanged state) when already
    // cancelled or fully terminated with nothing pending.
    if ((!hasWorkInProgress(previousState) && isTerminated(previousState) && !hasUnsentWindow(previousState)) || isCancelled(previousState)) {
        return;
    }

    this.s.cancel();

    final InnerWindow<T> currentActiveWindow = this.window;
    if (currentActiveWindow != null) {
        // only hard-cancel the window if it was never delivered downstream
        // and no drain loop will take care of it
        if (!InnerWindow.isSent(currentActiveWindow.sendCancel())) {
            if (!hasWorkInProgress(previousState)) {
                currentActiveWindow.cancel();
            }
        }
    }
}
/**
 * Schedules {@code runnable} to fire when the window's timespan elapses,
 * compensating for time already spent since {@code createTime} (the window may
 * have been created noticeably before its timeout could be scheduled).
 *
 * @param runnable   the timeout action to run
 * @param createTime window creation time in scheduler nanoseconds (see {@link #now()})
 * @return the scheduled task, or {@code InnerWindow.DISPOSED} if the timeout
 *         had already elapsed and {@code runnable} was run inline
 */
Disposable schedule(Runnable runnable, long createTime) {
    final long delayedNanos = scheduler.now(TimeUnit.NANOSECONDS) - createTime;

    final long timeSpanInNanos = unit.toNanos(timespan);
    final long newTimeSpanInNanos = timeSpanInNanos - delayedNanos;
    if (newTimeSpanInNanos > 0) {
        // Fix: schedule only the REMAINING time. The previous code computed
        // newTimeSpanInNanos but then scheduled the full timespan again,
        // stretching every window by however long scheduling was delayed.
        return worker.schedule(runnable, newTimeSpanInNanos, TimeUnit.NANOSECONDS);
    } else {
        // timespan already elapsed: time the window out immediately
        runnable.run();
        return InnerWindow.DISPOSED;
    }
}
/**
 * Current time of the operator's {@link Scheduler}, in nanoseconds.
 *
 * @return {@code scheduler.now(NANOSECONDS)}
 */
long now() {
    return this.scheduler.now(TimeUnit.NANOSECONDS);
}
/**
 * Whether the packed state carries {@link #HAS_UNSENT_WINDOW} (a window exists
 * that has not yet been emitted downstream).
 */
static boolean hasUnsentWindow(long state) {
    // HAS_UNSENT_WINDOW is a single bit, so non-zero after masking means "set"
    return (state & HAS_UNSENT_WINDOW) != 0;
}
/** Whether the {@code CANCELLED_FLAG} bit is set in the packed state. */
static boolean isCancelled(long state) {
    return (state & CANCELLED_FLAG) == CANCELLED_FLAG;
}
/**
 * Whether the packed state carries {@link #TERMINATED_FLAG} (upstream has
 * signalled onError or onComplete).
 */
static boolean isTerminated(long state) {
    // TERMINATED_FLAG is a single bit, so non-zero after masking means "set"
    return (state & TERMINATED_FLAG) != 0;
}
/**
 * Whether the packed state carries {@link #HAS_WORK_IN_PROGRESS} (some thread
 * currently owns the drain loop).
 */
static boolean hasWorkInProgress(long state) {
    // HAS_WORK_IN_PROGRESS is a single bit, so non-zero after masking means "set"
    return (state & HAS_WORK_IN_PROGRESS) != 0;
}
/**
 * Returns the 20-bit request-index field of {@code state} incremented by one
 * (wrapping within {@link #REQUEST_INDEX_MASK}). Only the field's bits are set
 * in the result; callers OR it into a state cleared with {@code ~REQUEST_INDEX_MASK}.
 */
static long incrementRequestIndex(long state) {
    return ((((state & REQUEST_INDEX_MASK) >> REQUEST_INDEX_SHIFT) + 1) << REQUEST_INDEX_SHIFT) & REQUEST_INDEX_MASK;
}
/**
 * Returns the 20-bit active-window-index field of {@code state} incremented by
 * one (wrapping within {@link #ACTIVE_WINDOW_INDEX_MASK}). Only the field's
 * bits are set in the result; callers OR it into a cleared state.
 */
static long incrementActiveWindowIndex(long state) {
    return ((((state & ACTIVE_WINDOW_INDEX_MASK) >> ACTIVE_WINDOW_INDEX_SHIFT) + 1) << ACTIVE_WINDOW_INDEX_SHIFT) & ACTIVE_WINDOW_INDEX_MASK;
}
/** Extracts the 20-bit index of the currently active window from the packed state. */
static int activeWindowIndex(long state) {
    return (int) ((state & ACTIVE_WINDOW_INDEX_MASK) >> ACTIVE_WINDOW_INDEX_SHIFT);
}
/**
 * Returns the 20-bit next-window-index field of {@code state} incremented by
 * one (wrapping within {@link #NEXT_WINDOW_INDEX_MASK}). Only the field's bits
 * are set in the result; callers OR it into a cleared state.
 */
static long incrementNextWindowIndex(long state) {
    return ((state & NEXT_WINDOW_INDEX_MASK) + 1) & NEXT_WINDOW_INDEX_MASK;
}
/** Extracts the 20-bit index of the next window to create from the packed state. */
static int nextWindowIndex(long state) {
    return (int) (state & NEXT_WINDOW_INDEX_MASK);
}
/**
 * Adds {@link #TERMINATED_FLAG} to indicate the termination fact. Operation
 * fails if current state is already terminated or cancelled.
 *
 * @param instance from which to read state
 *
 * @return previous state
 */
static <T> long markTerminated(WindowTimeoutWithBackpressureSubscriber<T> instance) {
    for(;;) {
        final long previousState = instance.state;
        if (isTerminated(previousState) || isCancelled(previousState)) {
            // already terminal; return unchanged so callers can detect the no-op
            return previousState;
        }

        final long nextState = previousState | TERMINATED_FLAG;
        if (STATE.compareAndSet(instance, previousState, nextState)) {
            if (instance.logger != null) {
                instance.logger.log(instance.toString(), "mtd", previousState, nextState);
            }
            return previousState;
        }
    }
}
/**
 * Adds {@link #CANCELLED_FLAG} to indicate cancellation fact. Operation fails
 * if current state is already cancelled, or it has no work-in-progress and
 * is-terminated and has no unsent window.
 *
 * @param instance from which to read state
 *
 * @return previous state
 */
static <T> long markCancelled(WindowTimeoutWithBackpressureSubscriber<T> instance) {
    for (;;) {
        final long previousState = instance.state;
        if ((!hasWorkInProgress(previousState) && isTerminated(previousState) && !hasUnsentWindow(previousState)) || isCancelled(previousState)) {
            // nothing left to cancel; return unchanged so callers can detect the no-op
            return previousState;
        }

        final long nextState = previousState | CANCELLED_FLAG;
        if (STATE.compareAndSet(instance, previousState, nextState)) {
            return previousState;
        }
    }
}
/**
 * Removes {@link #HAS_WORK_IN_PROGRESS} to indicate no work-in-progress.
 * Operation fails if current state does not equal the expected one (meaning
 * more work was recorded concurrently and the drain loop must run again).
 *
 * @param instance from which to read state
 *
 * @return current state if fail or next state if successfully applied
 */
static <T> long markWorkDone(WindowTimeoutWithBackpressureSubscriber<T> instance, long expectedState) {
    for (;;) {
        final long currentState = instance.state;

        if (expectedState != currentState) {
            // state moved under us: keep the lock, report the fresh state
            if (instance.logger != null) {
                instance.logger.log(instance.toString(), "fwd", currentState, currentState);
            }
            return currentState;
        }

        final long nextState = currentState ^ HAS_WORK_IN_PROGRESS;
        if (STATE.compareAndSet(instance, currentState, nextState)) {
            if (instance.logger != null) {
                instance.logger.log(instance.toString(), "mwd", currentState, nextState);
            }
            return nextState;
        }
    }
}
/**
 * Commits fact that an unsent window is delivered (clears
 * {@link #HAS_UNSENT_WINDOW}). This operation tries to remove
 * {@link #HAS_WORK_IN_PROGRESS} flag if current state is equal to the expected
 * one; otherwise the lock is kept and the caller loops again.
 *
 * @param instance from which to read state
 *
 * @return previous state
 */
static <T> long commitSent(WindowTimeoutWithBackpressureSubscriber<T> instance, long expectedState) {
    for (;;) {
        final long currentState = instance.state;

        final long clearState = (currentState &~ HAS_UNSENT_WINDOW);
        // XOR drops HAS_WORK_IN_PROGRESS only when no concurrent work arrived
        final long nextState = (clearState ^ (expectedState == currentState ? HAS_WORK_IN_PROGRESS : 0));

        if (STATE.compareAndSet(instance, currentState, nextState)) {
            if (instance.logger != null) {
                instance.logger.log(instance.toString(), "cts", currentState, nextState);
            }
            return currentState;
        }
    }
}
/**
 * Commits new active window index and removes {@link #HAS_UNSENT_WINDOW} if
 * specified. This operation tries to remove {@link #HAS_WORK_IN_PROGRESS} flag
 * if current state is equal to the expected one; otherwise the lock is kept
 * and the caller loops again.
 *
 * @param instance from which to read state
 *
 * @return previous state
 */
static <T> long commitWork(WindowTimeoutWithBackpressureSubscriber<T> instance, long expectedState, boolean setUnsentFlag) {
    for (;;) {
        final long currentState = instance.state;

        final long clearState = ((currentState &~ACTIVE_WINDOW_INDEX_MASK) &~ HAS_UNSENT_WINDOW);
        // advance activeWindowIndex; XOR drops HAS_WORK_IN_PROGRESS only when
        // no concurrent work arrived; optionally re-raise the unsent flag
        final long nextState = (clearState ^ (expectedState == currentState ? HAS_WORK_IN_PROGRESS : 0)) |
                incrementActiveWindowIndex(currentState) |
                (setUnsentFlag ? HAS_UNSENT_WINDOW : 0);

        if (STATE.compareAndSet(instance, currentState, nextState)) {
            if (instance.logger != null) {
                instance.logger.log(instance.toString(), "ctw", currentState, nextState);
            }
            return currentState;
        }
    }
}
}
static final
|
WindowTimeoutWithBackpressureSubscriber
|
java
|
apache__camel
|
components/camel-google/camel-google-secret-manager/src/main/java/org/apache/camel/component/google/secret/manager/GoogleSecretManagerConstants.java
|
{
"start": 908,
"end": 1529
}
|
interface ____ {
@Metadata(description = "The operation to perform",
javaType = "org.apache.camel.component.google.secret.manager.GoogleSecretManagerOperations")
String OPERATION = "GoogleSecretManagerOperation";
@Metadata(description = "The id of the secret", javaType = "String")
String SECRET_ID = "CamelGoogleSecretManagerSecretId";
@Metadata(description = "The version of the secret", javaType = "String", defaultValue = "latest")
String VERSION_ID = "CamelGoogleSecretManagerVersionId";
String REPLICATION = "CamelGoogleSecretManagerReplication";
}
|
GoogleSecretManagerConstants
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/testutils/InternalMiniClusterExtension.java
|
{
"start": 2027,
"end": 5237
}
|
class ____
implements BeforeAllCallback,
AfterAllCallback,
BeforeEachCallback,
AfterEachCallback,
ParameterResolver,
CustomExtension {
private final MiniClusterResource miniClusterResource;
public InternalMiniClusterExtension(
final MiniClusterResourceConfiguration miniClusterResourceConfiguration) {
this.miniClusterResource = new MiniClusterResource(miniClusterResourceConfiguration);
}
public int getNumberSlots() {
return miniClusterResource.getNumberSlots();
}
public MiniCluster getMiniCluster() {
return miniClusterResource.getMiniCluster();
}
public UnmodifiableConfiguration getClientConfiguration() {
return miniClusterResource.getClientConfiguration();
}
public URI getRestAddress() {
return miniClusterResource.getRestAddress();
}
@Override
public void beforeAll(ExtensionContext context) throws Exception {
miniClusterResource.before();
}
@Override
public void afterAll(ExtensionContext context) throws Exception {
miniClusterResource.after();
}
@Override
public boolean supportsParameter(
ParameterContext parameterContext, ExtensionContext extensionContext)
throws ParameterResolutionException {
Class<?> parameterType = parameterContext.getParameter().getType();
if (parameterContext.isAnnotated(InjectMiniCluster.class)
&& parameterType.isAssignableFrom(MiniCluster.class)) {
return true;
}
if (parameterContext.isAnnotated(InjectClusterClientConfiguration.class)
&& parameterType.isAssignableFrom(UnmodifiableConfiguration.class)) {
return true;
}
return parameterContext.isAnnotated(InjectClusterRESTAddress.class)
&& parameterType.isAssignableFrom(URI.class);
}
@Override
public Object resolveParameter(
ParameterContext parameterContext, ExtensionContext extensionContext)
throws ParameterResolutionException {
if (parameterContext.isAnnotated(InjectMiniCluster.class)) {
return miniClusterResource.getMiniCluster();
}
if (parameterContext.isAnnotated(InjectClusterClientConfiguration.class)) {
return miniClusterResource.getClientConfiguration();
}
if (parameterContext.isAnnotated(InjectClusterRESTAddress.class)) {
return miniClusterResource.getRestAddress();
}
throw new ParameterResolutionException("Unsupported parameter");
}
@Override
public void beforeEach(ExtensionContext context) throws Exception {
miniClusterResource.before();
}
@Override
public void afterEach(ExtensionContext context) throws Exception {
miniClusterResource.after();
}
@Override
public void before(ExtensionContext context) throws Exception {
miniClusterResource.before();
}
@Override
public void after(ExtensionContext context) throws Exception {
miniClusterResource.after();
}
}
|
InternalMiniClusterExtension
|
java
|
assertj__assertj-core
|
assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/api/recursive/comparison/legacy/RecursiveComparisonAssert_isEqualTo_with_optional_Test.java
|
{
"start": 1217,
"end": 6188
}
|
class ____ extends WithLegacyIntrospectionStrategyBaseTest {
@ParameterizedTest
@MethodSource("sameBooks")
void should_pass_when_comparing_optional_fields_recursively_and_not_using_optional_equals(BookWithOptionalCoAuthor actual,
BookWithOptionalCoAuthor expected) {
assertThat(actual).usingRecursiveComparison(recursiveComparisonConfiguration)
.isEqualTo(expected);
}
static Stream<Arguments> sameBooks() {
return Stream.of(Arguments.of(new BookWithOptionalCoAuthor("test"), new BookWithOptionalCoAuthor("test")),
// empty optional coAuthor
Arguments.of(new BookWithOptionalCoAuthor(null), new BookWithOptionalCoAuthor(null)),
// null coAuthor
Arguments.of(new BookWithOptionalCoAuthor(), new BookWithOptionalCoAuthor()));
}
@ParameterizedTest(name = "author 1 {0} / author 2 {1} / diff {2}")
@MethodSource("differentBookWithOptionalCoAuthors")
void should_fail_when_comparing_different_optional_fields(BookWithOptionalCoAuthor actual,
BookWithOptionalCoAuthor expected,
ComparisonDifference diff) {
compareRecursivelyFailsWithDifferences(actual, expected, diff);
}
private static Stream<Arguments> differentBookWithOptionalCoAuthors() {
BookWithOptionalCoAuthor pratchett = new BookWithOptionalCoAuthor("Terry Pratchett");
BookWithOptionalCoAuthor georgeMartin = new BookWithOptionalCoAuthor("George Martin");
return Stream.of(Arguments.of(pratchett, georgeMartin,
javaTypeDiff("coAuthor.value.name", "Terry Pratchett", "George Martin")),
Arguments.of(pratchett, new BookWithOptionalCoAuthor(null),
diff("coAuthor", Optional.of(new Author("Terry Pratchett")), Optional.empty())),
Arguments.of(new BookWithOptionalCoAuthor(null), pratchett,
diff("coAuthor", Optional.empty(), Optional.of(new Author("Terry Pratchett")))),
Arguments.of(new BookWithOptionalCoAuthor("Terry Pratchett", 1, 2L, 3.0),
new BookWithOptionalCoAuthor("Terry Pratchett", 2, 2L, 3.0),
javaTypeDiff("numberOfPages", OptionalInt.of(1), OptionalInt.of(2))),
Arguments.of(new BookWithOptionalCoAuthor("Terry Pratchett", 1, 2L, 3.0),
new BookWithOptionalCoAuthor("Terry Pratchett", 1, 4L, 3.0),
javaTypeDiff("bookId", OptionalLong.of(2L), OptionalLong.of(4L))),
Arguments.of(new BookWithOptionalCoAuthor("Terry Pratchett", 1, 2L, 3.0),
new BookWithOptionalCoAuthor("Terry Pratchett", 1, 2L, 6.0),
javaTypeDiff("price", OptionalDouble.of(3.0), OptionalDouble.of(6.0))));
}
@Test
void should_fail_when_comparing_non_optional_expected_field_with_optional_actual_field() {
// GIVEN
Author pratchett = new Author("Terry Pratchett");
BookWithOptionalCoAuthor actual = new BookWithOptionalCoAuthor(pratchett.name);
BookWithCoAuthor expected = new BookWithCoAuthor(pratchett);
// WHEN/THEN
compareRecursivelyFailsWithDifferences(actual, expected,
diff("bookId", null, 0L),
diff("coAuthor", Optional.of(pratchett), pratchett,
"Actual was compared to expected with equals because it is a java type (java.util.Optional) and expected is not (org.assertj.tests.core.api.recursive.data.Author)"),
diff("numberOfPages", null, 0),
diff("price", null, 0.0));
}
@Test
void should_fail_when_comparing_optional_expected_field_with_non_optional_actual_field() {
// GIVEN
Author pratchett = new Author("Terry Pratchett");
BookWithCoAuthor actual = new BookWithCoAuthor(pratchett);
BookWithOptionalCoAuthor expected = new BookWithOptionalCoAuthor(pratchett.name);
// WHEN/THEN
compareRecursivelyFailsWithDifferences(actual, expected,
diff("bookId", 0L, null),
diff("coAuthor", pratchett, Optional.of(pratchett),
"expected field is an Optional but actual field is not (org.assertj.tests.core.api.recursive.data.Author)"),
diff("numberOfPages", 0, null),
diff("price", 0.0, null));
}
static
|
RecursiveComparisonAssert_isEqualTo_with_optional_Test
|
java
|
spring-projects__spring-framework
|
spring-jdbc/src/main/java/org/springframework/jdbc/datasource/init/DatabasePopulator.java
|
{
"start": 997,
"end": 1996
}
|
interface ____ {
/**
* Populate, initialize, or clean up the database using the provided JDBC
* connection.
* <p><strong>Warning</strong>: Concrete implementations should not close
* the provided {@link Connection}.
* <p>Concrete implementations <em>may</em> throw an {@link SQLException} if
* an error is encountered but are <em>strongly encouraged</em> to throw a
* specific {@link ScriptException} instead. For example, Spring's
* {@link ResourceDatabasePopulator} and {@link DatabasePopulatorUtils} wrap
* all {@code SQLExceptions} in {@code ScriptExceptions}.
* @param connection the JDBC connection to use; already configured and
* ready to use; never {@code null}
* @throws SQLException if an unrecoverable data access exception occurs
* while interacting with the database
* @throws ScriptException in all other error cases
* @see DatabasePopulatorUtils#execute
*/
void populate(Connection connection) throws SQLException, ScriptException;
}
|
DatabasePopulator
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/boot/jaxb/mapping/spi/JaxbPersistentAttribute.java
|
{
"start": 322,
"end": 697
}
|
interface ____ {
/**
* The attribute's name
*/
String getName();
void setName(String name);
/**
* JPA's way to specify an access-strategy
*/
AccessType getAccess();
void setAccess(AccessType accessType);
/**
* Hibernate's pluggable access-strategy support
*/
String getAttributeAccessor();
void setAttributeAccessor(String value);
}
|
JaxbPersistentAttribute
|
java
|
spring-projects__spring-framework
|
spring-test/src/main/java/org/springframework/test/context/junit4/statements/SpringRepeat.java
|
{
"start": 1406,
"end": 3084
}
|
class ____ extends Statement {
protected static final Log logger = LogFactory.getLog(SpringRepeat.class);
private final Statement next;
private final Method testMethod;
private final int repeat;
/**
* Construct a new {@code SpringRepeat} statement for the supplied
* {@code testMethod}, retrieving the configured repeat count from the
* {@code @Repeat} annotation on the supplied method.
* @param next the next {@code Statement} in the execution chain
* @param testMethod the current test method
* @see TestAnnotationUtils#getRepeatCount(Method)
*/
public SpringRepeat(Statement next, Method testMethod) {
this(next, testMethod, TestAnnotationUtils.getRepeatCount(testMethod));
}
/**
* Construct a new {@code SpringRepeat} statement for the supplied
* {@code testMethod} and {@code repeat} count.
* @param next the next {@code Statement} in the execution chain
* @param testMethod the current test method
* @param repeat the configured repeat count for the current test method
*/
public SpringRepeat(Statement next, Method testMethod, int repeat) {
this.next = next;
this.testMethod = testMethod;
this.repeat = Math.max(1, repeat);
}
/**
* Evaluate the next {@link Statement statement} in the execution chain
* repeatedly, using the specified repeat count.
*/
@Override
public void evaluate() throws Throwable {
for (int i = 0; i < this.repeat; i++) {
if (this.repeat > 1 && logger.isTraceEnabled()) {
logger.trace(String.format("Repetition %d of test %s#%s()", (i + 1),
this.testMethod.getDeclaringClass().getSimpleName(), this.testMethod.getName()));
}
this.next.evaluate();
}
}
}
|
SpringRepeat
|
java
|
apache__dubbo
|
dubbo-registry/dubbo-registry-api/src/main/java/org/apache/dubbo/registry/integration/ReferenceCountExporter.java
|
{
"start": 984,
"end": 1924
}
|
class ____<T> implements Exporter<T> {
private final Exporter<T> exporter;
private final String providerKey;
private final ExporterFactory exporterFactory;
private final AtomicInteger count = new AtomicInteger(0);
public ReferenceCountExporter(Exporter<T> exporter, String providerKey, ExporterFactory exporterFactory) {
this.exporter = exporter;
this.providerKey = providerKey;
this.exporterFactory = exporterFactory;
}
@Override
public Invoker<T> getInvoker() {
return exporter.getInvoker();
}
public void increaseCount() {
count.incrementAndGet();
}
@Override
public void unexport() {
if (count.decrementAndGet() == 0) {
exporter.unexport();
}
exporterFactory.remove(providerKey, this);
}
@Override
public void register() {}
@Override
public void unregister() {}
}
|
ReferenceCountExporter
|
java
|
spring-projects__spring-framework
|
spring-core/src/main/java/org/springframework/asm/Symbol.java
|
{
"start": 6083,
"end": 6342
}
|
class ____ this symbol. Only used for {@link
* #CONSTANT_FIELDREF_TAG}, {@link #CONSTANT_METHODREF_TAG}, {@link
* #CONSTANT_INTERFACE_METHODREF_TAG}, and {@link #CONSTANT_METHOD_HANDLE_TAG} symbols.
*/
final String owner;
/**
* The name of the
|
of
|
java
|
apache__commons-lang
|
src/main/java/org/apache/commons/lang3/time/DatePrinter.java
|
{
"start": 2275,
"end": 3508
}
|
class ____, usually StringBuilder or StringBuffer.
* @return the specified string buffer.
* @since 3.5
*/
<B extends Appendable> B format(Calendar calendar, B buf);
/**
* Formats a {@link Calendar} object into the supplied {@link StringBuffer}.
* The TimeZone set on the Calendar is only used to adjust the time offset.
* The TimeZone specified during the construction of the Parser will determine the TimeZone
* used in the formatted string.
*
* @param calendar the calendar to format.
* @param buf the buffer to format into.
* @return the specified string buffer.
* @deprecated Use {{@link #format(Calendar, Appendable)}.
*/
@Deprecated
StringBuffer format(Calendar calendar, StringBuffer buf);
/**
* Formats a {@link Date} object using a {@link GregorianCalendar}.
*
* @param date the date to format
* @return the formatted string
*/
String format(Date date);
/**
* Formats a {@link Date} object into the
* supplied {@link Appendable} using a {@link GregorianCalendar}.
*
* @param date the date to format.
* @param buf the buffer to format into.
* @param <B> the Appendable
|
type
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/cfg/PersistenceSettings.java
|
{
"start": 6621,
"end": 7863
}
|
class ____ implements {@link org.hibernate.SessionFactoryObserver} and has
* a constructor with no parameters.
*
* @see org.hibernate.boot.SessionFactoryBuilder#addSessionFactoryObservers(SessionFactoryObserver...)
*/
String SESSION_FACTORY_OBSERVER = "hibernate.session_factory_observer";
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Legacy JPA settings
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/**
* @deprecated Use {@link #JAKARTA_PERSISTENCE_PROVIDER} instead
*/
@Deprecated
@SuppressWarnings("DeprecatedIsStillUsed")
String JPA_PERSISTENCE_PROVIDER = "javax.persistence.provider";
/**
* The type of transactions supported by the entity managers.
* <p>
* See JPA 2 sections 9.4.3 and 8.2.1.2
*
* @deprecated Use {@link #JAKARTA_TRANSACTION_TYPE} instead
*/
@Deprecated
String JPA_TRANSACTION_TYPE = "javax.persistence.transactionType";
/**
* Specifies whether unowned (i.e. {@code mapped-by}) associations should be considered
* when validating transient entity instance references.
*
* @settingDefault {@code false}
*/
String UNOWNED_ASSOCIATION_TRANSIENT_CHECK = "hibernate.unowned_association_transient_check";
}
|
which
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/regex/RegexMatch.java
|
{
"start": 990,
"end": 3065
}
|
class ____<T extends StringPattern> extends UnaryScalarFunction {
private final T pattern;
private final boolean caseInsensitive;
protected RegexMatch(Source source, Expression value, T pattern, boolean caseInsensitive) {
super(source, value);
this.pattern = pattern;
this.caseInsensitive = caseInsensitive;
}
public T pattern() {
return pattern;
}
public boolean caseInsensitive() {
return caseInsensitive;
}
@Override
public DataType dataType() {
return DataType.BOOLEAN;
}
@Override
public Nullability nullable() {
if (pattern() == null) {
return Nullability.TRUE;
}
return field().nullable();
}
@Override
protected TypeResolution resolveType() {
return isStringAndExact(field(), sourceText(), DEFAULT);
}
@Override
public boolean foldable() {
// right() is not directly foldable in any context but Like can fold it.
return field().foldable();
}
@Override
public Boolean fold(FoldContext ctx) {
throw new UnsupportedOperationException();
}
/**
* Returns an equivalent optimized expression taking into account the case of the pattern(s)
* @param unwrappedField the field with to_upper/to_lower function removed
* @param matchesCaseFn a predicate to check if a pattern matches the case
* @return an optimized equivalent Expression or this if no optimization is possible
*/
public abstract Expression optimizeStringCasingWithInsensitiveRegexMatch(Expression unwrappedField, Predicate<String> matchesCaseFn);
@Override
public boolean equals(Object obj) {
if (super.equals(obj)) {
RegexMatch<?> other = (RegexMatch<?>) obj;
return caseInsensitive == other.caseInsensitive && Objects.equals(pattern, other.pattern);
}
return false;
}
@Override
public int hashCode() {
return Objects.hash(super.hashCode(), pattern(), caseInsensitive);
}
}
|
RegexMatch
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/jpa/criteria/MultiSelectResultTypeTest.java
|
{
"start": 915,
"end": 4895
}
|
class ____ {
@BeforeAll
public static void setUp(EntityManagerFactoryScope scope) {
scope.inTransaction(
entityManager -> {
TestEntity testEntity = new TestEntity( 1, "a" );
entityManager.persist( testEntity );
}
);
}
@Test
public void testResultOfMultiSelect(EntityManagerFactoryScope scope) {
scope.inTransaction(
entityManager -> {
CriteriaBuilder cb = entityManager.getCriteriaBuilder();
CriteriaQuery<Integer[]> q = cb.createQuery( Integer[].class );
Root<TestEntity> r = q.from( TestEntity.class );
q.multiselect( List.of( r.get( "id" ), r.get( "id" ) ) );
List<Integer[]> idPairs = entityManager.createQuery( q ).getResultList();
assertThat( idPairs.size() ).isEqualTo( 1 );
Integer[] ids = idPairs.get( 0 );
assertThat( ids[0] ).isEqualTo( 1 );
}
);
}
@Test
public void testResultOfMultiSelectPrimitive(EntityManagerFactoryScope scope) {
scope.inTransaction(
entityManager -> {
CriteriaBuilder cb = entityManager.getCriteriaBuilder();
CriteriaQuery<int[]> q = cb.createQuery( int[].class );
Root<TestEntity> r = q.from( TestEntity.class );
q.multiselect( List.of( r.get( "id" ), r.get( "id" ) ) );
List<int[]> idPairs = entityManager.createQuery( q ).getResultList();
assertThat( idPairs.size() ).isEqualTo( 1 );
int[] ids = idPairs.get( 0 );
assertThat( ids[0] ).isEqualTo( 1 );
}
);
}
@Test
public void testResultOfMultiSelect2(EntityManagerFactoryScope scope) {
scope.inTransaction(
entityManager -> {
CriteriaBuilder cb = entityManager.getCriteriaBuilder();
CriteriaQuery<Object[]> q = cb.createQuery( Object[].class );
Root<TestEntity> r = q.from( TestEntity.class );
q.multiselect( List.of( r.get( "id" ), r.get( "name" ) ) );
List<Object[]> values = entityManager.createQuery( q ).getResultList();
assertThat( values.size() ).isEqualTo( 1 );
Object[] value = values.get( 0 );
assertThat( value[0] ).isEqualTo( 1 );
assertThat( value[1] ).isEqualTo( "a" );
}
);
}
@Test
public void testResultOfSelect(EntityManagerFactoryScope scope) {
scope.inTransaction(
entityManager -> {
CriteriaBuilder cb = entityManager.getCriteriaBuilder();
CriteriaQuery<Integer> q = cb.createQuery( Integer.class );
Root<TestEntity> r = q.from( TestEntity.class );
q.select( r.get( "id" ) );
List<Integer> idPairs = entityManager.createQuery( q ).getResultList();
assertThat( idPairs.size() ).isEqualTo( 1 );
assertThat( idPairs.get( 0 ) ).isEqualTo( 1 );
}
);
}
@Test
public void testValidateSelectItemAgainstArrayComponentType(EntityManagerFactoryScope scope) {
scope.inTransaction(
entityManager -> {
CriteriaBuilder cb = entityManager.getCriteriaBuilder();
CriteriaQuery<String[]> q = cb.createQuery( String[].class );
Root<TestEntity> r = q.from( TestEntity.class );
q.select( r.get( "id" ) );
try {
entityManager.createQuery( q );
fail( "Should fail with a type validation error" );
}
catch (QueryTypeMismatchException ex) {
assertThat( ex.getMessage() ).contains( String[].class.getName(), Integer.class.getName() );
}
}
);
}
@Test
public void testValidateSelectItemAgainstArrayComponentType2(EntityManagerFactoryScope scope) {
scope.inTransaction(
entityManager -> {
CriteriaBuilder cb = entityManager.getCriteriaBuilder();
CriteriaQuery<String[]> q = cb.createQuery( String[].class );
Root<TestEntity> r = q.from( TestEntity.class );
q.multiselect( r.get( "name" ), r.get( "id" ) );
try {
entityManager.createQuery( q );
fail( "Should fail with a type validation error" );
}
catch (QueryTypeMismatchException ex) {
assertThat( ex.getMessage() ).contains( String.class.getName(), Integer.class.getName() );
}
}
);
}
@Entity(name = "TestEntity")
public static
|
MultiSelectResultTypeTest
|
java
|
quarkusio__quarkus
|
extensions/vertx-http/runtime/src/main/java/io/quarkus/vertx/http/runtime/HandlerType.java
|
{
"start": 115,
"end": 661
}
|
enum ____ {
/**
* A regular route handler invoked on the event loop.
*
* @see io.vertx.ext.web.Route#handler(Handler)
*/
NORMAL,
/**
* A blocking route handler, invoked on a worker thread.
*
* @see io.vertx.ext.web.Route#blockingHandler(Handler)
*/
BLOCKING,
/**
* A failure handler, invoked when an exception is thrown from a route handler.
* This is invoked on the event loop.
*
* @see io.vertx.ext.web.Route#failureHandler(Handler)
*/
FAILURE
}
|
HandlerType
|
java
|
quarkusio__quarkus
|
extensions/vertx/runtime/src/main/java/io/quarkus/vertx/runtime/VertxProducer.java
|
{
"start": 945,
"end": 3312
}
|
class ____ {
private static final Logger LOGGER = Logger.getLogger(VertxProducer.class);
@Singleton
@Produces
public EventBus eventbus(Vertx vertx) {
return vertx.eventBus();
}
@Singleton
@Produces
public io.vertx.mutiny.core.Vertx mutiny(Vertx vertx) {
return io.vertx.mutiny.core.Vertx.newInstance(vertx);
}
@Singleton
@Produces
public io.vertx.mutiny.core.eventbus.EventBus mutinyEventBus(io.vertx.mutiny.core.Vertx mutiny) {
return mutiny.eventBus();
}
/**
* Undeploy verticles backed by contextual instances of {@link ApplicationScoped} beans before the application context is
* destroyed. Otherwise, Vertx may attempt to stop the verticles after the CDI container is shut down.
*
* @param event
* @param beanManager
*/
void undeployVerticles(@Observes @BeforeDestroyed(ApplicationScoped.class) Object event, BeanManager beanManager) {
// Only beans with the AbstractVerticle in the set of bean types are considered - we need a deployment id
Set<Bean<?>> beans = beanManager.getBeans(AbstractVerticle.class, Any.Literal.INSTANCE);
Context applicationContext = beanManager.getContext(ApplicationScoped.class);
for (Bean<?> bean : beans) {
if (ApplicationScoped.class.equals(bean.getScope())) {
// Only beans with @ApplicationScoped are considered
Object instance = applicationContext.get(bean);
if (instance != null) {
// Only existing instances are considered
try {
AbstractVerticle verticle = (AbstractVerticle) instance;
io.vertx.mutiny.core.Vertx mutiny = beanManager.createInstance()
.select(io.vertx.mutiny.core.Vertx.class).get();
mutiny.undeploy(verticle.deploymentID()).await().indefinitely();
LOGGER.debugf("Undeployed verticle: %s", instance.getClass());
} catch (Exception e) {
// In theory, a user can undeploy the verticle manually
LOGGER.debugf("Unable to undeploy verticle %s: %s", instance.getClass(), e.toString());
}
}
}
}
}
}
|
VertxProducer
|
java
|
quarkusio__quarkus
|
independent-projects/tools/devtools-testing/src/main/java/io/quarkus/devtools/testing/SnapshotTesting.java
|
{
"start": 1316,
"end": 15702
}
|
class ____ {
// The PathTree API is used to support code start testing in the platform where snapshots are located in test JARs
private static volatile PathTree snapshotsBaseRoot;
private static final String SNAPSHOTS_DIR_NAME = "__snapshots__";
public static final Path SNAPSHOTS_DIR = Path.of("src/test/resources").resolve(SNAPSHOTS_DIR_NAME);
public static final String UPDATE_SNAPSHOTS_PROPERTY = "update-snapshots";
public static final String UPDATE_SNAPSHOTS_PROPERTY_SHORTCUT = "snap";
public static PathTree getSnapshotsBaseTree() {
if (snapshotsBaseRoot != null) {
return snapshotsBaseRoot;
}
PathTree srcTree = null;
if (Files.isDirectory(SNAPSHOTS_DIR)) {
srcTree = PathTree.ofDirectoryOrArchive(SNAPSHOTS_DIR.getParent());
} else if (shouldUpdateSnapshot(SNAPSHOTS_DIR_NAME)) {
try {
Files.createDirectories(SNAPSHOTS_DIR);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
srcTree = PathTree.ofDirectoryOrArchive(SNAPSHOTS_DIR.getParent());
}
final URL url = Thread.currentThread().getContextClassLoader().getResource(SNAPSHOTS_DIR_NAME);
if (url == null) {
if (srcTree == null) {
Assertions.fail("Failed to locate " + SNAPSHOTS_DIR_NAME + " directory on the classpath and "
+ SNAPSHOTS_DIR.toAbsolutePath() + " directory does not exist (use -Dsnap to create it automatically)");
}
return snapshotsBaseRoot = srcTree;
} else if ("file".equals(url.getProtocol())) {
final Path p;
try {
p = Path.of(url.toURI());
} catch (URISyntaxException e) {
throw new IllegalStateException("Failed to translate " + url + " to path", e);
}
return snapshotsBaseRoot = new MultiRootPathTree(PathTree.ofDirectoryOrArchive(p.getParent()), srcTree);
} else if ("jar".equals(url.getProtocol())) {
final String jarUrlStr = url.toExternalForm();
final String fileUrlStr = jarUrlStr.substring("jar:".length(),
jarUrlStr.length() - ("!/" + SNAPSHOTS_DIR_NAME).length());
final Path p = Path.of(URI.create(fileUrlStr));
final PathTree jarPathTree = PathTree.ofDirectoryOrArchive(p);
return snapshotsBaseRoot = srcTree == null ? jarPathTree : new MultiRootPathTree(jarPathTree, srcTree);
} else {
throw new IllegalStateException("Unexpected URL protocol in " + url);
}
}
public static <T> T withSnapshotsDir(String relativePath, Function<Path, T> function) {
final PathTree snapshotsBaseRoot = getSnapshotsBaseTree();
try (OpenPathTree tree = snapshotsBaseRoot.open()) {
return function.apply(tree.getPath(SNAPSHOTS_DIR_NAME).resolve(relativePath));
} catch (IOException e) {
throw new UncheckedIOException("Failed to open " + snapshotsBaseRoot.getRoots(), e);
}
}
/**
* Test file content to make sure it is valid by comparing it to its snapshots.
* <br />
* The snapshot file can easily be updated when necessary and reviewed to confirm it is consistent with the changes.
* <br />
* <br />
* The snapshot file will be created/updated using <code>-Dsnap</code> or
* <code>-Dupdate-snapshots</code>
* <br />
* <br />
* Even if the content is checked as a whole, it's always better to also manually check that specific content snippets
* contains what's expected
* <br />
* <br />
* example:
*
* <pre>
* assertThatMatchSnapshot(testInfo, projectDir, "src/main/java/org/acme/GreetingResource.java")
* .satisfies(checkContains("@Path(\"/hello\")"))
* </pre>
*
* @param testInfo the {@link TestInfo} from the {@Link Test} parameter (used to get the current test class & method to
* compute the snapshot location)
* @param parentDir the parent directory containing the generated files for this test (makes it nicer when checking multiple
* snapshots)
* @param fileRelativePath the relative path from the directory (used to name the snapshot)
* @return an {@link AbstractPathAssert} giving a direct way to check specific content snippets contains what's expected
* @throws Throwable
*/
public static AbstractPathAssert<?> assertThatMatchSnapshot(TestInfo testInfo, Path parentDir, String fileRelativePath)
throws Throwable {
final String snapshotDirName = getSnapshotDirName(testInfo);
final String normalizedFileName = snapshotDirName + "/" + normalizePathAsName(fileRelativePath);
return assertThatMatchSnapshot(parentDir.resolve(fileRelativePath), normalizedFileName);
}
/**
* Test file content to make sure it is valid by comparing it to a snapshot.
* <br />
* The snapshot file can easily be updated when necessary and reviewed to confirm it is consistent with the changes.
* <br />
* <br />
* The snapshot file will be created/updated using <code>-Dsnap</code> or
* <code>-Dupdate-snapshots</code>
* <br />
* <br />
* Even if the content is checked as a whole, it's always better to also manually check that specific content snippets
* contains what's expected using {@link #checkContains(String)} or {@link #checkMatches(String)}
*
* @param fileToCheck the {@link Path} of the file to check
* @param snapshotIdentifier the snapshotIdentifier of the snapshot (used as a relative path from the {@link #SNAPSHOTS_DIR}
* @return an {@link AbstractPathAssert} giving a direct way to check specific content snippets contains what's expected
* @throws Throwable
*/
public static AbstractPathAssert<?> assertThatMatchSnapshot(Path fileToCheck, String snapshotIdentifier) throws Throwable {
assertThat(fileToCheck).isRegularFile();
final boolean updateSnapshot = shouldUpdateSnapshot(snapshotIdentifier);
return withSnapshotsDir(snapshotIdentifier, snapshotFile -> {
if (updateSnapshot) {
final Path srcSnapshotFile = SNAPSHOTS_DIR.resolve(snapshotIdentifier);
if (Files.isRegularFile(srcSnapshotFile)) {
deleteExistingSnapshots(snapshotIdentifier, srcSnapshotFile);
}
try {
FileUtils.copyFile(fileToCheck.toFile(), srcSnapshotFile.toFile());
System.out.println("COPIED " + fileToCheck + " -> " + srcSnapshotFile);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
snapshotFile = srcSnapshotFile;
}
final String snapshotNotFoundDescription = "corresponding snapshot file not found for " + snapshotIdentifier
+ " (Use -Dsnap to create it automatically)";
final String description = "Snapshot is not matching (use -Dsnap to update it automatically): "
+ snapshotIdentifier;
if (isUTF8File(fileToCheck)) {
assertThat(snapshotFile).as(snapshotNotFoundDescription).isRegularFile();
assertThat(fileToCheck).as(description).exists()
.usingCharset(StandardCharsets.UTF_8)
.hasSameTextualContentAs(snapshotFile, StandardCharsets.UTF_8);
} else {
assertThat(snapshotFile).as(snapshotNotFoundDescription).isRegularFile();
assertThat(fileToCheck).as(description).hasSameBinaryContentAs(snapshotFile);
}
return assertThat(fileToCheck);
});
}
/**
* Test directory tree to make sure it is valid by comparing it to a snapshot.
* <br />
* The snapshot file can easily be updated when necessary and reviewed to confirm it is consistent with the changes.
* <br />
* <br />
* The snapshot file will be created/updated using <code>-Dsnap</code> or
* <code>-Dupdate-snapshots</code>
*
* @param testInfo the {@link TestInfo} from the {@Link Test} parameter (used to get the current test class & method to
* compute the snapshot location)
* @param dir the {@link Path} of the directory to test
* @return a {@link ListAssert} with the directory tree as a list
* @throws Throwable
*/
public static ListAssert<String> assertThatDirectoryTreeMatchSnapshots(TestInfo testInfo, Path dir) throws Throwable {
return assertThatDirectoryTreeMatchSnapshots(getSnapshotDirName(testInfo), dir);
}
/**
* Test directory tree to make sure it is valid by comparing it to a snapshot.
* <br />
* The snapshot file can easily be updated when necessary and reviewed to confirm it is consistent with the changes.
* <br />
* <br />
* The snapshot file will be created/updated using <code>-Dsnap</code> or
* <code>-Dupdate-snapshots</code>
*
* @param snapshotDirName the snapshot dir name for storage
* @param dir the {@link Path} of the directory to test
* @return a {@link ListAssert} with the directory tree as a list
* @throws Throwable
*/
public static ListAssert<String> assertThatDirectoryTreeMatchSnapshots(String snapshotDirName, Path dir) throws Throwable {
assertThat(dir).isDirectory();
final List<String> tree = Files.walk(dir)
.map(p -> {
final String r = dir.relativize(p).toString().replace('\\', '/');
if (Files.isDirectory(p)) {
return r + "/";
}
return r;
})
.sorted()
.collect(toList());
final String snapshotName = snapshotDirName + "/dir-tree.snapshot";
final boolean updateSnapshot = shouldUpdateSnapshot(snapshotName);
return withSnapshotsDir(snapshotName, snapshotFile -> {
try {
if (updateSnapshot) {
final Path srcSnapshotFile = SNAPSHOTS_DIR.resolve(snapshotName);
if (Files.isRegularFile(srcSnapshotFile)) {
deleteExistingSnapshots(snapshotName, srcSnapshotFile);
}
Files.createDirectories(srcSnapshotFile.getParent());
Files.write(srcSnapshotFile, String.join("\n", tree).getBytes(StandardCharsets.UTF_8));
snapshotFile = srcSnapshotFile;
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
assertThat(snapshotFile)
.as("corresponding snapshot file not found for " + snapshotName
+ " (Use -Dsnap to create it automatically)")
.isRegularFile();
final List<String> content = Arrays.stream(getTextContent(snapshotFile).split("\\v"))
.filter(s -> !s.isEmpty())
.collect(toList());
return assertThat(tree)
.as("Snapshot is not matching (use -Dsnap to update it automatically):" + snapshotName)
.containsExactlyInAnyOrderElementsOf(content);
});
}
public static String getTextContent(Path file) {
try {
return Files.readString(file);
} catch (IOException e) {
throw new UncheckedIOException("Unable to read " + file.toString(), e);
}
}
public static void deleteTestDirectory(final File file) throws IOException {
FileUtils.deleteDirectory(file);
Assertions.assertFalse(
Files.exists(file.toPath()), "Directory still exists");
}
/**
* To use with {@link AbstractPathAssert} in order to check the file content contains a specific string.
*
* @param s the string which should be in the file content
* @return a {@link Consumer<Path>} to use with {@link AbstractPathAssert#satisfies(Consumer)}
*/
public static Consumer<Path> checkContains(String s) {
return (p) -> assertThat(getTextContent(p)).contains(s);
}
public static Consumer<Path> checkNotContains(String s) {
return (p) -> assertThat(getTextContent(p)).doesNotContainIgnoringCase(s);
}
public static Consumer<Path> checkMatches(String regex) {
return (p) -> assertThat(getTextContent(p)).matches(regex);
}
public static String getSnapshotDirName(TestInfo testInfo) {
return testInfo.getTestClass().get().getSimpleName() + '/' + testInfo.getTestMethod().get().getName();
}
public static String normalizePathAsName(String fileRelativePath) {
return fileRelativePath.replace('/', '_');
}
private static boolean shouldUpdateSnapshot(String identifier) {
return getUpdateSnapshotsProp().filter(u -> u.isEmpty() || "true".equalsIgnoreCase(u) || u.contains(identifier))
.isPresent();
}
private static boolean isUTF8File(final Path file) {
try {
final byte[] inputBytes = Files.readAllBytes(file);
final String converted = new String(inputBytes, StandardCharsets.UTF_8);
final byte[] outputBytes = converted.getBytes(StandardCharsets.UTF_8);
return Arrays.equals(inputBytes, outputBytes);
} catch (IOException e) {
return false;
}
}
private static void deleteExistingSnapshots(String name, Path snapshots) {
System.out.println("\n>>>>>> DELETING EXISTING TEST SNAPSHOTS FOR:\n>>>>>> " + name + "\n");
FileUtils.deleteQuietly(snapshots.toFile());
}
static Optional<String> getUpdateSnapshotsProp() {
final Optional<String> property = Optional
.ofNullable(System.getProperty(UPDATE_SNAPSHOTS_PROPERTY, System.getenv(UPDATE_SNAPSHOTS_PROPERTY)));
if (property.isPresent()) {
return property;
}
return Optional.ofNullable(
System.getProperty(UPDATE_SNAPSHOTS_PROPERTY_SHORTCUT, System.getenv(UPDATE_SNAPSHOTS_PROPERTY_SHORTCUT)));
}
}
|
SnapshotTesting
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/ComparisonContractViolatedTest.java
|
{
"start": 1481,
"end": 6012
}
|
class ____ {
int intField;
long longField;
@Override
public boolean equals(Object o) {
return o instanceof Struct
&& intField == ((Struct) o).intField
&& longField == ((Struct) o).longField;
}
@Override
public int hashCode() {
return intField + (int) longField;
}
}
static final Comparator<Struct> intComparisonNoZero1 =
new Comparator<Struct>() {
@Override
public int compare(Struct left, Struct right) {
// BUG: Diagnostic contains: Integer.compare(left.intField, right.intField)
return (left.intField < right.intField) ? -1 : 1;
}
};
static final Comparator<Struct> intComparisonNoZero2 =
new Comparator<Struct>() {
@Override
public int compare(Struct left, Struct right) {
// BUG: Diagnostic contains: Integer.compare(left.intField, right.intField)
return (right.intField < left.intField) ? 1 : -1;
}
};
static final Comparator<Struct> intComparisonNoZero3 =
new Comparator<Struct>() {
@Override
public int compare(Struct left, Struct right) {
// BUG: Diagnostic contains: Integer.compare(left.intField, right.intField)
return (left.intField > right.intField) ? 1 : -1;
}
};
static final Comparator<Struct> intComparisonNoZero4 =
new Comparator<Struct>() {
@Override
public int compare(Struct left, Struct right) {
// BUG: Diagnostic contains: Integer.compare(left.intField, right.intField)
return (left.intField <= right.intField) ? -1 : 1;
}
};
static final Comparator<Struct> longComparisonNoZero1 =
new Comparator<Struct>() {
@Override
public int compare(Struct left, Struct right) {
// BUG: Diagnostic contains: Long.compare(left.longField, right.longField)
return (left.longField < right.longField) ? -1 : 1;
}
};
static final Comparator<Struct> longComparisonNoZero2 =
new Comparator<Struct>() {
@Override
public int compare(Struct left, Struct right) {
// BUG: Diagnostic contains: Long.compare(left.longField, right.longField)
return (left.longField < right.longField) ? -1 : POSITIVE_CONSTANT;
}
};
static final Comparator<Struct> zeroOrOneComparator =
new Comparator<Struct>() {
@Override
// BUG: Diagnostic contains: violates the contract
public int compare(Struct o1, Struct o2) {
return o1.equals(o2) ? 0 : 1;
}
};
static final Comparator<Struct> zeroOrNegativeOneComparator =
new Comparator<Struct>() {
@Override
// BUG: Diagnostic contains: violates the contract
public int compare(Struct o1, Struct o2) {
return o1.equals(o2) ? 0 : -1;
}
};
static final Comparator<Struct> zeroOrPositiveConstantComparator =
new Comparator<Struct>() {
@Override
// BUG: Diagnostic contains: violates the contract
public int compare(Struct o1, Struct o2) {
return o1.equals(o2) ? 0 : POSITIVE_CONSTANT;
}
};
}\
""")
.doTest();
}
@Test
public void negativeCase() {
compilationHelper
.addSourceLines(
"ComparisonContractViolatedNegativeCases.java",
"""
package com.google.errorprone.bugpatterns.testdata;
public
|
Struct
|
java
|
quarkusio__quarkus
|
extensions/vertx/deployment/src/test/java/io/quarkus/vertx/deployment/VerticleWithInstanceDeploymentTest.java
|
{
"start": 607,
"end": 1279
}
|
class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(BeanDeployingAVerticleFromInstance.class, MyVerticle.class));
@Test
public void testDeploymentOfVerticleInstance() {
String resp1 = RestAssured.get("http://localhost:8080").asString();
String resp2 = RestAssured.get("http://localhost:8080").asString();
Assertions.assertTrue(resp1.startsWith("OK"));
Assertions.assertTrue(resp2.startsWith("OK"));
Assertions.assertNotEquals(resp1, resp2);
}
public static
|
VerticleWithInstanceDeploymentTest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/boot/archive/internal/ByteArrayInputStreamAccess.java
|
{
"start": 398,
"end": 809
}
|
class ____ implements InputStreamAccess, Serializable {
private final String name;
private final byte[] bytes;
public ByteArrayInputStreamAccess(String name, byte[] bytes) {
this.name = name;
this.bytes = bytes;
}
@Override
public String getStreamName() {
return name;
}
@Override
public InputStream accessInputStream() {
return new ByteArrayInputStream( bytes );
}
}
|
ByteArrayInputStreamAccess
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDeciderTests.java
|
{
"start": 4341,
"end": 72382
}
|
class ____ extends ESTestCase {
private static final long[] NODE_TIERS_NO_MONITORING = new long[] {
ByteSizeValue.ofGb(1).getBytes(),
ByteSizeValue.ofGb(2).getBytes(),
ByteSizeValue.ofGb(4).getBytes(),
ByteSizeValue.ofGb(8).getBytes(),
ByteSizeValue.ofGb(16).getBytes(),
ByteSizeValue.ofGb(32).getBytes(),
ByteSizeValue.ofGb(64).getBytes(),
ByteSizeValue.ofGb(15).getBytes(),
ByteSizeValue.ofGb(30).getBytes(),
ByteSizeValue.ofGb(60).getBytes() };
// When monitoring is enabled Filebeat and Metricbeat are given a memory allowance of 360MB,
// and this is deducted from the raw node size.
private static final long MONITORING_ALLOWANCE_BYTES = ByteSizeValue.ofMb(360).getBytes();
private static final long[] NODE_TIERS_WITH_MONITORING = Arrays.stream(NODE_TIERS_NO_MONITORING)
.map(m -> m - MONITORING_ALLOWANCE_BYTES)
.toArray();
private static final long BYTES_IN_4MB = ByteSizeValue.ofMb(4).getBytes();
// Must match the logic used in MachineDependentHeap.MachineNodeRole.ML_ONLY
// (including rounding down to a multiple of 4 megabytes before multiplying
// back up).
public static long mlOnlyNodeJvmBytes(long systemMemoryBytes) {
// 40% of memory up to 16GB, plus 10% of memory above that, up to an absolute maximum of 31GB
long unroundedBytes = (systemMemoryBytes <= JVM_SIZE_KNOT_POINT)
? (long) (systemMemoryBytes * 0.4)
: (long) min(JVM_SIZE_KNOT_POINT * 0.4 + (systemMemoryBytes - JVM_SIZE_KNOT_POINT) * 0.1, STATIC_JVM_UPPER_THRESHOLD);
return (unroundedBytes / BYTES_IN_4MB) * BYTES_IN_4MB;
}
public static final List<Tuple<Long, Long>> AUTO_NODE_TIERS_NO_MONITORING = Arrays.stream(NODE_TIERS_NO_MONITORING)
.mapToObj(m -> Tuple.tuple(m, mlOnlyNodeJvmBytes(m)))
.toList();
public static final List<Tuple<Long, Long>> AUTO_NODE_TIERS_WITH_MONITORING = Arrays.stream(NODE_TIERS_WITH_MONITORING)
.mapToObj(m -> Tuple.tuple(m, mlOnlyNodeJvmBytes(m)))
.toList();
private static final long TEST_NODE_SIZE = ByteSizeValue.ofGb(20).getBytes();
private static final long ML_MEMORY_FOR_TEST_NODE_SIZE = NativeMemoryCalculator.allowedBytesForMl(TEST_NODE_SIZE, 0, true);
private static final long TEST_JVM_SIZE = mlOnlyNodeJvmBytes(TEST_NODE_SIZE);
private static final int TEST_ALLOCATED_PROCESSORS = 2;
private static final long TEST_JOB_SIZE = ByteSizeValue.ofMb(200).getBytes();
private static final long PER_NODE_OVERHEAD = MachineLearning.NATIVE_EXECUTABLE_CODE_OVERHEAD.getBytes();
private NodeLoadDetector nodeLoadDetector;
private NodeRealAvailabilityZoneMapper nodeRealAvailabilityZoneMapper;
private ClusterService clusterService;
private Settings settings;
private LongSupplier timeSupplier;
private MlMemoryTracker mlMemoryTracker;
@Before
public void setup() {
mlMemoryTracker = mock(MlMemoryTracker.class);
when(mlMemoryTracker.isRecentlyRefreshed()).thenReturn(true);
when(mlMemoryTracker.asyncRefresh()).thenReturn(true);
when(mlMemoryTracker.getAnomalyDetectorJobMemoryRequirement(any())).thenReturn(TEST_JOB_SIZE);
when(mlMemoryTracker.getDataFrameAnalyticsJobMemoryRequirement(any())).thenReturn(TEST_JOB_SIZE);
when(mlMemoryTracker.getTrainedModelAssignmentMemoryRequirement(any())).thenReturn(TEST_JOB_SIZE);
when(mlMemoryTracker.getJobMemoryRequirement(any(), any())).thenReturn(TEST_JOB_SIZE);
nodeLoadDetector = mock(NodeLoadDetector.class);
when(nodeLoadDetector.getMlMemoryTracker()).thenReturn(mlMemoryTracker);
when(nodeLoadDetector.detectNodeLoad(any(), any(), anyInt(), anyInt(), anyBoolean())).thenReturn(
NodeLoad.builder("any").setUseMemory(true).incAssignedAnomalyDetectorMemory(ByteSizeValue.ofGb(1).getBytes()).build()
);
nodeRealAvailabilityZoneMapper = mock(NodeRealAvailabilityZoneMapper.class);
clusterService = mock(ClusterService.class);
settings = Settings.EMPTY;
timeSupplier = System::currentTimeMillis;
ClusterSettings cSettings = new ClusterSettings(
settings,
Set.of(
MachineLearning.MAX_MACHINE_MEMORY_PERCENT,
MachineLearning.MAX_OPEN_JOBS_PER_NODE,
MachineLearningField.USE_AUTO_MACHINE_MEMORY_PERCENT,
MachineLearning.MAX_ML_NODE_SIZE,
AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING
)
);
when(clusterService.getClusterSettings()).thenReturn(cSettings);
}
public void testScalingEdgeCase() {
// This scale up should push above 1gb, but under 2gb.
// The unassigned job barely doesn't fit within the current scale (by 1 megabyte - 610mb available and 611mb needed).
// The three assigned jobs have model memory limits 200mb, 10mb and 9mb.
// The unassigned job has model memory limit 128mb.
// Then we have four times the process overhead of 10mb, plus the per-node overhead of 30mb, so total overhead on one node is 70mb.
when(mlMemoryTracker.getAnomalyDetectorJobMemoryRequirement(any())).thenReturn(
ByteSizeValue.ofMb(128).getBytes() + Job.PROCESS_MEMORY_OVERHEAD.getBytes()
);
when(mlMemoryTracker.getJobMemoryRequirement(any(), any())).thenReturn(
ByteSizeValue.ofMb(128).getBytes() + Job.PROCESS_MEMORY_OVERHEAD.getBytes()
);
List<String> jobTasks = List.of("waiting_job");
long mlMemoryFor1GbNode = autoBytesForMl(AUTO_NODE_TIERS_NO_MONITORING.get(0).v1(), AUTO_NODE_TIERS_NO_MONITORING.get(0).v2());
List<NodeLoad> nodesForScaleup = List.of(
NodeLoad.builder("any")
.setMaxMemory(mlMemoryFor1GbNode)
.setUseMemory(true)
.incAssignedNativeCodeOverheadMemory(PER_NODE_OVERHEAD)
.incAssignedAnomalyDetectorMemory(
ByteSizeValue.ofMb(200).getBytes() + ByteSizeValue.ofMb(10).getBytes() + ByteSizeValue.ofMb(9).getBytes()
+ Job.PROCESS_MEMORY_OVERHEAD.getBytes() * 3
)
.incNumAssignedAnomalyDetectorJobs()
.incNumAssignedAnomalyDetectorJobs()
.incNumAssignedAnomalyDetectorJobs()
.build()
);
MlMemoryAutoscalingDecider decider = buildDecider();
decider.setUseAuto(true);
MlMemoryAutoscalingCapacity scaleUpResult = decider.checkForScaleUp(
0,
0,
nodesForScaleup,
jobTasks,
List.of(),
List.of(),
List.of(),
null,
new NativeMemoryCapacity(
mlMemoryFor1GbNode - PER_NODE_OVERHEAD,
mlMemoryFor1GbNode - PER_NODE_OVERHEAD,
AUTO_NODE_TIERS_NO_MONITORING.get(0).v2()
)
).orElseThrow();
assertThat(
scaleUpResult.tierSize().getBytes(),
allOf(greaterThan(ByteSizeValue.ofGb(1).getBytes()), lessThan(ByteSizeValue.ofGb(2).getBytes()))
);
// Assume a scale up to 2gb nodes
// We should NOT scale down below or to 1gb given the same jobs with 2gb node
long mlMemoryFor2GbNode = autoBytesForMl(AUTO_NODE_TIERS_NO_MONITORING.get(1).v1(), AUTO_NODE_TIERS_NO_MONITORING.get(1).v2());
List<NodeLoad> nodeForScaleDown = List.of(
NodeLoad.builder("any")
.setMaxMemory(mlMemoryFor2GbNode)
.setUseMemory(true)
.incAssignedNativeCodeOverheadMemory(PER_NODE_OVERHEAD)
.incAssignedAnomalyDetectorMemory(
ByteSizeValue.ofMb(200).getBytes() + ByteSizeValue.ofMb(10).getBytes() + ByteSizeValue.ofMb(9).getBytes()
+ ByteSizeValue.ofMb(128).getBytes() + Job.PROCESS_MEMORY_OVERHEAD.getBytes() * 4
)
.incNumAssignedAnomalyDetectorJobs()
.incNumAssignedAnomalyDetectorJobs()
.incNumAssignedAnomalyDetectorJobs()
.incNumAssignedAnomalyDetectorJobs()
.build()
);
MlMemoryAutoscalingCapacity result = decider.checkForScaleDown(
nodeForScaleDown,
ByteSizeValue.ofMb(200).getBytes() + Job.PROCESS_MEMORY_OVERHEAD.getBytes(),
new NativeMemoryCapacity(mlMemoryFor2GbNode, mlMemoryFor2GbNode, AUTO_NODE_TIERS_NO_MONITORING.get(1).v2())
).orElseThrow();
assertThat(
result.tierSize().getBytes(),
allOf(greaterThan(ByteSizeValue.ofGb(1).getBytes()), lessThan(ByteSizeValue.ofGb(2).getBytes()))
);
}
// Property-style stability check: scale up for a set of randomly sized waiting jobs, then verify
// that a scale down computed with the same set of jobs never requests a tier below the one we
// just scaled to. Iterates over both the no-monitoring and with-monitoring Cloud node size tables.
public void testScaleStability() {
    for (int i = 0; i < 10; i++) {
        // Run this test with the Cloud node sizes we get when monitoring is not enabled and when monitoring is enabled
        final long[] nodeTiers;
        final List<Tuple<Long, Long>> autoNodeTiers;
        if ((i % 2) == 0) {
            nodeTiers = NODE_TIERS_NO_MONITORING;
            autoNodeTiers = AUTO_NODE_TIERS_NO_MONITORING;
        } else {
            nodeTiers = NODE_TIERS_WITH_MONITORING;
            autoNodeTiers = AUTO_NODE_TIERS_WITH_MONITORING;
        }
        for (int tier = 0; tier < autoNodeTiers.size() - 1; tier++) {
            final Tuple<Long, Long> lowerTier = autoNodeTiers.get(tier);
            final long lowerTierNodeSize = lowerTier.v1();
            final long lowerTierJvmSize = lowerTier.v2();
            final long lowerTierMemoryForMl = autoBytesForMl(lowerTierNodeSize, lowerTierJvmSize);
            final Tuple<Long, Long> higherTier = autoNodeTiers.get(tier + 1);
            // The jobs that currently exist, to use in the scaleUp call
            NodeLoad.Builder forScaleUp = new NodeLoad.Builder("any").setMaxMemory(lowerTierMemoryForMl)
                .setMaxJobs(Integer.MAX_VALUE)
                .setUseMemory(true);
            // The jobs + load that exists for all jobs (after scale up), used in scaleDown call
            final long higherTierMemoryForMl = autoBytesForMl(higherTier.v1(), higherTier.v2());
            NodeLoad.Builder forScaleDown = new NodeLoad.Builder("any").setMaxMemory(higherTierMemoryForMl)
                .setMaxJobs(Integer.MAX_VALUE)
                .setUseMemory(true);
            long maxJobSize = 0;
            // Fill with existing tier jobs
            while (forScaleUp.getFreeMemory() > Job.PROCESS_MEMORY_OVERHEAD.getBytes()) {
                long jobSize = randomLongBetween(Job.PROCESS_MEMORY_OVERHEAD.getBytes(), forScaleUp.getFreeMemory());
                maxJobSize = Math.max(jobSize, maxJobSize);
                // Each assigned job appears both in the pre-scale-up and post-scale-up load
                forScaleUp.incNumAssignedAnomalyDetectorJobs().incAssignedAnomalyDetectorMemory(jobSize);
                forScaleDown.incNumAssignedAnomalyDetectorJobs().incAssignedAnomalyDetectorMemory(jobSize);
            }
            // Create jobs for scale up
            NodeLoad nodeLoadForScaleUp = forScaleUp.build();
            List<String> waitingJobs = new ArrayList<>();
            while (forScaleDown.getFreeMemory() > Job.PROCESS_MEMORY_OVERHEAD.getBytes()) {
                long jobSize = randomLongBetween(Job.PROCESS_MEMORY_OVERHEAD.getBytes(), forScaleDown.getFreeMemory());
                maxJobSize = Math.max(jobSize, maxJobSize);
                forScaleDown.incNumAssignedAnomalyDetectorJobs().incAssignedAnomalyDetectorMemory(jobSize);
                String waitingJob = randomAlphaOfLength(10);
                // The memory tracker must report the size of each waiting job for the decider to use
                when(mlMemoryTracker.getAnomalyDetectorJobMemoryRequirement(eq(waitingJob))).thenReturn(jobSize);
                when(mlMemoryTracker.getJobMemoryRequirement(eq(MlTasks.JOB_TASK_NAME), eq(waitingJob))).thenReturn(jobSize);
                waitingJobs.add(waitingJob);
            }
            MlMemoryAutoscalingDecider decider = buildDecider();
            decider.setUseAuto(true);
            MlMemoryAutoscalingCapacity scaleUpResult = decider.checkForScaleUp(
                0,
                0,
                List.of(nodeLoadForScaleUp),
                waitingJobs,
                List.of(),
                List.of(),
                List.of(),
                null,
                new NativeMemoryCapacity(lowerTierMemoryForMl, lowerTierMemoryForMl, lowerTierJvmSize)
            ).orElseThrow();
            long scaledUpTierSizeRequested = scaleUpResult.tierSize().getBytes();
            assertThat(scaledUpTierSizeRequested, greaterThan(lowerTierNodeSize));
            assertThat(scaleUpResult.nodeSize().getBytes(), greaterThanOrEqualTo(lowerTierNodeSize));
            // It's possible that the next tier is above what we consider "higherTier"
            // This is just fine for this test, as long as scale_down does not drop below this tier
            int nextTier = Arrays.binarySearch(nodeTiers, scaledUpTierSizeRequested);
            if (nextTier < 0) {
                // binarySearch returns (-(insertion point) - 1) on a miss; recover the insertion point
                nextTier = -nextTier - 1;
            }
            // It's possible we requested a huge scale up, this is OK, we just don't have validation
            // numbers that exist past a certain point.
            if (nextTier >= nodeTiers.length) {
                // Start the next iteration of the outermost loop
                break;
            }
            // Actual scaled up size will likely be bigger than what we asked for
            long scaledUpSize = nodeTiers[nextTier];
            assertThat(scaledUpSize, greaterThanOrEqualTo(scaledUpTierSizeRequested));
            long scaledUpJvmSize = autoNodeTiers.get(nextTier).v2();
            long scaledUpBytesForMl = autoBytesForMl(scaledUpSize, scaledUpJvmSize);
            NodeLoad nodeLoadForScaleDown = forScaleDown.build();
            // It could be that scale down doesn't occur, this is fine as we are "perfectly scaled"
            Optional<MlMemoryAutoscalingCapacity> result = decider.checkForScaleDown(
                List.of(nodeLoadForScaleDown),
                maxJobSize,
                new NativeMemoryCapacity(scaledUpBytesForMl, scaledUpBytesForMl, scaledUpJvmSize)
            );
            // If scale down is present, we don't want to drop below our current tier.
            // If we do, that means that for the same jobs we scaled with, we calculated something incorrectly.
            if (result.isPresent()) {
                long tierSizeRequired = result.get().tierSize().getBytes();
                int afterScaleDownTier = Arrays.binarySearch(nodeTiers, tierSizeRequired);
                if (afterScaleDownTier < 0) {
                    afterScaleDownTier = -afterScaleDownTier - 1;
                }
                assertThat(afterScaleDownTier, equalTo(nextTier));
            }
        }
    }
}
public void testScaleUp_withNoJobsWaitingNoMlNodes() {
    // With an empty cluster — no ML nodes and nothing queued — there is nothing to scale for.
    MlMemoryAutoscalingDecider decider = buildDecider();
    Optional<MlMemoryAutoscalingCapacity> decision = decider.checkForScaleUp(
        0,
        0,
        List.of(), // no ML nodes, so no node loads
        List.of(),
        List.of(),
        List.of(),
        List.of(),
        null,
        NativeMemoryCapacity.ZERO // zero current scale because the cluster has no ML nodes
    );
    assertThat(decision, equalTo(Optional.empty()));
}
// Auto-memory scale up: two anomaly detection jobs and one analytics job are waiting while the
// only node is fully occupied. Verifies the requested node/tier capacities grow with the number
// of jobs that are NOT allowed to remain in the queues (the first two checkForScaleUp arguments).
public void testScaleUp_withWaitingJobsAndAutoMemoryAndNoRoomInNodes() {
    ByteSizeValue anomalyDetectorJobSize = ByteSizeValue.ofGb(randomIntBetween(2, 4));
    ByteSizeValue analyticsJobSize = ByteSizeValue.ofGb(randomIntBetween(2, 4));
    // Every waiting task reports the same randomized memory requirement via the mocked tracker
    when(mlMemoryTracker.getAnomalyDetectorJobMemoryRequirement(any())).thenReturn(anomalyDetectorJobSize.getBytes());
    when(mlMemoryTracker.getJobMemoryRequirement(eq(MlTasks.JOB_TASK_NAME), any())).thenReturn(anomalyDetectorJobSize.getBytes());
    when(mlMemoryTracker.getDataFrameAnalyticsJobMemoryRequirement(any())).thenReturn(analyticsJobSize.getBytes());
    when(mlMemoryTracker.getJobMemoryRequirement(eq(MlTasks.DATA_FRAME_ANALYTICS_TASK_NAME), any())).thenReturn(
        analyticsJobSize.getBytes()
    );
    List<String> jobTasks = List.of("waiting_job", "waiting_job_2");
    List<String> analytics = List.of("analytics_waiting");
    // The single node is exactly full: one assigned anomaly job plus the native code overhead
    List<NodeLoad> fullyLoadedNode = List.of(
        NodeLoad.builder("any")
            .setMaxMemory(anomalyDetectorJobSize.getBytes() + PER_NODE_OVERHEAD)
            .setUseMemory(true)
            .incAssignedNativeCodeOverheadMemory(PER_NODE_OVERHEAD)
            .incAssignedAnomalyDetectorMemory(anomalyDetectorJobSize.getBytes())
            .incNumAssignedAnomalyDetectorJobs()
            .build()
    );
    NativeMemoryCapacity currentScale = new NativeMemoryCapacity(anomalyDetectorJobSize.getBytes(), anomalyDetectorJobSize.getBytes());
    MlMemoryAutoscalingDecider decider = buildDecider();
    decider.setUseAuto(true);
    { // No time in queue
        Optional<MlMemoryAutoscalingCapacity> decision = decider.checkForScaleUp(
            0,
            0,
            fullyLoadedNode,
            jobTasks,
            List.of(),
            analytics,
            List.of(),
            null,
            currentScale
        );
        assertFalse(decision.isEmpty());
        MlMemoryAutoscalingCapacity result = decision.get();
        long allowedBytesForMlNode = NativeMemoryCalculator.allowedBytesForMl(
            result.nodeSize().getBytes(),
            randomIntBetween(5, 90), // irrelevant because auto is true
            true
        );
        // Note: with more than 1 job involved this calculation could be a wild overestimate. We get away
        // with it here because all the jobs fit on one node. This is not how the production code works.
        long allowedBytesForMlTier = NativeMemoryCalculator.allowedBytesForMl(
            result.tierSize().getBytes(),
            randomIntBetween(5, 90), // irrelevant because auto is true
            true
        );
        assertThat(
            allowedBytesForMlNode,
            greaterThanOrEqualTo(Math.max(anomalyDetectorJobSize.getBytes(), analyticsJobSize.getBytes()) + PER_NODE_OVERHEAD)
        );
        // Tier must fit the existing job plus all three waiting tasks
        assertThat(
            allowedBytesForMlTier,
            greaterThanOrEqualTo(anomalyDetectorJobSize.getBytes() * 3 + analyticsJobSize.getBytes() + PER_NODE_OVERHEAD)
        );
    }
    { // we allow one job in the analytics queue
        Optional<MlMemoryAutoscalingCapacity> decision = decider.checkForScaleUp(
            0,
            1,
            fullyLoadedNode,
            jobTasks,
            List.of(),
            analytics,
            List.of(),
            null,
            currentScale
        );
        assertFalse(decision.isEmpty());
        MlMemoryAutoscalingCapacity result = decision.get();
        long allowedBytesForMlNode = NativeMemoryCalculator.allowedBytesForMl(
            result.nodeSize().getBytes(),
            randomIntBetween(5, 90), // irrelevant because auto is true
            true
        );
        // Note: with more than 1 job involved this calculation could be a wild overestimate. We get away
        // with it here because all the jobs fit on one node. This is not how the production code works.
        long allowedBytesForMlTier = NativeMemoryCalculator.allowedBytesForMl(
            result.tierSize().getBytes(),
            randomIntBetween(5, 90), // irrelevant because auto is true
            true
        );
        assertThat(allowedBytesForMlNode, greaterThanOrEqualTo(anomalyDetectorJobSize.getBytes() + PER_NODE_OVERHEAD))
;
        assertThat(allowedBytesForMlTier, greaterThanOrEqualTo(anomalyDetectorJobSize.getBytes() * 3 + PER_NODE_OVERHEAD));
    }
    { // we allow one job in the anomaly queue and analytics queue
        Optional<MlMemoryAutoscalingCapacity> decision = decider.checkForScaleUp(
            1,
            1,
            fullyLoadedNode,
            jobTasks,
            List.of(),
            analytics,
            List.of(),
            null,
            currentScale
        );
        assertFalse(decision.isEmpty());
        MlMemoryAutoscalingCapacity result = decision.get();
        long allowedBytesForMlNode = NativeMemoryCalculator.allowedBytesForMl(
            result.nodeSize().getBytes(),
            randomIntBetween(5, 90), // irrelevant because auto is true
            true
        );
        // Note: with more than 1 job involved this calculation could be a wild overestimate. We get away
        // with it here because all the jobs fit on one node. This is not how the production code works.
        long allowedBytesForMlTier = NativeMemoryCalculator.allowedBytesForMl(
            result.tierSize().getBytes(),
            randomIntBetween(5, 90), // irrelevant because auto is true
            true
        );
        assertThat(allowedBytesForMlNode, greaterThanOrEqualTo(anomalyDetectorJobSize.getBytes() + PER_NODE_OVERHEAD));
        assertThat(allowedBytesForMlTier, greaterThanOrEqualTo(anomalyDetectorJobSize.getBytes() * 2 + PER_NODE_OVERHEAD));
    }
}
// Scale up when model snapshot upgrade tasks are waiting, auto memory is enabled, and the single
// node is already full with a 1GB anomaly detection job. Snapshot upgrades are sized via the
// anomaly detection job memory requirement mocks below.
public void testScaleUp_withWaitingSnapshotUpgradesAndAutoMemoryAndNoRoomInNodes() {
    ByteSizeValue anomalyDetectorJobSize = ByteSizeValue.ofGb(randomIntBetween(2, 8));
    ByteSizeValue analyticsJobSize = ByteSizeValue.ofGb(randomIntBetween(2, 8));
    when(mlMemoryTracker.getAnomalyDetectorJobMemoryRequirement(any())).thenReturn(anomalyDetectorJobSize.getBytes());
    when(mlMemoryTracker.getJobMemoryRequirement(eq(MlTasks.JOB_TASK_NAME), any())).thenReturn(anomalyDetectorJobSize.getBytes());
    when(mlMemoryTracker.getDataFrameAnalyticsJobMemoryRequirement(any())).thenReturn(analyticsJobSize.getBytes());
    when(mlMemoryTracker.getJobMemoryRequirement(eq(MlTasks.DATA_FRAME_ANALYTICS_TASK_NAME), any())).thenReturn(
        analyticsJobSize.getBytes()
    );
    List<String> snapshotUpgradeTasks = List.of("waiting_upgrade", "waiting_upgrade_2");
    // One node, exactly full with a 1GB job plus the native code overhead
    List<NodeLoad> fullyLoadedNode = List.of(
        NodeLoad.builder("any")
            .setMaxMemory(ByteSizeValue.ofGb(1).getBytes() + PER_NODE_OVERHEAD)
            .setUseMemory(true)
            .incAssignedNativeCodeOverheadMemory(PER_NODE_OVERHEAD)
            .incAssignedAnomalyDetectorMemory(ByteSizeValue.ofGb(1).getBytes())
            .incNumAssignedAnomalyDetectorJobs()
            .build()
    );
    NativeMemoryCapacity currentScale = new NativeMemoryCapacity(ByteSizeValue.ofGb(1).getBytes(), ByteSizeValue.ofGb(1).getBytes());
    MlMemoryAutoscalingDecider decider = buildDecider();
    decider.setUseAuto(true);
    { // No time in queue
        Optional<MlMemoryAutoscalingCapacity> decision = decider.checkForScaleUp(
            0,
            0,
            fullyLoadedNode,
            List.of(),
            snapshotUpgradeTasks,
            List.of(),
            List.of(),
            null,
            currentScale
        );
        assertFalse(decision.isEmpty());
        MlMemoryAutoscalingCapacity result = decision.get();
        long allowedBytesForMlNode = NativeMemoryCalculator.allowedBytesForMl(result.nodeSize().getBytes(), 30, true);
        // Note: with more than 1 job involved this calculation could be a wild overestimate. We get away
        // with it here because all the jobs fit on one node. This is not how the production code works.
        long allowedBytesForMlTier = NativeMemoryCalculator.allowedBytesForMl(result.tierSize().getBytes(), 30, true);
        assertThat(allowedBytesForMlNode, greaterThanOrEqualTo(anomalyDetectorJobSize.getBytes() + PER_NODE_OVERHEAD));
        assertThat(allowedBytesForMlTier, greaterThanOrEqualTo(anomalyDetectorJobSize.getBytes() * 2 + PER_NODE_OVERHEAD));
    }
    { // we allow one job in the analytics queue
        Optional<MlMemoryAutoscalingCapacity> decision = decider.checkForScaleUp(
            0,
            1,
            fullyLoadedNode,
            List.of(),
            snapshotUpgradeTasks,
            List.of(),
            List.of(),
            null,
            currentScale
        );
        assertFalse(decision.isEmpty());
        MlMemoryAutoscalingCapacity result = decision.get();
        long allowedBytesForMlNode = NativeMemoryCalculator.allowedBytesForMl(result.nodeSize().getBytes(), 30, true);
        // Note: with more than 1 job involved this calculation could be a wild overestimate. We get away
        // with it here because all the jobs fit on one node. This is not how the production code works.
        long allowedBytesForMlTier = NativeMemoryCalculator.allowedBytesForMl(result.tierSize().getBytes(), 30, true);
        assertThat(allowedBytesForMlNode, greaterThanOrEqualTo(anomalyDetectorJobSize.getBytes() + PER_NODE_OVERHEAD));
        assertThat(allowedBytesForMlTier, greaterThanOrEqualTo(anomalyDetectorJobSize.getBytes() * 2 + PER_NODE_OVERHEAD));
    }
    { // we allow one job in the anomaly queue and analytics queue
        Optional<MlMemoryAutoscalingCapacity> decision = decider.checkForScaleUp(
            1,
            1,
            fullyLoadedNode,
            List.of(),
            snapshotUpgradeTasks,
            List.of(),
            List.of(),
            null,
            NativeMemoryCapacity.ZERO
        );
        assertFalse(decision.isEmpty());
        MlMemoryAutoscalingCapacity result = decision.get();
        long allowedBytesForMlNode = NativeMemoryCalculator.allowedBytesForMl(result.nodeSize().getBytes(), 30, true);
        // Note: with more than 1 job involved this calculation could be a wild overestimate. We get away
        // with it here because all the jobs fit on one node. This is not how the production code works.
        long allowedBytesForMlTier = NativeMemoryCalculator.allowedBytesForMl(result.tierSize().getBytes(), 30, true);
        assertThat(allowedBytesForMlNode, greaterThanOrEqualTo(anomalyDetectorJobSize.getBytes() + PER_NODE_OVERHEAD));
        assertThat(allowedBytesForMlTier, greaterThanOrEqualTo(anomalyDetectorJobSize.getBytes() + PER_NODE_OVERHEAD));
    }
}
// Scale up when the waiting jobs partially fit into existing free space spread over two nodes
// (simulating two availability zones). With no queue allowance a scale up is required; allowing
// a single task to stay queued makes everything fit and no scale up is requested.
public void testScaleUp_withWaitingJobsAndRoomInNodes() {
    List<String> jobTasks = List.of("waiting_job", "waiting_job_2");
    List<String> analytics = List.of("analytics_waiting");
    // Two small nodes in cluster, so simulate two availability zones
    when(nodeRealAvailabilityZoneMapper.getNumMlAvailabilityZones()).thenReturn(OptionalInt.of(2))
;
    List<NodeLoad> nodesWithRoom = List.of(
        NodeLoad.builder("partially_filled")
            .setMaxMemory(2 * TEST_JOB_SIZE + PER_NODE_OVERHEAD)
            .setUseMemory(true)
            .setMaxJobs(10)
            .incAssignedNativeCodeOverheadMemory(PER_NODE_OVERHEAD)
            .incAssignedAnomalyDetectorMemory(TEST_JOB_SIZE)
            .incNumAssignedAnomalyDetectorJobs()
            .build(),
        NodeLoad.builder("not_filled").setMaxMemory(TEST_JOB_SIZE + PER_NODE_OVERHEAD).setMaxJobs(10).setUseMemory(true).build()
    );
    // Current scale needs to be set to total cluster allowance for ML excluding per-node overhead
    NativeMemoryCapacity currentScale = new NativeMemoryCapacity(3 * TEST_JOB_SIZE, TEST_JOB_SIZE);
    MlMemoryAutoscalingDecider decider = buildDecider();
    decider.setMaxMachineMemoryPercent(25);
    // No time in queue, should be able to assign all but one job given the current node load
    {
        Optional<MlMemoryAutoscalingCapacity> decision = decider.checkForScaleUp(
            0,
            0,
            nodesWithRoom,
            jobTasks,
            List.of(),
            analytics,
            List.of(),
            null,
            currentScale
        );
        assertTrue(decision.isPresent());
        // It's four times because the native memory percentage is 25.
        assertThat(decision.get().nodeSize().getBytes(), equalTo(4 * (TEST_JOB_SIZE + PER_NODE_OVERHEAD)));
        // In the scaled up cluster we're going to have 4 jobs and 2 node overheads. Then multiply by 4 again as 25% ML memory.
        assertThat(decision.get().tierSize().getBytes(), equalTo(4 * (4 * TEST_JOB_SIZE + 2 * PER_NODE_OVERHEAD)));
    }
    // We allow one job in the analytics queue, so no need to scale as both anomaly detection jobs will fit
    {
        Optional<MlMemoryAutoscalingCapacity> decision = decider.checkForScaleUp(
            0,
            1,
            nodesWithRoom,
            jobTasks,
            List.of(),
            analytics,
            List.of(),
            null,
            currentScale
        );
        assertFalse(decision.isPresent());
    }
    // We allow one job in the anomaly detection queue, so no need to scale as one anomaly detection job and the analytics job will fit
    {
        Optional<MlMemoryAutoscalingCapacity> decision = decider.checkForScaleUp(
            1,
            0,
            nodesWithRoom,
            jobTasks,
            List.of(),
            analytics,
            List.of(),
            null,
            currentScale
        );
        assertFalse(decision.isPresent());
    }
}
// Scale up when the single node is completely full. With max machine memory percent at 25 the
// requested raw node/tier sizes are four times the required ML memory; the tier shrinks as more
// tasks are allowed to remain in the queues.
public void testScaleUp_withWaitingJobsAndNoRoomInNodes() {
    List<String> jobTasks = List.of("waiting_job", "waiting_job_2");
    List<String> analytics = List.of("analytics_waiting");
    List<NodeLoad> fullyLoadedNode = List.of(
        NodeLoad.builder("any")
            .setMaxMemory(ByteSizeValue.ofGb(1).getBytes() + PER_NODE_OVERHEAD)
            .setUseMemory(true)
            .incAssignedNativeCodeOverheadMemory(PER_NODE_OVERHEAD)
            .incAssignedAnomalyDetectorMemory(ByteSizeValue.ofGb(1).getBytes())
            .incNumAssignedAnomalyDetectorJobs()
            .build()
    );
    // Current scale needs to be set to total cluster allowance for ML excluding per-node overhead
    NativeMemoryCapacity currentScale = new NativeMemoryCapacity(ByteSizeValue.ofGb(1).getBytes(), ByteSizeValue.ofGb(1).getBytes());
    MlMemoryAutoscalingDecider decider = buildDecider();
    decider.setMaxMachineMemoryPercent(25);
    { // No time in queue
        Optional<MlMemoryAutoscalingCapacity> decision = decider.checkForScaleUp(
            0,
            0,
            fullyLoadedNode,
            jobTasks,
            List.of(),
            analytics,
            List.of(),
            null,
            currentScale
        );
        assertFalse(decision.isEmpty());
        // Existing 1GB job is bigger than the waiting TEST_JOB_SIZE, and node requirement is based on the larger value
        assertThat(decision.get().nodeSize().getBytes(), equalTo(4 * (ByteSizeValue.ofGb(1).getBytes() + PER_NODE_OVERHEAD)));
        // Tier covers the existing 1GB job plus all three waiting tasks
        assertThat(
            decision.get().tierSize().getBytes(),
            equalTo(4 * (ByteSizeValue.ofGb(1).getBytes() + 3 * TEST_JOB_SIZE + PER_NODE_OVERHEAD))
        );
    }
    { // we allow one job in the analytics queue
        Optional<MlMemoryAutoscalingCapacity> decision = decider.checkForScaleUp(
            0,
            1,
            fullyLoadedNode,
            jobTasks,
            List.of(),
            analytics,
            List.of(),
            null,
            currentScale
        );
        assertFalse(decision.isEmpty());
        // Existing 1GB job is bigger than the waiting TEST_JOB_SIZE, and node requirement is based on the larger value
        assertThat(decision.get().nodeSize().getBytes(), equalTo(4 * (ByteSizeValue.ofGb(1).getBytes() + PER_NODE_OVERHEAD)));
        assertThat(
            decision.get().tierSize().getBytes(),
            equalTo(4 * (ByteSizeValue.ofGb(1).getBytes() + 2 * TEST_JOB_SIZE + PER_NODE_OVERHEAD))
        );
    }
    { // we allow one job in the anomaly queue and analytics queue
        Optional<MlMemoryAutoscalingCapacity> decision = decider.checkForScaleUp(
            1,
            1,
            fullyLoadedNode,
            jobTasks,
            List.of(),
            analytics,
            List.of(),
            null,
            currentScale
        );
        assertFalse(decision.isEmpty());
        // Existing 1GB job is bigger than the waiting TEST_JOB_SIZE, and node requirement is based on the larger value
        assertThat(decision.get().nodeSize().getBytes(), equalTo(4 * (ByteSizeValue.ofGb(1).getBytes() + PER_NODE_OVERHEAD)));
        assertThat(
            decision.get().tierSize().getBytes(),
            equalTo(4 * (ByteSizeValue.ofGb(1).getBytes() + TEST_JOB_SIZE + PER_NODE_OVERHEAD))
        );
    }
}
// Scale up when the node has some free space, but 1MB too little for another job. The key check
// is that the nearly-sufficient free space is not double counted when sizing the tier request.
public void testScaleUp_withWaitingJobsAndSomeRoomInNodes() {
    List<String> jobTasks = List.of("waiting_job");
    List<String> analytics = List.of("analytics_waiting");
    List<NodeLoad> nearlyFullyLoadedNode = List.of(
        // Free space on this node is _nearly_ enough for another job but not quite
        NodeLoad.builder("any")
            .setMaxMemory(2 * TEST_JOB_SIZE - ByteSizeValue.ofMb(1).getBytes() + PER_NODE_OVERHEAD)
            .setUseMemory(true)
            .incAssignedNativeCodeOverheadMemory(PER_NODE_OVERHEAD)
            .incAssignedAnomalyDetectorMemory(TEST_JOB_SIZE)
            .incNumAssignedAnomalyDetectorJobs()
            .build()
    );
    // Current scale needs to be set to total cluster allowance for ML excluding per-node overhead
    NativeMemoryCapacity currentScale = new NativeMemoryCapacity(
        2 * TEST_JOB_SIZE - ByteSizeValue.ofMb(1).getBytes(),
        2 * TEST_JOB_SIZE - ByteSizeValue.ofMb(1).getBytes()
    );
    MlMemoryAutoscalingDecider decider = buildDecider();
    decider.setMaxMachineMemoryPercent(25);
    { // No time in queue
        Optional<MlMemoryAutoscalingCapacity> decision = decider.checkForScaleUp(
            0,
            0,
            nearlyFullyLoadedNode,
            jobTasks,
            List.of(),
            analytics,
            List.of(),
            null,
            currentScale
        );
        assertFalse(decision.isEmpty());
        // We won't ask for a smaller node than the current scale on a scale up even
        // though we theoretically could tolerate smaller nodes but more of them
        assertThat(
            decision.get().nodeSize().getBytes(),
            equalTo(4 * (2 * TEST_JOB_SIZE - ByteSizeValue.ofMb(1).getBytes() + PER_NODE_OVERHEAD))
        );
        // The important thing here is that the free space that was nearly enough for another job is _not_ added in again
        assertThat(decision.get().tierSize().getBytes(), equalTo(4 * (3 * TEST_JOB_SIZE + PER_NODE_OVERHEAD)));
    }
    { // we allow one job in the analytics queue
        Optional<MlMemoryAutoscalingCapacity> decision = decider.checkForScaleUp(
            0,
            1,
            nearlyFullyLoadedNode,
            jobTasks,
            List.of(),
            analytics,
            List.of(),
            null,
            currentScale
        );
        assertFalse(decision.isEmpty());
        // We won't ask for a smaller node than the current scale on a scale up even
        // though we theoretically could tolerate smaller nodes but more of them
        assertThat(
            decision.get().nodeSize().getBytes(),
            equalTo(4 * (2 * TEST_JOB_SIZE - ByteSizeValue.ofMb(1).getBytes() + PER_NODE_OVERHEAD))
        );
        // The important thing here is that the free space that was nearly enough for another job is _not_ added in again
        // (so we are asking for a very tiny scale up here - just enough for 1MB extra ML memory)
        assertThat(decision.get().tierSize().getBytes(), equalTo(4 * (2 * TEST_JOB_SIZE + PER_NODE_OVERHEAD)));
    }
    { // we allow one job in the anomaly queue and analytics queue
        Optional<MlMemoryAutoscalingCapacity> decision = decider.checkForScaleUp(
            1,
            1,
            nearlyFullyLoadedNode,
            jobTasks,
            List.of(),
            analytics,
            List.of(),
            null,
            currentScale
        );
        // Both waiting tasks may stay queued, so no scale up is needed at all
        assertTrue(decision.isEmpty());
    }
}
// Exercises the future-capacity argument of checkForScaleUp (second to last): null means
// "unknown", a value equal to the current scale means existing work will free its resources
// soon, and ZERO means nothing will ever free up so a real scale up must be requested.
public void testScaleUp_withWaitingJobs_WithFutureCapacity() {
    List<String> jobTasks = List.of("waiting_job", "waiting_job_2");
    List<String> analytics = List.of("analytics_waiting");
    List<NodeLoad> fullyLoadedNode = List.of(
        NodeLoad.builder("any")
            .setMaxMemory(ByteSizeValue.ofGb(1).getBytes())
            .setUseMemory(true)
            .incAssignedNativeCodeOverheadMemory(PER_NODE_OVERHEAD)
            .incAssignedAnomalyDetectorMemory(ByteSizeValue.ofGb(1).getBytes() - PER_NODE_OVERHEAD)
            .incNumAssignedAnomalyDetectorJobs()
            .build()
    );
    NativeMemoryCapacity currentScale = new NativeMemoryCapacity(
        ByteSizeValue.ofGb(1).getBytes() - PER_NODE_OVERHEAD,
        ByteSizeValue.ofGb(1).getBytes() - PER_NODE_OVERHEAD
    );
    MlMemoryAutoscalingDecider decider = buildDecider();
    decider.setMaxMachineMemoryPercent(25);
    { // with null future capacity and current capacity is full
        Optional<MlMemoryAutoscalingCapacity> decision = decider.checkForScaleUp(
            2,
            1,
            fullyLoadedNode,
            jobTasks,
            List.of(),
            analytics,
            List.of(),
            null,
            currentScale
        );
        assertTrue(decision.isEmpty()); // means "don't know" in this case
    }
    { // current capacity is full but the existing job is expected to terminate and free up all its resources
        Optional<MlMemoryAutoscalingCapacity> decision = decider.checkForScaleUp(
            2,
            1,
            fullyLoadedNode,
            jobTasks,
            List.of(),
            analytics,
            List.of(),
            currentScale,
            currentScale
        );
        assertTrue(decision.isEmpty()); // means "OK to wait for future capacity"
    }
    { // with no future capacity (i.e. current jobs expected to run forever) and current capacity is full
        Optional<MlMemoryAutoscalingCapacity> decision = decider.checkForScaleUp(
            2,
            1,
            fullyLoadedNode,
            jobTasks,
            List.of(),
            analytics,
            List.of(),
            NativeMemoryCapacity.ZERO,
            currentScale
        );
        assertFalse(decision.isEmpty());
        assertThat(decision.get().nodeSize().getBytes(), equalTo(ByteSizeValue.ofGb(4).getBytes()))
;
        // For the tier we'll need enough for the current 1GB of usage plus 3 new 200MB jobs,
        // so with 25% ML memory percent we need 4 * 1624MB
        assertThat(decision.get().tierSize().getBytes(), equalTo(ByteSizeValue.ofMb(6496).getBytes()));
    }
}
public void testScaleUp_withWaitingModelAndAutoMemoryAndNoRoomInNodes() {
    // A 2GB trained model deployment is waiting while the only ML node is already fully
    // occupied by a 1GB anomaly detection job, so a scale up must be requested.
    long oneGb = ByteSizeValue.ofGb(1).getBytes();
    long modelMemory = ByteSizeValue.ofGb(2).getBytes();
    when(mlMemoryTracker.getTrainedModelAssignmentMemoryRequirement(any())).thenReturn(modelMemory);
    NodeLoad loadedNode = NodeLoad.builder("any")
        .setMaxMemory(oneGb + PER_NODE_OVERHEAD)
        .setUseMemory(true)
        .incAssignedNativeCodeOverheadMemory(PER_NODE_OVERHEAD)
        .incAssignedAnomalyDetectorMemory(oneGb)
        .incNumAssignedAnomalyDetectorJobs()
        .build();
    NativeMemoryCapacity existingScale = new NativeMemoryCapacity(oneGb, oneGb);
    MlMemoryAutoscalingDecider decider = buildDecider();
    decider.setUseAuto(true);
    Optional<MlMemoryAutoscalingCapacity> response = decider.checkForScaleUp(
        0,
        0,
        List.of(loadedNode),
        List.of(),
        List.of(),
        List.of(),
        List.of("foo"),
        null,
        existingScale
    );
    assertTrue(response.isPresent());
    MlMemoryAutoscalingCapacity capacity = response.get();
    long allowedBytesForMlNode = NativeMemoryCalculator.allowedBytesForMl(capacity.nodeSize().getBytes(), 30, true);
    // Note: with more than 1 job involved this calculation could be a wild overestimate. We get away
    // with it here because all the jobs fit on one node. This is not how the production code works.
    long allowedBytesForMlTier = NativeMemoryCalculator.allowedBytesForMl(capacity.tierSize().getBytes(), 30, true);
    // Both a single node and the whole tier must be able to host the waiting 2GB model.
    assertThat(allowedBytesForMlNode, greaterThanOrEqualTo(modelMemory + PER_NODE_OVERHEAD));
    assertThat(allowedBytesForMlTier, greaterThanOrEqualTo(modelMemory + PER_NODE_OVERHEAD));
}
public void testScaleUp_withWaitingModelsAndRoomInNodes() {
    // Two small nodes in cluster, so simulate two availability zones
    when(nodeRealAvailabilityZoneMapper.getNumMlAvailabilityZones()).thenReturn(OptionalInt.of(2));
    NodeLoad partiallyFilled = NodeLoad.builder("partially_filled")
        .setMaxMemory(2 * TEST_JOB_SIZE + PER_NODE_OVERHEAD)
        .setUseMemory(true)
        .setMaxJobs(10)
        .incAssignedNativeCodeOverheadMemory(PER_NODE_OVERHEAD)
        .incAssignedAnomalyDetectorMemory(TEST_JOB_SIZE)
        .incNumAssignedAnomalyDetectorJobs()
        .build();
    NodeLoad notFilled = NodeLoad.builder("not_filled")
        .setMaxMemory(TEST_JOB_SIZE + PER_NODE_OVERHEAD)
        .setMaxJobs(10)
        .setUseMemory(true)
        .build();
    List<NodeLoad> nodesWithRoom = List.of(partiallyFilled, notFilled);
    NativeMemoryCapacity currentScale = new NativeMemoryCapacity(3 * TEST_JOB_SIZE, TEST_JOB_SIZE);
    MlMemoryAutoscalingDecider decider = buildDecider();
    decider.setMaxMachineMemoryPercent(25);
    // Three models waiting with no queue allowance: a scale up is expected.
    Optional<MlMemoryAutoscalingCapacity> scaleUp = decider.checkForScaleUp(
        0,
        0,
        nodesWithRoom,
        List.of(),
        List.of(),
        List.of(),
        List.of("foo", "bar", "baz"),
        null,
        currentScale
    );
    assertTrue(scaleUp.isPresent());
    MlMemoryAutoscalingCapacity requested = scaleUp.get();
    // Raw sizes are four times the ML requirement because the ML memory percent is 25.
    assertThat(requested.nodeSize().getBytes(), equalTo(4 * (TEST_JOB_SIZE + PER_NODE_OVERHEAD)));
    assertThat(requested.tierSize().getBytes(), equalTo(4 * (4 * TEST_JOB_SIZE + 2 * PER_NODE_OVERHEAD)));
    // With one model allowed to stay queued, the remaining one fits in the free space: no scale up.
    Optional<MlMemoryAutoscalingCapacity> noScaleUp = decider.checkForScaleUp(
        1,
        0,
        nodesWithRoom,
        List.of(),
        List.of(),
        List.of(),
        List.of("foo", "bar"),
        null,
        currentScale
    );
    assertFalse(noScaleUp.isPresent());
}
// Scale down across three nodes (three availability zones). With 25% max machine memory percent
// the requested raw node/tier sizes are four times the ML requirement. Three scenarios: smaller
// nodes possible, smaller tier possible, and no scale down possible.
public void testScaleDown() {
    when(nodeRealAvailabilityZoneMapper.getNumMlAvailabilityZones()).thenReturn(OptionalInt.of(3));
    MlMemoryAutoscalingDecider decider = buildDecider();
    decider.setMaxMachineMemoryPercent(25);
    { // Current capacity allows for smaller node
        // Three 5GB nodes, each running only a ~1GB job
        List<NodeLoad> nodeLoads = List.of(
            NodeLoad.builder("foo")
                .setMaxMemory(ByteSizeValue.ofGb(5).getBytes())
                .incAssignedNativeCodeOverheadMemory(PER_NODE_OVERHEAD)
                .incAssignedAnomalyDetectorMemory(ByteSizeValue.ofGb(1).getBytes() - PER_NODE_OVERHEAD)
                .incNumAssignedAnomalyDetectorJobs()
                .build(),
            NodeLoad.builder("bar")
                .setMaxMemory(ByteSizeValue.ofGb(5).getBytes())
                .incAssignedNativeCodeOverheadMemory(PER_NODE_OVERHEAD)
                .incAssignedAnomalyDetectorMemory(ByteSizeValue.ofGb(1).getBytes() - PER_NODE_OVERHEAD)
                .incNumAssignedAnomalyDetectorJobs()
                .build(),
            NodeLoad.builder("baz")
                .setMaxMemory(ByteSizeValue.ofGb(5).getBytes())
                .incAssignedNativeCodeOverheadMemory(PER_NODE_OVERHEAD)
                .incAssignedAnomalyDetectorMemory(ByteSizeValue.ofGb(1).getBytes() - PER_NODE_OVERHEAD)
                .incNumAssignedAnomalyDetectorJobs()
                .build()
        );
        Optional<MlMemoryAutoscalingCapacity> result = decider.checkForScaleDown(
            nodeLoads,
            ByteSizeValue.ofGb(1).getBytes() - PER_NODE_OVERHEAD,
            new NativeMemoryCapacity(
                ByteSizeValue.ofGb(15).getBytes() - 3 * PER_NODE_OVERHEAD,
                ByteSizeValue.ofGb(5).getBytes() - PER_NODE_OVERHEAD
            )
        );
        assertThat(result, isPresent());
        MlMemoryAutoscalingCapacity deciderResult = result.get();
        // Four times due to 25% ML memory
        assertThat(deciderResult.nodeSize().getBytes(), equalTo(4 * ByteSizeValue.ofGb(1).getBytes()));
        assertThat(deciderResult.tierSize().getBytes(), equalTo(ByteSizeValue.ofGb(12).getBytes()));
    }
    { // Current capacity allows for smaller tier
        // Three 1GB nodes, each running only a ~100MB job
        List<NodeLoad> nodeLoads = List.of(
            NodeLoad.builder("foo")
                .setMaxMemory(ByteSizeValue.ofGb(1).getBytes())
                .incAssignedNativeCodeOverheadMemory(PER_NODE_OVERHEAD)
                .incAssignedAnomalyDetectorMemory(ByteSizeValue.ofMb(100).getBytes() - PER_NODE_OVERHEAD)
                .incNumAssignedAnomalyDetectorJobs()
                .build(),
            NodeLoad.builder("bar")
                .setMaxMemory(ByteSizeValue.ofGb(1).getBytes())
                .incAssignedNativeCodeOverheadMemory(PER_NODE_OVERHEAD)
                .incAssignedAnomalyDetectorMemory(ByteSizeValue.ofMb(100).getBytes() - PER_NODE_OVERHEAD)
                .incNumAssignedAnomalyDetectorJobs()
                .build(),
            NodeLoad.builder("baz")
                .setMaxMemory(ByteSizeValue.ofGb(1).getBytes())
                .incAssignedNativeCodeOverheadMemory(PER_NODE_OVERHEAD)
                .incAssignedAnomalyDetectorMemory(ByteSizeValue.ofMb(100).getBytes() - PER_NODE_OVERHEAD)
                .incNumAssignedAnomalyDetectorJobs()
                .build()
        );
        Optional<MlMemoryAutoscalingCapacity> result = decider.checkForScaleDown(
            nodeLoads,
            ByteSizeValue.ofMb(100).getBytes() - PER_NODE_OVERHEAD,
            new NativeMemoryCapacity(
                ByteSizeValue.ofGb(3).getBytes() - 3 * PER_NODE_OVERHEAD,
                ByteSizeValue.ofGb(1).getBytes() - PER_NODE_OVERHEAD
            )
        );
        assertThat(result, isPresent());
        MlMemoryAutoscalingCapacity deciderResult = result.get();
        // Four times due to 25% ML memory
        assertThat(deciderResult.nodeSize().getBytes(), equalTo(4 * ByteSizeValue.ofMb(100).getBytes()));
        assertThat(deciderResult.tierSize().getBytes(), equalTo(ByteSizeValue.ofMb(100).getBytes() * 12));
    }
    { // Scale down is not really possible
        // Three 100MB nodes, each already full with a ~100MB job
        List<NodeLoad> nodeLoads = List.of(
            NodeLoad.builder("foo")
                .setMaxMemory(ByteSizeValue.ofMb(100).getBytes())
                .incAssignedNativeCodeOverheadMemory(PER_NODE_OVERHEAD)
                .incAssignedAnomalyDetectorMemory(ByteSizeValue.ofMb(100).getBytes() - PER_NODE_OVERHEAD)
                .incNumAssignedAnomalyDetectorJobs()
                .build(),
            NodeLoad.builder("bar")
                .setMaxMemory(ByteSizeValue.ofMb(100).getBytes())
                .incAssignedNativeCodeOverheadMemory(PER_NODE_OVERHEAD)
                .incAssignedAnomalyDetectorMemory(ByteSizeValue.ofMb(100).getBytes() - PER_NODE_OVERHEAD)
                .incNumAssignedAnomalyDetectorJobs()
                .build(),
            NodeLoad.builder("baz")
                .setMaxMemory(ByteSizeValue.ofMb(100).getBytes())
                .incAssignedNativeCodeOverheadMemory(PER_NODE_OVERHEAD)
                .incAssignedAnomalyDetectorMemory(ByteSizeValue.ofMb(100).getBytes() - PER_NODE_OVERHEAD)
                .incNumAssignedAnomalyDetectorJobs()
                .build()
        );
        Optional<MlMemoryAutoscalingCapacity> result = decider.checkForScaleDown(
            nodeLoads,
            ByteSizeValue.ofMb(100).getBytes() - PER_NODE_OVERHEAD,
            new NativeMemoryCapacity(
                ByteSizeValue.ofMb(300).getBytes() - 3 * PER_NODE_OVERHEAD,
                ByteSizeValue.ofMb(100).getBytes() - PER_NODE_OVERHEAD
            )
        );
        assertThat(result, isEmpty());
    }
}
public void testCpuModelAssignmentRequirements() {
assertTrue(
MlMemoryAutoscalingDecider.modelAssignmentsRequireMoreThanHalfCpu(
List.of(
TrainedModelAssignment.Builder.empty(
new StartTrainedModelDeploymentAction.TaskParams(
"model1",
"deployment_1",
TEST_JOB_SIZE,
2,
3,
100,
null,
Priority.NORMAL,
0L,
0L
),
null
).build(),
TrainedModelAssignment.Builder.empty(
new StartTrainedModelDeploymentAction.TaskParams(
"model1",
"deployment_1",
TEST_JOB_SIZE,
1,
1,
100,
null,
Priority.NORMAL,
0L,
0L
),
null
).build()
),
withMlNodes("ml_node_1", "ml_node_2"),
1
)
);
assertTrue(
MlMemoryAutoscalingDecider.modelAssignmentsRequireMoreThanHalfCpu(
List.of(
TrainedModelAssignment.Builder.empty(
new StartTrainedModelDeploymentAction.TaskParams(
"model1",
"deployment_1",
TEST_JOB_SIZE,
1,
3,
100,
null,
Priority.NORMAL,
0L,
0L
),
null
).build(),
TrainedModelAssignment.Builder.empty(
new StartTrainedModelDeploymentAction.TaskParams(
"model1",
"deployment_1",
TEST_JOB_SIZE,
1,
1,
100,
null,
Priority.NORMAL,
0L,
0L
),
null
).build()
),
withMlNodes("ml_node_1", "ml_node_2"),
1
)
);
assertFalse(
MlMemoryAutoscalingDecider.modelAssignmentsRequireMoreThanHalfCpu(
List.of(
TrainedModelAssignment.Builder.empty(
new StartTrainedModelDeploymentAction.TaskParams(
"model1",
"deployment_1",
TEST_JOB_SIZE,
1,
3,
100,
null,
Priority.NORMAL,
0L,
0L
),
null
).build(),
TrainedModelAssignment.Builder.empty(
new StartTrainedModelDeploymentAction.TaskParams(
"model1",
"deployment_1",
TEST_JOB_SIZE,
1,
1,
100,
null,
Priority.NORMAL,
0L,
0L
),
null
).build()
),
withMlNodes("ml_node_1", "ml_node_2", "ml_node_3", "ml_node_4"),
1
)
);
}
public void testEnsureScaleDown() {
assertThat(
MlMemoryAutoscalingDecider.ensureScaleDown(
MlMemoryAutoscalingCapacity.builder(ByteSizeValue.ofGb(1), ByteSizeValue.ofGb(8)).build(),
MlMemoryAutoscalingCapacity.builder(ByteSizeValue.ofGb(2), ByteSizeValue.ofGb(4)).build()
),
equalTo(MlMemoryAutoscalingCapacity.builder(ByteSizeValue.ofGb(1), ByteSizeValue.ofGb(4)).build())
);
assertThat(
MlMemoryAutoscalingDecider.ensureScaleDown(
MlMemoryAutoscalingCapacity.builder(ByteSizeValue.ofGb(3), ByteSizeValue.ofGb(8)).build(),
MlMemoryAutoscalingCapacity.builder(ByteSizeValue.ofGb(2), ByteSizeValue.ofGb(4)).build()
),
equalTo(MlMemoryAutoscalingCapacity.builder(ByteSizeValue.ofGb(2), ByteSizeValue.ofGb(4)).build())
);
assertThat(
MlMemoryAutoscalingDecider.ensureScaleDown(
MlMemoryAutoscalingCapacity.builder(ByteSizeValue.ofGb(3), ByteSizeValue.ofGb(4)).build(),
MlMemoryAutoscalingCapacity.builder(ByteSizeValue.ofGb(2), ByteSizeValue.ofGb(3)).build()
),
equalTo(MlMemoryAutoscalingCapacity.builder(ByteSizeValue.ofGb(2), ByteSizeValue.ofGb(3)).build())
);
}
public void testFutureAvailableCapacity() {
nodeLoadDetector = new NodeLoadDetector(mlMemoryTracker);
MlMemoryAutoscalingDecider decider = buildDecider();
decider.setUseAuto(true);
boolean waitingAnalytics = randomBoolean();
boolean waitingAnomalyJobs = waitingAnalytics == false || randomBoolean();
int maxWaitingAnalytics = randomIntBetween(1, 2);
int maxWaitingAnomaly = randomIntBetween(1, 2);
List<String> assignedAnomalyJobs = randomList(0, 2, () -> randomAlphaOfLength(10));
List<String> batchAnomalyJobs = randomList(0, 2, () -> randomAlphaOfLength(10));
List<String> assignedAnalyticsJobs = randomList(0, 2, () -> randomAlphaOfLength(10));
ClusterState clusterState = clusterState(
assignedAnomalyJobs,
batchAnomalyJobs,
assignedAnalyticsJobs,
waitingAnomalyJobs ? randomList(1, maxWaitingAnomaly, () -> randomAlphaOfLength(10)) : List.of(),
waitingAnalytics ? randomList(1, maxWaitingAnalytics, () -> randomAlphaOfLength(10)) : List.of()
);
Collection<DiscoveryNode> mlNodesInCluster = clusterState.getNodes().getNodes().values();
Optional<NativeMemoryCapacity> nativeMemoryCapacity = decider.calculateFutureAvailableCapacity(mlNodesInCluster, clusterState);
assertThat(nativeMemoryCapacity, isPresent());
assertThat(nativeMemoryCapacity.get().getNodeMlNativeMemoryRequirementExcludingOverhead(), greaterThanOrEqualTo(TEST_JOB_SIZE));
assertThat(
nativeMemoryCapacity.get().getNodeMlNativeMemoryRequirementExcludingOverhead(),
lessThanOrEqualTo(ML_MEMORY_FOR_TEST_NODE_SIZE)
);
assertThat(
nativeMemoryCapacity.get().getTierMlNativeMemoryRequirementExcludingOverhead(),
greaterThanOrEqualTo(TEST_JOB_SIZE * (assignedAnalyticsJobs.size() + batchAnomalyJobs.size()))
);
assertThat(
nativeMemoryCapacity.get().getTierMlNativeMemoryRequirementExcludingOverhead(),
lessThanOrEqualTo(mlNodesInCluster.size() * (ML_MEMORY_FOR_TEST_NODE_SIZE - NATIVE_EXECUTABLE_CODE_OVERHEAD.getBytes()))
);
}
public void testScale_WithNoScaleUpButWaitingJobs() {
nodeLoadDetector = new NodeLoadDetector(mlMemoryTracker);
MlMemoryAutoscalingDecider decider = buildDecider();
decider.setUseAuto(true);
boolean waitingAnalytics = randomBoolean();
boolean waitingAnomalyJobs = waitingAnalytics == false || randomBoolean();
int maxWaitingAnalytics = randomIntBetween(1, 2);
int maxWaitingAnomaly = randomIntBetween(1, 2);
ClusterState clusterState = clusterState(
randomList(0, 2, () -> randomAlphaOfLength(10)),
randomList(0, 2, () -> randomAlphaOfLength(10)),
randomList(0, 2, () -> randomAlphaOfLength(10)),
waitingAnomalyJobs ? randomList(1, maxWaitingAnomaly, () -> randomAlphaOfLength(10)) : List.of(),
waitingAnalytics ? randomList(1, maxWaitingAnalytics, () -> randomAlphaOfLength(10)) : List.of()
);
Settings settings = Settings.builder()
.put(MlAutoscalingDeciderService.NUM_ANALYTICS_JOBS_IN_QUEUE.getKey(), maxWaitingAnalytics)
.put(MlAutoscalingDeciderService.NUM_ANOMALY_JOBS_IN_QUEUE.getKey(), maxWaitingAnomaly)
.build();
AutoscalingCapacity autoscalingCapacity = new AutoscalingCapacity(
new AutoscalingCapacity.AutoscalingResources(ByteSizeValue.ofGb(1), ByteSizeValue.ofGb(1), null),
new AutoscalingCapacity.AutoscalingResources(ByteSizeValue.ofGb(1), ByteSizeValue.ofGb(1), null)
);
DeciderContext deciderContext = new DeciderContext(clusterState, autoscalingCapacity);
MlAutoscalingContext mlAutoscalingContext = new MlAutoscalingContext(clusterState);
MlMemoryAutoscalingCapacity result = decider.scale(settings, deciderContext, mlAutoscalingContext, 1);
assertThat(result.reason(), containsString("but the number in the queue is less than the configured maximum allowed"));
assertThat(result.nodeSize(), equalTo(ByteSizeValue.ofGb(1)));
assertThat(result.tierSize(), equalTo(ByteSizeValue.ofGb(1)));
}
public void testScale_WithNoMlNodesButWaitingAnalytics() {
nodeLoadDetector = new NodeLoadDetector(mlMemoryTracker);
MlMemoryAutoscalingDecider decider = buildDecider();
decider.setUseAuto(true);
final String analyticsId = "waiting-analytics";
PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder();
addAnalyticsTask(analyticsId, null, DataFrameAnalyticsState.STARTING, tasksBuilder);
ClusterState.Builder clusterStateBuilder = ClusterState.builder(new ClusterName("_name"));
Metadata.Builder metadata = Metadata.builder();
metadata.putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build());
clusterStateBuilder.metadata(metadata);
ClusterState clusterState = clusterStateBuilder.build();
Settings settings = Settings.builder()
.put(MlAutoscalingDeciderService.NUM_ANALYTICS_JOBS_IN_QUEUE.getKey(), 0)
.put(MlAutoscalingDeciderService.NUM_ANOMALY_JOBS_IN_QUEUE.getKey(), 0)
.build();
DeciderContext deciderContext = new DeciderContext(clusterState, AutoscalingCapacity.ZERO);
MlAutoscalingContext mlAutoscalingContext = new MlAutoscalingContext(clusterState);
MlMemoryAutoscalingCapacity result = decider.scale(settings, deciderContext, mlAutoscalingContext, 1);
assertThat(
result.reason(),
containsString(
"requesting scale up as number of jobs in queues exceeded configured limit and there are no machine learning nodes"
)
);
assertThat(result.nodeSize(), equalTo(ByteSizeValue.ofMb(714)));
assertThat(result.tierSize(), equalTo(ByteSizeValue.ofMb(714)));
}
private MlMemoryAutoscalingDecider buildDecider() {
return new MlMemoryAutoscalingDecider(
settings,
clusterService,
nodeRealAvailabilityZoneMapper,
nodeLoadDetector,
new ScaleTimer(timeSupplier)
);
}
private static ClusterState clusterState(
List<String> ongoingAnomalyTasks,
List<String> batchAnomalyTasks,
List<String> analyticsTasks,
List<String> waitingAnomalyTasks,
List<String> waitingAnalyticsTasks
) {
List<String> nodeNames = List.of("_node_id1", "_node_id2", "_node_id3");
List<DiscoveryNode> nodeList = withMlNodes(nodeNames.toArray(String[]::new));
DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder();
for (DiscoveryNode node : nodeList) {
nodesBuilder.add(node);
}
PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder();
for (String jobId : ongoingAnomalyTasks) {
OpenJobPersistentTasksExecutorTests.addJobTask(
jobId,
randomFrom(nodeNames),
randomFrom(JobState.CLOSING, JobState.OPENED, JobState.OPENING, null),
tasksBuilder
);
}
for (String jobId : batchAnomalyTasks) {
String nodeAssignment = randomFrom(nodeNames);
OpenJobPersistentTasksExecutorTests.addJobTask(
jobId,
nodeAssignment,
randomFrom(JobState.CLOSING, JobState.OPENED, JobState.OPENING, null),
tasksBuilder
);
StartDatafeedAction.DatafeedParams dfParams = new StartDatafeedAction.DatafeedParams(jobId + "-datafeed", 0);
dfParams.setEndTime(new Date().getTime());
tasksBuilder.addTask(
MlTasks.datafeedTaskId(jobId + "-datafeed"),
MlTasks.DATAFEED_TASK_NAME,
dfParams,
new PersistentTasksCustomMetadata.Assignment(nodeAssignment, "test")
);
}
for (String analyticsId : analyticsTasks) {
addAnalyticsTask(
analyticsId,
randomFrom(nodeNames),
randomFrom(
DataFrameAnalyticsState.STARTED,
DataFrameAnalyticsState.REINDEXING,
DataFrameAnalyticsState.ANALYZING,
DataFrameAnalyticsState.STOPPING,
DataFrameAnalyticsState.STARTING
),
tasksBuilder
);
}
for (String job : waitingAnalyticsTasks) {
addAnalyticsTask(job, null, null, tasksBuilder);
}
for (String job : waitingAnomalyTasks) {
addJobTask(job, null, null, tasksBuilder);
}
PersistentTasksCustomMetadata tasks = tasksBuilder.build();
ClusterState.Builder cs = ClusterState.builder(new ClusterName("_name"));
cs.nodes(nodesBuilder);
Metadata.Builder metadata = Metadata.builder();
metadata.putCustom(PersistentTasksCustomMetadata.TYPE, tasks);
cs.metadata(metadata);
return cs.build();
}
private static List<DiscoveryNode> withMlNodes(String... nodeName) {
return Arrays.stream(nodeName)
.map(
n -> DiscoveryNodeUtils.create(
n,
buildNewFakeTransportAddress(),
Map.of(
MACHINE_MEMORY_NODE_ATTR,
String.valueOf(TEST_NODE_SIZE),
MAX_JVM_SIZE_NODE_ATTR,
String.valueOf(TEST_JVM_SIZE),
MachineLearning.ALLOCATED_PROCESSORS_NODE_ATTR,
String.valueOf(TEST_ALLOCATED_PROCESSORS)
),
Set.of(DiscoveryNodeRole.ML_ROLE)
)
)
.toList();
}
public static void addAnalyticsTask(
String jobId,
String nodeId,
DataFrameAnalyticsState jobState,
PersistentTasksCustomMetadata.Builder builder
) {
builder.addTask(
MlTasks.dataFrameAnalyticsTaskId(jobId),
MlTasks.DATA_FRAME_ANALYTICS_TASK_NAME,
new StartDataFrameAnalyticsAction.TaskParams(jobId, MlConfigVersion.CURRENT, true),
nodeId == null ? AWAITING_LAZY_ASSIGNMENT : new PersistentTasksCustomMetadata.Assignment(nodeId, "test assignment")
);
if (jobState != null) {
builder.updateTaskState(
MlTasks.dataFrameAnalyticsTaskId(jobId),
new DataFrameAnalyticsTaskState(jobState, builder.getLastAllocationId(), null, Instant.now())
);
}
}
public static void addJobTask(String jobId, String nodeId, JobState jobState, PersistentTasksCustomMetadata.Builder builder) {
builder.addTask(
MlTasks.jobTaskId(jobId),
MlTasks.JOB_TASK_NAME,
new OpenJobAction.JobParams(jobId),
nodeId == null ? AWAITING_LAZY_ASSIGNMENT : new PersistentTasksCustomMetadata.Assignment(nodeId, "test assignment")
);
if (jobState != null) {
builder.updateTaskState(
MlTasks.jobTaskId(jobId),
new JobTaskState(jobState, builder.getLastAllocationId(), null, Instant.now())
);
}
}
static
|
MlMemoryAutoscalingDeciderTests
|
java
|
ReactiveX__RxJava
|
src/jmh/java/io/reactivex/rxjava3/core/ToFlowablePerf.java
|
{
"start": 1027,
"end": 3168
}
|
class ____ {
@Param({ "1", "1000", "1000000" })
public int times;
Maybe<Integer> flowable;
Flowable<Integer> flowableInner;
Observable<Integer> observable;
Observable<Integer> observableInner;
@Setup
public void setup() {
Integer[] array = new Integer[times];
Arrays.fill(array, 777);
Flowable<Integer> source = Flowable.fromArray(array);
final BiFunction<Integer, Integer, Integer> second = new BiFunction<Integer, Integer, Integer>() {
@Override
public Integer apply(Integer a, Integer b) {
return b;
}
};
flowable = source.reduce(second);
flowableInner = source.concatMap(new Function<Integer, Publisher<Integer>>() {
@Override
public Publisher<Integer> apply(Integer v) {
return Flowable.range(1, 50).reduce(second).toFlowable();
}
});
Observable<Integer> sourceObs = Observable.fromArray(array);
observable = sourceObs.reduce(second).toObservable();
observableInner = sourceObs.concatMap(new Function<Integer, Observable<Integer>>() {
@Override
public Observable<Integer> apply(Integer v) {
return Observable.range(1, 50).reduce(second).toObservable();
}
});
}
@Benchmark
public Object flowable() {
return flowable.blockingGet();
}
@Benchmark
public Object flowableInner() {
return flowableInner.blockingLast();
}
@Benchmark
public Object observable() {
return observable.blockingLast();
}
@Benchmark
public Object observableInner() {
return observableInner.blockingLast();
}
static volatile Object o;
public static void main(String[] args) {
ToFlowablePerf p = new ToFlowablePerf();
p.times = 1000000;
p.setup();
for (int j = 0; j < 15; j++) {
for (int i = 0; i < 600; i++) {
o = p.flowable();
}
System.out.println("--- " + j);
}
}
}
|
ToFlowablePerf
|
java
|
apache__kafka
|
connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/Plugins.java
|
{
"start": 18549,
"end": 19802
}
|
class ____ instead. Classes found: "
+ connectors.stream().map(PluginDesc::toString).collect(Collectors.joining(", "))
);
}
PluginDesc<? extends Connector> entry = matches.get(0);
klass = entry.pluginClass();
}
return klass;
}
public Class<? extends Connector> connectorClass(String connectorClassOrAlias) {
return connectorClass(connectorClassOrAlias, null);
}
public Task newTask(Class<? extends Task> taskClass) {
return newPlugin(taskClass);
}
/**
* If the given configuration defines a {@link Converter} using the named configuration property, return a new configured instance.
*
* @param config the configuration containing the {@link Converter}'s configuration; may not be null
* @param classPropertyName the name of the property that contains the name of the {@link Converter} class; may not be null
* @param classLoaderUsage which classloader should be used
* @return the instantiated and configured {@link Converter}; null if the configuration did not define the specified property
* @throws ConnectException if the {@link Converter} implementation
|
name
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/inheritance/JoinTablePrimaryKeyJoinColumnTest.java
|
{
"start": 2114,
"end": 3061
}
|
class ____ {
@Id
private Long id;
private String owner;
private BigDecimal balance;
private BigDecimal interestRate;
//Getters and setters are omitted for brevity
//end::entity-inheritance-joined-table-primary-key-join-column-example[]
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getOwner() {
return owner;
}
public void setOwner(String owner) {
this.owner = owner;
}
public BigDecimal getBalance() {
return balance;
}
public void setBalance(BigDecimal balance) {
this.balance = balance;
}
public BigDecimal getInterestRate() {
return interestRate;
}
public void setInterestRate(BigDecimal interestRate) {
this.interestRate = interestRate;
}
//tag::entity-inheritance-joined-table-primary-key-join-column-example[]
}
@Entity(name = "DebitAccount")
@PrimaryKeyJoinColumn(name = "account_id")
public static
|
Account
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhance/internal/bytebuddy/DirtyCheckingWithMappedsuperclassTest.java
|
{
"start": 2273,
"end": 5178
}
|
class ____ {
@Test
public void shouldDeclareFieldsInEntityClass() {
assertThat( CardGame.class )
.hasDeclaredFields( ENTITY_ENTRY_FIELD_NAME, PREVIOUS_FIELD_NAME, NEXT_FIELD_NAME, TRACKER_FIELD_NAME );
}
@Test
public void shouldDeclareMethodsInEntityClass() {
assertThat( CardGame.class )
.hasDeclaredMethods( PERSISTENT_FIELD_READER_PREFIX + "id", PERSISTENT_FIELD_WRITER_PREFIX + "id" )
.hasDeclaredMethods( PERSISTENT_FIELD_READER_PREFIX + "name", PERSISTENT_FIELD_WRITER_PREFIX + "name" )
.hasDeclaredMethods( PERSISTENT_FIELD_READER_PREFIX + "code", PERSISTENT_FIELD_WRITER_PREFIX + "code" )
.hasDeclaredMethods( ENTITY_INSTANCE_GETTER_NAME, ENTITY_ENTRY_GETTER_NAME )
.hasDeclaredMethods( PREVIOUS_GETTER_NAME, PREVIOUS_SETTER_NAME, NEXT_GETTER_NAME, NEXT_SETTER_NAME )
.hasDeclaredMethods( TRACKER_HAS_CHANGED_NAME, TRACKER_CLEAR_NAME, TRACKER_SUSPEND_NAME, TRACKER_GET_NAME );
}
@Test
public void shouldCreateTheTracker() throws Exception {
CardGame entity = new CardGame( "MTG", "Magic the Gathering" );
assertThat( entity )
.extracting( NEXT_FIELD_NAME ).isNull();
assertThat( entity )
.extracting( PREVIOUS_FIELD_NAME ).isNull();
assertThat( entity )
.extracting( ENTITY_ENTRY_FIELD_NAME ).isNull();
assertThat( entity )
.extracting( TRACKER_FIELD_NAME ).isInstanceOf( SimpleFieldTracker.class );
assertThat( entity ).extracting( resultOf( TRACKER_HAS_CHANGED_NAME ) ).isEqualTo( true );
assertThat( entity ).extracting( resultOf( TRACKER_GET_NAME ) ).isEqualTo( new String[] { "name", "code" } );
}
@Test
public void shouldResetTheTracker() throws Exception {
CardGame entity = new CardGame( "7WD", "7 Wonders duel" );
Method trackerClearMethod = CardGame.class.getMethod( TRACKER_CLEAR_NAME );
trackerClearMethod.invoke( entity );
assertThat( entity ).extracting( resultOf( TRACKER_HAS_CHANGED_NAME ) ).isEqualTo( false );
assertThat( entity ).extracting( resultOf( TRACKER_GET_NAME ) ).isEqualTo( new String[0] );
}
@Test
public void shouldUpdateTheTracker() throws Exception {
CardGame entity = new CardGame( "SPL", "Splendor" );
assertThat( entity.getCode() ).isEqualTo( "XsplX" );
Method trackerClearMethod = CardGame.class.getMethod( TRACKER_CLEAR_NAME );
trackerClearMethod.invoke( entity );
entity.setName( "Splendor: Cities of Splendor" );
assertThat( entity.getCode() )
.as( "Field 'code' should have not change" ).isEqualTo( "XsplX" );
assertThat( entity ).extracting( resultOf( TRACKER_HAS_CHANGED_NAME ) ).isEqualTo( true );
assertThat( entity ).extracting( resultOf( TRACKER_GET_NAME ) ).isEqualTo( new String[] { "name" } );
entity.setName( "Cities of Splendor" );
assertThat( entity ).extracting( resultOf( TRACKER_GET_NAME ) ).isEqualTo( new String[] { "name", "code" } );
}
@MappedSuperclass
public static abstract
|
DirtyCheckingWithMappedsuperclassTest
|
java
|
bumptech__glide
|
library/src/main/java/com/bumptech/glide/load/model/MediaStoreFileLoader.java
|
{
"start": 656,
"end": 1229
}
|
class ____ implements ModelLoader<Uri, File> {
private final Context context;
// Public API.
@SuppressWarnings("WeakerAccess")
public MediaStoreFileLoader(Context context) {
this.context = context;
}
@Override
public LoadData<File> buildLoadData(
@NonNull Uri uri, int width, int height, @NonNull Options options) {
return new LoadData<>(new ObjectKey(uri), new FilePathFetcher(context, uri));
}
@Override
public boolean handles(@NonNull Uri uri) {
return MediaStoreUtil.isMediaStoreUri(uri);
}
private static
|
MediaStoreFileLoader
|
java
|
quarkusio__quarkus
|
independent-projects/qute/core/src/main/java/io/quarkus/qute/InsertSectionHelper.java
|
{
"start": 2010,
"end": 3089
}
|
class ____ implements SectionHelperFactory<InsertSectionHelper> {
@Override
public List<String> getDefaultAliases() {
return ImmutableList.of("insert");
}
@Override
public ParametersInfo getParameters() {
return ParametersInfo.builder().addParameter("name", IncludeSectionHelper.DEFAULT_NAME).build();
}
@Override
public InsertSectionHelper initialize(SectionInitContext context) {
String name = context.getParameter("name");
if (context.getEngine().getSectionHelperFactories().containsKey(name)) {
throw context.error(
"\\{#insert} defined in the \\{#include\\} conflicts with an existing section/tag: {name}")
.code(Code.INSERT_SECTION_CONFLICT)
.argument("name", name)
.origin(context.getOrigin())
.build();
}
return new InsertSectionHelper(name, context.getBlocks().get(0));
}
}
|
Factory
|
java
|
jhy__jsoup
|
src/main/java/org/jsoup/nodes/Element.java
|
{
"start": 71669,
"end": 71972
}
|
class ____ to remove
@return this element
*/
public Element removeClass(String className) {
Validate.notNull(className);
Set<String> classes = classNames();
classes.remove(className);
classNames(classes);
return this;
}
/**
Toggle a
|
name
|
java
|
mockito__mockito
|
mockito-core/src/main/java/org/mockito/internal/stubbing/OngoingStubbingImpl.java
|
{
"start": 1175,
"end": 1356
}
|
interface ____ tests
return invocationContainer.getInvocations();
}
public void setStrictness(Strictness strictness) {
this.strictness = strictness;
}
}
|
for
|
java
|
apache__dubbo
|
dubbo-compatible/src/test/java/org/apache/dubbo/metadata/annotation/processing/builder/CollectionTypeDefinitionBuilderTest.java
|
{
"start": 1610,
"end": 4900
}
|
class ____ extends AbstractAnnotationProcessingTest {
private CollectionTypeDefinitionBuilder builder;
private VariableElement stringsField;
private VariableElement colorsField;
private VariableElement primitiveTypeModelsField;
private VariableElement modelsField;
private VariableElement modelArraysField;
@Override
protected void addCompiledClasses(Set<Class<?>> classesToBeCompiled) {
classesToBeCompiled.add(CollectionTypeModel.class);
}
@Override
protected void beforeEach() {
builder = new CollectionTypeDefinitionBuilder();
TypeElement testType = getType(CollectionTypeModel.class);
stringsField = findField(testType, "strings");
colorsField = findField(testType, "colors");
primitiveTypeModelsField = findField(testType, "primitiveTypeModels");
modelsField = findField(testType, "models");
modelArraysField = findField(testType, "modelArrays");
assertEquals("strings", stringsField.getSimpleName().toString());
assertEquals("colors", colorsField.getSimpleName().toString());
assertEquals(
"primitiveTypeModels", primitiveTypeModelsField.getSimpleName().toString());
assertEquals("models", modelsField.getSimpleName().toString());
assertEquals("modelArrays", modelArraysField.getSimpleName().toString());
}
@Test
void testAccept() {
assertTrue(builder.accept(processingEnv, stringsField.asType()));
assertTrue(builder.accept(processingEnv, colorsField.asType()));
assertTrue(builder.accept(processingEnv, primitiveTypeModelsField.asType()));
assertTrue(builder.accept(processingEnv, modelsField.asType()));
assertTrue(builder.accept(processingEnv, modelArraysField.asType()));
}
@Test
void testBuild() {
buildAndAssertTypeDefinition(
processingEnv, stringsField, "java.util.Collection<java.lang.String>", "java.lang.String", builder);
buildAndAssertTypeDefinition(
processingEnv,
colorsField,
"java.util.List<org.apache.dubbo.metadata.annotation.processing.model.Color>",
"org.apache.dubbo.metadata.annotation.processing.model.Color",
builder);
buildAndAssertTypeDefinition(
processingEnv,
primitiveTypeModelsField,
"java.util.Queue<org.apache.dubbo.metadata.annotation.processing.model.PrimitiveTypeModel>",
"org.apache.dubbo.metadata.annotation.processing.model.PrimitiveTypeModel",
builder);
buildAndAssertTypeDefinition(
processingEnv,
modelsField,
"java.util.Deque<org.apache.dubbo.metadata.annotation.processing.model.Model>",
"org.apache.dubbo.metadata.annotation.processing.model.Model",
builder);
buildAndAssertTypeDefinition(
processingEnv,
modelArraysField,
"java.util.Set<org.apache.dubbo.metadata.annotation.processing.model.Model[]>",
"org.apache.dubbo.metadata.annotation.processing.model.Model[]",
builder);
}
}
|
CollectionTypeDefinitionBuilderTest
|
java
|
google__error-prone
|
core/src/main/java/com/google/errorprone/bugpatterns/InconsistentCapitalization.java
|
{
"start": 2037,
"end": 4661
}
|
class ____ extends BugChecker implements ClassTreeMatcher {
@Override
public Description matchClass(ClassTree tree, VisitorState state) {
ImmutableSet<Symbol> fields = FieldScanner.findFields(tree);
if (fields.isEmpty()) {
return Description.NO_MATCH;
}
ImmutableMap<String, Symbol> fieldNamesMap =
fields.stream()
.collect(
toImmutableMap(
symbol -> Ascii.toLowerCase(symbol.toString()), x -> x, (x, y) -> x));
ImmutableMap<TreePath, Symbol> matchedParameters =
MatchingParametersScanner.findMatchingParameters(fieldNamesMap, state.getPath());
if (matchedParameters.isEmpty()) {
return Description.NO_MATCH;
}
for (Map.Entry<TreePath, Symbol> entry : matchedParameters.entrySet()) {
TreePath parameterPath = entry.getKey();
Symbol field = entry.getValue();
String fieldName = field.getSimpleName().toString();
VariableTree parameterTree = (VariableTree) parameterPath.getLeaf();
SuggestedFix.Builder fix =
SuggestedFixes.renameVariable(parameterTree, fieldName, state).toBuilder();
if (parameterPath.getParentPath() != null) {
String qualifiedName =
getExplicitQualification(parameterPath, tree, state) + field.getSimpleName();
// If the field was accessed in a non-qualified way, by renaming the parameter this may
// cause clashes with it. Thus, it is required to qualify all uses of the field within the
// parameter's scope just in case.
parameterPath
.getParentPath()
.getLeaf()
.accept(
new TreeScanner<Void, Void>() {
@Override
public Void visitIdentifier(IdentifierTree tree, Void unused) {
if (field.equals(ASTHelpers.getSymbol(tree))) {
fix.replace(tree, qualifiedName);
}
return null;
}
},
null);
}
state.reportMatch(
buildDescription(parameterPath.getLeaf())
.setMessage(
String.format(
"Found the field '%s' with the same name as the parameter '%s' but with "
+ "different capitalization.",
fieldName, ((VariableTree) parameterPath.getLeaf()).getName()))
.addFix(fix.build())
.build());
}
return Description.NO_MATCH;
}
/**
* Returns the qualification to access a field of the given
|
InconsistentCapitalization
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestAnnotations.java
|
{
"start": 1179,
"end": 1285
}
|
class ____ methods have
* either {@link Idempotent} or {@link AtMostOnce} once annotations.
*/
public
|
public
|
java
|
quarkusio__quarkus
|
extensions/vertx-http/deployment/src/test/java/io/quarkus/vertx/http/security/FluentApiAuthenticationMechanismSelectionTest.java
|
{
"start": 10808,
"end": 11553
}
|
class ____ implements HttpAuthenticationMechanism {
private final HttpAuthenticationMechanism delegate = new BasicAuthenticationMechanism(null, false);
@Override
public Uni<SecurityIdentity> authenticate(RoutingContext context, IdentityProviderManager identityProviderManager) {
return delegate.authenticate(context, identityProviderManager);
}
@Override
public Uni<ChallengeData> getChallenge(RoutingContext context) {
return delegate.getChallenge(context);
}
@Override
public Set<Class<? extends AuthenticationRequest>> getCredentialTypes() {
return delegate.getCredentialTypes();
}
}
}
|
AbstractCustomAuthenticationMechanism
|
java
|
spring-projects__spring-security
|
config/src/main/java/org/springframework/security/config/annotation/web/builders/HttpSecurity.java
|
{
"start": 55123,
"end": 56310
}
|
class ____ {
*
* @Bean
* public SecurityFilterChain securityFilterChain(HttpSecurity http) {
* http
* .authorizeHttpRequests((authorizeHttpRequests) ->
* authorizeHttpRequests
* .anyRequest().authenticated()
* )
* .oauth2Client(withDefaults());
* return http.build();
* }
* }
* </pre>
* @param oauth2ClientCustomizer the {@link Customizer} to provide more options for
* the {@link OAuth2ClientConfigurer}
* @return the {@link HttpSecurity} for further customizations
* @ @see
* <a target="_blank" href= "https://tools.ietf.org/html/rfc6749#section-1.1">OAuth
* 2.0 Authorization Framework</a>
*/
public HttpSecurity oauth2Client(Customizer<OAuth2ClientConfigurer<HttpSecurity>> oauth2ClientCustomizer) {
oauth2ClientCustomizer.customize(getOrApply(new OAuth2ClientConfigurer<>()));
return HttpSecurity.this;
}
/**
* Configures OAuth 2.0 Resource Server support.
*
* <h2>Example Configuration</h2>
*
* The following example demonstrates how to configure a custom JWT authentication
* converter.
*
* <pre>
* @Configuration
* @EnableWebSecurity
* public
|
OAuth2ClientSecurityConfig
|
java
|
spring-projects__spring-security
|
core/src/test/java/org/springframework/security/authentication/DefaultAuthenticationEventPublisherTests.java
|
{
"start": 10715,
"end": 10854
}
|
class ____ extends AuthenticationException {
MockAuthenticationException(String msg) {
super(msg);
}
}
}
|
MockAuthenticationException
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/aop/aspectj/DeclarationOrderIndependenceTests.java
|
{
"start": 2885,
"end": 3453
}
|
interface ____ {
void beforeAdviceFired();
void afterReturningAdviceFired();
void aroundAdviceFired();
}
private Collaborator collaborator;
public void setCollaborator(Collaborator collaborator) {
this.collaborator = collaborator;
}
public void before() {
this.collaborator.beforeAdviceFired();
}
public void afterReturning() {
this.collaborator.afterReturningAdviceFired();
}
public Object around(ProceedingJoinPoint pjp) throws Throwable {
Object ret = pjp.proceed();
this.collaborator.aroundAdviceFired();
return ret;
}
}
|
Collaborator
|
java
|
apache__camel
|
components/camel-aws/camel-aws2-ses/src/main/java/org/apache/camel/component/aws2/ses/Ses2ProducerHealthCheck.java
|
{
"start": 1163,
"end": 2303
}
|
class ____ extends AbstractHealthCheck {
private final Ses2Endpoint ses2Endpoint;
public Ses2ProducerHealthCheck(Ses2Endpoint ses2Endpoint, String clientId) {
super("camel", "producer:aws2-ses-" + clientId);
this.ses2Endpoint = ses2Endpoint;
}
@Override
protected void doCall(HealthCheckResultBuilder builder, Map<String, Object> options) {
Ses2Configuration configuration = ses2Endpoint.getConfiguration();
try {
if (!SesClient.serviceMetadata().regions().contains(Region.of(configuration.getRegion()))) {
builder.message("The service is not supported in this region");
builder.down();
return;
}
SesClient client = ses2Endpoint.getSESClient();
client.getSendStatistics();
} catch (AwsServiceException e) {
builder.message(e.getMessage());
builder.error(e);
builder.down();
} catch (Exception e) {
builder.error(e);
builder.down();
return;
}
builder.up();
}
}
|
Ses2ProducerHealthCheck
|
java
|
elastic__elasticsearch
|
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/filter/PatternRule.java
|
{
"start": 1376,
"end": 4815
}
|
class ____ implements IpFilterRule {

    // Compiled regex built from the "i:" (IP address) entries, or null if none were given.
    private final Pattern ipPattern;
    // Compiled regex built from the "n:" (host name) entries, or null if none were given.
    private final Pattern namePattern;
    // Whether matching addresses are allowed or denied by this rule.
    private final IpFilterRuleType ruleType;
    // True when the pattern contained the special "n:localhost" entry.
    private final boolean localhost;
    // The raw, unparsed pattern string, retained for getPattern().
    private final String pattern;

    /**
     * Instantiates a new pattern rule.
     * <p>
     * The pattern is a comma-separated list of entries; each entry is either
     * {@code n:<hostname glob>}, {@code i:<ip glob>}, or the literal
     * {@code n:localhost}. Globs support {@code *} (any run) and {@code ?}
     * (single character) and are compiled into regular expressions.
     *
     * @param ruleType indicates if this is an allow or block rule
     * @param pattern the filter pattern
     */
    PatternRule(IpFilterRuleType ruleType, String pattern) {
        this.ruleType = ruleType;
        this.pattern = pattern;
        Pattern namePattern = null;
        Pattern ipPattern = null;
        boolean localhost = false;
        if (pattern != null) {
            String[] acls = pattern.split(",");
            // Accumulate alternation regexes separately for IP and name entries.
            String ip = "";
            String name = "";
            for (String c : acls) {
                c = c.trim();
                if ("n:localhost".equals(c)) {
                    localhost = true;
                } else if (c.startsWith("n:")) {
                    name = addRule(name, c.substring(2));
                } else if (c.startsWith("i:")) {
                    ip = addRule(ip, c.substring(2));
                }
            }
            // Only compile when at least one entry of that kind was seen.
            if (ip.length() != 0) {
                ipPattern = Pattern.compile(ip);
            }
            if (name.length() != 0) {
                namePattern = Pattern.compile(name);
            }
        }
        this.ipPattern = ipPattern;
        this.namePattern = namePattern;
        this.localhost = localhost;
    }

    /**
     * returns the pattern.
     *
     * @return the raw pattern string this rule was constructed with (may be null)
     */
    String getPattern() {
        return pattern;
    }

    /**
     * Appends one glob rule to an alternation regex being accumulated in {@code pattern}.
     * Each rule becomes a parenthesized alternative: {@code (r1)|(r2)|...}.
     */
    private static String addRule(String pattern, String rule) {
        if (rule == null || rule.length() == 0) {
            return pattern;
        }
        if (pattern.length() != 0) {
            pattern += "|";
        }
        // Order matters: escape literal dots first so the ".*" / "." inserted for
        // the glob wildcards below is not itself escaped.
        rule = rule.replace(".", "\\.");
        rule = rule.replace("*", ".*");
        rule = rule.replace("?", ".");
        pattern += '(' + rule + ')';
        return pattern;
    }

    /**
     * True if the address is local to this machine: a wildcard/loopback address,
     * or an address bound to one of this host's network interfaces.
     */
    private static boolean isLocalhost(InetAddress address) {
        try {
            return address.isAnyLocalAddress() || address.isLoopbackAddress() || NetworkInterface.getByInetAddress(address) != null;
        } catch (SocketException e) {
            // not defined - ie. it's not a local address
            return false;
        }
    }

    @Override
    public boolean matches(InetSocketAddress remoteAddress) {
        InetAddress inetAddress = remoteAddress.getAddress();
        // Checks run in order: localhost shortcut, then IP regex, then host name regex.
        if (localhost) {
            if (isLocalhost(inetAddress)) {
                return true;
            }
        }
        if (ipPattern != null) {
            String format = NetworkAddress.format(inetAddress);
            if (ipPattern.matcher(format).matches()) {
                return true;
            }
        }
        return checkHostName(inetAddress);
    }

    // NOTE: getHostName() may perform a reverse DNS lookup for the address.
    @SuppressForbidden(reason = "we compare the hostname of the address this is how netty3 did it and we keep it for BWC")
    private boolean checkHostName(InetAddress address) {
        if (namePattern != null) {
            if (namePattern.matcher(address.getHostName()).matches()) {
                return true;
            }
        }
        return false;
    }

    @Override
    public IpFilterRuleType ruleType() {
        return ruleType;
    }

    boolean isLocalhost() {
        return localhost;
    }
}
|
PatternRule
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/index/mapper/FieldFilterMapperPluginTests.java
|
{
"start": 1876,
"end": 13522
}
|
class ____ extends ESSingleNodeTestCase {
@Override
protected Collection<Class<? extends Plugin>> getPlugins() {
    // Install the plugin that registers the field filter under test on the test node.
    return Arrays.asList(FieldFilterPlugin.class);
}
@Before
public void putMappings() {
    // Two indices with identical mappings: "index1" is returned unfiltered,
    // "filtered" has the field filter applied (presumably keyed off the index
    // name by FieldFilterPlugin — defined outside this view).
    assertAcked(indicesAdmin().prepareCreate("index1"));
    assertAcked(indicesAdmin().prepareCreate("filtered"));
    assertAcked(indicesAdmin().preparePutMapping("index1", "filtered").setSource(TEST_ITEM, XContentType.JSON));
}
// Get-mappings API must apply the field filter per index.
public void testGetMappings() {
    GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT).get();
    assertExpectedMappings(getMappingsResponse.mappings());
}
// Get-index API (mappings feature only) must apply the same filtering as get-mappings.
public void testGetIndex() {
    GetIndexResponse getIndexResponse = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT)
        .setFeatures(GetIndexRequest.Feature.MAPPINGS)
        .get();
    assertExpectedMappings(getIndexResponse.mappings());
}
// Get-field-mappings API returns all flat fields for the unfiltered index and
// only the visible subset for the filtered one.
public void testGetFieldMappings() {
    GetFieldMappingsResponse getFieldMappingsResponse = indicesAdmin().prepareGetFieldMappings().setFields("*").get();
    Map<String, Map<String, GetFieldMappingsResponse.FieldMappingMetadata>> mappings = getFieldMappingsResponse.mappings();
    assertEquals(2, mappings.size());
    assertFieldMappings(mappings.get("index1"), ALL_FLAT_FIELDS);
    assertFieldMappings(mappings.get("filtered"), FILTERED_FLAT_FIELDS);
    // double check that submitting the filtered mappings to an unfiltered index leads to the same get field mappings output
    // as the one coming from a filtered index with same mappings
    GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "filtered").get();
    MappingMetadata filtered = getMappingsResponse.getMappings().get("filtered");
    assertAcked(indicesAdmin().prepareCreate("test").setMapping(filtered.getSourceAsMap()));
    GetFieldMappingsResponse response = indicesAdmin().prepareGetFieldMappings("test").setFields("*").get();
    assertEquals(1, response.mappings().size());
    assertFieldMappings(response.mappings().get("test"), FILTERED_FLAT_FIELDS);
}
// Requesting a field that does not exist yields an entry for the index with no field mappings.
public void testGetNonExistentFieldMapping() {
    GetFieldMappingsResponse response = indicesAdmin().prepareGetFieldMappings("index1").setFields("non-existent").get();
    Map<String, Map<String, GetFieldMappingsResponse.FieldMappingMetadata>> mappings = response.mappings();
    assertEquals(1, mappings.size());
    Map<String, GetFieldMappingsResponse.FieldMappingMetadata> fieldmapping = mappings.get("index1");
    assertEquals(0, fieldmapping.size());
}
// Field-caps API: the filtered index exposes only visible flat fields, while
// object (container) fields remain visible in both indices.
public void testFieldCapabilities() {
    List<String> allFields = new ArrayList<>(ALL_FLAT_FIELDS);
    allFields.addAll(ALL_OBJECT_FIELDS);
    FieldCapabilitiesResponse index1 = client().fieldCaps(new FieldCapabilitiesRequest().fields("*").indices("index1")).actionGet();
    assertFieldCaps(index1, allFields);
    FieldCapabilitiesResponse filtered = client().fieldCaps(new FieldCapabilitiesRequest().fields("*").indices("filtered")).actionGet();
    List<String> filteredFields = new ArrayList<>(FILTERED_FLAT_FIELDS);
    filteredFields.addAll(ALL_OBJECT_FIELDS);
    assertFieldCaps(filtered, filteredFields);
    // double check that submitting the filtered mappings to an unfiltered index leads to the same field_caps output
    // as the one coming from a filtered index with same mappings
    GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "filtered").get();
    MappingMetadata filteredMapping = getMappingsResponse.getMappings().get("filtered");
    assertAcked(indicesAdmin().prepareCreate("test").setMapping(filteredMapping.getSourceAsMap()));
    FieldCapabilitiesResponse test = client().fieldCaps(new FieldCapabilitiesRequest().fields("*").indices("test")).actionGet();
    // properties.value is an object field in the new mapping
    filteredFields.add("properties.value");
    assertFieldCaps(test, filteredFields);
}
/**
 * Asserts the field-caps response contains exactly the built-in metadata fields
 * plus {@code expectedFields} — nothing missing, nothing extra.
 */
private static void assertFieldCaps(FieldCapabilitiesResponse fieldCapabilitiesResponse, Collection<String> expectedFields) {
    // Copy so expected entries can be removed; anything left over is unexpected.
    Map<String, Map<String, FieldCapabilities>> responseMap = new HashMap<>(fieldCapabilitiesResponse.get());
    for (String field : builtInMetadataFields()) {
        Map<String, FieldCapabilities> remove = responseMap.remove(field);
        assertNotNull(" expected field [" + field + "] not found", remove);
    }
    for (String field : expectedFields) {
        Map<String, FieldCapabilities> remove = responseMap.remove(field);
        assertNotNull(" expected field [" + field + "] not found", remove);
    }
    assertEquals("Some unexpected fields were returned: " + responseMap.keySet(), 0, responseMap.size());
}
/**
 * Built-in metadata fields expected in responses for these (non-time-series) indices.
 */
private static Set<String> builtInMetadataFields() {
    Set<String> builtInMetadataFields = new HashSet<>(IndicesModule.getBuiltInMetadataFields());
    // Index is not a time-series index, and it will not contain _tsid and _ts_routing_hash fields.
    builtInMetadataFields.remove(TimeSeriesIdFieldMapper.NAME);
    builtInMetadataFields.remove(TimeSeriesRoutingHashFieldMapper.NAME);
    return builtInMetadataFields;
}
/**
 * Asserts the per-index field-mappings map contains exactly the built-in metadata
 * fields plus {@code expectedFields} — nothing missing, nothing extra.
 */
private static void assertFieldMappings(
    Map<String, GetFieldMappingsResponse.FieldMappingMetadata> actual,
    Collection<String> expectedFields
) {
    // Copy so expected entries can be removed; anything left over is unexpected.
    Map<String, GetFieldMappingsResponse.FieldMappingMetadata> fields = new HashMap<>(actual);
    for (String field : builtInMetadataFields()) {
        GetFieldMappingsResponse.FieldMappingMetadata fieldMappingMetadata = fields.remove(field);
        assertNotNull(" expected field [" + field + "] not found", fieldMappingMetadata);
    }
    for (String field : expectedFields) {
        GetFieldMappingsResponse.FieldMappingMetadata fieldMappingMetadata = fields.remove(field);
        assertNotNull("expected field [" + field + "] not found", fieldMappingMetadata);
    }
    assertEquals("Some unexpected fields were returned: " + fields.keySet(), 0, fields.size());
}
// Checks both indices: "index1" unfiltered, "filtered" filtered, and that the
// filtered mapping source is still a valid mapping.
private void assertExpectedMappings(Map<String, MappingMetadata> mappings) {
    assertEquals(2, mappings.size());
    assertNotFiltered(mappings.get("index1"));
    MappingMetadata filtered = mappings.get("filtered");
    assertFiltered(filtered);
    assertMappingsAreValid(filtered.getSourceAsMap());
}
private void assertMappingsAreValid(Map<String, Object> sourceAsMap) {
    // check that the returned filtered mappings are still valid mappings by submitting them and retrieving them back
    assertAcked(indicesAdmin().prepareCreate("test").setMapping(sourceAsMap));
    GetMappingsResponse testMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "test").get();
    assertEquals(1, testMappingsResponse.getMappings().size());
    // the mappings are returned unfiltered for this index, yet they are the same as the previous ones that were returned filtered
    assertFiltered(testMappingsResponse.getMappings().get("test"));
}
/**
 * Asserts the FILTERED view of the mapping: only "*_visible" leaves remain,
 * while container objects survive with the hidden children removed.
 */
@SuppressWarnings("unchecked")
private static void assertFiltered(MappingMetadata mappingMetadata) {
    assertNotNull(mappingMetadata);
    Map<String, Object> sourceAsMap = mappingMetadata.getSourceAsMap();
    assertEquals(4, sourceAsMap.size());
    assertTrue(sourceAsMap.containsKey("_meta"));
    assertTrue(sourceAsMap.containsKey("_routing"));
    assertTrue(sourceAsMap.containsKey("_source"));
    Map<String, Object> typeProperties = (Map<String, Object>) sourceAsMap.get("properties");
    assertEquals(4, typeProperties.size());
    // "name" keeps only its visible leaf (cf. assertNotFiltered: "first" is gone).
    Map<String, Object> name = (Map<String, Object>) typeProperties.get("name");
    assertEquals(1, name.size());
    Map<String, Object> nameProperties = (Map<String, Object>) name.get("properties");
    assertEquals(1, nameProperties.size());
    assertLeafs(nameProperties, "last_visible");
    assertLeafs(typeProperties, "age_visible");
    // "address" keeps only "location_visible" ("street" and "location" are filtered out).
    Map<String, Object> address = (Map<String, Object>) typeProperties.get("address");
    assertNotNull(address);
    assertEquals(1, address.size());
    Map<String, Object> addressProperties = (Map<String, Object>) address.get("properties");
    assertNotNull(addressProperties);
    assertEquals(1, addressProperties.size());
    assertLeafs(addressProperties, "location_visible");
    // "properties" is a nested field; its type attribute survives filtering.
    Map<String, Object> properties = (Map<String, Object>) typeProperties.get("properties");
    assertNotNull(properties);
    assertEquals(2, properties.size());
    assertEquals("nested", properties.get("type"));
    Map<String, Object> propertiesProperties = (Map<String, Object>) properties.get("properties");
    assertNotNull(propertiesProperties);
    assertEquals(2, propertiesProperties.size());
    assertLeafs(propertiesProperties, "key_visible");
    // "value" survives as a container because its "keyword_visible" sub-field is visible.
    Map<String, Object> value = (Map<String, Object>) propertiesProperties.get("value");
    assertNotNull(value);
    assertEquals(1, value.size());
    Map<String, Object> valueProperties = (Map<String, Object>) value.get("properties");
    assertNotNull(valueProperties);
    assertEquals(1, valueProperties.size());
    assertLeafs(valueProperties, "keyword_visible");
}
/**
 * Asserts the UNFILTERED view of the mapping: both hidden and "*_visible"
 * fields are present (the superset of what assertFiltered expects).
 */
@SuppressWarnings("unchecked")
private static void assertNotFiltered(MappingMetadata mappingMetadata) {
    assertNotNull(mappingMetadata);
    Map<String, Object> sourceAsMap = mappingMetadata.getSourceAsMap();
    assertEquals(4, sourceAsMap.size());
    assertTrue(sourceAsMap.containsKey("_meta"));
    assertTrue(sourceAsMap.containsKey("_routing"));
    assertTrue(sourceAsMap.containsKey("_source"));
    Map<String, Object> typeProperties = (Map<String, Object>) sourceAsMap.get("properties");
    assertEquals(5, typeProperties.size());
    // "name" keeps both its hidden and visible leaves.
    Map<String, Object> name = (Map<String, Object>) typeProperties.get("name");
    assertEquals(1, name.size());
    Map<String, Object> nameProperties = (Map<String, Object>) name.get("properties");
    assertEquals(2, nameProperties.size());
    assertLeafs(nameProperties, "first", "last_visible");
    assertLeafs(typeProperties, "birth", "age_visible");
    // "address" keeps all three children, hidden ones included.
    Map<String, Object> address = (Map<String, Object>) typeProperties.get("address");
    assertNotNull(address);
    assertEquals(1, address.size());
    Map<String, Object> addressProperties = (Map<String, Object>) address.get("properties");
    assertNotNull(addressProperties);
    assertEquals(3, addressProperties.size());
    assertLeafs(addressProperties, "street", "location", "location_visible");
    Map<String, Object> properties = (Map<String, Object>) typeProperties.get("properties");
    assertNotNull(properties);
    assertEquals(2, properties.size());
    assertTrue(properties.containsKey("type"));
    Map<String, Object> propertiesProperties = (Map<String, Object>) properties.get("properties");
    assertNotNull(propertiesProperties);
    assertEquals(2, propertiesProperties.size());
    assertMultiField(propertiesProperties, "key_visible", "keyword");
    assertMultiField(propertiesProperties, "value", "keyword_visible");
}
public static
|
FieldFilterMapperPluginTests
|
java
|
elastic__elasticsearch
|
libs/h3/src/main/java/org/elasticsearch/h3/BaseCells.java
|
{
"start": 1077,
"end": 39990
}
|
class ____ {
// Static data describing one resolution-0 base cell; see the baseCellData table below.
private record BaseCellData(
    int homeFace, // "home" face and normalized ijk coordinates on that face
    int homeI,
    int homeJ,
    int homeK,
    boolean isPentagon, // is this base cell a pentagon?
    int[] cwOffsetPent // if a pentagon, what are its two clockwise offset
) {}
/**
* Resolution 0 base cell data table.
* <p>
* For each base cell, gives the "home" face and ijk+ coordinates on that face,
* whether or not the base cell is a pentagon. Additionally, if the base cell
* is a pentagon, the two cw offset rotation adjacent faces are given (-1
* indicates that no cw offset rotation faces exist for this base cell).
*/
private static final BaseCellData[] baseCellData = new BaseCellData[] {
new BaseCellData(1, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 0
new BaseCellData(2, 1, 1, 0, false, new int[] { 0, 0 }), // base cell 1
new BaseCellData(1, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 2
new BaseCellData(2, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 3
new BaseCellData(0, 2, 0, 0, true, new int[] { -1, -1 }), // base cell 4
new BaseCellData(1, 1, 1, 0, false, new int[] { 0, 0 }), // base cell 5
new BaseCellData(1, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 6
new BaseCellData(2, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 7
new BaseCellData(0, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 8
new BaseCellData(2, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 9
new BaseCellData(1, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 10
new BaseCellData(1, 0, 1, 1, false, new int[] { 0, 0 }), // base cell 11
new BaseCellData(3, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 12
new BaseCellData(3, 1, 1, 0, false, new int[] { 0, 0 }), // base cell 13
new BaseCellData(11, 2, 0, 0, true, new int[] { 2, 6 }), // base cell 14
new BaseCellData(4, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 15
new BaseCellData(0, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 16
new BaseCellData(6, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 17
new BaseCellData(0, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 18
new BaseCellData(2, 0, 1, 1, false, new int[] { 0, 0 }), // base cell 19
new BaseCellData(7, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 20
new BaseCellData(2, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 21
new BaseCellData(0, 1, 1, 0, false, new int[] { 0, 0 }), // base cell 22
new BaseCellData(6, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 23
new BaseCellData(10, 2, 0, 0, true, new int[] { 1, 5 }), // base cell 24
new BaseCellData(6, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 25
new BaseCellData(3, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 26
new BaseCellData(11, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 27
new BaseCellData(4, 1, 1, 0, false, new int[] { 0, 0 }), // base cell 28
new BaseCellData(3, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 29
new BaseCellData(0, 0, 1, 1, false, new int[] { 0, 0 }), // base cell 30
new BaseCellData(4, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 31
new BaseCellData(5, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 32
new BaseCellData(0, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 33
new BaseCellData(7, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 34
new BaseCellData(11, 1, 1, 0, false, new int[] { 0, 0 }), // base cell 35
new BaseCellData(7, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 36
new BaseCellData(10, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 37
new BaseCellData(12, 2, 0, 0, true, new int[] { 3, 7 }), // base cell 38
new BaseCellData(6, 1, 0, 1, false, new int[] { 0, 0 }), // base cell 39
new BaseCellData(7, 1, 0, 1, false, new int[] { 0, 0 }), // base cell 40
new BaseCellData(4, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 41
new BaseCellData(3, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 42
new BaseCellData(3, 0, 1, 1, false, new int[] { 0, 0 }), // base cell 43
new BaseCellData(4, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 44
new BaseCellData(6, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 45
new BaseCellData(11, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 46
new BaseCellData(8, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 47
new BaseCellData(5, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 48
new BaseCellData(14, 2, 0, 0, true, new int[] { 0, 9 }), // base cell 49
new BaseCellData(5, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 50
new BaseCellData(12, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 51
new BaseCellData(10, 1, 1, 0, false, new int[] { 0, 0 }), // base cell 52
new BaseCellData(4, 0, 1, 1, false, new int[] { 0, 0 }), // base cell 53
new BaseCellData(12, 1, 1, 0, false, new int[] { 0, 0 }), // base cell 54
new BaseCellData(7, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 55
new BaseCellData(11, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 56
new BaseCellData(10, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 57
new BaseCellData(13, 2, 0, 0, true, new int[] { 4, 8 }), // base cell 58
new BaseCellData(10, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 59
new BaseCellData(11, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 60
new BaseCellData(9, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 61
new BaseCellData(8, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 62
new BaseCellData(6, 2, 0, 0, true, new int[] { 11, 15 }), // base cell 63
new BaseCellData(8, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 64
new BaseCellData(9, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 65
new BaseCellData(14, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 66
new BaseCellData(5, 1, 0, 1, false, new int[] { 0, 0 }), // base cell 67
new BaseCellData(16, 0, 1, 1, false, new int[] { 0, 0 }), // base cell 68
new BaseCellData(8, 1, 0, 1, false, new int[] { 0, 0 }), // base cell 69
new BaseCellData(5, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 70
new BaseCellData(12, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 71
new BaseCellData(7, 2, 0, 0, true, new int[] { 12, 16 }), // base cell 72
new BaseCellData(12, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 73
new BaseCellData(10, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 74
new BaseCellData(9, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 75
new BaseCellData(13, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 76
new BaseCellData(16, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 77
new BaseCellData(15, 0, 1, 1, false, new int[] { 0, 0 }), // base cell 78
new BaseCellData(15, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 79
new BaseCellData(16, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 80
new BaseCellData(14, 1, 1, 0, false, new int[] { 0, 0 }), // base cell 81
new BaseCellData(13, 1, 1, 0, false, new int[] { 0, 0 }), // base cell 82
new BaseCellData(5, 2, 0, 0, true, new int[] { 10, 19 }), // base cell 83
new BaseCellData(8, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 84
new BaseCellData(14, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 85
new BaseCellData(9, 1, 0, 1, false, new int[] { 0, 0 }), // base cell 86
new BaseCellData(14, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 87
new BaseCellData(17, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 88
new BaseCellData(12, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 89
new BaseCellData(16, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 90
new BaseCellData(17, 0, 1, 1, false, new int[] { 0, 0 }), // base cell 91
new BaseCellData(15, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 92
new BaseCellData(16, 1, 0, 1, false, new int[] { 0, 0 }), // base cell 93
new BaseCellData(9, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 94
new BaseCellData(15, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 95
new BaseCellData(13, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 96
new BaseCellData(8, 2, 0, 0, true, new int[] { 13, 17 }), // base cell 97
new BaseCellData(13, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 98
new BaseCellData(17, 1, 0, 1, false, new int[] { 0, 0 }), // base cell 99
new BaseCellData(19, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 100
new BaseCellData(14, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 101
new BaseCellData(19, 0, 1, 1, false, new int[] { 0, 0 }), // base cell 102
new BaseCellData(17, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 103
new BaseCellData(13, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 104
new BaseCellData(17, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 105
new BaseCellData(16, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 106
new BaseCellData(9, 2, 0, 0, true, new int[] { 14, 18 }), // base cell 107
new BaseCellData(15, 1, 0, 1, false, new int[] { 0, 0 }), // base cell 108
new BaseCellData(15, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 109
new BaseCellData(18, 0, 1, 1, false, new int[] { 0, 0 }), // base cell 110
new BaseCellData(18, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 111
new BaseCellData(19, 0, 0, 1, false, new int[] { 0, 0 }), // base cell 112
new BaseCellData(17, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 113
new BaseCellData(19, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 114
new BaseCellData(18, 0, 1, 0, false, new int[] { 0, 0 }), // base cell 115
new BaseCellData(18, 1, 0, 1, false, new int[] { 0, 0 }), // base cell 116
new BaseCellData(19, 2, 0, 0, true, new int[] { -1, -1 }), // base cell 117
new BaseCellData(19, 1, 0, 0, false, new int[] { 0, 0 }), // base cell 118
new BaseCellData(18, 0, 0, 0, false, new int[] { 0, 0 }), // base cell 119
new BaseCellData(19, 1, 0, 1, false, new int[] { 0, 0 }), // base cell 120
new BaseCellData(18, 1, 0, 0, false, new int[] { 0, 0 }) // base cell 121
};
/**
* base cell at a given ijk and required rotations into its system
*/
/**
 * base cell at a given ijk and required rotations into its system
 */
record BaseCellRotation(
    int baseCell, // base cell number
    int ccwRot60 // number of ccw 60 degree rotations relative to current
) {}
/** @brief Resolution 0 base cell lookup table for each face.
*
* Given the face number and a resolution 0 ijk+ coordinate in that face's
* face-centered ijk coordinate system, gives the base cell located at that
* coordinate and the number of 60 ccw rotations to rotate into that base
* cell's orientation.
*
* Valid lookup coordinates are from (0, 0, 0) to (2, 2, 2).
*
* This table can be accessed using the functions `_faceIjkToBaseCell` and
* `_faceIjkToBaseCellCCWrot60`
*/
private static final BaseCellRotation[][][][] faceIjkBaseCells = new BaseCellRotation[][][][] {
{// face 0
{
// i 0
{ new BaseCellRotation(16, 0), new BaseCellRotation(18, 0), new BaseCellRotation(24, 0) }, // j 0
{ new BaseCellRotation(33, 0), new BaseCellRotation(30, 0), new BaseCellRotation(32, 3) }, // j 1
{ new BaseCellRotation(49, 1), new BaseCellRotation(48, 3), new BaseCellRotation(50, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(8, 0), new BaseCellRotation(5, 5), new BaseCellRotation(10, 5) }, // j 0
{ new BaseCellRotation(22, 0), new BaseCellRotation(16, 0), new BaseCellRotation(18, 0) }, // j 1
{ new BaseCellRotation(41, 1), new BaseCellRotation(33, 0), new BaseCellRotation(30, 0) } // j 2
},
{
// i 2
{ new BaseCellRotation(4, 0), new BaseCellRotation(0, 5), new BaseCellRotation(2, 5) }, // j 0
{ new BaseCellRotation(15, 1), new BaseCellRotation(8, 0), new BaseCellRotation(5, 5) }, // j 1
{ new BaseCellRotation(31, 1), new BaseCellRotation(22, 0), new BaseCellRotation(16, 0) } // j 2
} },
{// face 1
{
// i 0
{ new BaseCellRotation(2, 0), new BaseCellRotation(6, 0), new BaseCellRotation(14, 0) }, // j 0
{ new BaseCellRotation(10, 0), new BaseCellRotation(11, 0), new BaseCellRotation(17, 3) }, // j 1
{ new BaseCellRotation(24, 1), new BaseCellRotation(23, 3), new BaseCellRotation(25, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(0, 0), new BaseCellRotation(1, 5), new BaseCellRotation(9, 5) }, // j 0
{ new BaseCellRotation(5, 0), new BaseCellRotation(2, 0), new BaseCellRotation(6, 0) }, // j 1
{ new BaseCellRotation(18, 1), new BaseCellRotation(10, 0), new BaseCellRotation(11, 0) } // j 2
},
{
// i 2
{ new BaseCellRotation(4, 1), new BaseCellRotation(3, 5), new BaseCellRotation(7, 5) }, // j 0
{ new BaseCellRotation(8, 1), new BaseCellRotation(0, 0), new BaseCellRotation(1, 5) }, // j 1
{ new BaseCellRotation(16, 1), new BaseCellRotation(5, 0), new BaseCellRotation(2, 0) } // j 2
} },
{// face 2
{
// i 0
{ new BaseCellRotation(7, 0), new BaseCellRotation(21, 0), new BaseCellRotation(38, 0) }, // j 0
{ new BaseCellRotation(9, 0), new BaseCellRotation(19, 0), new BaseCellRotation(34, 3) }, // j 1
{ new BaseCellRotation(14, 1), new BaseCellRotation(20, 3), new BaseCellRotation(36, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(3, 0), new BaseCellRotation(13, 5), new BaseCellRotation(29, 5) }, // j 0
{ new BaseCellRotation(1, 0), new BaseCellRotation(7, 0), new BaseCellRotation(21, 0) }, // j 1
{ new BaseCellRotation(6, 1), new BaseCellRotation(9, 0), new BaseCellRotation(19, 0) } // j 2
},
{
// i 2
{ new BaseCellRotation(4, 2), new BaseCellRotation(12, 5), new BaseCellRotation(26, 5) }, // j 0
{ new BaseCellRotation(0, 1), new BaseCellRotation(3, 0), new BaseCellRotation(13, 5) }, // j 1
{ new BaseCellRotation(2, 1), new BaseCellRotation(1, 0), new BaseCellRotation(7, 0) } // j 2
} },
{// face 3
{
// i 0
{ new BaseCellRotation(26, 0), new BaseCellRotation(42, 0), new BaseCellRotation(58, 0) }, // j 0
{ new BaseCellRotation(29, 0), new BaseCellRotation(43, 0), new BaseCellRotation(62, 3) }, // j 1
{ new BaseCellRotation(38, 1), new BaseCellRotation(47, 3), new BaseCellRotation(64, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(12, 0), new BaseCellRotation(28, 5), new BaseCellRotation(44, 5) }, // j 0
{ new BaseCellRotation(13, 0), new BaseCellRotation(26, 0), new BaseCellRotation(42, 0) }, // j 1
{ new BaseCellRotation(21, 1), new BaseCellRotation(29, 0), new BaseCellRotation(43, 0) } // j 2
},
{
// i 2
{ new BaseCellRotation(4, 3), new BaseCellRotation(15, 5), new BaseCellRotation(31, 5) }, // j 0
{ new BaseCellRotation(3, 1), new BaseCellRotation(12, 0), new BaseCellRotation(28, 5) }, // j 1
{ new BaseCellRotation(7, 1), new BaseCellRotation(13, 0), new BaseCellRotation(26, 0) } // j 2
} },
{// face 4
{
// i 0
{ new BaseCellRotation(31, 0), new BaseCellRotation(41, 0), new BaseCellRotation(49, 0) }, // j 0
{ new BaseCellRotation(44, 0), new BaseCellRotation(53, 0), new BaseCellRotation(61, 3) }, // j 1
{ new BaseCellRotation(58, 1), new BaseCellRotation(65, 3), new BaseCellRotation(75, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(15, 0), new BaseCellRotation(22, 5), new BaseCellRotation(33, 5) }, // j 0
{ new BaseCellRotation(28, 0), new BaseCellRotation(31, 0), new BaseCellRotation(41, 0) }, // j 1
{ new BaseCellRotation(42, 1), new BaseCellRotation(44, 0), new BaseCellRotation(53, 0) } // j 2
},
{
// i 2
{ new BaseCellRotation(4, 4), new BaseCellRotation(8, 5), new BaseCellRotation(16, 5) }, // j 0
{ new BaseCellRotation(12, 1), new BaseCellRotation(15, 0), new BaseCellRotation(22, 5) }, // j 1
{ new BaseCellRotation(26, 1), new BaseCellRotation(28, 0), new BaseCellRotation(31, 0) } // j 2
} },
{// face 5
{
// i 0
{ new BaseCellRotation(50, 0), new BaseCellRotation(48, 0), new BaseCellRotation(49, 3) }, // j 0
{ new BaseCellRotation(32, 0), new BaseCellRotation(30, 3), new BaseCellRotation(33, 3) }, // j 1
{ new BaseCellRotation(24, 3), new BaseCellRotation(18, 3), new BaseCellRotation(16, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(70, 0), new BaseCellRotation(67, 0), new BaseCellRotation(66, 3) }, // j 0
{ new BaseCellRotation(52, 3), new BaseCellRotation(50, 0), new BaseCellRotation(48, 0) }, // j 1
{ new BaseCellRotation(37, 3), new BaseCellRotation(32, 0), new BaseCellRotation(30, 3) } // j 2
},
{
// i 2
{ new BaseCellRotation(83, 0), new BaseCellRotation(87, 3), new BaseCellRotation(85, 3) }, // j 0
{ new BaseCellRotation(74, 3), new BaseCellRotation(70, 0), new BaseCellRotation(67, 0) }, // j 1
{ new BaseCellRotation(57, 1), new BaseCellRotation(52, 3), new BaseCellRotation(50, 0) } // j 2
} },
{// face 6
{
// i 0
{ new BaseCellRotation(25, 0), new BaseCellRotation(23, 0), new BaseCellRotation(24, 3) }, // j 0
{ new BaseCellRotation(17, 0), new BaseCellRotation(11, 3), new BaseCellRotation(10, 3) }, // j 1
{ new BaseCellRotation(14, 3), new BaseCellRotation(6, 3), new BaseCellRotation(2, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(45, 0), new BaseCellRotation(39, 0), new BaseCellRotation(37, 3) }, // j 0
{ new BaseCellRotation(35, 3), new BaseCellRotation(25, 0), new BaseCellRotation(23, 0) }, // j 1
{ new BaseCellRotation(27, 3), new BaseCellRotation(17, 0), new BaseCellRotation(11, 3) } // j 2
},
{
// i 2
{ new BaseCellRotation(63, 0), new BaseCellRotation(59, 3), new BaseCellRotation(57, 3) }, // j 0
{ new BaseCellRotation(56, 3), new BaseCellRotation(45, 0), new BaseCellRotation(39, 0) }, // j 1
{ new BaseCellRotation(46, 3), new BaseCellRotation(35, 3), new BaseCellRotation(25, 0) } // j 2
} },
{// face 7
{
// i 0
{ new BaseCellRotation(36, 0), new BaseCellRotation(20, 0), new BaseCellRotation(14, 3) }, // j 0
{ new BaseCellRotation(34, 0), new BaseCellRotation(19, 3), new BaseCellRotation(9, 3) }, // j 1
{ new BaseCellRotation(38, 3), new BaseCellRotation(21, 3), new BaseCellRotation(7, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(55, 0), new BaseCellRotation(40, 0), new BaseCellRotation(27, 3) }, // j 0
{ new BaseCellRotation(54, 3), new BaseCellRotation(36, 0), new BaseCellRotation(20, 0) }, // j 1
{ new BaseCellRotation(51, 3), new BaseCellRotation(34, 0), new BaseCellRotation(19, 3) } // j 2
},
{
// i 2
{ new BaseCellRotation(72, 0), new BaseCellRotation(60, 3), new BaseCellRotation(46, 3) }, // j 0
{ new BaseCellRotation(73, 3), new BaseCellRotation(55, 0), new BaseCellRotation(40, 0) }, // j 1
{ new BaseCellRotation(71, 3), new BaseCellRotation(54, 3), new BaseCellRotation(36, 0) } // j 2
} },
{// face 8
{
// i 0
{ new BaseCellRotation(64, 0), new BaseCellRotation(47, 0), new BaseCellRotation(38, 3) }, // j 0
{ new BaseCellRotation(62, 0), new BaseCellRotation(43, 3), new BaseCellRotation(29, 3) }, // j 1
{ new BaseCellRotation(58, 3), new BaseCellRotation(42, 3), new BaseCellRotation(26, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(84, 0), new BaseCellRotation(69, 0), new BaseCellRotation(51, 3) }, // j 0
{ new BaseCellRotation(82, 3), new BaseCellRotation(64, 0), new BaseCellRotation(47, 0) }, // j 1
{ new BaseCellRotation(76, 3), new BaseCellRotation(62, 0), new BaseCellRotation(43, 3) } // j 2
},
{
// i 2
{ new BaseCellRotation(97, 0), new BaseCellRotation(89, 3), new BaseCellRotation(71, 3) }, // j 0
{ new BaseCellRotation(98, 3), new BaseCellRotation(84, 0), new BaseCellRotation(69, 0) }, // j 1
{ new BaseCellRotation(96, 3), new BaseCellRotation(82, 3), new BaseCellRotation(64, 0) } // j 2
} },
{// face 9
{
// i 0
{ new BaseCellRotation(75, 0), new BaseCellRotation(65, 0), new BaseCellRotation(58, 3) }, // j 0
{ new BaseCellRotation(61, 0), new BaseCellRotation(53, 3), new BaseCellRotation(44, 3) }, // j 1
{ new BaseCellRotation(49, 3), new BaseCellRotation(41, 3), new BaseCellRotation(31, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(94, 0), new BaseCellRotation(86, 0), new BaseCellRotation(76, 3) }, // j 0
{ new BaseCellRotation(81, 3), new BaseCellRotation(75, 0), new BaseCellRotation(65, 0) }, // j 1
{ new BaseCellRotation(66, 3), new BaseCellRotation(61, 0), new BaseCellRotation(53, 3) } // j 2
},
{
// i 2
{ new BaseCellRotation(107, 0), new BaseCellRotation(104, 3), new BaseCellRotation(96, 3) }, // j 0
{ new BaseCellRotation(101, 3), new BaseCellRotation(94, 0), new BaseCellRotation(86, 0) }, // j 1
{ new BaseCellRotation(85, 3), new BaseCellRotation(81, 3), new BaseCellRotation(75, 0) } // j 2
} },
{// face 10
{
// i 0
{ new BaseCellRotation(57, 0), new BaseCellRotation(59, 0), new BaseCellRotation(63, 3) }, // j 0
{ new BaseCellRotation(74, 0), new BaseCellRotation(78, 3), new BaseCellRotation(79, 3) }, // j 1
{ new BaseCellRotation(83, 3), new BaseCellRotation(92, 3), new BaseCellRotation(95, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(37, 0), new BaseCellRotation(39, 3), new BaseCellRotation(45, 3) }, // j 0
{ new BaseCellRotation(52, 0), new BaseCellRotation(57, 0), new BaseCellRotation(59, 0) }, // j 1
{ new BaseCellRotation(70, 3), new BaseCellRotation(74, 0), new BaseCellRotation(78, 3) } // j 2
},
{
// i 2
{ new BaseCellRotation(24, 0), new BaseCellRotation(23, 3), new BaseCellRotation(25, 3) }, // j 0
{ new BaseCellRotation(32, 3), new BaseCellRotation(37, 0), new BaseCellRotation(39, 3) }, // j 1
{ new BaseCellRotation(50, 3), new BaseCellRotation(52, 0), new BaseCellRotation(57, 0) } // j 2
} },
{// face 11
{
// i 0
{ new BaseCellRotation(46, 0), new BaseCellRotation(60, 0), new BaseCellRotation(72, 3) }, // j 0
{ new BaseCellRotation(56, 0), new BaseCellRotation(68, 3), new BaseCellRotation(80, 3) }, // j 1
{ new BaseCellRotation(63, 3), new BaseCellRotation(77, 3), new BaseCellRotation(90, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(27, 0), new BaseCellRotation(40, 3), new BaseCellRotation(55, 3) }, // j 0
{ new BaseCellRotation(35, 0), new BaseCellRotation(46, 0), new BaseCellRotation(60, 0) }, // j 1
{ new BaseCellRotation(45, 3), new BaseCellRotation(56, 0), new BaseCellRotation(68, 3) } // j 2
},
{
// i 2
{ new BaseCellRotation(14, 0), new BaseCellRotation(20, 3), new BaseCellRotation(36, 3) }, // j 0
{ new BaseCellRotation(17, 3), new BaseCellRotation(27, 0), new BaseCellRotation(40, 3) }, // j 1
{ new BaseCellRotation(25, 3), new BaseCellRotation(35, 0), new BaseCellRotation(46, 0) } // j 2
} },
{// face 12
{
// i 0
{ new BaseCellRotation(71, 0), new BaseCellRotation(89, 0), new BaseCellRotation(97, 3) }, // j 0
{ new BaseCellRotation(73, 0), new BaseCellRotation(91, 3), new BaseCellRotation(103, 3) }, // j 1
{ new BaseCellRotation(72, 3), new BaseCellRotation(88, 3), new BaseCellRotation(105, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(51, 0), new BaseCellRotation(69, 3), new BaseCellRotation(84, 3) }, // j 0
{ new BaseCellRotation(54, 0), new BaseCellRotation(71, 0), new BaseCellRotation(89, 0) }, // j 1
{ new BaseCellRotation(55, 3), new BaseCellRotation(73, 0), new BaseCellRotation(91, 3) } // j 2
},
{
// i 2
{ new BaseCellRotation(38, 0), new BaseCellRotation(47, 3), new BaseCellRotation(64, 3) }, // j 0
{ new BaseCellRotation(34, 3), new BaseCellRotation(51, 0), new BaseCellRotation(69, 3) }, // j 1
{ new BaseCellRotation(36, 3), new BaseCellRotation(54, 0), new BaseCellRotation(71, 0) } // j 2
} },
{// face 13
{
// i 0
{ new BaseCellRotation(96, 0), new BaseCellRotation(104, 0), new BaseCellRotation(107, 3) }, // j 0
{ new BaseCellRotation(98, 0), new BaseCellRotation(110, 3), new BaseCellRotation(115, 3) }, // j 1
{ new BaseCellRotation(97, 3), new BaseCellRotation(111, 3), new BaseCellRotation(119, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(76, 0), new BaseCellRotation(86, 3), new BaseCellRotation(94, 3) }, // j 0
{ new BaseCellRotation(82, 0), new BaseCellRotation(96, 0), new BaseCellRotation(104, 0) }, // j 1
{ new BaseCellRotation(84, 3), new BaseCellRotation(98, 0), new BaseCellRotation(110, 3) } // j 2
},
{
// i 2
{ new BaseCellRotation(58, 0), new BaseCellRotation(65, 3), new BaseCellRotation(75, 3) }, // j 0
{ new BaseCellRotation(62, 3), new BaseCellRotation(76, 0), new BaseCellRotation(86, 3) }, // j 1
{ new BaseCellRotation(64, 3), new BaseCellRotation(82, 0), new BaseCellRotation(96, 0) } // j 2
} },
{// face 14
{
// i 0
{ new BaseCellRotation(85, 0), new BaseCellRotation(87, 0), new BaseCellRotation(83, 3) }, // j 0
{ new BaseCellRotation(101, 0), new BaseCellRotation(102, 3), new BaseCellRotation(100, 3) }, // j 1
{ new BaseCellRotation(107, 3), new BaseCellRotation(112, 3), new BaseCellRotation(114, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(66, 0), new BaseCellRotation(67, 3), new BaseCellRotation(70, 3) }, // j 0
{ new BaseCellRotation(81, 0), new BaseCellRotation(85, 0), new BaseCellRotation(87, 0) }, // j 1
{ new BaseCellRotation(94, 3), new BaseCellRotation(101, 0), new BaseCellRotation(102, 3) } // j 2
},
{
// i 2
{ new BaseCellRotation(49, 0), new BaseCellRotation(48, 3), new BaseCellRotation(50, 3) }, // j 0
{ new BaseCellRotation(61, 3), new BaseCellRotation(66, 0), new BaseCellRotation(67, 3) }, // j 1
{ new BaseCellRotation(75, 3), new BaseCellRotation(81, 0), new BaseCellRotation(85, 0) } // j 2
} },
{// face 15
{
// i 0
{ new BaseCellRotation(95, 0), new BaseCellRotation(92, 0), new BaseCellRotation(83, 0) }, // j 0
{ new BaseCellRotation(79, 0), new BaseCellRotation(78, 0), new BaseCellRotation(74, 3) }, // j 1
{ new BaseCellRotation(63, 1), new BaseCellRotation(59, 3), new BaseCellRotation(57, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(109, 0), new BaseCellRotation(108, 0), new BaseCellRotation(100, 5) }, // j 0
{ new BaseCellRotation(93, 1), new BaseCellRotation(95, 0), new BaseCellRotation(92, 0) }, // j 1
{ new BaseCellRotation(77, 1), new BaseCellRotation(79, 0), new BaseCellRotation(78, 0) } // j 2
},
{
// i 2
{ new BaseCellRotation(117, 4), new BaseCellRotation(118, 5), new BaseCellRotation(114, 5) }, // j 0
{ new BaseCellRotation(106, 1), new BaseCellRotation(109, 0), new BaseCellRotation(108, 0) }, // j 1
{ new BaseCellRotation(90, 1), new BaseCellRotation(93, 1), new BaseCellRotation(95, 0) } // j 2
} },
{// face 16
{
// i 0
{ new BaseCellRotation(90, 0), new BaseCellRotation(77, 0), new BaseCellRotation(63, 0) }, // j 0
{ new BaseCellRotation(80, 0), new BaseCellRotation(68, 0), new BaseCellRotation(56, 3) }, // j 1
{ new BaseCellRotation(72, 1), new BaseCellRotation(60, 3), new BaseCellRotation(46, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(106, 0), new BaseCellRotation(93, 0), new BaseCellRotation(79, 5) }, // j 0
{ new BaseCellRotation(99, 1), new BaseCellRotation(90, 0), new BaseCellRotation(77, 0) }, // j 1
{ new BaseCellRotation(88, 1), new BaseCellRotation(80, 0), new BaseCellRotation(68, 0) } // j 2
},
{
// i 2
{ new BaseCellRotation(117, 3), new BaseCellRotation(109, 5), new BaseCellRotation(95, 5) }, // j 0
{ new BaseCellRotation(113, 1), new BaseCellRotation(106, 0), new BaseCellRotation(93, 0) }, // j 1
{ new BaseCellRotation(105, 1), new BaseCellRotation(99, 1), new BaseCellRotation(90, 0) } // j 2
} },
{// face 17
{
// i 0
{ new BaseCellRotation(105, 0), new BaseCellRotation(88, 0), new BaseCellRotation(72, 0) }, // j 0
{ new BaseCellRotation(103, 0), new BaseCellRotation(91, 0), new BaseCellRotation(73, 3) }, // j 1
{ new BaseCellRotation(97, 1), new BaseCellRotation(89, 3), new BaseCellRotation(71, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(113, 0), new BaseCellRotation(99, 0), new BaseCellRotation(80, 5) }, // j 0
{ new BaseCellRotation(116, 1), new BaseCellRotation(105, 0), new BaseCellRotation(88, 0) }, // j 1
{ new BaseCellRotation(111, 1), new BaseCellRotation(103, 0), new BaseCellRotation(91, 0) } // j 2
},
{
// i 2
{ new BaseCellRotation(117, 2), new BaseCellRotation(106, 5), new BaseCellRotation(90, 5) }, // j 0
{ new BaseCellRotation(121, 1), new BaseCellRotation(113, 0), new BaseCellRotation(99, 0) }, // j 1
{ new BaseCellRotation(119, 1), new BaseCellRotation(116, 1), new BaseCellRotation(105, 0) } // j 2
} },
{// face 18
{
// i 0
{ new BaseCellRotation(119, 0), new BaseCellRotation(111, 0), new BaseCellRotation(97, 0) }, // j 0
{ new BaseCellRotation(115, 0), new BaseCellRotation(110, 0), new BaseCellRotation(98, 3) }, // j 1
{ new BaseCellRotation(107, 1), new BaseCellRotation(104, 3), new BaseCellRotation(96, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(121, 0), new BaseCellRotation(116, 0), new BaseCellRotation(103, 5) }, // j 0
{ new BaseCellRotation(120, 1), new BaseCellRotation(119, 0), new BaseCellRotation(111, 0) }, // j 1
{ new BaseCellRotation(112, 1), new BaseCellRotation(115, 0), new BaseCellRotation(110, 0) } // j 2
},
{
// i 2
{ new BaseCellRotation(117, 1), new BaseCellRotation(113, 5), new BaseCellRotation(105, 5) }, // j 0
{ new BaseCellRotation(118, 1), new BaseCellRotation(121, 0), new BaseCellRotation(116, 0) }, // j 1
{ new BaseCellRotation(114, 1), new BaseCellRotation(120, 1), new BaseCellRotation(119, 0) } // j 2
} },
{// face 19
{
// i 0
{ new BaseCellRotation(114, 0), new BaseCellRotation(112, 0), new BaseCellRotation(107, 0) }, // j 0
{ new BaseCellRotation(100, 0), new BaseCellRotation(102, 0), new BaseCellRotation(101, 3) }, // j 1
{ new BaseCellRotation(83, 1), new BaseCellRotation(87, 3), new BaseCellRotation(85, 3) } // j 2
},
{
// i 1
{ new BaseCellRotation(118, 0), new BaseCellRotation(120, 0), new BaseCellRotation(115, 5) }, // j 0
{ new BaseCellRotation(108, 1), new BaseCellRotation(114, 0), new BaseCellRotation(112, 0) }, // j 1
{ new BaseCellRotation(92, 1), new BaseCellRotation(100, 0), new BaseCellRotation(102, 0) } // j 2
},
{
// i 2
{ new BaseCellRotation(117, 0), new BaseCellRotation(121, 5), new BaseCellRotation(119, 5) }, // j 0
{ new BaseCellRotation(109, 1), new BaseCellRotation(118, 0), new BaseCellRotation(120, 0) }, // j 1
{ new BaseCellRotation(95, 1), new BaseCellRotation(108, 1), new BaseCellRotation(114, 0) } // j 2
} } };
/**
* Return whether or not the indicated base cell is a pentagon.
*/
public static boolean isBaseCellPentagon(int baseCell) {
if (baseCell < 0 || baseCell >= Constants.NUM_BASE_CELLS) { // LCOV_EXCL_BR_LINE
// Base cells less than zero can not be represented in an index
return false;
}
return baseCellData[baseCell].isPentagon;
}
/**
* Return whether or not the indicated base cell is a pentagon.
*/
public static FaceIJK getBaseFaceIJK(int baseCell) {
if (baseCell < 0 || baseCell >= Constants.NUM_BASE_CELLS) { // LCOV_EXCL_BR_LINE
// Base cells less than zero can not be represented in an index
throw new IllegalArgumentException("Illegal base cell");
}
BaseCellData cellData = baseCellData[baseCell];
return new FaceIJK(cellData.homeFace, new CoordIJK(cellData.homeI, cellData.homeJ, cellData.homeK));
}
/** Find base cell given a face and a CoordIJK.
*
* Given the face number and a resolution 0 ijk+ coordinate in that face's
* face-centered ijk coordinate system, return the base cell located at that
* coordinate.
*
* Valid ijk+ lookup coordinates are from (0, 0, 0) to (2, 2, 2).
*/
public static int getBaseCell(int face, CoordIJK coord) {
return faceIjkBaseCells[face][coord.i][coord.j][coord.k].baseCell;
}
/** Find base cell given a face and a CoordIJK.
*
* Given the face number and a resolution 0 ijk+ coordinate in that face's
* face-centered ijk coordinate system, return the number of 60' ccw rotations
* to rotate into the coordinate system of the base cell at that coordinates.
*
* Valid ijk+ lookup coordinates are from (0, 0, 0) to (2, 2, 2).
*/
public static int getBaseCellCCWrot60(int face, CoordIJK coord) {
return faceIjkBaseCells[face][coord.i][coord.j][coord.k].ccwRot60;
}
/** Return whether or not the tested face is a cw offset face.
*/
public static boolean baseCellIsCwOffset(int baseCell, int testFace) {
return baseCellData[baseCell].cwOffsetPent[0] == testFace || baseCellData[baseCell].cwOffsetPent[1] == testFace;
}
/** Return whether the indicated base cell is a pentagon where all
* neighbors are oriented towards it. */
public static boolean isBaseCellPolarPentagon(int baseCell) {
return baseCell == 4 || baseCell == 117;
}
}
|
BaseCells
|
java
|
apache__kafka
|
storage/src/test/java/org/apache/kafka/server/log/remote/storage/LocalTieredStorageTest.java
|
{
"start": 27155,
"end": 30450
}
|
class ____ {
private static final byte[] OFFSET_FILE_BYTES = "offset".getBytes();
private static final byte[] TIME_FILE_BYTES = "time".getBytes();
private static final byte[] TXN_FILE_BYTES = "txn".getBytes();
private static final byte[] PRODUCER_SNAPSHOT_FILE_BYTES = "pid".getBytes();
private static final byte[] LEADER_EPOCH_CHECKPOINT_FILE_BYTES = "0\n2\n0 0\n2 12".getBytes();
private final Path segmentPath = Paths.get("local-segments");
private long baseOffset = 0;
LocalLogSegments() {
if (Files.notExists(segmentPath)) {
try {
Files.createDirectories(segmentPath);
} catch (final IOException ex) {
LOGGER.error("Failed to create directory: {}", segmentPath, ex);
}
}
}
LogSegmentData nextSegment() {
return nextSegment(new byte[0]);
}
LogSegmentData nextSegment(final byte[]... data) {
final String offset = LogFileUtils.filenamePrefixFromOffset(baseOffset);
try {
final FileChannel channel = FileChannel.open(
segmentPath.resolve(offset + LogFileUtils.LOG_FILE_SUFFIX),
StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE);
final ByteBuffer buffer = ByteBuffer.allocate(128);
final byte magic = RecordBatch.MAGIC_VALUE_V2;
MemoryRecordsBuilder builder = MemoryRecords.builder(
buffer, magic, Compression.NONE, TimestampType.CREATE_TIME, baseOffset);
for (byte[] value : data) {
builder.append(System.currentTimeMillis(), null, value);
}
builder.build().writeFullyTo(channel);
channel.force(true);
final Path segment = segmentPath.resolve(offset + LogFileUtils.LOG_FILE_SUFFIX);
final Path offsetIdx = segmentPath.resolve(offset + LogFileUtils.INDEX_FILE_SUFFIX);
final Path timeIdx = segmentPath.resolve(offset + LogFileUtils.TIME_INDEX_FILE_SUFFIX);
final Path txnIdx = segmentPath.resolve(offset + LogFileUtils.TXN_INDEX_FILE_SUFFIX);
final Path producerIdSnapshot = segmentPath.resolve(offset + LogFileUtils.PRODUCER_SNAPSHOT_FILE_SUFFIX);
Files.write(offsetIdx, OFFSET_FILE_BYTES);
Files.write(timeIdx, TIME_FILE_BYTES);
Files.write(txnIdx, TXN_FILE_BYTES);
Files.write(producerIdSnapshot, PRODUCER_SNAPSHOT_FILE_BYTES);
baseOffset += data.length;
return new LogSegmentData(segment, offsetIdx, timeIdx, Optional.of(txnIdx),
producerIdSnapshot, ByteBuffer.wrap(LEADER_EPOCH_CHECKPOINT_FILE_BYTES));
} catch (IOException e) {
throw new AssertionError(e);
}
}
void deleteAll() throws IOException {
List<Path> paths = Files.list(segmentPath).toList();
for (final Path path : paths) {
Files.delete(path);
}
Files.delete(segmentPath);
}
}
}
|
LocalLogSegments
|
java
|
apache__camel
|
components/camel-xslt/src/main/java/org/apache/camel/component/xslt/StreamResultHandler.java
|
{
"start": 1058,
"end": 1423
}
|
class ____ implements ResultHandler {
private ByteArrayOutputStream buffer = new ByteArrayOutputStream();
private StreamResult result = new StreamResult(buffer);
@Override
public Result getResult() {
return result;
}
@Override
public void setBody(Message in) {
in.setBody(buffer.toByteArray());
}
}
|
StreamResultHandler
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/inheritance/discriminator/SingleTableInheritancePersistTest.java
|
{
"start": 7189,
"end": 7854
}
|
class ____ extends Person {
private String job;
@OneToOne
private Man husband;
@OneToMany(mappedBy = "mother")
private List<Child> children = new ArrayList<>();
public Woman() {
}
public Woman(String name, String job) {
super( name );
this.job = job;
}
public String getJob() {
return job;
}
public void setJob(String job) {
this.job = job;
}
public Man getHusband() {
return husband;
}
public void setHusband(Man husband) {
this.husband = husband;
}
public List<Child> getChildren() {
return children;
}
public void setChildren(List<Child> children) {
this.children = children;
}
}
}
|
Woman
|
java
|
apache__spark
|
common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/protocol/UploadBlockStream.java
|
{
"start": 1360,
"end": 2717
}
|
class ____ extends BlockTransferMessage {
public final String blockId;
public final byte[] metadata;
public UploadBlockStream(String blockId, byte[] metadata) {
this.blockId = blockId;
this.metadata = metadata;
}
@Override
protected Type type() { return Type.UPLOAD_BLOCK_STREAM; }
@Override
public int hashCode() {
int objectsHashCode = Objects.hashCode(blockId);
return objectsHashCode * 41 + Arrays.hashCode(metadata);
}
@Override
public String toString() {
return "UploadBlockStream[blockId=" + blockId + ",metadata size=" + metadata.length + "]";
}
@Override
public boolean equals(Object other) {
if (other instanceof UploadBlockStream o) {
return Objects.equals(blockId, o.blockId)
&& Arrays.equals(metadata, o.metadata);
}
return false;
}
@Override
public int encodedLength() {
return Encoders.Strings.encodedLength(blockId)
+ Encoders.ByteArrays.encodedLength(metadata);
}
@Override
public void encode(ByteBuf buf) {
Encoders.Strings.encode(buf, blockId);
Encoders.ByteArrays.encode(buf, metadata);
}
public static UploadBlockStream decode(ByteBuf buf) {
String blockId = Encoders.Strings.decode(buf);
byte[] metadata = Encoders.ByteArrays.decode(buf);
return new UploadBlockStream(blockId, metadata);
}
}
|
UploadBlockStream
|
java
|
spring-projects__spring-boot
|
module/spring-boot-webmvc/src/main/java/org/springframework/boot/webmvc/actuate/endpoint/web/WebMvcEndpointHandlerMapping.java
|
{
"start": 3567,
"end": 4081
}
|
class ____ implements LinksHandler {
@Override
@ResponseBody
@Reflective
public Map<String, Map<String, Link>> links(HttpServletRequest request, HttpServletResponse response) {
Map<String, Link> links = WebMvcEndpointHandlerMapping.this.linksResolver
.resolveLinks(request.getRequestURL().toString());
return OperationResponseBody.of(Collections.singletonMap("_links", links));
}
@Override
public String toString() {
return "Actuator root web endpoint";
}
}
static
|
WebMvcLinksHandler
|
java
|
quarkusio__quarkus
|
extensions/security/deployment/src/test/java/io/quarkus/security/test/permissionsallowed/checker/PermissionCheckerOneSecuredMethodArg.java
|
{
"start": 215,
"end": 472
}
|
class ____ {
@PermissionChecker("one-arg")
public boolean isGranted(SecurityIdentity securityIdentity, Object one) {
return Integer.parseInt(one.toString()) == 1 && securityIdentity.hasRole("admin");
}
}
|
PermissionCheckerOneSecuredMethodArg
|
java
|
elastic__elasticsearch
|
x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/normalize/NormalizePipelineMethods.java
|
{
"start": 821,
"end": 1188
}
|
class ____ extends SinglePassSimpleStatisticsMethod {
static final String NAME = "rescale_0_100";
RescaleZeroToOneHundred(double[] values) {
super(values);
}
@Override
public double applyAsDouble(double value) {
return 100 * (value - min) / (max - min);
}
}
static
|
RescaleZeroToOneHundred
|
java
|
apache__hadoop
|
hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/commit/mapred/CommitterTestBase.java
|
{
"start": 2372,
"end": 13814
}
|
class ____ {
private Configuration conf;
private FileSystem fs;
private Path outputPath;
private TaskAttemptID taskAttempt0;
private Path reportDir;
@BeforeEach
public void setup() throws IOException {
conf = newConf();
fs = FileSystem.get(conf);
String uuid = UUIDUtils.random();
outputPath = fs.makeQualified(new Path("/test/" + uuid));
taskAttempt0 = JobSuite.createTaskAttemptId(randomTrimmedJobId(), 0);
reportDir = fs.makeQualified(new Path("/report/" + uuid));
fs.mkdirs(reportDir);
conf.set(org.apache.hadoop.fs.tosfs.commit.Committer.COMMITTER_SUMMARY_REPORT_DIR,
reportDir.toUri().toString());
}
protected abstract Configuration newConf();
@AfterEach
public void teardown() {
CommonUtils.runQuietly(() -> fs.delete(outputPath, true));
IOUtils.closeStream(fs);
}
@BeforeAll
public static void beforeClass() {
Assumptions.assumeTrue(TestEnv.checkTestEnabled());
}
@AfterAll
public static void afterClass() {
if (!TestEnv.checkTestEnabled()) {
return;
}
List<String> committerThreads = Thread.getAllStackTraces().keySet()
.stream()
.map(Thread::getName)
.filter(n -> n.startsWith(org.apache.hadoop.fs.tosfs.commit.Committer.THREADS_PREFIX))
.collect(Collectors.toList());
assertTrue(committerThreads.isEmpty(), "Outstanding committer threads");
}
private static String randomTrimmedJobId() {
SimpleDateFormat formatter = new SimpleDateFormat("yyyyMMdd");
return String.format("%s%04d_%04d", formatter.format(new Date()),
(long) (Math.random() * 1000),
(long) (Math.random() * 1000));
}
private static String randomFormedJobId() {
return String.format("job_%s", randomTrimmedJobId());
}
@Test
public void testSetupJob() throws IOException {
JobSuite suite = JobSuite.create(conf, taskAttempt0, outputPath);
Assumptions.assumeFalse(suite.skipTests());
// Setup job.
suite.setupJob();
suite.dumpObjectStorage();
suite.assertHasMagicKeys();
}
@Test
public void testSetupJobWithOrphanPaths() throws IOException, InterruptedException {
JobSuite suite = JobSuite.create(conf, taskAttempt0, outputPath);
Assumptions.assumeFalse(suite.skipTests());
// Orphan success marker.
Path successPath = CommitUtils.successMarker(outputPath);
CommitUtils.save(fs, successPath, new byte[]{});
assertTrue(fs.exists(successPath), "The success file should exist.");
// Orphan job path.
Path jobPath = CommitUtils.magicJobPath(suite.committer().jobId(), outputPath);
fs.mkdirs(jobPath);
assertTrue(fs.exists(jobPath), "The job path should exist.");
Path subPath = new Path(jobPath, "tmp.pending");
CommitUtils.save(fs, subPath, new byte[]{});
assertTrue(fs.exists(subPath), "The sub path under job path should be existing.");
FileStatus jobPathStatus = fs.getFileStatus(jobPath);
Thread.sleep(1000L);
suite.setupJob();
suite.dumpObjectStorage();
suite.assertHasMagicKeys();
assertFalse(fs.exists(successPath), "Should have deleted the success path");
assertTrue(fs.exists(jobPath), "Should have re-created the job path");
assertFalse(fs.exists(subPath), "Should have deleted the sub path under the job path");
}
@Test
public void testSetupTask() throws IOException {
JobSuite suite = JobSuite.create(conf, taskAttempt0, outputPath);
Assumptions.assumeFalse(suite.skipTests());
// Remaining attempt task path.
Path taskAttemptBasePath =
CommitUtils.magicTaskAttemptBasePath(suite.taskAttemptContext(), outputPath);
Path subTaskAttemptPath = new Path(taskAttemptBasePath, "tmp.pending");
CommitUtils.save(fs, subTaskAttemptPath, new byte[]{});
assertTrue(fs.exists(taskAttemptBasePath));
assertTrue(fs.exists(subTaskAttemptPath));
// Setup job.
suite.setupJob();
suite.assertHasMagicKeys();
// It will clear all the job path once we've set up the job.
assertFalse(fs.exists(taskAttemptBasePath));
assertFalse(fs.exists(subTaskAttemptPath));
// Left some the task paths.
CommitUtils.save(fs, subTaskAttemptPath, new byte[]{});
assertTrue(fs.exists(taskAttemptBasePath));
assertTrue(fs.exists(subTaskAttemptPath));
// Setup task.
suite.setupTask();
assertFalse(fs.exists(subTaskAttemptPath));
}
@Test
public void testCommitTask() throws Exception {
JobSuite suite = JobSuite.create(conf, taskAttempt0, outputPath);
Assumptions.assumeFalse(suite.skipTests());
// Setup job
suite.setupJob();
suite.dumpObjectStorage();
suite.assertHasMagicKeys();
// Setup task
suite.setupTask();
// Write records.
suite.assertNoMagicPendingFile();
suite.assertMultipartUpload(0);
suite.writeOutput();
suite.dumpObjectStorage();
suite.assertHasMagicPendingFile();
suite.assertNoMagicMultipartUpload();
suite.assertMultipartUpload(1);
// Assert the pending file content.
Path pendingPath = suite.magicPendingPath();
byte[] pendingData = CommitUtils.load(suite.fs(), pendingPath);
Pending pending = Pending.deserialize(pendingData);
assertEquals(suite.destPartKey(), pending.destKey());
assertEquals(20, pending.length());
assertEquals(1, pending.parts().size());
// Commit the task.
suite.commitTask();
// Verify the pending set file.
suite.assertHasPendingSet();
// Assert the pending set file content.
Path pendingSetPath = suite.magicPendingSetPath();
byte[] pendingSetData = CommitUtils.load(suite.fs(), pendingSetPath);
PendingSet pendingSet = PendingSet.deserialize(pendingSetData);
assertEquals(suite.job().getJobID().toString(), pendingSet.jobId());
assertEquals(1, pendingSet.commits().size());
assertEquals(pending, pendingSet.commits().get(0));
assertEquals(pendingSet.extraData(), ImmutableMap.of(CommitUtils.TASK_ATTEMPT_ID,
suite.taskAttemptContext().getTaskAttemptID().toString()));
// Complete the multipart upload and verify the results.
ObjectStorage storage = suite.storage();
storage.completeUpload(pending.destKey(), pending.uploadId(), pending.parts());
suite.verifyPartContent();
}
@Test
public void testAbortTask() throws Exception {
JobSuite suite = JobSuite.create(conf, taskAttempt0, outputPath);
Assumptions.assumeFalse(suite.skipTests());
suite.setupJob();
suite.setupTask();
// Pre-check before the output write.
suite.assertNoMagicPendingFile();
suite.assertMultipartUpload(0);
// Execute the output write.
suite.writeOutput();
// Post-check after the output write.
suite.assertHasMagicPendingFile();
suite.assertNoMagicMultipartUpload();
suite.assertMultipartUpload(1);
// Assert the pending file content.
Path pendingPath = suite.magicPendingPath();
byte[] pendingData = CommitUtils.load(suite.fs(), pendingPath);
Pending pending = Pending.deserialize(pendingData);
assertEquals(suite.destPartKey(), pending.destKey());
assertEquals(20, pending.length());
assertEquals(1, pending.parts().size());
// Abort the task.
suite.abortTask();
// Verify the state after aborting task.
suite.assertNoMagicPendingFile();
suite.assertNoMagicMultipartUpload();
suite.assertMultipartUpload(0);
suite.assertNoTaskAttemptPath();
}
@Test
public void testCommitJob() throws Exception {
JobSuite suite = JobSuite.create(conf, taskAttempt0, outputPath);
Assumptions.assumeFalse(suite.skipTests());
suite.setupJob();
suite.setupTask();
suite.writeOutput();
suite.commitTask();
// Commit the job.
suite.assertNoPartFiles();
suite.commitJob();
// Verify the output.
suite.assertNoMagicMultipartUpload();
suite.assertNoMagicObjectKeys();
suite.assertSuccessMarker();
suite.assertSummaryReport(reportDir);
suite.verifyPartContent();
}
@Test
public void testCommitJobFailed() throws Exception {
JobSuite suite = JobSuite.create(conf, taskAttempt0, outputPath);
Assumptions.assumeFalse(suite.skipTests());
suite.setupJob();
suite.setupTask();
suite.writeOutput();
suite.commitTask();
// Commit the job.
suite.assertNoPartFiles();
suite.commitJob();
}
@Test
public void testTaskCommitAfterJobCommit() throws Exception {
JobSuite suite = JobSuite.create(conf, taskAttempt0, outputPath);
Assumptions.assumeFalse(suite.skipTests());
suite.setupJob();
suite.setupTask();
suite.writeOutput();
suite.commitTask();
// Commit the job
suite.assertNoPartFiles();
suite.commitJob();
// Verify the output.
suite.assertNoMagicMultipartUpload();
suite.assertNoMagicObjectKeys();
suite.assertSuccessMarker();
suite.verifyPartContent();
// Commit the task again.
assertThrows(FileNotFoundException.class, suite::commitTask);
}
@Test
public void testTaskCommitWithConsistentJobId() throws Exception {
Configuration config = newConf();
String consistentJobId = randomFormedJobId();
config.set(CommitUtils.SPARK_WRITE_UUID, consistentJobId);
JobSuite suite = JobSuite.create(config, taskAttempt0, outputPath);
Assumptions.assumeFalse(suite.skipTests());
// By now, we have two "jobId"s, one is spark uuid, and the other is the jobId in taskAttempt.
// The job committer will adopt the former.
suite.setupJob();
// Next, we clear spark uuid, and set the jobId of taskAttempt to another value. In this case,
// the committer will take the jobId of taskAttempt as the final jobId, which is not consistent
// with the one that committer holds.
config.unset(CommitUtils.SPARK_WRITE_UUID);
JobConf jobConf = new JobConf(config);
String anotherJobId = randomTrimmedJobId();
TaskAttemptID taskAttemptId1 =
JobSuite.createTaskAttemptId(anotherJobId, JobSuite.DEFAULT_APP_ATTEMPT_ID);
final TaskAttemptContext attemptContext1 =
JobSuite.createTaskAttemptContext(jobConf, taskAttemptId1, JobSuite.DEFAULT_APP_ATTEMPT_ID);
assertThrows(IllegalArgumentException.class, () -> suite.setupTask(attemptContext1),
"JobId set in the context");
// Even though we use another taskAttempt, as long as we ensure the spark uuid is consistent,
// the jobId in committer is consistent.
config.set(CommitUtils.SPARK_WRITE_UUID, consistentJobId);
config.set(FileOutputFormat.OUTDIR, outputPath.toString());
jobConf = new JobConf(config);
anotherJobId = randomTrimmedJobId();
TaskAttemptID taskAttemptId2 =
JobSuite.createTaskAttemptId(anotherJobId, JobSuite.DEFAULT_APP_ATTEMPT_ID);
TaskAttemptContext attemptContext2 =
JobSuite.createTaskAttemptContext(jobConf, taskAttemptId2, JobSuite.DEFAULT_APP_ATTEMPT_ID);
suite.setupTask(attemptContext2);
// Write output must use the same task context with setup task.
suite.writeOutput(attemptContext2);
// Commit task must use the same task context with setup task.
suite.commitTask(attemptContext2);
suite.assertPendingSetAtRightLocation();
// Commit the job
suite.assertNoPartFiles();
suite.commitJob();
// Verify the output.
suite.assertNoMagicMultipartUpload();
suite.assertNoMagicObjectKeys();
suite.assertSuccessMarker();
suite.verifyPartContent();
}
}
|
CommitterTestBase
|
java
|
mapstruct__mapstruct
|
core/src/test/java/org/mapstruct/factory/PackagePrivateMapperImpl.java
|
{
"start": 180,
"end": 247
}
|
class ____ implements PackagePrivateMapper {
}
|
PackagePrivateMapperImpl
|
java
|
apache__dubbo
|
dubbo-common/src/main/java/org/apache/dubbo/common/logger/Logger.java
|
{
"start": 936,
"end": 4774
}
|
interface ____ {
/**
* Logs a message with trace log level.
*
* @param msg log this message
*/
void trace(String msg);
/**
* Logs a message with trace log level.
*
* @param msg log this message
* @param arguments a list of arguments
*/
void trace(String msg, Object... arguments);
/**
* Logs an error with trace log level.
*
* @param e log this cause
*/
void trace(Throwable e);
/**
* Logs an error with trace log level.
*
* @param msg log this message
* @param e log this cause
*/
void trace(String msg, Throwable e);
/**
* Logs a message with debug log level.
*
* @param msg log this message
*/
void debug(String msg);
/**
* Logs a message with debug log level.
*
* @param msg log this message
* @param arguments a list of arguments
*/
void debug(String msg, Object... arguments);
/**
* Logs an error with debug log level.
*
* @param e log this cause
*/
void debug(Throwable e);
/**
* Logs an error with debug log level.
*
* @param msg log this message
* @param e log this cause
*/
void debug(String msg, Throwable e);
/**
* Logs a message with info log level.
*
* @param msg log this message
*/
void info(String msg);
/**
* Logs a message with info log level.
*
* @param msg log this message
* @param arguments a list of arguments
*/
void info(String msg, Object... arguments);
/**
* Logs an error with info log level.
*
* @param e log this cause
*/
void info(Throwable e);
/**
* Logs an error with info log level.
*
* @param msg log this message
* @param e log this cause
*/
void info(String msg, Throwable e);
/**
* Logs a message with warn log level.
*
* @param msg log this message
*/
void warn(String msg);
/**
* Logs a message with warn log level.
*
* @param msg log this message
* @param arguments a list of arguments
*/
void warn(String msg, Object... arguments);
/**
* Logs a message with warn log level.
*
* @param e log this message
*/
void warn(Throwable e);
/**
* Logs a message with warn log level.
*
* @param msg log this message
* @param e log this cause
*/
void warn(String msg, Throwable e);
/**
* Logs a message with error log level.
*
* @param msg log this message
*/
void error(String msg);
/**
* Logs a message with error log level.
*
* @param msg log this message
* @param arguments a list of arguments
*/
void error(String msg, Object... arguments);
/**
* Logs an error with error log level.
*
* @param e log this cause
*/
void error(Throwable e);
/**
* Logs an error with error log level.
*
* @param msg log this message
* @param e log this cause
*/
void error(String msg, Throwable e);
/**
* Is trace logging currently enabled?
*
* @return true if trace is enabled
*/
boolean isTraceEnabled();
/**
* Is debug logging currently enabled?
*
* @return true if debug is enabled
*/
boolean isDebugEnabled();
/**
* Is info logging currently enabled?
*
* @return true if info is enabled
*/
boolean isInfoEnabled();
/**
* Is warn logging currently enabled?
*
* @return true if warn is enabled
*/
boolean isWarnEnabled();
/**
* Is error logging currently enabled?
*
* @return true if error is enabled
*/
boolean isErrorEnabled();
}
|
Logger
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/lazytoone/polymorphic/JoinFetchedPolymorphicToOneTests.java
|
{
"start": 5042,
"end": 5454
}
|
class ____ extends Customer {
private String vat;
public ForeignCustomer() {
super();
}
public ForeignCustomer(Integer id, String name, String vat) {
super( id, name );
this.vat = vat;
}
public String getVat() {
return vat;
}
public void setVat(String vat) {
this.vat = vat;
}
}
@Entity(name = "DomesticCustomer")
@Table(name = "domestic_cust")
public static
|
ForeignCustomer
|
java
|
apache__camel
|
components/camel-aws/camel-aws2-ddb/src/test/java/org/apache/camel/component/aws2/ddbstream/ShardIteratorHandlerTest.java
|
{
"start": 1414,
"end": 8514
}
|
class ____ extends CamelTestSupport {
private static final String SHARD_ITERATOR_0 = STREAM_ARN + "|1|hash-0";
private static final String SHARD_ITERATOR_1 = STREAM_ARN + "|1|hash-1";
private static final String SHARD_ITERATOR_2 = STREAM_ARN + "|1|hash-2";
private static final String SHARD_ITERATOR_3 = STREAM_ARN + "|1|hash-3";
private static final String SHARD_ITERATOR_4 = STREAM_ARN + "|1|hash-4";
private static final String SHARD_ITERATOR_5 = STREAM_ARN + "|1|hash-5";
private static final String SHARD_ITERATOR_6 = STREAM_ARN + "|1|hash-6";
private Ddb2StreamComponent component;
private AmazonDDBStreamsClientMock dynamoDbStreamsClient;
@BeforeEach
void setup() {
component = context.getComponent("aws2-ddbstream", Ddb2StreamComponent.class);
dynamoDbStreamsClient = new AmazonDDBStreamsClientMock();
component.getConfiguration().setAmazonDynamoDbStreamsClient(dynamoDbStreamsClient);
dynamoDbStreamsClient.setMockedShardAndIteratorResponse(SHARD_0, SHARD_ITERATOR_0);
dynamoDbStreamsClient.setMockedShardAndIteratorResponse(SHARD_1, SHARD_ITERATOR_1);
dynamoDbStreamsClient.setMockedShardAndIteratorResponse(SHARD_2, SHARD_ITERATOR_2);
dynamoDbStreamsClient.setMockedShardAndIteratorResponse(SHARD_3, SHARD_ITERATOR_3);
dynamoDbStreamsClient.setMockedShardAndIteratorResponse(SHARD_4, SHARD_ITERATOR_4);
dynamoDbStreamsClient.setMockedShardAndIteratorResponse(SHARD_5, SHARD_ITERATOR_5);
dynamoDbStreamsClient.setMockedShardAndIteratorResponse(SHARD_6, SHARD_ITERATOR_6);
}
@Test
void shouldReturnLeafShardIterators() throws Exception {
component.getConfiguration().setStreamIteratorType(StreamIteratorType.FROM_LATEST);
Ddb2StreamEndpoint endpoint = (Ddb2StreamEndpoint) component.createEndpoint("aws2-ddbstreams://myTable");
ShardIteratorHandler underTest = new ShardIteratorHandler(endpoint);
endpoint.doStart();
Map<String, String> expectedShardIterators = new HashMap<>();
expectedShardIterators.put(SHARD_3.shardId(), SHARD_ITERATOR_3);
expectedShardIterators.put(SHARD_4.shardId(), SHARD_ITERATOR_4);
expectedShardIterators.put(SHARD_5.shardId(), SHARD_ITERATOR_5);
expectedShardIterators.put(SHARD_6.shardId(), SHARD_ITERATOR_6);
assertEquals(expectedShardIterators, underTest.getShardIterators());
}
@Test
void shouldReturnRootShardIterator() throws Exception {
component.getConfiguration().setStreamIteratorType(StreamIteratorType.FROM_START);
Ddb2StreamEndpoint endpoint = (Ddb2StreamEndpoint) component.createEndpoint("aws2-ddbstreams://myTable");
ShardIteratorHandler underTest = new ShardIteratorHandler(endpoint);
endpoint.doStart();
assertEquals(Collections.singletonMap(SHARD_0.shardId(), SHARD_ITERATOR_0), underTest.getShardIterators());
}
@Test
void shouldProgressThroughTreeWhenShardIteratorsAreRetrievedRepeatedly() throws Exception {
component.getConfiguration().setStreamIteratorType(StreamIteratorType.FROM_START);
Ddb2StreamEndpoint endpoint = (Ddb2StreamEndpoint) component.createEndpoint("aws2-ddbstreams://myTable");
ShardIteratorHandler underTest = new ShardIteratorHandler(endpoint);
endpoint.doStart();
assertEquals(Collections.singletonMap(SHARD_0.shardId(), SHARD_ITERATOR_0), underTest.getShardIterators());
Map<String, String> expectedShardIterators1 = new HashMap<>();
expectedShardIterators1.put(SHARD_1.shardId(), SHARD_ITERATOR_1);
expectedShardIterators1.put(SHARD_2.shardId(), SHARD_ITERATOR_2);
assertEquals(expectedShardIterators1, underTest.getShardIterators());
Map<String, String> expectedShardIterators2 = new HashMap<>();
expectedShardIterators2.put(SHARD_3.shardId(), SHARD_ITERATOR_3);
expectedShardIterators2.put(SHARD_4.shardId(), SHARD_ITERATOR_4);
expectedShardIterators2.put(SHARD_5.shardId(), SHARD_ITERATOR_5);
expectedShardIterators2.put(SHARD_6.shardId(), SHARD_ITERATOR_6);
assertEquals(expectedShardIterators2, underTest.getShardIterators());
Map<String, String> expectedShardIterators3 = expectedShardIterators2;
assertEquals(expectedShardIterators3, underTest.getShardIterators());
}
@Test
void shouldUpdateShardIterator() throws Exception {
component.getConfiguration().setStreamIteratorType(StreamIteratorType.FROM_LATEST);
Ddb2StreamEndpoint endpoint = (Ddb2StreamEndpoint) component.createEndpoint("aws2-ddbstreams://myTable");
ShardIteratorHandler underTest = new ShardIteratorHandler(endpoint);
endpoint.doStart();
underTest.getShardIterators();
String updatedShardIterator5 = STREAM_ARN + "|1|hash-5-new";
underTest.updateShardIterator(SHARD_5.shardId(), updatedShardIterator5);
assertEquals(updatedShardIterator5, underTest.getShardIterators().get(SHARD_5.shardId()));
}
@Test
void shouldRemoveShardIfNullIteratorIsProvided() throws Exception {
component.getConfiguration().setStreamIteratorType(StreamIteratorType.FROM_LATEST);
Ddb2StreamEndpoint endpoint = (Ddb2StreamEndpoint) component.createEndpoint("aws2-ddbstreams://myTable");
ShardIteratorHandler underTest = new ShardIteratorHandler(endpoint);
endpoint.doStart();
underTest.getShardIterators();
underTest.updateShardIterator(SHARD_3.shardId(), null);
assertFalse(underTest.getShardIterators().containsKey(SHARD_3.shardId()));
}
@Test
void shouldRequestAndCacheFreshShardIterator() throws Exception {
component.getConfiguration().setStreamIteratorType(StreamIteratorType.FROM_LATEST);
Ddb2StreamEndpoint endpoint = (Ddb2StreamEndpoint) component.createEndpoint("aws2-ddbstreams://myTable");
ShardIteratorHandler underTest = new ShardIteratorHandler(endpoint);
endpoint.doStart();
underTest.getShardIterators();
String freshShardIterator4 = STREAM_ARN + "|1|hash-4-fresh";
dynamoDbStreamsClient.setMockedShardAndIteratorResponse(SHARD_4, freshShardIterator4);
assertEquals(freshShardIterator4, underTest.requestFreshShardIterator(SHARD_4.shardId(), freshShardIterator4));
assertEquals(freshShardIterator4, underTest.getShardIterators().get(SHARD_4.shardId()));
}
@Test
void shouldThrowIllegalArgumentExceptionIfNoStreamsAreReturned() throws Exception {
AmazonDDBStreamlessClientMock dynamoDbStreamsClient = new AmazonDDBStreamlessClientMock();
component.getConfiguration().setAmazonDynamoDbStreamsClient(dynamoDbStreamsClient);
Ddb2StreamEndpoint endpoint = (Ddb2StreamEndpoint) component.createEndpoint("aws2-ddbstreams://myTable");
ShardIteratorHandler underTest = new ShardIteratorHandler(endpoint);
endpoint.doStart();
assertThrows(IllegalArgumentException.class, () -> underTest.getShardIterators());
}
}
|
ShardIteratorHandlerTest
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/search/aggregations/metrics/MetricAggScriptPlugin.java
|
{
"start": 1032,
"end": 3714
}
|
class ____ extends MockScriptPlugin {
/** The name of the script engine type this plugin provides. */
public static final String METRIC_SCRIPT_ENGINE = "metric_scripts";
/** Script to take a field name in params and sum the values of the field. */
public static final String SUM_FIELD_PARAMS_SCRIPT = "sum_field_params";
/** Script to sum the values of a field named {@code values}. */
public static final String SUM_VALUES_FIELD_SCRIPT = "sum_values_field";
/** Script to return the value of a field named {@code value}. */
public static final String VALUE_FIELD_SCRIPT = "value_field";
/** Script to return the {@code _value} provided by aggs framework. */
public static final String VALUE_SCRIPT = "_value";
/** Script to return a random double */
public static final String RANDOM_SCRIPT = "Math.random()";
@Override
public String pluginScriptLang() {
return METRIC_SCRIPT_ENGINE;
}
@Override
protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() {
Map<String, Function<Map<String, Object>, Object>> scripts = new HashMap<>();
Function<Map<String, Object>, Integer> getInc = vars -> {
if (vars == null || vars.containsKey("inc") == false) {
return 0;
} else {
return ((Number) vars.get("inc")).intValue();
}
};
BiFunction<Map<String, Object>, String, Object> sum = (vars, fieldname) -> {
int inc = getInc.apply(vars);
LeafDocLookup docLookup = (LeafDocLookup) vars.get("doc");
List<Long> values = new ArrayList<>();
for (Object v : docLookup.get(fieldname)) {
values.add(((Number) v).longValue() + inc);
}
return values;
};
scripts.put(SUM_FIELD_PARAMS_SCRIPT, vars -> {
String fieldname = (String) vars.get("field");
return sum.apply(vars, fieldname);
});
scripts.put(SUM_VALUES_FIELD_SCRIPT, vars -> sum.apply(vars, "values"));
scripts.put(VALUE_FIELD_SCRIPT, vars -> sum.apply(vars, "value"));
scripts.put(VALUE_SCRIPT, vars -> {
int inc = getInc.apply(vars);
return ((Number) vars.get("_value")).doubleValue() + inc;
});
return scripts;
}
@Override
protected Map<String, Function<Map<String, Object>, Object>> nonDeterministicPluginScripts() {
Map<String, Function<Map<String, Object>, Object>> scripts = new HashMap<>();
scripts.put("Math.random()", vars -> ESTestCase.randomDouble());
return scripts;
}
}
|
MetricAggScriptPlugin
|
java
|
square__retrofit
|
retrofit-converters/jaxb/src/test/java/retrofit2/converter/jaxb/Contact.java
|
{
"start": 847,
"end": 1549
}
|
class ____ {
@XmlElement(required = true)
public final String name;
@XmlElement(name = "phone_number")
public final List<PhoneNumber> phone_numbers;
@SuppressWarnings("unused") // Used by JAXB.
private Contact() {
this("", new ArrayList<PhoneNumber>());
}
public Contact(String name, List<PhoneNumber> phoneNumbers) {
this.name = name;
this.phone_numbers = phoneNumbers;
}
@Override
public boolean equals(Object o) {
return o instanceof Contact
&& ((Contact) o).name.equals(name)
&& ((Contact) o).phone_numbers.equals(phone_numbers);
}
@Override
public int hashCode() {
return Arrays.asList(name, phone_numbers).hashCode();
}
}
|
Contact
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/indices/AutoscalingMissedIndicesUpdateException.java
|
{
"start": 645,
"end": 940
}
|
class ____ extends ElasticsearchException {
public AutoscalingMissedIndicesUpdateException(String message) {
super(message);
}
public AutoscalingMissedIndicesUpdateException(StreamInput in) throws IOException {
super(in);
}
}
|
AutoscalingMissedIndicesUpdateException
|
java
|
apache__avro
|
lang/java/avro/src/main/java/org/apache/avro/util/SchemaVisitor.java
|
{
"start": 877,
"end": 1626
}
|
interface ____<T> {
/**
* Invoked for schemas that do not have "child" schemas (like string, int …) or
* for a previously encountered schema with children, which will be treated as a
* terminal. (to avoid circular recursion)
*/
SchemaVisitorAction visitTerminal(Schema terminal);
/**
* Invoked for schema with children before proceeding to visit the children.
*/
SchemaVisitorAction visitNonTerminal(Schema nonTerminal);
/**
* Invoked for schemas with children after its children have been visited.
*/
SchemaVisitorAction afterVisitNonTerminal(Schema nonTerminal);
/**
* Invoked when visiting is complete.
*
* @return a value that will be returned by the visit method.
*/
T get();
|
SchemaVisitor
|
java
|
netty__netty
|
microbench/src/main/java/io/netty/handler/codec/http2/HpackBenchmarkUtil.java
|
{
"start": 1403,
"end": 1581
}
|
class ____ {
private HpackBenchmarkUtil() {
}
/**
* Internal key used to index a particular set of headers in the map.
*/
private static
|
HpackBenchmarkUtil
|
java
|
lettuce-io__lettuce-core
|
src/test/java/io/lettuce/test/settings/TlsSettings.java
|
{
"start": 427,
"end": 5294
}
|
class ____ {
private static final String TRUST_STORE_TYPE = "PKCS12";
private static final String TEST_WORK_FOLDER = System.getenv().getOrDefault("TEST_WORK_FOLDER", "work/docker");
private static final String TEST_SERVER_CERT = "redis.crt";
private static final String TEST_CLIENT_P12 = "client.p12";
private static final String TEST_CLIENT_CERT = "client.crt";
private static final String TEST_CLIENT_KEY = "client.key";
private static final String TEST_CA_CERT = "ca.crt";
private static final String TEST_TRUSTSTORE = "truststore.jks";
public static Path envClientP12(Path certLocation) {
return Paths.get(TEST_WORK_FOLDER, certLocation.toString(), TEST_CLIENT_P12);
}
public static Path envServerCert(Path certLocation) {
return Paths.get(TEST_WORK_FOLDER, certLocation.toString(), TEST_SERVER_CERT);
}
public static Path envCa(Path certLocation) {
return Paths.get(TEST_WORK_FOLDER, certLocation.toString(), TEST_CA_CERT);
}
public static Path testTruststorePath(String name) {
return Paths.get(TEST_WORK_FOLDER, name + '-' + TEST_TRUSTSTORE);
}
/**
* Creates an empty truststore.
*
* @return An empty KeyStore object.
* @throws KeyStoreException If there's an error initializing the truststore.
* @throws IOException If there's an error loading the truststore.
* @throws NoSuchAlgorithmException If the algorithm used to check the integrity of the truststore cannot be found.
* @throws CertificateException If any of the certificates in the truststore could not be loaded.
*/
private static KeyStore createTruststore()
throws KeyStoreException, IOException, NoSuchAlgorithmException, CertificateException {
KeyStore trustStore = KeyStore.getInstance(TRUST_STORE_TYPE);
trustStore.load(null, null);
return trustStore;
}
/**
* Loads an X.509 certificate from the given file path.
*
* @param certPath Path to the certificate file.
* @return An X509Certificate object.
* @throws Exception If there's an error loading the certificate.
*/
private static X509Certificate loadCertificate(Path certPath) throws Exception {
try (FileInputStream fis = new FileInputStream(certPath.toFile())) {
CertificateFactory certFactory = CertificateFactory.getInstance("X.509");
return (X509Certificate) certFactory.generateCertificate(fis);
}
}
/**
* Adds a trusted certificate to the given truststore.
*
* @param trustStore The KeyStore object.
* @param alias Alias for the certificate.
* @param certPath Path to the certificate file.
* @throws Exception If there's an error adding the certificate.
*/
private static void addTrustedCertificate(KeyStore trustStore, String alias, Path certPath) throws Exception {
X509Certificate cert = loadCertificate(certPath);
trustStore.setCertificateEntry(alias, cert);
}
/**
* Creates a truststore, adds multiple trusted certificates, and saves it to the specified path.
*
* @param trustedCertPaths List of certificate file paths to add to the truststore.
* @param truststorePath Path to save the generated truststore.
* @param truststorePassword Password for the truststore.
* @return Path to the saved truststore file.
*/
public static Path createAndSaveTruststore(List<Path> trustedCertPaths, Path truststorePath, String truststorePassword) {
try {
KeyStore trustStore = createTruststore();
for (Path certPath : trustedCertPaths) {
addTrustedCertificate(trustStore, "trusted-cert-" + UUID.randomUUID(), certPath);
}
try (FileOutputStream fos = new FileOutputStream(truststorePath.toFile())) {
trustStore.store(fos, truststorePassword.toCharArray());
} catch (IOException e) {
throw new RuntimeException("Failed to save truststore to " + truststorePath + ": " + e.getMessage(), e);
}
} catch (Exception e) {
throw new RuntimeException("Failed to create and save truststore: " + e.getMessage(), e);
}
return truststorePath;
}
public static Path createAndSaveTestTruststore(String trustStoreName, Path certificateLocations,
String truststorePassword) {
List<Path> trustedCertPaths = new ArrayList<>();
trustedCertPaths.add(envCa(certificateLocations).toAbsolutePath());
trustedCertPaths.add(envServerCert(certificateLocations).toAbsolutePath());
Path trustStorePath = testTruststorePath(trustStoreName).toAbsolutePath();
return createAndSaveTruststore(trustedCertPaths, trustStorePath, truststorePassword);
}
}
|
TlsSettings
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/context/expression/FactoryBeanAccessTests.java
|
{
"start": 2816,
"end": 2911
}
|
class ____
implements org.springframework.expression.BeanResolver {
static
|
SimpleBeanResolver
|
java
|
micronaut-projects__micronaut-core
|
inject-java/src/main/java/io/micronaut/annotation/processing/AggregatingPackageElementVisitorProcessor.java
|
{
"start": 1130,
"end": 2365
}
|
class ____ extends PackageElementVisitorProcessor {
@Override
protected String getIncrementalProcessorType() {
return GRADLE_PROCESSING_AGGREGATING;
}
@Override
public Set<String> getSupportedAnnotationTypes() {
if (!hasVisitors()) {
return Collections.emptySet();
}
if (isIncremental(processingEnv)) {
var annotationNames = new HashSet<String>();
// try and narrow the annotations to only the ones interesting to the visitors
// if a visitor is interested in Object than fall back to all
for (PackageLoadedVisitor loadedVisitor : getPackageVisitors()) {
PackageElementVisitor<?> visitor = loadedVisitor.visitor();
Set<String> supportedAnnotationNames = visitor.getSupportedAnnotationNames();
if (supportedAnnotationNames.contains("*")) {
return super.getSupportedAnnotationTypes();
} else {
annotationNames.addAll(supportedAnnotationNames);
}
}
return annotationNames;
}
return super.getSupportedAnnotationTypes();
}
}
|
AggregatingPackageElementVisitorProcessor
|
java
|
apache__logging-log4j2
|
log4j-slf4j2-impl-fuzz-test/src/main/java/org/apache/logging/slf4j/fuzz/Slf4jLoggerFacade.java
|
{
"start": 1110,
"end": 2224
}
|
class ____ implements LoggerFacade {
private static final String FQCN = Slf4jLoggerFacade.class.getName();
private final Logger logger;
private Slf4jLoggerFacade(final Logger logger) {
this.logger = logger;
}
static Slf4jLoggerFacade ofClass(final Class<?> clazz) {
requireNonNull(clazz, "clazz");
final Logger logger = LoggerFactory.getLogger(clazz);
return new Slf4jLoggerFacade(logger);
}
private LoggingEventBuilder atError() {
final LoggingEventBuilder builder = logger.atError();
if (builder instanceof CallerBoundaryAware) {
((CallerBoundaryAware) builder).setCallerBoundary(FQCN);
}
return builder;
}
@Override
public void log(final String message) {
atError().log(message);
}
@Override
public void log(final String message, final Throwable throwable) {
atError().setCause(throwable).log(message);
}
@Override
public void log(final String message, final Object[] parameters) {
atError().log(message, parameters);
}
}
|
Slf4jLoggerFacade
|
java
|
google__jimfs
|
jimfs/src/main/java/com/google/common/jimfs/UserDefinedAttributeProvider.java
|
{
"start": 3444,
"end": 4762
}
|
class ____ extends AbstractAttributeView implements UserDefinedFileAttributeView {
View(FileLookup lookup) {
super(lookup);
}
@Override
public String name() {
return "user";
}
@Override
public List<String> list() throws IOException {
return userDefinedAttributes(lookupFile()).asList();
}
private byte[] getStoredBytes(String name) throws IOException {
byte[] bytes = (byte[]) lookupFile().getAttribute(name(), name);
if (bytes == null) {
throw new IllegalArgumentException("attribute '" + name() + ":" + name + "' is not set");
}
return bytes;
}
@Override
public int size(String name) throws IOException {
return getStoredBytes(name).length;
}
@Override
public int read(String name, ByteBuffer dst) throws IOException {
byte[] bytes = getStoredBytes(name);
dst.put(bytes);
return bytes.length;
}
@Override
public int write(String name, ByteBuffer src) throws IOException {
byte[] bytes = new byte[src.remaining()];
src.get(bytes);
lookupFile().setAttribute(name(), name, bytes);
return bytes.length;
}
@Override
public void delete(String name) throws IOException {
lookupFile().deleteAttribute(name(), name);
}
}
}
|
View
|
java
|
spring-projects__spring-security
|
kerberos/kerberos-core/src/main/java/org/springframework/security/kerberos/authentication/sun/SunJaasKerberosClient.java
|
{
"start": 3815,
"end": 4745
}
|
class ____ implements CallbackHandler {
private String username;
private String password;
private KerberosClientCallbackHandler(String username, String password) {
this.username = username;
this.password = password;
}
@Override
public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
for (Callback callback : callbacks) {
if (callback instanceof NameCallback) {
NameCallback ncb = (NameCallback) callback;
ncb.setName(this.username);
}
else if (callback instanceof PasswordCallback) {
PasswordCallback pwcb = (PasswordCallback) callback;
pwcb.setPassword(this.password.toCharArray());
}
else {
throw new UnsupportedCallbackException(callback,
"We got a " + callback.getClass().getCanonicalName()
+ ", but only NameCallback and PasswordCallback is supported");
}
}
}
}
}
|
KerberosClientCallbackHandler
|
java
|
spring-projects__spring-boot
|
core/spring-boot-autoconfigure/src/test/java/org/springframework/boot/autoconfigure/task/TaskExecutionAutoConfigurationTests.java
|
{
"start": 28538,
"end": 28639
}
|
class ____ {
}
@Configuration(proxyBeanMethods = false)
@EnableScheduling
static
|
AsyncConfiguration
|
java
|
apache__dubbo
|
dubbo-remoting/dubbo-remoting-http12/src/main/java/org/apache/dubbo/remoting/http12/message/codec/JsonPbCodecFactory.java
|
{
"start": 1422,
"end": 2040
}
|
class ____ implements HttpMessageEncoderFactory, HttpMessageDecoderFactory {
@Override
public HttpMessageCodec createCodec(URL url, FrameworkModel frameworkModel, String mediaType) {
return frameworkModel == FrameworkModel.defaultModel() ? JsonPbCodec.INSTANCE : new JsonPbCodec(frameworkModel);
}
@Override
public MediaType mediaType() {
return MediaType.APPLICATION_JSON;
}
@Override
public boolean supports(String mediaType) {
return mediaType.startsWith(mediaType().getName()) || mediaType.startsWith(MediaType.TEXT_JSON.getName());
}
}
|
JsonPbCodecFactory
|
java
|
apache__maven
|
its/core-it-suite/src/test/resources/mng-5958-lifecycle-phases/mng5958-extension/src/main/java/org/apache/maven/its/mng5958/AbstractLifecycleMapping.java
|
{
"start": 1090,
"end": 2007
}
|
class ____ implements LifecycleMapping {
private Map<String, Lifecycle> lifecycleMap;
public Map<String, Lifecycle> getLifecycles() {
if (lifecycleMap != null) {
return lifecycleMap;
}
lifecycleMap = new LinkedHashMap<>();
Lifecycle lifecycle = new Lifecycle();
lifecycle.setId("default");
lifecycle.setPhases(initPhases());
lifecycleMap.put("default", lifecycle);
return lifecycleMap;
}
public Map<String, String> getPhases(String lifecycle) {
Lifecycle lifecycleMapping = getLifecycles().get(lifecycle);
if (lifecycleMapping != null) {
return lifecycleMapping.getPhases();
}
return null;
}
public List<String> getOptionalMojos(String lifecycle) {
return null;
}
// raw map on purpose
protected abstract Map initPhases();
}
|
AbstractLifecycleMapping
|
java
|
google__guava
|
android/guava/src/com/google/common/reflect/ClassPath.java
|
{
"start": 5493,
"end": 6330
}
|
class ____ resources (jar files or directories)
* failed.
*/
public static ClassPath from(ClassLoader classloader) throws IOException {
ImmutableSet<LocationInfo> locations = locationsFrom(classloader);
// Add all locations to the scanned set so that in a classpath [jar1, jar2], where jar1 has a
// manifest with Class-Path pointing to jar2, we won't scan jar2 twice.
Set<File> scanned = new HashSet<>();
for (LocationInfo location : locations) {
scanned.add(location.file());
}
// Scan all locations
ImmutableSet.Builder<ResourceInfo> builder = ImmutableSet.builder();
for (LocationInfo location : locations) {
builder.addAll(location.scanResources(scanned));
}
return new ClassPath(builder.build());
}
/**
* Returns all resources loadable from the current
|
path
|
java
|
micronaut-projects__micronaut-core
|
inject-java/src/test/groovy/io/micronaut/inject/ordered/Apple.java
|
{
"start": 144,
"end": 177
}
|
class ____ implements Fruit {
}
|
Apple
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/ClassNewInstanceTest.java
|
{
"start": 5266,
"end": 5719
}
|
class ____ {
void f() {
try {
getClass().getDeclaredConstructor().newInstance();
} catch (ReflectiveOperationException e) {
e.printStackTrace();
}
}
}
""")
.doTest();
}
@Test
public void throwsException() {
testHelper
.addInputLines(
"in/Test.java",
"""
|
Test
|
java
|
elastic__elasticsearch
|
qa/smoke-test-http/src/internalClusterTest/java/org/elasticsearch/http/RestActionCancellationIT.java
|
{
"start": 2658,
"end": 9922
}
|
class ____ extends HttpSmokeTestCase {
public void testIndicesRecoveryRestCancellation() {
createIndex("test");
ensureGreen("test");
runRestActionCancellationTest(new Request(HttpGet.METHOD_NAME, "/_recovery"), RecoveryAction.NAME);
}
public void testCatRecoveryRestCancellation() {
createIndex("test");
ensureGreen("test");
runRestActionCancellationTest(new Request(HttpGet.METHOD_NAME, "/_cat/recovery"), RecoveryAction.NAME);
}
public void testClusterHealthRestCancellation() {
runRestActionCancellationTest(new Request(HttpGet.METHOD_NAME, "/_cluster/health"), TransportClusterHealthAction.NAME);
}
public void testClusterStateRestCancellation() {
runRestActionCancellationTest(new Request(HttpGet.METHOD_NAME, "/_cluster/state"), ClusterStateAction.NAME);
}
public void testGetAliasesCancellation() {
runRestActionCancellationTest(new Request(HttpGet.METHOD_NAME, "/_alias"), GetAliasesAction.NAME);
}
public void testCatAliasesCancellation() {
runRestActionCancellationTest(new Request(HttpGet.METHOD_NAME, "/_cat/aliases"), GetAliasesAction.NAME);
}
public void testGetComponentTemplateCancellation() {
runRestActionCancellationTest(new Request(HttpGet.METHOD_NAME, "/_component_template"), GetComponentTemplateAction.NAME);
}
public void testGetIndexTemplateCancellation() {
runRestActionCancellationTest(new Request(HttpGet.METHOD_NAME, "/_template"), GetIndexTemplatesAction.NAME);
}
public void testGetComposableTemplateCancellation() {
runRestActionCancellationTest(new Request(HttpGet.METHOD_NAME, "/_index_template"), GetComposableIndexTemplateAction.NAME);
}
public void testSimulateTemplateCancellation() {
runRestActionCancellationTest(
new Request(HttpPost.METHOD_NAME, "/_index_template/_simulate/random_index_template"),
SimulateTemplateAction.NAME
);
}
public void testSimulateIndexTemplateCancellation() {
createIndex("test");
runRestActionCancellationTest(
new Request(HttpPost.METHOD_NAME, "/_index_template/_simulate_index/test"),
SimulateIndexTemplateAction.NAME
);
}
public void testClusterGetSettingsCancellation() {
runRestActionCancellationTest(new Request(HttpGet.METHOD_NAME, "/_cluster/settings"), ClusterGetSettingsAction.NAME);
}
public void testGetPipelineCancellation() {
runRestActionCancellationTest(new Request(HttpGet.METHOD_NAME, "/_ingest/pipeline"), GetPipelineAction.NAME);
}
public void testGetMappingsCancellation() {
createIndex("test");
runRestActionCancellationTest(new Request(HttpGet.METHOD_NAME, "/test/_mappings"), GetMappingsAction.NAME);
}
public void testGetIndicesCancellation() {
createIndex("test");
runRestActionCancellationTest(new Request(HttpGet.METHOD_NAME, "/test"), GetIndexAction.NAME);
}
public void testGetIndexSettingsCancellation() {
createIndex("test");
runRestActionCancellationTest(new Request(HttpGet.METHOD_NAME, "/test/_settings"), GetSettingsAction.NAME);
}
private void runRestActionCancellationTest(Request request, String actionName) {
final var node = usually() ? internalCluster().getRandomNodeName() : internalCluster().startCoordinatingOnlyNode(Settings.EMPTY);
try (
var restClient = createRestClient(node);
var capturingAction = CancellableActionTestPlugin.capturingActionOnNode(actionName, node)
) {
final var responseFuture = new PlainActionFuture<Response>();
final var restInvocation = restClient.performRequestAsync(request, wrapAsRestResponseListener(responseFuture));
if (randomBoolean()) {
// cancel by aborting the REST request
capturingAction.captureAndCancel(restInvocation::cancel);
expectThrows(ExecutionException.class, CancellationException.class, () -> responseFuture.get(10, TimeUnit.SECONDS));
} else {
// cancel via the task management API
final var cancelFuture = new PlainActionFuture<Void>();
capturingAction.captureAndCancel(
() -> SubscribableListener
.<ObjectPath>newForked(
l -> restClient.performRequestAsync(
getListTasksRequest(node, actionName),
wrapAsRestResponseListener(l.map(ObjectPath::createFromResponse))
)
)
.<Void>andThen((l, listTasksResponse) -> {
final var taskCount = listTasksResponse.evaluateArraySize("tasks");
assertThat(taskCount, greaterThan(0));
try (var listeners = new RefCountingListener(l)) {
for (int i = 0; i < taskCount; i++) {
final var taskPrefix = "tasks." + i + ".";
assertTrue(listTasksResponse.evaluate(taskPrefix + "cancellable"));
assertFalse(listTasksResponse.evaluate(taskPrefix + "cancelled"));
restClient.performRequestAsync(
getCancelTaskRequest(
listTasksResponse.evaluate(taskPrefix + "node"),
listTasksResponse.evaluate(taskPrefix + "id")
),
wrapAsRestResponseListener(listeners.acquire(HttpSmokeTestCase::assertOK))
);
}
}
})
.addListener(cancelFuture)
);
cancelFuture.get(10, TimeUnit.SECONDS);
expectThrows(Exception.class, () -> responseFuture.get(10, TimeUnit.SECONDS));
}
assertAllTasksHaveFinished(actionName);
} catch (Exception e) {
fail(e);
}
}
private static Request getListTasksRequest(String taskNode, String actionName) {
final var listTasksRequest = new Request(HttpGet.METHOD_NAME, "/_tasks");
listTasksRequest.addParameter("nodes", taskNode);
listTasksRequest.addParameter("actions", actionName);
listTasksRequest.addParameter("group_by", "none");
return listTasksRequest;
}
private static Request getCancelTaskRequest(String taskNode, int taskId) {
final var cancelTaskRequest = new Request(HttpPost.METHOD_NAME, Strings.format("/_tasks/%s:%d/_cancel", taskNode, taskId));
cancelTaskRequest.addParameter("wait_for_completion", null);
return cancelTaskRequest;
}
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return CollectionUtils.appendToCopy(super.nodePlugins(), CancellableActionTestPlugin.class);
}
}
|
RestActionCancellationIT
|
java
|
apache__camel
|
components/camel-joor/src/main/java/org/apache/camel/language/joor/JavaJoorClassLoader.java
|
{
"start": 1051,
"end": 3835
}
|
class ____ extends ClassLoader {
private static final Logger LOG = LoggerFactory.getLogger(JavaJoorClassLoader.class);
private final Map<String, Class<?>> classes = new HashMap<>();
private String compileDirectory;
public JavaJoorClassLoader() {
super(JavaJoorClassLoader.class.getClassLoader());
}
public String getCompileDirectory() {
return compileDirectory;
}
public void setCompileDirectory(String compileDirectory) {
this.compileDirectory = compileDirectory;
}
@Override
public String getName() {
return "JavaJoorClassLoader";
}
@Override
protected Class<?> findClass(String name) throws ClassNotFoundException {
Class<?> clazz = classes.get(name);
if (clazz != null) {
return clazz;
}
throw new ClassNotFoundException(name);
}
@Override
public Class<?> loadClass(String name) throws ClassNotFoundException {
Class<?> clazz = classes.get(name);
if (clazz != null) {
return clazz;
}
throw new ClassNotFoundException(name);
}
public void addClass(String name, Class<?> clazz, byte[] code) {
if (name != null && clazz != null) {
classes.put(name, clazz);
}
if (name != null && code != null && compileDirectory != null) {
saveByteCodeToDisk(compileDirectory, name, code);
}
}
public void removeClass(String name) {
if (name != null) {
classes.remove(name);
if (compileDirectory != null) {
String fname = name.replace('.', '/');
fname = compileDirectory + "/" + fname + ".class";
File target = new File(fname);
FileUtil.deleteFile(target);
}
}
}
private static void saveByteCodeToDisk(String outputDirectory, String name, byte[] byteCode) {
// write to disk (can be triggered multiple times so only write once)
String fname = name.replace('.', '/');
fname = outputDirectory + "/" + fname + ".class";
File target = new File(fname);
if (!target.exists()) {
// create work-dir if needed
String dir = FileUtil.onlyPath(fname);
new File(dir).mkdirs();
try (FileOutputStream fos = new FileOutputStream(target)) {
LOG.debug("Writing compiled class: {} as bytecode to file: {}", name, target);
fos.write(byteCode);
} catch (Exception e) {
LOG.warn("Error writing compiled class: {} as bytecode to file: {} due to {}. This exception is ignored.",
name, target, e.getMessage());
}
}
}
}
|
JavaJoorClassLoader
|
java
|
spring-projects__spring-framework
|
spring-expression/src/test/java/org/springframework/expression/spel/SpelCompilationCoverageTests.java
|
{
"start": 253410,
"end": 253595
}
|
class ____ {
Three three = new Three();
public Three getThree() {
return three;
}
@Override
public String toString() {
return "instanceof Two";
}
}
public static
|
Two
|
java
|
micronaut-projects__micronaut-core
|
http-server-netty/src/main/java/io/micronaut/http/server/netty/handler/accesslog/element/RequestMethodElementBuilder.java
|
{
"start": 763,
"end": 1056
}
|
class ____ implements LogElementBuilder {
@Override
public LogElement build(String token, String param) {
if (RequestMethodElement.REQUEST_METHOD.equals(token)) {
return RequestMethodElement.INSTANCE;
}
return null;
}
}
|
RequestMethodElementBuilder
|
java
|
reactor__reactor-core
|
reactor-core/src/main/java/reactor/core/scheduler/Scheduler.java
|
{
"start": 1386,
"end": 7542
}
|
interface ____ extends Disposable {
/**
* Schedules the non-delayed execution of the given task on this scheduler.
*
* <p>
* This method is safe to be called from multiple threads but there are no
* ordering guarantees between tasks.
*
* @param task the task to execute
*
* @return the {@link Disposable} instance that lets one cancel this particular task.
* If the {@link Scheduler} has been shut down, throw a {@link RejectedExecutionException}.
*/
Disposable schedule(Runnable task);
/**
* Schedules the execution of the given task with the given delay amount.
*
* <p>
* This method is safe to be called from multiple threads but there are no
* ordering guarantees between tasks.
*
* @param task the task to schedule
* @param delay the delay amount, non-positive values indicate non-delayed scheduling
* @param unit the unit of measure of the delay amount
* @return the {@link Disposable} that lets one cancel this particular delayed task,
* or throw a {@link RejectedExecutionException} if the Scheduler is not capable of scheduling with delay.
*/
default Disposable schedule(Runnable task, long delay, TimeUnit unit) {
throw Exceptions.failWithRejectedNotTimeCapable();
}
/**
* Schedules a periodic execution of the given task with the given initial delay and period.
*
* <p>
* This method is safe to be called from multiple threads but there are no
* ordering guarantees between tasks.
*
* <p>
* The periodic execution is at a fixed rate, that is, the first execution will be after the initial
* delay, the second after initialDelay + period, the third after initialDelay + 2 * period, and so on.
*
* @param task the task to schedule
* @param initialDelay the initial delay amount, non-positive values indicate non-delayed scheduling
* @param period the period at which the task should be re-executed
* @param unit the unit of measure of the delay amount
* @return the {@link Disposable} that lets one cancel this particular delayed task,
* or throw a {@link RejectedExecutionException} if the Scheduler is not capable of scheduling periodically.
*/
default Disposable schedulePeriodically(Runnable task, long initialDelay, long period, TimeUnit unit) {
throw Exceptions.failWithRejectedNotTimeCapable();
}
/**
* Returns the "current time" notion of this scheduler.
*
* <p>
* <strong>Implementation Note:</strong> The default implementation uses {@link System#currentTimeMillis()}
* when requested with a {@code TimeUnit} of {@link TimeUnit#MILLISECONDS milliseconds} or coarser, and
* {@link System#nanoTime()} otherwise. As a consequence, results should not be interpreted as absolute timestamps
* in the latter case, only monotonicity inside the current JVM can be expected.
* </p>
* @param unit the target unit of the current time
* @return the current time value in the target unit of measure
*/
default long now(TimeUnit unit) {
if (unit.compareTo(TimeUnit.MILLISECONDS) >= 0) {
return unit.convert(System.currentTimeMillis(), TimeUnit.MILLISECONDS);
} else {
return unit.convert(System.nanoTime(), TimeUnit.NANOSECONDS);
}
}
/**
* Creates a worker of this Scheduler.
* <p>
* Once the Worker is no longer in use, one should call dispose() on it to
* release any resources the particular Scheduler may have used.
*
* It depends on the implementation, but Scheduler Workers should usually run tasks in
* FIFO order. Some implementations may entirely delegate the scheduling to an
* underlying structure (like an {@link ExecutorService}).
*
* @return the Worker instance.
*/
Worker createWorker();
/**
* Instructs this Scheduler to release all resources and reject
* any new tasks to be executed.
*
* <p>The operation is thread-safe.
*
* <p>The Scheduler may choose to ignore this instruction.
* <p>When used in combination with {@link #disposeGracefully()}
* there are no guarantees that all resources will be forcefully shut down.
* When a graceful disposal has started, the references to the underlying
* {@link java.util.concurrent.Executor}s might have already been lost.
*/
default void dispose() {
}
/**
* Lazy variant of {@link #dispose()} that also allows for graceful cleanup
* of underlying resources.
* <p>It is advised to apply a {@link Mono#timeout(Duration)} operator to the
* resulting {@link Mono}.
* <p>The returned {@link Mono} can be {@link Mono#retry(long) retried} in case of
* {@link java.util.concurrent.TimeoutException timeout errors}. It can also be
* followed by a call to {@link #dispose()} to issue a forceful shutdown of
* underlying resources.
*
* @return {@link Mono} which upon subscription initiates the graceful dispose
* procedure. If the disposal is successful, the returned {@link Mono} completes
* without an error.
*/
default Mono<Void> disposeGracefully() {
return Mono.fromRunnable(this::dispose);
}
/**
* Instructs this Scheduler to prepare itself for running tasks
* directly or through its Workers.
*
* <p>The operation is thread-safe but one should avoid using
* start() and dispose() concurrently as it would non-deterministically
* leave the Scheduler in either active or inactive state.
*
* @deprecated Use {@link #init()} instead. The use of this method is discouraged.
* Some implementations allowed restarting a Scheduler, while others did not. One
* of the issues with restarting is that checking
* {@link #isDisposed() the disposed state} is unreliable in concurrent scenarios.
* @see #init()
*/
@Deprecated
default void start() {
}
/**
* Instructs this Scheduler to prepare itself for running tasks
* directly or through its {@link Worker}s.
*
* <p>Implementations are encouraged to throw an exception if this method is called
* after the scheduler has been disposed via {@link #dispose()}
* or {@link #disposeGracefully()}.
*/
default void init() {
start();
}
/**
* A worker representing an asynchronous boundary that executes tasks.
*
* @author Stephane Maldini
* @author Simon Baslé
*/
|
Scheduler
|
java
|
apache__camel
|
components/camel-bean-validator/src/test/java/org/apache/camel/component/bean/validator/ValidatorFactoryAutowireTest.java
|
{
"start": 1315,
"end": 2205
}
|
class ____ extends CamelTestSupport {
@BindToRegistry("myValidatorFactory")
private ValidatorFactory validatorFactory;
@Override
public void doPreSetup() {
GenericBootstrap bootstrap = Validation.byDefaultProvider();
bootstrap.providerResolver(new HibernateValidationProviderResolver());
this.validatorFactory = bootstrap.configure().buildValidatorFactory();
}
@Test
void configureValidatorFactoryAutowired() throws Exception {
BeanValidatorEndpoint endpoint
= context.getEndpoint("bean-validator:dummy", BeanValidatorEndpoint.class);
BeanValidatorProducer producer = (BeanValidatorProducer) endpoint.createProducer();
assertSame(this.validatorFactory, endpoint.getValidatorFactory());
assertSame(this.validatorFactory, producer.getValidatorFactory());
}
}
|
ValidatorFactoryAutowireTest
|
java
|
micronaut-projects__micronaut-core
|
aop/src/main/java/io/micronaut/aop/Around.java
|
{
"start": 1284,
"end": 1745
}
|
interface ____ {
* }
* </code></pre>
*
* <p>Note that the annotation MUST be {@link java.lang.annotation.RetentionPolicy#RUNTIME} and the specified {@link io.micronaut.context.annotation.Type} must implement {@link MethodInterceptor}</p>
*
* @author Graeme Rocher
* @since 1.0
*/
@Documented
@Retention(RUNTIME)
@Target({ElementType.ANNOTATION_TYPE, ElementType.TYPE, ElementType.METHOD})
@InterceptorBinding(kind = InterceptorKind.AROUND)
public @
|
Example
|
java
|
apache__kafka
|
clients/src/main/java/org/apache/kafka/common/quota/ClientQuotaFilter.java
|
{
"start": 984,
"end": 3486
}
|
class ____ {
private final Collection<ClientQuotaFilterComponent> components;
private final boolean strict;
/**
* A filter to be applied to matching client quotas.
*
* @param components the components to filter on
* @param strict whether the filter only includes specified components
*/
private ClientQuotaFilter(Collection<ClientQuotaFilterComponent> components, boolean strict) {
this.components = components;
this.strict = strict;
}
/**
* Constructs and returns a quota filter that matches all provided components. Matching entities
* with entity types that are not specified by a component will also be included in the result.
*
* @param components the components for the filter
*/
public static ClientQuotaFilter contains(Collection<ClientQuotaFilterComponent> components) {
return new ClientQuotaFilter(components, false);
}
/**
* Constructs and returns a quota filter that matches all provided components. Matching entities
* with entity types that are not specified by a component will *not* be included in the result.
*
* @param components the components for the filter
*/
public static ClientQuotaFilter containsOnly(Collection<ClientQuotaFilterComponent> components) {
return new ClientQuotaFilter(components, true);
}
/**
* Constructs and returns a quota filter that matches all configured entities.
*/
public static ClientQuotaFilter all() {
return new ClientQuotaFilter(Collections.emptyList(), false);
}
/**
* @return the filter's components
*/
public Collection<ClientQuotaFilterComponent> components() {
return this.components;
}
/**
* @return whether the filter is strict, i.e. only includes specified components
*/
public boolean strict() {
return this.strict;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ClientQuotaFilter that = (ClientQuotaFilter) o;
return Objects.equals(components, that.components) && Objects.equals(strict, that.strict);
}
@Override
public int hashCode() {
return Objects.hash(components, strict);
}
@Override
public String toString() {
return "ClientQuotaFilter(components=" + components + ", strict=" + strict + ")";
}
}
|
ClientQuotaFilter
|
java
|
apache__camel
|
components/camel-fhir/camel-fhir-api/src/main/java/org/apache/camel/component/fhir/api/FhirUpdate.java
|
{
"start": 1358,
"end": 7884
}
|
class ____ {
private final IGenericClient client;
public FhirUpdate(IGenericClient client) {
this.client = client;
}
/**
* Updates a {@link IBaseResource} on the server by id
*
* @param resource The resource to update (e.g. Patient)
* @param id the {@link IIdType} referencing the resource
* @param preferReturn whether the server include or suppress the resource body as a part of the result
* @param extraParameters see {@link ExtraParameters} for a full list of parameters that can be passed, may be NULL
*/
public MethodOutcome resource(
IBaseResource resource, IIdType id, PreferReturnEnum preferReturn, Map<ExtraParameters, Object> extraParameters) {
IUpdateTyped updateTyped = client.update().resource(resource);
IUpdateExecutable updateExecutable = withOptionalId(id, updateTyped);
ExtraParameters.process(extraParameters, updateExecutable);
return processOptionalParam(preferReturn, updateExecutable);
}
/**
* Updates a {@link IBaseResource} on the server by id
*
* @param resourceAsString The resource body to update
* @param id the {@link IIdType} referencing the resource
* @param preferReturn whether the server include or suppress the resource body as a part of the result
* @param extraParameters see {@link ExtraParameters} for a full list of parameters that can be passed, may be NULL
*/
public MethodOutcome resource(
String resourceAsString, IIdType id, PreferReturnEnum preferReturn, Map<ExtraParameters, Object> extraParameters) {
IUpdateTyped updateTyped = client.update().resource(resourceAsString);
IUpdateExecutable updateExecutable = withOptionalId(id, updateTyped);
ExtraParameters.process(extraParameters, updateExecutable);
return processOptionalParam(preferReturn, updateExecutable);
}
/**
* Updates a {@link IBaseResource} on the server by id
*
* @param resource The resource to update (e.g. Patient)
* @param stringId the ID referencing the resource
* @param preferReturn whether the server include or suppress the resource body as a part of the result
* @param extraParameters see {@link ExtraParameters} for a full list of parameters that can be passed, may be NULL
*/
public MethodOutcome resource(
IBaseResource resource, String stringId, PreferReturnEnum preferReturn,
Map<ExtraParameters, Object> extraParameters) {
IUpdateTyped updateTyped = client.update().resource(resource);
IUpdateExecutable updateExecutable = withOptionalId(stringId, updateTyped);
ExtraParameters.process(extraParameters, updateExecutable);
return processOptionalParam(preferReturn, updateExecutable);
}
/**
* Updates a {@link IBaseResource} on the server by id
*
* @param resourceAsString The resource body to update
* @param stringId the ID referencing the resource
* @param preferReturn whether the server include or suppress the resource body as a part of the result
* @param extraParameters see {@link ExtraParameters} for a full list of parameters that can be passed, may be NULL
*/
public MethodOutcome resource(
String resourceAsString, String stringId, PreferReturnEnum preferReturn,
Map<ExtraParameters, Object> extraParameters) {
IUpdateTyped updateTyped = client.update().resource(resourceAsString);
IUpdateExecutable updateExecutable = withOptionalId(stringId, updateTyped);
ExtraParameters.process(extraParameters, updateExecutable);
return processOptionalParam(preferReturn, updateExecutable);
}
/**
* Updates a {@link IBaseResource} on the server by search url
*
* @param resource The resource to update (e.g. Patient)
* @param url Specifies that the update should be performed as a conditional create against a given
* search URL.
* @param preferReturn whether the server include or suppress the resource body as a part of the result
* @param extraParameters see {@link ExtraParameters} for a full list of parameters that can be passed, may be NULL
*/
public MethodOutcome resourceBySearchUrl(
IBaseResource resource, String url, PreferReturnEnum preferReturn, Map<ExtraParameters, Object> extraParameters) {
IUpdateExecutable updateExecutable = client.update().resource(resource).conditionalByUrl(url);
ExtraParameters.process(extraParameters, updateExecutable);
return processOptionalParam(preferReturn, updateExecutable);
}
/**
* Updates a {@link IBaseResource} on the server by search url
*
* @param resourceAsString The resource body to update
* @param url Specifies that the update should be performed as a conditional create against a given
* search URL.
* @param preferReturn whether the server include or suppress the resource body as a part of the result
* @param extraParameters see {@link ExtraParameters} for a full list of parameters that can be passed, may be NULL
*/
public MethodOutcome resourceBySearchUrl(
String resourceAsString, String url, PreferReturnEnum preferReturn, Map<ExtraParameters, Object> extraParameters) {
IUpdateExecutable updateExecutable = client.update().resource(resourceAsString).conditionalByUrl(url);
ExtraParameters.process(extraParameters, updateExecutable);
return processOptionalParam(preferReturn, updateExecutable);
}
private MethodOutcome processOptionalParam(PreferReturnEnum preferReturn, IUpdateExecutable updateExecutable) {
if (preferReturn != null) {
return updateExecutable.prefer(preferReturn).execute();
}
return updateExecutable.execute();
}
private IUpdateExecutable withOptionalId(IIdType id, IUpdateTyped updateTyped) {
if (ObjectHelper.isNotEmpty(id)) {
return updateTyped.withId(id);
} else {
return updateTyped;
}
}
private IUpdateExecutable withOptionalId(String stringId, IUpdateTyped updateTyped) {
if (ObjectHelper.isNotEmpty(stringId)) {
return updateTyped.withId(stringId);
} else {
return updateTyped;
}
}
}
|
FhirUpdate
|
java
|
apache__flink
|
flink-table/flink-table-common/src/main/java/org/apache/flink/table/functions/VectorSearchFunction.java
|
{
"start": 1299,
"end": 2489
}
|
class ____ extends TableFunction<RowData> {
/**
* Synchronously search result based on input row to find topK matched rows.
*
* @param topK - The number of topK results to return.
* @param queryData - A {@link RowData} that wraps input for vector search function.
* @return A collection of predicted results.
*/
public abstract Collection<RowData> vectorSearch(int topK, RowData queryData)
throws IOException;
/** Invoke {@link #vectorSearch} and handle exceptions. */
public final void eval(Object... args) {
int topK = (int) args[0];
GenericRowData argsData = new GenericRowData(args.length - 1);
for (int i = 1; i < args.length; ++i) {
argsData.setField(i - 1, args[i]);
}
try {
Collection<RowData> results = vectorSearch(topK, argsData);
if (results == null) {
return;
}
results.forEach(this::collect);
} catch (Exception e) {
throw new FlinkRuntimeException(
String.format("Failed to execute search with input row %s.", argsData), e);
}
}
}
|
VectorSearchFunction
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.