language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGetSpaceUsed.java | {
"start": 4123,
"end": 4329
} | class ____ implements GetSpaceUsed {
public DummyGetSpaceUsed(GetSpaceUsed.Builder builder) {
}
@Override public long getUsed() throws IOException {
return 300;
}
}
} | DummyGetSpaceUsed |
java | apache__kafka | metadata/src/test/java/org/apache/kafka/controller/BrokerHeartbeatManagerTest.java | {
"start": 2013,
"end": 12085
} | class ____ {
private static BrokerHeartbeatManager newBrokerHeartbeatManager() {
LogContext logContext = new LogContext();
MockTime time = new MockTime(0, 1_000_000, 0);
return new BrokerHeartbeatManager(logContext, time, 10_000_000);
}
@Test
public void testHasValidSession() {
BrokerHeartbeatManager manager = newBrokerHeartbeatManager();
MockTime time = (MockTime) manager.time();
assertFalse(manager.hasValidSession(0, 100L));
for (int brokerId = 0; brokerId < 3; brokerId++) {
manager.register(brokerId, true);
}
manager.tracker().updateContactTime(new BrokerIdAndEpoch(0, 100L));
manager.touch(0, false, 0);
time.sleep(5);
manager.tracker().updateContactTime(new BrokerIdAndEpoch(1, 100L));
manager.touch(1, false, 0);
manager.tracker().updateContactTime(new BrokerIdAndEpoch(2, 200L));
manager.touch(2, false, 0);
assertTrue(manager.hasValidSession(0, 100L));
assertFalse(manager.hasValidSession(0, 200L));
assertTrue(manager.hasValidSession(1, 100L));
assertTrue(manager.hasValidSession(2, 200L));
assertFalse(manager.hasValidSession(3, 300L));
}
@Test
public void testMetadataOffsetComparator() {
TreeSet<BrokerHeartbeatState> set =
new TreeSet<>(BrokerHeartbeatManager.MetadataOffsetComparator.INSTANCE);
BrokerHeartbeatState broker1 = new BrokerHeartbeatState(1, false, -1L, -1L);
BrokerHeartbeatState broker2 = new BrokerHeartbeatState(2, false, -1L, -1L);
BrokerHeartbeatState broker3 = new BrokerHeartbeatState(3, false, -1L, -1L);
set.add(broker1);
set.add(broker2);
set.add(broker3);
Iterator<BrokerHeartbeatState> iterator = set.iterator();
assertEquals(broker1, iterator.next());
assertEquals(broker2, iterator.next());
assertEquals(broker3, iterator.next());
assertFalse(iterator.hasNext());
assertTrue(set.remove(broker1));
assertTrue(set.remove(broker2));
assertTrue(set.remove(broker3));
assertTrue(set.isEmpty());
broker1.setMetadataOffset(800);
broker2.setMetadataOffset(400);
broker3.setMetadataOffset(100);
set.add(broker1);
set.add(broker2);
set.add(broker3);
iterator = set.iterator();
assertEquals(broker3, iterator.next());
assertEquals(broker2, iterator.next());
assertEquals(broker1, iterator.next());
assertFalse(iterator.hasNext());
}
private static Set<UsableBroker> usableBrokersToSet(BrokerHeartbeatManager manager) {
Set<UsableBroker> brokers = new HashSet<>();
for (Iterator<UsableBroker> iterator = new UsableBrokerIterator(
manager.brokers().iterator(),
id -> id % 2 == 0 ? Optional.of("rack1") : Optional.of("rack2"));
iterator.hasNext(); ) {
brokers.add(iterator.next());
}
return brokers;
}
@Test
public void testUsableBrokerIterator() {
BrokerHeartbeatManager manager = newBrokerHeartbeatManager();
assertEquals(Set.of(), usableBrokersToSet(manager));
for (int brokerId = 0; brokerId < 5; brokerId++) {
manager.register(brokerId, true);
}
manager.touch(0, false, 100);
manager.touch(1, false, 100);
manager.touch(2, false, 98);
manager.touch(3, false, 100);
manager.touch(4, true, 100);
assertEquals(98L, manager.lowestActiveOffset());
Set<UsableBroker> expected = new HashSet<>();
expected.add(new UsableBroker(0, Optional.of("rack1"), false));
expected.add(new UsableBroker(1, Optional.of("rack2"), false));
expected.add(new UsableBroker(2, Optional.of("rack1"), false));
expected.add(new UsableBroker(3, Optional.of("rack2"), false));
expected.add(new UsableBroker(4, Optional.of("rack1"), true));
assertEquals(expected, usableBrokersToSet(manager));
manager.maybeUpdateControlledShutdownOffset(2, 0);
assertEquals(100L, manager.lowestActiveOffset());
assertThrows(RuntimeException.class,
() -> manager.maybeUpdateControlledShutdownOffset(4, 0));
manager.touch(4, false, 100);
manager.maybeUpdateControlledShutdownOffset(4, 0);
expected.remove(new UsableBroker(2, Optional.of("rack1"), false));
expected.remove(new UsableBroker(4, Optional.of("rack1"), true));
assertEquals(expected, usableBrokersToSet(manager));
}
@Test
public void testControlledShutdownOffsetIsOnlyUpdatedOnce() {
BrokerHeartbeatManager manager = newBrokerHeartbeatManager();
assertEquals(Set.of(), usableBrokersToSet(manager));
for (int brokerId = 0; brokerId < 5; brokerId++) {
manager.register(brokerId, true);
}
manager.touch(0, false, 100);
manager.touch(1, false, 100);
manager.touch(2, false, 98);
manager.touch(3, false, 100);
manager.touch(4, true, 100);
assertEquals(OptionalLong.empty(), manager.controlledShutdownOffset(2));
manager.maybeUpdateControlledShutdownOffset(2, 98);
assertEquals(OptionalLong.of(98), manager.controlledShutdownOffset(2));
manager.maybeUpdateControlledShutdownOffset(2, 99);
assertEquals(OptionalLong.of(98), manager.controlledShutdownOffset(2));
assertEquals(OptionalLong.empty(), manager.controlledShutdownOffset(3));
manager.maybeUpdateControlledShutdownOffset(3, 101);
assertEquals(OptionalLong.of(101), manager.controlledShutdownOffset(3));
manager.maybeUpdateControlledShutdownOffset(3, 102);
assertEquals(OptionalLong.of(101), manager.controlledShutdownOffset(3));
}
@Test
public void testCalculateNextBrokerState() {
BrokerHeartbeatManager manager = newBrokerHeartbeatManager();
for (int brokerId = 0; brokerId < 6; brokerId++) {
manager.register(brokerId, true);
}
manager.touch(0, true, 100);
manager.touch(1, false, 98);
manager.touch(2, false, 100);
manager.touch(3, false, 100);
manager.touch(4, true, 100);
manager.touch(5, false, 99);
manager.maybeUpdateControlledShutdownOffset(5, 99);
assertEquals(98L, manager.lowestActiveOffset());
assertEquals(new BrokerControlStates(FENCED, SHUTDOWN_NOW),
manager.calculateNextBrokerState(0,
new BrokerHeartbeatRequestData().setWantShutDown(true), 100, () -> false));
assertEquals(new BrokerControlStates(FENCED, UNFENCED),
manager.calculateNextBrokerState(0,
new BrokerHeartbeatRequestData().setWantFence(false).
setCurrentMetadataOffset(100), 100, () -> false));
assertEquals(new BrokerControlStates(FENCED, FENCED),
manager.calculateNextBrokerState(0,
new BrokerHeartbeatRequestData().setWantFence(false).
setCurrentMetadataOffset(50), 100, () -> false));
assertEquals(new BrokerControlStates(FENCED, FENCED),
manager.calculateNextBrokerState(0,
new BrokerHeartbeatRequestData().setWantFence(true), 100, () -> false));
assertEquals(new BrokerControlStates(UNFENCED, CONTROLLED_SHUTDOWN),
manager.calculateNextBrokerState(1,
new BrokerHeartbeatRequestData().setWantShutDown(true), 100, () -> true));
assertEquals(new BrokerControlStates(UNFENCED, SHUTDOWN_NOW),
manager.calculateNextBrokerState(1,
new BrokerHeartbeatRequestData().setWantShutDown(true), 100, () -> false));
assertEquals(new BrokerControlStates(UNFENCED, UNFENCED),
manager.calculateNextBrokerState(1,
new BrokerHeartbeatRequestData().setWantFence(false), 100, () -> false));
assertEquals(new BrokerControlStates(CONTROLLED_SHUTDOWN, CONTROLLED_SHUTDOWN),
manager.calculateNextBrokerState(5,
new BrokerHeartbeatRequestData().setWantShutDown(true), 100, () -> true));
assertEquals(new BrokerControlStates(CONTROLLED_SHUTDOWN, CONTROLLED_SHUTDOWN),
manager.calculateNextBrokerState(5,
new BrokerHeartbeatRequestData().setWantShutDown(true), 100, () -> false));
manager.fence(1);
assertEquals(new BrokerControlStates(CONTROLLED_SHUTDOWN, SHUTDOWN_NOW),
manager.calculateNextBrokerState(5,
new BrokerHeartbeatRequestData().setWantShutDown(true), 100, () -> false));
assertEquals(new BrokerControlStates(CONTROLLED_SHUTDOWN, CONTROLLED_SHUTDOWN),
manager.calculateNextBrokerState(5,
new BrokerHeartbeatRequestData().setWantShutDown(true), 100, () -> true));
assertEquals("Broker 6 is not registered.",
assertThrows(IllegalStateException.class,
() -> manager.calculateNextBrokerState(6, new BrokerHeartbeatRequestData().setWantShutDown(true), 100, () -> true)).getMessage());
assertEquals("Broker 7 is not registered.",
assertThrows(IllegalStateException.class,
() -> manager.calculateNextBrokerState(7, new BrokerHeartbeatRequestData().setWantShutDown(true), 100, () -> true)).getMessage());
}
@Test
public void testTouchThrowsExceptionUnlessRegistered() {
BrokerHeartbeatManager manager = newBrokerHeartbeatManager();
manager.register(1, true);
manager.register(3, true);
assertEquals("Broker 2 is not registered.",
assertThrows(IllegalStateException.class,
() -> manager.touch(2, false, 0)).getMessage());
assertEquals("Broker 4 is not registered.",
assertThrows(IllegalStateException.class,
() -> manager.touch(4, false, 0)).getMessage());
}
}
| BrokerHeartbeatManagerTest |
java | google__dagger | javatests/dagger/functional/staticprovides/SomeStaticModule.java | {
"start": 745,
"end": 1159
} | class ____ {
@Provides
@IntoSet
static String contributeStringFromAStaticMethod() {
return SomeStaticModule.class + ".contributeStringFromAStaticMethod";
}
@SuppressWarnings("StaticModuleMethods") // Purposely non-static for tests
@Provides
@IntoSet
String contributeStringFromAnInstanceMethod() {
return SomeStaticModule.class + ".contributeStringFromAnInstanceMethod";
}
}
| SomeStaticModule |
java | spring-projects__spring-boot | test-support/spring-boot-test-support/src/main/java/org/springframework/boot/testsupport/ssl/MockPkcs11SecurityProviderExtension.java | {
"start": 1062,
"end": 1442
} | class ____ implements BeforeAllCallback, AfterAllCallback {
@Override
public void beforeAll(ExtensionContext context) throws Exception {
Security.addProvider(MockPkcs11SecurityProvider.INSTANCE);
}
@Override
public void afterAll(ExtensionContext context) throws Exception {
Security.removeProvider(MockPkcs11SecurityProvider.NAME);
}
}
| MockPkcs11SecurityProviderExtension |
java | apache__spark | sql/core/src/main/java/org/apache/spark/sql/execution/datasources/orc/OrcColumnarBatchReader.java | {
"start": 1982,
"end": 9059
} | class ____ extends RecordReader<Void, ColumnarBatch> {
// The capacity of vectorized batch.
private int capacity;
// Vectorized ORC Row Batch wrap.
private VectorizedRowBatchWrap wrap;
/**
* The column IDs of the physical ORC file schema which are required by this reader.
* -1 means this required column is partition column, or it doesn't exist in the ORC file.
* Ideally partition column should never appear in the physical file, and should only appear
* in the directory name. However, Spark allows partition columns inside physical file,
* but Spark will discard the values from the file, and use the partition value got from
* directory name. The column order will be reserved though.
*/
@VisibleForTesting
public int[] requestedDataColIds;
// Record reader from ORC row batch.
private org.apache.orc.RecordReader recordReader;
private StructField[] requiredFields;
// The result columnar batch for vectorized execution by whole-stage codegen.
@VisibleForTesting
public ColumnarBatch columnarBatch;
private final MemoryMode memoryMode;
// The wrapped ORC column vectors.
private org.apache.spark.sql.vectorized.ColumnVector[] orcVectorWrappers;
public OrcColumnarBatchReader(int capacity, MemoryMode memoryMode) {
this.capacity = capacity;
this.memoryMode = memoryMode;
}
@Override
public Void getCurrentKey() {
return null;
}
@Override
public ColumnarBatch getCurrentValue() {
return columnarBatch;
}
@Override
public float getProgress() throws IOException {
return recordReader.getProgress();
}
@Override
public boolean nextKeyValue() throws IOException {
return nextBatch();
}
@Override
public void close() throws IOException {
if (columnarBatch != null) {
columnarBatch.close();
columnarBatch = null;
}
if (recordReader != null) {
recordReader.close();
recordReader = null;
}
}
/**
* Initialize ORC file reader and batch record reader.
* Please note that `initBatch` is needed to be called after this.
*/
@Override
public void initialize(
InputSplit inputSplit, TaskAttemptContext taskAttemptContext) throws IOException {
initialize(inputSplit, taskAttemptContext, null);
}
public void initialize(
InputSplit inputSplit,
TaskAttemptContext taskAttemptContext,
OrcTail orcTail) throws IOException {
FileSplit fileSplit = (FileSplit)inputSplit;
Configuration conf = taskAttemptContext.getConfiguration();
Reader reader = OrcFile.createReader(
fileSplit.getPath(),
OrcFile.readerOptions(conf)
.maxLength(OrcConf.MAX_FILE_LENGTH.getLong(conf))
.filesystem(fileSplit.getPath().getFileSystem(conf))
.orcTail(orcTail));
Reader.Options options =
OrcInputFormat.buildOptions(conf, reader, fileSplit.getStart(), fileSplit.getLength());
recordReader = reader.rows(options);
}
/**
* Initialize columnar batch by setting required schema and partition information.
* With this information, this creates ColumnarBatch with the full schema.
*
* @param orcSchema Schema from ORC file reader.
* @param requiredFields All the fields that are required to return, including partition fields.
* @param requestedDataColIds Requested column ids from orcSchema. -1 if not existed.
* @param requestedPartitionColIds Requested column ids from partition schema. -1 if not existed.
* @param partitionValues Values of partition columns.
*/
public void initBatch(
TypeDescription orcSchema,
StructField[] requiredFields,
int[] requestedDataColIds,
int[] requestedPartitionColIds,
InternalRow partitionValues) {
wrap = new VectorizedRowBatchWrap(orcSchema.createRowBatch(capacity));
assert(!wrap.batch().selectedInUse); // `selectedInUse` should be initialized with `false`.
assert(requiredFields.length == requestedDataColIds.length);
assert(requiredFields.length == requestedPartitionColIds.length);
// If a required column is also partition column, use partition value and don't read from file.
for (int i = 0; i < requiredFields.length; i++) {
if (requestedPartitionColIds[i] != -1) {
requestedDataColIds[i] = -1;
}
}
this.requiredFields = requiredFields;
this.requestedDataColIds = requestedDataColIds;
StructType resultSchema = new StructType(requiredFields);
// Just wrap the ORC column vector instead of copying it to Spark column vector.
orcVectorWrappers = new org.apache.spark.sql.vectorized.ColumnVector[resultSchema.length()];
StructType requiredSchema = new StructType(requiredFields);
for (int i = 0; i < requiredFields.length; i++) {
DataType dt = requiredFields[i].dataType();
if (requestedPartitionColIds[i] != -1) {
ConstantColumnVector partitionCol = new ConstantColumnVector(capacity, dt);
ColumnVectorUtils.populate(partitionCol, partitionValues, requestedPartitionColIds[i]);
orcVectorWrappers[i] = partitionCol;
} else {
int colId = requestedDataColIds[i];
// Initialize the missing columns once.
if (colId == -1) {
final WritableColumnVector missingCol;
if (memoryMode == MemoryMode.OFF_HEAP) {
missingCol = new OffHeapColumnVector(capacity, dt);
} else {
missingCol = new OnHeapColumnVector(capacity, dt);
}
// Check if the missing column has an associated default value in the schema metadata.
// If so, fill the corresponding column vector with the value.
Object defaultValue = ResolveDefaultColumns.existenceDefaultValues(requiredSchema)[i];
if (defaultValue == null) {
missingCol.putNulls(0, capacity);
} else if (missingCol.appendObjects(capacity, defaultValue).isEmpty()) {
throw new IllegalArgumentException("Cannot assign default column value to result " +
"column batch in vectorized Orc reader because the data type is not supported: " +
defaultValue);
}
missingCol.setIsConstant();
orcVectorWrappers[i] = missingCol;
} else {
orcVectorWrappers[i] = OrcColumnVectorUtils.toOrcColumnVector(
dt, wrap.batch().cols[colId]);
}
}
}
columnarBatch = new ColumnarBatch(orcVectorWrappers);
}
/**
* Return true if there exists more data in the next batch. If exists, prepare the next batch
* by copying from ORC VectorizedRowBatch columns to Spark ColumnarBatch columns.
*/
private boolean nextBatch() throws IOException {
recordReader.nextBatch(wrap.batch());
int batchSize = wrap.batch().size;
if (batchSize == 0) {
return false;
}
columnarBatch.setNumRows(batchSize);
for (int i = 0; i < requiredFields.length; i++) {
if (requestedDataColIds[i] != -1) {
((OrcColumnVector) orcVectorWrappers[i]).setBatchSize(batchSize);
}
}
return true;
}
}
| OrcColumnarBatchReader |
java | apache__maven | its/core-it-support/core-it-plugins/maven-it-plugin-plexus-lifecycle/src/main/java/org/apache/maven/its/plugins/plexuslifecycle/DefaultFakeComponent.java | {
"start": 1388,
"end": 1936
} | class ____ implements FakeComponent, Contextualizable, Disposable, LogEnabled {
private Logger logger;
public void enableLogging(Logger logger) {
this.logger = logger;
}
public void contextualize(Context context) throws ContextException {
logger.info("DefaultFakeComponent :: contextualize");
}
public void dispose() {
logger.info("DefaultFakeComponent :: dispose");
}
@Override
public void doNothing() {
logger.info("doNothing DefaultFakeComponent");
}
}
| DefaultFakeComponent |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/test/manual/MassiveStringValueSorting.java | {
"start": 2224,
"end": 12670
} | class ____ {
private static final long SEED = 347569784659278346L;
public void testStringValueSorting() {
File input = null;
File sorted = null;
try {
// the source file
input =
generateFileWithStrings(
300000, "http://some-uri.com/that/is/a/common/prefix/to/all");
// the sorted file
sorted = File.createTempFile("sorted_strings", "txt");
String[] command = {
"/bin/bash",
"-c",
"export LC_ALL=\"C\" && cat \""
+ input.getAbsolutePath()
+ "\" | sort > \""
+ sorted.getAbsolutePath()
+ "\""
};
Process p = null;
try {
p = Runtime.getRuntime().exec(command);
int retCode = p.waitFor();
if (retCode != 0) {
throw new Exception("Command failed with return code " + retCode);
}
p = null;
} finally {
if (p != null) {
p.destroy();
}
}
// sort the data
Sorter<StringValue> sorter = null;
BufferedReader reader = null;
BufferedReader verifyReader = null;
MemoryManager mm = null;
try (IOManager ioMan = new IOManagerAsync()) {
mm = MemoryManagerBuilder.newBuilder().setMemorySize(1024 * 1024).build();
TypeSerializer<StringValue> serializer =
new CopyableValueSerializer<StringValue>(StringValue.class);
TypeComparator<StringValue> comparator =
new CopyableValueComparator<StringValue>(true, StringValue.class);
reader = new BufferedReader(new FileReader(input));
MutableObjectIterator<StringValue> inputIterator =
new StringValueReaderMutableObjectIterator(reader);
sorter =
ExternalSorter.newBuilder(mm, new DummyInvokable(), serializer, comparator)
.maxNumFileHandles(128)
.enableSpilling(ioMan, 0.8f)
.memoryFraction(1.0)
.objectReuse(true)
.largeRecords(true)
.build(inputIterator);
MutableObjectIterator<StringValue> sortedData = sorter.getIterator();
reader.close();
// verify
verifyReader = new BufferedReader(new FileReader(sorted));
String nextVerify;
StringValue nextFromFlinkSort = new StringValue();
while ((nextVerify = verifyReader.readLine()) != null) {
nextFromFlinkSort = sortedData.next(nextFromFlinkSort);
Assert.assertNotNull(nextFromFlinkSort);
Assert.assertEquals(nextVerify, nextFromFlinkSort.getValue());
}
} finally {
if (reader != null) {
reader.close();
}
if (verifyReader != null) {
verifyReader.close();
}
if (sorter != null) {
sorter.close();
}
if (mm != null) {
mm.shutdown();
}
}
} catch (Exception e) {
System.err.println(e.getMessage());
e.printStackTrace();
Assert.fail(e.getMessage());
} finally {
if (input != null) {
//noinspection ResultOfMethodCallIgnored
input.delete();
}
if (sorted != null) {
//noinspection ResultOfMethodCallIgnored
sorted.delete();
}
}
}
@SuppressWarnings("unchecked")
public void testStringValueTuplesSorting() {
final int numStrings = 300000;
File input = null;
File sorted = null;
try {
// the source file
input =
generateFileWithStringTuples(
numStrings, "http://some-uri.com/that/is/a/common/prefix/to/all");
// the sorted file
sorted = File.createTempFile("sorted_strings", "txt");
String[] command = {
"/bin/bash",
"-c",
"export LC_ALL=\"C\" && cat \""
+ input.getAbsolutePath()
+ "\" | sort > \""
+ sorted.getAbsolutePath()
+ "\""
};
Process p = null;
try {
p = Runtime.getRuntime().exec(command);
int retCode = p.waitFor();
if (retCode != 0) {
throw new Exception("Command failed with return code " + retCode);
}
p = null;
} finally {
if (p != null) {
p.destroy();
}
}
// sort the data
Sorter<Tuple2<StringValue, StringValue[]>> sorter = null;
BufferedReader reader = null;
BufferedReader verifyReader = null;
MemoryManager mm = null;
try (IOManager ioMan = new IOManagerAsync()) {
mm = MemoryManagerBuilder.newBuilder().setMemorySize(1024 * 1024).build();
TupleTypeInfo<Tuple2<StringValue, StringValue[]>> typeInfo =
(TupleTypeInfo<Tuple2<StringValue, StringValue[]>>)
new TypeHint<Tuple2<StringValue, StringValue[]>>() {}.getTypeInfo();
TypeSerializer<Tuple2<StringValue, StringValue[]>> serializer =
typeInfo.createSerializer(new SerializerConfigImpl());
TypeComparator<Tuple2<StringValue, StringValue[]>> comparator =
typeInfo.createComparator(
new int[] {0}, new boolean[] {true}, 0, new ExecutionConfig());
reader = new BufferedReader(new FileReader(input));
MutableObjectIterator<Tuple2<StringValue, StringValue[]>> inputIterator =
new StringValueTupleReaderMutableObjectIterator(reader);
sorter =
ExternalSorter.newBuilder(mm, new DummyInvokable(), serializer, comparator)
.maxNumFileHandles(4)
.enableSpilling(ioMan, 0.8f)
.memoryFraction(1.0)
.objectReuse(false)
.largeRecords(true)
.build(inputIterator);
// use this part to verify that all if good when sorting in memory
// List<MemorySegment> memory = mm.allocatePages(new DummyInvokable(),
// mm.computeNumberOfPages(1024*1024*1024));
// NormalizedKeySorter<Tuple2<String, String[]>> nks = new
// NormalizedKeySorter<Tuple2<String,String[]>>(serializer, comparator, memory);
//
// {
// Tuple2<String, String[]> wi = new Tuple2<String, String[]>("", new
// String[0]);
// while ((wi = inputIterator.next(wi)) != null) {
// Assert.assertTrue(nks.write(wi));
// }
//
// new QuickSort().sort(nks);
// }
//
// MutableObjectIterator<Tuple2<String, String[]>> sortedData =
// nks.getIterator();
MutableObjectIterator<Tuple2<StringValue, StringValue[]>> sortedData =
sorter.getIterator();
reader.close();
// verify
verifyReader = new BufferedReader(new FileReader(sorted));
MutableObjectIterator<Tuple2<StringValue, StringValue[]>> verifyIterator =
new StringValueTupleReaderMutableObjectIterator(verifyReader);
Tuple2<StringValue, StringValue[]> nextVerify =
new Tuple2<StringValue, StringValue[]>(
new StringValue(), new StringValue[0]);
Tuple2<StringValue, StringValue[]> nextFromFlinkSort =
new Tuple2<StringValue, StringValue[]>(
new StringValue(), new StringValue[0]);
int num = 0;
while ((nextVerify = verifyIterator.next(nextVerify)) != null) {
num++;
nextFromFlinkSort = sortedData.next(nextFromFlinkSort);
Assert.assertNotNull(nextFromFlinkSort);
Assert.assertEquals(nextVerify.f0, nextFromFlinkSort.f0);
Assert.assertArrayEquals(nextVerify.f1, nextFromFlinkSort.f1);
}
Assert.assertNull(sortedData.next(nextFromFlinkSort));
Assert.assertEquals(numStrings, num);
} finally {
if (reader != null) {
reader.close();
}
if (verifyReader != null) {
verifyReader.close();
}
if (sorter != null) {
sorter.close();
}
if (mm != null) {
mm.shutdown();
}
}
} catch (Exception e) {
System.err.println(e.getMessage());
e.printStackTrace();
Assert.fail(e.getMessage());
} finally {
if (input != null) {
//noinspection ResultOfMethodCallIgnored
input.delete();
}
if (sorted != null) {
//noinspection ResultOfMethodCallIgnored
sorted.delete();
}
}
}
// --------------------------------------------------------------------------------------------
private static final | MassiveStringValueSorting |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/StatementSwitchToExpressionSwitchTest.java | {
"start": 114811,
"end": 116075
} | class ____ {
int[] x;
public Test(int foo) {
x = null;
}
public int[] foo(Suit suit) {
switch (suit) {
case HEART:
// Inline comment
x[6] <<= 2;
break;
case DIAMOND:
x[5] <<= (((x[6] + 1) * (x[6] * x[5]) << 1));
break;
case SPADE:
throw new RuntimeException();
default:
throw new NullPointerException();
}
return x;
}
}
""")
.setArgs(
"-XepOpt:StatementSwitchToExpressionSwitch:EnableAssignmentSwitchConversion",
"-XepOpt:StatementSwitchToExpressionSwitch:EnableDirectConversion=false")
.doTest();
}
@Test
public void switchByEnum_assignmentSwitchMixedKinds_noError() {
// Different assignment types ("=" versus "+="). The check does not attempt to alter the
// assignments to make the assignment types match (e.g. does not change to "x = x + 2")
helper
.addSourceLines(
"Test.java",
"""
| Test |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/component/bean/BeanMethodWithEmptyParameterAndNoMethodWithNoParameterIssueTest.java | {
"start": 3080,
"end": 3252
} | class ____ {
public static void doSomething(Exchange exchange) {
exchange.getIn().setHeader("foo", "bar");
}
}
public static final | MyBean |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/api/recursive/comparison/ComparingNormalizedFields.java | {
"start": 1395,
"end": 6723
} | class ____ extends AbstractRecursiveComparisonIntrospectionStrategy {
private static final String NO_FIELD_FOUND = "Unable to find field in %s, fields tried: %s and %s";
// original field name <-> normalized field name by type
private final Map<Class<?>, Map<String, String>> originalFieldNameByNormalizedFieldNameByType = new ConcurrentHashMap<>();
// use ConcurrentHashMap in case this strategy instance is used in a multi-thread context
private final Map<Class<?>, Set<String>> fieldNamesPerClass = new ConcurrentHashMap<>();
/**
* Returns the <b>normalized</b> names of the children nodes of the given object that will be used in the recursive comparison.
* <p>
* The names are normalized according to {@link #normalizeFieldName(String)}.
*
* @param node the object to get the child nodes from
* @return the normalized names of the children nodes of the given object
*/
@Override
public Set<String> getChildrenNodeNamesOf(Object node) {
if (node == null) return new HashSet<>();
Class<?> nodeClass = node.getClass();
Set<String> fieldsNames = getFieldsNames(nodeClass);
// we normalize fields so that we can compare actual and expected, for example if actual has a firstName field and expected
// a first_name field, we won't find firstName in expected unless we normalize it
// Note that normalize has side effects as it keeps track of the normalized name -> original name mapping
return fieldNamesPerClass.computeIfAbsent(nodeClass,
unused -> fieldsNames.stream()
.map(fieldsName -> normalize(nodeClass, fieldsName))
.collect(toSet()));
}
/**
* Returns the normalized version of the given field name to allow actual and expected fields to be matched.
* <p>
* For example, let's assume {@code actual} is a {@code Person} with camel case fields like {@code firstName} and
* {@code expected} is a {@code PersonDto} with snake case field like {@code first_name}.
* <p>
* The default recursive comparison gathers all {@code actual} and {@code expected} fields to compare them but fails as it can't
* know that {@code actual.firstName} must be compared to {@code expected.first_name}.<br/> By normalizing fields names first,
* the recursive comparison can now operate on fields that can be matched.
* <p>
* In our example, we can either normalize fields to be camel case or snake case (camel case would be more natural though).
* <p>
* Note that {@link #getChildNodeValue(String, Object)} receives the normalized field name, it tries to get its value first and
* if failing to do so, it tries the original field name.<br/>
* In our example, if we normalize to camel case, getting {@code firstName} works fine for {@code actual} but not for
* {@code expected}, we have to get the original field name {@code first_name} to get the value ({@code ComparingNormalizedFields}
* implementation tracks which original field names resulted in a specific normalized field name).
*
* @param fieldName the field name to normalize
* @return the normalized field name
*/
protected abstract String normalizeFieldName(String fieldName);
/**
* Normalize the field name and keep track of the normalized name -> original name
* @param fieldName the field name to normalize
* @return the normalized field name
*/
private String normalize(Class<?> nodeClass, String fieldName) {
if (!originalFieldNameByNormalizedFieldNameByType.containsKey(nodeClass)) {
originalFieldNameByNormalizedFieldNameByType.put(nodeClass, new HashMap<>());
}
String normalizedFieldName = normalizeFieldName(fieldName);
originalFieldNameByNormalizedFieldNameByType.get(nodeClass).put(normalizedFieldName, fieldName);
return normalizedFieldName;
}
/**
* Returns the value of the given object field identified by the fieldName parameter.
* <p>
* Note that this method receives the normalized field name with (see {@link #normalizeFieldName(String)}), it tries to get
* its value first and if failing to do so, it tries the original field name ({@code ComparingNormalizedFields} implementation
* tracks which original field names resulted in a specific normalized field name).
* <p>
* For example, let's assume {@code actual} is a {@code Person} with camel case fields like {@code firstName} and {@code expected} is a
* {@code PersonDto} with snake case field like {@code first_name} and we normalize all fields names to be camel case. In this
* case, getting {@code firstName} works fine for {@code actual} but not for {@code expected}, for the latter it succeeds with
* the original field name {@code first_name}.
*
* @param fieldName the field name
* @param instance the object to read the field from
* @return the object field value
*/
@Override
public Object getChildNodeValue(String fieldName, Object instance) {
// fieldName was normalized usually matching actual or expected field naming convention but not both, we first try
// to get the value corresponding to fieldName but if that does not work it means the instance object | ComparingNormalizedFields |
java | apache__camel | components/camel-spring-parent/camel-spring-batch/src/main/java/org/apache/camel/component/spring/batch/support/CamelItemProcessor.java | {
"start": 1195,
"end": 1908
} | class ____<I, O> implements ItemProcessor<I, O> {
private static final Logger LOG = LoggerFactory.getLogger(CamelItemProcessor.class);
private final ProducerTemplate producerTemplate;
private final String endpointUri;
public CamelItemProcessor(ProducerTemplate producerTemplate, String endpointUri) {
this.producerTemplate = producerTemplate;
this.endpointUri = endpointUri;
}
@Override
@SuppressWarnings("unchecked")
public O process(I i) throws Exception {
LOG.debug("processing item [{}]...", i);
O result = (O) producerTemplate.requestBody(endpointUri, i);
LOG.debug("processed item");
return result;
}
}
| CamelItemProcessor |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/views/MultipleViewsDeser437Test.java | {
"start": 1147,
"end": 3827
} | class ____ {
public int x, y, z;
@JsonView(ViewX.class)
public SimpleBuilderXY withX(int x0) {
this.x = x0;
return this;
}
@JsonView(ViewY.class)
public SimpleBuilderXY withY(int y0) {
this.y = y0;
return this;
}
@JsonView(ViewZ.class)
public SimpleBuilderXY withZ(int z0) {
this.z = z0;
return this;
}
public ValueClassXY build() {
return new ValueClassXY(x, y, z);
}
}
private final ObjectMapper ENABLED_MAPPER = jsonMapperBuilder()
.enable(DeserializationFeature.FAIL_ON_UNEXPECTED_VIEW_PROPERTIES).build();
private final ObjectMapper DISABLED_MAPPER = jsonMapperBuilder()
.disable(DeserializationFeature.FAIL_ON_UNEXPECTED_VIEW_PROPERTIES).build();
@Test
public void testDeserWithMultipleViews() throws Exception
{
final String json = a2q("{'nonViewField':'nonViewFieldValue'," +
"'view1Field':'view1FieldValue'," +
"'view2Field':'view2FieldValue'}");
ObjectReader reader = ENABLED_MAPPER.readerWithView(View1.class).forType(Bean437.class);
_testMismatchException(reader, json);
}
@Test
public void testDeserMultipleViewsWithBuilders() throws Exception
{
final String json = a2q("{'x':5,'y':10,'z':0}");
// When enabled, should fail on unexpected view
_testMismatchException(
ENABLED_MAPPER.readerFor(ValueClassXY.class).withView(ViewX.class),
json);
_testMismatchException(
ENABLED_MAPPER.readerFor(ValueClassXY.class).withView(ViewY.class),
json);
// When disabled, should not fail on unexpected view
ValueClassXY withX = DISABLED_MAPPER.readerFor(ValueClassXY.class).withView(ViewX.class).readValue(json);
assertEquals(6, withX._x);
assertEquals(1, withX._y);
assertEquals(1, withX._z);
ValueClassXY withY = DISABLED_MAPPER.readerFor(ValueClassXY.class).withView(ViewY.class).readValue(json);
assertEquals(1, withY._x);
assertEquals(11, withY._y);
assertEquals(1, withY._z);
}
private void _testMismatchException(ObjectReader reader, String json) throws Exception {
try {
reader.readValue(json);
fail("should not pass, but fail with exception with unexpected view");
} catch (MismatchedInputException e) {
verifyException(e, "Input mismatch while deserializing");
verifyException(e, "is not part of current active view");
}
}
}
| SimpleBuilderXY |
java | apache__rocketmq | client/src/main/java/org/apache/rocketmq/acl/common/AclUtils.java | {
"start": 1467,
"end": 10594
} | class ____ {
private static final Logger log = LoggerFactory.getLogger(LoggerName.COMMON_LOGGER_NAME);
public static byte[] combineRequestContent(RemotingCommand request, SortedMap<String, String> fieldsMap) {
try {
StringBuilder sb = new StringBuilder();
for (Map.Entry<String, String> entry : fieldsMap.entrySet()) {
if (!SessionCredentials.SIGNATURE.equals(entry.getKey())) {
sb.append(entry.getValue());
}
}
return AclUtils.combineBytes(sb.toString().getBytes(CHARSET), request.getBody());
} catch (Exception e) {
throw new RuntimeException("Incompatible exception.", e);
}
}
public static byte[] combineBytes(byte[] b1, byte[] b2) {
if (b1 == null || b1.length == 0) return b2;
if (b2 == null || b2.length == 0) return b1;
byte[] total = new byte[b1.length + b2.length];
System.arraycopy(b1, 0, total, 0, b1.length);
System.arraycopy(b2, 0, total, b1.length, b2.length);
return total;
}
public static String calSignature(byte[] data, String secretKey) {
return AclSigner.calSignature(data, secretKey);
}
public static void IPv6AddressCheck(String netAddress) {
if (isAsterisk(netAddress) || isMinus(netAddress)) {
int asterisk = netAddress.indexOf("*");
int minus = netAddress.indexOf("-");
// '*' must be the end of netAddress if it exists
if (asterisk > -1 && asterisk != netAddress.length() - 1) {
throw new AclException(String.format("NetAddress examine scope Exception netAddress is %s", netAddress));
}
// format like "2::ac5:78:1-200:*" or "2::ac5:78:1-200" is legal
if (minus > -1) {
if (asterisk == -1) {
if (minus <= netAddress.lastIndexOf(":")) {
throw new AclException(String.format("NetAddress examine scope Exception netAddress is %s", netAddress));
}
} else {
if (minus <= netAddress.lastIndexOf(":", netAddress.lastIndexOf(":") - 1)) {
throw new AclException(String.format("NetAddress examine scope Exception netAddress is %s", netAddress));
}
}
}
}
}
public static String v6ipProcess(String netAddress) {
int part;
String subAddress;
boolean isAsterisk = isAsterisk(netAddress);
boolean isMinus = isMinus(netAddress);
if (isAsterisk && isMinus) {
part = 6;
int lastColon = netAddress.lastIndexOf(':');
int secondLastColon = netAddress.substring(0, lastColon).lastIndexOf(':');
subAddress = netAddress.substring(0, secondLastColon);
} else if (!isAsterisk && !isMinus) {
part = 8;
subAddress = netAddress;
} else {
part = 7;
subAddress = netAddress.substring(0, netAddress.lastIndexOf(':'));
}
return expandIP(subAddress, part);
}
public static void verify(String netAddress, int index) {
if (!AclUtils.isScope(netAddress, index)) {
throw new AclException(String.format("NetAddress examine scope Exception netAddress is %s", netAddress));
}
}
public static String[] getAddresses(String netAddress, String partialAddress) {
String[] parAddStrArray = StringUtils.split(partialAddress.substring(1, partialAddress.length() - 1), ",");
String address = netAddress.substring(0, netAddress.indexOf("{"));
String[] addressStrArray = new String[parAddStrArray.length];
for (int i = 0; i < parAddStrArray.length; i++) {
addressStrArray[i] = address + parAddStrArray[i];
}
return addressStrArray;
}
public static boolean isScope(String netAddress, int index) {
// IPv6 Address
if (isColon(netAddress)) {
netAddress = expandIP(netAddress, 8);
String[] strArray = StringUtils.split(netAddress, ":");
return isIPv6Scope(strArray, index);
}
String[] strArray = StringUtils.split(netAddress, ".");
if (strArray.length != 4) {
return false;
}
return isScope(strArray, index);
}
public static boolean isScope(String[] num, int index) {
for (int i = 0; i < index; i++) {
if (!isScope(num[i])) {
return false;
}
}
return true;
}
public static boolean isColon(String netAddress) {
return netAddress.indexOf(':') > -1;
}
public static boolean isScope(String num) {
return isScope(Integer.parseInt(num.trim()));
}
public static boolean isScope(int num) {
return num >= 0 && num <= 255;
}
public static boolean isAsterisk(String asterisk) {
return asterisk.indexOf('*') > -1;
}
public static boolean isComma(String colon) {
return colon.indexOf(',') > -1;
}
public static boolean isMinus(String minus) {
return minus.indexOf('-') > -1;
}
public static boolean isIPv6Scope(String[] num, int index) {
for (int i = 0; i < index; i++) {
int value;
try {
value = Integer.parseInt(num[i], 16);
} catch (NumberFormatException e) {
return false;
}
if (!isIPv6Scope(value)) {
return false;
}
}
return true;
}
public static boolean isIPv6Scope(int num) {
int min = Integer.parseInt("0", 16);
int max = Integer.parseInt("ffff", 16);
return num >= min && num <= max;
}
public static String expandIP(String netAddress, int part) {
netAddress = netAddress.toUpperCase();
// expand netAddress
int separatorCount = StringUtils.countMatches(netAddress, ":");
int padCount = part - separatorCount;
if (padCount > 0) {
StringBuilder padStr = new StringBuilder(":");
for (int i = 0; i < padCount; i++) {
padStr.append(":");
}
netAddress = StringUtils.replace(netAddress, "::", padStr.toString());
}
// pad netAddress
String[] strArray = StringUtils.splitPreserveAllTokens(netAddress, ":");
for (int i = 0; i < strArray.length; i++) {
if (strArray[i].length() < 4) {
strArray[i] = StringUtils.leftPad(strArray[i], 4, '0');
}
}
// output
StringBuilder sb = new StringBuilder();
for (int i = 0; i < strArray.length; i++) {
sb.append(strArray[i]);
if (i != strArray.length - 1) {
sb.append(":");
}
}
return sb.toString();
}
public static <T> T getYamlDataObject(String path, Class<T> clazz) {
try (FileInputStream fis = new FileInputStream(path)) {
return getYamlDataObject(fis, clazz);
} catch (FileNotFoundException ignore) {
return null;
} catch (Exception e) {
throw new AclException(e.getMessage(), e);
}
}
public static <T> T getYamlDataObject(InputStream fis, Class<T> clazz) {
Yaml yaml = new Yaml();
try {
return yaml.loadAs(fis, clazz);
} catch (Exception e) {
throw new AclException(e.getMessage(), e);
}
}
public static RPCHook getAclRPCHook(String fileName) {
JSONObject yamlDataObject;
try {
yamlDataObject = AclUtils.getYamlDataObject(fileName,
JSONObject.class);
} catch (Exception e) {
log.error("Convert yaml file to data object error, ", e);
return null;
}
return buildRpcHook(yamlDataObject);
}
public static RPCHook getAclRPCHook(InputStream inputStream) {
JSONObject yamlDataObject = null;
try {
yamlDataObject = AclUtils.getYamlDataObject(inputStream, JSONObject.class);
} catch (Exception e) {
log.error("Convert yaml file to data object error, ", e);
return null;
}
return buildRpcHook(yamlDataObject);
}
private static RPCHook buildRpcHook(JSONObject yamlDataObject) {
if (yamlDataObject == null || yamlDataObject.isEmpty()) {
log.warn("Failed to parse configuration to enable ACL.");
return null;
}
String accessKey = yamlDataObject.getString(AclConstants.CONFIG_ACCESS_KEY);
String secretKey = yamlDataObject.getString(AclConstants.CONFIG_SECRET_KEY);
if (StringUtils.isBlank(accessKey) || StringUtils.isBlank(secretKey)) {
log.warn("Failed to enable ACL. Either AccessKey or secretKey is blank");
return null;
}
return new AclClientRPCHook(new SessionCredentials(accessKey, secretKey));
}
}
| AclUtils |
java | apache__camel | components/camel-iso8583/src/test/java/org/apache/camel/dataformat/iso8583/Iso8583DataFormatGroovyTest.java | {
"start": 1128,
"end": 3187
} | class ____ extends CamelTestSupport {
@Test
public void testUnmarshal() throws Exception {
getMockEndpoint("mock:result").expectedMessageCount(1);
getMockEndpoint("mock:result").message(0).body().isInstanceOf(Map.class);
getMockEndpoint("mock:result").message(0).body().simple("${body[op]}").isEqualTo("650000");
getMockEndpoint("mock:result").message(0).body().simple("${body[amount]}").isEqualTo("30.00");
getMockEndpoint("mock:result").message(0).body().simple("${body[ref]}").isEqualTo("001234425791");
getMockEndpoint("mock:result").message(0).body().simple("${body[response]}").isEqualTo("00");
getMockEndpoint("mock:result").message(0).body().simple("${body[terminal]}").isEqualTo("614209027600TéST");
getMockEndpoint("mock:result").message(0).body().simple("${body[currency]}").isEqualTo("484");
template.sendBody("direct:unmarshal", new File("src/test/resources/parse1.txt"));
MockEndpoint.assertIsSatisfied(context);
}
@Override
protected RoutesBuilder createRouteBuilder() throws Exception {
return new RouteBuilder() {
@Override
public void configure() throws Exception {
from("direct:unmarshal").unmarshal().iso8583("0210")
.transform().groovy(
"""
[
"op": body[3].value,
"amount": body[4].value,
"ref": body[37].value,
"response": body[39].value,
"terminal": body[41].value,
"currency": body[49].value
]
""")
.log("${body}")
.to("mock:result");
}
};
}
}
| Iso8583DataFormatGroovyTest |
java | apache__camel | components/camel-tracing/src/main/java/org/apache/camel/tracing/decorators/JdbcSpanDecorator.java | {
"start": 1016,
"end": 1646
} | class ____ extends AbstractSpanDecorator {
@Override
public String getComponent() {
return "jdbc";
}
@Override
public String getComponentClassName() {
return "org.apache.camel.component.jdbc.JdbcComponent";
}
@Override
public void pre(SpanAdapter span, Exchange exchange, Endpoint endpoint) {
super.pre(span, exchange, endpoint);
span.setTag(TagConstants.DB_SYSTEM, "sql");
Object body = exchange.getIn().getBody();
if (body instanceof String) {
span.setTag(TagConstants.DB_STATEMENT, (String) body);
}
}
}
| JdbcSpanDecorator |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/util/ClassUtils.java | {
"start": 63581,
"end": 64865
} | class ____ defines the method
* @param methodName the static method name
* @param args the parameter types to the method
* @return the static method, or {@code null} if no static method was found
* @throws IllegalArgumentException if the method name is blank or the clazz is null
*/
public static @Nullable Method getStaticMethod(Class<?> clazz, String methodName, Class<?>... args) {
Assert.notNull(clazz, "Class must not be null");
Assert.notNull(methodName, "Method name must not be null");
try {
Method method = clazz.getMethod(methodName, args);
return (Modifier.isStatic(method.getModifiers()) ? method : null);
}
catch (NoSuchMethodException ex) {
return null;
}
}
private static @Nullable Method getMethodOrNull(Class<?> clazz, String methodName, @Nullable Class<?> @Nullable [] paramTypes) {
try {
return clazz.getMethod(methodName, paramTypes);
}
catch (NoSuchMethodException ex) {
return null;
}
}
private static Set<Method> findMethodCandidatesByName(Class<?> clazz, String methodName) {
Set<Method> candidates = new HashSet<>(1);
Method[] methods = clazz.getMethods();
for (Method method : methods) {
if (methodName.equals(method.getName())) {
candidates.add(method);
}
}
return candidates;
}
}
| which |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/runtime/stream/sql/FunctionITCase.java | {
"start": 94072,
"end": 94316
} | class ____ extends ScalarFunction {
public Boolean eval(@DataTypeHint("BOOLEAN NOT NULL") Boolean b) {
return b;
}
}
/** A function that contains a local variable with multi blocks. */
public static | BoolEcho |
java | apache__camel | core/camel-api/src/main/java/org/apache/camel/spi/PropertyConfigurerGetter.java | {
"start": 1263,
"end": 1344
} | class ____.
*
* @param name the property name
* @return the | type |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyAction.java | {
"start": 427,
"end": 727
} | class ____ extends ActionType<CreateApiKeyResponse> {
public static final String NAME = "cluster:admin/xpack/security/api_key/create";
public static final CreateApiKeyAction INSTANCE = new CreateApiKeyAction();
private CreateApiKeyAction() {
super(NAME);
}
}
| CreateApiKeyAction |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/OverridingMethodInconsistentArgumentNamesCheckerTest.java | {
"start": 1188,
"end": 1344
} | class ____ {
void m(int p1, int p2) {}
}
""")
.addSourceLines(
"B.java",
"""
| A |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/time/StronglyTypeTimeTest.java | {
"start": 941,
"end": 1450
} | class ____ {
private final CompilationTestHelper compilationHelper =
CompilationTestHelper.newInstance(StronglyTypeTime.class, getClass());
private final BugCheckerRefactoringTestHelper refactoringHelper =
BugCheckerRefactoringTestHelper.newInstance(StronglyTypeTime.class, getClass());
@Test
public void findingLocatedOnField() {
compilationHelper
.addSourceLines(
"Test.java",
"""
import java.time.Duration;
| StronglyTypeTimeTest |
java | elastic__elasticsearch | test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/StashTests.java | {
"start": 1012,
"end": 7348
} | class ____ extends ESTestCase {
public void testReplaceStashedValuesStashKeyInMapValue() throws IOException {
Stash stash = new Stash();
Map<String, Object> expected = new HashMap<>();
expected.put("key", singletonMap("a", "foobar"));
Map<String, Object> map = new HashMap<>();
Map<String, Object> map2 = new HashMap<>();
if (randomBoolean()) {
stash.stashValue("stashed", "bar");
map2.put("a", "foo${stashed}");
} else {
stash.stashValue("stashed", "foobar");
map2.put("a", "$stashed");
}
map.put("key", map2);
Map<String, Object> actual = stash.replaceStashedValues(map);
assertEquals(expected, actual);
assertThat(actual, not(sameInstance(map)));
}
public void testReplaceStashedValuesStashKeyInMapKey() throws IOException {
Stash stash = new Stash();
Map<String, Object> expected = new HashMap<>();
expected.put("key", singletonMap("foobar", "a"));
Map<String, Object> map = new HashMap<>();
Map<String, Object> map2 = new HashMap<>();
if (randomBoolean()) {
stash.stashValue("stashed", "bar");
map2.put("foo${stashed}", "a");
} else {
stash.stashValue("stashed", "foobar");
map2.put("$stashed", "a");
}
map.put("key", map2);
Map<String, Object> actual = stash.replaceStashedValues(map);
assertEquals(expected, actual);
assertThat(actual, not(sameInstance(map)));
}
public void testReplaceStashedValuesStashKeyInMapKeyConflicts() throws IOException {
Stash stash = new Stash();
Map<String, Object> map = new HashMap<>();
Map<String, Object> map2 = new HashMap<>();
String key;
if (randomBoolean()) {
stash.stashValue("stashed", "bar");
key = "foo${stashed}";
} else {
stash.stashValue("stashed", "foobar");
key = "$stashed";
}
map2.put(key, "a");
map2.put("foobar", "whatever");
map.put("key", map2);
Exception e = expectThrows(IllegalArgumentException.class, () -> stash.replaceStashedValues(map));
assertEquals(
e.getMessage(),
"Unstashing has caused a key conflict! The map is [{foobar=whatever}] and the key is [" + key + "] which unstashes to [foobar]"
);
}
public void testReplaceStashedValuesStashKeyInList() throws IOException {
Stash stash = new Stash();
stash.stashValue("stashed", "bar");
Map<String, Object> expected = new HashMap<>();
expected.put("key", Arrays.asList("foot", "foobar", 1));
Map<String, Object> map = new HashMap<>();
Object value;
if (randomBoolean()) {
stash.stashValue("stashed", "bar");
value = "foo${stashed}";
} else {
stash.stashValue("stashed", "foobar");
value = "$stashed";
}
map.put("key", Arrays.asList("foot", value, 1));
Map<String, Object> actual = stash.replaceStashedValues(map);
assertEquals(expected, actual);
assertThat(actual, not(sameInstance(map)));
}
public void testPathInList() throws IOException {
Stash stash = new Stash();
String topLevelKey;
if (randomBoolean()) {
topLevelKey = randomAlphaOfLength(2) + "." + randomAlphaOfLength(2);
} else {
topLevelKey = randomAlphaOfLength(5);
}
stash.stashValue("body", singletonMap(topLevelKey, Arrays.asList("a", "b")));
Map<String, Object> expected;
Map<String, Object> map;
if (randomBoolean()) {
expected = singletonMap(topLevelKey, Arrays.asList("test", "boooooh!"));
map = singletonMap(topLevelKey, Arrays.asList("test", "${body.$_path}oooooh!"));
} else {
expected = singletonMap(topLevelKey, Arrays.asList("test", "b"));
map = singletonMap(topLevelKey, Arrays.asList("test", "$body.$_path"));
}
Map<String, Object> actual = stash.replaceStashedValues(map);
assertEquals(expected, actual);
assertThat(actual, not(sameInstance(map)));
}
public void testPathInMapValue() throws IOException {
Stash stash = new Stash();
String topLevelKey;
if (randomBoolean()) {
topLevelKey = randomAlphaOfLength(2) + "." + randomAlphaOfLength(2);
} else {
topLevelKey = randomAlphaOfLength(5);
}
stash.stashValue("body", singletonMap(topLevelKey, singletonMap("a", "b")));
Map<String, Object> expected;
Map<String, Object> map;
if (randomBoolean()) {
expected = singletonMap(topLevelKey, singletonMap("a", "boooooh!"));
map = singletonMap(topLevelKey, singletonMap("a", "${body.$_path}oooooh!"));
} else {
expected = singletonMap(topLevelKey, singletonMap("a", "b"));
map = singletonMap(topLevelKey, singletonMap("a", "$body.$_path"));
}
Map<String, Object> actual = stash.replaceStashedValues(map);
assertEquals(expected, actual);
assertThat(actual, not(sameInstance(map)));
}
public void testEscapeExtendedKey() throws IOException {
Stash stash = new Stash();
Map<String, Object> map = new HashMap<>();
map.put("key", singletonMap("a", "foo\\${bar}"));
Map<String, Object> actual = stash.replaceStashedValues(map);
assertMap(actual, matchesMap().entry("key", matchesMap().entry("a", "foo${bar}")));
assertThat(actual, not(sameInstance(map)));
}
public void testMultipleVariableNamesInPath() throws Exception {
var stash = new Stash();
stash.stashValue("body", Map.of(".ds-k8s-2021-12-15-1", Map.of("data_stream", "k8s", "settings", Map.of(), "mappings", Map.of())));
stash.stashValue("backing_index", ".ds-k8s-2021-12-15-1");
assertThat(stash.getValue("$body.$backing_index.data_stream"), equalTo("k8s"));
assertThat(stash.getValue("$body.$backing_index.settings"), equalTo(Map.of()));
assertThat(stash.getValue("$body.$backing_index.mappings"), equalTo(Map.of()));
}
}
| StashTests |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/core/publisher/FluxPeekFuseable.java | {
"start": 1319,
"end": 2941
} | class ____<T> extends InternalFluxOperator<T, T>
implements Fuseable, SignalPeek<T> {
final @Nullable Consumer<? super Subscription> onSubscribeCall;
final @Nullable Consumer<? super T> onNextCall;
final @Nullable Consumer<? super Throwable> onErrorCall;
final @Nullable Runnable onCompleteCall;
final @Nullable Runnable onAfterTerminateCall;
final @Nullable LongConsumer onRequestCall;
final @Nullable Runnable onCancelCall;
FluxPeekFuseable(Flux<? extends T> source,
@Nullable Consumer<? super Subscription> onSubscribeCall,
@Nullable Consumer<? super T> onNextCall,
@Nullable Consumer<? super Throwable> onErrorCall,
@Nullable Runnable onCompleteCall,
@Nullable Runnable onAfterTerminateCall,
@Nullable LongConsumer onRequestCall,
@Nullable Runnable onCancelCall) {
super(source);
this.onSubscribeCall = onSubscribeCall;
this.onNextCall = onNextCall;
this.onErrorCall = onErrorCall;
this.onCompleteCall = onCompleteCall;
this.onAfterTerminateCall = onAfterTerminateCall;
this.onRequestCall = onRequestCall;
this.onCancelCall = onCancelCall;
}
@Override
@SuppressWarnings("unchecked")
public CoreSubscriber<? super T> subscribeOrReturn(CoreSubscriber<? super T> actual) {
if (actual instanceof ConditionalSubscriber) {
return new PeekFuseableConditionalSubscriber<>((ConditionalSubscriber<? super T>) actual, this);
}
return new PeekFuseableSubscriber<>(actual, this);
}
@Override
public @Nullable Object scanUnsafe(Attr key) {
if (key == Attr.RUN_STYLE) return Attr.RunStyle.SYNC;
return super.scanUnsafe(key);
}
static final | FluxPeekFuseable |
java | elastic__elasticsearch | x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openshiftai/embeddings/OpenShiftAiEmbeddingsServiceSettingsTests.java | {
"start": 1483,
"end": 25253
} | class ____ extends AbstractWireSerializingTestCase<OpenShiftAiEmbeddingsServiceSettings> {
private static final String MODEL_VALUE = "some_model";
private static final String CORRECT_URL_VALUE = "http://www.abc.com";
private static final String INVALID_URL_VALUE = "^^^";
private static final int DIMENSIONS_VALUE = 384;
private static final SimilarityMeasure SIMILARITY_MEASURE_VALUE = SimilarityMeasure.DOT_PRODUCT;
private static final int MAX_INPUT_TOKENS_VALUE = 128;
private static final int RATE_LIMIT_VALUE = 2;
public void testFromMap_AllFields_Success() {
var serviceSettings = OpenShiftAiEmbeddingsServiceSettings.fromMap(
buildServiceSettingsMap(
MODEL_VALUE,
CORRECT_URL_VALUE,
SIMILARITY_MEASURE_VALUE.toString(),
DIMENSIONS_VALUE,
MAX_INPUT_TOKENS_VALUE,
new HashMap<>(Map.of(RateLimitSettings.REQUESTS_PER_MINUTE_FIELD, RATE_LIMIT_VALUE)),
true
),
ConfigurationParseContext.PERSISTENT
);
assertThat(
serviceSettings,
is(
new OpenShiftAiEmbeddingsServiceSettings(
MODEL_VALUE,
CORRECT_URL_VALUE,
DIMENSIONS_VALUE,
SIMILARITY_MEASURE_VALUE,
MAX_INPUT_TOKENS_VALUE,
new RateLimitSettings(RATE_LIMIT_VALUE),
true
)
)
);
}
public void testFromMap_NoModelId_Success() {
var serviceSettings = OpenShiftAiEmbeddingsServiceSettings.fromMap(
buildServiceSettingsMap(
null,
CORRECT_URL_VALUE,
SIMILARITY_MEASURE_VALUE.toString(),
DIMENSIONS_VALUE,
MAX_INPUT_TOKENS_VALUE,
new HashMap<>(Map.of(RateLimitSettings.REQUESTS_PER_MINUTE_FIELD, RATE_LIMIT_VALUE)),
false
),
ConfigurationParseContext.PERSISTENT
);
assertThat(
serviceSettings,
is(
new OpenShiftAiEmbeddingsServiceSettings(
null,
CORRECT_URL_VALUE,
DIMENSIONS_VALUE,
SIMILARITY_MEASURE_VALUE,
MAX_INPUT_TOKENS_VALUE,
new RateLimitSettings(RATE_LIMIT_VALUE),
false
)
)
);
}
public void testFromMap_NoUrl_ThrowsException() {
var thrownException = expectThrows(
ValidationException.class,
() -> OpenShiftAiEmbeddingsServiceSettings.fromMap(
buildServiceSettingsMap(
MODEL_VALUE,
null,
SIMILARITY_MEASURE_VALUE.toString(),
DIMENSIONS_VALUE,
MAX_INPUT_TOKENS_VALUE,
new HashMap<>(Map.of(RateLimitSettings.REQUESTS_PER_MINUTE_FIELD, RATE_LIMIT_VALUE)),
false
),
ConfigurationParseContext.PERSISTENT
)
);
assertThat(
thrownException.getMessage(),
containsString("Validation Failed: 1: [service_settings] does not contain the required setting [url];")
);
}
public void testFromMap_EmptyUrl_ThrowsException() {
var thrownException = expectThrows(
ValidationException.class,
() -> OpenShiftAiEmbeddingsServiceSettings.fromMap(
buildServiceSettingsMap(
MODEL_VALUE,
"",
SIMILARITY_MEASURE_VALUE.toString(),
DIMENSIONS_VALUE,
MAX_INPUT_TOKENS_VALUE,
new HashMap<>(Map.of(RateLimitSettings.REQUESTS_PER_MINUTE_FIELD, RATE_LIMIT_VALUE)),
false
),
ConfigurationParseContext.PERSISTENT
)
);
assertThat(
thrownException.getMessage(),
containsString("Validation Failed: 1: [service_settings] Invalid value empty string. [url] must be a non-empty string;")
);
}
public void testFromMap_InvalidUrl_ThrowsException() {
var thrownException = expectThrows(
ValidationException.class,
() -> OpenShiftAiEmbeddingsServiceSettings.fromMap(
buildServiceSettingsMap(
MODEL_VALUE,
INVALID_URL_VALUE,
SIMILARITY_MEASURE_VALUE.toString(),
DIMENSIONS_VALUE,
MAX_INPUT_TOKENS_VALUE,
new HashMap<>(Map.of(RateLimitSettings.REQUESTS_PER_MINUTE_FIELD, RATE_LIMIT_VALUE)),
false
),
ConfigurationParseContext.PERSISTENT
)
);
assertThat(thrownException.getMessage(), containsString(Strings.format("""
Validation Failed: 1: [service_settings] Invalid url [%s] received for field [url]. \
Error: unable to parse url [%s]. Reason: Illegal character in path;""", INVALID_URL_VALUE, INVALID_URL_VALUE)));
}
public void testFromMap_NoSimilarity_Success() {
var serviceSettings = OpenShiftAiEmbeddingsServiceSettings.fromMap(
buildServiceSettingsMap(
MODEL_VALUE,
CORRECT_URL_VALUE,
null,
DIMENSIONS_VALUE,
MAX_INPUT_TOKENS_VALUE,
new HashMap<>(Map.of(RateLimitSettings.REQUESTS_PER_MINUTE_FIELD, RATE_LIMIT_VALUE)),
false
),
ConfigurationParseContext.PERSISTENT
);
assertThat(
serviceSettings,
is(
new OpenShiftAiEmbeddingsServiceSettings(
MODEL_VALUE,
CORRECT_URL_VALUE,
DIMENSIONS_VALUE,
null,
MAX_INPUT_TOKENS_VALUE,
new RateLimitSettings(RATE_LIMIT_VALUE),
false
)
)
);
}
public void testFromMap_InvalidSimilarity_ThrowsException() {
var thrownException = expectThrows(
ValidationException.class,
() -> OpenShiftAiEmbeddingsServiceSettings.fromMap(
buildServiceSettingsMap(
MODEL_VALUE,
CORRECT_URL_VALUE,
"by_size",
DIMENSIONS_VALUE,
MAX_INPUT_TOKENS_VALUE,
new HashMap<>(Map.of(RateLimitSettings.REQUESTS_PER_MINUTE_FIELD, RATE_LIMIT_VALUE)),
false
),
ConfigurationParseContext.PERSISTENT
)
);
assertThat(thrownException.getMessage(), containsString("""
Validation Failed: 1: [service_settings] Invalid value [by_size] received. \
[similarity] must be one of [cosine, dot_product, l2_norm];"""));
}
public void testFromMap_NoDimensions_SetByUserFalse_Persistent_Success() {
var serviceSettings = OpenShiftAiEmbeddingsServiceSettings.fromMap(
buildServiceSettingsMap(
MODEL_VALUE,
CORRECT_URL_VALUE,
SIMILARITY_MEASURE_VALUE.toString(),
null,
MAX_INPUT_TOKENS_VALUE,
new HashMap<>(Map.of(RateLimitSettings.REQUESTS_PER_MINUTE_FIELD, RATE_LIMIT_VALUE)),
false
),
ConfigurationParseContext.PERSISTENT
);
assertThat(
serviceSettings,
is(
new OpenShiftAiEmbeddingsServiceSettings(
MODEL_VALUE,
CORRECT_URL_VALUE,
null,
SIMILARITY_MEASURE_VALUE,
MAX_INPUT_TOKENS_VALUE,
new RateLimitSettings(RATE_LIMIT_VALUE),
false
)
)
);
}
public void testFromMap_Persistent_WithDimensions_SetByUserFalse_Persistent_Success() {
var serviceSettings = OpenShiftAiEmbeddingsServiceSettings.fromMap(
buildServiceSettingsMap(
MODEL_VALUE,
CORRECT_URL_VALUE,
SIMILARITY_MEASURE_VALUE.toString(),
DIMENSIONS_VALUE,
MAX_INPUT_TOKENS_VALUE,
new HashMap<>(Map.of(RateLimitSettings.REQUESTS_PER_MINUTE_FIELD, RATE_LIMIT_VALUE)),
false
),
ConfigurationParseContext.PERSISTENT
);
assertThat(
serviceSettings,
is(
new OpenShiftAiEmbeddingsServiceSettings(
MODEL_VALUE,
CORRECT_URL_VALUE,
DIMENSIONS_VALUE,
SIMILARITY_MEASURE_VALUE,
MAX_INPUT_TOKENS_VALUE,
new RateLimitSettings(RATE_LIMIT_VALUE),
false
)
)
);
}
public void testFromMap_WithDimensions_SetByUserNull_Persistent_ThrowsException() {
var thrownException = expectThrows(
ValidationException.class,
() -> OpenShiftAiEmbeddingsServiceSettings.fromMap(
buildServiceSettingsMap(
MODEL_VALUE,
CORRECT_URL_VALUE,
SIMILARITY_MEASURE_VALUE.toString(),
DIMENSIONS_VALUE,
MAX_INPUT_TOKENS_VALUE,
new HashMap<>(Map.of(RateLimitSettings.REQUESTS_PER_MINUTE_FIELD, RATE_LIMIT_VALUE)),
null
),
ConfigurationParseContext.PERSISTENT
)
);
assertThat(
thrownException.getMessage(),
containsString("Validation Failed: 1: [service_settings] does not contain the required setting [dimensions_set_by_user];")
);
}
public void testFromMap_NoDimensions_SetByUserNull_Request_Success() {
var serviceSettings = OpenShiftAiEmbeddingsServiceSettings.fromMap(
buildServiceSettingsMap(
MODEL_VALUE,
CORRECT_URL_VALUE,
SIMILARITY_MEASURE_VALUE.toString(),
null,
MAX_INPUT_TOKENS_VALUE,
new HashMap<>(Map.of(RateLimitSettings.REQUESTS_PER_MINUTE_FIELD, RATE_LIMIT_VALUE)),
null
),
ConfigurationParseContext.REQUEST
);
assertThat(
serviceSettings,
is(
new OpenShiftAiEmbeddingsServiceSettings(
MODEL_VALUE,
CORRECT_URL_VALUE,
null,
SIMILARITY_MEASURE_VALUE,
MAX_INPUT_TOKENS_VALUE,
new RateLimitSettings(RATE_LIMIT_VALUE),
false
)
)
);
}
public void testFromMap_WithDimensions_SetByUserNull_Request_Success() {
var serviceSettings = OpenShiftAiEmbeddingsServiceSettings.fromMap(
buildServiceSettingsMap(
MODEL_VALUE,
CORRECT_URL_VALUE,
SIMILARITY_MEASURE_VALUE.toString(),
DIMENSIONS_VALUE,
MAX_INPUT_TOKENS_VALUE,
new HashMap<>(Map.of(RateLimitSettings.REQUESTS_PER_MINUTE_FIELD, RATE_LIMIT_VALUE)),
null
),
ConfigurationParseContext.REQUEST
);
assertThat(
serviceSettings,
is(
new OpenShiftAiEmbeddingsServiceSettings(
MODEL_VALUE,
CORRECT_URL_VALUE,
DIMENSIONS_VALUE,
SIMILARITY_MEASURE_VALUE,
MAX_INPUT_TOKENS_VALUE,
new RateLimitSettings(RATE_LIMIT_VALUE),
true
)
)
);
}
public void testFromMap_WithDimensions_SetByUserTrue_Request_ThrowsException() {
var thrownException = expectThrows(
ValidationException.class,
() -> OpenShiftAiEmbeddingsServiceSettings.fromMap(
buildServiceSettingsMap(
MODEL_VALUE,
CORRECT_URL_VALUE,
SIMILARITY_MEASURE_VALUE.toString(),
DIMENSIONS_VALUE,
MAX_INPUT_TOKENS_VALUE,
new HashMap<>(Map.of(RateLimitSettings.REQUESTS_PER_MINUTE_FIELD, RATE_LIMIT_VALUE)),
true
),
ConfigurationParseContext.REQUEST
)
);
assertThat(
thrownException.getMessage(),
containsString("Validation Failed: 1: [service_settings] does not allow the setting [dimensions_set_by_user];")
);
}
public void testFromMap_ZeroDimensions_ThrowsException() {
var thrownException = expectThrows(
ValidationException.class,
() -> OpenShiftAiEmbeddingsServiceSettings.fromMap(
buildServiceSettingsMap(
MODEL_VALUE,
CORRECT_URL_VALUE,
SIMILARITY_MEASURE_VALUE.toString(),
0,
MAX_INPUT_TOKENS_VALUE,
new HashMap<>(Map.of(RateLimitSettings.REQUESTS_PER_MINUTE_FIELD, RATE_LIMIT_VALUE)),
false
),
ConfigurationParseContext.PERSISTENT
)
);
assertThat(
thrownException.getMessage(),
containsString("Validation Failed: 1: [service_settings] Invalid value [0]. [dimensions] must be a positive integer;")
);
}
public void testFromMap_NegativeDimensions_ThrowsException() {
var thrownException = expectThrows(
ValidationException.class,
() -> OpenShiftAiEmbeddingsServiceSettings.fromMap(
buildServiceSettingsMap(
MODEL_VALUE,
CORRECT_URL_VALUE,
SIMILARITY_MEASURE_VALUE.toString(),
-10,
MAX_INPUT_TOKENS_VALUE,
new HashMap<>(Map.of(RateLimitSettings.REQUESTS_PER_MINUTE_FIELD, RATE_LIMIT_VALUE)),
false
),
ConfigurationParseContext.PERSISTENT
)
);
assertThat(
thrownException.getMessage(),
containsString("Validation Failed: 1: [service_settings] Invalid value [-10]. [dimensions] must be a positive integer;")
);
}
public void testFromMap_NoInputTokens_Success() {
var serviceSettings = OpenShiftAiEmbeddingsServiceSettings.fromMap(
buildServiceSettingsMap(
MODEL_VALUE,
CORRECT_URL_VALUE,
SIMILARITY_MEASURE_VALUE.toString(),
DIMENSIONS_VALUE,
null,
new HashMap<>(Map.of(RateLimitSettings.REQUESTS_PER_MINUTE_FIELD, RATE_LIMIT_VALUE)),
false
),
ConfigurationParseContext.PERSISTENT
);
assertThat(
serviceSettings,
is(
new OpenShiftAiEmbeddingsServiceSettings(
MODEL_VALUE,
CORRECT_URL_VALUE,
DIMENSIONS_VALUE,
SIMILARITY_MEASURE_VALUE,
null,
new RateLimitSettings(RATE_LIMIT_VALUE),
false
)
)
);
}
public void testFromMap_ZeroInputTokens_ThrowsException() {
var thrownException = expectThrows(
ValidationException.class,
() -> OpenShiftAiEmbeddingsServiceSettings.fromMap(
buildServiceSettingsMap(
MODEL_VALUE,
CORRECT_URL_VALUE,
SIMILARITY_MEASURE_VALUE.toString(),
DIMENSIONS_VALUE,
0,
new HashMap<>(Map.of(RateLimitSettings.REQUESTS_PER_MINUTE_FIELD, RATE_LIMIT_VALUE)),
false
),
ConfigurationParseContext.PERSISTENT
)
);
assertThat(
thrownException.getMessage(),
containsString("Validation Failed: 1: [service_settings] Invalid value [0]. [max_input_tokens] must be a positive integer;")
);
}
public void testFromMap_NegativeInputTokens_ThrowsException() {
var thrownException = expectThrows(
ValidationException.class,
() -> OpenShiftAiEmbeddingsServiceSettings.fromMap(
buildServiceSettingsMap(
MODEL_VALUE,
CORRECT_URL_VALUE,
SIMILARITY_MEASURE_VALUE.toString(),
DIMENSIONS_VALUE,
-10,
new HashMap<>(Map.of(RateLimitSettings.REQUESTS_PER_MINUTE_FIELD, RATE_LIMIT_VALUE)),
false
),
ConfigurationParseContext.PERSISTENT
)
);
assertThat(
thrownException.getMessage(),
containsString("Validation Failed: 1: [service_settings] Invalid value [-10]. [max_input_tokens] must be a positive integer;")
);
}
public void testFromMap_NoRateLimit_Success() {
var serviceSettings = OpenShiftAiEmbeddingsServiceSettings.fromMap(
buildServiceSettingsMap(
MODEL_VALUE,
CORRECT_URL_VALUE,
SIMILARITY_MEASURE_VALUE.toString(),
DIMENSIONS_VALUE,
MAX_INPUT_TOKENS_VALUE,
null,
false
),
ConfigurationParseContext.PERSISTENT
);
assertThat(
serviceSettings,
is(
new OpenShiftAiEmbeddingsServiceSettings(
MODEL_VALUE,
CORRECT_URL_VALUE,
DIMENSIONS_VALUE,
SIMILARITY_MEASURE_VALUE,
MAX_INPUT_TOKENS_VALUE,
new RateLimitSettings(3000),
false
)
)
);
}
public void testToXContent_WritesAllValues() throws IOException {
var entity = new OpenShiftAiEmbeddingsServiceSettings(
MODEL_VALUE,
CORRECT_URL_VALUE,
DIMENSIONS_VALUE,
SIMILARITY_MEASURE_VALUE,
MAX_INPUT_TOKENS_VALUE,
new RateLimitSettings(3),
false
);
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
entity.toXContent(builder, null);
String xContentResult = Strings.toString(builder);
assertThat(xContentResult, CoreMatchers.is(XContentHelper.stripWhitespace(Strings.format("""
{
"model_id": "%s",
"url": "%s",
"rate_limit": {
"requests_per_minute": 3
},
"dimensions": 384,
"similarity": "dot_product",
"max_input_tokens": 128,
"dimensions_set_by_user": false
}
""", MODEL_VALUE, CORRECT_URL_VALUE))));
}
    @Override
    protected Writeable.Reader<OpenShiftAiEmbeddingsServiceSettings> instanceReader() {
        // Wire-serialization round-trip tests read instances back via the StreamInput constructor.
        return OpenShiftAiEmbeddingsServiceSettings::new;
    }
    @Override
    protected OpenShiftAiEmbeddingsServiceSettings createTestInstance() {
        // Delegates to createRandom() so the randomization logic lives in one place.
        return createRandom();
    }
@Override
protected OpenShiftAiEmbeddingsServiceSettings mutateInstance(OpenShiftAiEmbeddingsServiceSettings instance) throws IOException {
String modelId = instance.modelId();
URI uri = instance.uri();
Integer dimensions = instance.dimensions();
SimilarityMeasure similarity = instance.similarity();
Integer maxInputTokens = instance.maxInputTokens();
RateLimitSettings rateLimitSettings = instance.rateLimitSettings();
Boolean dimensionsSetByUser = instance.dimensionsSetByUser();
switch (between(0, 6)) {
case 0 -> modelId = randomValueOtherThan(modelId, () -> randomAlphaOfLengthOrNull(8));
case 1 -> uri = randomValueOtherThan(uri, () -> ServiceUtils.createUri(randomAlphaOfLength(15)));
case 2 -> dimensions = randomValueOtherThan(dimensions, () -> randomBoolean() ? randomIntBetween(32, 256) : null);
case 3 -> similarity = randomValueOtherThan(similarity, () -> randomBoolean() ? randomFrom(SimilarityMeasure.values()) : null);
case 4 -> maxInputTokens = randomValueOtherThan(maxInputTokens, () -> randomBoolean() ? randomIntBetween(128, 256) : null);
case 5 -> rateLimitSettings = randomValueOtherThan(rateLimitSettings, RateLimitSettingsTests::createRandom);
case 6 -> dimensionsSetByUser = randomValueOtherThan(dimensionsSetByUser, ESTestCase::randomBoolean);
default -> throw new AssertionError("Illegal randomisation branch");
}
return new OpenShiftAiEmbeddingsServiceSettings(
modelId,
uri,
dimensions,
similarity,
maxInputTokens,
rateLimitSettings,
dimensionsSetByUser
);
}
private static OpenShiftAiEmbeddingsServiceSettings createRandom() {
var modelId = randomAlphaOfLength(8);
var url = randomAlphaOfLength(15);
var similarityMeasure = randomBoolean() ? randomFrom(SimilarityMeasure.values()) : null;
var dimensions = randomBoolean() ? randomIntBetween(32, 256) : null;
var maxInputTokens = randomBoolean() ? randomIntBetween(128, 256) : null;
boolean dimensionsSetByUser = randomBoolean();
return new OpenShiftAiEmbeddingsServiceSettings(
modelId,
url,
dimensions,
similarityMeasure,
maxInputTokens,
RateLimitSettingsTests.createRandom(),
dimensionsSetByUser
);
}
public static HashMap<String, Object> buildServiceSettingsMap(
@Nullable String modelId,
@Nullable String url,
@Nullable String similarity,
@Nullable Integer dimensions,
@Nullable Integer maxInputTokens,
@Nullable HashMap<String, Integer> rateLimitSettings,
@Nullable Boolean dimensionsSetByUser
) {
HashMap<String, Object> result = new HashMap<>();
if (modelId != null) {
result.put(ServiceFields.MODEL_ID, modelId);
}
if (url != null) {
result.put(ServiceFields.URL, url);
}
if (similarity != null) {
result.put(ServiceFields.SIMILARITY, similarity);
}
if (dimensions != null) {
result.put(ServiceFields.DIMENSIONS, dimensions);
}
if (maxInputTokens != null) {
result.put(ServiceFields.MAX_INPUT_TOKENS, maxInputTokens);
}
if (rateLimitSettings != null) {
result.put(RateLimitSettings.FIELD_NAME, rateLimitSettings);
}
if (dimensionsSetByUser != null) {
result.put(ServiceFields.DIMENSIONS_SET_BY_USER, dimensionsSetByUser);
}
return result;
}
}
| OpenShiftAiEmbeddingsServiceSettingsTests |
java | grpc__grpc-java | opentelemetry/src/main/java/io/grpc/opentelemetry/OpenTelemetryMetricSink.java | {
"start": 12769,
"end": 13174
} | class ____ {
final BitSet optionalLabelsIndices;
final Object measure;
MeasuresData(BitSet optionalLabelsIndices, Object measure) {
this.optionalLabelsIndices = optionalLabelsIndices;
this.measure = measure;
}
public BitSet getOptionalLabelsBitSet() {
return optionalLabelsIndices;
}
public Object getMeasure() {
return measure;
}
}
}
| MeasuresData |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/impl/ThreadContextDataInjector.java | {
"start": 10017,
"end": 13395
} | class ____ extends AbstractContextDataInjector {
/**
* If there are no configuration properties, this injector will return the thread context's internal data
* structure. Otherwise the configuration properties are combined with the thread context key-value pairs into the
* specified reusable StringMap.
*
* @param props list of configuration properties, may be {@code null}
* @param ignore a {@code StringMap} instance from the log event
* @return a {@code StringMap} combining configuration properties with thread context data
*/
@Override
public StringMap injectContextData(final List<Property> props, final StringMap ignore) {
// If there are no configuration properties we want to just return the ThreadContext's StringMap:
// it is a copy-on-write data structure so we are sure ThreadContext changes will not affect our copy.
if (providers.size() == 1 && (props == null || props.isEmpty())) {
// this will replace the LogEvent's context data with the returned instance
return providers.get(0).supplyStringMap();
}
int count = props == null ? 0 : props.size();
final StringMap[] maps = new StringMap[providers.size()];
for (int i = 0; i < providers.size(); ++i) {
maps[i] = providers.get(i).supplyStringMap();
count += maps[i].size();
}
// However, if the list of Properties is non-empty we need to combine the properties and the ThreadContext
// data. Note that we cannot reuse the specified StringMap: some Loggers may have properties defined
// and others not, so the LogEvent's context data may have been replaced with an immutable copy from
// the ThreadContext - this will throw an UnsupportedOperationException if we try to modify it.
final StringMap result = ContextDataFactory.createContextData(count);
copyProperties(props, result);
for (StringMap map : maps) {
result.putAll(map);
}
return result;
}
@Override
public ReadOnlyStringMap rawContextData() {
return ThreadContext.getThreadContextMap().getReadOnlyContextData();
}
}
/**
* Copies key-value pairs from the specified property list into the specified {@code StringMap}.
*
* @param properties list of configuration properties, may be {@code null}
* @param result the {@code StringMap} object to add the key-values to. Must be non-{@code null}.
*/
public static void copyProperties(final List<Property> properties, final StringMap result) {
if (properties != null) {
for (int i = 0; i < properties.size(); i++) {
final Property prop = properties.get(i);
result.putValue(prop.getName(), prop.getValue());
}
}
}
private static List<ContextDataProvider> getProviders() {
final List<ContextDataProvider> providers =
new ArrayList<>(contextDataProviders.size() + SERVICE_PROVIDERS.size());
providers.addAll(contextDataProviders);
providers.addAll(SERVICE_PROVIDERS);
return providers;
}
}
| ForCopyOnWriteThreadContextMap |
java | apache__camel | components/camel-sql/src/test/java/org/apache/camel/component/sql/SqlProducerAndInTest.java | {
"start": 1397,
"end": 4369
} | class ____ extends CamelTestSupport {
EmbeddedDatabase db;
@Override
public void doPreSetup() throws Exception {
db = new EmbeddedDatabaseBuilder()
.setName(getClass().getSimpleName())
.setType(EmbeddedDatabaseType.H2)
.addScript("sql/createAndPopulateDatabase.sql").build();
}
@Override
public void doPostTearDown() throws Exception {
if (db != null) {
db.shutdown();
}
}
@Test
public void testQueryInArray() throws InterruptedException {
MockEndpoint mock = getMockEndpoint("mock:query");
mock.expectedMessageCount(1);
template.requestBodyAndHeader("direct:query", "ASF", "names", new String[] { "Camel", "AMQ" });
MockEndpoint.assertIsSatisfied(context);
List list = mock.getReceivedExchanges().get(0).getIn().getBody(List.class);
assertEquals(2, list.size());
Map row = (Map) list.get(0);
assertEquals("Camel", row.get("PROJECT"));
row = (Map) list.get(1);
assertEquals("AMQ", row.get("PROJECT"));
}
@Test
public void testQueryInList() throws InterruptedException {
MockEndpoint mock = getMockEndpoint("mock:query");
mock.expectedMessageCount(1);
List<String> names = new ArrayList<>();
names.add("Camel");
names.add("AMQ");
template.requestBodyAndHeader("direct:query", "ASF", "names", names);
MockEndpoint.assertIsSatisfied(context);
List list = mock.getReceivedExchanges().get(0).getIn().getBody(List.class);
assertEquals(2, list.size());
Map row = (Map) list.get(0);
assertEquals("Camel", row.get("PROJECT"));
row = (Map) list.get(1);
assertEquals("AMQ", row.get("PROJECT"));
}
@Test
public void testQueryInString() throws InterruptedException {
MockEndpoint mock = getMockEndpoint("mock:query");
mock.expectedMessageCount(1);
template.requestBodyAndHeader("direct:query", "ASF", "names", "Camel,AMQ");
MockEndpoint.assertIsSatisfied(context);
List list = mock.getReceivedExchanges().get(0).getIn().getBody(List.class);
assertEquals(2, list.size());
Map row = (Map) list.get(0);
assertEquals("Camel", row.get("PROJECT"));
row = (Map) list.get(1);
assertEquals("AMQ", row.get("PROJECT"));
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
// required for the sql component
getContext().getComponent("sql", SqlComponent.class).setDataSource(db);
from("direct:query")
.to("sql:classpath:sql/selectProjectsAndIn.sql")
.to("log:query")
.to("mock:query");
}
};
}
}
| SqlProducerAndInTest |
java | elastic__elasticsearch | x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AbstractConfigurationFunctionTestCase.java | {
"start": 1118,
"end": 2912
} | class ____ extends AbstractScalarFunctionTestCase {
protected abstract Expression buildWithConfiguration(Source source, List<Expression> args, Configuration configuration);
@Override
protected Expression build(Source source, List<Expression> args) {
return buildWithConfiguration(source, args, testCase.getConfiguration());
}
public void testSerializationWithConfiguration() {
assumeTrue("can't serialize function", canSerialize());
Configuration config = randomConfiguration(testCase.getSource().text(), randomTables());
Expression expr = buildWithConfiguration(testCase.getSource(), testCase.getDataAsFields(), config);
assertSerialization(expr, config);
Configuration differentConfig = randomValueOtherThan(
config,
// The source must match the original (static) one, as function source serialization depends on it
() -> randomConfiguration(testCase.getSource().text(), randomTables())
);
Expression differentExpr = buildWithConfiguration(testCase.getSource(), testCase.getDataAsFields(), differentConfig);
assertNotEquals(expr, differentExpr);
}
protected static Configuration configurationForTimezone(ZoneId zoneId) {
return randomConfigurationBuilder().query(TEST_SOURCE.text()).zoneId(zoneId).build();
}
protected static Configuration configurationForLocale(Locale locale) {
return randomConfigurationBuilder().query(TEST_SOURCE.text()).locale(locale).build();
}
protected static Configuration configurationForTimezoneAndLocale(ZoneId zoneId, Locale locale) {
return randomConfigurationBuilder().query(TEST_SOURCE.text()).zoneId(zoneId).locale(locale).build();
}
}
| AbstractConfigurationFunctionTestCase |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/spi/PropertyData.java | {
"start": 830,
"end": 973
} | class ____ or the element type if an array
*/
TypeDetails getClassOrElementType() throws MappingException;
/**
* Returns the returned | itself |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java | {
"start": 36460,
"end": 55164
} | interface ____ {
void count(String partition, Resource reservedRes, SchedulerApplicationAttempt application);
}
private void count(String partition, Resource resource, Counter counter, Counter parentCounter) {
final String checkedPartition = ensurePartition(partition);
counter.count(checkedPartition, resource);
Optional.ofNullable(parentCounter).ifPresent(c -> c.count(checkedPartition, resource));
}
private void countAndUpdate(String partition, Resource resource,
Counter counter, CounterWithApp parentCounter) {
final String checkedPartition = ensurePartition(partition);
counter.count(checkedPartition, resource);
CSQueueUtils.updateUsedCapacity(resourceCalculator,
labelManager.getResourceByLabel(checkedPartition, Resources.none()),
checkedPartition, this);
Optional.ofNullable(parentCounter).ifPresent(c -> c.count(checkedPartition, resource, null));
}
@Override
public void incReservedResource(String partition, Resource reservedRes) {
count(partition, reservedRes, usageTracker.getQueueUsage()::incReserved,
parent == null ? null : parent::incReservedResource);
}
@Override
public void decReservedResource(String partition, Resource reservedRes) {
count(partition, reservedRes, usageTracker.getQueueUsage()::decReserved,
parent == null ? null : parent::decReservedResource);
}
@Override
public void incPendingResource(String nodeLabel, Resource resourceToInc) {
count(nodeLabel, resourceToInc, usageTracker.getQueueUsage()::incPending,
parent == null ? null : parent::incPendingResource);
}
@Override
public void decPendingResource(String nodeLabel, Resource resourceToDec) {
count(nodeLabel, resourceToDec, usageTracker.getQueueUsage()::decPending,
parent == null ? null : parent::decPendingResource);
}
@Override
public void incUsedResource(String nodeLabel, Resource resourceToInc,
SchedulerApplicationAttempt application) {
countAndUpdate(nodeLabel, resourceToInc, usageTracker.getQueueUsage()::incUsed,
parent == null ? null : parent::incUsedResource);
}
@Override
public void decUsedResource(String nodeLabel, Resource resourceToDec,
SchedulerApplicationAttempt application) {
countAndUpdate(nodeLabel, resourceToDec, usageTracker.getQueueUsage()::decUsed,
parent == null ? null : parent::decUsedResource);
}
/**
* Return if the queue has pending resource on given nodePartition and
* schedulingMode.
*/
boolean hasPendingResourceRequest(String nodePartition,
Resource cluster, SchedulingMode schedulingMode) {
return SchedulerUtils.hasPendingResourceRequest(resourceCalculator,
usageTracker.getQueueUsage(), nodePartition, cluster, schedulingMode);
}
@Override
public Priority getDefaultApplicationPriority() {
return null;
}
/**
* Returns the union of all node labels that could be accessed by this queue based on accessible
* node labels and configured node labels properties.
* @return node labels this queue has access to
*/
@Override
public Set<String> getNodeLabelsForQueue() {
// if queue's label is *, queue can access any labels. Instead of
// considering all labels in cluster, only those labels which are
// use some resource of this queue can be considered.
Set<String> nodeLabels = new HashSet<String>();
if (this.getAccessibleNodeLabels() != null && this.getAccessibleNodeLabels()
.contains(RMNodeLabelsManager.ANY)) {
nodeLabels.addAll(Sets.union(this.getQueueCapacities().getExistingNodeLabels(),
this.getQueueResourceUsage().getExistingNodeLabels()));
} else {
nodeLabels.addAll(this.getAccessibleNodeLabels());
}
// Add NO_LABEL also to this list as NO_LABEL also can be granted with
// resource in many general cases.
if (!nodeLabels.contains(NO_LABEL)) {
nodeLabels.add(NO_LABEL);
}
return nodeLabels;
}
public Resource getTotalKillableResource(String partition) {
return queueContext.getPreemptionManager().getKillableResource(getQueuePath(),
partition);
}
public Iterator<RMContainer> getKillableContainers(String partition) {
return queueContext.getPreemptionManager().getKillableContainers(
getQueuePath(),
partition);
}
@VisibleForTesting
@Override
public CSAssignment assignContainers(Resource clusterResource,
FiCaSchedulerNode node, ResourceLimits resourceLimits,
SchedulingMode schedulingMode) {
return assignContainers(clusterResource, new SimpleCandidateNodeSet<>(node),
resourceLimits, schedulingMode);
}
/**
* Checks whether this queue could accept the container allocation request.
* @param cluster overall cluster resource
* @param request container allocation request
* @return true if queue could accept the container allocation request, false otherwise
*/
@Override
public boolean accept(Resource cluster,
ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode> request) {
// Do we need to check parent queue before making this decision?
boolean checkParentQueue = false;
ContainerAllocationProposal<FiCaSchedulerApp, FiCaSchedulerNode> allocation =
request.getFirstAllocatedOrReservedContainer();
SchedulerContainer<FiCaSchedulerApp, FiCaSchedulerNode> schedulerContainer =
allocation.getAllocatedOrReservedContainer();
// Do not check when allocating new container from a reserved container
if (allocation.getAllocateFromReservedContainer() == null) {
Resource required = allocation.getAllocatedOrReservedResource();
Resource netAllocated = Resources.subtract(required,
request.getTotalReleasedResource());
readLock.lock();
try {
String partition = schedulerContainer.getNodePartition();
Resource maxResourceLimit;
if (allocation.getSchedulingMode()
== SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY) {
maxResourceLimit = getQueueMaxResource(partition);
} else{
maxResourceLimit = labelManager.getResourceByLabel(
schedulerContainer.getNodePartition(), cluster);
}
if (!Resources.fitsIn(resourceCalculator,
Resources.add(usageTracker.getQueueUsage().getUsed(partition), netAllocated),
maxResourceLimit)) {
if (LOG.isDebugEnabled()) {
LOG.debug("Used resource=" + usageTracker.getQueueUsage().getUsed(partition)
+ " exceeded maxResourceLimit of the queue ="
+ maxResourceLimit);
}
return false;
}
} finally {
readLock.unlock();
}
// Only check parent queue when something new allocated or reserved.
checkParentQueue = true;
}
if (parent != null && checkParentQueue) {
return parent.accept(cluster, request);
}
return true;
}
@Override
public void validateSubmitApplication(ApplicationId applicationId,
String userName, String queue) throws AccessControlException {
// Dummy implementation
}
@Override
public void updateQueueState(QueueState queueState) {
this.state = queueState;
}
/**
* Sets the state of this queue to RUNNING.
* @throws YarnException if its parent queue is not in RUNNING state
*/
@Override
public void activateQueue() throws YarnException {
this.writeLock.lock();
try {
if (getState() == QueueState.RUNNING) {
LOG.info("The specified queue:" + getQueuePath()
+ " is already in the RUNNING state.");
} else {
CSQueue parentQueue = parent;
if (parentQueue == null || parentQueue.getState() == QueueState.RUNNING) {
updateQueueState(QueueState.RUNNING);
} else {
throw new YarnException("The parent Queue:" + parentQueue.getQueuePath()
+ " is not running. Please activate the parent queue first");
}
}
} finally {
this.writeLock.unlock();
}
}
/**
* Stops this queue if no application is currently running on the queue.
*/
protected void appFinished() {
this.writeLock.lock();
try {
if (getState() == QueueState.DRAINING) {
if (getNumApplications() == 0) {
updateQueueState(QueueState.STOPPED);
}
}
} finally {
this.writeLock.unlock();
}
}
@Override
public Priority getPriority() {
return this.priority;
}
@Override
public UserWeights getUserWeights() {
return userWeights;
}
/**
* Recursively sets the state of this queue and the state of its parent to DRAINING.
*/
public void recoverDrainingState() {
this.writeLock.lock();
try {
if (getState() == QueueState.STOPPED) {
updateQueueState(QueueState.DRAINING);
}
LOG.info("Recover draining state for queue " + this.getQueuePath());
if (parent != null && parent.getState() == QueueState.STOPPED) {
((AbstractCSQueue) parent).recoverDrainingState();
}
} finally {
this.writeLock.unlock();
}
}
@Override
public String getMultiNodeSortingPolicyClassName() {
return this.multiNodeSortingPolicyClassName;
}
public void setMultiNodeSortingPolicyClassName(String policyName) {
this.multiNodeSortingPolicyClassName = policyName;
}
public long getMaximumApplicationLifetime() {
return queueAppLifetimeSettings.getMaxApplicationLifetime();
}
public long getDefaultApplicationLifetime() {
return queueAppLifetimeSettings.getDefaultApplicationLifetime();
}
public boolean getDefaultAppLifetimeWasSpecifiedInConfig() {
return queueAppLifetimeSettings.isDefaultAppLifetimeWasSpecifiedInConfig();
}
public void setMaxParallelApps(int maxParallelApps) {
this.queueAppLifetimeSettings.setMaxParallelApps(maxParallelApps);
}
@Override
public int getMaxParallelApps() {
return this.queueAppLifetimeSettings.getMaxParallelApps();
}
abstract int getNumRunnableApps();
protected void updateAbsoluteCapacities() {
QueueCapacities parentQueueCapacities = null;
if (parent != null) {
parentQueueCapacities = parent.getQueueCapacities();
}
CSQueueUtils.updateAbsoluteCapacitiesByNodeLabels(queueCapacities,
parentQueueCapacities, queueCapacities.getExistingNodeLabels(),
queueContext.getConfiguration().isLegacyQueueMode());
}
private Resource createNormalizedMinResource(Resource minResource,
Map<String, Float> effectiveMinRatio) {
Resource ret = Resource.newInstance(minResource);
int maxLength = ResourceUtils.getNumberOfCountableResourceTypes();
for (int i = 0; i < maxLength; i++) {
ResourceInformation nResourceInformation =
minResource.getResourceInformation(i);
Float ratio = effectiveMinRatio.get(nResourceInformation.getName());
if (ratio != null) {
ret.setResourceValue(i,
(long) (nResourceInformation.getValue() * ratio));
if (LOG.isDebugEnabled()) {
LOG.debug("Updating min resource for Queue: " + getQueuePath() + " as " + ret
.getResourceInformation(i) + ", Actual resource: "
+ nResourceInformation.getValue() + ", ratio: " + ratio);
}
}
}
return ret;
}
private Resource getOrInheritMaxResource(Resource resourceByLabel, String label) {
Resource parentMaxResource =
parent.getQueueResourceQuotas().getConfiguredMaxResource(label);
if (parentMaxResource.equals(Resources.none())) {
parentMaxResource =
parent.getQueueResourceQuotas().getEffectiveMaxResource(label);
}
Resource configuredMaxResource =
getQueueResourceQuotas().getConfiguredMaxResource(label);
if (configuredMaxResource.equals(Resources.none())) {
return Resources.clone(parentMaxResource);
}
return Resources.clone(Resources.min(resourceCalculator, resourceByLabel,
configuredMaxResource, parentMaxResource));
}
void deriveCapacityFromAbsoluteConfigurations(String label,
Resource clusterResource) {
// Update capacity with a float calculated from the parent's minResources
// and the recently changed queue minResources.
// capacity = effectiveMinResource / {parent's effectiveMinResource}
float result = resourceCalculator.divide(clusterResource,
usageTracker.getQueueResourceQuotas().getEffectiveMinResource(label),
parent.getQueueResourceQuotas().getEffectiveMinResource(label));
queueCapacities.setCapacity(label,
Float.isInfinite(result) ? 0 : result);
// Update maxCapacity with a float calculated from the parent's maxResources
// and the recently changed queue maxResources.
// maxCapacity = effectiveMaxResource / parent's effectiveMaxResource
result = resourceCalculator.divide(clusterResource,
usageTracker.getQueueResourceQuotas().getEffectiveMaxResource(label),
parent.getQueueResourceQuotas().getEffectiveMaxResource(label));
queueCapacities.setMaximumCapacity(label,
Float.isInfinite(result) ? 0 : result);
// Update absolute capacity (as in fraction of the
// whole cluster's resources) with a float calculated from the queue's
// capacity and the parent's absoluteCapacity.
// absoluteCapacity = capacity * parent's absoluteCapacity
queueCapacities.setAbsoluteCapacity(label,
queueCapacities.getCapacity(label) * parent.getQueueCapacities()
.getAbsoluteCapacity(label));
// Update absolute maxCapacity (as in fraction of the
// whole cluster's resources) with a float calculated from the queue's
// maxCapacity and the parent's absoluteMaxCapacity.
// absoluteMaxCapacity = maxCapacity * parent's absoluteMaxCapacity
queueCapacities.setAbsoluteMaximumCapacity(label,
queueCapacities.getMaximumCapacity(label) *
parent.getQueueCapacities()
.getAbsoluteMaximumCapacity(label));
}
void updateEffectiveResources(Resource clusterResource) {
for (String label : queueNodeLabelsSettings.getConfiguredNodeLabels()) {
Resource resourceByLabel = labelManager.getResourceByLabel(label,
clusterResource);
Resource newEffectiveMinResource;
Resource newEffectiveMaxResource;
// Absolute and relative/weight mode needs different handling.
if (getCapacityConfigType().equals(
CapacityConfigType.ABSOLUTE_RESOURCE)) {
newEffectiveMinResource = createNormalizedMinResource(
usageTracker.getQueueResourceQuotas().getConfiguredMinResource(label),
((AbstractParentQueue) parent).getEffectiveMinRatio(label));
// Max resource of a queue should be the minimum of {parent's maxResources,
// this queue's maxResources}. Both parent's maxResources and this queue's
// maxResources can be configured. If this queue's maxResources is not
// configured, inherit the value from the parent. If parent's
// maxResources is not configured its inherited value must be collected.
newEffectiveMaxResource =
getOrInheritMaxResource(resourceByLabel, label);
} else {
newEffectiveMinResource = Resources
.multiply(resourceByLabel,
queueCapacities.getAbsoluteCapacity(label));
newEffectiveMaxResource = Resources
.multiply(resourceByLabel,
queueCapacities.getAbsoluteMaximumCapacity(label));
}
// Update the effective min
usageTracker.getQueueResourceQuotas().setEffectiveMinResource(label,
newEffectiveMinResource);
usageTracker.getQueueResourceQuotas().setEffectiveMaxResource(label,
newEffectiveMaxResource);
if (LOG.isDebugEnabled()) {
LOG.debug("Updating queue:" + getQueuePath()
+ " with effective minimum resource=" + newEffectiveMinResource
+ "and effective maximum resource="
+ newEffectiveMaxResource);
}
if (getCapacityConfigType().equals(
CapacityConfigType.ABSOLUTE_RESOURCE)) {
/*
* If the queues are configured with absolute resources, it is advised
* to update capacity/max-capacity/etc. based on the newly calculated
* resource values. These values won't be used for actual resource
* distribution, however, for accurate metrics and the UI
* they should be re-calculated.
*/
deriveCapacityFromAbsoluteConfigurations(label, clusterResource);
}
}
}
public boolean isDynamicQueue() {
readLock.lock();
try {
return dynamicQueue;
} finally {
readLock.unlock();
}
}
public void setDynamicQueue(boolean dynamicQueue) {
writeLock.lock();
try {
this.dynamicQueue = dynamicQueue;
} finally {
writeLock.unlock();
}
}
protected String getCapacityOrWeightString() {
if (queueCapacities.getWeight() != -1) {
return "weight=" + queueCapacities.getWeight() + ", " +
"normalizedWeight=" + queueCapacities.getNormalizedWeight();
} else {
return "capacity=" + queueCapacities.getCapacity();
}
}
/**
* Checks whether this queue is a dynamic queue and could be deleted.
* @return true if the dynamic queue could be deleted, false otherwise
*/
public boolean isEligibleForAutoDeletion() {
return false;
}
/**
* Checks whether this queue is a dynamic queue and there has not been an application submission
* on it for a configured period of time.
* @return true if queue has been idle for a configured period of time, false otherwise
*/
public boolean isInactiveDynamicQueue() {
long idleDurationSeconds =
(Time.monotonicNow() - getLastSubmittedTimestamp())/1000;
return isDynamicQueue() && isEligibleForAutoDeletion() &&
(idleDurationSeconds > queueContext.getConfiguration().
getAutoExpiredDeletionTime());
}
void updateLastSubmittedTimeStamp() {
writeLock.lock();
try {
usageTracker.setLastSubmittedTimestamp(Time.monotonicNow());
} finally {
writeLock.unlock();
}
}
@VisibleForTesting
long getLastSubmittedTimestamp() {
readLock.lock();
try {
return usageTracker.getLastSubmittedTimestamp();
} finally {
readLock.unlock();
}
}
@VisibleForTesting
void setLastSubmittedTimestamp(long lastSubmittedTimestamp) {
writeLock.lock();
try {
usageTracker.setLastSubmittedTimestamp(lastSubmittedTimestamp);
} finally {
writeLock.unlock();
}
}
}
| CounterWithApp |
java | google__dagger | dagger-android/test/javatests/dagger/android/DispatchingAndroidInjectorTest.java | {
"start": 1351,
"end": 4606
} | class ____ {
@Test
public void withClassKeys() {
DispatchingAndroidInjector<Activity> dispatchingAndroidInjector =
newDispatchingAndroidInjector(
ImmutableMap.of(FooActivity.class, FooInjector.Factory::new), ImmutableMap.of());
FooActivity activity = Robolectric.setupActivity(FooActivity.class);
assertThat(dispatchingAndroidInjector.maybeInject(activity)).isTrue();
}
@Test
public void withStringKeys() {
DispatchingAndroidInjector<Activity> dispatchingAndroidInjector =
newDispatchingAndroidInjector(
ImmutableMap.of(),
ImmutableMap.of(FooActivity.class.getName(), FooInjector.Factory::new));
FooActivity activity = Robolectric.setupActivity(FooActivity.class);
assertThat(dispatchingAndroidInjector.maybeInject(activity)).isTrue();
}
@Test
public void withMixedKeys() {
DispatchingAndroidInjector<Activity> dispatchingAndroidInjector =
newDispatchingAndroidInjector(
ImmutableMap.of(FooActivity.class, FooInjector.Factory::new),
ImmutableMap.of(BarActivity.class.getName(), BarInjector.Factory::new));
FooActivity fooActivity = Robolectric.setupActivity(FooActivity.class);
assertThat(dispatchingAndroidInjector.maybeInject(fooActivity)).isTrue();
BarActivity barActivity = Robolectric.setupActivity(BarActivity.class);
assertThat(dispatchingAndroidInjector.maybeInject(barActivity)).isTrue();
}
@Test
public void maybeInject_returnsFalse_ifNoMatchingInjectorExists() {
DispatchingAndroidInjector<Activity> dispatchingAndroidInjector =
newDispatchingAndroidInjector(ImmutableMap.of(), ImmutableMap.of());
BarActivity activity = Robolectric.setupActivity(BarActivity.class);
assertThat(dispatchingAndroidInjector.maybeInject(activity)).isFalse();
}
@Test
public void throwsIfFactoryCreateReturnsNull() {
DispatchingAndroidInjector<Activity> dispatchingAndroidInjector =
newDispatchingAndroidInjector(
ImmutableMap.of(FooActivity.class, () -> null), ImmutableMap.of());
FooActivity activity = Robolectric.setupActivity(FooActivity.class);
try {
dispatchingAndroidInjector.maybeInject(activity);
fail("Expected NullPointerException");
} catch (NullPointerException expected) {
}
}
@Test
public void throwsIfClassMismatched() {
DispatchingAndroidInjector<Activity> dispatchingAndroidInjector =
newDispatchingAndroidInjector(
ImmutableMap.of(FooActivity.class, BarInjector.Factory::new), ImmutableMap.of());
FooActivity activity = Robolectric.setupActivity(FooActivity.class);
try {
dispatchingAndroidInjector.maybeInject(activity);
fail("Expected InvalidInjectorBindingException");
} catch (InvalidInjectorBindingException expected) {
}
}
private static <T> DispatchingAndroidInjector<T> newDispatchingAndroidInjector(
Map<Class<?>, Provider<Factory<?>>> injectorFactoriesWithClassKeys,
Map<String, Provider<AndroidInjector.Factory<?>>>
injectorFactoriesWithStringKeys) {
return new DispatchingAndroidInjector<>(
injectorFactoriesWithClassKeys, injectorFactoriesWithStringKeys);
}
static | DispatchingAndroidInjectorTest |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/discovery/PeerFinderMessagesTests.java | {
"start": 1084,
"end": 5270
} | class ____ extends ESTestCase {
private DiscoveryNode createNode(String id) {
return DiscoveryNodeUtils.create(id);
}
public void testPeersRequestEqualsHashCodeSerialization() {
final PeersRequest initialPeersRequest = new PeersRequest(
createNode(randomAlphaOfLength(10)),
Arrays.stream(generateRandomStringArray(10, 10, false)).map(this::createNode).toList()
);
// Note: the explicit cast of the CopyFunction is needed for some IDE (specifically Eclipse 4.8.0) to infer the right type
EqualsHashCodeTestUtils.checkEqualsAndHashCode(
initialPeersRequest,
(CopyFunction<PeersRequest>) publishRequest -> copyWriteable(publishRequest, writableRegistry(), PeersRequest::new),
in -> {
final List<DiscoveryNode> discoveryNodes = new ArrayList<>(in.getKnownPeers());
if (randomBoolean()) {
return new PeersRequest(createNode(randomAlphaOfLength(10)), discoveryNodes);
} else {
return new PeersRequest(in.getSourceNode(), modifyDiscoveryNodesList(in.getKnownPeers(), true));
}
}
);
}
public void testPeersResponseEqualsHashCodeSerialization() {
final long initialTerm = randomNonNegativeLong();
final PeersResponse initialPeersResponse;
if (randomBoolean()) {
initialPeersResponse = new PeersResponse(Optional.of(createNode(randomAlphaOfLength(10))), emptyList(), initialTerm);
} else {
initialPeersResponse = new PeersResponse(
Optional.empty(),
Arrays.stream(generateRandomStringArray(10, 10, false, false)).map(this::createNode).toList(),
initialTerm
);
}
// Note: the explicit cast of the CopyFunction is needed for some IDE (specifically Eclipse 4.8.0) to infer the right type
EqualsHashCodeTestUtils.checkEqualsAndHashCode(
initialPeersResponse,
(CopyFunction<PeersResponse>) publishResponse -> copyWriteable(publishResponse, writableRegistry(), PeersResponse::new),
in -> {
final long term = in.getTerm();
if (randomBoolean()) {
return new PeersResponse(
in.getMasterNode(),
in.getKnownPeers(),
randomValueOtherThan(term, ESTestCase::randomNonNegativeLong)
);
} else {
if (in.getMasterNode().isPresent()) {
if (randomBoolean()) {
return new PeersResponse(Optional.of(createNode(randomAlphaOfLength(10))), in.getKnownPeers(), term);
} else {
return new PeersResponse(Optional.empty(), singletonList(createNode(randomAlphaOfLength(10))), term);
}
} else {
if (randomBoolean()) {
return new PeersResponse(Optional.of(createNode(randomAlphaOfLength(10))), emptyList(), term);
} else {
return new PeersResponse(in.getMasterNode(), modifyDiscoveryNodesList(in.getKnownPeers(), false), term);
}
}
}
}
);
}
private List<DiscoveryNode> modifyDiscoveryNodesList(Collection<DiscoveryNode> originalNodes, boolean allowEmpty) {
final List<DiscoveryNode> discoveryNodes = new ArrayList<>(originalNodes);
if (discoveryNodes.isEmpty() == false && randomBoolean() && (allowEmpty || discoveryNodes.size() > 1)) {
discoveryNodes.remove(randomIntBetween(0, discoveryNodes.size() - 1));
} else if (discoveryNodes.isEmpty() == false && randomBoolean()) {
discoveryNodes.set(randomIntBetween(0, discoveryNodes.size() - 1), createNode(randomAlphaOfLength(10)));
} else {
discoveryNodes.add(createNode(randomAlphaOfLength(10)));
}
return discoveryNodes;
}
}
| PeerFinderMessagesTests |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/util/introspection/PropertyOrFieldSupport_getValueOf_Test.java | {
"start": 9057,
"end": 9145
} | class ____ extends Employee {
public void getCity() {}
}
}
| VoidGetterPropertyEmployee |
java | spring-projects__spring-boot | module/spring-boot-web-server/src/testFixtures/java/org/springframework/boot/web/server/servlet/AbstractServletWebServerFactoryTests.java | {
"start": 72026,
"end": 72306
} | class ____ extends GenericServlet {
private int initCount;
@Override
public void init() {
this.initCount++;
}
@Override
public void service(ServletRequest req, ServletResponse res) {
}
int getInitCount() {
return this.initCount;
}
}
| InitCountingServlet |
java | spring-projects__spring-framework | spring-expression/src/test/java/org/springframework/expression/spel/spr10210/A.java | {
"start": 840,
"end": 900
} | class ____ extends B<C> {
public void bridgeMethod() {
}
}
| A |
java | quarkusio__quarkus | integration-tests/maven/src/test/resources-filtered/projects/platform-properties-overrides/ext/deployment/src/main/java/org/acme/quarkus/sample/extension/deployment/AcmeExtensionProcessor.java | {
"start": 527,
"end": 1085
} | class ____ {
private static final String FEATURE = "acme-extension";
@BuildStep
FeatureBuildItem feature() {
return new FeatureBuildItem(FEATURE);
}
@BuildStep
@Record(ExecutionTime.STATIC_INIT)
SyntheticBeanBuildItem syntheticBean(ConfigReportRecorder recorder, NativeConfig nativeConfig) {
return SyntheticBeanBuildItem.configure(ConfigReport.class)
.scope(Singleton.class)
.runtimeValue(recorder.configReport(nativeConfig.builderImage().image()))
.done();
}
}
| AcmeExtensionProcessor |
java | apache__camel | components/camel-digitalocean/src/test/java/org/apache/camel/component/digitalocean/integration/DigitalOceanComponentIT.java | {
"start": 2287,
"end": 15078
} | class ____ extends DigitalOceanTestSupport {
private static final Logger LOG = LoggerFactory.getLogger(DigitalOceanComponentIT.class);
@EndpointInject("mock:result")
protected MockEndpoint mockResultEndpoint;
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from("direct:getAccountInfo")
.setHeader(DigitalOceanHeaders.OPERATION, constant(DigitalOceanOperations.get))
.to("digitalocean:account?oAuthToken={{oAuthToken}}")
.to("mock:result");
from("direct:getAccountInfo2")
.to("digitalocean:account?operation=" + DigitalOceanOperations.get + "&oAuthToken={{oAuthToken}}")
.to("mock:result");
from("direct:getActions")
.to("digitalocean:actions?operation=list&oAuthToken={{oAuthToken}}&perPage=30")
.to("mock:result");
from("direct:getActionInfo")
.setHeader(DigitalOceanHeaders.OPERATION, constant(DigitalOceanOperations.get))
.setHeader(DigitalOceanHeaders.ID, constant(133459716))
.to("digitalocean:actions?oAuthToken={{oAuthToken}}")
.to("mock:result");
from("direct:getDroplets")
.setHeader(DigitalOceanHeaders.OPERATION, constant(DigitalOceanOperations.list))
.to("digitalocean:droplets?oAuthToken={{oAuthToken}}")
.to("mock:result");
from("direct:getDroplet")
.setHeader(DigitalOceanHeaders.ID, constant(5428878))
.to("digitalocean:droplets?operation=get&oAuthToken={{oAuthToken}}")
.to("mock:result");
from("direct:getDroplet2")
.setHeader(DigitalOceanHeaders.OPERATION, constant(DigitalOceanOperations.get))
.setHeader(DigitalOceanHeaders.ID, constant(5428878))
.to("digitalocean:droplets?oAuthToken={{oAuthToken}}")
.to("mock:result");
from("direct:createDroplet")
.setHeader(DigitalOceanHeaders.OPERATION, constant(DigitalOceanOperations.create))
.setHeader(DigitalOceanHeaders.NAME, constant("camel-test"))
.setHeader(DigitalOceanHeaders.REGION, constant("fra1"))
.setHeader(DigitalOceanHeaders.DROPLET_IMAGE, constant("ubuntu-14-04-x64"))
.setHeader(DigitalOceanHeaders.DROPLET_SIZE, constant("512mb"))
.process(e -> {
Collection<String> tags = new ArrayList<>();
tags.add("tag1");
tags.add("tag2");
e.getIn().setHeader(DigitalOceanHeaders.DROPLET_TAGS, tags);
})
.to("digitalocean:droplets?oAuthToken={{oAuthToken}}")
.to("mock:result");
from("direct:createMultipleDroplets")
.setHeader(DigitalOceanHeaders.OPERATION, constant(DigitalOceanOperations.create))
.process(e -> {
Collection<String> names = new ArrayList<>();
names.add("droplet1");
names.add("droplet2");
e.getIn().setHeader(DigitalOceanHeaders.NAMES, names);
})
.setHeader(DigitalOceanHeaders.REGION, constant("fra1"))
.setHeader(DigitalOceanHeaders.DROPLET_IMAGE, constant("ubuntu-14-04-x64"))
.setHeader(DigitalOceanHeaders.DROPLET_SIZE, constant("512mb"))
.process(e -> {
Collection<String> tags = new ArrayList<>();
tags.add("tag1");
tags.add("tag2");
e.getIn().setHeader(DigitalOceanHeaders.DROPLET_TAGS, tags);
})
.to("digitalocean://droplets?oAuthToken={{oAuthToken}}")
.to("mock:result");
from("direct:getDropletBackups")
.setHeader(DigitalOceanHeaders.OPERATION, constant(DigitalOceanOperations.listBackups))
.setHeader(DigitalOceanHeaders.ID, constant(5428878))
.to("digitalocean://droplets?oAuthToken={{oAuthToken}}")
.to("mock:result");
from("direct:createTag")
.setHeader(DigitalOceanHeaders.OPERATION, constant(DigitalOceanOperations.create))
.setHeader(DigitalOceanHeaders.NAME, constant("tag1"))
.to("digitalocean://tags?oAuthToken={{oAuthToken}}")
.to("mock:result");
from("direct:getTags")
.setHeader(DigitalOceanHeaders.OPERATION, constant(DigitalOceanOperations.list))
.to("digitalocean://tags?oAuthToken={{oAuthToken}}")
.to("mock:result");
from("direct:getImages")
.setHeader(DigitalOceanHeaders.OPERATION, constant(DigitalOceanOperations.list))
.to("digitalocean://images?oAuthToken={{oAuthToken}}")
.to("mock:result");
from("direct:getImage")
.setHeader(DigitalOceanHeaders.OPERATION, constant(DigitalOceanOperations.get))
.setHeader(DigitalOceanHeaders.DROPLET_IMAGE, constant("ubuntu-14-04-x64"))
.to("digitalocean://images?oAuthToken={{oAuthToken}}")
.to("mock:result");
from("direct:getSizes")
.setHeader(DigitalOceanHeaders.OPERATION, constant(DigitalOceanOperations.list))
.to("digitalocean://sizes?oAuthToken={{oAuthToken}}")
.to("mock:result");
from("direct:getSize")
.setHeader(DigitalOceanHeaders.OPERATION, constant(DigitalOceanOperations.get))
.setHeader(DigitalOceanHeaders.NAME, constant("512mb"))
.to("digitalocean://sizes?oAuthToken={{oAuthToken}}")
.to("mock:result");
from("direct:getRegions")
.setHeader(DigitalOceanHeaders.OPERATION, constant(DigitalOceanOperations.list))
.to("digitalocean://regions?oAuthToken={{oAuthToken}}")
.to("mock:result");
from("direct:getRegion")
.setHeader(DigitalOceanHeaders.OPERATION, constant(DigitalOceanOperations.get))
.setHeader(DigitalOceanHeaders.NAME, constant("nyc1"))
.to("digitalocean://regions?oAuthToken={{oAuthToken}}")
.to("mock:result");
}
};
}
@Test
public void testGetAccountInfo() throws Exception {
mockResultEndpoint.expectedMinimumMessageCount(2);
Exchange exchange = template.request("direct:getAccountInfo", null);
assertTrue(((Account) exchange.getMessage().getBody()).isEmailVerified());
exchange = template.request("direct:getAccountInfo2", null);
assertTrue(((Account) exchange.getMessage().getBody()).isEmailVerified());
MockEndpoint.assertIsSatisfied(context);
}
@Test
public void testGetAllActions() throws Exception {
mockResultEndpoint.expectedMinimumMessageCount(1);
Exchange exchange = template.request("direct:getActions", null);
MockEndpoint.assertIsSatisfied(context);
assertEquals(30, ((List) exchange.getMessage().getBody()).size());
}
@Test
public void testGetActionInfo() throws Exception {
mockResultEndpoint.expectedMinimumMessageCount(1);
Exchange exchange = template.request("direct:getActionInfo", null);
MockEndpoint.assertIsSatisfied(context);
assertEquals(((Action) exchange.getMessage().getBody()).getId(), Integer.valueOf(133459716));
}
@Test
public void testGetDropletInfo() throws Exception {
mockResultEndpoint.expectedMinimumMessageCount(2);
Exchange exchange = template.request("direct:getDroplet", null);
assertEquals(((Droplet) exchange.getMessage().getBody()).getId(), Integer.valueOf(5428878));
exchange = template.request("direct:getDroplet2", null);
MockEndpoint.assertIsSatisfied(context);
assertEquals(((Droplet) exchange.getMessage().getBody()).getId(), Integer.valueOf(5428878));
}
@Test
public void testCreateDroplet() throws Exception {
mockResultEndpoint.expectedMinimumMessageCount(1);
Exchange exchange = template.request("direct:createDroplet", null);
MockEndpoint.assertIsSatisfied(context);
Droplet droplet = (Droplet) exchange.getMessage().getBody();
assertNotNull(droplet.getId());
assertEquals("fra1", droplet.getRegion().getSlug());
assertCollectionSize(droplet.getTags(), 2);
}
@Test
public void testCreateMultipleDroplets() throws Exception {
mockResultEndpoint.expectedMinimumMessageCount(1);
Exchange exchange = template.request("direct:createMultipleDroplets", null);
MockEndpoint.assertIsSatisfied(context);
List<Droplet> droplets = (List<Droplet>) exchange.getMessage().getBody();
assertCollectionSize(droplets, 2);
}
@Test
public void testGetAllDroplets() throws Exception {
mockResultEndpoint.expectedMinimumMessageCount(1);
Exchange exchange = template.request("direct:getDroplets", null);
MockEndpoint.assertIsSatisfied(context);
assertEquals(1, ((List) exchange.getMessage().getBody()).size());
}
@Test
public void testGetDropletBackups() throws Exception {
mockResultEndpoint.expectedMinimumMessageCount(1);
Exchange exchange = template.request("direct:getDropletBackups", null);
MockEndpoint.assertIsSatisfied(context);
assertCollectionSize((List) exchange.getMessage().getBody(), 0);
}
@Test
public void testCreateTag() throws Exception {
mockResultEndpoint.expectedMinimumMessageCount(1);
Exchange exchange = template.request("direct:createTag", null);
MockEndpoint.assertIsSatisfied(context);
assertEquals("tag1", ((Tag) exchange.getMessage().getBody()).getName());
}
@Test
public void testGetTags() throws Exception {
mockResultEndpoint.expectedMinimumMessageCount(1);
Exchange exchange = template.request("direct:getTags", null);
MockEndpoint.assertIsSatisfied(context);
assertEquals("tag1", ((List<Tag>) exchange.getMessage().getBody()).get(0).getName());
}
@Test
public void getImages() throws Exception {
mockResultEndpoint.expectedMinimumMessageCount(1);
Exchange exchange = template.request("direct:getImages", null);
MockEndpoint.assertIsSatisfied(context);
List<Image> images = (List<Image>) exchange.getMessage().getBody();
assertNotEquals(1, images.size());
}
@Test
public void getImage() throws Exception {
mockResultEndpoint.expectedMinimumMessageCount(1);
Exchange exchange = template.request("direct:getImage", null);
MockEndpoint.assertIsSatisfied(context);
assertEquals("ubuntu-14-04-x64", (exchange.getMessage().getBody(Image.class)).getSlug());
}
@Test
public void getSizes() throws Exception {
mockResultEndpoint.expectedMinimumMessageCount(1);
Exchange exchange = template.request("direct:getSizes", null);
MockEndpoint.assertIsSatisfied(context);
List<Size> sizes = (List<Size>) exchange.getMessage().getBody();
LOG.debug("Sizes: {}", sizes);
assertNotEquals(1, sizes.size());
}
@Test
public void getRegions() throws Exception {
mockResultEndpoint.expectedMinimumMessageCount(1);
Exchange exchange = template.request("direct:getRegions", null);
MockEndpoint.assertIsSatisfied(context);
List<Region> regions = (List<Region>) exchange.getMessage().getBody();
LOG.debug("Regions: {}", regions);
assertNotEquals(1, regions.size());
}
}
| DigitalOceanComponentIT |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_2437/PhoneSuperMapper.java | {
"start": 232,
"end": 293
} | interface ____ {
Phone map(PhoneDto dto);
}
| PhoneSuperMapper |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ContinuousComputation.java | {
"start": 3161,
"end": 4262
} | class ____ extends AbstractRunnable {
@Override
public void onFailure(Exception e) {
logger.error(Strings.format("unexpected error processing [%s]", ContinuousComputation.this), e);
assert false : e;
}
@Override
public void onRejection(Exception e) {
// The executor has an unbounded queue so we must be shutting down to get here.
assert e instanceof EsRejectedExecutionException esre && esre.isExecutorShutdown() : e;
logger.debug("rejected", e);
}
@Override
protected void doRun() {
final T input = enqueuedInput.get();
assert input != null;
try {
processInput(input);
} finally {
if (enqueuedInput.compareAndSet(input, null) == false) {
executor.execute(this);
}
}
}
@Override
public String toString() {
return "ContinuousComputation$Processor[" + ContinuousComputation.this + "]";
}
}
}
| Processor |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/threadsafety/ImmutableCheckerTest.java | {
"start": 32307,
"end": 32749
} | class ____ {
static <T> Super<T> get() {
return new Super<T>() {};
}
}
""")
.doTest();
}
@Test
public void interface_containerOf_immutable() {
compilationHelper
.addSourceLines(
"MyList.java",
"""
import com.google.errorprone.annotations.Immutable;
@Immutable(containerOf = "T")
public | Test |
java | apache__flink | flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/over/NonTimeRangeUnboundedPrecedingFunction.java | {
"start": 1613,
"end": 3802
} | class ____ a specialized implementation for processing
* unbounded OVER window aggregations, particularly for non-time-based range queries in Apache
* Flink. It maintains strict ordering of rows within partitions and handles the full changelog
* lifecycle (inserts, updates, deletes).
*
* <p>Key Components and Assumptions
*
* <p>Data Structure Design: (1) Maintains a sorted list of tuples containing sort keys and lists of
* IDs for each key (2) Each incoming row is assigned a unique Long ID (starting from
* Long.MIN_VALUE) (3) Uses multiple state types to track rows, sort orders, and aggregations
*
* <p>State Management: (1) idState: Counter for generating unique row IDs (2) sortedListState:
* Ordered list of sort keys with their associated row IDs (3) valueMapState: Maps IDs to their
* corresponding input rows (4) accMapState: Maps sort keys to their accumulated values
*
* <p>Processing Model: (1) For inserts/updates: Adds rows to the appropriate position based on sort
* key (2) For deletes: Removes rows by matching both sort key and row content (3) Recalculates
* aggregates for affected rows and emits the appropriate events (4) Skips redundant events when
* accumulators haven't changed to reduce network traffic
*
* <p>Optimization Assumptions: (1) Skip emitting updates when accumulators haven't changed to
* reduce network traffic (2) Uses state TTL for automatic cleanup of stale data (3) Carefully
* manages row state to support incremental calculations
*
* <p>Retraction Handling: (1) Handles retraction mode (DELETE/UPDATE_BEFORE) events properly (2)
* Supports the proper processing of changelog streams
*
* <p>Limitations
*
* <p>Linear search performance: - The current implementation uses a linear search to find the
* correct position for each sort key. This can be optimized using a binary search for large state
* sizes.
*
* <p>State size and performance: - The implementation maintains multiple state types that could
* grow large with high cardinality data
*
* <p>Linear recalculation: - When processing updates, all subsequent elements need to be
* recalculated, which could be inefficient for large windows
*/
public | is |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/explain/MemoryEstimation.java | {
"start": 980,
"end": 4252
} | class ____ implements ToXContentObject, Writeable {
public static final ParseField EXPECTED_MEMORY_WITHOUT_DISK = new ParseField("expected_memory_without_disk");
public static final ParseField EXPECTED_MEMORY_WITH_DISK = new ParseField("expected_memory_with_disk");
public static final ConstructingObjectParser<MemoryEstimation, Void> PARSER = new ConstructingObjectParser<>(
"memory_estimation",
a -> new MemoryEstimation((ByteSizeValue) a[0], (ByteSizeValue) a[1])
);
static {
PARSER.declareField(
optionalConstructorArg(),
(p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), EXPECTED_MEMORY_WITHOUT_DISK.getPreferredName()),
EXPECTED_MEMORY_WITHOUT_DISK,
ObjectParser.ValueType.VALUE
);
PARSER.declareField(
optionalConstructorArg(),
(p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), EXPECTED_MEMORY_WITH_DISK.getPreferredName()),
EXPECTED_MEMORY_WITH_DISK,
ObjectParser.ValueType.VALUE
);
}
private final ByteSizeValue expectedMemoryWithoutDisk;
private final ByteSizeValue expectedMemoryWithDisk;
public MemoryEstimation(@Nullable ByteSizeValue expectedMemoryWithoutDisk, @Nullable ByteSizeValue expectedMemoryWithDisk) {
this.expectedMemoryWithoutDisk = expectedMemoryWithoutDisk;
this.expectedMemoryWithDisk = expectedMemoryWithDisk;
}
public MemoryEstimation(StreamInput in) throws IOException {
this.expectedMemoryWithoutDisk = in.readOptionalWriteable(ByteSizeValue::readFrom);
this.expectedMemoryWithDisk = in.readOptionalWriteable(ByteSizeValue::readFrom);
}
public ByteSizeValue getExpectedMemoryWithoutDisk() {
return expectedMemoryWithoutDisk;
}
public ByteSizeValue getExpectedMemoryWithDisk() {
return expectedMemoryWithDisk;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalWriteable(expectedMemoryWithoutDisk);
out.writeOptionalWriteable(expectedMemoryWithDisk);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
if (expectedMemoryWithoutDisk != null) {
builder.field(EXPECTED_MEMORY_WITHOUT_DISK.getPreferredName(), expectedMemoryWithoutDisk.getStringRep());
}
if (expectedMemoryWithDisk != null) {
builder.field(EXPECTED_MEMORY_WITH_DISK.getPreferredName(), expectedMemoryWithDisk.getStringRep());
}
builder.endObject();
return builder;
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other == null || getClass() != other.getClass()) {
return false;
}
MemoryEstimation that = (MemoryEstimation) other;
return Objects.equals(expectedMemoryWithoutDisk, that.expectedMemoryWithoutDisk)
&& Objects.equals(expectedMemoryWithDisk, that.expectedMemoryWithDisk);
}
@Override
public int hashCode() {
return Objects.hash(expectedMemoryWithoutDisk, expectedMemoryWithDisk);
}
}
| MemoryEstimation |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/cglib/beans/BeanGenerator.java | {
"start": 1566,
"end": 1814
} | interface ____ {
public Object newInstance(String superclass, Map props);
}
private Class superclass;
private Map props = new HashMap();
private boolean classOnly;
public BeanGenerator() {
super(SOURCE);
}
/**
* Set the | BeanGeneratorKey |
java | spring-projects__spring-boot | module/spring-boot-micrometer-tracing-opentelemetry/src/test/java/org/springframework/boot/micrometer/tracing/opentelemetry/autoconfigure/otlp/OtlpTracingAutoConfigurationIntegrationTests.java | {
"start": 7252,
"end": 8278
} | class ____ {
private final Server server = createServer();
private final BlockingQueue<RecordedGrpcRequest> recordedRequests = new LinkedBlockingQueue<>();
void start() throws Exception {
this.server.start();
}
void close() throws Exception {
this.server.stop();
}
int getPort() {
return this.server.getURI().getPort();
}
RecordedGrpcRequest takeRequest(int timeout, TimeUnit unit) throws InterruptedException {
return this.recordedRequests.poll(timeout, unit);
}
void recordRequest(RecordedGrpcRequest request) {
this.recordedRequests.add(request);
}
private Server createServer() {
Server server = new Server();
server.addConnector(createConnector(server));
server.setHandler(new GrpcHandler());
return server;
}
private ServerConnector createConnector(Server server) {
ServerConnector connector = new ServerConnector(server,
new HTTP2CServerConnectionFactory(new HttpConfiguration()));
connector.setPort(0);
return connector;
}
| MockGrpcServer |
java | apache__camel | components/camel-jms/src/test/java/org/apache/camel/component/jms/integration/spring/tx/async/AsyncEndpointJmsTX2IT.java | {
"start": 1499,
"end": 3872
} | class ____ extends AbstractSpringJMSITSupport {
private static String beforeThreadName;
private static String afterThreadName;
@Override
protected AbstractXmlApplicationContext createApplicationContext() {
return new ClassPathXmlApplicationContext(
"org/apache/camel/component/jms/integration/spring/tx/JmsTransacted-context.xml");
}
@Test
public void testAsyncEndpointOK() throws Exception {
getMockEndpoint("mock:before").expectedBodiesReceived("Hello Camel");
getMockEndpoint("mock:after").expectedBodiesReceived("Hi Camel");
getMockEndpoint("mock:result").expectedBodiesReceived("Bye Camel");
template.sendBody("activemq:queue:AsyncEndpointJmsTX2Test.inbox", "Hello Camel");
MockEndpoint.assertIsSatisfied(context);
// we are synchronous due to TX so the we are using same threads during the routing
assertTrue(beforeThreadName.equalsIgnoreCase(afterThreadName), "Should use same threads");
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
context.addComponent("async", new MyAsyncComponent());
from("activemq:queue:AsyncEndpointJmsTX2Test.inbox")
.transacted()
.to("mock:before")
.to("log:before")
.process(exchange -> {
beforeThreadName = Thread.currentThread().getName();
assertTrue(exchange.isTransacted(), "Exchange should be transacted");
})
.to("async:hi:camel")
.process(exchange -> {
afterThreadName = Thread.currentThread().getName();
assertTrue(exchange.isTransacted(), "Exchange should be transacted");
})
.to("log:after")
.to("mock:after")
.to("direct:foo")
.to("log:result")
.to("mock:result");
from("direct:foo")
.transacted()
.to("async:bye:camel");
}
};
}
}
| AsyncEndpointJmsTX2IT |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java | {
"start": 3993,
"end": 8084
} | interface ____ extends Closeable {
/**
* Get the next block.<p>
*
* Note that this block may be removed in between the time we list it,
* and the time the caller tries to use it, or it may represent a stale
* entry. Callers should handle the case where the returned block no
* longer exists.
*
* @return The next block, or null if there are no
* more blocks. Null if there was an error
* determining the next block.
*
* @throws IOException If there was an error getting the next block in
* this volume. In this case, EOF will be set on
* the iterator.
*/
ExtendedBlock nextBlock() throws IOException;
/**
* Returns true if we got to the end of the block pool.
*/
boolean atEnd();
/**
* Repositions the iterator at the beginning of the block pool.
*/
void rewind();
/**
* Save this block iterator to the underlying volume.
* Any existing saved block iterator with this name will be overwritten.
* maxStalenessMs will not be saved.
*
* @throws IOException If there was an error when saving the block
* iterator.
*/
void save() throws IOException;
/**
* Set the maximum staleness of entries that we will return.<p>
*
* A maximum staleness of 0 means we will never return stale entries; a
* larger value will allow us to reduce resource consumption in exchange
* for returning more potentially stale entries. Even with staleness set
* to 0, consumers of this API must handle race conditions where block
* disappear before they can be processed.
*/
void setMaxStalenessMs(long maxStalenessMs);
/**
* Get the wall-clock time, measured in milliseconds since the Epoch,
* when this iterator was created.
*/
long getIterStartMs();
/**
* Get the wall-clock time, measured in milliseconds since the Epoch,
* when this iterator was last saved. Returns iterStartMs if the
* iterator was never saved.
*/
long getLastSavedMs();
/**
* Get the id of the block pool which this iterator traverses.
*/
String getBlockPoolId();
}
/**
* Create a new block iterator. It will start at the beginning of the
* block set.
*
* @param bpid The block pool id to iterate over.
* @param name The name of the block iterator to create.
*
* @return The new block iterator.
*/
BlockIterator newBlockIterator(String bpid, String name);
/**
* Load a saved block iterator.
*
* @param bpid The block pool id to iterate over.
* @param name The name of the block iterator to load.
*
* @return The saved block iterator.
* @throws IOException If there was an IO error loading the saved
* block iterator.
*/
BlockIterator loadBlockIterator(String bpid, String name) throws IOException;
/**
* Get the FSDatasetSpi which this volume is a part of.
*/
FsDatasetSpi getDataset();
/**
* Tracks the files and other information related to a block on the disk
* Missing file is indicated by setting the corresponding member
* to null.
*
* Because millions of these structures may be created, we try to save
* memory here. So instead of storing full paths, we store path suffixes.
* The block file, if it exists, will have a path like this:
* {@literal <volume_base_path>/<block_path>}
* So we don't need to store the volume path, since we already know what the
* volume is.
*
* The metadata file, if it exists, will have a path like this:
* {@literal <volume_base_path>/<block_path>_<genstamp>.meta}
* So if we have a block file, there isn't any need to store the block path
* again.
*
* The accessor functions take care of these manipulations.
*/
public static | BlockIterator |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java | {
"start": 25011,
"end": 27655
} | class ____ implements ResourceTracker {
public NodeAction heartBeatNodeAction = NodeAction.NORMAL;
public NodeAction registerNodeAction = NodeAction.NORMAL;
private Map<ApplicationId, List<Long>> keepAliveRequests =
new HashMap<ApplicationId, List<Long>>();
private ApplicationId appId = BuilderUtils.newApplicationId(1, 1);
private final Context context;
MyResourceTracker3(Context context) {
this.context = context;
}
@Override
public RegisterNodeManagerResponse registerNodeManager(
RegisterNodeManagerRequest request) throws YarnException,
IOException {
RegisterNodeManagerResponse response =
recordFactory.newRecordInstance(RegisterNodeManagerResponse.class);
response.setNodeAction(registerNodeAction);
response.setContainerTokenMasterKey(createMasterKey());
response.setNMTokenMasterKey(createMasterKey());
return response;
}
@Override
public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
throws YarnException, IOException {
LOG.info("Got heartBeatId: [{}]", heartBeatID);
NodeStatus nodeStatus = request.getNodeStatus();
nodeStatus.setResponseId(heartBeatID.getAndIncrement());
NodeHeartbeatResponse nhResponse = YarnServerBuilderUtils.
newNodeHeartbeatResponse(heartBeatID.get(), heartBeatNodeAction, null,
null, null, null, 1000L);
if (nodeStatus.getKeepAliveApplications() != null
&& nodeStatus.getKeepAliveApplications().size() > 0) {
for (ApplicationId appId : nodeStatus.getKeepAliveApplications()) {
List<Long> list = keepAliveRequests.get(appId);
if (list == null) {
list = new LinkedList<Long>();
keepAliveRequests.put(appId, list);
}
list.add(System.currentTimeMillis());
}
}
if (heartBeatID.get() == 2) {
LOG.info("Sending FINISH_APP for application: [{}]", appId);
this.context.getApplications().put(appId, mock(Application.class));
nhResponse.addAllApplicationsToCleanup(Collections.singletonList(appId));
}
return nhResponse;
}
@Override
public UnRegisterNodeManagerResponse unRegisterNodeManager(
UnRegisterNodeManagerRequest request) throws YarnException, IOException {
return recordFactory
.newRecordInstance(UnRegisterNodeManagerResponse.class);
}
}
// Test NodeStatusUpdater sends the right container statuses each time it
// heart beats.
private Credentials expectedCredentials = new Credentials();
private | MyResourceTracker3 |
java | netty__netty | transport-classes-io_uring/src/main/java/io/netty/channel/uring/IoUringBufferRing.java | {
"start": 3007,
"end": 11319
} | class ____ implements Consumer<ByteBuf> {
private int expectedBuffers;
private short num;
private short bid;
private short oldTail;
short fill(short startBid, int numBuffers) {
// Fetch the tail once before allocate the batch.
oldTail = (short) SHORT_HANDLE.get(ioUringBufRing, tailFieldPosition);
// At the moment we always start with bid 0 and so num and bid is the same. As this is more of an
// implementation detail it is better to still keep both separated.
this.num = 0;
this.bid = startBid;
this.expectedBuffers = numBuffers;
try {
if (batchAllocation) {
allocator.allocateBatch(this, numBuffers);
} else {
for (int i = 0; i < numBuffers; i++) {
add(oldTail, bid++, num++, allocator.allocate());
}
}
} catch (Throwable t) {
corrupted = true;
for (int i = 0; i < buffers.length; i++) {
ByteBuf buffer = buffers[i];
if (buffer != null) {
buffer.release();
buffers[i] = null;
}
}
throw t;
}
// Now advanced the tail by the number of buffers that we just added.
SHORT_HANDLE.setRelease(ioUringBufRing, tailFieldPosition, (short) (oldTail + num));
return (short) (bid - 1);
}
void fill(short bid) {
short tail = (short) SHORT_HANDLE.get(ioUringBufRing, tailFieldPosition);
add(tail, bid, 0, allocator.allocate());
// Now advanced the tail by one
SHORT_HANDLE.setRelease(ioUringBufRing, tailFieldPosition, (short) (tail + 1));
}
@Override
public void accept(ByteBuf byteBuf) {
if (corrupted || closed) {
byteBuf.release();
throw new IllegalStateException("Already closed");
}
if (expectedBuffers == num) {
byteBuf.release();
throw new IllegalStateException("Produced too many buffers");
}
add(oldTail, bid++, num++, byteBuf);
}
private void add(int tail, short bid, int offset, ByteBuf byteBuf) {
short ringIndex = (short) ((tail + offset) & mask);
assert buffers[bid] == null;
long memoryAddress = IoUring.memoryAddress(byteBuf) + byteBuf.writerIndex();
int writable = byteBuf.writableBytes();
// see:
// https://github.com/axboe/liburing/
// blob/19134a8fffd406b22595a5813a3e319c19630ac9/src/include/liburing.h#L1561
int position = Native.SIZEOF_IOURING_BUF * ringIndex;
ioUringBufRing.putLong(position + Native.IOURING_BUFFER_OFFSETOF_ADDR, memoryAddress);
ioUringBufRing.putInt(position + Native.IOURING_BUFFER_OFFSETOF_LEN, writable);
ioUringBufRing.putShort(position + Native.IOURING_BUFFER_OFFSETOF_BID, bid);
buffers[bid] = byteBuf;
}
}
/**
* Try to expand by adding more buffers to the ring if there is any space left, this will be done lazy.
*
* @return {@code true} if we can expand the number of buffers in the ring, {@code false} otherwise.
*/
boolean expand() {
needExpand = true;
return allocatedBuffers < buffers.length;
}
private void fill(short startBid, int buffers) {
if (corrupted || closed) {
return;
}
assert buffers % 2 == 0;
lastGeneratedBid = ringConsumer.fill(startBid, buffers);
usableBuffers += buffers;
}
private void fill(short bid) {
if (corrupted || closed) {
return;
}
ringConsumer.fill(bid);
usableBuffers++;
}
/**
* @return the {@link IoUringBufferRingExhaustedEvent} that should be used to signal that there were no buffers
* left for this buffer ring.
*/
IoUringBufferRingExhaustedEvent getExhaustedEvent() {
return exhaustedEvent;
}
/**
* Return the amount of bytes that we attempted to read for the given id.
* This method must be called before {@link #useBuffer(short, int, boolean)}.
*
* @param bid the id of the buffer.
* @return the attempted bytes.
*/
int attemptedBytesRead(short bid) {
return buffers[bid].writableBytes();
}
private int calculateNextBufferBatch() {
return Math.min(batchSize, entries - allocatedBuffers);
}
/**
* Use the buffer for the given buffer id. The returned {@link ByteBuf} must be released once not used anymore.
*
* @param bid the id of the buffer
* @param read the number of bytes that could be read. This value might be larger then what a single
* {@link ByteBuf} can hold. Because of this, the caller should call
* @link #useBuffer(short, int, boolean)} in a loop (obtaining the next bid to use by calling
* {@link #nextBid(short)}) until all buffers could be obtained.
* @return the buffer.
*/
ByteBuf useBuffer(short bid, int read, boolean more) {
assert read > 0;
ByteBuf byteBuf = buffers[bid];
allocator.lastBytesRead(byteBuf.writableBytes(), read);
// We always slice so the user will not mess up things later.
ByteBuf buffer = byteBuf.retainedSlice(byteBuf.writerIndex(), read);
byteBuf.writerIndex(byteBuf.writerIndex() + read);
if (incremental && more && byteBuf.isWritable()) {
// The buffer will be used later again, just slice out what we did read so far.
return buffer;
}
// The buffer is considered to be used, null out the slot.
buffers[bid] = null;
byteBuf.release();
if (--usableBuffers == 0) {
int numBuffers = allocatedBuffers;
if (needExpand) {
// We did get a signal that our buffer ring did not have enough buffers, let's see if we
// can grow it.
needExpand = false;
numBuffers += calculateNextBufferBatch();
}
fill((short) 0, numBuffers);
allocatedBuffers = numBuffers;
assert allocatedBuffers % 2 == 0;
} else if (!batchAllocation) {
// If we don'T do bulk allocations to refill the buffer ring we need to fill in the just used bid again
// if we didn't get a signal that we need expansion.
fill(bid);
if (needExpand && lastGeneratedBid == bid) {
// We did get a signal that our buffer ring did not have enough buffers and we just did add the last
// generated bid at the tail of the ring. Now its safe to grow the buffer ring and still guarantee
// sequential ordering which is needed for our RECVSEND_BUNDLE implementation.
needExpand = false;
int numBuffers = calculateNextBufferBatch();
fill((short) (bid + 1), numBuffers);
allocatedBuffers += numBuffers;
assert allocatedBuffers % 2 == 0;
}
}
return buffer;
}
short nextBid(short bid) {
return (short) ((bid + 1) & allocatedBuffers - 1);
}
/**
* The group id that is assigned to this buffer ring.
*
* @return group id.
*/
short bufferGroupId() {
return bufferGroupId;
}
/**
* Close this {@link IoUringBufferRing}, using it after this method is called will lead to undefined behaviour.
*/
void close() {
if (closed) {
return;
}
closed = true;
Native.ioUringUnRegisterBufRing(ringFd, Buffer.memoryAddress(ioUringBufRing), entries, bufferGroupId);
for (ByteBuf byteBuf : buffers) {
if (byteBuf != null) {
byteBuf.release();
}
}
Arrays.fill(buffers, null);
}
}
| RingConsumer |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/state/changelog/inmemory/InMemoryChangelogStateHandle.java | {
"start": 1500,
"end": 4609
} | class ____ implements ChangelogStateHandle {
private static final long serialVersionUID = 1L;
private final List<StateChange> changes;
private final SequenceNumber from; // for debug purposes
private final SequenceNumber to; // for debug purposes
private final KeyGroupRange keyGroupRange;
private final StateHandleID stateHandleID;
public InMemoryChangelogStateHandle(
List<StateChange> changes,
SequenceNumber from,
SequenceNumber to,
KeyGroupRange keyGroupRange) {
this(changes, from, to, keyGroupRange, new StateHandleID(UUID.randomUUID().toString()));
}
private InMemoryChangelogStateHandle(
List<StateChange> changes,
SequenceNumber from,
SequenceNumber to,
KeyGroupRange keyGroupRange,
StateHandleID stateHandleId) {
this.changes = changes;
this.from = from;
this.to = to;
this.keyGroupRange = keyGroupRange;
this.stateHandleID = stateHandleId;
}
public static InMemoryChangelogStateHandle restore(
List<StateChange> changes,
SequenceNumber from,
SequenceNumber to,
KeyGroupRange keyGroupRange,
StateHandleID stateHandleId) {
return new InMemoryChangelogStateHandle(changes, from, to, keyGroupRange, stateHandleId);
}
@Override
public void discardState() {}
@Override
public long getStateSize() {
return changes.stream().mapToLong(change -> change.getChange().length).sum();
}
@Override
public long getCheckpointedSize() {
// memory changelog state handle would be counted as checkpoint each time.
return getStateSize();
}
@Override
public void collectSizeStats(StateObjectSizeStatsCollector collector) {
collector.add(StateObjectLocation.LOCAL_MEMORY, getStateSize());
}
public List<StateChange> getChanges() {
return Collections.unmodifiableList(changes);
}
@Override
public KeyGroupRange getKeyGroupRange() {
return keyGroupRange;
}
@Nullable
@Override
public KeyedStateHandle getIntersection(KeyGroupRange keyGroupRange) {
return changes.stream().mapToInt(StateChange::getKeyGroup).anyMatch(keyGroupRange::contains)
? this
: null;
}
@Override
public StateHandleID getStateHandleId() {
return stateHandleID;
}
@Override
public void registerSharedStates(SharedStateRegistry stateRegistry, long checkpointID) {
// do nothing
}
@Override
public String toString() {
return String.format("from %s to %s: %s", from, to, changes);
}
public long getFrom() {
return ((SequenceNumber.GenericSequenceNumber) from).number;
}
public long getTo() {
return ((SequenceNumber.GenericSequenceNumber) to).number;
}
@Override
public String getStorageIdentifier() {
return InMemoryStateChangelogStorageFactory.IDENTIFIER;
}
}
| InMemoryChangelogStateHandle |
java | qos-ch__slf4j | slf4j-simple/src/test/java/org/slf4j/simple/multiThreadedExecution/StateCheckingPrintStream.java | {
"start": 1457,
"end": 4540
} | enum ____ {
INITIAL, UNKNOWN, HELLO, THROWABLE, AT1, AT2, OTHER;
}
PrintStream other;
volatile State currentState = State.INITIAL;
public StateCheckingPrintStream(PrintStream ps) {
super(ps);
}
public void print(String s) {
}
public void println(String s) {
State next = computeState(s);
//System.out.println(next + " " + s);
switch (currentState) {
case INITIAL:
currentState = next;
break;
case UNKNOWN:
currentState = next;
break;
case OTHER:
if (next == State.UNKNOWN) {
currentState = State.UNKNOWN;
return;
}
if (next != State.OTHER && next != State.HELLO) {
throw badState(s, currentState, next);
}
currentState = next;
break;
case HELLO:
if (next != State.THROWABLE) {
throw badState(s, currentState, next);
}
currentState = next;
break;
case THROWABLE:
if (next != State.AT1) {
throw badState(s, currentState, next);
}
currentState = next;
break;
case AT1:
if (next != State.AT2) {
throw badState(s, currentState, next);
}
currentState = next;
break;
case AT2:
currentState = next;
break;
default:
throw new IllegalStateException("Unreachable code");
}
}
private IllegalStateException badState(String s, State currentState2, State next) {
return new IllegalStateException("Unexpected state " + next + " for current state " + currentState2 + " for " + s);
}
String OTHER_PATTERN_STR = ".*Other \\d{1,5}";
String HELLO_PATTERN_STR = ".*Hello \\d{1,5}";
String THROWABLE_PATTERN_STR = "java.lang.Throwable: i=\\d{1,5}";
String AT1_PATTERN_STR = "\\s*at " + PACKAGE_NAME + ".*";
String AT2_PATTERN_STR = "\\s*at " + ".*Thread.java.*";
Pattern PATTERN_OTHER = Pattern.compile(OTHER_PATTERN_STR);
Pattern PATTERN_HELLO = Pattern.compile(HELLO_PATTERN_STR);
Pattern PATTERN_THROWABLE = Pattern.compile(THROWABLE_PATTERN_STR);
Pattern PATTERN_AT1 = Pattern.compile(AT1_PATTERN_STR);
Pattern PATTERN_AT2 = Pattern.compile(AT2_PATTERN_STR);
private State computeState(String s) {
if (PATTERN_OTHER.matcher(s).matches()) {
return State.OTHER;
} else if (PATTERN_HELLO.matcher(s).matches()) {
return State.HELLO;
} else if (PATTERN_THROWABLE.matcher(s).matches()) {
return State.THROWABLE;
} else if (PATTERN_AT1.matcher(s).matches()) {
return State.AT1;
} else if (PATTERN_AT2.matcher(s).matches()) {
return State.AT2;
} else {
return State.UNKNOWN;
}
}
public void println(Object o) {
println(o.toString());
}
} | State |
java | elastic__elasticsearch | x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/RolloverActionIT.java | {
"start": 2082,
"end": 24383
} | class ____ extends IlmESRestTestCase {
private static final String FAILED_STEP_RETRY_COUNT_FIELD = "failed_step_retry_count";
private String index;
private String policy;
private String alias;
@Before
public void refreshIndex() {
index = "index-" + randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
policy = "policy-" + randomAlphaOfLength(5);
alias = "alias-" + randomAlphaOfLength(5);
logger.info("--> running [{}] with index [{}], alias [{}] and policy [{}]", getTestName(), index, alias, policy);
}
public void testRolloverAction() throws Exception {
String originalIndex = index + "-000001";
String secondIndex = index + "-000002";
createIndexWithSettings(
client(),
originalIndex,
alias,
Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias)
);
// create policy
createNewSingletonPolicy(client(), policy, "hot", new RolloverAction(null, null, null, 1L, null, null, null, null, null, null));
// update policy on index
updatePolicy(client(), originalIndex, policy);
// index document {"foo": "bar"} to trigger rollover
index(client(), originalIndex, "_id", "foo", "bar");
assertBusy(() -> {
assertThat(getStepKeyForIndex(client(), originalIndex), equalTo(PhaseCompleteStep.finalStep("hot").getKey()));
assertTrue(indexExists(secondIndex));
assertTrue(indexExists(originalIndex));
assertEquals("true", getOnlyIndexSettings(client(), originalIndex).get(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE));
}, 30, TimeUnit.SECONDS);
}
public void testRolloverActionWithIndexingComplete() throws Exception {
String originalIndex = index + "-000001";
String secondIndex = index + "-000002";
createIndexWithSettings(
client(),
originalIndex,
alias,
Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias)
);
Request updateSettingsRequest = new Request("PUT", "/" + originalIndex + "/_settings");
updateSettingsRequest.setJsonEntity(Strings.format("""
{
"settings": {
"%s": true
}
}""", LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE));
client().performRequest(updateSettingsRequest);
Request updateAliasRequest = new Request("POST", "/_aliases");
updateAliasRequest.setJsonEntity(Strings.format("""
{
"actions": [
{
"add": {
"index": "%s",
"alias": "%s",
"is_write_index": false
}
}
]
}""", originalIndex, alias));
client().performRequest(updateAliasRequest);
// create policy
createNewSingletonPolicy(client(), policy, "hot", new RolloverAction(null, null, null, 1L, null, null, null, null, null, null));
// update policy on index
updatePolicy(client(), originalIndex, policy);
// index document {"foo": "bar"} to trigger rollover
index(client(), originalIndex, "_id", "foo", "bar");
assertBusy(() -> {
assertThat(getStepKeyForIndex(client(), originalIndex), equalTo(PhaseCompleteStep.finalStep("hot").getKey()));
assertTrue(indexExists(originalIndex));
assertFalse(indexExists(secondIndex)); // careful, *assertFalse* not *assertTrue*
assertEquals("true", getOnlyIndexSettings(client(), originalIndex).get(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE));
}, 30, TimeUnit.SECONDS);
}
public void testRolloverActionWithMaxPrimaryShardSize() throws Exception {
String originalIndex = index + "-000001";
String secondIndex = index + "-000002";
createIndexWithSettings(
client(),
originalIndex,
alias,
Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 3)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias)
);
index(client(), originalIndex, "_id", "foo", "bar");
// create policy
createNewSingletonPolicy(
client(),
policy,
"hot",
new RolloverAction(null, ByteSizeValue.ofBytes(1), null, null, null, null, null, null, null, null)
);
// update policy on index
updatePolicy(client(), originalIndex, policy);
assertBusy(() -> {
assertThat(getStepKeyForIndex(client(), originalIndex), equalTo(PhaseCompleteStep.finalStep("hot").getKey()));
assertTrue(indexExists(secondIndex));
assertTrue(indexExists(originalIndex));
assertEquals("true", getOnlyIndexSettings(client(), originalIndex).get(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE));
}, 30, TimeUnit.SECONDS);
}
public void testRolloverActionWithMaxPrimaryDocsSize() throws Exception {
String originalIndex = index + "-000001";
String secondIndex = index + "-000002";
createIndexWithSettings(
client(),
originalIndex,
alias,
Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 3)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias)
);
index(client(), originalIndex, "_id", "foo", "bar");
// create policy
createNewSingletonPolicy(client(), policy, "hot", new RolloverAction(null, null, null, null, 1L, null, null, null, null, null));
// update policy on index
updatePolicy(client(), originalIndex, policy);
assertBusy(() -> {
assertThat(getStepKeyForIndex(client(), originalIndex), equalTo(PhaseCompleteStep.finalStep("hot").getKey()));
assertTrue(indexExists(secondIndex));
assertTrue(indexExists(originalIndex));
assertEquals("true", getOnlyIndexSettings(client(), originalIndex).get(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE));
}, 30, TimeUnit.SECONDS);
}
/**
* There are multiple scenarios where we want to set up an empty index, make sure that it *doesn't* roll over, then change something
* about the cluster, and verify that now the index does roll over. This is a 'template method' that allows you to provide a runnable
* which will accomplish that end. Each invocation of this should live in its own top-level `public void test...` method.
*/
private void templateTestRolloverActionWithEmptyIndex(CheckedRunnable<Exception> allowEmptyIndexToRolloverRunnable) throws Exception {
String originalIndex = index + "-000001";
String secondIndex = index + "-000002";
createIndexWithSettings(
client(),
originalIndex,
alias,
Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias)
);
// create policy
createNewSingletonPolicy(
client(),
policy,
"hot",
new RolloverAction(null, null, TimeValue.timeValueSeconds(1), null, null, null, null, null, null, null)
);
// update policy on index
updatePolicy(client(), originalIndex, policy);
// maybe set the controlling setting to explicitly true (rather than just true by default)
if (randomBoolean()) {
setLifecycleRolloverOnlyIfHasDocumentsSetting(true);
}
// because the index is empty, it doesn't roll over
assertBusy(() -> {
assertThat(getStepKeyForIndex(client(), originalIndex).name(), is(WaitForRolloverReadyStep.NAME));
assertFalse(indexExists(secondIndex));
assertTrue(indexExists(originalIndex));
}, 30, TimeUnit.SECONDS);
// run the passed in runnable that will somehow make it so the index rolls over
allowEmptyIndexToRolloverRunnable.run();
// now the index rolls over as expected
assertBusy(() -> {
assertThat(getStepKeyForIndex(client(), originalIndex), equalTo(PhaseCompleteStep.finalStep("hot").getKey()));
assertTrue(indexExists(secondIndex));
assertTrue(indexExists(originalIndex));
assertEquals("true", getOnlyIndexSettings(client(), originalIndex).get(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE));
}, 30, TimeUnit.SECONDS);
// reset to null so that the post-test cleanup doesn't fail because it sees a deprecated setting
setLifecycleRolloverOnlyIfHasDocumentsSetting(null);
}
public void testRolloverActionWithEmptyIndexThenADocIsIndexed() throws Exception {
templateTestRolloverActionWithEmptyIndex(() -> {
// index document {"foo": "bar"} to trigger rollover
index(client(), index + "-000001", "_id", "foo", "bar");
});
}
public void testRolloverActionWithEmptyIndexThenThePolicyIsChanged() throws Exception {
templateTestRolloverActionWithEmptyIndex(() -> {
// change the policy to permit empty rollovers -- with either min_docs or min_primary_shard_docs set to 0
createNewSingletonPolicy(
client(),
policy,
"hot",
randomBoolean()
? new RolloverAction(null, null, TimeValue.timeValueSeconds(1), null, null, null, null, null, 0L, null)
: new RolloverAction(null, null, TimeValue.timeValueSeconds(1), null, null, null, null, null, null, 0L)
);
});
}
public void testRolloverActionWithEmptyIndexThenTheClusterSettingIsChanged() throws Exception {
templateTestRolloverActionWithEmptyIndex(() -> {
// change the cluster-wide setting to permit empty rollovers
setLifecycleRolloverOnlyIfHasDocumentsSetting(false);
});
}
private void setLifecycleRolloverOnlyIfHasDocumentsSetting(@Nullable Boolean value) throws IOException {
try {
Settings.Builder settings = Settings.builder();
if (value != null) {
settings.put(LifecycleSettings.LIFECYCLE_ROLLOVER_ONLY_IF_HAS_DOCUMENTS, value.booleanValue());
} else {
settings.putNull(LifecycleSettings.LIFECYCLE_ROLLOVER_ONLY_IF_HAS_DOCUMENTS);
}
updateClusterSettings(settings.build());
if (value != null) {
fail("expected WarningFailureException from warnings");
}
} catch (WarningFailureException e) {
// expected, this setting is deprecated, so we can get back a warning
}
}
public void testILMRolloverRetriesOnReadOnlyBlock() throws Exception {
String firstIndex = index + "-000001";
createNewSingletonPolicy(
client(),
policy,
"hot",
new RolloverAction(null, null, TimeValue.timeValueSeconds(1), null, null, null, null, null, 0L, null)
);
// create the index as readonly and associate the ILM policy to it
createIndexWithSettings(
client(),
firstIndex,
alias,
Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(LifecycleSettings.LIFECYCLE_NAME, policy)
.put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias)
.put("index.blocks.read_only", true),
true
);
// wait for ILM to start retrying the step
assertBusy(
() -> assertThat((Integer) explainIndex(client(), firstIndex).get(FAILED_STEP_RETRY_COUNT_FIELD), greaterThanOrEqualTo(1))
);
// remove the read only block
Request allowWritesOnIndexSettingUpdate = new Request("PUT", firstIndex + "/_settings");
allowWritesOnIndexSettingUpdate.setJsonEntity("""
{ "index": {
"blocks.read_only" : "false"\s
}
}""");
client().performRequest(allowWritesOnIndexSettingUpdate);
// index is not readonly so the ILM should complete successfully
assertBusy(() -> assertThat(getStepKeyForIndex(client(), firstIndex), equalTo(PhaseCompleteStep.finalStep("hot").getKey())));
}
public void testILMRolloverOnManuallyRolledIndex() throws Exception {
String originalIndex = index + "-000001";
String secondIndex = index + "-000002";
String thirdIndex = index + "-000003";
// Set up a policy with rollover
createNewSingletonPolicy(client(), policy, "hot", new RolloverAction(null, null, null, 2L, null, null, null, null, null, null));
Request createIndexTemplate = new Request("PUT", "_template/rolling_indexes");
createIndexTemplate.setJsonEntity(Strings.format("""
{
"index_patterns": ["%s-*"],
"settings": {
"number_of_shards": 1,
"number_of_replicas": 0,
"index.lifecycle.name": "%s",
"index.lifecycle.rollover_alias": "%s"
}
}""", index, policy, alias));
createIndexTemplate.setOptions(expectWarnings(RestPutIndexTemplateAction.DEPRECATION_WARNING));
client().performRequest(createIndexTemplate);
createIndexWithSettings(
client(),
originalIndex,
alias,
Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0),
true
);
// Index a document
index(client(), originalIndex, "1", "foo", "bar");
Request refreshOriginalIndex = new Request("POST", "/" + originalIndex + "/_refresh");
client().performRequest(refreshOriginalIndex);
// Manual rollover
rolloverMaxOneDocCondition(client(), alias);
assertBusy(() -> assertTrue(indexExists(secondIndex)));
// Index another document into the original index so the ILM rollover policy condition is met
index(client(), originalIndex, "2", "foo", "bar");
client().performRequest(refreshOriginalIndex);
// Wait for the rollover policy to execute
assertBusy(() -> assertThat(getStepKeyForIndex(client(), originalIndex), equalTo(PhaseCompleteStep.finalStep("hot").getKey())));
// ILM should manage the second index after attempting (and skipping) rolling the original index
assertBusy(() -> assertTrue((boolean) explainIndex(client(), secondIndex).getOrDefault("managed", true)));
// index some documents to trigger an ILM rollover
index(client(), alias, "1", "foo", "bar");
index(client(), alias, "2", "foo", "bar");
index(client(), alias, "3", "foo", "bar");
Request refreshSecondIndex = new Request("POST", "/" + secondIndex + "/_refresh");
client().performRequest(refreshSecondIndex).getStatusLine();
// ILM should rollover the second index even though it skipped the first one
assertBusy(() -> assertThat(getStepKeyForIndex(client(), secondIndex), equalTo(PhaseCompleteStep.finalStep("hot").getKey())));
assertBusy(() -> assertTrue(indexExists(thirdIndex)));
}
public void testRolloverStepRetriesUntilRolledOverIndexIsDeleted() throws Exception {
String index = this.index + "-000001";
String rolledIndex = this.index + "-000002";
createNewSingletonPolicy(
client(),
policy,
"hot",
new RolloverAction(null, null, TimeValue.timeValueSeconds(1), null, null, null, null, null, null, null)
);
// create the rolled index so the rollover of the first index fails
createIndexWithSettings(
client(),
rolledIndex,
alias,
Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias),
false
);
createIndexWithSettings(
client(),
index,
alias,
Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(LifecycleSettings.LIFECYCLE_NAME, policy)
.put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias),
true
);
assertBusy(
() -> assertThat((Integer) explainIndex(client(), index).get(FAILED_STEP_RETRY_COUNT_FIELD), greaterThanOrEqualTo(1)),
30,
TimeUnit.SECONDS
);
Request moveToStepRequest = new Request("POST", "_ilm/move/" + index);
moveToStepRequest.setJsonEntity("""
{
"current_step": {
"phase": "hot",
"action": "rollover",
"name": "check-rollover-ready"
},
"next_step": {
"phase": "hot",
"action": "rollover",
"name": "attempt-rollover"
}
}""");
// Using {@link #waitUntil} here as ILM moves back and forth between the {@link WaitForRolloverReadyStep} step and
// {@link org.elasticsearch.xpack.core.ilm.ErrorStep} in order to retry the failing step. As {@link #assertBusy}
// increases the wait time between calls exponentially, we might miss the window where the policy is on
// {@link WaitForRolloverReadyStep} and the move to `attempt-rollover` request will not be successful.
assertTrue(waitUntil(() -> {
try {
return client().performRequest(moveToStepRequest).getStatusLine().getStatusCode() == 200;
} catch (IOException e) {
return false;
}
}, 30, TimeUnit.SECONDS));
// Similar to above, using {@link #waitUntil} as we want to make sure the `attempt-rollover` step started failing and is being
// retried (which means ILM moves back and forth between the `attempt-rollover` step and the `error` step)
assertTrue("ILM did not start retrying the attempt-rollover step", waitUntil(() -> {
try {
Map<String, Object> explainIndexResponse = explainIndex(client(), index);
String failedStep = (String) explainIndexResponse.get("failed_step");
Integer retryCount = (Integer) explainIndexResponse.get(FAILED_STEP_RETRY_COUNT_FIELD);
return failedStep != null && failedStep.equals("attempt-rollover") && retryCount != null && retryCount >= 1;
} catch (IOException e) {
return false;
}
}, 30, TimeUnit.SECONDS));
deleteIndex(rolledIndex);
// the rollover step should eventually succeed
assertBusy(() -> assertThat(indexExists(rolledIndex), is(true)));
assertBusy(() -> assertThat(getStepKeyForIndex(client(), index), equalTo(PhaseCompleteStep.finalStep("hot").getKey())));
}
public void testUpdateRolloverLifecycleDateStepRetriesWhenRolloverInfoIsMissing() throws Exception {
String index = this.index + "-000001";
createNewSingletonPolicy(client(), policy, "hot", new RolloverAction(null, null, null, 1L, null, null, null, null, null, null));
createIndexWithSettings(
client(),
index,
alias,
Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(LifecycleSettings.LIFECYCLE_NAME, policy)
.put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias),
true
);
assertBusy(() -> assertThat(getStepKeyForIndex(client(), index).name(), is(WaitForRolloverReadyStep.NAME)));
// moving ILM to the "update-rollover-lifecycle-date" without having gone through the actual rollover step
// the "update-rollover-lifecycle-date" step will fail as the index has no rollover information
Request moveToStepRequest = new Request("POST", "_ilm/move/" + index);
moveToStepRequest.setJsonEntity("""
{
"current_step": {
"phase": "hot",
"action": "rollover",
"name": "check-rollover-ready"
},
"next_step": {
"phase": "hot",
"action": "rollover",
"name": "update-rollover-lifecycle-date"
}
}""");
client().performRequest(moveToStepRequest);
assertBusy(
() -> assertThat((Integer) explainIndex(client(), index).get(FAILED_STEP_RETRY_COUNT_FIELD), greaterThanOrEqualTo(1)),
30,
TimeUnit.SECONDS
);
index(client(), index, "1", "foo", "bar");
Request refreshIndex = new Request("POST", "/" + index + "/_refresh");
client().performRequest(refreshIndex);
// manual rollover the index so the "update-rollover-lifecycle-date" ILM step can continue and finish successfully as the index
// will have rollover information now
rolloverMaxOneDocCondition(client(), alias);
assertBusy(() -> assertThat(getStepKeyForIndex(client(), index), equalTo(PhaseCompleteStep.finalStep("hot").getKey())));
}
}
| RolloverActionIT |
java | apache__camel | components/camel-timer/src/main/java/org/apache/camel/component/timer/TimerConstants.java | {
"start": 932,
"end": 1305
} | class ____ {
@Metadata(description = "The fired time", javaType = "Date")
public static final String HEADER_FIRED_TIME = Exchange.TIMER_FIRED_TIME;
@Metadata(description = "The timestamp of the message", javaType = "long")
public static final String HEADER_MESSAGE_TIMESTAMP = Exchange.MESSAGE_TIMESTAMP;
private TimerConstants() {
}
}
| TimerConstants |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/criteria/CriteriaWithDynamicInstantiationAndOrderByTest.java | {
"start": 8478,
"end": 8664
} | class ____ {
@Id
private Long id;
private String name;
public Item() {
}
public Item(Long id, String name) {
this.id = id;
this.name = name;
}
}
public static | Item |
java | micronaut-projects__micronaut-core | inject-java/src/test/groovy/io/micronaut/inject/configproperties/MyConfigWithMethodConfigurationInject.java | {
"start": 511,
"end": 2780
} | class ____ {
private String host;
private int serverPort;
private MI_OtherConfig otherConfig;
private MI_OtherMissingConfig otherMissingConfig;
private MI_OtherBean otherBean;
private MI_OtherSingleton otherSingleton;
private Optional<MI_OtherSingleton> optionalOtherSingleton;
private BeanProvider<MI_OtherSingleton> otherSingletonBeanProvider;
private Provider<MI_OtherSingleton> otherSingletonProvider;
@ConfigurationInject
void inject(String host,
int serverPort,
MI_OtherConfig otherConfig,
MI_OtherMissingConfig otherMissingConfig,
MI_OtherBean otherBean,
MI_OtherSingleton otherSingleton,
Optional<MI_OtherSingleton> optionalOtherSingleton,
BeanProvider<MI_OtherSingleton> otherSingletonBeanProvider,
Provider<MI_OtherSingleton> otherSingletonProvider) {
this.host = host;
this.serverPort = serverPort;
this.otherConfig = otherConfig;
this.otherMissingConfig = otherMissingConfig;
this.otherBean = otherBean;
this.otherSingleton = otherSingleton;
this.optionalOtherSingleton = optionalOtherSingleton;
this.otherSingletonBeanProvider = otherSingletonBeanProvider;
this.otherSingletonProvider = otherSingletonProvider;
}
public String getHost() {
return host;
}
public int getServerPort() {
return serverPort;
}
public MI_OtherBean getOtherBean() {
return otherBean;
}
public MI_OtherConfig getOtherConfig() {
return otherConfig;
}
public MI_OtherMissingConfig getOtherMissingConfig() {
return otherMissingConfig;
}
public MI_OtherSingleton getOtherSingleton() {
return otherSingleton;
}
public Optional<MI_OtherSingleton> getOptionalOtherSingleton() {
return optionalOtherSingleton;
}
public BeanProvider<MI_OtherSingleton> getOtherSingletonBeanProvider() {
return otherSingletonBeanProvider;
}
public Provider<MI_OtherSingleton> getOtherSingletonProvider() {
return otherSingletonProvider;
}
}
@ConfigurationProperties("xyz")
| MyConfigWithMethodConfigurationInject |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableMergeWithSingleTest.java | {
"start": 1343,
"end": 13047
} | class ____ extends RxJavaTest {
@Test
public void normal() {
Flowable.range(1, 5)
.mergeWith(Single.just(100))
.test()
.assertResult(1, 2, 3, 4, 5, 100);
}
@Test
public void normalLong() {
Flowable.range(1, 512)
.mergeWith(Single.just(100))
.test()
.assertValueCount(513)
.assertComplete();
}
@Test
public void normalLongRequestExact() {
Flowable.range(1, 512)
.mergeWith(Single.just(100))
.test(513)
.assertValueCount(513)
.assertComplete();
}
@Test
public void take() {
Flowable.range(1, 5)
.mergeWith(Single.just(100))
.take(3)
.test()
.assertResult(1, 2, 3);
}
@Test
public void cancel() {
final PublishProcessor<Integer> pp = PublishProcessor.create();
final SingleSubject<Integer> cs = SingleSubject.create();
TestSubscriber<Integer> ts = pp.mergeWith(cs).test();
assertTrue(pp.hasSubscribers());
assertTrue(cs.hasObservers());
ts.cancel();
assertFalse(pp.hasSubscribers());
assertFalse(cs.hasObservers());
}
@Test
public void normalBackpressured() {
final TestSubscriber<Integer> ts = new TestSubscriber<>(0L);
Flowable.range(1, 5).mergeWith(
Single.just(100)
)
.subscribe(ts);
ts
.assertEmpty()
.requestMore(2)
.assertValues(100, 1)
.requestMore(2)
.assertValues(100, 1, 2, 3)
.requestMore(2)
.assertResult(100, 1, 2, 3, 4, 5);
}
@Test
public void mainError() {
Flowable.error(new TestException())
.mergeWith(Single.just(100))
.test()
.assertFailure(TestException.class);
}
@Test
public void otherError() {
Flowable.never()
.mergeWith(Single.error(new TestException()))
.test()
.assertFailure(TestException.class);
}
@Test
public void completeRace() {
for (int i = 0; i < 10000; i++) {
final PublishProcessor<Integer> pp = PublishProcessor.create();
final SingleSubject<Integer> cs = SingleSubject.create();
TestSubscriber<Integer> ts = pp.mergeWith(cs).test();
Runnable r1 = new Runnable() {
@Override
public void run() {
pp.onNext(1);
pp.onComplete();
}
};
Runnable r2 = new Runnable() {
@Override
public void run() {
cs.onSuccess(1);
}
};
TestHelper.race(r1, r2);
ts.assertResult(1, 1);
}
}
@Test
public void onNextSlowPath() {
final PublishProcessor<Integer> pp = PublishProcessor.create();
final SingleSubject<Integer> cs = SingleSubject.create();
TestSubscriber<Integer> ts = pp.mergeWith(cs).subscribeWith(new TestSubscriber<Integer>() {
@Override
public void onNext(Integer t) {
super.onNext(t);
if (t == 1) {
pp.onNext(2);
}
}
});
pp.onNext(1);
cs.onSuccess(3);
pp.onNext(4);
pp.onComplete();
ts.assertResult(1, 2, 3, 4);
}
@Test
public void onSuccessSlowPath() {
final PublishProcessor<Integer> pp = PublishProcessor.create();
final SingleSubject<Integer> cs = SingleSubject.create();
TestSubscriber<Integer> ts = pp.mergeWith(cs).subscribeWith(new TestSubscriber<Integer>() {
@Override
public void onNext(Integer t) {
super.onNext(t);
if (t == 1) {
cs.onSuccess(2);
}
}
});
pp.onNext(1);
pp.onNext(3);
pp.onComplete();
ts.assertResult(1, 2, 3);
}
@Test
public void onSuccessSlowPathBackpressured() {
final PublishProcessor<Integer> pp = PublishProcessor.create();
final SingleSubject<Integer> cs = SingleSubject.create();
TestSubscriber<Integer> ts = pp.mergeWith(cs).subscribeWith(new TestSubscriber<Integer>(1) {
@Override
public void onNext(Integer t) {
super.onNext(t);
if (t == 1) {
cs.onSuccess(2);
}
}
});
pp.onNext(1);
pp.onNext(3);
pp.onComplete();
ts.request(2);
ts.assertResult(1, 2, 3);
}
@Test
public void onSuccessFastPathBackpressuredRace() {
for (int i = 0; i < 10000; i++) {
final PublishProcessor<Integer> pp = PublishProcessor.create();
final SingleSubject<Integer> cs = SingleSubject.create();
final TestSubscriber<Integer> ts = pp.mergeWith(cs).subscribeWith(new TestSubscriber<>(0));
Runnable r1 = new Runnable() {
@Override
public void run() {
cs.onSuccess(1);
}
};
Runnable r2 = new Runnable() {
@Override
public void run() {
ts.request(2);
}
};
TestHelper.race(r1, r2);
pp.onNext(2);
pp.onComplete();
ts.assertResult(1, 2);
}
}
@Test
public void onErrorMainOverflow() {
List<Throwable> errors = TestHelper.trackPluginErrors();
try {
final AtomicReference<Subscriber<?>> subscriber = new AtomicReference<>();
TestSubscriber<Integer> ts = new Flowable<Integer>() {
@Override
protected void subscribeActual(Subscriber<? super Integer> s) {
s.onSubscribe(new BooleanSubscription());
subscriber.set(s);
}
}
.mergeWith(Single.<Integer>error(new IOException()))
.test();
subscriber.get().onError(new TestException());
ts.assertFailure(IOException.class)
;
TestHelper.assertUndeliverable(errors, 0, TestException.class);
} finally {
RxJavaPlugins.reset();
}
}
@Test
public void onErrorOtherOverflow() {
List<Throwable> errors = TestHelper.trackPluginErrors();
try {
Flowable.error(new IOException())
.mergeWith(Single.error(new TestException()))
.test()
.assertFailure(IOException.class)
;
TestHelper.assertUndeliverable(errors, 0, TestException.class);
} finally {
RxJavaPlugins.reset();
}
}
@Test
public void onNextRequestRace() {
for (int i = 0; i < 10000; i++) {
final PublishProcessor<Integer> pp = PublishProcessor.create();
final SingleSubject<Integer> cs = SingleSubject.create();
final TestSubscriber<Integer> ts = pp.mergeWith(cs).test(0);
pp.onNext(0);
Runnable r1 = new Runnable() {
@Override
public void run() {
pp.onNext(1);
}
};
Runnable r2 = new Runnable() {
@Override
public void run() {
ts.request(3);
}
};
TestHelper.race(r1, r2);
cs.onSuccess(1);
pp.onComplete();
ts.assertResult(0, 1, 1);
}
}
@Test
public void doubleOnSubscribeMain() {
TestHelper.checkDoubleOnSubscribeFlowable(
new Function<Flowable<Object>, Publisher<Object>>() {
@Override
public Publisher<Object> apply(Flowable<Object> f)
throws Exception {
return f.mergeWith(Single.just(1));
}
}
);
}
@Test
public void noRequestOnError() {
Flowable.empty()
.mergeWith(Single.error(new TestException()))
.test(0)
.assertFailure(TestException.class);
}
@Test
public void drainExactRequestCancel() {
final PublishProcessor<Integer> pp = PublishProcessor.create();
final SingleSubject<Integer> cs = SingleSubject.create();
TestSubscriber<Integer> ts = pp.mergeWith(cs)
.take(2)
.subscribeWith(new TestSubscriber<Integer>(2) {
@Override
public void onNext(Integer t) {
super.onNext(t);
if (t == 1) {
cs.onSuccess(2);
}
}
});
pp.onNext(1);
pp.onComplete();
ts.request(2);
ts.assertResult(1, 2);
}
@Test
public void drainRequestWhenLimitReached() {
final PublishProcessor<Integer> pp = PublishProcessor.create();
final SingleSubject<Integer> cs = SingleSubject.create();
TestSubscriber<Integer> ts = pp.mergeWith(cs)
.subscribeWith(new TestSubscriber<Integer>() {
@Override
public void onNext(Integer t) {
super.onNext(t);
if (t == 1) {
for (int i = 0; i < Flowable.bufferSize() - 1; i++) {
pp.onNext(i + 2);
}
}
}
});
cs.onSuccess(1);
pp.onComplete();
ts.request(2);
ts.assertValueCount(Flowable.bufferSize());
ts.assertComplete();
}
@Test
public void cancelOtherOnMainError() {
PublishProcessor<Integer> pp = PublishProcessor.create();
SingleSubject<Integer> ss = SingleSubject.create();
TestSubscriber<Integer> ts = pp.mergeWith(ss).test();
assertTrue(pp.hasSubscribers());
assertTrue(ss.hasObservers());
pp.onError(new TestException());
ts.assertFailure(TestException.class);
assertFalse("main has observers!", pp.hasSubscribers());
assertFalse("other has observers", ss.hasObservers());
}
@Test
public void cancelMainOnOtherError() {
PublishProcessor<Integer> pp = PublishProcessor.create();
SingleSubject<Integer> ss = SingleSubject.create();
TestSubscriber<Integer> ts = pp.mergeWith(ss).test();
assertTrue(pp.hasSubscribers());
assertTrue(ss.hasObservers());
ss.onError(new TestException());
ts.assertFailure(TestException.class);
assertFalse("main has observers!", pp.hasSubscribers());
assertFalse("other has observers", ss.hasObservers());
}
@Test
public void undeliverableUponCancel() {
TestHelper.checkUndeliverableUponCancel(new FlowableConverter<Integer, Flowable<Integer>>() {
@Override
public Flowable<Integer> apply(Flowable<Integer> upstream) {
return upstream.mergeWith(Single.just(1).hide());
}
});
}
@Test
public void drainMoreWorkBeforeCancel() {
SingleSubject<Integer> ss = SingleSubject.create();
TestSubscriber<Integer> ts = new TestSubscriber<>();
Flowable.range(1, 5).mergeWith(ss)
.doOnNext(v -> {
if (v == 1) {
ss.onSuccess(6);
ts.cancel();
}
})
.subscribe(ts);
ts.assertValuesOnly(1);
}
}
| FlowableMergeWithSingleTest |
java | spring-projects__spring-framework | spring-web/src/test/java/org/springframework/web/bind/support/WebRequestDataBinderTests.java | {
"start": 14034,
"end": 14249
} | class ____ extends TestBean {
public void setConcreteSpouse(TestBean spouse) {
setSpouse(spouse);
}
public TestBean getConcreteSpouse() {
return (TestBean) getSpouse();
}
}
}
| TestBeanWithConcreteSpouse |
java | apache__maven | api/maven-api-spi/src/main/java/org/apache/maven/api/spi/ModelParser.java | {
"start": 1519,
"end": 3190
} | interface ____ extends SpiService {
/**
* Option that can be specified in the options map. The value should be a Boolean.
*/
String STRICT = "strict";
/**
* Locates the pom in the given directory.
*
* @param dir the directory to locate the pom for, never {@code null}
* @return a {@code Source} pointing to the located pom or an empty {@code Optional} if none was found by this parser
*/
@Nonnull
Optional<Source> locate(@Nonnull Path dir);
/**
* Parse the model obtained previously by a previous call to {@link #locate(Path)}.
*
* @param source the source to parse, never {@code null}
* @param options possible parsing options, may be {@code null}
* @return the parsed {@link Model}, never {@code null}
* @throws ModelParserException if the model cannot be parsed
*/
@Nonnull
Model parse(@Nonnull Source source, @Nullable Map<String, ?> options) throws ModelParserException;
/**
* Locate and parse the model in the specified directory.
* This is equivalent to {@code locate(dir).map(s -> parse(s, options))}.
*
* @param dir the directory to locate the pom for, never {@code null}
* @param options possible parsing options, may be {@code null}
* @return an optional parsed {@link Model} or {@code null} if none could be found
* @throws ModelParserException if the located model cannot be parsed
*/
@Nonnull
default Optional<Model> locateAndParse(@Nonnull Path dir, @Nullable Map<String, ?> options)
throws ModelParserException {
return locate(dir).map(s -> parse(s, options));
}
}
| ModelParser |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableReplayTest.java | {
"start": 25774,
"end": 69291
} | class ____ extends Worker {
private final Disposable mockDisposable;
public boolean unsubscribed;
InprocessWorker(Disposable mockDisposable) {
this.mockDisposable = mockDisposable;
}
@NonNull
@Override
public Disposable schedule(@NonNull Runnable action) {
action.run();
return mockDisposable; // this subscription is returned but discarded
}
@NonNull
@Override
public Disposable schedule(@NonNull Runnable action, long delayTime, @NonNull TimeUnit unit) {
action.run();
return mockDisposable;
}
@Override
public void dispose() {
unsubscribed = true;
}
@Override
public boolean isDisposed() {
return unsubscribed;
}
}
@Test
public void boundedReplayBuffer() {
BoundedReplayBuffer<Integer> buf = new BoundedReplayBuffer<Integer>(false) {
private static final long serialVersionUID = -9081211580719235896L;
@Override
void truncate() {
}
};
buf.addLast(new Node(1, 0));
buf.addLast(new Node(2, 1));
buf.addLast(new Node(3, 2));
buf.addLast(new Node(4, 3));
buf.addLast(new Node(5, 4));
List<Integer> values = new ArrayList<>();
buf.collect(values);
Assert.assertEquals(Arrays.asList(1, 2, 3, 4, 5), values);
buf.removeSome(2);
buf.removeFirst();
buf.removeSome(2);
values.clear();
buf.collect(values);
Assert.assertTrue(values.isEmpty());
buf.addLast(new Node(5, 5));
buf.addLast(new Node(6, 6));
buf.collect(values);
Assert.assertEquals(Arrays.asList(5, 6), values);
}
@Test(expected = IllegalStateException.class)
public void boundedRemoveFirstOneItemOnly() {
BoundedReplayBuffer<Integer> buf = new BoundedReplayBuffer<Integer>(false) {
private static final long serialVersionUID = -9081211580719235896L;
@Override
void truncate() {
}
};
buf.removeFirst();
}
@Test
public void timedAndSizedTruncation() {
TestScheduler test = new TestScheduler();
SizeAndTimeBoundReplayBuffer<Integer> buf = new SizeAndTimeBoundReplayBuffer<>(2, 2000, TimeUnit.MILLISECONDS, test, false);
List<Integer> values = new ArrayList<>();
buf.next(1);
test.advanceTimeBy(1, TimeUnit.SECONDS);
buf.next(2);
test.advanceTimeBy(1, TimeUnit.SECONDS);
buf.collect(values);
Assert.assertEquals(Arrays.asList(2), values);
buf.next(3);
buf.next(4);
values.clear();
buf.collect(values);
Assert.assertEquals(Arrays.asList(3, 4), values);
test.advanceTimeBy(2, TimeUnit.SECONDS);
buf.next(5);
values.clear();
buf.collect(values);
Assert.assertEquals(Arrays.asList(5), values);
test.advanceTimeBy(2, TimeUnit.SECONDS);
buf.complete();
values.clear();
buf.collect(values);
Assert.assertTrue(values.isEmpty());
Assert.assertEquals(1, buf.size);
Assert.assertTrue(buf.hasCompleted());
}
@Test
public void backpressure() {
final AtomicLong requested = new AtomicLong();
Flowable<Integer> source = Flowable.range(1, 1000)
.doOnRequest(new LongConsumer() {
@Override
public void accept(long t) {
requested.addAndGet(t);
}
});
ConnectableFlowable<Integer> cf = source.replay();
TestSubscriberEx<Integer> ts1 = new TestSubscriberEx<>(10L);
TestSubscriberEx<Integer> ts2 = new TestSubscriberEx<>(90L);
cf.subscribe(ts1);
cf.subscribe(ts2);
ts2.request(10);
cf.connect();
ts1.assertValueCount(10);
ts1.assertNotTerminated();
ts2.assertValueCount(100);
ts2.assertNotTerminated();
Assert.assertEquals(100, requested.get());
}
@Test
public void backpressureBounded() {
final AtomicLong requested = new AtomicLong();
Flowable<Integer> source = Flowable.range(1, 1000)
.doOnRequest(new LongConsumer() {
@Override
public void accept(long t) {
requested.addAndGet(t);
}
});
ConnectableFlowable<Integer> cf = source.replay(50);
TestSubscriberEx<Integer> ts1 = new TestSubscriberEx<>(10L);
TestSubscriberEx<Integer> ts2 = new TestSubscriberEx<>(90L);
cf.subscribe(ts1);
cf.subscribe(ts2);
ts2.request(10);
cf.connect();
ts1.assertValueCount(10);
ts1.assertNotTerminated();
ts2.assertValueCount(100);
ts2.assertNotTerminated();
Assert.assertEquals(100, requested.get());
}
@Test
public void coldReplayNoBackpressure() {
Flowable<Integer> source = Flowable.range(0, 1000).replay().autoConnect();
TestSubscriberEx<Integer> ts = new TestSubscriberEx<>();
source.subscribe(ts);
ts.assertNoErrors();
ts.assertTerminated();
List<Integer> onNextEvents = ts.values();
assertEquals(1000, onNextEvents.size());
for (int i = 0; i < 1000; i++) {
assertEquals((Integer)i, onNextEvents.get(i));
}
}
@Test
public void coldReplayBackpressure() {
Flowable<Integer> source = Flowable.range(0, 1000).replay().autoConnect();
TestSubscriber<Integer> ts = new TestSubscriber<>(0L);
ts.request(10);
source.subscribe(ts);
ts.assertNoErrors();
ts.assertNotComplete();
List<Integer> onNextEvents = ts.values();
assertEquals(10, onNextEvents.size());
for (int i = 0; i < 10; i++) {
assertEquals((Integer)i, onNextEvents.get(i));
}
ts.cancel();
}
@Test
public void cache() throws InterruptedException {
final AtomicInteger counter = new AtomicInteger();
Flowable<String> f = Flowable.unsafeCreate(new Publisher<String>() {
@Override
public void subscribe(final Subscriber<? super String> subscriber) {
subscriber.onSubscribe(new BooleanSubscription());
new Thread(new Runnable() {
@Override
public void run() {
counter.incrementAndGet();
System.out.println("published observable being executed");
subscriber.onNext("one");
subscriber.onComplete();
}
}).start();
}
}).replay().autoConnect();
// we then expect the following 2 subscriptions to get that same value
final CountDownLatch latch = new CountDownLatch(2);
// subscribe once
f.subscribe(new Consumer<String>() {
@Override
public void accept(String v) {
assertEquals("one", v);
System.out.println("v: " + v);
latch.countDown();
}
});
// subscribe again
f.subscribe(new Consumer<String>() {
@Override
public void accept(String v) {
assertEquals("one", v);
System.out.println("v: " + v);
latch.countDown();
}
});
if (!latch.await(1000, TimeUnit.MILLISECONDS)) {
fail("subscriptions did not receive values");
}
assertEquals(1, counter.get());
}
@Test
public void unsubscribeSource() throws Throwable {
Action unsubscribe = mock(Action.class);
Flowable<Integer> f = Flowable.just(1).doOnCancel(unsubscribe).replay().autoConnect();
f.subscribe();
f.subscribe();
f.subscribe();
verify(unsubscribe, never()).run();
}
@Test
public void take() {
TestSubscriberEx<Integer> ts = new TestSubscriberEx<>();
Flowable<Integer> cached = Flowable.range(1, 100).replay().autoConnect();
cached
.take(10)
.subscribe(ts);
ts.assertNoErrors();
ts.assertTerminated();
ts.assertValues(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
}
@Test
public void async() {
Flowable<Integer> source = Flowable.range(1, 10000);
for (int i = 0; i < 100; i++) {
TestSubscriberEx<Integer> ts1 = new TestSubscriberEx<>();
Flowable<Integer> cached = source.replay().autoConnect();
cached.observeOn(Schedulers.computation()).subscribe(ts1);
ts1.awaitDone(2, TimeUnit.SECONDS);
ts1.assertNoErrors();
ts1.assertTerminated();
assertEquals(10000, ts1.values().size());
TestSubscriberEx<Integer> ts2 = new TestSubscriberEx<>();
cached.observeOn(Schedulers.computation()).subscribe(ts2);
ts2.awaitDone(2, TimeUnit.SECONDS);
ts2.assertNoErrors();
ts2.assertTerminated();
assertEquals(10000, ts2.values().size());
}
}
@Test
public void asyncComeAndGo() {
Flowable<Long> source = Flowable.interval(1, 1, TimeUnit.MILLISECONDS)
.take(1000)
.subscribeOn(Schedulers.io());
Flowable<Long> cached = source.replay().autoConnect();
Flowable<Long> output = cached.observeOn(Schedulers.computation(), false, 1024);
List<TestSubscriberEx<Long>> list = new ArrayList<>(100);
for (int i = 0; i < 100; i++) {
TestSubscriberEx<Long> ts = new TestSubscriberEx<>();
list.add(ts);
output.skip(i * 10).take(10).subscribe(ts);
}
List<Long> expected = new ArrayList<>();
for (int i = 0; i < 10; i++) {
expected.add((long)(i - 10));
}
int j = 0;
for (TestSubscriberEx<Long> ts : list) {
ts.awaitDone(3, TimeUnit.SECONDS);
ts.assertNoErrors();
ts.assertTerminated();
for (int i = j * 10; i < j * 10 + 10; i++) {
expected.set(i - j * 10, (long)i);
}
ts.assertValueSequence(expected);
j++;
}
}
@Test
public void noMissingBackpressureException() {
final int m = 4 * 1000 * 1000;
Flowable<Integer> firehose = Flowable.unsafeCreate(new Publisher<Integer>() {
@Override
public void subscribe(Subscriber<? super Integer> t) {
t.onSubscribe(new BooleanSubscription());
for (int i = 0; i < m; i++) {
t.onNext(i);
}
t.onComplete();
}
});
TestSubscriberEx<Integer> ts = new TestSubscriberEx<>();
firehose.replay().autoConnect().observeOn(Schedulers.computation()).takeLast(100).subscribe(ts);
ts.awaitDone(3, TimeUnit.SECONDS);
ts.assertNoErrors();
ts.assertTerminated();
assertEquals(100, ts.values().size());
}
@Test
public void valuesAndThenError() {
Flowable<Integer> source = Flowable.range(1, 10)
.concatWith(Flowable.<Integer>error(new TestException()))
.replay().autoConnect();
TestSubscriberEx<Integer> ts = new TestSubscriberEx<>();
source.subscribe(ts);
ts.assertValues(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
ts.assertNotComplete();
Assert.assertEquals(1, ts.errors().size());
TestSubscriberEx<Integer> ts2 = new TestSubscriberEx<>();
source.subscribe(ts2);
ts2.assertValues(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
ts2.assertNotComplete();
Assert.assertEquals(1, ts2.errors().size());
}
@Test
public void unsafeChildOnNextThrows() {
final AtomicInteger count = new AtomicInteger();
Flowable<Integer> source = Flowable.range(1, 100)
.doOnNext(new Consumer<Integer>() {
@Override
public void accept(Integer t) {
count.getAndIncrement();
}
})
.replay().autoConnect();
TestSubscriber<Integer> ts = new TestSubscriber<Integer>() {
@Override
public void onNext(Integer t) {
throw new TestException();
}
};
source.subscribe(ts);
Assert.assertEquals(100, count.get());
ts.assertNoValues();
ts.assertNotComplete();
ts.assertError(TestException.class);
}
@Test
public void unsafeChildOnErrorThrows() throws Throwable {
TestHelper.withErrorTracking(errors -> {
Flowable<Integer> source = Flowable.<Integer>error(new IOException())
.replay()
.autoConnect();
TestSubscriber<Integer> ts = new TestSubscriber<Integer>() {
@Override
public void onError(Throwable t) {
super.onError(t);
throw new TestException();
}
};
source.subscribe(ts);
ts.assertFailure(IOException.class);
TestHelper.assertUndeliverable(errors, 0, TestException.class);
});
}
@Test
public void unsafeChildOnCompleteThrows() throws Throwable {
TestHelper.withErrorTracking(errors -> {
Flowable<Integer> source = Flowable.<Integer>empty()
.replay()
.autoConnect();
TestSubscriber<Integer> ts = new TestSubscriber<Integer>() {
@Override
public void onComplete() {
super.onComplete();
throw new TestException();
}
};
source.subscribe(ts);
ts.assertResult();
TestHelper.assertUndeliverable(errors, 0, TestException.class);
});
}
@Test
public void unboundedLeavesEarly() {
PublishProcessor<Integer> source = PublishProcessor.create();
final List<Long> requests = new ArrayList<>();
Flowable<Integer> out = source
.doOnRequest(new LongConsumer() {
@Override
public void accept(long t) {
requests.add(t);
}
}).replay().autoConnect();
TestSubscriber<Integer> ts1 = new TestSubscriber<>(5L);
TestSubscriber<Integer> ts2 = new TestSubscriber<>(10L);
out.subscribe(ts1);
out.subscribe(ts2);
ts2.cancel();
Assert.assertEquals(Arrays.asList(5L, 5L), requests);
}
@Test
public void subscribersComeAndGoAtRequestBoundaries() {
ConnectableFlowable<Integer> source = Flowable.range(1, 10).replay(1);
source.connect();
TestSubscriber<Integer> ts1 = new TestSubscriber<>(2L);
source.subscribe(ts1);
ts1.assertValues(1, 2);
ts1.assertNoErrors();
ts1.cancel();
TestSubscriber<Integer> ts2 = new TestSubscriber<>(2L);
source.subscribe(ts2);
ts2.assertValues(2, 3);
ts2.assertNoErrors();
ts2.cancel();
TestSubscriber<Integer> ts21 = new TestSubscriber<>(1L);
source.subscribe(ts21);
ts21.assertValues(3);
ts21.assertNoErrors();
ts21.cancel();
TestSubscriber<Integer> ts22 = new TestSubscriber<>(1L);
source.subscribe(ts22);
ts22.assertValues(3);
ts22.assertNoErrors();
ts22.cancel();
TestSubscriber<Integer> ts3 = new TestSubscriber<>();
source.subscribe(ts3);
ts3.assertNoErrors();
System.out.println(ts3.values());
ts3.assertValues(3, 4, 5, 6, 7, 8, 9, 10);
ts3.assertComplete();
}
@Test
public void subscribersComeAndGoAtRequestBoundaries2() {
ConnectableFlowable<Integer> source = Flowable.range(1, 10).replay(2);
source.connect();
TestSubscriber<Integer> ts1 = new TestSubscriber<>(2L);
source.subscribe(ts1);
ts1.assertValues(1, 2);
ts1.assertNoErrors();
ts1.cancel();
TestSubscriber<Integer> ts11 = new TestSubscriber<>(2L);
source.subscribe(ts11);
ts11.assertValues(1, 2);
ts11.assertNoErrors();
ts11.cancel();
TestSubscriber<Integer> ts2 = new TestSubscriber<>(3L);
source.subscribe(ts2);
ts2.assertValues(1, 2, 3);
ts2.assertNoErrors();
ts2.cancel();
TestSubscriber<Integer> ts21 = new TestSubscriber<>(1L);
source.subscribe(ts21);
ts21.assertValues(2);
ts21.assertNoErrors();
ts21.cancel();
TestSubscriber<Integer> ts22 = new TestSubscriber<>(1L);
source.subscribe(ts22);
ts22.assertValues(2);
ts22.assertNoErrors();
ts22.cancel();
TestSubscriber<Integer> ts3 = new TestSubscriber<>();
source.subscribe(ts3);
ts3.assertNoErrors();
System.out.println(ts3.values());
ts3.assertValues(2, 3, 4, 5, 6, 7, 8, 9, 10);
ts3.assertComplete();
}
@Test
public void replayTime() {
Flowable.just(1).replay(1, TimeUnit.MINUTES)
.autoConnect()
.test()
.awaitDone(5, TimeUnit.SECONDS)
.assertResult(1);
}
@Test
public void replaySizeAndTime() {
Flowable.just(1).replay(1, 1, TimeUnit.MILLISECONDS)
.autoConnect()
.test()
.awaitDone(5, TimeUnit.SECONDS)
.assertResult(1);
}
@Test
public void replaySelectorTime() {
Flowable.just(1).replay(Functions.<Flowable<Integer>>identity(), 1, TimeUnit.MINUTES)
.test()
.awaitDone(5, TimeUnit.SECONDS)
.assertResult(1);
}
@Test
public void source() {
Flowable<Integer> source = Flowable.range(1, 3);
assertSame(source, (((HasUpstreamPublisher<?>)source.replay())).source());
}
@Test
public void connectRace() {
for (int i = 0; i < TestHelper.RACE_DEFAULT_LOOPS; i++) {
final ConnectableFlowable<Integer> cf = Flowable.range(1, 3).replay();
Runnable r = new Runnable() {
@Override
public void run() {
cf.connect();
}
};
TestHelper.race(r, r);
}
}
@Test
public void subscribeRace() {
for (int i = 0; i < TestHelper.RACE_DEFAULT_LOOPS; i++) {
final ConnectableFlowable<Integer> cf = Flowable.range(1, 3).replay();
final TestSubscriber<Integer> ts1 = new TestSubscriber<>();
final TestSubscriber<Integer> ts2 = new TestSubscriber<>();
Runnable r1 = new Runnable() {
@Override
public void run() {
cf.subscribe(ts1);
}
};
Runnable r2 = new Runnable() {
@Override
public void run() {
cf.subscribe(ts2);
}
};
TestHelper.race(r1, r2);
}
}
@Test
public void addRemoveRace() {
for (int i = 0; i < TestHelper.RACE_DEFAULT_LOOPS; i++) {
final ConnectableFlowable<Integer> cf = Flowable.range(1, 3).replay();
final TestSubscriber<Integer> ts1 = new TestSubscriber<>();
final TestSubscriber<Integer> ts2 = new TestSubscriber<>();
cf.subscribe(ts1);
Runnable r1 = new Runnable() {
@Override
public void run() {
ts1.cancel();
}
};
Runnable r2 = new Runnable() {
@Override
public void run() {
cf.subscribe(ts2);
}
};
TestHelper.race(r1, r2);
}
}
@Test
public void cancelOnArrival() {
Flowable.range(1, 2)
.replay(Integer.MAX_VALUE)
.autoConnect()
.test(Long.MAX_VALUE, true)
.assertEmpty();
}
@Test
public void cancelOnArrival2() {
ConnectableFlowable<Integer> cf = PublishProcessor.<Integer>create()
.replay(Integer.MAX_VALUE);
cf.test();
cf
.autoConnect()
.test(Long.MAX_VALUE, true)
.assertEmpty();
}
@Test
public void connectConsumerThrows() {
ConnectableFlowable<Integer> cf = Flowable.range(1, 2)
.replay();
try {
cf.connect(new Consumer<Disposable>() {
@Override
public void accept(Disposable t) throws Exception {
throw new TestException();
}
});
fail("Should have thrown");
} catch (TestException ex) {
// expected
}
cf.test().assertEmpty().cancel();
cf.connect();
cf.test().assertResult(1, 2);
}
@Test
public void badSource() {
List<Throwable> errors = TestHelper.trackPluginErrors();
try {
new Flowable<Integer>() {
@Override
protected void subscribeActual(Subscriber<? super Integer> subscriber) {
subscriber.onSubscribe(new BooleanSubscription());
subscriber.onError(new TestException("First"));
subscriber.onNext(1);
subscriber.onError(new TestException("Second"));
subscriber.onComplete();
}
}.replay()
.autoConnect()
.to(TestHelper.<Integer>testConsumer())
.assertFailureAndMessage(TestException.class, "First");
TestHelper.assertUndeliverable(errors, 0, TestException.class, "Second");
} finally {
RxJavaPlugins.reset();
}
}
@Test
public void subscribeOnNextRace() {
for (int i = 0; i < TestHelper.RACE_DEFAULT_LOOPS; i++) {
final PublishProcessor<Integer> pp = PublishProcessor.create();
final ConnectableFlowable<Integer> cf = pp.replay();
final TestSubscriber<Integer> ts1 = new TestSubscriber<>();
Runnable r1 = new Runnable() {
@Override
public void run() {
cf.subscribe(ts1);
}
};
Runnable r2 = new Runnable() {
@Override
public void run() {
for (int j = 0; j < 1000; j++) {
pp.onNext(j);
}
}
};
TestHelper.race(r1, r2);
}
}
@Test
public void unsubscribeOnNextRace() {
for (int i = 0; i < TestHelper.RACE_DEFAULT_LOOPS; i++) {
final PublishProcessor<Integer> pp = PublishProcessor.create();
final ConnectableFlowable<Integer> cf = pp.replay();
final TestSubscriber<Integer> ts1 = new TestSubscriber<>();
cf.subscribe(ts1);
Runnable r1 = new Runnable() {
@Override
public void run() {
ts1.cancel();
}
};
Runnable r2 = new Runnable() {
@Override
public void run() {
for (int j = 0; j < 1000; j++) {
pp.onNext(j);
}
}
};
TestHelper.race(r1, r2);
}
}
@Test
public void unsubscribeReplayRace() {
for (int i = 0; i < TestHelper.RACE_DEFAULT_LOOPS; i++) {
final ConnectableFlowable<Integer> cf = Flowable.range(1, 1000).replay();
final TestSubscriber<Integer> ts1 = new TestSubscriber<>();
cf.connect();
Runnable r1 = new Runnable() {
@Override
public void run() {
cf.subscribe(ts1);
}
};
Runnable r2 = new Runnable() {
@Override
public void run() {
ts1.cancel();
}
};
TestHelper.race(r1, r2);
}
}
@Test
public void reentrantOnNext() {
final PublishProcessor<Integer> pp = PublishProcessor.create();
TestSubscriber<Integer> ts = new TestSubscriber<Integer>() {
@Override
public void onNext(Integer t) {
if (t == 1) {
pp.onNext(2);
pp.onComplete();
}
super.onNext(t);
}
};
pp.replay().autoConnect().subscribe(ts);
pp.onNext(1);
ts.assertResult(1, 2);
}
@Test
public void reentrantOnNextBound() {
final PublishProcessor<Integer> pp = PublishProcessor.create();
TestSubscriber<Integer> ts = new TestSubscriber<Integer>() {
@Override
public void onNext(Integer t) {
if (t == 1) {
pp.onNext(2);
pp.onComplete();
}
super.onNext(t);
}
};
pp.replay(10).autoConnect().subscribe(ts);
pp.onNext(1);
ts.assertResult(1, 2);
}
@Test
public void reentrantOnNextCancel() {
final PublishProcessor<Integer> pp = PublishProcessor.create();
TestSubscriber<Integer> ts = new TestSubscriber<Integer>() {
@Override
public void onNext(Integer t) {
if (t == 1) {
pp.onNext(2);
cancel();
}
super.onNext(t);
}
};
pp.replay().autoConnect().subscribe(ts);
pp.onNext(1);
ts.assertValues(1);
}
@Test
public void reentrantOnNextCancelBounded() {
final PublishProcessor<Integer> pp = PublishProcessor.create();
TestSubscriber<Integer> ts = new TestSubscriber<Integer>() {
@Override
public void onNext(Integer t) {
if (t == 1) {
pp.onNext(2);
cancel();
}
super.onNext(t);
}
};
pp.replay(10).autoConnect().subscribe(ts);
pp.onNext(1);
ts.assertValues(1);
}
@Test
public void replayMaxInt() {
Flowable.range(1, 2)
.replay(Integer.MAX_VALUE)
.autoConnect()
.test()
.assertResult(1, 2);
}
@Test
public void timedAndSizedTruncationError() {
TestScheduler test = new TestScheduler();
SizeAndTimeBoundReplayBuffer<Integer> buf = new SizeAndTimeBoundReplayBuffer<>(2, 2000, TimeUnit.MILLISECONDS, test, false);
Assert.assertFalse(buf.hasCompleted());
Assert.assertFalse(buf.hasError());
List<Integer> values = new ArrayList<>();
buf.next(1);
test.advanceTimeBy(1, TimeUnit.SECONDS);
buf.next(2);
test.advanceTimeBy(1, TimeUnit.SECONDS);
buf.collect(values);
Assert.assertEquals(Arrays.asList(2), values);
buf.next(3);
buf.next(4);
values.clear();
buf.collect(values);
Assert.assertEquals(Arrays.asList(3, 4), values);
test.advanceTimeBy(2, TimeUnit.SECONDS);
buf.next(5);
values.clear();
buf.collect(values);
Assert.assertEquals(Arrays.asList(5), values);
Assert.assertFalse(buf.hasCompleted());
Assert.assertFalse(buf.hasError());
test.advanceTimeBy(2, TimeUnit.SECONDS);
buf.error(new TestException());
values.clear();
buf.collect(values);
Assert.assertTrue(values.isEmpty());
Assert.assertEquals(1, buf.size);
Assert.assertFalse(buf.hasCompleted());
Assert.assertTrue(buf.hasError());
}
/**
 * Verifies the size-bound buffer keeps at most the 2 newest items and retains
 * them across the completion terminal.
 */
@Test
public void sizedTruncation() {
    SizeBoundReplayBuffer<Integer> buffer = new SizeBoundReplayBuffer<>(2, false);
    List<Integer> collected = new ArrayList<>();
    buffer.next(1);
    buffer.next(2);
    buffer.collect(collected);
    Assert.assertEquals(Arrays.asList(1, 2), collected);
    buffer.next(3);
    buffer.next(4);
    collected.clear();
    // Oldest items evicted once the bound is exceeded.
    buffer.collect(collected);
    Assert.assertEquals(Arrays.asList(3, 4), collected);
    buffer.next(5);
    collected.clear();
    buffer.collect(collected);
    Assert.assertEquals(Arrays.asList(4, 5), collected);
    Assert.assertFalse(buffer.hasCompleted());
    buffer.complete();
    collected.clear();
    // Completion must not drop the buffered items.
    buffer.collect(collected);
    Assert.assertEquals(Arrays.asList(4, 5), collected);
    // The terminal event occupies a node of its own, hence size 3.
    Assert.assertEquals(3, buffer.size);
    Assert.assertTrue(buffer.hasCompleted());
    Assert.assertFalse(buffer.hasError());
}
/**
 * Connecting and immediately disposing before the upstream delivers its
 * Subscription must cancel that late-arriving Subscription.
 */
@Test
public void delayedUpstreamOnSubscribe() {
    // Captures the Subscriber so onSubscribe can be delivered after dispose().
    final Subscriber<?>[] sub = { null };
    new Flowable<Integer>() {
        @Override
        protected void subscribeActual(Subscriber<? super Integer> s) {
            sub[0] = s;
        }
    }
    .replay()
    .connect()
    .dispose();
    BooleanSubscription bs = new BooleanSubscription();
    sub[0].onSubscribe(bs);
    // The already-disposed connection must cancel the late subscription.
    assertTrue(bs.isCancelled());
}
@Test
public void timedNoOutdatedData() {
    TestScheduler scheduler = new TestScheduler();
    Flowable<Integer> replayed = Flowable.just(1)
            .replay(2, TimeUnit.SECONDS, scheduler)
            .autoConnect();
    // Within the 2 second window each new subscriber still sees the cached item.
    replayed.test().assertResult(1);
    replayed.test().assertResult(1);
    // Once the window has elapsed the stale item must not be replayed.
    scheduler.advanceTimeBy(3, TimeUnit.SECONDS);
    replayed.test().assertResult();
}
@Test
public void multicastSelectorCallableConnectableCrash() {
    // A crashing connectable supplier must surface as an onError to the subscriber.
    FlowableReplay.multicastSelector((Supplier<ConnectableFlowable<Object>>) () -> {
        throw new TestException();
    }, Functions.<Flowable<Object>>identity())
    .test()
    .assertFailure(TestException.class);
}
@Test
public void badRequest() {
    // A non-positive request amount must be reported per the Reactive Streams spec.
    ConnectableFlowable<Object> connectable = Flowable.never().replay();
    TestHelper.assertBadRequestReported(connectable);
}
/**
 * After completion, the size-bound buffer's head node must not retain a value
 * and trimHead() must be a no-op (same head node instance).
 */
@Test
public void noHeadRetentionCompleteSize() {
    PublishProcessor<Integer> source = PublishProcessor.create();
    FlowableReplay<Integer> co = (FlowableReplay<Integer>)source
            .replay(1);
    // the backpressure coordination would not accept items from source otherwise
    co.test();
    co.connect();
    BoundedReplayBuffer<Integer> buf = (BoundedReplayBuffer<Integer>)(co.current.get().buffer);
    source.onNext(1);
    source.onNext(2);
    source.onComplete();
    // The head value has already been released on the terminal event.
    assertNull(buf.get().value);
    Object o = buf.get();
    buf.trimHead();
    // trimHead() must not allocate a new head when it holds no value.
    assertSame(o, buf.get());
}
/**
 * Same as {@code noHeadRetentionCompleteSize} but terminating with an error:
 * the head value is released and trimHead() is a no-op.
 */
@Test
public void noHeadRetentionErrorSize() {
    PublishProcessor<Integer> source = PublishProcessor.create();
    FlowableReplay<Integer> co = (FlowableReplay<Integer>)source
            .replay(1);
    co.test();
    co.connect();
    BoundedReplayBuffer<Integer> buf = (BoundedReplayBuffer<Integer>)(co.current.get().buffer);
    source.onNext(1);
    source.onNext(2);
    source.onError(new TestException());
    // Head value released on the error terminal.
    assertNull(buf.get().value);
    Object o = buf.get();
    buf.trimHead();
    // No new head node should be created for an empty head.
    assertSame(o, buf.get());
}
/**
 * For a live (non-terminated) size-bound replay, the evicted item lingers in
 * the head node until trimHead() explicitly releases it; a second trim is a no-op.
 */
@Test
public void noHeadRetentionSize() {
    PublishProcessor<Integer> source = PublishProcessor.create();
    FlowableReplay<Integer> co = (FlowableReplay<Integer>)source
            .replay(1);
    co.test();
    co.connect();
    BoundedReplayBuffer<Integer> buf = (BoundedReplayBuffer<Integer>)(co.current.get().buffer);
    source.onNext(1);
    source.onNext(2);
    // While live the head still references the evicted item...
    assertNotNull(buf.get().value);
    buf.trimHead();
    // ...until trimHead() releases it.
    assertNull(buf.get().value);
    Object o = buf.get();
    buf.trimHead();
    // Trimming an already-empty head must not replace the node.
    assertSame(o, buf.get());
}
/**
 * Time-bound variant of {@code noHeadRetentionCompleteSize}: after completion
 * the head holds no value and trimHead() is a no-op.
 */
@Test
public void noHeadRetentionCompleteTime() {
    PublishProcessor<Integer> source = PublishProcessor.create();
    FlowableReplay<Integer> co = (FlowableReplay<Integer>)source
            .replay(1, TimeUnit.MINUTES, Schedulers.computation());
    co.test();
    co.connect();
    BoundedReplayBuffer<Integer> buf = (BoundedReplayBuffer<Integer>)(co.current.get().buffer);
    source.onNext(1);
    source.onNext(2);
    source.onComplete();
    // Head value released on the completion terminal.
    assertNull(buf.get().value);
    Object o = buf.get();
    buf.trimHead();
    // Trim of an empty head must keep the same node instance.
    assertSame(o, buf.get());
}
/**
 * Time-bound variant of {@code noHeadRetentionErrorSize}: after an error
 * terminal the head holds no value and trimHead() is a no-op.
 */
@Test
public void noHeadRetentionErrorTime() {
    PublishProcessor<Integer> source = PublishProcessor.create();
    FlowableReplay<Integer> co = (FlowableReplay<Integer>)source
            .replay(1, TimeUnit.MINUTES, Schedulers.computation());
    co.test();
    co.connect();
    BoundedReplayBuffer<Integer> buf = (BoundedReplayBuffer<Integer>)(co.current.get().buffer);
    source.onNext(1);
    source.onNext(2);
    source.onError(new TestException());
    // Head value released on the error terminal.
    assertNull(buf.get().value);
    Object o = buf.get();
    buf.trimHead();
    // Trim of an empty head must keep the same node instance.
    assertSame(o, buf.get());
}
/**
 * Live time-bound replay: an item that aged out of the window lingers in the
 * head until trimHead() releases it; a second trim is a no-op.
 */
@Test
public void noHeadRetentionTime() {
    TestScheduler sch = new TestScheduler();
    PublishProcessor<Integer> source = PublishProcessor.create();
    FlowableReplay<Integer> co = (FlowableReplay<Integer>)source
            .replay(1, TimeUnit.MILLISECONDS, sch);
    co.test();
    co.connect();
    BoundedReplayBuffer<Integer> buf = (BoundedReplayBuffer<Integer>)(co.current.get().buffer);
    source.onNext(1);
    // Age item 1 beyond the 1ms window before the next emission.
    sch.advanceTimeBy(2, TimeUnit.MILLISECONDS);
    source.onNext(2);
    // The aged-out item is still referenced by the head...
    assertNotNull(buf.get().value);
    buf.trimHead();
    // ...until trimHead() releases it.
    assertNull(buf.get().value);
    Object o = buf.get();
    buf.trimHead();
    // Trimming an empty head must not replace the node.
    assertSame(o, buf.get());
}
@Test(expected = TestException.class)
public void createBufferFactoryCrash() {
    // A buffer factory that throws on connect() must propagate the exception
    // to the connect() caller.
    Supplier<ReplayBuffer<Integer>> failingFactory = () -> {
        throw new TestException();
    };
    FlowableReplay.create(Flowable.just(1), failingFactory)
    .connect();
}
@Test
public void createBufferFactoryCrashOnSubscribe() {
    // When the crash happens during subscription, it must be delivered as an
    // onError to the subscriber instead of being thrown.
    Supplier<ReplayBuffer<Integer>> failingFactory = () -> {
        throw new TestException();
    };
    FlowableReplay.create(Flowable.just(1), failingFactory)
    .test()
    .assertFailure(TestException.class);
}
/**
 * Regression check that a bounded replay (size 1) does not retain all upstream
 * items: emits 200 x 1 MB arrays and asserts heap growth stays under 100 MB.
 * NOTE(review): relies on System.gc()/sleep heuristics, so the bound is
 * intentionally generous to avoid flakiness.
 */
@Test
public void noBoundedRetentionViaThreadLocal() throws Exception {
    Flowable<byte[]> source = Flowable.range(1, 200)
    .map(new Function<Integer, byte[]>() {
        @Override
        public byte[] apply(Integer v) throws Exception {
            // 1 MB payload per item so retention shows up clearly in heap usage.
            return new byte[1024 * 1024];
        }
    })
    .replay(new Function<Flowable<byte[]>, Publisher<byte[]>>() {
        @Override
        public Publisher<byte[]> apply(final Flowable<byte[]> f) throws Exception {
            // Re-subscribes to the shared replay after the first item,
            // exercising the buffer across multiple subscriptions.
            return f.take(1)
            .concatMap(new Function<byte[], Publisher<byte[]>>() {
                @Override
                public Publisher<byte[]> apply(byte[] v) throws Exception {
                    return f;
                }
            });
        }
    }, 1)
    .takeLast(1)
    ;
    System.out.println("Bounded Replay Leak check: Wait before GC");
    Thread.sleep(1000);
    System.out.println("Bounded Replay Leak check: GC");
    System.gc();
    Thread.sleep(500);
    final MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean();
    MemoryUsage memHeap = memoryMXBean.getHeapMemoryUsage();
    // Baseline heap usage before the pipeline runs.
    long initial = memHeap.getUsed();
    System.out.printf("Bounded Replay Leak check: Starting: %.3f MB%n", initial / 1024.0 / 1024.0);
    final AtomicLong after = new AtomicLong();
    source.subscribe(new Consumer<byte[]>() {
        @Override
        public void accept(byte[] v) throws Exception {
            // Measure heap while the pipeline (and any retained buffer) is still reachable.
            System.out.println("Bounded Replay Leak check: Wait before GC 2");
            Thread.sleep(1000);
            System.out.println("Bounded Replay Leak check: GC 2");
            System.gc();
            Thread.sleep(500);
            after.set(memoryMXBean.getHeapMemoryUsage().getUsed());
        }
    });
    System.out.printf("Bounded Replay Leak check: After: %.3f MB%n", after.get() / 1024.0 / 1024.0);
    // 100 MB headroom: far below the ~200 MB that full retention would add.
    if (initial + 100 * 1024 * 1024 < after.get()) {
        Assert.fail("Bounded Replay Leak check: Memory leak detected: " + (initial / 1024.0 / 1024.0)
        + " -> " + after.get() / 1024.0 / 1024.0);
    }
}
/**
 * A subscriber whose onNext throws must end up in the error state while the
 * source is still fully consumed by the size-bounded replay.
 */
@Test
public void unsafeChildOnNextThrowsSizeBound() {
    final AtomicInteger upstreamCount = new AtomicInteger();
    Flowable<Integer> source = Flowable.range(1, 100)
            .doOnNext(item -> upstreamCount.getAndIncrement())
            .replay(1000).autoConnect();
    TestSubscriber<Integer> subscriber = new TestSubscriber<Integer>() {
        @Override
        public void onNext(Integer value) {
            throw new TestException();
        }
    };
    source.subscribe(subscriber);
    // The replay connection must still have drained the entire source.
    Assert.assertEquals(100, upstreamCount.get());
    subscriber.assertNoValues();
    subscriber.assertNotComplete();
    subscriber.assertError(TestException.class);
}
/**
 * A subscriber whose onError throws must have that secondary exception routed
 * to the global undeliverable handler, not swallowed.
 */
@Test
public void unsafeChildOnErrorThrowsSizeBound() throws Throwable {
    TestHelper.withErrorTracking(errors -> {
        Flowable<Integer> source = Flowable.<Integer>error(new IOException())
                .replay(1000)
                .autoConnect();
        TestSubscriber<Integer> subscriber = new TestSubscriber<Integer>() {
            @Override
            public void onError(Throwable t) {
                super.onError(t);
                // Crash after recording the terminal event.
                throw new TestException();
            }
        };
        source.subscribe(subscriber);
        subscriber.assertFailure(IOException.class);
        TestHelper.assertUndeliverable(errors, 0, TestException.class);
    });
}
/**
 * A subscriber whose onComplete throws must have the secondary exception
 * routed to the global undeliverable handler.
 */
@Test
public void unsafeChildOnCompleteThrowsSizeBound() throws Throwable {
    TestHelper.withErrorTracking(errors -> {
        Flowable<Integer> source = Flowable.<Integer>empty()
                .replay(1000)
                .autoConnect();
        TestSubscriber<Integer> subscriber = new TestSubscriber<Integer>() {
            @Override
            public void onComplete() {
                super.onComplete();
                // Crash after recording the terminal event.
                throw new TestException();
            }
        };
        source.subscribe(subscriber);
        subscriber.assertResult();
        TestHelper.assertUndeliverable(errors, 0, TestException.class);
    });
}
@Test(expected = TestException.class)
public void connectDisposeCrash() {
    ConnectableFlowable<Object> connectable = Flowable.never().replay();
    connectable.connect();
    // The second connect() hands the existing connection to the consumer;
    // a crash in that consumer must propagate to the caller.
    connectable.connect(connection -> { throw new TestException(); });
}
@Test
public void resetWhileNotConnectedIsNoOp() {
    // Calling reset() before any connect() must not throw.
    ConnectableFlowable<Object> connectable = Flowable.never().replay();
    connectable.reset();
}
@Test
public void resetWhileActiveIsNoOp() {
    // Calling reset() while the connection is still active must not throw
    // or tear down the live connection.
    ConnectableFlowable<Object> connectable = Flowable.never().replay();
    connectable.connect();
    connectable.reset();
}
/**
 * When the upstream delivers its Subscription only after the downstream has
 * subscribed, the replay connection must still issue an unbounded request.
 */
@Test
public void delayedUpstreamSubscription() {
    // Captures the Subscriber so onSubscribe can be called manually later.
    AtomicReference<Subscriber<? super Integer>> ref = new AtomicReference<>();
    Flowable<Integer> f = Flowable.<Integer>unsafeCreate(ref::set);
    TestSubscriber<Integer> ts = f.replay()
    .autoConnect()
    .test();
    AtomicLong requested = new AtomicLong();
    ref.get().onSubscribe(new Subscription() {
        @Override
        public void request(long n) {
            // Accumulate with overflow-capping semantics.
            BackpressureHelper.add(requested, n);
        }
        @Override
        public void cancel() {
        }
    });
    // The replay connection prefetches without bound.
    assertEquals(Long.MAX_VALUE, requested.get());
    ref.get().onComplete();
    ts.assertResult();
}
@Test
public void disposeNoNeedForReset() {
    PublishProcessor<Integer> processor = PublishProcessor.create();
    ConnectableFlowable<Integer> connectable = processor.replay();
    TestSubscriber<Integer> subscriber = connectable.test();
    Disposable connection = connectable.connect();
    processor.onNext(1);
    connection.dispose();
    // A fresh subscriber after disposal must not replay stale items...
    subscriber = connectable.test();
    subscriber.assertEmpty();
    // ...and reconnecting must work without an explicit reset().
    connectable.connect();
    subscriber.assertEmpty();
    processor.onNext(2);
    subscriber.assertValuesOnly(2);
}
@Test
public void disposeNoNeedForResetSizeBound() {
    PublishProcessor<Integer> processor = PublishProcessor.create();
    ConnectableFlowable<Integer> connectable = processor.replay(10);
    TestSubscriber<Integer> subscriber = connectable.test();
    Disposable connection = connectable.connect();
    processor.onNext(1);
    connection.dispose();
    // Size-bound variant: no stale items after disposal...
    subscriber = connectable.test();
    subscriber.assertEmpty();
    // ...and reconnect works without reset().
    connectable.connect();
    subscriber.assertEmpty();
    processor.onNext(2);
    subscriber.assertValuesOnly(2);
}
@Test
public void disposeNoNeedForResetTimeBound() {
    PublishProcessor<Integer> processor = PublishProcessor.create();
    ConnectableFlowable<Integer> connectable = processor.replay(10, TimeUnit.MINUTES);
    TestSubscriber<Integer> subscriber = connectable.test();
    Disposable connection = connectable.connect();
    processor.onNext(1);
    connection.dispose();
    // Time-bound variant: no stale items after disposal...
    subscriber = connectable.test();
    subscriber.assertEmpty();
    // ...and reconnect works without reset().
    connectable.connect();
    subscriber.assertEmpty();
    processor.onNext(2);
    subscriber.assertValuesOnly(2);
}
/**
 * Disposing the connection of a time-and-size-bound replay must allow
 * reconnection without an explicit reset(), and a subscriber arriving after
 * disposal must not observe items emitted before it.
 */
@Test
public void disposeNoNeedForResetTimeAndSizeBound() { // renamed: fixed "SIze" typo in the method name
    PublishProcessor<Integer> pp = PublishProcessor.create();
    ConnectableFlowable<Integer> cf = pp.replay(10, 10, TimeUnit.MINUTES);
    TestSubscriber<Integer> ts = cf.test();
    Disposable d = cf.connect();
    pp.onNext(1);
    d.dispose();
    // Fresh subscriber after disposal sees nothing stale...
    ts = cf.test();
    ts.assertEmpty();
    // ...and reconnecting works without reset().
    cf.connect();
    ts.assertEmpty();
    pp.onNext(2);
    ts.assertValuesOnly(2);
}
}
| InprocessWorker |
java | spring-projects__spring-boot | module/spring-boot-data-mongodb/src/test/java/org/springframework/boot/data/mongodb/autoconfigure/DataMongoReactiveRepositoriesAutoConfigurationTests.java | {
"start": 5069,
"end": 5324
} | class ____ {
}
@Configuration(proxyBeanMethods = false)
// To not find any repositories
@EnableReactiveMongoRepositories("foo.bar")
@TestAutoConfigurationPackage(DataMongoReactiveRepositoriesAutoConfigurationTests.class)
static | CustomizedConfiguration |
java | dropwizard__dropwizard | dropwizard-jackson/src/main/java/io/dropwizard/jackson/GuavaExtrasModule.java | {
"start": 2535,
"end": 3344
} | class ____ extends Serializers.Base {
@Override
public JsonSerializer<?> findSerializer(SerializationConfig config, JavaType type, BeanDescription beanDesc) {
if (CacheBuilderSpec.class.isAssignableFrom(type.getRawClass())) {
return new CacheBuilderSpecSerializer();
}
return super.findSerializer(config, type, beanDesc);
}
}
@Override
public String getModuleName() {
return "guava-extras";
}
@Override
public Version version() {
return Version.unknownVersion();
}
@Override
public void setupModule(SetupContext context) {
context.addDeserializers(new GuavaExtrasDeserializers());
context.addSerializers(new GuavaExtrasSerializers());
}
}
| GuavaExtrasSerializers |
java | micronaut-projects__micronaut-core | http-client-tck/src/main/java/io/micronaut/http/client/tck/tests/filter/ClientRequestFilterTest.java | {
"start": 14783,
"end": 20391
} | class ____ {
List<String> events = new ArrayList<>();
@RequestFilter("/request-filter/immediate-request-parameter")
public void requestFilterImmediateRequestParameter(HttpRequest<?> request) {
events.add("requestFilterImmediateRequestParameter " + request.getPath());
}
@RequestFilter("/request-filter/immediate-mutable-request-parameter")
public void requestFilterImmediateMutableRequestParameter(MutableHttpRequest<?> request) {
request.header("foo", "bar");
}
@RequestFilter("/request-filter/replace-request")
public HttpRequest<Object> requestFilterReplaceRequest() {
return HttpRequest.GET("/request-filter/replace-request-2");
}
@RequestFilter("/request-filter/replace-mutable-request")
public MutableHttpRequest<Object> requestFilterReplaceMutableRequest() {
return HttpRequest.GET("/request-filter/replace-mutable-request-2");
}
@RequestFilter("/request-filter/replace-request-null")
@Nullable
public HttpRequest<Object> requestFilterReplaceRequestNull() {
return null;
}
@RequestFilter("/request-filter/replace-request-empty")
public Optional<HttpRequest<Object>> requestFilterReplaceRequestEmpty() {
return Optional.empty();
}
@RequestFilter("/request-filter/replace-request-publisher")
public Publisher<HttpRequest<Object>> requestFilterReplaceRequestPublisher() {
return Flux.just(HttpRequest.GET("/request-filter/replace-request-publisher-2"));
}
@RequestFilter("/request-filter/replace-request-mono")
public Mono<HttpRequest<Object>> requestFilterReplaceRequestMono() {
return Mono.just(HttpRequest.GET("/request-filter/replace-request-mono-2"));
}
@RequestFilter("/request-filter/replace-request-completable")
public CompletableFuture<HttpRequest<Object>> requestFilterReplaceRequestCompletable() {
return CompletableFuture.completedFuture(HttpRequest.GET("/request-filter/replace-request-completable-2"));
}
@RequestFilter("/request-filter/replace-request-completion")
public CompletionStage<HttpRequest<Object>> requestFilterReplaceRequestCompletion() {
return CompletableFuture.completedStage(HttpRequest.GET("/request-filter/replace-request-completion-2"));
}
@RequestFilter("/request-filter/continuation-blocking")
@ExecuteOn(TaskExecutors.BLOCKING)
public void requestFilterContinuationBlocking(MutableHttpRequest<?> request, FilterContinuation<HttpResponse<?>> continuation) {
request.header("foo", "bar");
HttpResponse<?> r = continuation.proceed();
events.add("requestFilterContinuationBlocking " + r.body());
}
@RequestFilter("/request-filter/continuation-reactive-publisher")
public Publisher<HttpResponse<?>> requestFilterContinuationReactivePublisher(MutableHttpRequest<?> request, FilterContinuation<Publisher<HttpResponse<?>>> continuation) {
request.header("foo", "bar");
return Mono.from(continuation.proceed()).doOnNext(r -> events.add("requestFilterContinuationReactivePublisher " + r.body()));
}
@RequestFilter("/request-filter/continuation-update-request")
@ExecuteOn(TaskExecutors.BLOCKING)
public void requestFilterContinuationUpdateRequest(FilterContinuation<HttpResponse<?>> continuation) {
// won't affect the routing decision, but will appear in the controller
continuation.request(HttpRequest.GET("/request-filter/continuation-update-request-2"));
continuation.proceed();
}
@RequestFilter("/request-filter/immediate-response")
public HttpResponse<?> requestFilterImmediateResponse() {
return HttpResponse.ok("requestFilterImmediateResponse");
}
@RequestFilter("/request-filter/null-response")
@Nullable
public HttpResponse<?> requestFilterNullResponse() {
events.add("requestFilterNullResponse");
return null;
}
@RequestFilter("/request-filter/empty-optional-response")
public Optional<HttpResponse<?>> requestFilterEmptyOptionalResponse() {
events.add("requestFilterEmptyOptionalResponse");
return Optional.empty();
}
@RequestFilter("/request-filter/publisher-response")
public Publisher<HttpResponse<?>> requestFilterPublisherResponse() {
return Mono.fromCallable(() -> HttpResponse.ok("requestFilterPublisherResponse"));
}
@RequestFilter("/request-filter/mono-response")
public Mono<HttpResponse<?>> requestFilterMonoResponse() {
return Mono.fromCallable(() -> HttpResponse.ok("requestFilterMonoResponse"));
}
@RequestFilter("/request-filter/completable-response")
public CompletableFuture<MutableHttpResponse<String>> requestFilterCompletableResponse() {
return CompletableFuture.completedFuture(HttpResponse.ok("requestFilterCompletableResponse"));
}
@RequestFilter("/request-filter/completion-response")
public CompletionStage<MutableHttpResponse<String>> requestFilterCompletionResponse() {
return CompletableFuture.completedStage(HttpResponse.ok("requestFilterCompletionResponse"));
}
}
@Controller
@Requires(property = "spec.name", value = SPEC_NAME)
public static | MyClientFilter |
java | apache__flink | flink-table/flink-table-code-splitter/src/test/resources/block/code/TestWhileLoopInsideIfRewrite.java | {
"start": 7,
"end": 1261
} | class ____ {
int counter = 0;
public void myFun(int[] a, int[] b, int[] c) {
a[0] += b[1];
b[1] += a[1];
if (a.length < 100) {
while (counter < 10) {
c[counter] = a[0] + 1000;
System.out.println(c);
if (a[counter] > 0) {
b[counter] = a[counter] * 2;
c[counter] = b[counter] * 2;
System.out.println(b[counter]);
} else {
b[counter] = a[counter] * 3;
System.out.println(b[counter]);
}
a[2] += b[2];
b[3] += a[3];
if (a[0] > 0) {
System.out.println("Hello");
} else {
System.out.println("World");
}
counter--;
System.out.println("World ffff");
}
} else {
while (counter < 10) {
b[counter] = b[counter]++;
counter++;
}
System.out.println("World Else");
System.out.println("World Else 2");
}
a[4] += b[4];
b[5] += a[5];
}
}
| TestWhileLoopInsideIfRewrite |
java | quarkusio__quarkus | extensions/config-yaml/deployment/src/main/java/io/quarkus/config/yaml/deployment/ConfigYamlProcessor.java | {
"start": 713,
"end": 2684
} | class ____ {
@BuildStep
public FeatureBuildItem feature() {
return new FeatureBuildItem(Feature.CONFIG_YAML);
}
@BuildStep
public void yamlConfig(
BuildProducer<StaticInitConfigBuilderBuildItem> staticInitConfigBuilder,
BuildProducer<RunTimeConfigBuilderBuildItem> runTimeConfigBuilder) {
staticInitConfigBuilder.produce(new StaticInitConfigBuilderBuildItem(YamlConfigBuilder.class));
runTimeConfigBuilder.produce(new RunTimeConfigBuilderBuildItem(YamlConfigBuilder.class));
}
@BuildStep
void watchYamlConfig(BuildProducer<HotDeploymentWatchedFileBuildItem> watchedFiles) {
List<String> configWatchedFiles = new ArrayList<>();
String userDir = System.getProperty("user.dir");
// Main files
configWatchedFiles.add("application.yaml");
configWatchedFiles.add("application.yml");
configWatchedFiles.add(Paths.get(userDir, "config", "application.yaml").toAbsolutePath().toString());
configWatchedFiles.add(Paths.get(userDir, "config", "application.yml").toAbsolutePath().toString());
// Profiles
SmallRyeConfig config = ConfigProvider.getConfig().unwrap(SmallRyeConfig.class);
for (String profile : config.getProfiles()) {
configWatchedFiles.add(String.format("application-%s.yaml", profile));
configWatchedFiles.add(String.format("application-%s.yml", profile));
configWatchedFiles.add(
Paths.get(userDir, "config", String.format("application-%s.yaml", profile)).toAbsolutePath().toString());
configWatchedFiles.add(
Paths.get(userDir, "config", String.format("application-%s.yml", profile)).toAbsolutePath().toString());
}
for (String configWatchedFile : configWatchedFiles) {
watchedFiles.produce(new HotDeploymentWatchedFileBuildItem(configWatchedFile));
}
}
}
| ConfigYamlProcessor |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/MultipleTopLevelClassesTest.java | {
"start": 2425,
"end": 2697
} | class ____ {}
;
""")
.doTest();
}
@Test
public void twoRecords() {
compilationHelper
.addSourceLines(
"a/A.java",
"""
package a;
// BUG: Diagnostic contains: one top-level | Test |
java | apache__camel | components/camel-olingo2/camel-olingo2-component/src/test/java/org/apache/camel/component/olingo2/Olingo2AppAPIETagEnabledTest.java | {
"start": 2811,
"end": 9859
} | class ____ extends AbstractOlingo2AppAPITestSupport {
private static MockWebServer server;
private static Olingo2App olingoApp;
private static Edm edm;
private static EdmEntitySet manufacturersSet;
@BeforeAll
public static void scaffold() throws Exception {
initEdm();
initServer();
}
@AfterAll
public static void unscaffold() throws Exception {
if (olingoApp != null) {
olingoApp.close();
}
if (server != null) {
server.shutdown();
}
}
private static void initEdm() throws Exception {
InputStream edmXml = Olingo2AppAPIETagEnabledTest.class.getResourceAsStream("etag-enabled-service.xml");
edm = EntityProvider.readMetadata(edmXml, true);
assertNotNull(edm);
EdmEntityContainer entityContainer = edm.getDefaultEntityContainer();
assertNotNull(entityContainer);
manufacturersSet = entityContainer.getEntitySet(MANUFACTURERS);
assertNotNull(manufacturersSet);
EdmEntityType entityType = manufacturersSet.getEntityType();
assertNotNull(entityType);
//
// Check we have enabled eTag properties
//
EdmProperty property = (EdmProperty) entityType.getProperty("Id");
assertNotNull(property.getFacets());
}
private static void initServer() throws Exception {
server = new MockWebServer();
//
// Init dispatcher prior to start of server
//
server.setDispatcher(new Dispatcher() {
@SuppressWarnings("resource")
@Override
public MockResponse dispatch(RecordedRequest recordedRequest) {
MockResponse mockResponse = new MockResponse();
switch (recordedRequest.getMethod()) {
case HttpMethod.GET:
try {
if (recordedRequest.getPath().endsWith("/" + TEST_CREATE_MANUFACTURER)) {
ODataResponse odataResponse = EntityProvider.writeEntry(TEST_FORMAT.getMimeType(),
manufacturersSet, getEntityData(),
EntityProviderWriteProperties.serviceRoot(getServiceUrl().uri()).build());
InputStream entityStream = odataResponse.getEntityAsStream();
mockResponse.setResponseCode(HttpStatusCodes.OK.getStatusCode());
mockResponse.setBody(new Buffer().readFrom(entityStream));
return mockResponse;
} else if (recordedRequest.getPath().endsWith("/" + Olingo2AppImpl.METADATA)) {
EdmServiceMetadata serviceMetadata = edm.getServiceMetadata();
return mockResponse.setResponseCode(HttpStatusCodes.OK.getStatusCode())
.addHeader(ODataHttpHeaders.DATASERVICEVERSION, serviceMetadata.getDataServiceVersion())
.setBody(new Buffer().readFrom(serviceMetadata.getMetadata()));
}
} catch (Exception ex) {
throw new RuntimeCamelException(ex);
}
break;
case HttpMethod.PATCH:
case HttpMethod.PUT:
case HttpMethod.POST:
case HttpMethod.DELETE:
//
// Objective of the test:
// The Read has to have been called by
// Olingo2AppImpl.argumentWithETag
// which should then populate the IF-MATCH header with the
// eTag value.
// Verify the eTag value is present.
//
assertNotNull(recordedRequest.getHeader(HttpHeader.IF_MATCH.asString()));
return mockResponse.setResponseCode(HttpStatusCodes.NO_CONTENT.getStatusCode());
default:
break;
}
mockResponse.setResponseCode(HttpStatusCodes.NOT_FOUND.getStatusCode()).setBody("{ status: \"Not Found\"}");
return mockResponse;
}
});
server.start();
//
// have to init olingoApp after start of server
// since getBaseUrl() will call server start
//
olingoApp = new Olingo2AppImpl(getServiceUrl() + "/");
olingoApp.setContentType(TEST_FORMAT_STRING);
}
private static HttpUrl getServiceUrl() {
if (server == null) {
fail("Test programming failure. Server not initialised");
}
return server.url(SERVICE_NAME);
}
@Test
public void testPatchEntityWithETag() throws Exception {
TestOlingo2ResponseHandler<HttpStatusCodes> statusHandler = new TestOlingo2ResponseHandler<>();
Map<String, Object> data = getEntityData();
@SuppressWarnings("unchecked")
Map<String, Object> address = (Map<String, Object>) data.get(ADDRESS);
data.put("Name", "MyCarManufacturer Renamed");
address.put("Street", "Main Street");
//
// Call patch
//
olingoApp.patch(edm, TEST_CREATE_MANUFACTURER, null, data, statusHandler);
HttpStatusCodes statusCode = statusHandler.await();
assertEquals(HttpStatusCodes.NO_CONTENT, statusCode);
}
@Test
public void testUpdateEntityWithETag() throws Exception {
TestOlingo2ResponseHandler<HttpStatusCodes> statusHandler = new TestOlingo2ResponseHandler<>();
Map<String, Object> data = getEntityData();
@SuppressWarnings("unchecked")
Map<String, Object> address = (Map<String, Object>) data.get(ADDRESS);
data.put("Name", "MyCarManufacturer Renamed");
address.put("Street", "Main Street");
//
// Call update
//
olingoApp.update(edm, TEST_CREATE_MANUFACTURER, null, data, statusHandler);
HttpStatusCodes statusCode = statusHandler.await();
assertEquals(HttpStatusCodes.NO_CONTENT, statusCode);
}
@Test
public void testDeleteEntityWithETag() throws Exception {
TestOlingo2ResponseHandler<HttpStatusCodes> statusHandler = new TestOlingo2ResponseHandler<>();
Map<String, Object> data = getEntityData();
@SuppressWarnings("unchecked")
Map<String, Object> address = (Map<String, Object>) data.get(ADDRESS);
data.put("Name", "MyCarManufacturer Renamed");
address.put("Street", "Main Street");
//
// Call delete
//
olingoApp.delete(TEST_CREATE_MANUFACTURER, null, statusHandler);
HttpStatusCodes statusCode = statusHandler.await();
assertEquals(HttpStatusCodes.NO_CONTENT, statusCode);
}
}
| Olingo2AppAPIETagEnabledTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/UnnecessaryAnonymousClassTest.java | {
"start": 1937,
"end": 2531
} | class ____ {
private String camelCase(String x) {
return "hello " + x;
}
void g() {
Function<String, String> f = this::camelCase;
System.err.println(camelCase("world"));
}
}
""")
// Make sure the method body is still reformatted correctly.
.doTest(TEXT_MATCH);
}
@Test
public void variable_static() {
testHelper
.addInputLines(
"Test.java",
"""
import java.util.function.Function;
| Test |
java | google__dagger | javatests/dagger/internal/codegen/DaggerModuleMethodSubject.java | {
"start": 1342,
"end": 1484
} | class ____ extends Subject {
/** A {@link Truth} subject factory for testing Dagger module methods. */
static final | DaggerModuleMethodSubject |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/scripting/bsh/BshScriptUtils.java | {
"start": 8763,
"end": 8934
} | class ____ extends NestedRuntimeException {
private BshExecutionException(EvalError ex) {
super("BeanShell script execution failed", ex);
}
}
}
| BshExecutionException |
java | quarkusio__quarkus | independent-projects/qute/core/src/test/java/io/quarkus/qute/AsyncDataTest.java | {
"start": 272,
"end": 1026
} | class ____ {
@Test
public void testAsyncData() {
Engine engine = Engine.builder().addDefaults().addValueResolver(ValueResolver.builder().applyToBaseClass(Client.class)
.applyToName("tokens").resolveSync(ec -> ((Client) ec.getBase()).getTokens()).build()).build();
assertEquals("alpha:bravo:delta:",
engine.parse("{#for token in client.tokens}{token}:{/for}").data("client", new Client()).render());
assertEquals("alpha:bravo:delta:",
engine.parse("{#for token in tokens}{token}:{/for}").data("tokens", new Client().getTokens()).render());
assertEquals("alpha", engine.parse("{token}").data("token", CompletedStage.of("alpha")).render());
}
static | AsyncDataTest |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/core/publisher/ContextPropagation.java | {
"start": 1355,
"end": 1608
} | class ____ detect if the <a href="https://github.com/micrometer-metrics/context-propagation">context-propagation library</a> is on the classpath and to offer
* ContextSnapshot support to {@link Flux} and {@link Mono}.
*
* @author Simon Baslé
*/
final | to |
java | netty__netty | codec-classes-quic/src/main/java/io/netty/handler/codec/quic/QuicPacketType.java | {
"start": 806,
"end": 1880
} | enum ____ {
/**
* Initial packet.
*/
INITIAL,
/**
* Retry packet.
*/
RETRY,
/**
* Handshake packet.
*/
HANDSHAKE,
/**
* 0-RTT packet.
*/
ZERO_RTT,
/**
* 1-RTT short header packet.
*/
SHORT,
/**
* Version negotiation packet.
*/
VERSION_NEGOTIATION;
/**
* Return the {@link QuicPacketType} for the given byte.
*
* @param type the byte that represent the type.
* @return the {@link QuicPacketType}.
*/
static QuicPacketType of(byte type) {
switch(type) {
case 1:
return INITIAL;
case 2:
return RETRY;
case 3:
return HANDSHAKE;
case 4:
return ZERO_RTT;
case 5:
return SHORT;
case 6:
return VERSION_NEGOTIATION;
default:
throw new IllegalArgumentException("Unknown QUIC packet type: " + type);
}
}
}
| QuicPacketType |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/metamodel/AbstractJpaMetamodelPopulationTest.java | {
"start": 1244,
"end": 1376
} | class ____ {
@EmbeddedId
private CompositeIdId id;
private String data;
}
@Embeddable
public static | CompositeIdAnnotatedEntity |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/mutable/MutableLong.java | {
"start": 1304,
"end": 11113
} | class ____ extends Number implements Comparable<MutableLong>, Mutable<Number> {
/**
* Required for serialization support.
*
* @see java.io.Serializable
*/
private static final long serialVersionUID = 62986528375L;
/** The mutable value. */
private long value;
/**
* Constructs a new MutableLong with the default value of zero.
*/
public MutableLong() {
}
/**
* Constructs a new MutableLong with the specified value.
*
* @param value the initial value to store.
*/
public MutableLong(final long value) {
this.value = value;
}
/**
* Constructs a new MutableLong with the specified value.
*
* @param value the initial value to store, not null.
* @throws NullPointerException if the object is null.
*/
public MutableLong(final Number value) {
this.value = value.longValue();
}
/**
* Constructs a new MutableLong parsing the given string.
*
* @param value the string to parse, not null.
* @throws NumberFormatException if the string cannot be parsed into a long, see {@link Long#parseLong(String)}.
* @since 2.5
*/
public MutableLong(final String value) {
this.value = Long.parseLong(value);
}
/**
* Adds a value to the value of this instance.
*
* @param operand the value to add, not null.
* @since 2.2
*/
public void add(final long operand) {
this.value += operand;
}
/**
* Adds a value to the value of this instance.
*
* @param operand the value to add, not null.
* @throws NullPointerException if the object is null.
* @since 2.2
*/
public void add(final Number operand) {
this.value += operand.longValue();
}
/**
* Increments this instance's value by {@code operand}; this method returns the value associated with the instance
* immediately after the addition operation. This method is not thread safe.
*
* @param operand the quantity to add, not null.
* @return the value associated with this instance after adding the operand.
* @since 3.5
*/
public long addAndGet(final long operand) {
this.value += operand;
return value;
}
/**
* Increments this instance's value by {@code operand}; this method returns the value associated with the instance
* immediately after the addition operation. This method is not thread safe.
*
* @param operand the quantity to add, not null.
* @throws NullPointerException if {@code operand} is null.
* @return the value associated with this instance after adding the operand.
* @since 3.5
*/
public long addAndGet(final Number operand) {
this.value += operand.longValue();
return value;
}
/**
* Compares this mutable to another in ascending order.
*
* @param other the other mutable to compare to, not null.
* @return negative if this is less, zero if equal, positive if greater.
*/
@Override
public int compareTo(final MutableLong other) {
return Long.compare(this.value, other.value);
}
/**
* Decrements the value.
*
* @since 2.2
*/
public void decrement() {
value--;
}
/**
* Decrements this instance's value by 1; this method returns the value associated with the instance
* immediately after the decrement operation. This method is not thread safe.
*
* @return the value associated with the instance after it is decremented.
* @since 3.5
*/
public long decrementAndGet() {
value--;
return value;
}
/**
* Returns the value of this MutableLong as a double.
*
* @return the numeric value represented by this object after conversion to type double.
*/
@Override
public double doubleValue() {
return value;
}
/**
* Compares this object to the specified object. The result is {@code true} if and only if the argument
* is not {@code null} and is a {@link MutableLong} object that contains the same {@code long}
* value as this object.
*
* @param obj the object to compare with, null returns false.
* @return {@code true} if the objects are the same; {@code false} otherwise.
*/
@Override
public boolean equals(final Object obj) {
if (obj instanceof MutableLong) {
return value == ((MutableLong) obj).longValue();
}
return false;
}
/**
* Returns the value of this MutableLong as a float.
*
* @return the numeric value represented by this object after conversion to type float.
*/
@Override
public float floatValue() {
return value;
}
    /**
     * Increments this instance's value by {@code operand}; this method returns the value associated with the instance
     * immediately prior to the addition operation. This method is not thread safe.
     *
     * @param operand the quantity to add.
     * @return the value associated with this instance immediately before the operand was added.
     * @since 3.5
     */
    public long getAndAdd(final long operand) {
        final long last = value;
        this.value += operand;
        return last;
    }
/**
* Increments this instance's value by {@code operand}; this method returns the value associated with the instance
* immediately prior to the addition operation. This method is not thread safe.
*
* @param operand the quantity to add, not null.
* @throws NullPointerException if {@code operand} is null.
* @return the value associated with this instance immediately before the operand was added.
* @since 3.5
*/
public long getAndAdd(final Number operand) {
final long last = value;
this.value += operand.longValue();
return last;
}
/**
* Decrements this instance's value by 1; this method returns the value associated with the instance
* immediately prior to the decrement operation. This method is not thread safe.
*
* @return the value associated with the instance before it was decremented.
* @since 3.5
*/
public long getAndDecrement() {
final long last = value;
value--;
return last;
}
/**
* Increments this instance's value by 1; this method returns the value associated with the instance
* immediately prior to the increment operation. This method is not thread safe.
*
* @return the value associated with the instance before it was incremented.
* @since 3.5
*/
public long getAndIncrement() {
final long last = value;
value++;
return last;
}
/**
* Gets the value as a Long instance.
*
* @return the value as a Long, never null.
* @deprecated Use {@link #get()}.
*/
@Deprecated
@Override
public Long getValue() {
return Long.valueOf(this.value);
}
/**
* Returns a suitable hash code for this mutable.
*
* @return a suitable hash code.
*/
@Override
public int hashCode() {
return (int) (value ^ value >>> 32);
}
/**
* Increments the value.
*
* @since 2.2
*/
public void increment() {
value++;
}
/**
* Increments this instance's value by 1; this method returns the value associated with the instance
* immediately after the increment operation. This method is not thread safe.
*
* @return the value associated with the instance after it is incremented.
* @since 3.5
*/
public long incrementAndGet() {
value++;
return value;
}
// shortValue and byteValue rely on Number implementation
/**
* Returns the value of this MutableLong as an int.
*
* @return the numeric value represented by this object after conversion to type int.
*/
@Override
public int intValue() {
return (int) value;
}
/**
* Returns the value of this MutableLong as a long.
*
* @return the numeric value represented by this object after conversion to type long.
*/
@Override
public long longValue() {
return value;
}
/**
* Sets the value.
*
* @param value the value to set.
*/
public void setValue(final long value) {
this.value = value;
}
/**
* Sets the value from any Number instance.
*
* @param value the value to set, not null.
* @throws NullPointerException if the object is null.
*/
@Override
public void setValue(final Number value) {
this.value = value.longValue();
}
    /**
     * Subtracts a value from the value of this instance.
     *
     * <p>Long overflow wraps around silently, as with the {@code -=} operator.</p>
     *
     * @param operand the value to subtract.
     * @since 2.2
     */
    public void subtract(final long operand) {
        this.value -= operand;
    }
/**
* Subtracts a value from the value of this instance.
*
* @param operand the value to subtract, not null.
* @throws NullPointerException if the object is null.
* @since 2.2
*/
public void subtract(final Number operand) {
this.value -= operand.longValue();
}
/**
* Gets this mutable as an instance of Long.
*
* @return a Long instance containing the value from this mutable, never null.
*/
public Long toLong() {
return Long.valueOf(longValue());
}
/**
* Returns the String value of this mutable.
*
* @return the mutable value as a string.
*/
@Override
public String toString() {
return String.valueOf(value);
}
}
| MutableLong |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/config/plugins/convert/TypeConverters.java | {
"start": 13663,
"end": 14031
} | class ____ a TypeConverter is available for that class. Falls back to the provided
* default value if the conversion is unsuccessful. However, if the default value is <em>also</em> invalid, then
* {@code null} is returned (along with a nasty status log message).
*
* @param s
* the string to convert
* @param clazz
* the | if |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java | {
"start": 2785,
"end": 3415
} | class ____ detail message of {@code cause}). This
* constructor is useful for IO exceptions that are little more than
* wrappers for other throwables.
*
* @param cause The cause (which is saved for later retrieval by the {@link
* #getCause()} method). (A null value is permitted, and
* indicates
* that the cause is nonexistent or unknown.)
*/
public DiskBalancerException(Throwable cause, Result result) {
super(cause);
this.result = result;
}
/**
* Returns the result.
* @return int
*/
public Result getResult() {
return result;
}
}
| and |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/JUnit4TestNotRunTest.java | {
"start": 12730,
"end": 13118
} | class ____ {
public void testDoSomething() {}
}
""")
.addOutputLines(
"out/TestStuff.java",
"""
import org.junit.Ignore;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
@RunWith(JUnit4.class)
public | TestStuff |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/api/recursive/comparison/fields/RecursiveComparisonAssert_bddSoftAssertions_Test.java | {
"start": 983,
"end": 2773
} | class ____ extends WithComparingFieldsIntrospectionStrategyBaseTest {
private BDDSoftAssertions softly;
@BeforeEach
public void beforeEachTest() {
super.beforeEachTest();
Assertions.setRemoveAssertJRelatedElementsFromStackTrace(false);
softly = new BDDSoftAssertions();
}
@Test
void should_pass_with_bdd_soft_assertions() {
// GIVEN
Person actual = new Person("John");
actual.home.address.number = 1;
Person expected = new Person("John");
expected.home.address.number = 1;
// WHEN
softly.then(actual).usingRecursiveComparison(recursiveComparisonConfiguration).isEqualTo(expected);
// THEN
softly.assertAll();
}
@Test
void should_report_all_errors_with_bdd_soft_assertions() {
// GIVEN
Person john = new Person("John");
john.home.address.number = 1;
Person jack = new Person("Jack");
jack.home.address.number = 2;
// WHEN
softly.then(john).usingRecursiveComparison(recursiveComparisonConfiguration).isEqualTo(jack);
softly.then(jack).usingRecursiveComparison(recursiveComparisonConfiguration).isEqualTo(john);
// THEN
List<Throwable> errorsCollected = softly.errorsCollected();
then(errorsCollected).hasSize(2);
then(errorsCollected.get(0)).hasMessageContaining("field/property 'home.address.number' differ:")
.hasMessageContaining("- actual value : 1")
.hasMessageContaining("- expected value: 2");
then(errorsCollected.get(1)).hasMessageContaining("field/property 'home.address.number' differ:")
.hasMessageContaining("- actual value : 2")
.hasMessageContaining("- expected value: 1");
}
}
| RecursiveComparisonAssert_bddSoftAssertions_Test |
java | apache__flink | flink-test-utils-parent/flink-test-utils-junit/src/main/java/org/apache/flink/core/testutils/FilteredClassLoader.java | {
"start": 1551,
"end": 2168
} | class ____ to filter out.
*/
public FilteredClassLoader(ClassLoader delegate, String... filteredClassNames) {
super(Objects.requireNonNull(delegate));
this.filteredClassNames = new HashSet<>(Arrays.asList(filteredClassNames));
}
@Override
protected Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
synchronized (this) {
if (filteredClassNames.contains(name)) {
throw new ClassNotFoundException(name);
} else {
return super.loadClass(name, resolve);
}
}
}
}
| names |
java | alibaba__fastjson | src/test/java/com/alibaba/json/test/benchmark/basic/IntBenchmark.java | {
"start": 161,
"end": 2302
} | class ____ {
static String json = "{\"v1\":-1224609302,\"v2\":379420556,\"v3\":-1098099527,\"v4\":-2018662,\"v5\":422842162}";
static String json2 = "{\"v1\":\"-1224609302\",\"v2\":\"379420556\",\"v3\":\"-1098099527\",\"v4\":\"-2018662\",\"v5\":\"422842162\"}";
static String json3 = "{\n" +
"\t\"v1\":\"-1224609302\",\n" +
"\t\"v2\":\"379420556\",\n" +
"\t\"v3\":\"-1098099527\",\n" +
"\t\"v4\":\"-2018662\",\n" +
"\t\"v5\":\"422842162\"\n" +
"}";
public static void main(String[] args) throws Exception {
System.out.println(System.getProperty("java.vm.name") + " " + System.getProperty("java.runtime.version"));
// Model model = new Model();
// model.v1 = new Random().nextInt();
// model.v2 = new Random().nextInt();
// model.v3 = new Random().nextInt();
// model.v4 = new Random().nextInt();
// model.v5 = new Random().nextInt();
//
// System.out.println(JSON.toJSONString(model));
for (int i = 0; i < 10; ++i) {
perf(); // 1798
// perf2(); // 1877
// perf3(); // 20624 2334
}
}
public static void perf() {
long start = System.currentTimeMillis();
for (int i = 0; i < 1000 * 1000 * 10; ++i) {
JSON.parseObject(json, Model.class);
}
long millis = System.currentTimeMillis() - start;
System.out.println("millis : " + millis);
}
public static void perf2() {
long start = System.currentTimeMillis();
for (int i = 0; i < 1000 * 1000 * 10; ++i) {
JSON.parseObject(json2, Model.class);
}
long millis = System.currentTimeMillis() - start;
System.out.println("millis : " + millis);
}
public static void perf3() {
long start = System.currentTimeMillis();
for (int i = 0; i < 1000 * 1000 * 10; ++i) {
JSON.parseObject(json3, Model.class);
}
long millis = System.currentTimeMillis() - start;
System.out.println("millis : " + millis);
}
public static | IntBenchmark |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/error/ShouldHaveNoParent.java | {
"start": 823,
"end": 1790
} | class ____ extends BasicErrorMessageFactory {
private static final String PATH_HAS_PARENT = "%nExpected actual path:%n %s%n not to have a parent, but parent was:%n %s";
private static final String FILE_HAS_PARENT = "%nExpecting file (or directory):%n %s%nnot to have a parent, but parent was:%n %s";
/**
* Creates a new <code>{@link ShouldHaveNoParent}</code>.
*
* @param actual the actual value in the failed assertion.
* @return the created {@code ErrorMessageFactory}.
*/
public static ShouldHaveNoParent shouldHaveNoParent(File actual) {
return new ShouldHaveNoParent(actual);
}
public static ShouldHaveNoParent shouldHaveNoParent(Path actual) {
return new ShouldHaveNoParent(actual);
}
private ShouldHaveNoParent(File actual) {
super(FILE_HAS_PARENT, actual, actual.getParentFile());
}
private ShouldHaveNoParent(Path actual) {
super(PATH_HAS_PARENT, actual, actual.getParent());
}
}
| ShouldHaveNoParent |
java | apache__flink | flink-examples/flink-examples-streaming/src/test/java/org/apache/flink/streaming/test/socket/SocketWindowWordCountITCase.java | {
"start": 4706,
"end": 4809
} | class ____ extends OutputStream {
@Override
public void write(int b) {}
}
}
| NullStream |
java | quarkusio__quarkus | integration-tests/vertx/src/test/java/io/quarkus/it/vertx/JsonWriterTest.java | {
"start": 276,
"end": 1111
} | class ____ {
@Test
public void testJsonSync() {
RestAssured.when().get("/vertx-test/json-bodies/json/sync").then()
.statusCode(200).body("Hello", equalTo("World"));
}
@Test
public void testArraySync() {
RestAssured.when().get("/vertx-test/json-bodies/array/sync").then()
.statusCode(200).body("", equalTo(Arrays.asList("Hello", "World")));
}
@Test
public void testJsonAsync() {
RestAssured.when().get("/vertx-test/json-bodies/json/async").then()
.statusCode(200).body("Hello", equalTo("World"));
}
@Test
public void testArrayAsync() {
RestAssured.when().get("/vertx-test/json-bodies/array/async").then()
.statusCode(200).body("", equalTo(Arrays.asList("Hello", "World")));
}
}
| JsonWriterTest |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/partition/ClusterPartitionReport.java | {
"start": 1413,
"end": 2057
} | class ____ implements Serializable {
private static final long serialVersionUID = -3150175198722481689L;
private final Collection<ClusterPartitionReportEntry> entries;
public ClusterPartitionReport(final Collection<ClusterPartitionReportEntry> entries) {
this.entries = checkNotNull(entries);
}
public Collection<ClusterPartitionReportEntry> getEntries() {
return entries;
}
@Override
public String toString() {
return "PartitionReport{" + "entries=" + entries + '}';
}
/** An entry describing all partitions belonging to one dataset. */
public static | ClusterPartitionReport |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DataOutputBuffer.java | {
"start": 1704,
"end": 1772
} | class ____ extends DataOutputStream {
private static | DataOutputBuffer |
java | spring-projects__spring-framework | spring-web/src/test/java/org/springframework/http/client/support/InterceptingHttpAccessorTests.java | {
"start": 2524,
"end": 2757
} | class ____ implements ClientHttpRequestInterceptor {
@Override
public ClientHttpResponse intercept(HttpRequest request, byte[] body, ClientHttpRequestExecution execution) {
return null;
}
}
}
| ThirdClientHttpRequestInterceptor |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java | {
"start": 1224,
"end": 1372
} | class ____ extends InternalMappedTerms<StringTerms, StringTerms.Bucket> {
public static final String NAME = "sterms";
public static | StringTerms |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java | {
"start": 4041,
"end": 5673
} | class ____ the client proxy
* used to make calls to the rpc server.
* @param rpcRequest - deserialized
* @param receiveTime time at which the call received (for metrics)
* @return the call's return
* @throws IOException
**/
public Writable call(Server server, String protocol,
Writable rpcRequest, long receiveTime) throws Exception ;
}
static final Logger LOG = LoggerFactory.getLogger(RPC.class);
/**
* Get all superInterfaces that extend VersionedProtocol
* @param childInterfaces
* @return the super interfaces that extend VersionedProtocol
*/
static Class<?>[] getSuperInterfaces(Class<?>[] childInterfaces) {
List<Class<?>> allInterfaces = new ArrayList<Class<?>>();
for (Class<?> childInterface : childInterfaces) {
if (VersionedProtocol.class.isAssignableFrom(childInterface)) {
allInterfaces.add(childInterface);
allInterfaces.addAll(
Arrays.asList(
getSuperInterfaces(childInterface.getInterfaces())));
} else {
LOG.warn("Interface " + childInterface +
" ignored because it does not extend VersionedProtocol");
}
}
return allInterfaces.toArray(new Class[allInterfaces.size()]);
}
/**
* Get all interfaces that the given protocol implements or extends
* which are assignable from VersionedProtocol.
*/
static Class<?>[] getProtocolInterfaces(Class<?> protocol) {
Class<?>[] interfaces = protocol.getInterfaces();
return getSuperInterfaces(interfaces);
}
/**
* Get the protocol name.
* If the protocol | of |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/JUnit4SetUpNotRunTest.java | {
"start": 5373,
"end": 5771
} | class ____ {
// This will compile-fail and suggest the import of org.junit.Before
// BUG: Diagnostic contains: @Before
@Before
public void initMocks() {}
// BUG: Diagnostic contains: @Before
@Before
protected void badVisibility() {}
}
@ | JUnit4SetUpNotRunPositiveCaseCustomBefore2 |
java | spring-projects__spring-boot | core/spring-boot-autoconfigure/src/test/java/org/springframework/boot/autoconfigure/condition/ConditionalOnBooleanPropertyTests.java | {
"start": 1713,
"end": 7112
} | class ____ {
private @Nullable ConfigurableApplicationContext context;
private final ConfigurableEnvironment environment = new StandardEnvironment();
@AfterEach
void tearDown() {
if (this.context != null) {
this.context.close();
}
}
@Test
void defaultsWhenTrue() {
load(Defaults.class, "test=true");
assertThat(containsBean()).isTrue();
}
@Test
void defaultsWhenFalse() {
load(Defaults.class, "test=false");
assertThat(containsBean()).isFalse();
}
@Test
void defaultsWhenMissing() {
load(Defaults.class);
assertThat(containsBean()).isFalse();
}
@Test
void havingValueTrueMatchIfMissingFalseWhenTrue() {
load(HavingValueTrueMatchIfMissingFalse.class, "test=true");
assertThat(containsBean()).isTrue();
}
@Test
void havingValueTrueMatchIfMissingFalseWhenFalse() {
load(HavingValueTrueMatchIfMissingFalse.class, "test=false");
assertThat(containsBean()).isFalse();
}
@Test
void havingValueTrueMatchIfMissingFalseWhenMissing() {
load(HavingValueTrueMatchIfMissingFalse.class);
assertThat(containsBean()).isFalse();
}
@Test
void havingValueTrueMatchIfMissingTrueWhenTrue() {
load(HavingValueTrueMatchIfMissingTrue.class, "test=true");
assertThat(containsBean()).isTrue();
}
@Test
void havingValueTrueMatchIfMissingTrueWhenFalse() {
load(HavingValueTrueMatchIfMissingTrue.class, "test=false");
assertThat(containsBean()).isFalse();
}
@Test
void havingValueTrueMatchIfMissingTrueWhenMissing() {
load(HavingValueTrueMatchIfMissingTrue.class);
assertThat(containsBean()).isTrue();
}
@Test
void havingValueFalseMatchIfMissingFalseWhenTrue() {
load(HavingValueFalseMatchIfMissingFalse.class, "test=true");
assertThat(containsBean()).isFalse();
}
@Test
void havingValueFalseMatchIfMissingFalseWhenFalse() {
load(HavingValueFalseMatchIfMissingFalse.class, "test=false");
assertThat(containsBean()).isTrue();
}
@Test
void havingValueFalseMatchIfMissingFalseWhenMissing() {
load(HavingValueFalseMatchIfMissingFalse.class);
assertThat(containsBean()).isFalse();
}
@Test
void havingValueFalseMatchIfMissingTrueWhenTrue() {
load(HavingValueFalseMatchIfMissingTrue.class, "test=true");
assertThat(containsBean()).isFalse();
}
@Test
void havingValueFalseMatchIfMissingTrueWhenFalse() {
load(HavingValueFalseMatchIfMissingTrue.class, "test=false");
assertThat(containsBean()).isTrue();
}
@Test
void havingValueFalseMatchIfMissingTrueWhenMissing() {
load(HavingValueFalseMatchIfMissingTrue.class);
assertThat(containsBean()).isTrue();
}
@Test
void withPrefix() {
load(HavingValueFalseMatchIfMissingTrue.class, "foo.test=true");
assertThat(containsBean()).isTrue();
}
@Test
void nameOrValueMustBeSpecified() {
assertThatIllegalStateException().isThrownBy(() -> load(NoNameOrValueAttribute.class, "some.property"))
.satisfies(causeMessageContaining(
"The name or value attribute of @ConditionalOnBooleanProperty must be specified"));
}
@Test
void nameAndValueMustNotBeSpecified() {
assertThatIllegalStateException().isThrownBy(() -> load(NameAndValueAttribute.class, "some.property"))
.satisfies(causeMessageContaining(
"The name and value attributes of @ConditionalOnBooleanProperty are exclusive"));
}
@Test
void conditionReportWhenMatched() {
load(Defaults.class, "test=true");
assertThat(containsBean()).isTrue();
assertThat(getConditionEvaluationReport()).contains("@ConditionalOnBooleanProperty (test=true) matched");
}
@Test
void conditionReportWhenDoesNotMatch() {
load(Defaults.class, "test=false");
assertThat(containsBean()).isFalse();
assertThat(getConditionEvaluationReport())
.contains("@ConditionalOnBooleanProperty (test=true) found different value in property 'test'");
}
@Test
void repeatablePropertiesConditionReportWhenMatched() {
load(RepeatablePropertiesRequiredConfiguration.class, "property1=true", "property2=true");
assertThat(containsBean()).isTrue();
String report = getConditionEvaluationReport();
assertThat(report).contains("@ConditionalOnBooleanProperty (property1=true) matched");
assertThat(report).contains("@ConditionalOnBooleanProperty (property2=true) matched");
}
@Test
void repeatablePropertiesConditionReportWhenDoesNotMatch() {
load(RepeatablePropertiesRequiredConfiguration.class, "property1=true");
assertThat(getConditionEvaluationReport())
.contains("@ConditionalOnBooleanProperty (property2=true) did not find property 'property2'");
}
private <T extends Exception> Consumer<T> causeMessageContaining(String message) {
return (ex) -> assertThat(ex.getCause()).hasMessageContaining(message);
}
private String getConditionEvaluationReport() {
assertThat(this.context).isNotNull();
return ConditionEvaluationReport.get(this.context.getBeanFactory())
.getConditionAndOutcomesBySource()
.values()
.stream()
.flatMap(ConditionAndOutcomes::stream)
.map(Object::toString)
.collect(Collectors.joining("\n"));
}
private void load(Class<?> config, String... environment) {
TestPropertyValues.of(environment).applyTo(this.environment);
this.context = new SpringApplicationBuilder(config).environment(this.environment)
.web(WebApplicationType.NONE)
.run();
}
private boolean containsBean() {
assertThat(this.context).isNotNull();
return this.context.containsBean("foo");
}
abstract static | ConditionalOnBooleanPropertyTests |
java | spring-cloud__spring-cloud-gateway | spring-cloud-gateway-server-webflux/src/test/java/org/springframework/cloud/gateway/test/websocket/WebSocketIntegrationTests.java | {
"start": 14409,
"end": 14651
} | class ____ implements WebSocketHandler {
@Override
public Mono<Void> handle(WebSocketSession session) {
return session.closeStatus().doOnNext(serverCloseStatusSink::tryEmitValue).then();
}
}
private static final | ClientClosingHandler |
java | spring-projects__spring-framework | spring-orm/src/test/java/org/springframework/orm/jpa/domain/PersonListener.java | {
"start": 870,
"end": 1028
} | class ____ {
@Autowired
ApplicationContext context;
@PostLoad
public void postLoad(Person person) {
person.postLoaded = this.context;
}
}
| PersonListener |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/LangChain4jChatEndpointBuilderFactory.java | {
"start": 22629,
"end": 22991
} | class ____ extends AbstractEndpointBuilder implements LangChain4jChatEndpointBuilder, AdvancedLangChain4jChatEndpointBuilder {
public LangChain4jChatEndpointBuilderImpl(String path) {
super(componentName, path);
}
}
return new LangChain4jChatEndpointBuilderImpl(path);
}
} | LangChain4jChatEndpointBuilderImpl |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/qualifiers/RepeatingQualifierObserverTest.java | {
"start": 1983,
"end": 2500
} | class ____ {
List<String> events = new ArrayList<>();
public void observeHome(@Observes @Location("home") String s) {
events.add(s);
}
public void observeFarAway(@Observes @Location("farAway") String s) {
events.add(s);
}
public void observeWork(@Observes @Location("work") @Location("office") String s) {
events.add(s);
}
public List<String> getEvents() {
return events;
}
}
}
| ObservingBean |
java | apache__kafka | metadata/src/test/java/org/apache/kafka/metadata/PartitionRegistrationTest.java | {
"start": 2142,
"end": 23320
} | class ____ {
@Test
public void testElectionWasUnclean() {
assertFalse(PartitionRegistration.electionWasUnclean(LeaderRecoveryState.RECOVERED.value()));
assertTrue(PartitionRegistration.electionWasUnclean(LeaderRecoveryState.RECOVERING.value()));
}
@Test
public void testPartitionControlInfoMergeAndDiff() {
PartitionRegistration a = new PartitionRegistration.Builder().
setReplicas(new int[]{1, 2, 3}).setDirectories(DirectoryId.unassignedArray(3)).
setIsr(new int[]{1, 2}).setLeader(1).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).setLeaderEpoch(0).setPartitionEpoch(0).build();
PartitionRegistration b = new PartitionRegistration.Builder().
setReplicas(new int[]{1, 2, 3}).setDirectories(DirectoryId.unassignedArray(3)).
setIsr(new int[]{3}).setLeader(3).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).setLeaderEpoch(1).setPartitionEpoch(1).build();
PartitionRegistration c = new PartitionRegistration.Builder().
setReplicas(new int[]{1, 2, 3}).setDirectories(DirectoryId.unassignedArray(3)).
setIsr(new int[]{1}).setLastKnownElr(new int[]{3}).setElr(new int[]{2}).setLeader(1).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).setLeaderEpoch(0).setPartitionEpoch(1).build();
assertEquals(b, a.merge(new PartitionChangeRecord().
setLeader(3).setIsr(List.of(3))));
assertEquals("isr: [1, 2] -> [3], leader: 1 -> 3, leaderEpoch: 0 -> 1, partitionEpoch: 0 -> 1",
b.diff(a));
assertEquals("isr: [1, 2] -> [1], elr: [] -> [2], lastKnownElr: [] -> [3], partitionEpoch: 0 -> 1",
c.diff(a));
}
@Test
public void testRecordRoundTrip() {
PartitionRegistration registrationA = new PartitionRegistration.Builder().
setReplicas(new int[]{1, 2, 3}).
setDirectories(DirectoryId.migratingArray(3)).
setIsr(new int[]{1, 2}).setRemovingReplicas(new int[]{1}).setLeader(1).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).setLeaderEpoch(0).setPartitionEpoch(0).build();
Uuid topicId = Uuid.fromString("OGdAI5nxT_m-ds3rJMqPLA");
int partitionId = 4;
ApiMessageAndVersion record = registrationA.toRecord(topicId, partitionId,
new ImageWriterOptions.Builder(MetadataVersion.IBP_3_7_IV0).build()); // highest MV for PartitionRecord v0
PartitionRegistration registrationB =
new PartitionRegistration((PartitionRecord) record.message());
assertEquals(registrationA, registrationB);
}
@Test
public void testMergePartitionChangeRecordWithReassignmentData() {
Uuid dir1 = Uuid.fromString("FbRuu7CeQtq5YFreEzg16g");
Uuid dir2 = Uuid.fromString("4rtHTelWSSStAFMODOg3cQ");
Uuid dir3 = Uuid.fromString("Id1WXzHURROilVxZWJNZlw");
PartitionRegistration partition0 = new PartitionRegistration.Builder().setReplicas(new int[] {1, 2, 3}).
setDirectories(new Uuid[]{dir1, dir2, dir3}).
setIsr(new int[] {1, 2, 3}).setLeader(1).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).setLeaderEpoch(100).setPartitionEpoch(200).build();
PartitionRegistration partition1 = partition0.merge(new PartitionChangeRecord().
setRemovingReplicas(List.of(3)).
setAddingReplicas(List.of(4)).
setReplicas(List.of(1, 2, 3, 4)).
setDirectories(List.of(dir1, dir2, dir3, DirectoryId.UNASSIGNED)));
assertEquals(new PartitionRegistration.Builder().setReplicas(new int[] {1, 2, 3, 4}).
setDirectories(new Uuid[]{dir1, dir2, dir3, DirectoryId.UNASSIGNED}).
setIsr(new int[] {1, 2, 3}).setRemovingReplicas(new int[] {3}).setAddingReplicas(new int[] {4}).setLeader(1).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).setLeaderEpoch(100).setPartitionEpoch(201).build(), partition1);
PartitionRegistration partition2 = partition1.merge(new PartitionChangeRecord().
setIsr(List.of(1, 2, 4)).
setRemovingReplicas(List.of()).
setAddingReplicas(List.of()).
setReplicas(List.of(1, 2, 4)).
setDirectories(List.of(dir1, dir2, DirectoryId.UNASSIGNED)));
assertEquals(new PartitionRegistration.Builder().setReplicas(new int[] {1, 2, 4}).
setDirectories(new Uuid[]{dir1, dir2, DirectoryId.UNASSIGNED}).
setIsr(new int[] {1, 2, 4}).setLeader(1).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).setLeaderEpoch(100).setPartitionEpoch(202).build(), partition2);
}
@Test
public void testBuilderThrowsIllegalStateExceptionWhenMissingReplicas() {
PartitionRegistration.Builder builder = new PartitionRegistration.Builder();
IllegalStateException exception = assertThrows(IllegalStateException.class, builder::build);
assertEquals("You must set replicas.", exception.getMessage());
}
@Test
public void testBuilderThrowsIllegalStateExceptionWhenMissingIsr() {
PartitionRegistration.Builder builder = new PartitionRegistration.Builder().
setReplicas(new int[]{0}).setDirectories(new Uuid[]{DirectoryId.UNASSIGNED});
IllegalStateException exception = assertThrows(IllegalStateException.class, builder::build);
assertEquals("You must set isr.", exception.getMessage());
}
@Test
public void testBuilderThrowsIllegalStateExceptionWhenMissingLeader() {
PartitionRegistration.Builder builder = new PartitionRegistration.Builder().
setReplicas(new int[]{0}).
setDirectories(new Uuid[]{DirectoryId.LOST}).
setIsr(new int[]{0}).
setRemovingReplicas(new int[]{0}).
setAddingReplicas(new int[]{0});
IllegalStateException exception = assertThrows(IllegalStateException.class, builder::build);
assertEquals("You must set leader.", exception.getMessage());
}
@Test
public void testBuilderThrowsIllegalStateExceptionWhenMissingLeaderRecoveryState() {
PartitionRegistration.Builder builder = new PartitionRegistration.Builder().
setReplicas(new int[]{0}).
setDirectories(new Uuid[]{DirectoryId.MIGRATING}).
setIsr(new int[]{0}).
setRemovingReplicas(new int[]{0}).
setAddingReplicas(new int[]{0}).
setLeader(0);
IllegalStateException exception = assertThrows(IllegalStateException.class, builder::build);
assertEquals("You must set leader recovery state.", exception.getMessage());
}
@Test
public void testBuilderThrowsIllegalStateExceptionWhenMissingLeaderEpoch() {
PartitionRegistration.Builder builder = new PartitionRegistration.Builder().
setReplicas(new int[]{0}).
setDirectories(new Uuid[]{Uuid.fromString("OP4I696sRmCPanlNidxJYw")}).
setIsr(new int[]{0}).
setRemovingReplicas(new int[]{0}).
setAddingReplicas(new int[]{0}).
setLeader(0).
setLeaderRecoveryState(LeaderRecoveryState.RECOVERED);
IllegalStateException exception = assertThrows(IllegalStateException.class, builder::build);
assertEquals("You must set leader epoch.", exception.getMessage());
}
@Test
public void testBuilderThrowsIllegalStateExceptionWhenMissingPartitionEpoch() {
PartitionRegistration.Builder builder = new PartitionRegistration.Builder().
setReplicas(new int[]{0}).
setDirectories(DirectoryId.migratingArray(1)).
setIsr(new int[]{0}).
setRemovingReplicas(new int[]{0}).
setAddingReplicas(new int[]{0}).
setLeader(0).
setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).
setLeaderEpoch(0);
IllegalStateException exception = assertThrows(IllegalStateException.class, builder::build);
assertEquals("You must set partition epoch.", exception.getMessage());
}
@Test
public void testBuilderSuccess() {
PartitionRegistration.Builder builder = new PartitionRegistration.Builder().
setReplicas(new int[]{0, 1, 2}).
setDirectories(DirectoryId.unassignedArray(3)).
setIsr(new int[]{0, 1}).
setElr(new int[]{2}).
setLastKnownElr(new int[]{0, 1, 2}).
setRemovingReplicas(new int[]{0}).
setAddingReplicas(new int[]{1}).
setLeader(0).
setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).
setLeaderEpoch(0).
setPartitionEpoch(0);
PartitionRegistration partitionRegistration = builder.build();
assertEquals(Replicas.toList(new int[]{0, 1, 2}), Replicas.toList(partitionRegistration.replicas));
assertEquals(Replicas.toList(new int[]{0, 1}), Replicas.toList(partitionRegistration.isr));
assertEquals(Replicas.toList(new int[]{2}), Replicas.toList(partitionRegistration.elr));
assertEquals(Replicas.toList(new int[]{0, 1, 2}), Replicas.toList(partitionRegistration.lastKnownElr));
assertEquals(Replicas.toList(new int[]{0}), Replicas.toList(partitionRegistration.removingReplicas));
assertEquals(Replicas.toList(new int[]{1}), Replicas.toList(partitionRegistration.addingReplicas));
assertEquals(0, partitionRegistration.leader);
assertEquals(LeaderRecoveryState.RECOVERED, partitionRegistration.leaderRecoveryState);
assertEquals(0, partitionRegistration.leaderEpoch);
assertEquals(0, partitionRegistration.partitionEpoch);
}
@Test
public void testBuilderSetsDefaultAddingAndRemovingReplicas() {
PartitionRegistration.Builder builder = new PartitionRegistration.Builder().
setReplicas(new int[]{0, 1}).
setDirectories(DirectoryId.migratingArray(2)).
setIsr(new int[]{0, 1}).
setLeader(0).
setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).
setLeaderEpoch(0).
setPartitionEpoch(0);
PartitionRegistration partitionRegistration = builder.build();
assertEquals(Replicas.toList(Replicas.NONE), Replicas.toList(partitionRegistration.removingReplicas));
assertEquals(Replicas.toList(Replicas.NONE), Replicas.toList(partitionRegistration.addingReplicas));
}
private static Stream<Arguments> metadataVersionsForTestPartitionRegistration() {
return Stream.of(
MetadataVersion.IBP_3_7_IV1,
MetadataVersion.IBP_3_7_IV2,
MetadataVersion.IBP_4_0_IV1
).map(Arguments::of);
}
    @ParameterizedTest
    @MethodSource("metadataVersionsForTestPartitionRegistration")
    public void testPartitionRegistrationToRecord(MetadataVersion metadataVersion) {
        // Verifies that toRecord() serializes a registration into the expected
        // PartitionRecord for each metadata version: ELR fields and directory
        // assignments are only written when the version supports them, and a
        // loss-handler message is raised when directories must be dropped.
        PartitionRegistration.Builder builder = new PartitionRegistration.Builder().
            setReplicas(new int[]{0, 1, 2, 3, 4}).
            setDirectories(new Uuid[]{
                DirectoryId.UNASSIGNED,
                Uuid.fromString("KBJBm9GVRAG9Ffe25odmmg"),
                DirectoryId.LOST,
                Uuid.fromString("7DZNT5qBS7yFF7VMMHS7kw"),
                Uuid.fromString("cJGPUZsMSEqbidOLYLOIXg")
            }).
            setIsr(new int[]{0, 1}).
            setLeader(0).
            setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).
            setLeaderEpoch(0).
            setPartitionEpoch(0).
            setElr(new int[]{2, 3}).
            setLastKnownElr(new int[]{4});
        PartitionRegistration partitionRegistration = builder.build();
        Uuid topicID = Uuid.randomUuid();
        PartitionRecord expectRecord = new PartitionRecord().
            setTopicId(topicID).
            setPartitionId(0).
            setReplicas(List.of(0, 1, 2, 3, 4)).
            setIsr(List.of(0, 1)).
            setLeader(0).
            setLeaderRecoveryState(LeaderRecoveryState.RECOVERED.value()).
            setLeaderEpoch(0).
            setPartitionEpoch(0);
        // ELR fields appear in the record only on ELR-capable versions.
        if (metadataVersion.isElrSupported()) {
            expectRecord.
                setEligibleLeaderReplicas(List.of(2, 3)).
                setLastKnownElr(List.of(4));
        }
        // Directory assignments appear only on versions that support them.
        if (metadataVersion.isDirectoryAssignmentSupported()) {
            expectRecord.setDirectories(List.of(
                DirectoryId.UNASSIGNED,
                Uuid.fromString("KBJBm9GVRAG9Ffe25odmmg"),
                DirectoryId.LOST,
                Uuid.fromString("7DZNT5qBS7yFF7VMMHS7kw"),
                Uuid.fromString("cJGPUZsMSEqbidOLYLOIXg")
            ));
        }
        // Capture any data-loss complaints emitted during serialization.
        List<UnwritableMetadataException> exceptions = new ArrayList<>();
        ImageWriterOptions options = new ImageWriterOptions.Builder(metadataVersion).
            setEligibleLeaderReplicasEnabled(metadataVersion.isElrSupported()).
            setLossHandler(exceptions::add).
            build();
        assertEquals(new ApiMessageAndVersion(expectRecord, metadataVersion.partitionRecordVersion()),
            partitionRegistration.toRecord(topicID, 0, options));
        // Downgrading away directory assignments must be reported to the loss handler.
        if (!metadataVersion.isDirectoryAssignmentSupported()) {
            assertTrue(exceptions.stream().
                anyMatch(e -> e.getMessage().contains("the directory assignment state of one or more replicas")));
        }
        assertEquals(Replicas.toList(Replicas.NONE), Replicas.toList(partitionRegistration.addingReplicas));
    }
    @Test
    public void testPartitionRegistrationToRecord_ElrShouldBeNullIfEmpty() {
        // The builder never sets ELR or lastKnownElr, so the serialized record
        // must leave EligibleLeaderReplicas/LastKnownElr at their defaults
        // rather than writing empty lists.
        PartitionRegistration.Builder builder = new PartitionRegistration.Builder().
            setReplicas(new int[]{0, 1, 2, 3, 4}).
            setDirectories(DirectoryId.migratingArray(5)).
            setIsr(new int[]{0, 1}).
            setLeader(0).
            setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).
            setLeaderEpoch(0).
            setPartitionEpoch(0);
        PartitionRegistration partitionRegistration = builder.build();
        Uuid topicID = Uuid.randomUuid();
        // Expected record: no ELR fields are set here on purpose.
        PartitionRecord expectRecord = new PartitionRecord().
            setTopicId(topicID).
            setPartitionId(0).
            setReplicas(List.of(0, 1, 2, 3, 4)).
            setIsr(List.of(0, 1)).
            setLeader(0).
            setLeaderRecoveryState(LeaderRecoveryState.RECOVERED.value()).
            setLeaderEpoch(0).
            setDirectories(List.of(DirectoryId.migratingArray(5))).
            setPartitionEpoch(0);
        List<UnwritableMetadataException> exceptions = new ArrayList<>();
        ImageWriterOptions options = new ImageWriterOptions.Builder(MetadataVersion.IBP_4_0_IV1).
            setLossHandler(exceptions::add).
            build();
        // Record version 2 is expected for IBP_4_0_IV1 here.
        assertEquals(new ApiMessageAndVersion(expectRecord, (short) 2), partitionRegistration.toRecord(topicID, 0, options));
        assertEquals(Replicas.toList(Replicas.NONE), Replicas.toList(partitionRegistration.addingReplicas));
        // Nothing should have been reported as lost during serialization.
        assertTrue(exceptions.isEmpty());
    }
@Property
public void testConsistentEqualsAndHashCode(
@ForAll("uniqueSamples") PartitionRegistration a,
@ForAll("uniqueSamples") PartitionRegistration b
) {
if (a.equals(b)) {
assertEquals(a.hashCode(), b.hashCode(), "a=" + a + "\nb=" + b);
}
if (a.hashCode() != b.hashCode()) {
assertNotEquals(a, b, "a=" + a + "\nb=" + b);
}
}
    /**
     * Supplies a fixed pool of {@code PartitionRegistration} samples for the
     * equals/hashCode property test. Each sample differs from the others in at
     * least one field (directories, leader, epochs, ELR state, or
     * adding/removing replicas) so the property test exercises unequal pairs;
     * equal pairs arise when the generator picks the same sample twice.
     */
    @Provide
    Arbitrary<PartitionRegistration> uniqueSamples() {
        return Arbitraries.of(
            new PartitionRegistration.Builder().setReplicas(new int[] {1, 2, 3}).setIsr(new int[] {1, 2, 3}).
                setDirectories(new Uuid[]{Uuid.fromString("HyTsxr8hT6Gq5heZMA2Bug"), Uuid.fromString("ePwTiSgFRvaKRBaUX3EcZQ"), Uuid.fromString("F3zwSDR1QWGKNNLMowVoYg")}).
                setLeader(1).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).setLeaderEpoch(100).setPartitionEpoch(200).setElr(new int[] {1, 2, 3}).build(),
            new PartitionRegistration.Builder().setReplicas(new int[] {1, 2, 3}).setIsr(new int[] {1, 2, 3}).
                setDirectories(new Uuid[]{Uuid.fromString("94alcrMLQ6GOV8EHfAxJnA"), Uuid.fromString("LlD2QCA5RpalzKwPsUTGpw"), Uuid.fromString("Ahfjx9j5SIKpmz48pTLFRg")}).
                setLeader(1).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).setLeaderEpoch(101).setPartitionEpoch(200).setLastKnownElr(new int[] {1, 2}).build(),
            new PartitionRegistration.Builder().setReplicas(new int[] {1, 2, 3}).setIsr(new int[] {1, 2, 3}).
                setDirectories(new Uuid[]{Uuid.fromString("KcXLjTpYSPGjM20DjHd5rA"), Uuid.fromString("NXiBSMNHSvWqvz3qM8a6Vg"), Uuid.fromString("yWinzh1DRD25nHuXUxLfBQ")}).
                setLeader(1).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).setLeaderEpoch(100).setPartitionEpoch(201).setElr(new int[] {1, 2}).setLastKnownElr(new int[] {1, 2}).build(),
            new PartitionRegistration.Builder().setReplicas(new int[] {1, 2, 3}).setIsr(new int[] {1, 2, 3}).
                setDirectories(new Uuid[]{Uuid.fromString("9bDLWtoRRaKUToKixl3NUg"), Uuid.fromString("nLJMwhSUTEOU7DEI0U2GOw"), Uuid.fromString("ULAltTBAQlG2peJh9DZZrw")}).
                setLeader(2).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).setLeaderEpoch(100).setPartitionEpoch(200).setLastKnownElr(new int[] {1, 2}).build(),
            new PartitionRegistration.Builder().setReplicas(new int[] {1, 2, 3}).setIsr(new int[] {1}).
                setDirectories(new Uuid[]{Uuid.fromString("kWM0QcMoRg6BHc7sdVsjZg"), Uuid.fromString("84F4VbPGTRWewKhlCYctbQ"), Uuid.fromString("W505iUM0S6a5Ds83d1WjcQ")}).
                setLeader(1).setLeaderRecoveryState(LeaderRecoveryState.RECOVERING).setLeaderEpoch(100).setPartitionEpoch(200).build(),
            new PartitionRegistration.Builder().setReplicas(new int[] {1, 2, 3, 4, 5, 6}).setIsr(new int[] {1, 2, 3}).setRemovingReplicas(new int[] {4, 5, 6}).setAddingReplicas(new int[] {1, 2, 3}).
                setDirectories(DirectoryId.unassignedArray(6)).
                setLeader(1).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).setLeaderEpoch(100).setPartitionEpoch(200).setElr(new int[] {1, 2, 3}).build(),
            new PartitionRegistration.Builder().setReplicas(new int[] {1, 2, 3, 4, 5, 6}).setIsr(new int[] {1, 2, 3}).setRemovingReplicas(new int[] {1, 2, 3}).setAddingReplicas(new int[] {4, 5, 6}).
                setDirectories(DirectoryId.migratingArray(6)).
                setLeader(1).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).setLeaderEpoch(100).setPartitionEpoch(200).setLastKnownElr(new int[] {1, 2}).build(),
            new PartitionRegistration.Builder().setReplicas(new int[] {1, 2, 3, 4, 5, 6}).setIsr(new int[] {1, 2, 3}).setRemovingReplicas(new int[] {1, 3}).
                setDirectories(DirectoryId.unassignedArray(6)).
                setLeader(1).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).setLeaderEpoch(100).setPartitionEpoch(200).setElr(new int[] {1, 2, 3}).build(),
            new PartitionRegistration.Builder().setReplicas(new int[] {1, 2, 3, 4, 5, 6}).setIsr(new int[] {1, 2, 3}).setAddingReplicas(new int[] {4, 5, 6}).
                setDirectories(DirectoryId.migratingArray(6)).
                setLeader(1).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).setLeaderEpoch(100).setPartitionEpoch(200).setElr(new int[] {2, 3}).setLastKnownElr(new int[] {1, 2}).build()
        );
    }
@Test
public void testDirectories() {
PartitionRegistration partitionRegistration = new PartitionRegistration.Builder().
setReplicas(new int[] {3, 2, 1}).
setDirectories(new Uuid[]{
Uuid.fromString("FbRuu7CeQtq5YFreEzg16g"),
Uuid.fromString("4rtHTelWSSStAFMODOg3cQ"),
Uuid.fromString("Id1WXzHURROilVxZWJNZlw")
}).
setIsr(new int[] {1, 2, 3}).setLeader(1).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).
setLeaderEpoch(100).setPartitionEpoch(200).build();
assertEquals(Uuid.fromString("Id1WXzHURROilVxZWJNZlw"), partitionRegistration.directory(1));
assertEquals(Uuid.fromString("4rtHTelWSSStAFMODOg3cQ"), partitionRegistration.directory(2));
assertEquals(Uuid.fromString("FbRuu7CeQtq5YFreEzg16g"), partitionRegistration.directory(3));
assertThrows(IllegalArgumentException.class, () -> partitionRegistration.directory(4));
}
@Test
public void testMigratingRecordDirectories() {
PartitionRecord record = new PartitionRecord().
setTopicId(Uuid.fromString("ONlQ7DDzQtGESsG499UDQg")).
setPartitionId(0).
setReplicas(List.of(0, 1)).
setIsr(List.of(0, 1)).
setLeader(0).
setLeaderRecoveryState(LeaderRecoveryState.RECOVERED.value()).
setLeaderEpoch(0).
setPartitionEpoch(0);
PartitionRegistration registration = new PartitionRegistration(record);
assertArrayEquals(new Uuid[]{DirectoryId.MIGRATING, DirectoryId.MIGRATING}, registration.directories);
}
}
| PartitionRegistrationTest |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/serializer/JSONSerializerDeprecatedTest.java | {
"start": 316,
"end": 648
} | class ____ extends TestCase {
public void test_() throws Exception {
JSONSerializer ser = new JSONSerializer(new SerializeConfig());
ser.setDateFormat(new ISO8601DateFormat());
Assert.assertEquals(null, ser.getDateFormatPattern());
ser.close();
}
}
| JSONSerializerDeprecatedTest |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/indices/settings/InternalOrPrivateSettingsPlugin.java | {
"start": 2052,
"end": 2669
} | class ____ extends Plugin implements ActionPlugin {
static final Setting<String> INDEX_INTERNAL_SETTING = Setting.simpleString(
"index.internal",
Setting.Property.IndexScope,
Setting.Property.InternalIndex
);
static final Setting<String> INDEX_PRIVATE_SETTING = Setting.simpleString(
"index.private",
Setting.Property.IndexScope,
Setting.Property.PrivateIndex
);
@Override
public List<Setting<?>> getSettings() {
return Arrays.asList(INDEX_INTERNAL_SETTING, INDEX_PRIVATE_SETTING);
}
public static | InternalOrPrivateSettingsPlugin |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/ser/enums/EnumAsMapKeySerializationTest.java | {
"start": 1759,
"end": 2135
} | class ____ {
private Map<Foo661, String> foo = new EnumMap<Foo661, String>(Foo661.class);
public MyBean661(String value) {
foo.put(Foo661.FOO, value);
}
@JsonAnyGetter
@JsonSerialize(keyUsing = Foo661.Serializer.class)
public Map<Foo661, String> getFoo() {
return foo;
}
}
public | MyBean661 |
java | spring-projects__spring-framework | spring-webflux/src/test/java/org/springframework/web/reactive/result/method/annotation/RequestMappingMessageConversionIntegrationTests.java | {
"start": 26099,
"end": 26883
} | class ____ {
private String name;
public Person() {
}
public Person(String name) {
this.name = name;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
@Override
public boolean equals(@Nullable Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Person person = (Person) o;
return Objects.equals(this.name, person.name);
}
@Override
public int hashCode() {
return this.name != null ? this.name.hashCode() : 0;
}
@Override
public String toString() {
return "Person{" +
"name='" + name + '\'' +
'}';
}
}
@XmlRootElement
@SuppressWarnings({"WeakerAccess", "unused"})
private static | Person |
java | elastic__elasticsearch | x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/action/StackTraceTests.java | {
"start": 835,
"end": 5334
} | class ____ extends ESTestCase {
public void testDecodeFrameId() {
String frameId = "AAAAAAAAAAUAAAAAAAAB3gAAAAAAD67u";
// base64 encoded representation of the tuple (5, 478)
assertEquals("AAAAAAAAAAUAAAAAAAAB3g", StackTrace.getFileIDFromStackFrameID(frameId));
assertEquals(1027822, StackTrace.getAddressFromStackFrameID(frameId));
}
public void testRunlengthDecodeUniqueValues() {
// 0 - 9 (reversed)
String encodedFrameTypes = "AQkBCAEHAQYBBQEEAQMBAgEBAQA";
int[] actual = StackTrace.runLengthDecodeBase64Url(encodedFrameTypes, encodedFrameTypes.length(), 10);
assertArrayEquals(new int[] { 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 }, actual);
}
public void testRunlengthDecodeSingleValue() {
// "4", repeated ten times
String encodedFrameTypes = "CgQ";
int[] actual = StackTrace.runLengthDecodeBase64Url(encodedFrameTypes, encodedFrameTypes.length(), 10);
assertArrayEquals(new int[] { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 }, actual);
}
public void testRunlengthDecodeFillsGap() {
// "2", repeated three times
String encodedFrameTypes = "AwI";
int[] actual = StackTrace.runLengthDecodeBase64Url(encodedFrameTypes, encodedFrameTypes.length(), 5);
// zeroes should be appended for the last two values which are not present in the encoded representation.
assertArrayEquals(new int[] { 2, 2, 2, 0, 0 }, actual);
}
public void testRunlengthDecodeMixedValue() {
// 4
String encodedFrameTypes = "BQADAg";
int[] actual = StackTrace.runLengthDecodeBase64Url(encodedFrameTypes, encodedFrameTypes.length(), 8);
assertArrayEquals(new int[] { 0, 0, 0, 0, 0, 2, 2, 2 }, actual);
}
public void testCreateFromSource() {
String ids = "AAAAAAAAAAUAAAAAAAAB3gAAAAAAD67u";
String types = "AQI";
// tag::noformat
StackTrace stackTrace = StackTrace.fromSource(
Map.of("Stacktrace",
Map.of("frame",
Map.of(
"ids", ids,
"types", types)
)
)
);
// end::noformat
assertArrayEquals(new String[] { "AAAAAAAAAAUAAAAAAAAB3gAAAAAAD67u" }, stackTrace.frameIds);
assertArrayEquals(new String[] { "AAAAAAAAAAUAAAAAAAAB3g" }, stackTrace.fileIds);
assertArrayEquals(new int[] { 1027822 }, stackTrace.addressOrLines);
assertArrayEquals(new int[] { 2 }, stackTrace.typeIds);
}
public void testToXContent() throws IOException {
XContentType contentType = randomFrom(XContentType.values());
XContentBuilder expectedRequest = XContentFactory.contentBuilder(contentType)
.startObject()
.array("address_or_lines", new int[] { 1027822 })
.array("file_ids", "AAAAAAAAAAUAAAAAAAAB3g")
.array("frame_ids", "AAAAAAAAAAUAAAAAAAAB3gAAAAAAD67u")
.array("type_ids", new int[] { 2 })
.field("annual_co2_tons", 0.3d)
.field("annual_costs_usd", 2.7d)
.field("count", 1)
.endObject();
XContentBuilder actualRequest = XContentFactory.contentBuilder(contentType);
StackTrace stackTrace = new StackTrace(
new int[] { 1027822 },
new String[] { "AAAAAAAAAAUAAAAAAAAB3g" },
new String[] { "AAAAAAAAAAUAAAAAAAAB3gAAAAAAD67u" },
new int[] { 2 }
);
stackTrace.annualCO2Tons = 0.3d;
stackTrace.annualCostsUSD = 2.7d;
stackTrace.count = 1;
stackTrace.toXContent(actualRequest, ToXContent.EMPTY_PARAMS);
assertToXContentEquivalent(BytesReference.bytes(expectedRequest), BytesReference.bytes(actualRequest), contentType);
}
public void testEquality() {
StackTrace stackTrace = new StackTrace(
new int[] { 102782 },
new String[] { "AAAAAAAAAAUAAAAAAAAB3g" },
new String[] { "AAAAAAAAAAUAAAAAAAAB3gAAAAAAD67u" },
new int[] { 2 }
);
EqualsHashCodeTestUtils.checkEqualsAndHashCode(
stackTrace,
(o -> new StackTrace(
Arrays.copyOf(o.addressOrLines, o.addressOrLines.length),
Arrays.copyOf(o.fileIds, o.fileIds.length),
Arrays.copyOf(o.frameIds, o.frameIds.length),
Arrays.copyOf(o.typeIds, o.typeIds.length)
))
);
}
}
| StackTraceTests |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java | {
"start": 44503,
"end": 46003
} | class ____ {
private final Path remoteLogPath;
private final IndexedLogsMeta indexedLogsMeta;
CachedIndexedLogsMeta(IndexedLogsMeta indexedLogsMeta,
Path remoteLogPath) {
this.indexedLogsMeta = indexedLogsMeta;
this.remoteLogPath = remoteLogPath;
}
public Path getRemoteLogPath() {
return this.remoteLogPath;
}
public IndexedLogsMeta getCachedIndexedLogsMeta() {
return this.indexedLogsMeta;
}
}
@Private
public static int getFSOutputBufferSize(Configuration conf) {
return conf.getInt(FS_OUTPUT_BUF_SIZE_ATTR, 256 * 1024);
}
@Private
public static int getFSInputBufferSize(Configuration conf) {
return conf.getInt(FS_INPUT_BUF_SIZE_ATTR, 256 * 1024);
}
@Private
@VisibleForTesting
public long getRollOverLogMaxSize(Configuration conf) {
boolean supportAppend = false;
try {
FileSystem fs = FileSystem.get(remoteRootLogDir.toUri(), conf);
if (fs instanceof LocalFileSystem || fs.hasPathCapability(
remoteRootLogDir, CommonPathCapabilities.FS_APPEND)) {
supportAppend = true;
}
} catch (Exception ioe) {
LOG.warn("Unable to determine if the filesystem supports " +
"append operation", ioe);
}
if (supportAppend) {
return 1024L * 1024 * 1024 * conf.getInt(
LOG_ROLL_OVER_MAX_FILE_SIZE_GB,
LOG_ROLL_OVER_MAX_FILE_SIZE_GB_DEFAULT);
} else {
return 0L;
}
}
private abstract | CachedIndexedLogsMeta |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.