language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | apache__flink | flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/KeyMap.java | {
"start": 15784,
"end": 20836
} | class ____<K, V> {
final K key;
final int hashCode;
V value;
Entry<K, V> next;
long touchedTag;
Entry(K key, V value, int hashCode, Entry<K, V> next) {
this.key = key;
this.value = value;
this.next = next;
this.hashCode = hashCode;
}
public K getKey() {
return key;
}
public V getValue() {
return value;
}
}
// ------------------------------------------------------------------------
/**
* Performs a traversal about logical the multi-map that results from the union of the given
* maps. This method does not actually build a union of the map, but traverses the hash maps
* together.
*
* @param maps The array uf maps whose union should be traversed.
* @param visitor The visitor that is called for each key and all values.
* @param touchedTag A tag that is used to mark elements that have been touched in this specific
* traversal. Each successive traversal should supply a larger value for this tag than the
* previous one.
* @param <K> The type of the map's key.
* @param <V> The type of the map's value.
*/
public static <K, V> void traverseMaps(
final KeyMap<K, V>[] maps,
final TraversalEvaluator<K, V> visitor,
final long touchedTag)
throws Exception {
// we need to work on the maps in descending size
Arrays.sort(maps, CapacityDescendingComparator.INSTANCE);
final int[] shifts = new int[maps.length];
final int[] lowBitsMask = new int[maps.length];
final int numSlots = maps[0].table.length;
final int numTables = maps.length;
// figure out how much each hash table collapses the entries
for (int i = 0; i < numTables; i++) {
shifts[i] = maps[0].log2size - maps[i].log2size;
lowBitsMask[i] = (1 << shifts[i]) - 1;
}
// go over all slots (based on the largest hash table)
for (int pos = 0; pos < numSlots; pos++) {
// for each slot, go over all tables, until the table does not have that slot any more
// for tables where multiple slots collapse into one, we visit that one when we process
// the
// latest of all slots that collapse to that one
int mask;
for (int rootTable = 0;
rootTable < numTables && ((mask = lowBitsMask[rootTable]) & pos) == mask;
rootTable++) {
// use that table to gather keys and start collecting keys from the following tables
// go over all entries of that slot in the table
Entry<K, V> entry = maps[rootTable].table[pos >> shifts[rootTable]];
while (entry != null) {
// take only entries that have not been collected as part of other tables
if (entry.touchedTag < touchedTag) {
entry.touchedTag = touchedTag;
final K key = entry.key;
final int hashCode = entry.hashCode;
visitor.startNewKey(key);
visitor.nextValue(entry.value);
addEntriesFromChain(entry.next, visitor, key, touchedTag, hashCode);
// go over the other hash tables and collect their entries for the key
for (int followupTable = rootTable + 1;
followupTable < numTables;
followupTable++) {
Entry<K, V> followupEntry =
maps[followupTable].table[pos >> shifts[followupTable]];
if (followupEntry != null) {
addEntriesFromChain(
followupEntry, visitor, key, touchedTag, hashCode);
}
}
visitor.keyDone();
}
entry = entry.next;
}
}
}
}
private static <K, V> void addEntriesFromChain(
Entry<K, V> entry,
TraversalEvaluator<K, V> visitor,
K key,
long touchedTag,
int hashCode)
throws Exception {
while (entry != null) {
if (entry.touchedTag < touchedTag
&& entry.hashCode == hashCode
&& entry.key.equals(key)) {
entry.touchedTag = touchedTag;
visitor.nextValue(entry.value);
}
entry = entry.next;
}
}
// ------------------------------------------------------------------------
/**
* Comparator that defines a descending order on maps depending on their table capacity and
* number of elements.
*/
static final | Entry |
java | apache__camel | catalog/camel-route-parser/src/test/java/org/apache/camel/parser/java/RoasterMySedaRouteBuilderTest.java | {
"start": 1407,
"end": 2534
} | class ____ {
private static final Logger LOG = LoggerFactory.getLogger(RoasterMySedaRouteBuilderTest.class);
@Test
void parse() throws Exception {
JavaClassSource clazz = (JavaClassSource) Roaster
.parse(new File("src/test/java/org/apache/camel/parser/java/MySedaRouteBuilder.java"));
List<CamelEndpointDetails> details = new ArrayList<>();
RouteBuilderParser.parseRouteBuilderEndpoints(clazz, ".",
"src/test/java/org/apache/camel/parser/java/MySedaRouteBuilder.java", details);
LOG.info("{}", details);
assertEquals(7, details.size());
assertEquals("32", details.get(1).getLineNumber());
assertEquals("seda:foo", details.get(1).getEndpointUri());
assertTrue(details.get(1).isConsumerOnly());
assertFalse(details.get(1).isProducerOnly());
assertEquals("35", details.get(2).getLineNumber());
assertEquals("seda:bar", details.get(2).getEndpointUri());
assertTrue(details.get(2).isConsumerOnly());
assertFalse(details.get(2).isProducerOnly());
}
}
| RoasterMySedaRouteBuilderTest |
java | apache__maven | impl/maven-cli/src/test/java/org/apache/maven/cling/invoker/mvnup/goals/CompatibilityFixStrategyTest.java | {
"start": 5237,
"end": 9757
} | class ____ {
@Test
@DisplayName("should remove duplicate dependencies in dependencyManagement")
void shouldRemoveDuplicateDependenciesInDependencyManagement() throws Exception {
String pomXml = """
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0">
<modelVersion>4.0.0</modelVersion>
<groupId>test</groupId>
<artifactId>test</artifactId>
<version>1.0.0</version>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
<version>3.12.0</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
<version>3.13.0</version>
</dependency>
</dependencies>
</dependencyManagement>
</project>
""";
Document document = Document.of(pomXml);
Map<Path, Document> pomMap = Map.of(Paths.get("pom.xml"), document);
UpgradeContext context = createMockContext();
UpgradeResult result = strategy.doApply(context, pomMap);
assertTrue(result.success(), "Compatibility fix should succeed");
assertTrue(result.modifiedCount() > 0, "Should have removed duplicate dependency");
// Verify only one dependency remains
Editor editor = new Editor(document);
Element root = editor.root();
Element dependencyManagement = DomUtils.findChildElement(root, "dependencyManagement");
Element dependencies = DomUtils.findChildElement(dependencyManagement, "dependencies");
var dependencyElements = dependencies.children("dependency").toList();
assertEquals(1, dependencyElements.size(), "Should have only one dependency after duplicate removal");
}
@Test
@DisplayName("should remove duplicate dependencies in regular dependencies")
void shouldRemoveDuplicateDependenciesInRegularDependencies() throws Exception {
String pomXml = """
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0">
<modelVersion>4.0.0</modelVersion>
<groupId>test</groupId>
<artifactId>test</artifactId>
<version>1.0.0</version>
<dependencies>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.13.2</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.13.2</version>
<scope>test</scope>
</dependency>
</dependencies>
</project>
""";
Document document = Document.of(pomXml);
Map<Path, Document> pomMap = Map.of(Paths.get("pom.xml"), document);
UpgradeContext context = createMockContext();
UpgradeResult result = strategy.doApply(context, pomMap);
assertTrue(result.success(), "Compatibility fix should succeed");
assertTrue(result.modifiedCount() > 0, "Should have removed duplicate dependency");
// Verify only one dependency remains
Editor editor = new Editor(document);
Element root = editor.root();
Element dependencies = DomUtils.findChildElement(root, "dependencies");
var dependencyElements = dependencies.children("dependency").toList();
assertEquals(1, dependencyElements.size(), "Should have only one dependency after duplicate removal");
}
}
@Nested
@DisplayName("Duplicate Plugin Fixes")
| DuplicateDependencyFixesTests |
java | apache__flink | flink-core/src/test/java/org/apache/flink/api/common/typeutils/base/BigIntComparatorTest.java | {
"start": 1078,
"end": 2182
} | class ____ extends ComparatorTestBase<BigInteger> {
@Override
protected TypeComparator<BigInteger> createComparator(boolean ascending) {
return new BigIntComparator(ascending);
}
@Override
protected TypeSerializer<BigInteger> createSerializer() {
return new BigIntSerializer();
}
@Override
protected BigInteger[] getSortedTestData() {
return new BigInteger[] {
new BigInteger("-8745979691234123413478523984729447"),
BigInteger.valueOf(-10000),
BigInteger.valueOf(-1),
BigInteger.ZERO,
BigInteger.ONE,
BigInteger.TEN,
new BigInteger("127"),
new BigInteger("128"),
new BigInteger("129"),
new BigInteger("130"),
BigInteger.valueOf(0b10000000_00000000_00000000_00000000L),
BigInteger.valueOf(0b10000000_00000000_00000000_00000001L),
BigInteger.valueOf(0b10000000_00000000_10000000_00000000L),
new BigInteger("8745979691234123413478523984729447")
};
}
}
| BigIntComparatorTest |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/metrics/EventSetupTest.java | {
"start": 3160,
"end": 7687
} | class ____ extends TestEventReporter {}
/** Verifies that a reporter can be configured with all it's arguments being forwarded. */
@Test
void testReporterArgumentForwarding() {
final Configuration config = new Configuration();
configureReporter1(config);
final List<EventReporterSetup> reporterSetups =
ReporterSetupBuilder.EVENT_SETUP_BUILDER.fromConfiguration(
config, DefaultReporterFilters::eventsFromConfiguration, null);
Assert.assertEquals(1, reporterSetups.size());
final EventReporterSetup reporterSetup = reporterSetups.get(0);
assertEventReporter1Configured(reporterSetup);
}
/**
* Verifies that multiple reporters can be configured with all their arguments being forwarded.
*/
@Test
void testSeveralReportersWithArgumentForwarding() {
final Configuration config = new Configuration();
configureReporter1(config);
configureReporter2(config);
final List<EventReporterSetup> reporterSetups =
ReporterSetupBuilder.EVENT_SETUP_BUILDER.fromConfiguration(
config, DefaultReporterFilters::eventsFromConfiguration, null);
Assert.assertEquals(2, reporterSetups.size());
final Optional<EventReporterSetup> reporter1Config =
reporterSetups.stream().filter(c -> "reporter1".equals(c.getName())).findFirst();
Assert.assertTrue(reporter1Config.isPresent());
assertEventReporter1Configured(reporter1Config.get());
final Optional<EventReporterSetup> reporter2Config =
reporterSetups.stream().filter(c -> "reporter2".equals(c.getName())).findFirst();
Assert.assertTrue(reporter2Config.isPresent());
assertReporter2Configured(reporter2Config.get());
}
/**
* Verifies that {@link EventOptions#REPORTERS_LIST} is correctly used to filter configured
* reporters.
*/
@Test
void testActivateOneReporterAmongTwoDeclared() {
final Configuration config = new Configuration();
configureReporter1(config);
configureReporter2(config);
config.set(EventOptions.REPORTERS_LIST, "reporter2");
final List<EventReporterSetup> reporterSetups =
ReporterSetupBuilder.EVENT_SETUP_BUILDER.fromConfiguration(
config, DefaultReporterFilters::eventsFromConfiguration, null);
Assert.assertEquals(1, reporterSetups.size());
final EventReporterSetup setup = reporterSetups.get(0);
assertReporter2Configured(setup);
}
@Test
void testReporterSetupSupplier() throws Exception {
final Configuration config = new Configuration();
EventOptions.forReporter(config, "reporter1")
.set(EventOptions.REPORTER_FACTORY_CLASS, TestEventReporter1.class.getName());
final List<EventReporterSetup> reporterSetups =
ReporterSetupBuilder.EVENT_SETUP_BUILDER.fromConfiguration(
config, DefaultReporterFilters::eventsFromConfiguration, null);
Assert.assertEquals(1, reporterSetups.size());
final EventReporterSetup reporterSetup = reporterSetups.get(0);
Assert.assertThat(reporterSetup.getReporter(), instanceOf(TestEventReporter1.class));
}
/** Verifies that multiple reporters are instantiated correctly. */
@Test
void testMultipleReporterInstantiation() throws Exception {
Configuration config = new Configuration();
EventOptions.forReporter(config, "test1")
.set(EventOptions.REPORTER_FACTORY_CLASS, TestEventReporter11.class.getName());
EventOptions.forReporter(config, "test2")
.set(EventOptions.REPORTER_FACTORY_CLASS, TestEventReporter12.class.getName());
EventOptions.forReporter(config, "test3")
.set(EventOptions.REPORTER_FACTORY_CLASS, TestEventReporter13.class.getName());
List<EventReporterSetup> reporterSetups =
ReporterSetupBuilder.EVENT_SETUP_BUILDER.fromConfiguration(
config, DefaultReporterFilters::eventsFromConfiguration, null);
assertEquals(3, reporterSetups.size());
Assert.assertTrue(TestEventReporter11.wasOpened);
Assert.assertTrue(TestEventReporter12.wasOpened);
Assert.assertTrue(TestEventReporter13.wasOpened);
}
/** Reporter that exposes whether open() was called. */
public static | TestEventReporter2 |
java | apache__kafka | tools/src/main/java/org/apache/kafka/tools/EndToEndLatency.java | {
"start": 2137,
"end": 2781
} | class ____ the average end to end latency for a single message to travel through Kafka.
* Following are the required arguments
* <p> --bootstrap-server = location of the bootstrap broker for both the producer and the consumer
* <p> --topic = topic name used by both the producer and the consumer to send/receive messages
* <p> --num-records = # messages to send
* <p> --producer-acks = See ProducerConfig.ACKS_DOC
* <p> --record-size = size of each message value in bytes
*
* <p> e.g. [./bin/kafka-e2e-latency.sh --bootstrap-server localhost:9092 --topic test-topic --num-records 1000 --producer-acks 1 --record-size 512]
*/
public | records |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/customizers/InheritedContextCustomizerRegistrationTests.java | {
"start": 966,
"end": 1288
} | class ____ extends LocalContextCustomizerRegistrationTests {
@Autowired
@Qualifier("bar")
String bar;
@Override
@Test
void injectedBean() {
assertThat(fruit).isEqualTo("apple, banana, cherry");
assertThat(foo).isEqualTo("bar");
assertThat(bar).isEqualTo("baz");
}
}
| InheritedContextCustomizerRegistrationTests |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/AMQPEndpointBuilderFactory.java | {
"start": 26849,
"end": 28070
} | class ____ is good enough as
* subscription name). Note that shared subscriptions may also be
* durable, so this flag can (and often will) be combined with
* subscriptionDurable as well. Only makes sense when listening to a
* topic (pub-sub domain), therefore this method switches the
* pubSubDomain flag as well. Requires a JMS 2.0 compatible message
* broker.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param subscriptionShared the value to set
* @return the dsl builder
*/
default AMQPEndpointConsumerBuilder subscriptionShared(boolean subscriptionShared) {
doSetProperty("subscriptionShared", subscriptionShared);
return this;
}
/**
* Set whether to make the subscription shared. The shared subscription
* name to be used can be specified through the subscriptionName
* property. Default is false. Set this to true to register a shared
* subscription, typically in combination with a subscriptionName value
* (unless your message listener | name |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/jsontype/SubTypeResolutionTest.java | {
"start": 3124,
"end": 3177
} | class ____<T> extends Either { }
}
static | Right |
java | quarkusio__quarkus | extensions/hibernate-orm/deployment/src/test/java/io/quarkus/hibernate/orm/singlepersistenceunit/SinglePersistenceUnitResourceInjectionEntityManagerTest.java | {
"start": 393,
"end": 1076
} | class ____ {
@RegisterExtension
static QuarkusUnitTest runner = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClass(DefaultEntity.class)
.addAsResource("application.properties"));
@PersistenceContext
EntityManager entityManager;
@Test
@Transactional
public void test() {
DefaultEntity entity = new DefaultEntity("gsmet");
entityManager.persist(entity);
DefaultEntity savedEntity = entityManager.find(DefaultEntity.class, entity.getId());
assertEquals(entity.getName(), savedEntity.getName());
}
}
| SinglePersistenceUnitResourceInjectionEntityManagerTest |
java | micronaut-projects__micronaut-core | core-processor/src/main/java/io/micronaut/inject/ast/ReflectWildcardElement.java | {
"start": 1009,
"end": 1823
} | class ____ extends ReflectTypeElement<WildcardType> implements WildcardElement {
ReflectWildcardElement(WildcardType type) {
super(type);
}
@NonNull
@Override
public ClassElement toArray() {
throw new UnsupportedOperationException();
}
@NonNull
@Override
public ClassElement fromArray() {
throw new UnsupportedOperationException();
}
@NonNull
@Override
public List<? extends ClassElement> getUpperBounds() {
return Arrays.stream(type.getUpperBounds()).map(ClassElement::of).collect(Collectors.toList());
}
@NonNull
@Override
public List<? extends ClassElement> getLowerBounds() {
return Arrays.stream(type.getLowerBounds()).map(ClassElement::of).collect(Collectors.toList());
}
}
| ReflectWildcardElement |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/test/state/ChangelogRescalingITCase.java | {
"start": 11851,
"end": 12476
} | class ____ extends NumberSequenceSource {
private final int numbersPerSecond;
public ThrottlingNumberSequenceSource(long from, long to, int numbersPerSecondPerReader) {
super(from, to);
this.numbersPerSecond = numbersPerSecondPerReader;
}
@Override
public SourceReader<Long, NumberSequenceSplit> createReader(
SourceReaderContext readerContext) {
return new ThrottlingIteratorSourceReader<>(
readerContext, new SourceRateLimiter(numbersPerSecond));
}
}
private static | ThrottlingNumberSequenceSource |
java | apache__flink | flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/data/writer/AbstractBinaryWriter.java | {
"start": 2790,
"end": 13277
} | class ____ implements BinaryWriter {
protected MemorySegment segment;
protected int cursor;
protected DataOutputViewStreamWrapper outputView;
/** Set offset and size to fix len part. */
protected abstract void setOffsetAndSize(int pos, int offset, long size);
/** Get field offset. */
protected abstract int getFieldOffset(int pos);
/** After grow, need point to new memory. */
protected abstract void afterGrow();
protected abstract void setNullBit(int ordinal);
/** See {@link BinarySegmentUtils#readStringData(MemorySegment[], int, int, long)}. */
@Override
public void writeString(int pos, StringData input) {
BinaryStringData string = (BinaryStringData) input;
if (string.getSegments() == null) {
String javaObject = string.toString();
writeBytes(pos, javaObject.getBytes(StandardCharsets.UTF_8));
} else {
int len = string.getSizeInBytes();
if (len <= 7) {
byte[] bytes = BinarySegmentUtils.allocateReuseBytes(len);
BinarySegmentUtils.copyToBytes(
string.getSegments(), string.getOffset(), bytes, 0, len);
writeBytesToFixLenPart(segment, getFieldOffset(pos), bytes, len);
} else {
writeSegmentsToVarLenPart(pos, string.getSegments(), string.getOffset(), len);
}
}
}
private void writeBytes(int pos, byte[] bytes) {
int len = bytes.length;
if (len <= BinaryFormat.MAX_FIX_PART_DATA_SIZE) {
writeBytesToFixLenPart(segment, getFieldOffset(pos), bytes, len);
} else {
writeBytesToVarLenPart(pos, bytes, len);
}
}
@Override
public void writeArray(int pos, ArrayData input, ArrayDataSerializer serializer) {
BinaryArrayData binary = serializer.toBinaryArray(input);
writeSegmentsToVarLenPart(
pos, binary.getSegments(), binary.getOffset(), binary.getSizeInBytes());
}
@Override
public void writeMap(int pos, MapData input, MapDataSerializer serializer) {
BinaryMapData binary = serializer.toBinaryMap(input);
writeSegmentsToVarLenPart(
pos, binary.getSegments(), binary.getOffset(), binary.getSizeInBytes());
}
@Override
public void writeVariant(int pos, Variant variant) {
byte[] metadata = ((BinaryVariant) variant).getMetadata();
byte[] value = ((BinaryVariant) variant).getValue();
int metadataLen = metadata.length;
int length = metadata.length + value.length + 4;
ByteBuffer buffer = ByteBuffer.allocate(length);
buffer.putInt(metadataLen).put(metadata).put(value);
writeBytesToVarLenPart(pos, buffer.array(), length);
}
private DataOutputViewStreamWrapper getOutputView() {
if (outputView == null) {
outputView = new DataOutputViewStreamWrapper(new BinaryRowWriterOutputView());
}
return outputView;
}
@Override
@SuppressWarnings({"unchecked", "rawtypes"})
public void writeRawValue(
int pos, RawValueData<?> input, RawValueDataSerializer<?> serializer) {
TypeSerializer innerSerializer = serializer.getInnerSerializer();
// RawValueData only has one implementation which is BinaryRawValueData
BinaryRawValueData rawValue = (BinaryRawValueData) input;
rawValue.ensureMaterialized(innerSerializer);
writeSegmentsToVarLenPart(
pos, rawValue.getSegments(), rawValue.getOffset(), rawValue.getSizeInBytes());
}
@Override
public void writeRow(int pos, RowData input, RowDataSerializer serializer) {
if (input instanceof BinaryFormat) {
BinaryFormat row = (BinaryFormat) input;
writeSegmentsToVarLenPart(
pos, row.getSegments(), row.getOffset(), row.getSizeInBytes());
} else {
BinaryRowData row = serializer.toBinaryRow(input);
writeSegmentsToVarLenPart(
pos, row.getSegments(), row.getOffset(), row.getSizeInBytes());
}
}
@Override
public void writeBinary(int pos, byte[] bytes) {
int len = bytes.length;
if (len <= BinaryFormat.MAX_FIX_PART_DATA_SIZE) {
writeBytesToFixLenPart(segment, getFieldOffset(pos), bytes, len);
} else {
writeBytesToVarLenPart(pos, bytes, len);
}
}
@Override
public void writeDecimal(int pos, DecimalData value, int precision) {
assert value == null || (value.precision() == precision);
if (DecimalData.isCompact(precision)) {
assert value != null;
writeLong(pos, value.toUnscaledLong());
} else {
// grow the global buffer before writing data.
ensureCapacity(16);
// zero-out the bytes
segment.putLong(cursor, 0L);
segment.putLong(cursor + 8, 0L);
// Make sure Decimal object has the same scale as DecimalType.
// Note that we may pass in null Decimal object to set null for it.
if (value == null) {
setNullBit(pos);
// keep the offset for future update
setOffsetAndSize(pos, cursor, 0);
} else {
final byte[] bytes = value.toUnscaledBytes();
assert bytes.length <= 16;
// Write the bytes to the variable length portion.
segment.put(cursor, bytes, 0, bytes.length);
setOffsetAndSize(pos, cursor, bytes.length);
}
// move the cursor forward.
cursor += 16;
}
}
@Override
public void writeTimestamp(int pos, TimestampData value, int precision) {
if (TimestampData.isCompact(precision)) {
writeLong(pos, value.getMillisecond());
} else {
// store the nanoOfMillisecond in fixed-length part as offset and nanoOfMillisecond
ensureCapacity(8);
if (value == null) {
setNullBit(pos);
// zero-out the bytes
segment.putLong(cursor, 0L);
setOffsetAndSize(pos, cursor, 0);
} else {
segment.putLong(cursor, value.getMillisecond());
setOffsetAndSize(pos, cursor, value.getNanoOfMillisecond());
}
cursor += 8;
}
}
private void zeroBytes(int offset, int size) {
for (int i = offset; i < offset + size; i++) {
segment.put(i, (byte) 0);
}
}
protected void zeroOutPaddingBytes(int numBytes) {
if ((numBytes & 0x07) > 0) {
segment.putLong(cursor + ((numBytes >> 3) << 3), 0L);
}
}
protected void ensureCapacity(int neededSize) {
final int length = cursor + neededSize;
if (segment.size() < length) {
grow(length);
}
}
private void writeSegmentsToVarLenPart(
int pos, MemorySegment[] segments, int offset, int size) {
final int roundedSize = roundNumberOfBytesToNearestWord(size);
// grow the global buffer before writing data.
ensureCapacity(roundedSize);
zeroOutPaddingBytes(size);
if (segments.length == 1) {
segments[0].copyTo(offset, segment, cursor, size);
} else {
writeMultiSegmentsToVarLenPart(segments, offset, size);
}
setOffsetAndSize(pos, cursor, size);
// move the cursor forward.
cursor += roundedSize;
}
private void writeMultiSegmentsToVarLenPart(MemorySegment[] segments, int offset, int size) {
// Write the bytes to the variable length portion.
int needCopy = size;
int fromOffset = offset;
int toOffset = cursor;
for (MemorySegment sourceSegment : segments) {
int remain = sourceSegment.size() - fromOffset;
if (remain > 0) {
int copySize = remain > needCopy ? needCopy : remain;
sourceSegment.copyTo(fromOffset, segment, toOffset, copySize);
needCopy -= copySize;
toOffset += copySize;
fromOffset = 0;
} else {
fromOffset -= sourceSegment.size();
}
}
}
private void writeBytesToVarLenPart(int pos, byte[] bytes, int len) {
final int roundedSize = roundNumberOfBytesToNearestWord(len);
// grow the global buffer before writing data.
ensureCapacity(roundedSize);
zeroOutPaddingBytes(len);
// Write the bytes to the variable length portion.
segment.put(cursor, bytes, 0, len);
setOffsetAndSize(pos, cursor, len);
// move the cursor forward.
cursor += roundedSize;
}
/** Increases the capacity to ensure that it can hold at least the minimum capacity argument. */
private void grow(int minCapacity) {
int oldCapacity = segment.size();
int newCapacity = oldCapacity + (oldCapacity >> 1);
if (newCapacity - minCapacity < 0) {
newCapacity = minCapacity;
}
segment = MemorySegmentFactory.wrap(Arrays.copyOf(segment.getArray(), newCapacity));
afterGrow();
}
protected static int roundNumberOfBytesToNearestWord(int numBytes) {
int remainder = numBytes & 0x07;
if (remainder == 0) {
return numBytes;
} else {
return numBytes + (8 - remainder);
}
}
private static void writeBytesToFixLenPart(
MemorySegment segment, int fieldOffset, byte[] bytes, int len) {
long firstByte = len | 0x80; // first bit is 1, other bits is len
long sevenBytes = 0L; // real data
if (BinaryRowData.LITTLE_ENDIAN) {
for (int i = 0; i < len; i++) {
sevenBytes |= ((0x00000000000000FFL & bytes[i]) << (i * 8L));
}
} else {
for (int i = 0; i < len; i++) {
sevenBytes |= ((0x00000000000000FFL & bytes[i]) << ((6 - i) * 8L));
}
}
final long offsetAndSize = (firstByte << 56) | sevenBytes;
segment.putLong(fieldOffset, offsetAndSize);
}
@Internal
public MemorySegment getSegments() {
return segment;
}
/** OutputView for write Generic. */
private | AbstractBinaryWriter |
java | google__guava | android/guava-tests/test/com/google/common/util/concurrent/ForwardingListenableFutureTest.java | {
"start": 825,
"end": 1003
} | class ____ extends TestCase {
public void testForwarding() {
ForwardingObjectTester.testForwardingObject(ForwardingListenableFuture.class);
}
}
| ForwardingListenableFutureTest |
java | netty__netty | transport-classes-io_uring/src/test/java/io/netty/channel/uring/LoadClassTest.java | {
"start": 1274,
"end": 2472
} | class ____ {
static Class<?>[] classes() {
List<Class<?>> classes = new ArrayList<>();
classes.add(IoUringSocketChannel.class);
classes.add(IoUringServerSocketChannel.class);
classes.add(IoUringDatagramChannel.class);
classes.add(IoUringDomainSocketChannel.class);
classes.add(IoUringServerDomainSocketChannel.class);
classes.add(IoUringIoHandler.class);
return classes.toArray(new Class<?>[0]);
}
@EnabledForJreRange(min = JRE.JAVA_9)
@ParameterizedTest
@MethodSource("classes")
public void testLoadClassesWorkWithoutNativeLib(Class<?> clazz) {
// Force loading of class.
assertDoesNotThrow(() -> Class.forName(clazz.getName()));
}
@Test
public void testCheckAvailability() {
// Calling ensureAvailability should work on any JDK version
assertThrows(UnsatisfiedLinkError.class, new Executable() {
@Override
public void execute() throws Throwable {
IoUring.ensureAvailability();
}
});
assertFalse(IoUring.isAvailable());
assertNotNull(IoUring.unavailabilityCause());
}
}
| LoadClassTest |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/ZooKeeperEndpointBuilderFactory.java | {
"start": 12824,
"end": 16606
} | interface ____
extends
EndpointProducerBuilder {
default AdvancedZooKeeperEndpointProducerBuilder advanced() {
return (AdvancedZooKeeperEndpointProducerBuilder) this;
}
/**
* Whether the children of the node should be listed.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param listChildren the value to set
* @return the dsl builder
*/
default ZooKeeperEndpointProducerBuilder listChildren(boolean listChildren) {
doSetProperty("listChildren", listChildren);
return this;
}
/**
* Whether the children of the node should be listed.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param listChildren the value to set
* @return the dsl builder
*/
default ZooKeeperEndpointProducerBuilder listChildren(String listChildren) {
doSetProperty("listChildren", listChildren);
return this;
}
/**
* The time interval to wait on connection before timing out.
*
* The option is a: <code>int</code> type.
*
* Default: 5000
* Group: common
*
* @param timeout the value to set
* @return the dsl builder
*/
default ZooKeeperEndpointProducerBuilder timeout(int timeout) {
doSetProperty("timeout", timeout);
return this;
}
/**
* The time interval to wait on connection before timing out.
*
* The option will be converted to a <code>int</code> type.
*
* Default: 5000
* Group: common
*
* @param timeout the value to set
* @return the dsl builder
*/
default ZooKeeperEndpointProducerBuilder timeout(String timeout) {
doSetProperty("timeout", timeout);
return this;
}
/**
* Should the endpoint create the node if it does not currently exist.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param create the value to set
* @return the dsl builder
*/
default ZooKeeperEndpointProducerBuilder create(boolean create) {
doSetProperty("create", create);
return this;
}
/**
* Should the endpoint create the node if it does not currently exist.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param create the value to set
* @return the dsl builder
*/
default ZooKeeperEndpointProducerBuilder create(String create) {
doSetProperty("create", create);
return this;
}
/**
* The create mode that should be used for the newly created node.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: EPHEMERAL
* Group: producer
*
* @param createMode the value to set
* @return the dsl builder
*/
default ZooKeeperEndpointProducerBuilder createMode(String createMode) {
doSetProperty("createMode", createMode);
return this;
}
}
/**
* Advanced builder for endpoint producers for the ZooKeeper component.
*/
public | ZooKeeperEndpointProducerBuilder |
java | google__guice | core/test/com/google/inject/ImplicitBindingTest.java | {
"start": 7162,
"end": 7353
} | class ____ implements Provider<InvalidProvidedBy2> {
@Inject Invalid2 a;
@Override
public InvalidProvidedBy2 get() {
return null;
}
}
static | InvalidProvidedBy2Provider |
java | square__moshi | examples/src/main/java/com/squareup/moshi/recipes/FallbackEnum.java | {
"start": 1265,
"end": 1326
} | enum ____. */
String value();
}
public static final | name |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/submitted/extendresultmap/ExtendResultMapTest.java | {
"start": 1038,
"end": 1847
} | class ____ {
private static SqlSessionFactory sqlSessionFactory;
@BeforeAll
static void setUp() throws Exception {
// create a SqlSessionFactory
try (Reader reader = Resources
.getResourceAsReader("org/apache/ibatis/submitted/extendresultmap/mybatis-config.xml")) {
sqlSessionFactory = new SqlSessionFactoryBuilder().build(reader);
}
// populate in-memory database
BaseDataTest.runScript(sqlSessionFactory.getConfiguration().getEnvironment().getDataSource(),
"org/apache/ibatis/submitted/extendresultmap/CreateDB.sql");
}
@Test
void shouldGetAUser() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
TestMapperY mapper = sqlSession.getMapper(TestMapperY.class);
mapper.retrieveTestString();
}
}
}
| ExtendResultMapTest |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/language/csimple/CSimplePredicateParserTest.java | {
"start": 931,
"end": 5472
} | class ____ {
@Test
public void testParse() {
CSimplePredicateParser parser = new CSimplePredicateParser();
String code = parser.parsePredicate("'bar' != 'foo'");
Assertions.assertEquals("isNotEqualTo(exchange, \"bar\", \"foo\")", code);
code = parser.parsePredicate("${body} == 'foo'");
Assertions.assertEquals("isEqualTo(exchange, body, \"foo\")", code);
code = parser.parsePredicate("${body} != 'foo'");
Assertions.assertEquals("isNotEqualTo(exchange, body, \"foo\")", code);
code = parser.parsePredicate("${body} == 123");
Assertions.assertEquals("isEqualTo(exchange, body, 123)", code);
code = parser.parsePredicate("${body} > 9.95");
Assertions.assertEquals("isGreaterThan(exchange, body, 9.95d)", code); // double value
code = parser.parsePredicate("${body} > 123456789012345");
Assertions.assertEquals("isGreaterThan(exchange, body, 123456789012345l)", code); // long value
code = parser.parsePredicate("${bodyAs(int)} == 123");
Assertions.assertEquals("isEqualTo(exchange, bodyAs(message, int.class), 123)", code);
code = parser.parsePredicate("${bodyAs(String).length()} == 4");
Assertions.assertEquals("isEqualTo(exchange, bodyAs(message, String.class).length(), 4)", code);
code = parser.parsePredicate("${bodyAs(String).substring(3)} == 'DEF'");
Assertions.assertEquals("isEqualTo(exchange, bodyAs(message, String.class).substring(3), \"DEF\")", code);
code = parser.parsePredicate("${bodyAs(int)} > ${headerAs('foo', int)}");
Assertions.assertEquals("isGreaterThan(exchange, bodyAs(message, int.class), headerAs(message, \"foo\", int.class))",
code);
code = parser.parsePredicate("${camelContext.getName()} == 'myCamel'");
Assertions.assertEquals("isEqualTo(exchange, context.getName(), \"myCamel\")", code);
code = parser.parsePredicate("${camelContext.name} == 'myCamel'");
Assertions.assertEquals("isEqualTo(exchange, context.getName(), \"myCamel\")", code);
code = parser.parsePredicate("${camelContext.inflightRepository.size()} > 0");
Assertions.assertEquals("isGreaterThan(exchange, context.getInflightRepository().size(), 0)", code);
}
@Test
public void testParseEmbeddedFunctions() {
CSimplePredicateParser parser = new CSimplePredicateParser();
String code = parser.parsePredicate("${body.substring(1, ${header.max})} == 'foo'");
Assertions.assertEquals("isEqualTo(exchange, body.substring(1, header(message, \"max\")), \"foo\")", code);
}
@Test
public void testParseSysFunctions() {
CSimplePredicateParser parser = new CSimplePredicateParser();
String code = parser.parsePredicate("${sys.foo} != 'bar'");
Assertions.assertEquals("isNotEqualTo(exchange, sys(\"foo\"), \"bar\")", code);
code = parser.parsePredicate("${env.foo} != 'bar'");
Assertions.assertEquals("isNotEqualTo(exchange, sysenv(\"foo\"), \"bar\")", code);
code = parser.parsePredicate("${env:FOO} != 'bar'");
Assertions.assertEquals("isNotEqualTo(exchange, sysenv(\"FOO\"), \"bar\")", code);
}
@Test
public void testParseExchangeProperty() {
CSimplePredicateParser parser = new CSimplePredicateParser();
String code = parser.parsePredicate("${exchangeProperty.foo} != 'bar'");
Assertions.assertEquals("isNotEqualTo(exchange, exchangeProperty(exchange, \"foo\"), \"bar\")", code);
code = parser.parsePredicate("${exchangeProperty[foo]} != 'bar'");
Assertions.assertEquals("isNotEqualTo(exchange, exchangeProperty(exchange, \"foo\"), \"bar\")", code);
code = parser.parsePredicate("${exchangePropertyAs(foo, com.foo.User)} != 'bar'");
Assertions.assertEquals("isNotEqualTo(exchange, exchangePropertyAs(exchange, \"foo\", com.foo.User.class), \"bar\")",
code);
code = parser.parsePredicate("${exchangePropertyAs(foo, com.foo.User).name} != 'bar'");
Assertions.assertEquals(
"isNotEqualTo(exchange, exchangePropertyAs(exchange, \"foo\", com.foo.User.class).getName(), \"bar\")", code);
code = parser.parsePredicate("${exchangePropertyAs(foo, com.foo.User).getName()} != 'bar'");
Assertions.assertEquals(
"isNotEqualTo(exchange, exchangePropertyAs(exchange, \"foo\", com.foo.User.class).getName(), \"bar\")", code);
}
}
| CSimplePredicateParserTest |
java | quarkusio__quarkus | extensions/redis-client/runtime/src/main/java/io/quarkus/redis/runtime/datasource/ReactiveTransactionalJsonCommandsImpl.java | {
"start": 608,
"end": 8199
} | class ____<K> extends AbstractTransactionalCommands
implements ReactiveTransactionalJsonCommands<K> {
private final ReactiveJsonCommandsImpl<K> reactive;
public ReactiveTransactionalJsonCommandsImpl(ReactiveTransactionalRedisDataSource ds,
ReactiveJsonCommandsImpl<K> reactive, TransactionHolder tx) {
super(ds, tx);
this.reactive = reactive;
}
@Override
public <T> Uni<Void> jsonSet(K key, String path, T value) {
this.tx.enqueue(resp -> null);
return this.reactive._jsonSet(key, path, value).invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public Uni<Void> jsonSet(K key, String path, JsonObject json) {
this.tx.enqueue(resp -> null);
return this.reactive._jsonSet(key, path, json).invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public Uni<Void> jsonSet(K key, String path, JsonObject json, JsonSetArgs args) {
this.tx.enqueue(resp -> null);
return this.reactive._jsonSet(key, path, json, args).invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public Uni<Void> jsonSet(K key, String path, JsonArray json) {
this.tx.enqueue(resp -> null);
return this.reactive._jsonSet(key, path, json).invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public Uni<Void> jsonSet(K key, String path, JsonArray json, JsonSetArgs args) {
this.tx.enqueue(resp -> null);
return this.reactive._jsonSet(key, path, json, args).invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public <T> Uni<Void> jsonSet(K key, String path, T value, JsonSetArgs args) {
this.tx.enqueue(resp -> null);
return this.reactive._jsonSet(key, path, value, args).invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public <T> Uni<Void> jsonGet(K key, Class<T> clazz) {
this.tx.enqueue(r -> {
var m = getJsonObject(r);
if (m != null) {
return m.mapTo(clazz);
}
return null;
});
return this.reactive._jsonGet(key).invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public Uni<Void> jsonGetObject(K key) {
this.tx.enqueue(ReactiveJsonCommandsImpl::getJsonObject);
return this.reactive._jsonGet(key).invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public Uni<Void> jsonGetArray(K key) {
this.tx.enqueue(ReactiveJsonCommandsImpl::getJsonArray);
return this.reactive._jsonGet(key).invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public Uni<Void> jsonGet(K key, String path) {
this.tx.enqueue(ReactiveJsonCommandsImpl::getJsonArrayFromJsonGet);
return this.reactive._jsonGet(key, path).invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public Uni<Void> jsonGet(K key, String... paths) {
this.tx.enqueue(ReactiveJsonCommandsImpl::getJsonObject);
return this.reactive._jsonGet(key, paths).invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public <T> Uni<Void> jsonArrAppend(K key, String path, T... values) {
this.tx.enqueue(ReactiveJsonCommandsImpl::decodeAsListOfInteger);
return this.reactive._jsonArrAppend(key, path, values)
.invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public <T> Uni<Void> jsonArrIndex(K key, String path, T value, int start, int end) {
this.tx.enqueue(ReactiveJsonCommandsImpl::decodeAsListOfInteger);
return this.reactive._jsonArrIndex(key, path, value, start, end)
.invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public <T> Uni<Void> jsonArrInsert(K key, String path, int index, T... values) {
this.tx.enqueue(ReactiveJsonCommandsImpl::decodeAsListOfInteger);
return this.reactive._jsonArrInsert(key, path, index, values)
.invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public Uni<Void> jsonArrLen(K key, String path) {
this.tx.enqueue(ReactiveJsonCommandsImpl::decodeAsListOfInteger);
return this.reactive._jsonArrLen(key, path)
.invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public <T> Uni<Void> jsonArrPop(K key, Class<T> clazz, String path, int index) {
this.tx.enqueue(r -> decodeArrPopResponse(clazz, r));
return this.reactive._jsonArrPop(key, path, index)
.invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public Uni<Void> jsonArrTrim(K key, String path, int start, int stop) {
this.tx.enqueue(ReactiveJsonCommandsImpl::decodeAsListOfInteger);
return this.reactive._jsonArrTrim(key, path, start, stop)
.invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public Uni<Void> jsonClear(K key, String path) {
this.tx.enqueue(Response::toInteger);
return this.reactive._jsonClear(key, path)
.invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public Uni<Void> jsonDel(K key, String path) {
this.tx.enqueue(Response::toInteger);
return this.reactive._jsonDel(key, path)
.invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public Uni<Void> jsonMget(String path, K... keys) {
this.tx.enqueue(ReactiveJsonCommandsImpl::decodeMGetResponse);
return this.reactive._jsonMget(path, keys)
.invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public Uni<Void> jsonNumincrby(K key, String path, double value) {
this.tx.enqueue(r -> null);
return this.reactive._jsonNumincrby(key, path, value)
.invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public Uni<Void> jsonObjKeys(K key, String path) {
this.tx.enqueue(ReactiveJsonCommandsImpl::decodeObjKeysResponse);
return this.reactive._jsonObjKeys(key, path)
.invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public Uni<Void> jsonObjLen(K key, String path) {
this.tx.enqueue(ReactiveJsonCommandsImpl::decodeAsListOfInteger);
return this.reactive._jsonObjLen(key, path)
.invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public Uni<Void> jsonStrAppend(K key, String path, String value) {
this.tx.enqueue(ReactiveJsonCommandsImpl::decodeAsListOfInteger);
return this.reactive._jsonStrAppend(key, path, value)
.invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public Uni<Void> jsonStrLen(K key, String path) {
this.tx.enqueue(ReactiveJsonCommandsImpl::decodeAsListOfInteger);
return this.reactive._jsonStrLen(key, path)
.invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public Uni<Void> jsonToggle(K key, String path) {
this.tx.enqueue(ReactiveJsonCommandsImpl::decodeToggleResponse);
return this.reactive._jsonToggle(key, path)
.invoke(this::queuedOrDiscard).replaceWithVoid();
}
@Override
public Uni<Void> jsonType(K key, String path) {
this.tx.enqueue(ReactiveJsonCommandsImpl::decodeTypeResponse);
return this.reactive._jsonType(key, path)
.invoke(this::queuedOrDiscard).replaceWithVoid();
}
}
| ReactiveTransactionalJsonCommandsImpl |
java | reactor__reactor-core | reactor-core/src/test/java/reactor/core/publisher/ParallelMergeOrderedTest.java | {
"start": 1291,
"end": 5326
} | class ____ {
// see https://github.com/reactor/reactor-core/issues/1958
@Test
void dealsWithPrefetchLargerThanSmallBufferSize() {
int parallelism = 2; // parallelism must be > 1 to expose issue
int bufferS = Queues.SMALL_BUFFER_SIZE;
int orderedPrefetch = bufferS + 128; // if orderedPrefetch > bufferS then operator used to drop elements and eventually hang
final AtomicInteger prev = new AtomicInteger(-1);
Flux.range(0, 200_000)
.subscribeOn(Schedulers.newSingle("init", true))
.parallel(parallelism, bufferS)
.runOn(Schedulers.newParallel("process", parallelism, true), bufferS)
.map(i -> i)
.ordered(Comparator.comparing(i -> i), orderedPrefetch)
.as(StepVerifier::create)
.thenConsumeWhile(current -> {
int previous = prev.getAndSet(current);
try {
assertThat(current)
.withFailMessage("elements dropped: prev: %d, next: %d, lost: %d\n", previous, current, current - previous)
.isEqualTo(previous + 1);
}
catch (AssertionError ae) {
ae.printStackTrace();
}
return true;
})
.expectComplete()
.verify(Duration.ofSeconds(5)); //should run in 3s
}
@Test
public void reorderingByIndex() {
final int LOOPS = 100;
final int PARALLELISM = 2;
final List<Integer> ordered = Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
int notShuffled = 0;
for (int i = 0; i < LOOPS; i++) {
final Scheduler SCHEDULER = Schedulers.newParallel("test", PARALLELISM);
final List<Integer> disordered = Collections.synchronizedList(new ArrayList<>());
List<Integer> reordered = Flux.fromIterable(ordered)
.hide()
.index()
.parallel(PARALLELISM)
.runOn(SCHEDULER)
.doOnNext(t2 -> disordered.add(t2.getT2()))
.ordered(Comparator.comparing(Tuple2::getT1))
.map(Tuple2::getT2)
.collectList()
.block();
SCHEDULER.dispose();
assertThat(reordered).containsExactlyElementsOf(ordered);
assertThat(disordered).containsExactlyInAnyOrderElementsOf(ordered);
try {
assertThat(disordered).doesNotContainSequence(ordered);
System.out.println("parallel shuffled the collection into " + disordered);
break;
}
catch (AssertionError e) {
notShuffled++;
}
}
if (notShuffled > 0) {
System.out.println("not shuffled loops: " + notShuffled);
}
assertThat(LOOPS - notShuffled)
.as("at least one run shuffled")
.isGreaterThan(0);
}
@Test
public void rejectPrefetchZero() {
assertThatIllegalArgumentException()
.isThrownBy(() -> new ParallelMergeOrdered<>(null, 0, null))
.withMessage("prefetch > 0 required but it was 0");
}
@Test
public void rejectPrefetchNegative() {
assertThatIllegalArgumentException()
.isThrownBy(() -> new ParallelMergeOrdered<>(null,-1, null))
.withMessage("prefetch > 0 required but it was -1");
}
@Test
public void getPrefetch() {
ParallelMergeOrdered<Integer> test = new ParallelMergeOrdered<>(null, 123, null);
assertThat(test.getPrefetch()).isEqualTo(123);
}
@Test
public void getPrefetchAPI() {
Flux<Integer> test = Flux.range(1, 10)
.parallel()
.ordered(Comparator.naturalOrder(), 123);
assertThat(test.getPrefetch()).isEqualTo(123);
}
@Test
public void scanUnsafe() {
ParallelFlux<Integer> source = Flux.range(1, 10)
.parallel(2);
ParallelMergeOrdered<Integer> test = new ParallelMergeOrdered<>(source, 123, null);
assertThat(test.scan(Scannable.Attr.PARENT)).isSameAs(source);
assertThat(test.scan(Scannable.Attr.PREFETCH)).isEqualTo(123);
assertThat(test.scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.SYNC);
assertThat(test.scan(Scannable.Attr.NAME)).isNull();
}
}
| ParallelMergeOrderedTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/InterruptionTest.java | {
"start": 2094,
"end": 2471
} | class ____ {
void f(Thread t) {
// BUG: Diagnostic contains:
t.interrupt();
}
}
""")
.doTest();
}
@Test
public void negative() {
compilationHelper
.addSourceLines(
"Test.java",
"""
import java.util.concurrent.Future;
| Test |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/schemaStat/SchemaStatTest19.java | {
"start": 441,
"end": 1854
} | class ____ extends TestCase {
public void test_schemaStat() throws Exception {
SchemaRepository repository = new SchemaRepository(JdbcConstants.MYSQL);
String sql = "select * from table1 a left outer join table2 b on a.id=b.id";
SQLStatementParser parser = SQLParserUtils.createSQLStatementParser(sql, JdbcConstants.MYSQL);
SQLStatement stmt = parser.parseStatementList().get(0);
SchemaStatVisitor statVisitor = SQLUtils.createSchemaStatVisitor(repository);
stmt.accept(statVisitor);
System.out.println("Tables : " + statVisitor.getTables());
System.out.println("columns : " + statVisitor.getColumns());
// System.out.println(statVisitor.getGroupByColumns()); // group by
System.out.println("relationships : " + statVisitor.getRelationships()); // group by
System.out.println(statVisitor.getConditions());
assertEquals(4, statVisitor.getColumns().size());
assertEquals(2, statVisitor.getConditions().size());
assertEquals(0, statVisitor.getFunctions().size());
assertTrue(statVisitor.containsTable("table1"));
assertTrue(statVisitor.containsColumn("table1", "*"));
assertTrue(statVisitor.containsColumn("table1", "id"));
assertTrue(statVisitor.containsColumn("table2", "id"));
assertTrue(statVisitor.containsColumn("table2", "*"));
}
}
| SchemaStatTest19 |
java | quarkusio__quarkus | extensions/redis-client/runtime/src/main/java/io/quarkus/redis/datasource/RedisDataSource.java | {
"start": 9330,
"end": 10214
} | class ____ the values
* @param <K> the type of the redis key
* @param <F> the type of the fields (map's keys)
* @param <V> the type of the value
* @return the object to execute commands manipulating hashes (a.k.a. {@code Map<K, V>}).
*/
<K, F, V> HashCommands<K, F, V> hash(Class<K> redisKeyType, Class<F> typeOfField, Class<V> typeOfValue);
/**
* Gets the object to execute commands manipulating hashes (a.k.a. {@code Map<F, V>}).
* <p>
* If you want to use a hash of {@code <String -> Person>} stored using String identifier, you would use:
* {@code hash(String.class, String.class, Person.class)}.
* If you want to use a hash of {@code <String -> Person>} stored using UUID identifier, you would use:
* {@code hash(UUID.class, String.class, Person.class)}.
*
* @param redisKeyType the | of |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java | {
"start": 4626,
"end": 5096
} | class ____ {
// Content field names
public static final String CONTENT_FIELD_NAME_INPUT = "input";
public static final String CONTENT_FIELD_NAME_WEIGHT = "weight";
public static final String CONTENT_FIELD_NAME_CONTEXTS = "contexts";
}
private static Builder builder(FieldMapper in) {
return ((CompletionFieldMapper) in).builder;
}
/**
* Builder for {@link CompletionFieldMapper}
*/
public static | Fields |
java | elastic__elasticsearch | x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/completion/HuggingFaceChatCompletionModel.java | {
"start": 1021,
"end": 4070
} | class ____ extends HuggingFaceModel {
/**
* Creates a new {@link HuggingFaceChatCompletionModel} by copying properties from an existing model,
* replacing the {@code modelId} in the service settings with the one from the given {@link UnifiedCompletionRequest},
* if present. If the request does not specify a model ID, the original value is retained.
*
* @param model the original model to copy from
* @param request the request potentially containing an overridden model ID
* @return a new {@link HuggingFaceChatCompletionModel} with updated service settings
*/
public static HuggingFaceChatCompletionModel of(HuggingFaceChatCompletionModel model, UnifiedCompletionRequest request) {
var originalModelServiceSettings = model.getServiceSettings();
var overriddenServiceSettings = new HuggingFaceChatCompletionServiceSettings(
request.model() != null ? request.model() : originalModelServiceSettings.modelId(),
originalModelServiceSettings.uri(),
originalModelServiceSettings.rateLimitSettings()
);
return new HuggingFaceChatCompletionModel(
model.getInferenceEntityId(),
model.getTaskType(),
model.getConfigurations().getService(),
overriddenServiceSettings,
model.getSecretSettings()
);
}
public HuggingFaceChatCompletionModel(
String inferenceEntityId,
TaskType taskType,
String service,
Map<String, Object> serviceSettings,
@Nullable Map<String, Object> secrets,
ConfigurationParseContext context
) {
this(
inferenceEntityId,
taskType,
service,
HuggingFaceChatCompletionServiceSettings.fromMap(serviceSettings, context),
DefaultSecretSettings.fromMap(secrets)
);
}
HuggingFaceChatCompletionModel(
String inferenceEntityId,
TaskType taskType,
String service,
HuggingFaceChatCompletionServiceSettings serviceSettings,
@Nullable DefaultSecretSettings secretSettings
) {
super(
new ModelConfigurations(inferenceEntityId, taskType, service, serviceSettings),
new ModelSecrets(secretSettings),
serviceSettings,
secretSettings
);
}
@Override
public HuggingFaceChatCompletionServiceSettings getServiceSettings() {
return (HuggingFaceChatCompletionServiceSettings) super.getServiceSettings();
}
@Override
public DefaultSecretSettings getSecretSettings() {
return (DefaultSecretSettings) super.getSecretSettings();
}
@Override
public ExecutableAction accept(HuggingFaceActionVisitor creator) {
return creator.create(this);
}
@Override
public Integer getTokenLimit() {
throw new UnsupportedOperationException("Token Limit for chat completion is sent in request and not retrieved from the model");
}
}
| HuggingFaceChatCompletionModel |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/type/format/JsonValueJDBCTypeAdapterFactory.java | {
"start": 247,
"end": 951
} | class ____ {
/**
* Gets a type adapter for a given reader
* @param reader the JSON document reader from which the adapter gets its value from.
* @param returnEmbeddable
* @return the adapter
*/
public static JsonValueJDBCTypeAdapter getAdapter(JsonDocumentReader reader , boolean returnEmbeddable) {
assert reader != null : "reader is null";
if (reader instanceof StringJsonDocumentReader) {
return new StringJsonValueJDBCTypeAdapter( returnEmbeddable );
}
if (reader instanceof OsonDocumentReader ) {
return new OsonValueJDBCTypeAdapter( );
}
throw new IllegalArgumentException("Unsupported type of document reader " + reader.getClass());
}
}
| JsonValueJDBCTypeAdapterFactory |
java | apache__camel | components/camel-xmlsecurity/src/main/java/org/apache/camel/component/xmlsecurity/api/KeyAccessor.java | {
"start": 1386,
"end": 3297
} | interface ____ {
/**
* Returns the key selector which determines the key for signing the XML document. The method is called every time a
* XML document is signed.
*
* If <code>null</code> is returned the XML signature generator will throw a {@link XmlSignatureNoKeyException}.
*
* @param message the incoming message, from which you can read headers to configure the key selector, for
* example, a header could contain a private key for the key selector
* @return key selector, must not be <code>null</code>
* @throws Exception if an error occurs
*/
KeySelector getKeySelector(Message message) throws Exception;
/**
* Returns the optional key info to be incorporated into the XML signature. If <code>null</code> is returned, no key
* info element is created. You can create a key info instance via the key info factory.
* <p>
* This method will be called several times if several signatures shall be created for the XML document; for each
* signature a call is executed. This allows you to create different key info IDs.
*
* @param message incoming message, from which you can read headers, for example, there could be a header
* which contains the public key or certificate for the key info
* @param messageBody the message body as DOM node. If the message body is plain text then the node will be a
* text node. If the message body is a XML document, then the node is the root element.
* @param keyInfoFactory key info factory for creating the KeyInfo instance
* @return key info, can be <code>null</code>
* @throws Exception if an error occurs
*/
KeyInfo getKeyInfo(Message message, Node messageBody, KeyInfoFactory keyInfoFactory) throws Exception;
}
| KeyAccessor |
java | apache__maven | compat/maven-compat/src/test/java/org/apache/maven/repository/legacy/resolver/DefaultArtifactCollectorTest.java | {
"start": 38896,
"end": 44014
} | class ____ implements ArtifactMetadataSource {
private Map<String, ArtifactSpec> artifacts = new HashMap<>();
private Map<String, List<ArtifactVersion>> versions = new HashMap<>();
@Override
public ResolutionGroup retrieve(
Artifact artifact, ArtifactRepository localRepository, List<ArtifactRepository> remoteRepositories)
throws ArtifactMetadataRetrievalException {
String key = getKey(artifact);
ArtifactSpec a = artifacts.get(key);
try {
return new ResolutionGroup(
artifact,
createArtifacts(
artifactFactory, a.dependencies, artifact.getScope(), artifact.getDependencyFilter()),
Collections.emptyList());
} catch (InvalidVersionSpecificationException e) {
throw new ArtifactMetadataRetrievalException("Invalid version creating artifacts", e, artifact);
}
}
private String getKey(Artifact artifact) {
return artifact.getDependencyConflictId();
}
private Set<Artifact> createArtifacts(
ArtifactFactory artifactFactory,
Set<Artifact> dependencies,
String inheritedScope,
ArtifactFilter dependencyFilter)
throws InvalidVersionSpecificationException {
Set<Artifact> projectArtifacts = new HashSet<>();
for (Artifact d : dependencies) {
VersionRange versionRange;
if (d.getVersionRange() != null) {
versionRange = d.getVersionRange();
} else {
versionRange = VersionRange.createFromVersionSpec(d.getVersion());
}
Artifact artifact;
if (d.getScope().equals(Artifact.SCOPE_TEST) || d.getScope().equals(Artifact.SCOPE_PROVIDED)) {
/* don't call createDependencyArtifact as it'll ignore test and provided scopes */
artifact = artifactFactory.createArtifact(
d.getGroupId(), d.getArtifactId(), d.getVersion(), d.getScope(), d.getType());
} else {
artifact = artifactFactory.createDependencyArtifact(
d.getGroupId(),
d.getArtifactId(),
versionRange,
d.getType(),
d.getClassifier(),
d.getScope(),
inheritedScope,
d.isOptional());
}
if (artifact != null && (dependencyFilter == null || dependencyFilter.include(artifact))) {
artifact.setDependencyFilter(dependencyFilter);
projectArtifacts.add(artifact);
}
}
return projectArtifacts;
}
public void addArtifact(ArtifactSpec spec) {
artifacts.put(getKey(spec.artifact), spec);
String key = spec.artifact.getDependencyConflictId();
List<ArtifactVersion> artifactVersions = versions.computeIfAbsent(key, k -> new ArrayList<>());
if (spec.artifact.getVersion() != null) {
artifactVersions.add(new DefaultArtifactVersion(spec.artifact.getVersion()));
}
}
@Override
public List<ArtifactVersion> retrieveAvailableVersions(
Artifact artifact, ArtifactRepository localRepository, List<ArtifactRepository> remoteRepositories)
throws ArtifactMetadataRetrievalException {
return retrieveAvailableVersions(artifact);
}
@Override
public List<ArtifactVersion> retrieveAvailableVersionsFromDeploymentRepository(
Artifact artifact, ArtifactRepository localRepository, ArtifactRepository remoteRepository)
throws ArtifactMetadataRetrievalException {
return retrieveAvailableVersions(artifact);
}
private List<ArtifactVersion> retrieveAvailableVersions(Artifact artifact) {
List<ArtifactVersion> artifactVersions = versions.get(artifact.getDependencyConflictId());
if (artifactVersions == null) {
artifactVersions = Collections.emptyList();
}
return artifactVersions;
}
@Override
public ResolutionGroup retrieve(MetadataResolutionRequest request) throws ArtifactMetadataRetrievalException {
return retrieve(request.getArtifact(), request.getLocalRepository(), request.getRemoteRepositories());
}
@Override
public List<ArtifactVersion> retrieveAvailableVersions(MetadataResolutionRequest request)
throws ArtifactMetadataRetrievalException {
return retrieveAvailableVersions(
request.getArtifact(), request.getLocalRepository(), request.getRemoteRepositories());
}
}
}
| Source |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/annotation/MergedAnnotationsComposedOnSingleAnnotatedElementTests.java | {
"start": 8343,
"end": 8447
} | class ____ {
}
@FooCache(key = "fooKey")
@BarCache(key = "barKey")
private | ComposedPlusLocalCachesClass |
java | quarkusio__quarkus | extensions/opentelemetry/deployment/src/test/java/io/quarkus/opentelemetry/deployment/exporter/otlp/OtlpUpstreamTracerExporterErrorTest.java | {
"start": 437,
"end": 1358
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withEmptyApplication()
.overrideConfigKey("quarkus.otel.traces.exporter", "otlp")
.assertException(t -> {
assertEquals(DeploymentException.class, t.getClass());
Assertions.assertTrue(t.getMessage().contains(
"io.quarkus.runtime.configuration.ConfigurationException: " +
"OpenTelemetry exporter set to 'otlp' but upstream dependencies not found"));
});
@Inject
OpenTelemetry openTelemetry;
@Test
void testOpenTelemetryButNoBatchSpanProcessor() {
Assertions.fail("Test should not be run as deployment should fail with: " +
"OpenTelemetry exporter set to 'otlp' but upstream dependencies not found... ");
}
}
| OtlpUpstreamTracerExporterErrorTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/compositefk/OneToManyIdClassFKTest.java | {
"start": 4293,
"end": 5083
} | class ____ {
private String refNo;
private String seqNo;
private Set<Party> parties = new HashSet<>();
@Id
@Column( name = "REF_NO" )
public String getRefNo() {
return refNo;
}
public void setRefNo(String refNo) {
this.refNo = refNo;
}
@Id
@Column( name = "SEQ_NO" )
public String getSeqNo() {
return seqNo;
}
public void setSeqNo(String seqNo) {
this.seqNo = seqNo;
}
@OneToMany( fetch = FetchType.EAGER, cascade = CascadeType.ALL )
@JoinColumns( {
@JoinColumn( name = "REF_NO", referencedColumnName = "REF_NO" ),
@JoinColumn( name = "SEQ_NO", referencedColumnName = "SEQ_NO" )
} )
public Set<Party> getParties() {
return parties;
}
public void setParties(Set<Party> parties) {
this.parties = parties;
}
}
}
| Location |
java | apache__kafka | clients/src/main/java/org/apache/kafka/common/protocol/types/RawTaggedFieldWriter.java | {
"start": 1097,
"end": 2971
} | class ____ {
private static final RawTaggedFieldWriter EMPTY_WRITER =
new RawTaggedFieldWriter(new ArrayList<>(0));
private final List<RawTaggedField> fields;
private final ListIterator<RawTaggedField> iter;
private int prevTag;
public static RawTaggedFieldWriter forFields(List<RawTaggedField> fields) {
if (fields == null) {
return EMPTY_WRITER;
}
return new RawTaggedFieldWriter(fields);
}
private RawTaggedFieldWriter(List<RawTaggedField> fields) {
this.fields = fields;
this.iter = this.fields.listIterator();
this.prevTag = -1;
}
public int numFields() {
return fields.size();
}
public void writeRawTags(Writable writable, int nextDefinedTag) {
while (iter.hasNext()) {
RawTaggedField field = iter.next();
int tag = field.tag();
if (tag >= nextDefinedTag) {
if (tag == nextDefinedTag) {
// We must not have a raw tag field that duplicates the tag of another field.
throw new RuntimeException("Attempted to use tag " + tag + " as an " +
"undefined tag.");
}
iter.previous();
return;
}
if (tag <= prevTag) {
// The raw tag field list must be sorted by tag, and there must not be
// any duplicate tags.
throw new RuntimeException("Invalid raw tag field list: tag " + tag +
" comes after tag " + prevTag + ", but is not higher than it.");
}
writable.writeUnsignedVarint(field.tag());
writable.writeUnsignedVarint(field.data().length);
writable.writeByteArray(field.data());
prevTag = tag;
}
}
}
| RawTaggedFieldWriter |
java | quarkusio__quarkus | extensions/qute/deployment/src/test/java/io/quarkus/qute/deployment/i18n/DefaultFileDuplicateFoundTest.java | {
"start": 611,
"end": 1895
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot(root -> root
.addAsResource(new StringAsset("hi=Nazdar!"), "messages/msg.properties")
.addAsResource(new StringAsset("hi=Ahoj!"), "messages/msg_cs.properties"))
.overrideConfigKey("quarkus.default-locale", "cs")
.assertException(t -> {
Throwable e = t;
MessageBundleException mbe = null;
while (e != null) {
if (e instanceof MessageBundleException) {
mbe = (MessageBundleException) e;
break;
}
e = e.getCause();
}
assertNotNull(mbe);
assertTrue(mbe.getMessage().contains("Duplicate localized files with priority 10 found"), mbe.getMessage());
assertTrue(mbe.getMessage().contains("msg_cs.properties"), mbe.getMessage());
assertTrue(mbe.getMessage().contains("msg.properties"), mbe.getMessage());
});
@Test
public void testValidation() {
fail();
}
@MessageBundle(DEFAULT_NAME)
public | DefaultFileDuplicateFoundTest |
java | apache__flink | flink-core/src/main/java/org/apache/flink/core/failure/FailureEnricher.java | {
"start": 2628,
"end": 4340
} | enum ____ {
/**
* The failure has occurred in the scheduler context and can't be tracked back to a
* particular task.
*/
GLOBAL,
/** The failure has been reported by a particular task. */
TASK,
/**
* The TaskManager has non-gracefully disconnected from the JobMaster or we have not
* received heartbeats for the {@link
* org.apache.flink.configuration.HeartbeatManagerOptions#HEARTBEAT_INTERVAL configured
* timeout}.
*/
TASK_MANAGER
}
/**
* Get the metric group of the JobMaster.
*
* @return the metric group of the JobMaster
*/
MetricGroup getMetricGroup();
/**
* Return the type of the failure e.g., global failure that happened in the scheduler
* context.
*
* @return FailureType
*/
FailureType getFailureType();
/**
* Get the user {@link ClassLoader} used for code generation, UDF loading and other
* operations requiring reflections on user code.
*
* @return the user ClassLoader
*/
ClassLoader getUserClassLoader();
/**
* Get an Executor pool for the Enrichers to run async operations that can potentially be
* IO-heavy.
*
* @return the Executor pool
*/
Executor getIOExecutor();
/**
* Get the meta information of current job.
*
* @return the job meta information.
*/
@PublicEvolving
JobInfo getJobInfo();
}
}
| FailureType |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/params/ParameterizedTestIntegrationTests.java | {
"start": 75602,
"end": 75772
} | class ____ {
@Target(ElementType.METHOD)
@Retention(RUNTIME)
@ParameterizedTest(quoteTextArguments = false, name = "{arguments}")
@FieldSource
@ | FieldSourceTestCase |
java | micronaut-projects__micronaut-core | test-suite/src/test/java/io/micronaut/docs/ioc/visitor/VersionedIntrospected.java | {
"start": 783,
"end": 1062
} | class ____ {
@Version("1.0")
private String foo;
public String getFoo() {
return foo;
}
public void setFoo(String foo) {
this.foo = foo;
}
@Version("1.0")
public String getOther() {
return foo;
}
}
| VersionedIntrospected |
java | resilience4j__resilience4j | resilience4j-bulkhead/src/test/java/io/github/resilience4j/bulkhead/ThreadPoolBulkheadRegistryTest.java | {
"start": 9968,
"end": 16463
} | class ____ implements
RegistryEventConsumer<ThreadPoolBulkhead> {
@Override
public void onEntryAddedEvent(EntryAddedEvent<ThreadPoolBulkhead> entryAddedEvent) {
}
@Override
public void onEntryRemovedEvent(EntryRemovedEvent<ThreadPoolBulkhead> entryRemoveEvent) {
}
@Override
public void onEntryReplacedEvent(
EntryReplacedEvent<ThreadPoolBulkhead> entryReplacedEvent) {
}
}
@Test
public void testCreateUsingBuilderWithDefaultConfig() {
ThreadPoolBulkheadRegistry threadPoolBulkheadRegistry =
ThreadPoolBulkheadRegistry.custom().withThreadPoolBulkheadConfig(ThreadPoolBulkheadConfig.ofDefaults()).build();
ThreadPoolBulkhead threadPoolBulkhead = threadPoolBulkheadRegistry.bulkhead("testName");
ThreadPoolBulkhead threadPoolBulkhead2 = threadPoolBulkheadRegistry.bulkhead("otherTestName");
assertThat(threadPoolBulkhead).isNotSameAs(threadPoolBulkhead2);
assertThat(threadPoolBulkheadRegistry.getAllBulkheads()).hasSize(2);
}
@Test
public void testCreateUsingBuilderWithCustomConfig() {
int maxThreadPoolSize = 100;
ThreadPoolBulkheadConfig threadPoolBulkheadConfig = ThreadPoolBulkheadConfig.custom()
.maxThreadPoolSize(maxThreadPoolSize).build();
ThreadPoolBulkheadRegistry threadPoolBulkheadRegistry =
ThreadPoolBulkheadRegistry.custom().withThreadPoolBulkheadConfig(threadPoolBulkheadConfig).build();
ThreadPoolBulkhead threadPoolBulkhead = threadPoolBulkheadRegistry.bulkhead("testName");
assertThat(threadPoolBulkhead.getBulkheadConfig().getMaxThreadPoolSize())
.isEqualTo(maxThreadPoolSize);
}
@Test
public void testCreateUsingBuilderWithoutDefaultConfig() {
int maxThreadPoolSize = 100;
ThreadPoolBulkheadConfig threadPoolBulkheadConfig = ThreadPoolBulkheadConfig.custom()
.maxThreadPoolSize(maxThreadPoolSize).build();
ThreadPoolBulkheadRegistry threadPoolBulkheadRegistry =
ThreadPoolBulkheadRegistry.custom().addThreadPoolBulkheadConfig("someSharedConfig", threadPoolBulkheadConfig).build();
assertThat(threadPoolBulkheadRegistry.getDefaultConfig()).isNotNull();
assertThat(threadPoolBulkheadRegistry.getDefaultConfig().getMaxThreadPoolSize())
.isEqualTo(Runtime.getRuntime().availableProcessors());
assertThat(threadPoolBulkheadRegistry.getConfiguration("someSharedConfig")).isNotEmpty();
ThreadPoolBulkhead threadPoolBulkhead = threadPoolBulkheadRegistry
.bulkhead("name", "someSharedConfig");
assertThat(threadPoolBulkhead.getBulkheadConfig()).isEqualTo(threadPoolBulkheadConfig);
assertThat(threadPoolBulkhead.getBulkheadConfig().getMaxThreadPoolSize())
.isEqualTo(maxThreadPoolSize);
}
@Test(expected = IllegalArgumentException.class)
public void testAddMultipleDefaultConfigUsingBuilderShouldThrowException() {
ThreadPoolBulkheadConfig threadPoolBulkheadConfig = ThreadPoolBulkheadConfig.custom()
.maxThreadPoolSize(100).build();
ThreadPoolBulkheadRegistry.custom().addThreadPoolBulkheadConfig("default", threadPoolBulkheadConfig).build();
}
@Test
public void testCreateUsingBuilderWithDefaultAndCustomConfig() {
ThreadPoolBulkheadConfig threadPoolBulkheadConfig = ThreadPoolBulkheadConfig.custom()
.maxThreadPoolSize(100).build();
ThreadPoolBulkheadConfig customThreadPoolBulkheadConfig = ThreadPoolBulkheadConfig.custom()
.maxThreadPoolSize(200).build();
ThreadPoolBulkheadRegistry threadPoolBulkheadRegistry = ThreadPoolBulkheadRegistry.custom()
.withThreadPoolBulkheadConfig(threadPoolBulkheadConfig)
.addThreadPoolBulkheadConfig("custom", customThreadPoolBulkheadConfig)
.build();
assertThat(threadPoolBulkheadRegistry.getDefaultConfig()).isNotNull();
assertThat(threadPoolBulkheadRegistry.getDefaultConfig().getMaxThreadPoolSize())
.isEqualTo(100);
assertThat(threadPoolBulkheadRegistry.getConfiguration("custom")).isNotEmpty();
}
@Test
public void testCreateUsingBuilderWithNullConfig() {
assertThatThrownBy(
() -> ThreadPoolBulkheadRegistry.custom().withThreadPoolBulkheadConfig(null).build())
.isInstanceOf(NullPointerException.class).hasMessage("Config must not be null");
}
@Test
public void testCreateUsingBuilderWithMultipleRegistryEventConsumer() {
ThreadPoolBulkheadRegistry threadPoolBulkheadRegistry = ThreadPoolBulkheadRegistry.custom()
.withThreadPoolBulkheadConfig(ThreadPoolBulkheadConfig.ofDefaults())
.addRegistryEventConsumer(new NoOpThreadPoolBulkheadEventConsumer())
.addRegistryEventConsumer(new NoOpThreadPoolBulkheadEventConsumer())
.build();
getEventProcessor(threadPoolBulkheadRegistry.getEventPublisher())
.ifPresent(eventProcessor -> assertThat(eventProcessor.hasConsumers()).isTrue());
}
@Test
public void testCreateUsingBuilderWithRegistryTags() {
Map<String, String> threadPoolBulkheadTags = Map.of("key1", "value1", "key2", "value2");
ThreadPoolBulkheadRegistry threadPoolBulkheadRegistry = ThreadPoolBulkheadRegistry.custom()
.withThreadPoolBulkheadConfig(ThreadPoolBulkheadConfig.ofDefaults())
.withTags(threadPoolBulkheadTags)
.build();
ThreadPoolBulkhead threadPoolBulkhead = threadPoolBulkheadRegistry.bulkhead("testName");
assertThat(threadPoolBulkhead.getTags()).containsAllEntriesOf(threadPoolBulkheadTags);
}
@Test
public void testCreateUsingBuilderWithRegistryStore() {
ThreadPoolBulkheadRegistry threadPoolBulkheadRegistry = ThreadPoolBulkheadRegistry.custom()
.withThreadPoolBulkheadConfig(ThreadPoolBulkheadConfig.ofDefaults())
.withRegistryStore(new InMemoryRegistryStore<>())
.build();
ThreadPoolBulkhead threadPoolBulkhead = threadPoolBulkheadRegistry.bulkhead("testName");
ThreadPoolBulkhead threadPoolBulkhead2 = threadPoolBulkheadRegistry.bulkhead("otherTestName");
assertThat(threadPoolBulkhead).isNotSameAs(threadPoolBulkhead2);
assertThat(threadPoolBulkheadRegistry.getAllBulkheads()).hasSize(2);
}
}
| NoOpThreadPoolBulkheadEventConsumer |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/batch/CompositeIdAndElementCollectionBatchingTest.java | {
"start": 3151,
"end": 3300
} | class ____ {
@Id
private Long id;
public EntityB() {
}
public EntityB(Long id) {
this.id = id;
}
}
@Embeddable
public static | EntityB |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/NoAllocationCheckerTest.java | {
"start": 16737,
"end": 16877
} | interface ____ {
@NoAllocation
void method();
}
public static | NoAllocationInterface |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java | {
"start": 57382,
"end": 64705
} | enum ____ {
UNKNOWN,
STARTED,
SUCCESS;
@Override
public String toString() {
return name().toLowerCase(Locale.ROOT);
}
}
public static final Setting<DownsampleTaskStatus> INDEX_DOWNSAMPLE_STATUS = Setting.enumSetting(
DownsampleTaskStatus.class,
INDEX_DOWNSAMPLE_STATUS_KEY,
DownsampleTaskStatus.UNKNOWN,
Property.IndexScope,
Property.InternalIndex
);
public static final Setting<String> INDEX_DOWNSAMPLE_INTERVAL = Setting.simpleString(
INDEX_DOWNSAMPLE_INTERVAL_KEY,
Property.IndexScope,
Property.InternalIndex
);
public static final Setting<String> INDEX_DOWNSAMPLE_METHOD = Setting.simpleString(
INDEX_DOWNSAMPLE_METHOD_KEY,
Property.IndexScope,
Property.InternalIndex
);
// LIFECYCLE_NAME is here an as optimization, see LifecycleSettings.LIFECYCLE_NAME and
// LifecycleSettings.LIFECYCLE_NAME_SETTING for the 'real' version
public static final String LIFECYCLE_NAME = "index.lifecycle.name";
Map<String, DiffableStringMap> getCustomData() {
return this.customData;
}
public Map<String, String> getCustomData(final String key) {
return this.customData.get(key);
}
public Map<Integer, Set<String>> getInSyncAllocationIds() {
return inSyncAllocationIds;
}
public Map<String, RolloverInfo> getRolloverInfos() {
return rolloverInfos;
}
public Set<String> inSyncAllocationIds(int shardId) {
assert shardId >= 0 && shardId < numberOfShards;
return inSyncAllocationIds.get(shardId);
}
@Nullable
public DiscoveryNodeFilters requireFilters() {
return requireFilters;
}
@Nullable
public DiscoveryNodeFilters getInitialRecoveryFilters() {
return initialRecoveryFilters;
}
@Nullable
public DiscoveryNodeFilters includeFilters() {
return includeFilters;
}
@Nullable
public DiscoveryNodeFilters excludeFilters() {
return excludeFilters;
}
public IndexLongFieldRange getTimestampRange() {
return timestampRange;
}
public IndexLongFieldRange getEventIngestedRange() {
return eventIngestedRange;
}
/**
* @return whether this index has a time series timestamp range
*/
public boolean hasTimeSeriesTimestampRange() {
return indexMode != null && indexMode.getTimestampBound(this) != null;
}
/**
* @param dateFieldType the date field type of '@timestamp' field which is
* used to convert the start and end times recorded in index metadata
* to the right format that is being used by '@timestamp' field.
* For example, the '@timestamp' can be configured with nanosecond precision.
* @return the time range this index represents if this index is in time series mode.
* Otherwise <code>null</code> is returned.
*/
@Nullable
public IndexLongFieldRange getTimeSeriesTimestampRange(DateFieldMapper.DateFieldType dateFieldType) {
var bounds = indexMode != null ? indexMode.getTimestampBound(this) : null;
if (bounds != null) {
long start = dateFieldType.resolution().convert(Instant.ofEpochMilli(bounds.startTime()));
long end = dateFieldType.resolution().convert(Instant.ofEpochMilli(bounds.endTime()));
return IndexLongFieldRange.NO_SHARDS.extendWithShardRange(0, 1, ShardLongFieldRange.of(start, end));
} else {
return null;
}
}
@Nullable
public IndexReshardingMetadata getReshardingMetadata() {
return reshardingMetadata;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
IndexMetadata that = (IndexMetadata) o;
if (version != that.version) {
return false;
}
if (aliases.equals(that.aliases) == false) {
return false;
}
if (index.equals(that.index) == false) {
return false;
}
if (Objects.equals(mapping, that.mapping) == false) {
return false;
}
if (settings.equals(that.settings) == false) {
return false;
}
if (state != that.state) {
return false;
}
if (customData.equals(that.customData) == false) {
return false;
}
if (routingNumShards != that.routingNumShards) {
return false;
}
if (routingFactor != that.routingFactor) {
return false;
}
if (Arrays.equals(primaryTerms, that.primaryTerms) == false) {
return false;
}
if (inSyncAllocationIds.equals(that.inSyncAllocationIds) == false) {
return false;
}
if (rolloverInfos.equals(that.rolloverInfos) == false) {
return false;
}
if (inferenceFields.equals(that.inferenceFields) == false) {
return false;
}
if (isSystem != that.isSystem) {
return false;
}
if (Objects.equals(reshardingMetadata, that.reshardingMetadata) == false) {
return false;
}
return true;
}
@Override
public int hashCode() {
int result = index.hashCode();
result = 31 * result + Long.hashCode(version);
result = 31 * result + state.hashCode();
result = 31 * result + aliases.hashCode();
result = 31 * result + settings.hashCode();
result = 31 * result + Objects.hash(mapping);
result = 31 * result + customData.hashCode();
result = 31 * result + Long.hashCode(routingFactor);
result = 31 * result + Long.hashCode(routingNumShards);
result = 31 * result + Arrays.hashCode(primaryTerms);
result = 31 * result + inSyncAllocationIds.hashCode();
result = 31 * result + rolloverInfos.hashCode();
result = 31 * result + inferenceFields.hashCode();
result = 31 * result + Boolean.hashCode(isSystem);
result = 31 * result + Objects.hashCode(reshardingMetadata);
return result;
}
@Override
public Diff<IndexMetadata> diff(IndexMetadata previousState) {
return new IndexMetadataDiff(previousState, this);
}
public static Diff<IndexMetadata> readDiffFrom(StreamInput in) throws IOException {
return new IndexMetadataDiff(in);
}
public static IndexMetadata fromXContent(XContentParser parser) throws IOException {
return Builder.fromXContent(parser);
}
public static IndexMetadata fromXContent(XContentParser parser, Map<String, MappingMetadata> mappingsByHash) throws IOException {
return Builder.fromXContent(parser, mappingsByHash);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
Builder.toXContent(this, builder, params);
return builder;
}
private static final TransportVersion SETTING_DIFF_VERSION = TransportVersions.V_8_5_0;
private static | DownsampleTaskStatus |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/UseEnumSwitchTest.java | {
"start": 3080,
"end": 3540
} | enum ____ implements A {
ONE,
TWO,
THREE
}
int f(A e) {
if (e.equals(E.ONE)) {
return 1;
} else if (e.equals(E.TWO)) {
return 2;
} else {
return 3;
}
}
}
""")
.expectUnchanged()
.doTest(TestMode.TEXT_MATCH);
}
}
| E |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/io/network/api/EventAnnouncement.java | {
"start": 1301,
"end": 3433
} | class ____ extends RuntimeEvent {
private final AbstractEvent announcedEvent;
private final int sequenceNumber;
public EventAnnouncement(AbstractEvent announcedEvent, int sequenceNumber) {
this.announcedEvent = announcedEvent;
this.sequenceNumber = sequenceNumber;
}
public AbstractEvent getAnnouncedEvent() {
return announcedEvent;
}
public int getSequenceNumber() {
return sequenceNumber;
}
// ------------------------------------------------------------------------
// Serialization
// ------------------------------------------------------------------------
//
// These methods are inherited form the generic serialization of AbstractEvent
// but would require the CheckpointBarrier to be mutable. Since all serialization
// for events goes through the EventSerializer class, which has special serialization
// for the CheckpointBarrier, we don't need these methods
//
@Override
public void write(DataOutputView out) throws IOException {
throw new UnsupportedOperationException("This method should never be called");
}
@Override
public void read(DataInputView in) throws IOException {
throw new UnsupportedOperationException("This method should never be called");
}
// ------------------------------------------------------------------------
@Override
public int hashCode() {
return Objects.hash(announcedEvent, sequenceNumber);
}
@Override
public boolean equals(Object other) {
if (other == this) {
return true;
} else if (other == null || other.getClass() != EventAnnouncement.class) {
return false;
} else {
EventAnnouncement that = (EventAnnouncement) other;
return Objects.equals(this.announcedEvent, that.announcedEvent)
&& this.sequenceNumber == that.sequenceNumber;
}
}
@Override
public String toString() {
return String.format("Announcement of [%s] at %d", announcedEvent, sequenceNumber);
}
}
| EventAnnouncement |
java | spring-projects__spring-boot | module/spring-boot-persistence/src/test/java/org/springframework/boot/persistence/autoconfigure/EntityScannerTests.java | {
"start": 7066,
"end": 7187
} | class ____ {
}
@Configuration(proxyBeanMethods = false)
@EntityScan("${com.example.entity-package}")
static | ScanBConfig |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/idmanytoone/Card.java | {
"start": 512,
"end": 793
} | class ____ {
@Id
private CardPrimaryKey primaryKey = new CardPrimaryKey();
@OneToMany(cascade = CascadeType.ALL, fetch = FetchType.EAGER, mappedBy = "primaryKey.card")
private Set<CardField> fields;
@ManyToOne
private CardField mainCardField;
@Embeddable
public static | Card |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/engine/extension/TempDirectoryTests.java | {
"start": 37407,
"end": 41393
} | class ____ {
@Test
void makeEmptyTempDirectoryNonExecutable(@TempDir Path tempDir) {
tempDir.toFile().setExecutable(false);
}
@Test
void makeTempDirectoryWithFileNonExecutable(@TempDir Path tempDir) throws IOException {
Files.createFile(tempDir.resolve("test-file.txt"));
tempDir.toFile().setExecutable(false);
}
@Test
void makeTempDirectoryWithEmptyFolderNonExecutable(@TempDir Path tempDir) throws IOException {
Files.createDirectory(tempDir.resolve("test-sub-dir"));
tempDir.toFile().setExecutable(false);
}
@Test
void makeTempDirectoryWithNonWritableFileNonExecutable(@TempDir Path tempDir) throws IOException {
Files.createFile(tempDir.resolve("test-file.txt")).toFile().setWritable(false);
tempDir.toFile().setExecutable(false);
}
@Test
void makeTempDirectoryWithNonReadableFileNonExecutable(@TempDir Path tempDir) throws IOException {
Files.createFile(tempDir.resolve("test-file.txt")).toFile().setReadable(false);
tempDir.toFile().setExecutable(false);
}
@Test
void makeTempDirectoryWithNonWritableFolderNonExecutable(@TempDir Path tempDir) throws IOException {
Files.createDirectory(tempDir.resolve("test-sub-dir")).toFile().setWritable(false);
tempDir.toFile().setExecutable(false);
}
@Test
void makeTempDirectoryWithNonReadableFolderNonExecutable(@TempDir Path tempDir) throws IOException {
Files.createDirectory(tempDir.resolve("test-sub-dir")).toFile().setReadable(false);
tempDir.toFile().setExecutable(false);
}
@Test
void makeTempDirectoryWithNonExecutableFolderNonExecutable(@TempDir Path tempDir) throws IOException {
Files.createDirectory(tempDir.resolve("test-sub-dir")).toFile().setExecutable(false);
tempDir.toFile().setExecutable(false);
}
@Test
void makeTempDirectoryWithNonEmptyNonWritableFolderNonExecutable(@TempDir Path tempDir) throws IOException {
Path subDir = Files.createDirectory(tempDir.resolve("test-sub-dir"));
Files.createFile(tempDir.resolve("test-sub-dir/test-file.txt"));
subDir.toFile().setWritable(false);
tempDir.toFile().setExecutable(false);
}
@Test
void makeTempDirectoryWithNonEmptyNonReadableFolderNonExecutable(@TempDir Path tempDir) throws IOException {
Path subDir = Files.createDirectory(tempDir.resolve("test-sub-dir"));
Files.createFile(tempDir.resolve("test-sub-dir/test-file.txt"));
subDir.toFile().setReadable(false);
tempDir.toFile().setExecutable(false);
}
@Test
void makeTempDirectoryWithNonEmptyNonExecutableFolderNonExecutable(@TempDir Path tempDir)
throws IOException {
Path subDir = Files.createDirectory(tempDir.resolve("test-sub-dir"));
Files.createFile(tempDir.resolve("test-sub-dir/test-file.txt"));
subDir.toFile().setExecutable(false);
tempDir.toFile().setExecutable(false);
}
@Test
void makeTempDirectoryWithNonEmptyFolderNonExecutable(@TempDir Path tempDir) throws IOException {
Files.createDirectory(tempDir.resolve("test-sub-dir"));
Files.createFile(tempDir.resolve("test-sub-dir/test-file.txt"));
tempDir.toFile().setExecutable(false);
}
@Test
void makeTempDirectoryWithNonEmptyFolderContainingNonWritableFileNonExecutable(@TempDir Path tempDir)
throws IOException {
Files.createDirectory(tempDir.resolve("test-sub-dir"));
Files.createFile(tempDir.resolve("test-sub-dir/test-file.txt")).toFile().setWritable(false);
tempDir.toFile().setExecutable(false);
}
@Test
void makeTempDirectoryWithNonEmptyFolderContainingNonReadableFileNonExecutable(@TempDir Path tempDir)
throws IOException {
Files.createDirectory(tempDir.resolve("test-sub-dir"));
Files.createFile(tempDir.resolve("test-sub-dir/test-file.txt")).toFile().setReadable(false);
tempDir.toFile().setExecutable(false);
}
}
}
// https://github.com/junit-team/junit-framework/issues/2079
@SuppressWarnings("JUnitMalformedDeclaration")
static | NonExecutable |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/CustomDeserializersTest.java | {
"start": 4827,
"end": 4964
} | class ____ {
protected Bean375Inner inner;
public Bean375Outer(Bean375Inner v) { inner = v; }
}
static | Bean375Outer |
java | spring-projects__spring-security | test/src/main/java/org/springframework/security/test/web/servlet/request/SecurityMockMvcRequestBuilders.java | {
"start": 1640,
"end": 3135
} | class ____ {
private SecurityMockMvcRequestBuilders() {
}
/**
* Creates a request (including any necessary {@link CsrfToken}) that will submit a
* form based login to POST "/login".
* @return the FormLoginRequestBuilder for further customizations
*/
public static FormLoginRequestBuilder formLogin() {
return new FormLoginRequestBuilder();
}
/**
* Creates a request (including any necessary {@link CsrfToken}) that will submit a
* form based login to POST {@code loginProcessingUrl}.
* @param loginProcessingUrl the URL to POST to
* @return the FormLoginRequestBuilder for further customizations
*/
public static FormLoginRequestBuilder formLogin(String loginProcessingUrl) {
return formLogin().loginProcessingUrl(loginProcessingUrl);
}
/**
* Creates a logout request.
* @return the LogoutRequestBuilder for additional customizations
*/
public static LogoutRequestBuilder logout() {
return new LogoutRequestBuilder();
}
/**
* Creates a logout request (including any necessary {@link CsrfToken}) to the
* specified {@code logoutUrl}
* @param logoutUrl the logout request URL
* @return the LogoutRequestBuilder for additional customizations
*/
public static LogoutRequestBuilder logout(String logoutUrl) {
return new LogoutRequestBuilder().logoutUrl(logoutUrl);
}
/**
* Creates a logout request (including any necessary {@link CsrfToken})
*
* @author Rob Winch
* @since 4.0
*/
public static final | SecurityMockMvcRequestBuilders |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/customproviders/ConditionalBeanFiltersTest.java | {
"start": 1305,
"end": 2419
} | class ____ {
@RegisterExtension
static QuarkusUnitTest test = new QuarkusUnitTest()
.setArchiveProducer(new Supplier<>() {
@Override
public JavaArchive get() {
return ShrinkWrap.create(JavaArchive.class)
.addClasses(WontBeEnabledFilter.class, WillBeEnabledFilter.class, AlwaysEnabledFilter.class,
TestResource.class);
}
});
@Test
public void testExpectedFilters() {
List<String> responseFiltersValues = get("/test/filters")
.then().statusCode(200)
.body(Matchers.is("void-on,response-on,uni-on,void-lookup-on,always"))
.extract()
.headers()
.getList("response-filters")
.stream()
.map(Header::getValue)
.collect(Collectors.toList());
assertThat(responseFiltersValues).containsOnly("always", "void-lookup-on", "void-on", "uni-on");
}
@Path("test")
public static | ConditionalBeanFiltersTest |
java | quarkusio__quarkus | devtools/project-core-extension-codestarts/src/main/resources/codestarts/quarkus/examples/google-cloud-functions-example/java/src/test/java/org/acme/googlecloudfunctions/HelloWorldHttpFunctionTest.java | {
"start": 372,
"end": 586
} | class ____ {
@Test
void testService() {
when()
.get()
.then()
.statusCode(200)
.body(is("Hello World"));
}
} | HelloWorldHttpFunctionTest |
java | junit-team__junit5 | platform-tests/src/test/java/org/junit/platform/suite/engine/SuiteLauncherDiscoveryRequestBuilderTests.java | {
"start": 12237,
"end": 12528
} | class ____ {
@SuppressWarnings("unused")
void testMethod() {
}
}
@TestFactory
Stream<DynamicTest> selectOneMethodWithOneParameter() {
@SelectMethod("org.junit.platform.suite.engine.SuiteLauncherDiscoveryRequestBuilderTests$OneParameterTestCase#testMethod(int)")
| NoParameterTestCase |
java | quarkusio__quarkus | extensions/vertx-http/deployment/src/main/java/io/quarkus/vertx/http/deployment/HttpSecurityProcessor.java | {
"start": 51998,
"end": 52510
} | class ____ implements BooleanSupplier {
private final boolean required;
public IsApplicationBasicAuthRequired(VertxHttpBuildTimeConfig httpBuildTimeConfig,
ManagementInterfaceBuildTimeConfig managementBuildTimeConfig) {
required = applicationBasicAuthRequired(httpBuildTimeConfig, managementBuildTimeConfig);
}
@Override
public boolean getAsBoolean() {
return required;
}
}
static final | IsApplicationBasicAuthRequired |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java | {
"start": 7818,
"end": 27974
} | class ____ {
private static final Logger LOG = LoggerFactory.getLogger(DfsClientConf
.class);
private final int hdfsTimeout; // timeout value for a DFS operation.
private final int maxFailoverAttempts;
private final int maxRetryAttempts;
private final int maxPipelineRecoveryRetries;
private final int failoverSleepBaseMillis;
private final int failoverSleepMaxMillis;
private final int maxBlockAcquireFailures;
private final int datanodeSocketWriteTimeout;
private final int ioBufferSize;
private final ChecksumOpt defaultChecksumOpt;
private final ChecksumCombineMode checksumCombineMode;
private final int checksumEcSocketTimeout;
private final int writePacketSize;
private final int writeMaxPackets;
private final ByteArrayManager.Conf writeByteArrayManagerConf;
private final int socketTimeout;
private final int socketSendBufferSize;
private final long excludedNodesCacheExpiry;
/** Wait time window (in msec) if BlockMissingException is caught. */
private final int timeWindow;
private final int numCachedConnRetry;
private final int numBlockWriteRetry;
private final int numBlockWriteLocateFollowingRetry;
private final int blockWriteLocateFollowingInitialDelayMs;
private final int blockWriteLocateFollowingMaxDelayMs;
private final long defaultBlockSize;
private final long prefetchSize;
private final boolean uriCacheEnabled;
private final short defaultReplication;
private final String taskId;
private final FsPermission uMask;
private final boolean connectToDnViaHostname;
private final int retryTimesForGetLastBlockLength;
private final int retryIntervalForGetLastBlockLength;
private final long datanodeRestartTimeout;
private final long slowIoWarningThresholdMs;
private final int markSlowNodeAsBadNodeThreshold;
/** wait time window before refreshing blocklocation for inputstream. */
private final long refreshReadBlockLocationsMS;
private final boolean refreshReadBlockLocationsAutomatically;
private final ShortCircuitConf shortCircuitConf;
private final int clientShortCircuitNum;
private final long hedgedReadThresholdMillis;
private final int hedgedReadThreadpoolSize;
private final List<Class<? extends ReplicaAccessorBuilder>>
replicaAccessorBuilderClasses;
private final int stripedReadThreadpoolSize;
private final boolean dataTransferTcpNoDelay;
private final boolean readUseCachePriority;
private final boolean deadNodeDetectionEnabled;
private final long leaseHardLimitPeriod;
private final boolean recoverLeaseOnCloseException;
public DfsClientConf(Configuration conf) {
// The hdfsTimeout is currently the same as the ipc timeout
hdfsTimeout = Client.getRpcTimeout(conf);
maxRetryAttempts = conf.getInt(
Retry.MAX_ATTEMPTS_KEY,
Retry.MAX_ATTEMPTS_DEFAULT);
timeWindow = conf.getInt(
Retry.WINDOW_BASE_KEY,
Retry.WINDOW_BASE_DEFAULT);
retryTimesForGetLastBlockLength = conf.getInt(
Retry.TIMES_GET_LAST_BLOCK_LENGTH_KEY,
Retry.TIMES_GET_LAST_BLOCK_LENGTH_DEFAULT);
retryIntervalForGetLastBlockLength = conf.getInt(
Retry.INTERVAL_GET_LAST_BLOCK_LENGTH_KEY,
Retry.INTERVAL_GET_LAST_BLOCK_LENGTH_DEFAULT);
maxFailoverAttempts = conf.getInt(
Failover.MAX_ATTEMPTS_KEY,
Failover.MAX_ATTEMPTS_DEFAULT);
failoverSleepBaseMillis = conf.getInt(
Failover.SLEEPTIME_BASE_KEY,
Failover.SLEEPTIME_BASE_DEFAULT);
failoverSleepMaxMillis = conf.getInt(
Failover.SLEEPTIME_MAX_KEY,
Failover.SLEEPTIME_MAX_DEFAULT);
maxBlockAcquireFailures = conf.getInt(
DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT);
datanodeSocketWriteTimeout = conf.getInt(
DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
HdfsConstants.WRITE_TIMEOUT);
ioBufferSize = conf.getInt(
CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
defaultChecksumOpt = getChecksumOptFromConf(conf);
checksumCombineMode = getChecksumCombineModeFromConf(conf);
checksumEcSocketTimeout = conf.getInt(DFS_CHECKSUM_EC_SOCKET_TIMEOUT_KEY,
DFS_CHECKSUM_EC_SOCKET_TIMEOUT_DEFAULT);
dataTransferTcpNoDelay = conf.getBoolean(
DFS_DATA_TRANSFER_CLIENT_TCPNODELAY_KEY,
DFS_DATA_TRANSFER_CLIENT_TCPNODELAY_DEFAULT);
socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
HdfsConstants.READ_TIMEOUT);
socketSendBufferSize = conf.getInt(DFS_CLIENT_SOCKET_SEND_BUFFER_SIZE_KEY,
DFS_CLIENT_SOCKET_SEND_BUFFER_SIZE_DEFAULT);
/** dfs.write.packet.size is an internal config variable */
writePacketSize = conf.getInt(
DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
writeMaxPackets = conf.getInt(
Write.MAX_PACKETS_IN_FLIGHT_KEY,
Write.MAX_PACKETS_IN_FLIGHT_DEFAULT);
writeByteArrayManagerConf = loadWriteByteArrayManagerConf(conf);
defaultBlockSize = conf.getLongBytes(DFS_BLOCK_SIZE_KEY,
DFS_BLOCK_SIZE_DEFAULT);
defaultReplication = (short) conf.getInt(
DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT);
taskId = conf.get("mapreduce.task.attempt.id", "NONMAPREDUCE");
excludedNodesCacheExpiry = conf.getLong(
Write.EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_KEY,
Write.EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_DEFAULT);
prefetchSize = conf.getLong(Read.PREFETCH_SIZE_KEY,
10 * defaultBlockSize);
uriCacheEnabled = conf.getBoolean(Read.URI_CACHE_KEY,
Read.URI_CACHE_DEFAULT);
numCachedConnRetry = conf.getInt(DFS_CLIENT_CACHED_CONN_RETRY_KEY,
DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT);
numBlockWriteRetry = conf.getInt(
BlockWrite.RETRIES_KEY,
BlockWrite.RETRIES_DEFAULT);
numBlockWriteLocateFollowingRetry = conf.getInt(
BlockWrite.LOCATEFOLLOWINGBLOCK_RETRIES_KEY,
BlockWrite.LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT);
blockWriteLocateFollowingInitialDelayMs = conf.getInt(
BlockWrite.LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_MS_KEY,
BlockWrite.LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_MS_DEFAULT);
blockWriteLocateFollowingMaxDelayMs = conf.getInt(
BlockWrite.LOCATEFOLLOWINGBLOCK_MAX_DELAY_MS_KEY,
BlockWrite.LOCATEFOLLOWINGBLOCK_MAX_DELAY_MS_DEFAULT);
uMask = FsPermission.getUMask(conf);
connectToDnViaHostname = conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME,
DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
datanodeRestartTimeout = conf.getTimeDuration(
DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY,
DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT,
TimeUnit.SECONDS, TimeUnit.MILLISECONDS);
slowIoWarningThresholdMs = conf.getLong(
DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY,
DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT);
readUseCachePriority = conf.getBoolean(DFS_CLIENT_READ_USE_CACHE_PRIORITY,
DFS_CLIENT_READ_USE_CACHE_PRIORITY_DEFAULT);
markSlowNodeAsBadNodeThreshold = conf.getInt(
DFS_CLIENT_MARK_SLOWNODE_AS_BADNODE_THRESHOLD_KEY,
DFS_CLIENT_MARK_SLOWNODE_AS_BADNODE_THRESHOLD_DEFAULT);
refreshReadBlockLocationsMS = conf.getLong(
HdfsClientConfigKeys.DFS_CLIENT_REFRESH_READ_BLOCK_LOCATIONS_MS_KEY,
HdfsClientConfigKeys.
DFS_CLIENT_REFRESH_READ_BLOCK_LOCATIONS_MS_DEFAULT);
refreshReadBlockLocationsAutomatically = conf.getBoolean(
HdfsClientConfigKeys.DFS_CLIENT_REFRESH_READ_BLOCK_LOCATIONS_AUTOMATICALLY_KEY,
HdfsClientConfigKeys.DFS_CLIENT_REFRESH_READ_BLOCK_LOCATIONS_AUTOMATICALLY_DEFAULT);
hedgedReadThresholdMillis = conf.getLong(
HedgedRead.THRESHOLD_MILLIS_KEY,
HedgedRead.THRESHOLD_MILLIS_DEFAULT);
hedgedReadThreadpoolSize = conf.getInt(
HdfsClientConfigKeys.HedgedRead.THREADPOOL_SIZE_KEY,
HdfsClientConfigKeys.HedgedRead.THREADPOOL_SIZE_DEFAULT);
deadNodeDetectionEnabled =
conf.getBoolean(DFS_CLIENT_DEAD_NODE_DETECTION_ENABLED_KEY,
DFS_CLIENT_DEAD_NODE_DETECTION_ENABLED_DEFAULT);
stripedReadThreadpoolSize = conf.getInt(
HdfsClientConfigKeys.StripedRead.THREADPOOL_SIZE_KEY,
HdfsClientConfigKeys.StripedRead.THREADPOOL_SIZE_DEFAULT);
Preconditions.checkArgument(stripedReadThreadpoolSize > 0, "The value of " +
HdfsClientConfigKeys.StripedRead.THREADPOOL_SIZE_KEY +
" must be greater than 0.");
replicaAccessorBuilderClasses = loadReplicaAccessorBuilderClasses(conf);
leaseHardLimitPeriod =
conf.getLong(HdfsClientConfigKeys.DFS_LEASE_HARDLIMIT_KEY,
HdfsClientConfigKeys.DFS_LEASE_HARDLIMIT_DEFAULT) * 1000;
shortCircuitConf = new ShortCircuitConf(conf);
clientShortCircuitNum = conf.getInt(
HdfsClientConfigKeys.DFS_CLIENT_SHORT_CIRCUIT_NUM,
HdfsClientConfigKeys.DFS_CLIENT_SHORT_CIRCUIT_NUM_DEFAULT);
Preconditions.checkArgument(clientShortCircuitNum >= 1,
HdfsClientConfigKeys.DFS_CLIENT_SHORT_CIRCUIT_NUM +
"can't be less then 1.");
Preconditions.checkArgument(clientShortCircuitNum <= 5,
HdfsClientConfigKeys.DFS_CLIENT_SHORT_CIRCUIT_NUM +
"can't be more then 5.");
maxPipelineRecoveryRetries = conf.getInt(
HdfsClientConfigKeys.DFS_CLIENT_PIPELINE_RECOVERY_MAX_RETRIES,
HdfsClientConfigKeys.DFS_CLIENT_PIPELINE_RECOVERY_MAX_RETRIES_DEFAULT
);
recoverLeaseOnCloseException = conf.getBoolean(
Write.RECOVER_LEASE_ON_CLOSE_EXCEPTION_KEY,
Write.RECOVER_LEASE_ON_CLOSE_EXCEPTION_DEFAULT
);
}
private ByteArrayManager.Conf loadWriteByteArrayManagerConf(
Configuration conf) {
final boolean byteArrayManagerEnabled = conf.getBoolean(
Write.ByteArrayManager.ENABLED_KEY,
Write.ByteArrayManager.ENABLED_DEFAULT);
if (!byteArrayManagerEnabled) {
return null;
}
final int countThreshold = conf.getInt(
Write.ByteArrayManager.COUNT_THRESHOLD_KEY,
Write.ByteArrayManager.COUNT_THRESHOLD_DEFAULT);
final int countLimit = conf.getInt(
Write.ByteArrayManager.COUNT_LIMIT_KEY,
Write.ByteArrayManager.COUNT_LIMIT_DEFAULT);
final long countResetTimePeriodMs = conf.getLong(
Write.ByteArrayManager.COUNT_RESET_TIME_PERIOD_MS_KEY,
Write.ByteArrayManager.COUNT_RESET_TIME_PERIOD_MS_DEFAULT);
return new ByteArrayManager.Conf(
countThreshold, countLimit, countResetTimePeriodMs);
}
@SuppressWarnings("unchecked")
private List<Class<? extends ReplicaAccessorBuilder>>
loadReplicaAccessorBuilderClasses(Configuration conf) {
String[] classNames = conf.getTrimmedStrings(
HdfsClientConfigKeys.REPLICA_ACCESSOR_BUILDER_CLASSES_KEY);
if (classNames.length == 0) {
return Collections.emptyList();
}
ArrayList<Class<? extends ReplicaAccessorBuilder>> classes =
new ArrayList<>();
ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
for (String className: classNames) {
try {
Class<? extends ReplicaAccessorBuilder> cls =
(Class<? extends ReplicaAccessorBuilder>)
classLoader.loadClass(className);
classes.add(cls);
} catch (Throwable t) {
LOG.warn("Unable to load {}", className, t);
}
}
return classes;
}
private static DataChecksum.Type getChecksumType(Configuration conf) {
final String checksum = conf.get(
DFS_CHECKSUM_TYPE_KEY,
DFS_CHECKSUM_TYPE_DEFAULT);
try {
return DataChecksum.Type.valueOf(checksum);
} catch(IllegalArgumentException iae) {
LOG.warn("Bad checksum type: {}. Using default {}", checksum,
DFS_CHECKSUM_TYPE_DEFAULT);
return DataChecksum.Type.valueOf(
DFS_CHECKSUM_TYPE_DEFAULT);
}
}
private static ChecksumCombineMode getChecksumCombineModeFromConf(
Configuration conf) {
final String mode = conf.get(
DFS_CHECKSUM_COMBINE_MODE_KEY,
DFS_CHECKSUM_COMBINE_MODE_DEFAULT);
try {
return ChecksumCombineMode.valueOf(mode);
} catch(IllegalArgumentException iae) {
LOG.warn("Bad checksum combine mode: {}. Using default {}", mode,
DFS_CHECKSUM_COMBINE_MODE_DEFAULT);
return ChecksumCombineMode.valueOf(
DFS_CHECKSUM_COMBINE_MODE_DEFAULT);
}
}
// Construct a checksum option from conf
public static ChecksumOpt getChecksumOptFromConf(Configuration conf) {
DataChecksum.Type type = getChecksumType(conf);
int bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY,
DFS_BYTES_PER_CHECKSUM_DEFAULT);
return new ChecksumOpt(type, bytesPerChecksum);
}
/** create a DataChecksum with the given option. */
public DataChecksum createChecksum(ChecksumOpt userOpt) {
// Fill in any missing field with the default.
ChecksumOpt opt = ChecksumOpt.processChecksumOpt(
defaultChecksumOpt, userOpt);
DataChecksum dataChecksum = DataChecksum.newDataChecksum(
opt.getChecksumType(),
opt.getBytesPerChecksum());
if (dataChecksum == null) {
throw new HadoopIllegalArgumentException("Invalid checksum type: userOpt="
+ userOpt + ", default=" + defaultChecksumOpt
+ ", effective=null");
}
return dataChecksum;
}
@VisibleForTesting
public int getBlockWriteLocateFollowingInitialDelayMs() {
return blockWriteLocateFollowingInitialDelayMs;
}
public int getBlockWriteLocateFollowingMaxDelayMs() {
return blockWriteLocateFollowingMaxDelayMs;
}
/**
* @return the hdfsTimeout
*/
public int getHdfsTimeout() {
return hdfsTimeout;
}
/**
* @return the maxFailoverAttempts
*/
public int getMaxFailoverAttempts() {
return maxFailoverAttempts;
}
/**
* @return the maxRetryAttempts
*/
public int getMaxRetryAttempts() {
return maxRetryAttempts;
}
/**
* @return the failoverSleepBaseMillis
*/
public int getFailoverSleepBaseMillis() {
return failoverSleepBaseMillis;
}
/**
* @return the failoverSleepMaxMillis
*/
public int getFailoverSleepMaxMillis() {
return failoverSleepMaxMillis;
}
/**
* @return the maxBlockAcquireFailures
*/
public int getMaxBlockAcquireFailures() {
return maxBlockAcquireFailures;
}
/**
* @return the datanodeSocketWriteTimeout
*/
public int getDatanodeSocketWriteTimeout() {
return datanodeSocketWriteTimeout;
}
/**
* @return the ioBufferSize
*/
public int getIoBufferSize() {
return ioBufferSize;
}
/**
* @return the defaultChecksumOpt
*/
public ChecksumOpt getDefaultChecksumOpt() {
return defaultChecksumOpt;
}
/**
* @return the checksumCombineMode
*/
public ChecksumCombineMode getChecksumCombineMode() {
return checksumCombineMode;
}
/**
* @return the checksumEcSocketTimeout
*/
public int getChecksumEcSocketTimeout() {
return checksumEcSocketTimeout;
}
/**
* @return the writePacketSize
*/
public int getWritePacketSize() {
return writePacketSize;
}
/**
* @return the writeMaxPackets
*/
public int getWriteMaxPackets() {
return writeMaxPackets;
}
/**
* @return the writeByteArrayManagerConf
*/
public ByteArrayManager.Conf getWriteByteArrayManagerConf() {
return writeByteArrayManagerConf;
}
/**
* @return whether TCP_NODELAY should be set on client sockets
*/
public boolean getDataTransferTcpNoDelay() {
return dataTransferTcpNoDelay;
}
/**
* @return the socketTimeout
*/
public int getSocketTimeout() {
return socketTimeout;
}
/**
* @return the socketSendBufferSize
*/
public int getSocketSendBufferSize() {
return socketSendBufferSize;
}
/**
* @return the excludedNodesCacheExpiry
*/
public long getExcludedNodesCacheExpiry() {
return excludedNodesCacheExpiry;
}
/**
* @return the timeWindow
*/
public int getTimeWindow() {
return timeWindow;
}
/**
* @return the numCachedConnRetry
*/
public int getNumCachedConnRetry() {
return numCachedConnRetry;
}
/**
* @return the numBlockWriteRetry
*/
public int getNumBlockWriteRetry() {
return numBlockWriteRetry;
}
/**
* @return the numBlockWriteLocateFollowingRetry
*/
public int getNumBlockWriteLocateFollowingRetry() {
return numBlockWriteLocateFollowingRetry;
}
/**
* @return the defaultBlockSize
*/
public long getDefaultBlockSize() {
return defaultBlockSize;
}
/**
* @return the prefetchSize
*/
public long getPrefetchSize() {
return prefetchSize;
}
/**
* @return the uriCacheEnable
*/
public boolean isUriCacheEnabled() {
return uriCacheEnabled;
}
/**
* @return the defaultReplication
*/
public short getDefaultReplication() {
return defaultReplication;
}
/**
* @return the taskId
*/
public String getTaskId() {
return taskId;
}
/**
* @return the uMask
*/
public FsPermission getUMask() {
return uMask;
}
/**
* @return the connectToDnViaHostname
*/
public boolean isConnectToDnViaHostname() {
return connectToDnViaHostname;
}
/**
* @return the retryTimesForGetLastBlockLength
*/
public int getRetryTimesForGetLastBlockLength() {
return retryTimesForGetLastBlockLength;
}
/**
* @return the retryIntervalForGetLastBlockLength
*/
public int getRetryIntervalForGetLastBlockLength() {
return retryIntervalForGetLastBlockLength;
}
/**
* @return the datanodeRestartTimeout
*/
public long getDatanodeRestartTimeout() {
return datanodeRestartTimeout;
}
/**
* @return the slowIoWarningThresholdMs
*/
public long getSlowIoWarningThresholdMs() {
return slowIoWarningThresholdMs;
}
/**
* @return the continuous slowNode replies received to mark slowNode as badNode
*/
public int getMarkSlowNodeAsBadNodeThreshold() {
return markSlowNodeAsBadNodeThreshold;
}
/*
* @return the clientShortCircuitNum
*/
public int getClientShortCircuitNum() {
return clientShortCircuitNum;
}
/**
* @return the hedgedReadThresholdMillis
*/
public long getHedgedReadThresholdMillis() {
return hedgedReadThresholdMillis;
}
/**
* @return the hedgedReadThreadpoolSize
*/
public int getHedgedReadThreadpoolSize() {
return hedgedReadThreadpoolSize;
}
/**
* @return the stripedReadThreadpoolSize
*/
public int getStripedReadThreadpoolSize() {
return stripedReadThreadpoolSize;
}
/**
* @return the deadNodeDetectionEnabled
*/
public boolean isDeadNodeDetectionEnabled() {
return deadNodeDetectionEnabled;
}
/**
* @return the leaseHardLimitPeriod
*/
public long getleaseHardLimitPeriod() {
return leaseHardLimitPeriod;
}
/**
* @return the readUseCachePriority
*/
public boolean isReadUseCachePriority() {
return readUseCachePriority;
}
/**
* @return the replicaAccessorBuilderClasses
*/
public List<Class<? extends ReplicaAccessorBuilder>>
getReplicaAccessorBuilderClasses() {
return replicaAccessorBuilderClasses;
}
public boolean isLocatedBlocksRefresherEnabled() {
return refreshReadBlockLocationsMS > 0;
}
public long getLocatedBlocksRefresherInterval() {
return refreshReadBlockLocationsMS;
}
public boolean isRefreshReadBlockLocationsAutomatically() {
return refreshReadBlockLocationsAutomatically;
}
/**
* @return the shortCircuitConf
*/
public ShortCircuitConf getShortCircuitConf() {
return shortCircuitConf;
}
/**
*@return the maxPipelineRecoveryRetries
*/
public int getMaxPipelineRecoveryRetries() {
return maxPipelineRecoveryRetries;
}
public boolean getRecoverLeaseOnCloseException() {
return recoverLeaseOnCloseException;
}
/**
* Configuration for short-circuit reads.
*/
public static | DfsClientConf |
java | elastic__elasticsearch | x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorCustomSchedule.java | {
"start": 8223,
"end": 13434
} | class ____ implements Writeable, ToXContentObject {
@Nullable
private final Integer maxCrawlDepth;
@Nullable
private final Boolean sitemapDiscoveryDisabled;
@Nullable
private final List<String> domainAllowList;
@Nullable
private final List<String> sitemapUrls;
@Nullable
private final List<String> seedUrls;
private ConfigurationOverrides(
Integer maxCrawlDepth,
Boolean sitemapDiscoveryDisabled,
List<String> domainAllowList,
List<String> sitemapUrls,
List<String> seedUrls
) {
this.maxCrawlDepth = maxCrawlDepth;
this.sitemapDiscoveryDisabled = sitemapDiscoveryDisabled;
this.domainAllowList = domainAllowList;
this.sitemapUrls = sitemapUrls;
this.seedUrls = seedUrls;
}
public ConfigurationOverrides(StreamInput in) throws IOException {
this.maxCrawlDepth = in.readOptionalInt();
this.sitemapDiscoveryDisabled = in.readOptionalBoolean();
this.domainAllowList = in.readOptionalStringCollectionAsList();
this.sitemapUrls = in.readOptionalStringCollectionAsList();
this.seedUrls = in.readOptionalStringCollectionAsList();
}
private static final ParseField MAX_CRAWL_DEPTH_FIELD = new ParseField("max_crawl_depth");
private static final ParseField SITEMAP_DISCOVERY_DISABLED_FIELD = new ParseField("sitemap_discovery_disabled");
private static final ParseField DOMAIN_ALLOWLIST_FIELD = new ParseField("domain_allowlist");
private static final ParseField SITEMAP_URLS_FIELD = new ParseField("sitemap_urls");
private static final ParseField SEED_URLS_FIELD = new ParseField("seed_urls");
@SuppressWarnings("unchecked")
private static final ConstructingObjectParser<ConfigurationOverrides, Void> PARSER = new ConstructingObjectParser<>(
"configuration_override",
true,
args -> new Builder().setMaxCrawlDepth((Integer) args[0])
.setSitemapDiscoveryDisabled((Boolean) args[1])
.setDomainAllowList((List<String>) args[2])
.setSitemapUrls((List<String>) args[3])
.setSeedUrls((List<String>) args[4])
.build()
);
static {
PARSER.declareInt(optionalConstructorArg(), MAX_CRAWL_DEPTH_FIELD);
PARSER.declareBoolean(optionalConstructorArg(), SITEMAP_DISCOVERY_DISABLED_FIELD);
PARSER.declareStringArray(optionalConstructorArg(), DOMAIN_ALLOWLIST_FIELD);
PARSER.declareStringArray(optionalConstructorArg(), SITEMAP_URLS_FIELD);
PARSER.declareStringArray(optionalConstructorArg(), SEED_URLS_FIELD);
}
public static ConfigurationOverrides fromXContent(XContentParser parser) throws IOException {
return PARSER.parse(parser, null);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
if (maxCrawlDepth != null) {
builder.field(MAX_CRAWL_DEPTH_FIELD.getPreferredName(), maxCrawlDepth);
}
if (sitemapDiscoveryDisabled != null) {
builder.field(SITEMAP_DISCOVERY_DISABLED_FIELD.getPreferredName(), sitemapDiscoveryDisabled);
}
if (domainAllowList != null) {
builder.stringListField(DOMAIN_ALLOWLIST_FIELD.getPreferredName(), domainAllowList);
}
if (sitemapUrls != null) {
builder.stringListField(SITEMAP_URLS_FIELD.getPreferredName(), sitemapUrls);
}
if (seedUrls != null) {
builder.stringListField(SEED_URLS_FIELD.getPreferredName(), seedUrls);
}
builder.endObject();
return builder;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalInt(maxCrawlDepth);
out.writeOptionalBoolean(sitemapDiscoveryDisabled);
out.writeOptionalStringCollection(domainAllowList);
out.writeOptionalStringCollection(sitemapUrls);
out.writeOptionalStringCollection(seedUrls);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ConfigurationOverrides that = (ConfigurationOverrides) o;
return Objects.equals(maxCrawlDepth, that.maxCrawlDepth)
&& Objects.equals(sitemapDiscoveryDisabled, that.sitemapDiscoveryDisabled)
&& Objects.equals(domainAllowList, that.domainAllowList)
&& Objects.equals(sitemapUrls, that.sitemapUrls)
&& Objects.equals(seedUrls, that.seedUrls);
}
@Override
public int hashCode() {
return Objects.hash(maxCrawlDepth, sitemapDiscoveryDisabled, domainAllowList, sitemapUrls, seedUrls);
}
public static | ConfigurationOverrides |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/IrateIntAggregatorFunctionSupplier.java | {
"start": 647,
"end": 1715
} | class ____ implements AggregatorFunctionSupplier {
private final boolean isDelta;
public IrateIntAggregatorFunctionSupplier(boolean isDelta) {
this.isDelta = isDelta;
}
@Override
public List<IntermediateStateDesc> nonGroupingIntermediateStateDesc() {
throw new UnsupportedOperationException("non-grouping aggregator is not supported");
}
@Override
public List<IntermediateStateDesc> groupingIntermediateStateDesc() {
return IrateIntGroupingAggregatorFunction.intermediateStateDesc();
}
@Override
public AggregatorFunction aggregator(DriverContext driverContext, List<Integer> channels) {
throw new UnsupportedOperationException("non-grouping aggregator is not supported");
}
@Override
public IrateIntGroupingAggregatorFunction groupingAggregator(DriverContext driverContext,
List<Integer> channels) {
return IrateIntGroupingAggregatorFunction.create(channels, driverContext, isDelta);
}
@Override
public String describe() {
return IrateIntAggregator.describe();
}
}
| IrateIntAggregatorFunctionSupplier |
java | elastic__elasticsearch | x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/IncidentEvent.java | {
"start": 22505,
"end": 24013
} | interface ____ {
ParseField TYPE = new ParseField("type");
ParseField EVENT_TYPE = new ParseField("event_type");
ParseField ACCOUNT = new ParseField("account");
ParseField PROXY = new ParseField("proxy");
ParseField DESCRIPTION = new ParseField("description");
ParseField INCIDENT_KEY = new ParseField("incident_key");
ParseField CLIENT = new ParseField("client");
ParseField CLIENT_URL = new ParseField("client_url");
ParseField ATTACH_PAYLOAD = new ParseField("attach_payload");
ParseField CONTEXTS = new ParseField("contexts");
// this field exists because in versions prior 6.0 we accidentally used context instead of contexts and thus the correct data
// was never picked up on the pagerduty side
// we need to keep this for BWC
ParseField CONTEXT_DEPRECATED = new ParseField("context");
ParseField PAYLOAD = new ParseField("payload");
ParseField ROUTING_KEY = new ParseField("routing_key");
ParseField EVENT_ACTION = new ParseField("event_action");
ParseField DEDUP_KEY = new ParseField("dedup_key");
ParseField SUMMARY = new ParseField("summary");
ParseField SOURCE = new ParseField("source");
ParseField SEVERITY = new ParseField("severity");
ParseField LINKS = new ParseField("links");
ParseField IMAGES = new ParseField("images");
ParseField CUSTOM_DETAILS = new ParseField("custom_details");
}
}
| Fields |
java | netty__netty | transport-classes-io_uring/src/main/java/io/netty/channel/uring/IoUringDatagramChannelConfig.java | {
"start": 1229,
"end": 18630
} | class ____ extends IoUringChannelConfig implements DatagramChannelConfig {
private static final RecvByteBufAllocator DEFAULT_RCVBUF_ALLOCATOR = new FixedRecvByteBufAllocator(2048);
private boolean activeOnOpen;
private volatile int maxDatagramSize;
IoUringDatagramChannelConfig(AbstractIoUringChannel channel) {
super(channel);
setRecvByteBufAllocator(DEFAULT_RCVBUF_ALLOCATOR);
}
@Override
@SuppressWarnings("deprecation")
public Map<ChannelOption<?>, Object> getOptions() {
return getOptions(
super.getOptions(),
ChannelOption.SO_BROADCAST, ChannelOption.SO_RCVBUF, ChannelOption.SO_SNDBUF,
ChannelOption.SO_REUSEADDR, ChannelOption.IP_MULTICAST_LOOP_DISABLED,
ChannelOption.IP_MULTICAST_ADDR, ChannelOption.IP_MULTICAST_IF, ChannelOption.IP_MULTICAST_TTL,
ChannelOption.IP_TOS, ChannelOption.DATAGRAM_CHANNEL_ACTIVE_ON_REGISTRATION,
IoUringChannelOption.SO_REUSEPORT, IoUringChannelOption.IP_FREEBIND,
IoUringChannelOption.IP_TRANSPARENT, IoUringChannelOption.MAX_DATAGRAM_PAYLOAD_SIZE,
IoUringChannelOption.IP_MULTICAST_ALL);
}
@SuppressWarnings({ "unchecked", "deprecation" })
@Override
public <T> T getOption(ChannelOption<T> option) {
if (option == ChannelOption.SO_BROADCAST) {
return (T) Boolean.valueOf(isBroadcast());
}
if (option == ChannelOption.SO_RCVBUF) {
return (T) Integer.valueOf(getReceiveBufferSize());
}
if (option == ChannelOption.SO_SNDBUF) {
return (T) Integer.valueOf(getSendBufferSize());
}
if (option == ChannelOption.SO_REUSEADDR) {
return (T) Boolean.valueOf(isReuseAddress());
}
if (option == ChannelOption.IP_MULTICAST_LOOP_DISABLED) {
return (T) Boolean.valueOf(isLoopbackModeDisabled());
}
if (option == ChannelOption.IP_MULTICAST_ADDR) {
return (T) getInterface();
}
if (option == ChannelOption.IP_MULTICAST_IF) {
return (T) getNetworkInterface();
}
if (option == ChannelOption.IP_MULTICAST_TTL) {
return (T) Integer.valueOf(getTimeToLive());
}
if (option == ChannelOption.IP_TOS) {
return (T) Integer.valueOf(getTrafficClass());
}
if (option == ChannelOption.DATAGRAM_CHANNEL_ACTIVE_ON_REGISTRATION) {
return (T) Boolean.valueOf(activeOnOpen);
}
if (option == IoUringChannelOption.SO_REUSEPORT) {
return (T) Boolean.valueOf(isReusePort());
}
if (option == IoUringChannelOption.IP_TRANSPARENT) {
return (T) Boolean.valueOf(isIpTransparent());
}
if (option == IoUringChannelOption.IP_FREEBIND) {
return (T) Boolean.valueOf(isFreeBind());
}
if (option == IoUringChannelOption.MAX_DATAGRAM_PAYLOAD_SIZE) {
return (T) Integer.valueOf(getMaxDatagramPayloadSize());
}
if (option == IoUringChannelOption.IP_MULTICAST_ALL) {
return (T) Boolean.valueOf(isIpMulticastAll());
}
return super.getOption(option);
}
@Override
@SuppressWarnings("deprecation")
public <T> boolean setOption(ChannelOption<T> option, T value) {
validate(option, value);
if (option == ChannelOption.SO_BROADCAST) {
setBroadcast((Boolean) value);
} else if (option == ChannelOption.SO_RCVBUF) {
setReceiveBufferSize((Integer) value);
} else if (option == ChannelOption.SO_SNDBUF) {
setSendBufferSize((Integer) value);
} else if (option == ChannelOption.SO_REUSEADDR) {
setReuseAddress((Boolean) value);
} else if (option == ChannelOption.IP_MULTICAST_LOOP_DISABLED) {
setLoopbackModeDisabled((Boolean) value);
} else if (option == ChannelOption.IP_MULTICAST_ADDR) {
setInterface((InetAddress) value);
} else if (option == ChannelOption.IP_MULTICAST_IF) {
setNetworkInterface((NetworkInterface) value);
} else if (option == ChannelOption.IP_MULTICAST_TTL) {
setTimeToLive((Integer) value);
} else if (option == ChannelOption.IP_TOS) {
setTrafficClass((Integer) value);
} else if (option == ChannelOption.DATAGRAM_CHANNEL_ACTIVE_ON_REGISTRATION) {
setActiveOnOpen((Boolean) value);
} else if (option == IoUringChannelOption.SO_REUSEPORT) {
setReusePort((Boolean) value);
} else if (option == IoUringChannelOption.IP_FREEBIND) {
setFreeBind((Boolean) value);
} else if (option == IoUringChannelOption.IP_TRANSPARENT) {
setIpTransparent((Boolean) value);
} else if (option == IoUringChannelOption.MAX_DATAGRAM_PAYLOAD_SIZE) {
setMaxDatagramPayloadSize((Integer) value);
} else if (option == IoUringChannelOption.IP_MULTICAST_ALL) {
setIpMulticastAll((Boolean) value);
} else {
return super.setOption(option, value);
}
return true;
}
private void setActiveOnOpen(boolean activeOnOpen) {
if (channel.isRegistered()) {
throw new IllegalStateException("Can only changed before channel was registered");
}
this.activeOnOpen = activeOnOpen;
}
boolean getActiveOnOpen() {
return activeOnOpen;
}
@Override
public IoUringDatagramChannelConfig setMessageSizeEstimator(MessageSizeEstimator estimator) {
super.setMessageSizeEstimator(estimator);
return this;
}
@Override
@Deprecated
public IoUringDatagramChannelConfig setWriteBufferLowWaterMark(int writeBufferLowWaterMark) {
super.setWriteBufferLowWaterMark(writeBufferLowWaterMark);
return this;
}
@Override
@Deprecated
public IoUringDatagramChannelConfig setWriteBufferHighWaterMark(int writeBufferHighWaterMark) {
super.setWriteBufferHighWaterMark(writeBufferHighWaterMark);
return this;
}
@Override
public IoUringDatagramChannelConfig setWriteBufferWaterMark(WriteBufferWaterMark writeBufferWaterMark) {
super.setWriteBufferWaterMark(writeBufferWaterMark);
return this;
}
@Override
public IoUringDatagramChannelConfig setAutoClose(boolean autoClose) {
super.setAutoClose(autoClose);
return this;
}
@Override
public IoUringDatagramChannelConfig setAutoRead(boolean autoRead) {
super.setAutoRead(autoRead);
return this;
}
@Override
public IoUringDatagramChannelConfig setRecvByteBufAllocator(RecvByteBufAllocator allocator) {
super.setRecvByteBufAllocator(allocator);
return this;
}
@Override
public IoUringDatagramChannelConfig setWriteSpinCount(int writeSpinCount) {
super.setWriteSpinCount(writeSpinCount);
return this;
}
@Override
public IoUringDatagramChannelConfig setAllocator(ByteBufAllocator allocator) {
super.setAllocator(allocator);
return this;
}
@Override
public IoUringDatagramChannelConfig setConnectTimeoutMillis(int connectTimeoutMillis) {
super.setConnectTimeoutMillis(connectTimeoutMillis);
return this;
}
@Override
@Deprecated
public IoUringDatagramChannelConfig setMaxMessagesPerRead(int maxMessagesPerRead) {
super.setMaxMessagesPerRead(maxMessagesPerRead);
return this;
}
@Override
public int getSendBufferSize() {
try {
return ((AbstractIoUringChannel) channel).socket.getSendBufferSize();
} catch (IOException e) {
throw new ChannelException(e);
}
}
@Override
public IoUringDatagramChannelConfig setSendBufferSize(int sendBufferSize) {
try {
((AbstractIoUringChannel) channel).socket.setSendBufferSize(sendBufferSize);
return this;
} catch (IOException e) {
throw new ChannelException(e);
}
}
@Override
public int getReceiveBufferSize() {
try {
return ((AbstractIoUringChannel) channel).socket.getReceiveBufferSize();
} catch (IOException e) {
throw new ChannelException(e);
}
}
@Override
public IoUringDatagramChannelConfig setReceiveBufferSize(int receiveBufferSize) {
try {
((AbstractIoUringChannel) channel).socket.setReceiveBufferSize(receiveBufferSize);
return this;
} catch (IOException e) {
throw new ChannelException(e);
}
}
@Override
public int getTrafficClass() {
try {
return ((AbstractIoUringChannel) channel).socket.getTrafficClass();
} catch (IOException e) {
throw new ChannelException(e);
}
}
@Override
public IoUringDatagramChannelConfig setTrafficClass(int trafficClass) {
try {
((AbstractIoUringChannel) channel).socket.setTrafficClass(trafficClass);
return this;
} catch (IOException e) {
throw new ChannelException(e);
}
}
@Override
public boolean isReuseAddress() {
try {
return ((AbstractIoUringChannel) channel).socket.isReuseAddress();
} catch (IOException e) {
throw new ChannelException(e);
}
}
@Override
public IoUringDatagramChannelConfig setReuseAddress(boolean reuseAddress) {
try {
((AbstractIoUringChannel) channel).socket.setReuseAddress(reuseAddress);
return this;
} catch (IOException e) {
throw new ChannelException(e);
}
}
@Override
public boolean isBroadcast() {
try {
return ((AbstractIoUringChannel) channel).socket.isBroadcast();
} catch (IOException e) {
throw new ChannelException(e);
}
}
@Override
public IoUringDatagramChannelConfig setBroadcast(boolean broadcast) {
try {
((AbstractIoUringChannel) channel).socket.setBroadcast(broadcast);
return this;
} catch (IOException e) {
throw new ChannelException(e);
}
}
@Override
public boolean isLoopbackModeDisabled() {
try {
return ((AbstractIoUringChannel) channel).socket.isLoopbackModeDisabled();
} catch (IOException e) {
throw new ChannelException(e);
}
}
@Override
public IoUringDatagramChannelConfig setLoopbackModeDisabled(boolean loopbackModeDisabled) {
try {
((AbstractIoUringChannel) channel).socket.setLoopbackModeDisabled(loopbackModeDisabled);
return this;
} catch (IOException e) {
throw new ChannelException(e);
}
}
@Override
public int getTimeToLive() {
try {
return ((AbstractIoUringChannel) channel).socket.getTimeToLive();
} catch (IOException e) {
throw new ChannelException(e);
}
}
@Override
public IoUringDatagramChannelConfig setTimeToLive(int ttl) {
try {
((AbstractIoUringChannel) channel).socket.setTimeToLive(ttl);
return this;
} catch (IOException e) {
throw new ChannelException(e);
}
}
@Override
public InetAddress getInterface() {
try {
return ((AbstractIoUringChannel) channel).socket.getInterface();
} catch (IOException e) {
throw new ChannelException(e);
}
}
@Override
public IoUringDatagramChannelConfig setInterface(InetAddress interfaceAddress) {
try {
((AbstractIoUringChannel) channel).socket.setInterface(interfaceAddress);
return this;
} catch (IOException e) {
throw new ChannelException(e);
}
}
@Override
public NetworkInterface getNetworkInterface() {
try {
return ((AbstractIoUringChannel) channel).socket.getNetworkInterface();
} catch (IOException e) {
throw new ChannelException(e);
}
}
@Override
public IoUringDatagramChannelConfig setNetworkInterface(NetworkInterface networkInterface) {
try {
((AbstractIoUringChannel) channel).socket.setNetworkInterface(networkInterface);
return this;
} catch (IOException e) {
throw new ChannelException(e);
}
}
/**
* Returns {@code true} if the SO_REUSEPORT option is set.
*/
public boolean isReusePort() {
try {
return ((AbstractIoUringChannel) channel).socket.isReusePort();
} catch (IOException e) {
throw new ChannelException(e);
}
}
/**
* Set the SO_REUSEPORT option on the underlying Channel. This will allow to bind multiple
* {@link io.netty.channel.socket.DatagramChannel}s to the same port and so receive datagrams with multiple threads.
*
* Be aware this method needs be called before
* {@link io.netty.channel.socket.DatagramChannel#bind(java.net.SocketAddress)} to have
* any affect.
*/
public IoUringDatagramChannelConfig setReusePort(boolean reusePort) {
try {
((AbstractIoUringChannel) channel).socket.setReusePort(reusePort);
return this;
} catch (IOException e) {
throw new ChannelException(e);
}
}
/**
* Returns {@code true} if <a href="https://man7.org/linux/man-pages/man7/ip.7.html">IP_TRANSPARENT</a> is enabled,
* {@code false} otherwise.
*/
public boolean isIpTransparent() {
try {
return ((AbstractIoUringChannel) channel).socket.isIpTransparent();
} catch (IOException e) {
throw new ChannelException(e);
}
}
/**
* If {@code true} is used <a href="https://man7.org/linux/man-pages/man7/ip.7.html">IP_TRANSPARENT</a> is enabled,
* {@code false} for disable it. Default is disabled.
*/
public IoUringDatagramChannelConfig setIpTransparent(boolean ipTransparent) {
try {
((AbstractIoUringChannel) channel).socket.setIpTransparent(ipTransparent);
return this;
} catch (IOException e) {
throw new ChannelException(e);
}
}
/**
* Returns {@code true} if <a href="https://man7.org/linux/man-pages/man7/ip.7.html">IP_FREEBIND</a> is enabled,
* {@code false} otherwise.
*/
public boolean isFreeBind() {
try {
return ((AbstractIoUringChannel) channel).socket.isIpFreeBind();
} catch (IOException e) {
throw new ChannelException(e);
}
}
/**
* If {@code true} is used <a href="https://man7.org/linux/man-pages/man7/ip.7.html">IP_FREEBIND</a> is enabled,
* {@code false} for disable it. Default is disabled.
*/
public IoUringDatagramChannelConfig setFreeBind(boolean freeBind) {
try {
((AbstractIoUringChannel) channel).socket.setIpFreeBind(freeBind);
return this;
} catch (IOException e) {
throw new ChannelException(e);
}
}
/**
* Set the maximum {@link io.netty.channel.socket.DatagramPacket} size. This will be used to determine if
* a batch of {@code IORING_IO_RECVMSG} should be used when reading from the underlying socket.
* When batched {@code recvmmsg} is used
* we may be able to read multiple {@link io.netty.channel.socket.DatagramPacket}s with one syscall and so
* greatly improve the performance. This number will be used to slice {@link ByteBuf}s returned by the used
* {@link RecvByteBufAllocator}. You can use {@code 0} to disable the usage of batching, any other bigger value
* will enable it.
*/
public IoUringDatagramChannelConfig setMaxDatagramPayloadSize(int maxDatagramSize) {
this.maxDatagramSize = ObjectUtil.checkPositiveOrZero(maxDatagramSize, "maxDatagramSize");
return this;
}
/**
* Get the maximum {@link io.netty.channel.socket.DatagramPacket} size.
*/
public int getMaxDatagramPayloadSize() {
return maxDatagramSize;
}
/**
* If {@code true} is used <a href="https://man7.org/linux/man-pages/man7/ip.7.html">IP_MULTICAST_ALL</a> is
* enabled (or IPV6_MULTICAST_ALL for IPV6), {@code false} for disable it. Default is enabled.
*/
public IoUringDatagramChannelConfig setIpMulticastAll(boolean multicastAll) {
try {
((IoUringDatagramChannel) channel).socket.setIpMulticastAll(multicastAll);
return this;
} catch (IOException e) {
throw new ChannelException(e);
}
}
/**
* Returns {@code true} if <a href="https://man7.org/linux/man-pages/man7/ip.7.html">IP_MULTICAST_ALL</a> (or
* IPV6_MULTICAST_ALL for IPV6) is enabled, {@code false} otherwise.
*/
public boolean isIpMulticastAll() {
try {
return ((IoUringDatagramChannel) channel).socket.isIpMulticastAll();
} catch (IOException e) {
throw new ChannelException(e);
}
}
}
| IoUringDatagramChannelConfig |
java | apache__dubbo | dubbo-remoting/dubbo-remoting-api/src/main/java/org/apache/dubbo/remoting/exchange/support/Replier.java | {
"start": 1023,
"end": 1266
} | interface ____<T> {
/**
* reply.
*
* @param channel
* @param request
* @return response
* @throws RemotingException
*/
Object reply(ExchangeChannel channel, T request) throws RemotingException;
}
| Replier |
java | quarkusio__quarkus | extensions/mailer/runtime/src/test/java/io/quarkus/mailer/runtime/MockMailerImplTest.java | {
"start": 365,
"end": 1846
} | class ____ {
private static final String FROM = "test@test.org";
private static final String TO = "foo@quarkus.io";
private static Vertx vertx;
private MockMailboxImpl mockMailbox;
private MutinyMailerImpl mailer;
@BeforeAll
static void start() {
vertx = Vertx.vertx();
}
@AfterAll
static void stop() {
vertx.close().await().indefinitely();
}
@BeforeEach
void init() {
mockMailbox = new MockMailboxImpl();
mailer = new MutinyMailerImpl(vertx, null, mockMailbox, FROM, null, true, List.of(), false, false, null);
}
@Test
void testTextMail() {
String content = UUID.randomUUID().toString();
mailer.send(Mail.withText(TO, "Test", content)).await().indefinitely();
List<Mail> sent = mockMailbox.getMessagesSentTo(TO);
assertThat(sent).hasSize(1);
Mail actual = sent.get(0);
assertThat(actual.getText()).contains(content);
assertThat(actual.getSubject()).isEqualTo("Test");
}
@Test
void testWithSeveralMails() {
Mail mail1 = Mail.withText(TO, "Mail 1", "Mail 1").addCc("cc@quarkus.io").addBcc("bcc@quarkus.io");
Mail mail2 = Mail.withHtml(TO, "Mail 2", "<strong>Mail 2</strong>").addCc("cc2@quarkus.io")
.addBcc("bcc2@quarkus.io");
mailer.send(mail1, mail2).await().indefinitely();
assertThat(mockMailbox.getTotalMessagesSent()).isEqualTo(6);
}
}
| MockMailerImplTest |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/creators/DelegatingCreatorsTest.java | {
"start": 1600,
"end": 2101
} | class ____
{
protected String name1;
protected String name2;
protected int age;
private FactoryBean711(int a, String n1, String n2) {
age = a;
name1 = n1;
name2 = n2;
}
@JsonCreator(mode = JsonCreator.Mode.DELEGATING)
public static FactoryBean711 create(@JacksonInject String n1, int a, @JacksonInject String n2) {
return new FactoryBean711(a, n1, n2);
}
}
static | FactoryBean711 |
java | spring-projects__spring-boot | core/spring-boot/src/main/java/org/springframework/boot/web/context/servlet/WebApplicationContextInitializer.java | {
"start": 1476,
"end": 4005
} | class ____ {
private static final Log logger = LogFactory.getLog(WebApplicationContextInitializer.class);
private final ConfigurableWebApplicationContext context;
public WebApplicationContextInitializer(ConfigurableWebApplicationContext context) {
this.context = context;
}
public void initialize(ServletContext servletContext) throws ServletException {
prepareWebApplicationContext(servletContext);
registerApplicationScope(servletContext, this.context.getBeanFactory());
WebApplicationContextUtils.registerEnvironmentBeans(this.context.getBeanFactory(), servletContext);
for (ServletContextInitializer initializerBean : new ServletContextInitializerBeans(
this.context.getBeanFactory())) {
initializerBean.onStartup(servletContext);
}
}
private void prepareWebApplicationContext(ServletContext servletContext) {
Object rootContext = servletContext.getAttribute(WebApplicationContext.ROOT_WEB_APPLICATION_CONTEXT_ATTRIBUTE);
if (rootContext != null) {
if (rootContext == this) {
throw new IllegalStateException(
"Cannot initialize context because there is already a root application context present - "
+ "check whether you have multiple ServletContextInitializers!");
}
return;
}
try {
servletContext.setAttribute(WebApplicationContext.ROOT_WEB_APPLICATION_CONTEXT_ATTRIBUTE, this.context);
if (logger.isDebugEnabled()) {
logger.debug("Published root WebApplicationContext as ServletContext attribute with name ["
+ WebApplicationContext.ROOT_WEB_APPLICATION_CONTEXT_ATTRIBUTE + "]");
}
this.context.setServletContext(servletContext);
if (logger.isInfoEnabled()) {
long elapsedTime = System.currentTimeMillis() - this.context.getStartupDate();
logger.info("Root WebApplicationContext: initialization completed in " + elapsedTime + " ms");
}
}
catch (RuntimeException | Error ex) {
logger.error("Context initialization failed", ex);
servletContext.setAttribute(WebApplicationContext.ROOT_WEB_APPLICATION_CONTEXT_ATTRIBUTE, ex);
throw ex;
}
}
private void registerApplicationScope(ServletContext servletContext, ConfigurableListableBeanFactory beanFactory) {
ServletContextScope appScope = new ServletContextScope(servletContext);
beanFactory.registerScope(WebApplicationContext.SCOPE_APPLICATION, appScope);
// Register as ServletContext attribute, for ContextCleanupListener to detect it.
servletContext.setAttribute(ServletContextScope.class.getName(), appScope);
}
}
| WebApplicationContextInitializer |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestNodesInfoActionTests.java | {
"start": 1164,
"end": 6204
} | class ____ extends ESTestCase {
public void testDuplicatedFiltersAreNotRemoved() {
Map<String, String> params = new HashMap<>();
params.put("nodeId", "_all,master:false,_all");
RestRequest restRequest = buildRestRequest(params);
NodesInfoRequest actual = RestNodesInfoAction.prepareRequest(restRequest);
assertArrayEquals(new String[] { "_all", "master:false", "_all" }, actual.nodesIds());
}
public void testOnlyMetrics() {
Map<String, String> params = new HashMap<>();
int metricsCount = randomIntBetween(1, ALLOWED_METRICS.size());
List<String> metrics = new ArrayList<>();
for (int i = 0; i < metricsCount; i++) {
metrics.add(randomFrom(ALLOWED_METRICS));
}
params.put("nodeId", String.join(",", metrics));
RestRequest restRequest = buildRestRequest(params);
NodesInfoRequest actual = RestNodesInfoAction.prepareRequest(restRequest);
assertArrayEquals(new String[] { "_all" }, actual.nodesIds());
assertMetrics(metrics, actual);
}
public void testAllMetricsSelectedWhenNodeAndMetricSpecified() {
Map<String, String> params = new HashMap<>();
String nodeId = randomValueOtherThanMany(ALLOWED_METRICS::contains, () -> randomAlphaOfLength(23));
String metric = randomFrom(ALLOWED_METRICS);
params.put("nodeId", nodeId + "," + metric);
RestRequest restRequest = buildRestRequest(params);
NodesInfoRequest actual = RestNodesInfoAction.prepareRequest(restRequest);
assertArrayEquals(new String[] { nodeId, metric }, actual.nodesIds());
assertAllMetricsTrue(actual);
}
public void testSeparateNodeIdsAndMetrics() {
Map<String, String> params = new HashMap<>();
List<String> nodeIds = new ArrayList<>(5);
List<String> metrics = new ArrayList<>(5);
for (int i = 0; i < 5; i++) {
nodeIds.add(randomValueOtherThanMany(ALLOWED_METRICS::contains, () -> randomAlphaOfLength(23)));
metrics.add(randomFrom(ALLOWED_METRICS));
}
params.put("nodeId", String.join(",", nodeIds));
params.put("metrics", String.join(",", metrics));
RestRequest restRequest = buildRestRequest(params);
NodesInfoRequest actual = RestNodesInfoAction.prepareRequest(restRequest);
assertArrayEquals(nodeIds.toArray(), actual.nodesIds());
assertMetrics(metrics, actual);
}
public void testExplicitAllMetrics() {
Map<String, String> params = new HashMap<>();
List<String> nodeIds = new ArrayList<>(5);
for (int i = 0; i < 5; i++) {
nodeIds.add(randomValueOtherThanMany(ALLOWED_METRICS::contains, () -> randomAlphaOfLength(23)));
}
params.put("nodeId", String.join(",", nodeIds));
params.put("metrics", "_all");
RestRequest restRequest = buildRestRequest(params);
NodesInfoRequest actual = RestNodesInfoAction.prepareRequest(restRequest);
assertArrayEquals(nodeIds.toArray(), actual.nodesIds());
assertAllMetricsTrue(actual);
}
/**
* Test that if a user requests a non-existent metric, it's dropped from the
* request without an error.
*/
public void testNonexistentMetricsDropped() {
Map<String, String> params = new HashMap<>();
List<String> nodeIds = new ArrayList<>(5);
List<String> metrics = new ArrayList<>(5);
for (int i = 0; i < 5; i++) {
nodeIds.add(randomValueOtherThanMany(ALLOWED_METRICS::contains, () -> randomAlphaOfLength(23)));
metrics.add(randomFrom(ALLOWED_METRICS));
}
String nonAllowedMetric = randomValueOtherThanMany(ALLOWED_METRICS::contains, () -> randomAlphaOfLength(5));
metrics.add(nonAllowedMetric);
params.put("nodeId", String.join(",", nodeIds));
params.put("metrics", String.join(",", metrics));
RestRequest restRequest = buildRestRequest(params);
NodesInfoRequest actual = RestNodesInfoAction.prepareRequest(restRequest);
assertArrayEquals(nodeIds.toArray(), actual.nodesIds());
assertThat(actual.requestedMetrics(), not(hasItem(nonAllowedMetric)));
assertMetrics(metrics, actual);
}
private FakeRestRequest buildRestRequest(Map<String, String> params) {
return new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.GET)
.withPath("/_nodes")
.withParams(params)
.build();
}
private void assertMetrics(List<String> metrics, NodesInfoRequest nodesInfoRequest) {
Set<String> validRequestedMetrics = Sets.intersection(new HashSet<>(metrics), ALLOWED_METRICS);
assertThat(nodesInfoRequest.requestedMetrics(), equalTo(validRequestedMetrics));
}
private void assertAllMetricsTrue(NodesInfoRequest nodesInfoRequest) {
assertThat(nodesInfoRequest.requestedMetrics(), equalTo(ALLOWED_METRICS));
}
}
| RestNodesInfoActionTests |
java | elastic__elasticsearch | modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/parsers/ShapeParser.java | {
"start": 993,
"end": 3349
} | interface ____ {
ParseField FIELD_TYPE = new ParseField("type");
ParseField FIELD_COORDINATES = new ParseField("coordinates");
ParseField FIELD_GEOMETRIES = new ParseField("geometries");
ParseField FIELD_ORIENTATION = new ParseField("orientation");
/**
* Create a new {@link ShapeBuilder} from {@link XContent}
* @param parser parser to read the GeoShape from
* @param geometryMapper document field mapper reference required for spatial parameters relevant
* to the shape construction process (e.g., orientation)
* todo: refactor to place build specific parameters in the SpatialContext
* @return {@link ShapeBuilder} read from the parser or null
* if the parsers current token has been <code>null</code>
* @throws IOException if the input could not be read
*/
static ShapeBuilder<?, ?, ?> parse(XContentParser parser, AbstractGeometryFieldMapper<?> geometryMapper) throws IOException {
AbstractShapeGeometryFieldMapper<?> shapeMapper = null;
if (geometryMapper != null) {
if (geometryMapper instanceof AbstractShapeGeometryFieldMapper == false) {
throw new IllegalArgumentException("geometry must be a shape type");
}
shapeMapper = (AbstractShapeGeometryFieldMapper<?>) geometryMapper;
}
if (parser.currentToken() == XContentParser.Token.VALUE_NULL) {
return null;
}
if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
return GeoJsonParser.parse(parser, shapeMapper);
} else if (parser.currentToken() == XContentParser.Token.VALUE_STRING) {
return GeoWKTParser.parse(parser, shapeMapper);
}
throw new ElasticsearchParseException("shape must be an object consisting of type and coordinates");
}
/**
* Create a new {@link ShapeBuilder} from {@link XContent}
* @param parser parser to read the GeoShape from
* @return {@link ShapeBuilder} read from the parser or null
* if the parsers current token has been <code>null</code>
* @throws IOException if the input could not be read
*/
static ShapeBuilder<?, ?, ?> parse(XContentParser parser) throws IOException {
return parse(parser, null);
}
}
| ShapeParser |
java | spring-projects__spring-boot | module/spring-boot-actuator-autoconfigure/src/test/java/org/springframework/boot/actuate/autoconfigure/beans/BeansEndpointAutoConfigurationTests.java | {
"start": 1078,
"end": 1982
} | class ____ {
private final ApplicationContextRunner contextRunner = new ApplicationContextRunner()
.withConfiguration(AutoConfigurations.of(BeansEndpointAutoConfiguration.class));
@Test
void runShouldHaveEndpointBean() {
this.contextRunner.withPropertyValues("management.endpoints.web.exposure.include=beans")
.run((context) -> assertThat(context).hasSingleBean(BeansEndpoint.class));
}
@Test
void runWhenNotExposedShouldNotHaveEndpointBean() {
this.contextRunner.run((context) -> assertThat(context).doesNotHaveBean(BeansEndpoint.class));
}
@Test
void runWhenEnabledPropertyIsFalseShouldNotHaveEndpointBean() {
this.contextRunner.withPropertyValues("management.endpoint.beans.enabled:false")
.withPropertyValues("management.endpoints.web.exposure.include=*")
.run((context) -> assertThat(context).doesNotHaveBean(BeansEndpoint.class));
}
}
| BeansEndpointAutoConfigurationTests |
java | junit-team__junit5 | platform-tests/src/test/java/org/junit/platform/commons/util/DefaultClasspathScannerTests.java | {
"start": 3688,
"end": 3904
} | class ____");
}
return true;
};
assertClassesScannedWhenExceptionIsThrown(malformedClassNameSimulationFilter);
assertDebugMessageLogged(listener, "The java.lang.Class loaded from path .+ has a malformed | name |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/scheduler/VertexParallelismInformation.java | {
"start": 992,
"end": 2412
} | interface ____ {
/**
* Returns a vertex's min parallelism.
*
* @return the min parallelism for the vertex
*/
int getMinParallelism();
/**
* Returns a vertex's parallelism.
*
* @return the parallelism for the vertex
*/
int getParallelism();
/**
* Returns the vertex's max parallelism.
*
* @return the max parallelism for the vertex
*/
int getMaxParallelism();
/**
* Set a given vertex's parallelism property. The parallelism can be changed only if the vertex
* parallelism was not decided yet (i.e. was -1).
*
* @param parallelism the parallelism for the vertex
*/
void setParallelism(int parallelism);
/**
* Changes a given vertex's max parallelism property. The caller should first check the validity
* of the new setting via {@link #canRescaleMaxParallelism}, otherwise this operation may fail.
*
* @param maxParallelism the new max parallelism for the vertex
*/
void setMaxParallelism(int maxParallelism);
/**
* Returns whether the vertex's max parallelism can be changed to a given value.
*
* @param desiredMaxParallelism the desired max parallelism for the vertex
* @return whether the max parallelism can be changed to the given value
*/
boolean canRescaleMaxParallelism(int desiredMaxParallelism);
}
| VertexParallelismInformation |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/TooManyParametersTest.java | {
"start": 7470,
"end": 7708
} | class ____ extends TestParametersValuesProvider {
@Override
public ImmutableList<TestParameters.TestParametersValues> provideValues(Context context) {
return ImmutableList.of();
}
}
}
""")
.doTest();
}
}
| TestArgs |
java | elastic__elasticsearch | x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/LimitAwareBulkIndexer.java | {
"start": 675,
"end": 825
} | class ____ gathers index requests in bulk requests
* that do exceed a 1000 operations or half the available memory
* limit for indexing.
*/
public | that |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/resource/Transports.java | {
"start": 1848,
"end": 5060
} | class ____ {
private static final InternalLogger transportsLogger = InternalLoggerFactory.getInstance(Transports.class);
static EventLoopResources RESOURCES = KqueueProvider.isAvailable() ? KqueueProvider.getResources()
: IOUringProvider.isAvailable() ? IOUringProvider.getResources() : EpollProvider.getResources();
/**
* @return {@code true} if a native transport is available.
*/
static boolean isAvailable() {
if (EpollProvider.isAvailable() && IOUringProvider.isAvailable()) {
transportsLogger.warn("Both epoll and io_uring native transports are available, epoll has been prioritized.");
}
return EpollProvider.isAvailable() || KqueueProvider.isAvailable() || IOUringProvider.isAvailable();
}
/**
* @return {@code true} if a native transport for domain sockets is available.
*/
public static boolean isDomainSocketSupported() {
return EpollProvider.isAvailable() || KqueueProvider.isAvailable();
}
/**
* @return the native transport socket {@link Channel} class.
*/
static Class<? extends Channel> socketChannelClass() {
return RESOURCES.socketChannelClass();
}
/**
* @return the native transport socket {@link DatagramChannel} class.
*/
static Class<? extends DatagramChannel> datagramChannelClass() {
return RESOURCES.datagramChannelClass();
}
/**
* @return the native transport domain socket {@link Channel} class.
*/
public static Class<? extends Channel> domainSocketChannelClass() {
assertDomainSocketAvailable();
return EpollProvider.isAvailable() && IOUringProvider.isAvailable()
? EpollProvider.getResources().domainSocketChannelClass()
: RESOURCES.domainSocketChannelClass();
}
/**
* @return the native transport {@link EventLoopGroup} class. Defaults to TCP sockets. See
* {@link #eventLoopGroupClass(boolean)} to request a specific EventLoopGroup for Domain Socket usage.
*/
public static Class<? extends EventLoopGroup> eventLoopGroupClass() {
return eventLoopGroupClass(false);
}
/**
* @return the native transport {@link EventLoopGroup} class.
* @param domainSocket {@code true} to indicate Unix Domain Socket usage, {@code false} otherwise.
* @since 6.3.3
*/
public static Class<? extends EventLoopGroup> eventLoopGroupClass(boolean domainSocket) {
return domainSocket && EpollProvider.isAvailable() && IOUringProvider.isAvailable()
? EpollProvider.getResources().eventLoopGroupClass()
: RESOURCES.eventLoopGroupClass();
}
public static void assertDomainSocketAvailable() {
LettuceAssert.assertState(NativeTransports.isDomainSocketSupported(),
"A unix domain socket connection requires epoll or kqueue and neither is available");
}
}
}
| NativeTransports |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestPartitionQueueMetrics.java | {
"start": 2096,
"end": 31775
} | class ____ {
static final int GB = 1024; // MB
private static final Configuration CONF = new Configuration();
private MetricsSystem ms;
@BeforeEach
public void setUp() {
ms = new MetricsSystemImpl();
QueueMetrics.clearQueueMetrics();
PartitionQueueMetrics.clearQueueMetrics();
}
@AfterEach
public void tearDown() {
ms.shutdown();
}
/**
* Structure:
* Both queues, q1 & q2 has been configured to run in only 1 partition, x.
*
* root
* / \
* q1 q2
*
* @throws Exception
*/
@Test
public void testSinglePartitionWithSingleLevelQueueMetrics()
throws Exception {
String parentQueueName = "root";
Queue parentQueue = mock(Queue.class);
String user = "alice";
QueueMetrics root = QueueMetrics.forQueue(ms, "root", null, true, CONF);
when(parentQueue.getMetrics()).thenReturn(root);
when(parentQueue.getQueueName()).thenReturn(parentQueueName);
QueueMetrics q1 =
QueueMetrics.forQueue(ms, "root.q1", parentQueue, true, CONF);
QueueMetrics q2 =
QueueMetrics.forQueue(ms, "root.q2", parentQueue, true, CONF);
q1.submitApp(user, false);
q1.submitAppAttempt(user, false);
root.setAvailableResourcesToQueue("x",
Resources.createResource(200 * GB, 200));
q1.setAvailableResourcesToQueue("x",
Resources.createResource(100 * GB, 100));
q1.incrPendingResources("x", user, 2, Resource.newInstance(1024, 1));
MetricsSource partitionSource = partitionSource(ms, "x");
MetricsSource rootQueueSource = queueSource(ms, "x", parentQueueName);
MetricsSource q1Source = queueSource(ms, "x", "root.q1");
checkResources(partitionSource, 0, 0, 0, 200 * GB, 200, 2 * GB, 2, 2);
checkResources(rootQueueSource, 0, 0, 0, 200 * GB, 200, 2 * GB, 2, 2);
checkResources(q1Source, 0, 0, 0, 100 * GB, 100, 2 * GB, 2, 2);
q2.incrPendingResources("x", user, 3, Resource.newInstance(1024, 1));
MetricsSource q2Source = queueSource(ms, "x", "root.q2");
checkResources(partitionSource, 0, 0, 0, 200 * GB, 200, 5 * GB, 5, 5);
checkResources(rootQueueSource, 0, 0, 0, 200 * GB, 200, 5 * GB, 5, 5);
checkResources(q2Source, 0, 0, 0, 0, 0, 3 * GB, 3, 3);
PartitionQueueMetrics pq1 =
new PartitionQueueMetrics(ms, "root.q1", parentQueue, true, CONF, "x");
assertTrue(pq1.registry.info().name()
.compareTo(PartitionQueueMetrics.P_RECORD_INFO.name()) == 0, "Name of registry should be \""
+ PartitionQueueMetrics.P_RECORD_INFO.name() + "\", but was \""
+ pq1.registry.info().name() + "\".");
}
/**
* Structure:
* Both queues, q1 & q2 has been configured to run in both partitions, x & y.
*
* root
* / \
* q1 q2
*
* @throws Exception
*/
@Test
public void testTwoPartitionWithSingleLevelQueueMetrics() throws Exception {
String parentQueueName = "root";
String user = "alice";
QueueMetrics root =
QueueMetrics.forQueue(ms, parentQueueName, null, false, CONF);
Queue parentQueue = mock(Queue.class);
when(parentQueue.getMetrics()).thenReturn(root);
when(parentQueue.getQueueName()).thenReturn(parentQueueName);
QueueMetrics q1 =
QueueMetrics.forQueue(ms, "root.q1", parentQueue, false, CONF);
QueueMetrics q2 =
QueueMetrics.forQueue(ms, "root.q2", parentQueue, false, CONF);
AppSchedulingInfo app = mockApp(user);
q1.submitApp(user, false);
q1.submitAppAttempt(user, false);
root.setAvailableResourcesToQueue("x",
Resources.createResource(200 * GB, 200));
q1.setAvailableResourcesToQueue("x",
Resources.createResource(100 * GB, 100));
q1.incrPendingResources("x", user, 2, Resource.newInstance(1024, 1));
MetricsSource xPartitionSource = partitionSource(ms, "x");
MetricsSource xRootQueueSource = queueSource(ms, "x", parentQueueName);
MetricsSource q1Source = queueSource(ms, "x", "root.q1");
checkResources(xPartitionSource, 0, 0, 0, 200 * GB, 200, 2 * GB, 2, 2);
checkResources(xRootQueueSource, 0, 0, 0, 200 * GB, 200, 2 * GB, 2, 2);
checkResources(q1Source, 0, 0, 0, 100 * GB, 100, 2 * GB, 2, 2);
root.setAvailableResourcesToQueue("y",
Resources.createResource(400 * GB, 400));
q2.setAvailableResourcesToQueue("y",
Resources.createResource(200 * GB, 200));
q2.incrPendingResources("y", user, 3, Resource.newInstance(1024, 1));
MetricsSource yPartitionSource = partitionSource(ms, "y");
MetricsSource yRootQueueSource = queueSource(ms, "y", parentQueueName);
MetricsSource q2Source = queueSource(ms, "y", "root.q2");
checkResources(yPartitionSource, 0, 0, 0, 400 * GB, 400, 3 * GB, 3, 3);
checkResources(yRootQueueSource, 0, 0, 0, 400 * GB, 400, 3 * GB, 3, 3);
checkResources(q2Source, 0, 0, 0, 200 * GB, 200, 3 * GB, 3, 3);
}
/**
* Structure:
* Both queues, q1 has been configured to run in multiple partitions, x & y.
*
* root
* /
* q1
*
* @throws Exception
*/
@Test
public void testMultiplePartitionWithSingleQueueMetrics() throws Exception {
String parentQueueName = "root";
Queue parentQueue = mock(Queue.class);
QueueMetrics root =
QueueMetrics.forQueue(ms, parentQueueName, null, true, CONF);
when(parentQueue.getMetrics()).thenReturn(root);
when(parentQueue.getQueueName()).thenReturn(parentQueueName);
QueueMetrics q1 =
QueueMetrics.forQueue(ms, "root.q1", parentQueue, true, CONF);
root.setAvailableResourcesToQueue("x",
Resources.createResource(200 * GB, 200));
root.setAvailableResourcesToQueue("y",
Resources.createResource(300 * GB, 300));
q1.incrPendingResources("x", "test_user", 2, Resource.newInstance(1024, 1));
MetricsSource partitionSource = partitionSource(ms, "x");
MetricsSource rootQueueSource = queueSource(ms, "x", parentQueueName);
MetricsSource q1Source = queueSource(ms, "x", "root.q1");
MetricsSource userSource = userSource(ms, "x", "test_user", "root.q1");
checkResources(partitionSource, 0, 0, 0, 200 * GB, 200, 2 * GB, 2, 2);
checkResources(rootQueueSource, 0, 0, 0, 200 * GB, 200, 2 * GB, 2, 2);
checkResources(q1Source, 0, 0, 0, 0, 0, 2 * GB, 2, 2);
checkResources(userSource, 0, 0, 0, 0, 0, 2 * GB, 2, 2);
q1.incrPendingResources("x", "test_user", 3, Resource.newInstance(1024, 1));
checkResources(partitionSource, 0, 0, 0, 200 * GB, 200, 5 * GB, 5, 5);
checkResources(rootQueueSource, 0, 0, 0, 200 * GB, 200, 5 * GB, 5, 5);
checkResources(q1Source, 0, 0, 0, 0, 0, 5 * GB, 5, 5);
checkResources(userSource, 0, 0, 0, 0, 0, 5 * GB, 5, 5);
q1.incrPendingResources("x", "test_user1", 4,
Resource.newInstance(1024, 1));
MetricsSource userSource1 = userSource(ms, "x", "test_user1", "root.q1");
checkResources(partitionSource, 0, 0, 0, 200 * GB, 200, 9 * GB, 9, 9);
checkResources(rootQueueSource, 0, 0, 0, 200 * GB, 200, 9 * GB, 9, 9);
checkResources(q1Source, 0, 0, 0, 0, 0, 9 * GB, 9, 9);
checkResources(userSource1, 0, 0, 0, 0, 0, 4 * GB, 4, 4);
q1.incrPendingResources("y", "test_user1", 6,
Resource.newInstance(1024, 1));
MetricsSource partitionSourceY = partitionSource(ms, "y");
MetricsSource rootQueueSourceY = queueSource(ms, "y", parentQueueName);
MetricsSource q1SourceY = queueSource(ms, "y", "root.q1");
MetricsSource userSourceY = userSource(ms, "y", "test_user1", "root.q1");
checkResources(partitionSourceY, 0, 0, 0, 300 * GB, 300, 6 * GB, 6, 6);
checkResources(rootQueueSourceY, 0, 0, 0, 300 * GB, 300, 6 * GB, 6, 6);
checkResources(q1SourceY, 0, 0, 0, 0, 0, 6 * GB, 6, 6);
checkResources(userSourceY, 0, 0, 0, 0, 0, 6 * GB, 6, 6);
}
/**
* Structure:
* Both queues, q1 & q2 has been configured to run in both partitions, x & y.
*
* root
* / \
* q1 q2
* q1
* / \
* q11 q12
* q2
* / \
* q21 q22
*
* @throws Exception
*/
@Test
public void testMultiplePartitionsWithMultiLevelQueuesMetrics()
throws Exception {
String parentQueueName = "root";
Queue parentQueue = mock(Queue.class);
QueueMetrics root =
QueueMetrics.forQueue(ms, parentQueueName, null, true, CONF);
when(parentQueue.getQueueName()).thenReturn(parentQueueName);
when(parentQueue.getMetrics()).thenReturn(root);
QueueMetrics q1 =
QueueMetrics.forQueue(ms, "root.q1", parentQueue, true, CONF);
Queue childQueue1 = mock(Queue.class);
when(childQueue1.getQueueName()).thenReturn("root.q1");
when(childQueue1.getMetrics()).thenReturn(q1);
QueueMetrics q11 =
QueueMetrics.forQueue(ms, "root.q1.q11", childQueue1, true, CONF);
QueueMetrics q12 =
QueueMetrics.forQueue(ms, "root.q1.q12", childQueue1, true, CONF);
QueueMetrics q2 =
QueueMetrics.forQueue(ms, "root.q2", parentQueue, true, CONF);
Queue childQueue2 = mock(Queue.class);
when(childQueue2.getQueueName()).thenReturn("root.q2");
when(childQueue2.getMetrics()).thenReturn(q2);
QueueMetrics q21 =
QueueMetrics.forQueue(ms, "root.q2.q21", childQueue2, true, CONF);
QueueMetrics q22 =
QueueMetrics.forQueue(ms, "root.q2.q22", childQueue2, true, CONF);
root.setAvailableResourcesToQueue("x",
Resources.createResource(200 * GB, 200));
q1.setAvailableResourcesToQueue("x",
Resources.createResource(100 * GB, 100));
q11.setAvailableResourcesToQueue("x",
Resources.createResource(50 * GB, 50));
q11.incrPendingResources("x", "test_user", 2,
Resource.newInstance(1024, 1));
MetricsSource partitionSource = partitionSource(ms, "x");
MetricsSource rootQueueSource = queueSource(ms, "x", parentQueueName);
MetricsSource q1Source = queueSource(ms, "x", "root.q1");
MetricsSource userSource = userSource(ms, "x", "test_user", "root.q1");
checkResources(partitionSource, 0, 0, 0, 200 * GB, 200, 2 * GB, 2, 2);
checkResources(q1Source, 0, 0, 0, 100 * GB, 100, 2 * GB, 2, 2);
checkResources(rootQueueSource, 0, 0, 0, 200 * GB, 200, 2 * GB, 2, 2);
checkResources(q1Source, 0, 0, 0, 100 * GB, 100, 2 * GB, 2, 2);
checkResources(userSource, 0, 0, 0, 0 * GB, 0, 2 * GB, 2, 2);
q11.incrPendingResources("x", "test_user", 4,
Resource.newInstance(1024, 1));
MetricsSource q11Source = queueSource(ms, "x", "root.q1.q11");
checkResources(partitionSource, 0, 0, 0, 200 * GB, 200, 6 * GB, 6, 6);
checkResources(rootQueueSource, 0, 0, 0, 200 * GB, 200, 6 * GB, 6, 6);
checkResources(q11Source, 0, 0, 0, 50 * GB, 50, 6 * GB, 6, 6);
checkResources(q1Source, 0, 0, 0, 100 * GB, 100, 6 * GB, 6, 6);
checkResources(userSource, 0, 0, 0, 0 * GB, 0, 6 * GB, 6, 6);
q11.incrPendingResources("x", "test_user1", 5,
Resource.newInstance(1024, 1));
MetricsSource q1UserSource1 = userSource(ms, "x", "test_user1", "root.q1");
MetricsSource userSource1 =
userSource(ms, "x", "test_user1", "root.q1.q11");
checkResources(partitionSource, 0, 0, 0, 200 * GB, 200, 11 * GB, 11, 11);
checkResources(rootQueueSource, 0, 0, 0, 200 * GB, 200, 11 * GB, 11, 11);
checkResources(q11Source, 0, 0, 0, 50 * GB, 50, 11 * GB, 11, 11);
checkResources(q1Source, 0, 0, 0, 100 * GB, 100, 11 * GB, 11, 11);
checkResources(userSource, 0, 0, 0, 0 * GB, 0, 6 * GB, 6, 6);
checkResources(q1UserSource1, 0, 0, 0, 0 * GB, 0, 5 * GB, 5, 5);
checkResources(userSource1, 0, 0, 0, 0 * GB, 0, 5 * GB, 5, 5);
q12.incrPendingResources("x", "test_user", 5,
Resource.newInstance(1024, 1));
MetricsSource q12Source = queueSource(ms, "x", "root.q1.q12");
checkResources(partitionSource, 0, 0, 0, 200 * GB, 200, 16 * GB, 16, 16);
checkResources(rootQueueSource, 0, 0, 0, 200 * GB, 200, 16 * GB, 16, 16);
checkResources(q1Source, 0, 0, 0, 100 * GB, 100, 16 * GB, 16, 16);
checkResources(q12Source, 0, 0, 0, 0, 0, 5 * GB, 5, 5);
root.setAvailableResourcesToQueue("y",
Resources.createResource(200 * GB, 200));
q1.setAvailableResourcesToQueue("y",
Resources.createResource(100 * GB, 100));
q12.setAvailableResourcesToQueue("y",
Resources.createResource(50 * GB, 50));
q12.incrPendingResources("y", "test_user", 3,
Resource.newInstance(1024, 1));
MetricsSource yPartitionSource = partitionSource(ms, "y");
MetricsSource yRootQueueSource = queueSource(ms, "y", parentQueueName);
MetricsSource q1YSource = queueSource(ms, "y", "root.q1");
MetricsSource q12YSource = queueSource(ms, "y", "root.q1.q12");
checkResources(yPartitionSource, 0, 0, 0, 200 * GB, 200, 3 * GB, 3, 3);
checkResources(yRootQueueSource, 0, 0, 0, 200 * GB, 200, 3 * GB, 3, 3);
checkResources(q1YSource, 0, 0, 0, 100 * GB, 100, 3 * GB, 3, 3);
checkResources(q12YSource, 0, 0, 0, 50 * GB, 50, 3 * GB, 3, 3);
root.setAvailableResourcesToQueue("y",
Resources.createResource(200 * GB, 200));
q2.setAvailableResourcesToQueue("y",
Resources.createResource(100 * GB, 100));
q21.setAvailableResourcesToQueue("y",
Resources.createResource(50 * GB, 50));
q21.incrPendingResources("y", "test_user", 5,
Resource.newInstance(1024, 1));
MetricsSource q21Source = queueSource(ms, "y", "root.q2.q21");
MetricsSource q2YSource = queueSource(ms, "y", "root.q2");
checkResources(yPartitionSource, 0, 0, 0, 200 * GB, 200, 8 * GB, 8, 8);
checkResources(yRootQueueSource, 0, 0, 0, 200 * GB, 200, 8 * GB, 8, 8);
checkResources(q2YSource, 0, 0, 0, 100 * GB, 100, 5 * GB, 5, 5);
checkResources(q21Source, 0, 0, 0, 50 * GB, 50, 5 * GB, 5, 5);
q22.incrPendingResources("y", "test_user", 6,
Resource.newInstance(1024, 1));
MetricsSource q22Source = queueSource(ms, "y", "root.q2.q22");
checkResources(yPartitionSource, 0, 0, 0, 200 * GB, 200, 14 * GB, 14, 14);
checkResources(yRootQueueSource, 0, 0, 0, 200 * GB, 200, 14 * GB, 14, 14);
checkResources(q22Source, 0, 0, 0, 0, 0, 6 * GB, 6, 6);
}
@Test
public void testTwoLevelWithUserMetrics() {
String parentQueueName = "root";
String leafQueueName = "root.leaf";
String user = "alice";
String partition = "x";
QueueMetrics parentMetrics =
QueueMetrics.forQueue(ms, parentQueueName, null, true, CONF);
Queue parentQueue = mock(Queue.class);
when(parentQueue.getQueueName()).thenReturn(parentQueueName);
when(parentQueue.getMetrics()).thenReturn(parentMetrics);
QueueMetrics metrics =
QueueMetrics.forQueue(ms, leafQueueName, parentQueue, true, CONF);
AppSchedulingInfo app = mockApp(user);
metrics.submitApp(user, false);
metrics.submitAppAttempt(user, false);
parentMetrics.setAvailableResourcesToQueue(partition,
Resources.createResource(100 * GB, 100));
metrics.setAvailableResourcesToQueue(partition,
Resources.createResource(100 * GB, 100));
parentMetrics.setAvailableResourcesToUser(partition, user,
Resources.createResource(10 * GB, 10));
metrics.setAvailableResourcesToUser(partition, user,
Resources.createResource(10 * GB, 10));
metrics.incrPendingResources(partition, user, 6,
Resources.createResource(3 * GB, 3));
MetricsSource partitionSource = partitionSource(ms, partition);
MetricsSource parentQueueSource =
queueSource(ms, partition, parentQueueName);
MetricsSource queueSource = queueSource(ms, partition, leafQueueName);
MetricsSource userSource = userSource(ms, partition, user, leafQueueName);
MetricsSource userSource1 =
userSource(ms, partition, user, parentQueueName);
checkResources(queueSource, 0, 0, 0, 0, 0, 100 * GB, 100, 18 * GB, 18, 6, 0,
0, 0);
checkResources(parentQueueSource, 0, 0, 0, 0, 0, 100 * GB, 100, 18 * GB, 18,
6, 0, 0, 0);
checkResources(userSource, 0, 0, 0, 0, 0, 10 * GB, 10, 18 * GB, 18, 6, 0, 0,
0);
checkResources(userSource1, 0, 0, 0, 0, 0, 10 * GB, 10, 18 * GB, 18, 6, 0,
0, 0);
checkResources(partitionSource, 0, 0, 0, 0, 0, 100 * GB, 100, 18 * GB, 18,
6, 0, 0, 0);
metrics.runAppAttempt(app.getApplicationId(), user, false);
metrics.allocateResources(partition, user, 3,
Resources.createResource(1 * GB, 1), true);
metrics.reserveResource(partition, user,
Resources.createResource(3 * GB, 3));
// Available resources is set externally, as it depends on dynamic
// configurable cluster/queue resources
checkResources(queueSource, 3 * GB, 3, 3, 3, 0, 100 * GB, 100, 15 * GB, 15,
3, 3 * GB, 3, 1);
checkResources(parentQueueSource, 3 * GB, 3, 3, 3, 0, 100 * GB, 100,
15 * GB, 15, 3, 3 * GB, 3, 1);
checkResources(partitionSource, 3 * GB, 3, 3, 3, 0, 100 * GB, 100, 15 * GB,
15, 3, 3 * GB, 3, 1);
checkResources(userSource, 3 * GB, 3, 3, 3, 0, 10 * GB, 10, 15 * GB, 15, 3,
3 * GB, 3, 1);
checkResources(userSource1, 3 * GB, 3, 3, 3, 0, 10 * GB, 10, 15 * GB, 15, 3,
3 * GB, 3, 1);
metrics.allocateResources(partition, user, 3,
Resources.createResource(1 * GB, 1), true);
checkResources(queueSource, 6 * GB, 6, 6, 6, 0, 100 * GB, 100, 12 * GB, 12,
0, 3 * GB, 3, 1);
checkResources(parentQueueSource, 6 * GB, 6, 6, 6, 0, 100 * GB, 100,
12 * GB, 12, 0, 3 * GB, 3, 1);
metrics.releaseResources(partition, user, 1,
Resources.createResource(2 * GB, 2));
metrics.unreserveResource(partition, user,
Resources.createResource(3 * GB, 3));
checkResources(queueSource, 4 * GB, 4, 5, 6, 1, 100 * GB, 100, 12 * GB, 12,
0, 0, 0, 0);
checkResources(parentQueueSource, 4 * GB, 4, 5, 6, 1, 100 * GB, 100,
12 * GB, 12, 0, 0, 0, 0);
checkResources(partitionSource, 4 * GB, 4, 5, 6, 1, 100 * GB, 100, 12 * GB,
12, 0, 0, 0, 0);
checkResources(userSource, 4 * GB, 4, 5, 6, 1, 10 * GB, 10, 12 * GB, 12, 0,
0, 0, 0);
checkResources(userSource1, 4 * GB, 4, 5, 6, 1, 10 * GB, 10, 12 * GB, 12, 0,
0, 0, 0);
metrics.finishAppAttempt(app.getApplicationId(), app.isPending(),
app.getUser(), false);
metrics.finishApp(user, RMAppState.FINISHED, false);
}
@Test
public void testThreeLevelWithUserMetrics() {
// Builds a three-level queue hierarchy (root -> root.leaf -> root.leaf.leaf1)
// with user metrics enabled (forQueue(..., true, CONF)), and verifies that
// available/pending resources are reported correctly at every level of the
// hierarchy for two distinct partitions, "x" and "y".
String parentQueueName = "root";
String leafQueueName = "root.leaf";
String leafQueueName1 = "root.leaf.leaf1";
String user = "alice";
String partitionX = "x";
String partitionY = "y";
// Wire up the hierarchy: each child QueueMetrics is created with a mock
// parent Queue whose getMetrics() returns the level above it.
QueueMetrics parentMetrics =
QueueMetrics.forQueue(parentQueueName, null, true, CONF);
Queue parentQueue = mock(Queue.class);
when(parentQueue.getQueueName()).thenReturn(parentQueueName);
when(parentQueue.getMetrics()).thenReturn(parentMetrics);
QueueMetrics metrics =
QueueMetrics.forQueue(leafQueueName, parentQueue, true, CONF);
Queue leafQueue = mock(Queue.class);
when(leafQueue.getQueueName()).thenReturn(leafQueueName);
when(leafQueue.getMetrics()).thenReturn(metrics);
QueueMetrics metrics1 =
QueueMetrics.forQueue(leafQueueName1, leafQueue, true, CONF);
// Submit one app/attempt through the deepest queue (leaf1).
AppSchedulingInfo app = mockApp(user);
metrics1.submitApp(user, false);
metrics1.submitAppAttempt(user, false);
// Available resources are set per (queue, partition) and per
// (queue, partition, user); values shrink going down the hierarchy.
parentMetrics.setAvailableResourcesToQueue(partitionX,
Resources.createResource(200 * GB, 200));
parentMetrics.setAvailableResourcesToQueue(partitionY,
Resources.createResource(500 * GB, 500));
metrics.setAvailableResourcesToQueue(partitionX,
Resources.createResource(100 * GB, 100));
metrics.setAvailableResourcesToQueue(partitionY,
Resources.createResource(400 * GB, 400));
metrics1.setAvailableResourcesToQueue(partitionX,
Resources.createResource(50 * GB, 50));
metrics1.setAvailableResourcesToQueue(partitionY,
Resources.createResource(300 * GB, 300));
parentMetrics.setAvailableResourcesToUser(partitionX, user,
Resources.createResource(20 * GB, 20));
parentMetrics.setAvailableResourcesToUser(partitionY, user,
Resources.createResource(50 * GB, 50));
metrics.setAvailableResourcesToUser(partitionX, user,
Resources.createResource(10 * GB, 10));
metrics.setAvailableResourcesToUser(partitionY, user,
Resources.createResource(40 * GB, 40));
metrics1.setAvailableResourcesToUser(partitionX, user,
Resources.createResource(5 * GB, 5));
metrics1.setAvailableResourcesToUser(partitionY, user,
Resources.createResource(30 * GB, 30));
// Pending resources are incremented only on the deepest queue:
// partition x: 6 containers x (3GB, 3 vcores) => 18GB / 18 vcores / 6 ctnrs
// partition y: 6 containers x (4GB, 4 vcores) => 24GB / 24 vcores / 6 ctnrs
// These should propagate up to the leaf, parent and partition sources.
metrics1.incrPendingResources(partitionX, user, 6,
Resources.createResource(3 * GB, 3));
metrics1.incrPendingResources(partitionY, user, 6,
Resources.createResource(4 * GB, 4));
// Resolve the registered metrics sources for partition "x" at every level.
MetricsSource partitionSourceX =
partitionSource(metrics1.getMetricsSystem(), partitionX);
MetricsSource parentQueueSourceWithPartX =
queueSource(metrics1.getMetricsSystem(), partitionX, parentQueueName);
MetricsSource queueSourceWithPartX =
queueSource(metrics1.getMetricsSystem(), partitionX, leafQueueName);
MetricsSource queueSource1WithPartX =
queueSource(metrics1.getMetricsSystem(), partitionX, leafQueueName1);
MetricsSource parentUserSourceWithPartX = userSource(metrics1.getMetricsSystem(),
partitionX, user, parentQueueName);
MetricsSource userSourceWithPartX = userSource(metrics1.getMetricsSystem(),
partitionX, user, leafQueueName);
MetricsSource userSource1WithPartX = userSource(metrics1.getMetricsSystem(),
partitionX, user, leafQueueName1);
// Partition "x": pending 18GB/18/6 everywhere; available differs per level.
checkResources(partitionSourceX, 0, 0, 0, 0, 0, 200 * GB, 200, 18 * GB, 18,
6, 0, 0, 0);
checkResources(parentQueueSourceWithPartX, 0, 0, 0, 0, 0, 200 * GB, 200, 18 * GB,
18, 6, 0, 0, 0);
checkResources(queueSourceWithPartX, 0, 0, 0, 0, 0, 100 * GB, 100, 18 * GB, 18, 6,
0, 0, 0);
checkResources(queueSource1WithPartX, 0, 0, 0, 0, 0, 50 * GB, 50, 18 * GB, 18, 6,
0, 0, 0);
checkResources(parentUserSourceWithPartX, 0, 0, 0, 0, 0, 20 * GB, 20, 18 * GB, 18,
6, 0, 0, 0);
checkResources(userSourceWithPartX, 0, 0, 0, 0, 0, 10 * GB, 10, 18 * GB, 18, 6, 0,
0, 0);
checkResources(userSource1WithPartX, 0, 0, 0, 0, 0, 5 * GB, 5, 18 * GB, 18, 6, 0,
0, 0);
// Resolve the same set of sources for partition "y".
MetricsSource partitionSourceY =
partitionSource(metrics1.getMetricsSystem(), partitionY);
MetricsSource parentQueueSourceWithPartY =
queueSource(metrics1.getMetricsSystem(), partitionY, parentQueueName);
MetricsSource queueSourceWithPartY =
queueSource(metrics1.getMetricsSystem(), partitionY, leafQueueName);
MetricsSource queueSource1WithPartY =
queueSource(metrics1.getMetricsSystem(), partitionY, leafQueueName1);
MetricsSource parentUserSourceWithPartY = userSource(metrics1.getMetricsSystem(),
partitionY, user, parentQueueName);
MetricsSource userSourceWithPartY = userSource(metrics1.getMetricsSystem(),
partitionY, user, leafQueueName);
MetricsSource userSource1WithPartY = userSource(metrics1.getMetricsSystem(),
partitionY, user, leafQueueName1);
// Partition "y": pending 24GB/24/6 everywhere; partition x values must not
// leak into partition y's sources.
checkResources(partitionSourceY, 0, 0, 0, 0, 0, 500 * GB, 500, 24 * GB, 24,
6, 0, 0, 0);
checkResources(parentQueueSourceWithPartY, 0, 0, 0, 0, 0, 500 * GB, 500, 24 * GB,
24, 6, 0, 0, 0);
checkResources(queueSourceWithPartY, 0, 0, 0, 0, 0, 400 * GB, 400, 24 * GB, 24, 6,
0, 0, 0);
checkResources(queueSource1WithPartY, 0, 0, 0, 0, 0, 300 * GB, 300, 24 * GB, 24, 6,
0, 0, 0);
checkResources(parentUserSourceWithPartY, 0, 0, 0, 0, 0, 50 * GB, 50, 24 * GB, 24,
6, 0, 0, 0);
checkResources(userSourceWithPartY, 0, 0, 0, 0, 0, 40 * GB, 40, 24 * GB, 24, 6, 0,
0, 0);
checkResources(userSource1WithPartY, 0, 0, 0, 0, 0, 30 * GB, 30, 24 * GB, 24, 6, 0,
0, 0);
// Tear down the app so shared static metrics state does not bleed into
// other tests.
metrics1.finishAppAttempt(app.getApplicationId(), app.isPending(),
app.getUser(), false);
metrics1.finishApp(user, RMAppState.FINISHED, false);
}
/**
 * Structure:
 * Both queues, q1 and q2, have been configured to run in only one
 * partition, x. User metrics have been disabled, so trying to access the
 * user source results in an NPE from the sources.
 *
 * root
 * / \
 * q1 q2
 *
 * @throws Exception
 */
@Test
public void testSinglePartitionWithSingleLevelQueueMetricsWithoutUserMetrics()
throws Exception {
// User metrics are disabled here (forQueue(..., false, CONF)), so the
// user-level sources are expected to be absent and checking them should
// trigger a NullPointerException.
// NOTE(review): assertThrows wraps the ENTIRE test body, so the test
// passes if ANY statement inside throws an NPE — not specifically the
// user-source access. Consider narrowing the lambda to the single call
// expected to throw; everything after the throwing statement is
// currently dead code. TODO confirm which checkResources call throws
// before narrowing.
assertThrows(NullPointerException.class, ()->{
String parentQueueName = "root";
Queue parentQueue = mock(Queue.class);
String user = "alice";
QueueMetrics root = QueueMetrics.forQueue("root", null, false, CONF);
when(parentQueue.getMetrics()).thenReturn(root);
when(parentQueue.getQueueName()).thenReturn(parentQueueName);
CSQueueMetrics q1 =
CSQueueMetrics.forQueue("root.q1", parentQueue, false, CONF);
CSQueueMetrics q2 =
CSQueueMetrics.forQueue("root.q2", parentQueue, false, CONF);
AppSchedulingInfo app = mockApp(user);
q1.submitApp(user, false);
q1.submitAppAttempt(user, false);
root.setAvailableResourcesToQueue("x",
Resources.createResource(200 * GB, 200));
// Pending on q1: 2 containers x (1GB, 1 vcore) => 2GB / 2 vcores / 2 ctnrs.
q1.incrPendingResources("x", user, 2, Resource.newInstance(1024, 1));
MetricsSource partitionSource = partitionSource(q1.getMetricsSystem(), "x");
MetricsSource rootQueueSource =
queueSource(q1.getMetricsSystem(), "x", parentQueueName);
MetricsSource q1Source = queueSource(q1.getMetricsSystem(), "x", "root.q1");
// Presumably null because user metrics are disabled — the NPE this test
// expects comes from passing a null source to checkResources.
MetricsSource q1UserSource =
userSource(q1.getMetricsSystem(), "x", user, "root.q1");
checkResources(partitionSource, 0, 0, 0, 200 * GB, 200, 2 * GB, 2, 2);
checkResources(rootQueueSource, 0, 0, 0, 200 * GB, 200, 2 * GB, 2, 2);
checkResources(q1Source, 0, 0, 0, 0, 0, 2 * GB, 2, 2);
checkResources(q1UserSource, 0, 0, 0, 0, 0, 2 * GB, 2, 2);
// Pending on q2: 3 containers x (1GB, 1 vcore); partition/root totals
// become 5GB / 5 vcores / 5 containers.
q2.incrPendingResources("x", user, 3, Resource.newInstance(1024, 1));
MetricsSource q2Source = queueSource(q2.getMetricsSystem(), "x", "root.q2");
MetricsSource q2UserSource =
userSource(q1.getMetricsSystem(), "x", user, "root.q2");
checkResources(partitionSource, 0, 0, 0, 0, 0, 5 * GB, 5, 5);
checkResources(rootQueueSource, 0, 0, 0, 0, 0, 5 * GB, 5, 5);
checkResources(q2Source, 0, 0, 0, 0, 0, 3 * GB, 3, 3);
checkResources(q2UserSource, 0, 0, 0, 0, 0, 3 * GB, 3, 3);
q1.finishAppAttempt(app.getApplicationId(), app.isPending(), app.getUser(),
false);
q1.finishApp(user, RMAppState.FINISHED, false);
});
}
/**
 * Looks up the metrics source registered for the given partition.
 *
 * @param ms the metrics system to query
 * @param partition the node-label partition name
 * @return the registered partition-level source, or {@code null} if none
 *         was registered under that name
 */
public static MetricsSource partitionSource(MetricsSystem ms,
    String partition) {
  return ms.getSource(QueueMetrics.pSourceName(partition).toString());
}
/**
 * Looks up the metrics source registered for a queue within a partition.
 *
 * @param ms the metrics system to query
 * @param partition the node-label partition name
 * @param queue the fully qualified queue name
 * @return the registered queue-level source, or {@code null} if none
 *         was registered under that name
 */
public static MetricsSource queueSource(MetricsSystem ms, String partition,
    String queue) {
  String sourceName = QueueMetrics.pSourceName(partition)
      .append(QueueMetrics.qSourceName(queue))
      .toString();
  return ms.getSource(sourceName);
}
/**
 * Looks up the metrics source registered for a user of a queue within a
 * partition.
 *
 * @param ms the metrics system to query
 * @param partition the node-label partition name
 * @param user the user name
 * @param queue the fully qualified queue name
 * @return the registered user-level source, or {@code null} if none
 *         was registered under that name
 */
public static MetricsSource userSource(MetricsSystem ms, String partition,
    String user, String queue) {
  String sourceName = QueueMetrics.pSourceName(partition)
      .append(QueueMetrics.qSourceName(queue))
      .append(",user=")
      .append(user)
      .toString();
  return ms.getSource(sourceName);
}
/**
 * Asserts the allocated/available/pending gauge values exposed by the
 * given metrics source (short form without aggregate or reserved metrics).
 *
 * @param source the metrics source to snapshot; must not be null
 * @param allocatedMB expected AllocatedMB gauge value
 * @param allocatedCores expected AllocatedVCores gauge value
 * @param allocCtnrs expected AllocatedContainers gauge value
 * @param availableMB expected AvailableMB gauge value
 * @param availableCores expected AvailableVCores gauge value
 * @param pendingMB expected PendingMB gauge value
 * @param pendingCores expected PendingVCores gauge value
 * @param pendingCtnrs expected PendingContainers gauge value
 */
public static void checkResources(MetricsSource source, long allocatedMB,
int allocatedCores, int allocCtnrs, long availableMB, int availableCores,
long pendingMB, int pendingCores, int pendingCtnrs) {
MetricsRecordBuilder rb = getMetrics(source);
assertGauge("AllocatedMB", allocatedMB, rb);
assertGauge("AllocatedVCores", allocatedCores, rb);
assertGauge("AllocatedContainers", allocCtnrs, rb);
assertGauge("AvailableMB", availableMB, rb);
assertGauge("AvailableVCores", availableCores, rb);
assertGauge("PendingMB", pendingMB, rb);
assertGauge("PendingVCores", pendingCores, rb);
assertGauge("PendingContainers", pendingCtnrs, rb);
}
/**
 * Creates a Mockito mock of {@link AppSchedulingInfo} for the given user,
 * stubbed with a fixed application id (1, 1) and attempt id 1.
 *
 * @param user the user name returned by {@code getUser()}
 * @return the stubbed mock
 */
private static AppSchedulingInfo mockApp(String user) {
AppSchedulingInfo app = mock(AppSchedulingInfo.class);
when(app.getUser()).thenReturn(user);
ApplicationId appId = BuilderUtils.newApplicationId(1, 1);
ApplicationAttemptId id = BuilderUtils.newApplicationAttemptId(appId, 1);
when(app.getApplicationAttemptId()).thenReturn(id);
// Also stub the application id: callers pass app.getApplicationId() to
// finishAppAttempt(), which previously received null from the unstubbed
// mock.
when(app.getApplicationId()).thenReturn(appId);
return app;
}
/**
 * Asserts the full set of resource metrics exposed by the given metrics
 * source: allocated/available/pending/reserved gauges plus the aggregate
 * allocation/release counters (long form).
 *
 * @param source the metrics source to snapshot; must not be null
 * @param allocatedMB expected AllocatedMB gauge value
 * @param allocatedCores expected AllocatedVCores gauge value
 * @param allocCtnrs expected AllocatedContainers gauge value
 * @param aggreAllocCtnrs expected AggregateContainersAllocated counter value
 * @param aggreReleasedCtnrs expected AggregateContainersReleased counter value
 * @param availableMB expected AvailableMB gauge value
 * @param availableCores expected AvailableVCores gauge value
 * @param pendingMB expected PendingMB gauge value
 * @param pendingCores expected PendingVCores gauge value
 * @param pendingCtnrs expected PendingContainers gauge value
 * @param reservedMB expected ReservedMB gauge value
 * @param reservedCores expected ReservedVCores gauge value
 * @param reservedCtnrs expected ReservedContainers gauge value
 */
public static void checkResources(MetricsSource source, long allocatedMB,
int allocatedCores, int allocCtnrs, long aggreAllocCtnrs,
long aggreReleasedCtnrs, long availableMB, int availableCores,
long pendingMB, int pendingCores, int pendingCtnrs, long reservedMB,
int reservedCores, int reservedCtnrs) {
MetricsRecordBuilder rb = getMetrics(source);
assertGauge("AllocatedMB", allocatedMB, rb);
assertGauge("AllocatedVCores", allocatedCores, rb);
assertGauge("AllocatedContainers", allocCtnrs, rb);
assertCounter("AggregateContainersAllocated", aggreAllocCtnrs, rb);
assertCounter("AggregateContainersReleased", aggreReleasedCtnrs, rb);
assertGauge("AvailableMB", availableMB, rb);
assertGauge("AvailableVCores", availableCores, rb);
assertGauge("PendingMB", pendingMB, rb);
assertGauge("PendingVCores", pendingCores, rb);
assertGauge("PendingContainers", pendingCtnrs, rb);
assertGauge("ReservedMB", reservedMB, rb);
assertGauge("ReservedVCores", reservedCores, rb);
assertGauge("ReservedContainers", reservedCtnrs, rb);
}
} | TestPartitionQueueMetrics |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java | {
"start": 3243,
"end": 3334
} | class ____ find
* @return the file
* @throws IOException any IO problem, including the | to |
java | apache__kafka | connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectWorkerIntegrationTest.java | {
"start": 67639,
"end": 68468
} | class ____ extends SinkConnector {
@Override
public String version() {
return "0.0";
}
@Override
public void start(Map<String, String> props) {
// no-op
}
@Override
public Class<? extends Task> taskClass() {
return SimpleTask.class;
}
@Override
public List<Map<String, String>> taskConfigs(int maxTasks) {
return IntStream.range(0, maxTasks)
.mapToObj(i -> Map.<String, String>of())
.collect(Collectors.toList());
}
@Override
public void stop() {
// no-op
}
@Override
public ConfigDef config() {
return new ConfigDef();
}
}
public static | EmptyTaskConfigsConnector |
java | apache__hadoop | hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestInvoker.java | {
"start": 20800,
"end": 20914
} | class ____ {
/**
* A local exception with a name to match the expected one.
*/
private static | Local |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvContainsIntEvaluator.java | {
"start": 1051,
"end": 3230
} | class ____ implements EvalOperator.ExpressionEvaluator {
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(MvContainsIntEvaluator.class);
private final Source source;
private final EvalOperator.ExpressionEvaluator superset;
private final EvalOperator.ExpressionEvaluator subset;
private final DriverContext driverContext;
private Warnings warnings;
public MvContainsIntEvaluator(Source source, EvalOperator.ExpressionEvaluator superset,
EvalOperator.ExpressionEvaluator subset, DriverContext driverContext) {
this.source = source;
this.superset = superset;
this.subset = subset;
this.driverContext = driverContext;
}
@Override
public Block eval(Page page) {
try (IntBlock supersetBlock = (IntBlock) superset.eval(page)) {
try (IntBlock subsetBlock = (IntBlock) subset.eval(page)) {
return eval(page.getPositionCount(), supersetBlock, subsetBlock);
}
}
}
@Override
public long baseRamBytesUsed() {
long baseRamBytesUsed = BASE_RAM_BYTES_USED;
baseRamBytesUsed += superset.baseRamBytesUsed();
baseRamBytesUsed += subset.baseRamBytesUsed();
return baseRamBytesUsed;
}
public BooleanBlock eval(int positionCount, IntBlock supersetBlock, IntBlock subsetBlock) {
try(BooleanBlock.Builder result = driverContext.blockFactory().newBooleanBlockBuilder(positionCount)) {
position: for (int p = 0; p < positionCount; p++) {
result.appendBoolean(MvContains.process(p, supersetBlock, subsetBlock));
}
return result.build();
}
}
@Override
public String toString() {
return "MvContainsIntEvaluator[" + "superset=" + superset + ", subset=" + subset + "]";
}
@Override
public void close() {
Releasables.closeExpectNoException(superset, subset);
}
private Warnings warnings() {
if (warnings == null) {
this.warnings = Warnings.createWarnings(
driverContext.warningsMode(),
source.source().getLineNumber(),
source.source().getColumnNumber(),
source.text()
);
}
return warnings;
}
static | MvContainsIntEvaluator |
java | quarkusio__quarkus | integration-tests/kubernetes/quarkus-standard-way/src/test/java/io/quarkus/it/kubernetes/KubernetesWithIngressTest.java | {
"start": 657,
"end": 3064
} | class ____ {
private static final String APP_NAME = "kubernetes-with-ingress";
@RegisterExtension
static final QuarkusProdModeTest config = new QuarkusProdModeTest()
.withApplicationRoot((jar) -> jar.addClasses(GreetingResource.class))
.setApplicationName(APP_NAME)
.setApplicationVersion("0.1-SNAPSHOT")
.withConfigurationResource(APP_NAME + ".properties")
.setLogFileName("k8s.log")
.setForcedDependencies(List.of(Dependency.of("io.quarkus", "quarkus-kubernetes", Version.getVersion())));
@ProdBuildResults
private ProdModeTestResults prodModeTestResults;
@Test
public void assertGeneratedResources() throws IOException {
final Path kubernetesDir = prodModeTestResults.getBuildDir().resolve("kubernetes");
assertThat(kubernetesDir)
.isDirectoryContaining(p -> p.getFileName().endsWith("kubernetes.json"))
.isDirectoryContaining(p -> p.getFileName().endsWith("kubernetes.yml"));
List<HasMetadata> kubernetesList = DeserializationUtil
.deserializeAsList(kubernetesDir.resolve("kubernetes.yml"));
assertThat(kubernetesList.get(0)).isInstanceOfSatisfying(Deployment.class, d -> {
assertThat(d.getMetadata()).satisfies(m -> {
assertThat(m.getName()).isEqualTo(APP_NAME);
});
});
assertThat(kubernetesList).filteredOn(i -> "Ingress".equals(i.getKind())).singleElement().satisfies(item -> {
assertThat(item).isInstanceOfSatisfying(Ingress.class, ingress -> {
//Check that labels and annotations are also applied to Routes (#10260)
assertThat(ingress.getMetadata()).satisfies(m -> {
assertThat(m.getName()).isEqualTo(APP_NAME);
});
assertThat(ingress.getSpec()).satisfies(spec -> {
assertThat(spec.getIngressClassName()).isEqualTo("Nginx");
});
assertThat(ingress.getSpec().getRules()).allSatisfy(rule -> {
assertThat(rule.getHttp().getPaths()).allSatisfy(path -> {
assertThat(path.getPath()).isNotBlank();
assertThat(path.getPathType()).isNotBlank();
});
});
});
});
}
}
| KubernetesWithIngressTest |
java | redisson__redisson | redisson/src/main/java/org/redisson/config/BaseConfig.java | {
"start": 1038,
"end": 27933
} | class ____<T extends BaseConfig<T>> {
protected static final Logger log = LoggerFactory.getLogger("config");
/**
* If pooled connection not used for a <code>timeout</code> time
* and current connections amount bigger than minimum idle connections pool size,
* then it will closed and removed from pool.
* Value in milliseconds.
*
*/
private int idleConnectionTimeout = 10000;
/**
* Timeout during connecting to any Redis server.
* Value in milliseconds.
*
*/
private int connectTimeout = 10000;
/**
* Redis server response timeout. Starts to countdown when Redis command was succesfully sent.
* Value in milliseconds.
*
*/
private int timeout = 3000;
private int subscriptionTimeout = 7500;
private int retryAttempts = 4;
@Deprecated
private int retryInterval = 1500;
private DelayStrategy retryDelay = new EqualJitterDelay(Duration.ofMillis(1000), Duration.ofSeconds(2));
private DelayStrategy reconnectionDelay = new EqualJitterDelay(Duration.ofMillis(100), Duration.ofSeconds(10));
/**
* Password for Redis authentication. Should be null if not needed
*/
@Deprecated
private String password;
@Deprecated
private String username;
@Deprecated
private CredentialsResolver credentialsResolver = new DefaultCredentialsResolver();
/**
* Subscriptions per Redis connection limit
*/
private int subscriptionsPerConnection = 5;
/**
* Name of client connection
*/
private String clientName;
@Deprecated
private SslVerificationMode sslVerificationMode = SslVerificationMode.STRICT;
@Deprecated
private String sslKeystoreType;
@Deprecated
private SslProvider sslProvider = SslProvider.JDK;
@Deprecated
private URL sslTruststore;
@Deprecated
private String sslTruststorePassword;
@Deprecated
private URL sslKeystore;
@Deprecated
private String sslKeystorePassword;
@Deprecated
private String[] sslProtocols;
@Deprecated
private String[] sslCiphers;
@Deprecated
private TrustManagerFactory sslTrustManagerFactory;
@Deprecated
private KeyManagerFactory sslKeyManagerFactory;
private int pingConnectionInterval = 30000;
@Deprecated
private boolean keepAlive;
@Deprecated
private int tcpKeepAliveCount;
@Deprecated
private int tcpKeepAliveIdle;
@Deprecated
private int tcpKeepAliveInterval;
@Deprecated
private int tcpUserTimeout;
@Deprecated
private boolean tcpNoDelay = true;
@Deprecated
private NameMapper nameMapper = NameMapper.direct();
@Deprecated
private CommandMapper commandMapper = CommandMapper.direct();
BaseConfig() {
}
BaseConfig(T config) {
if (config.getUsername() != null) {
setUsername(config.getUsername());
}
if (config.getPassword() != null) {
setPassword(config.getPassword());
}
if (!(config.getNameMapper() instanceof DefaultNameMapper)) {
setNameMapper(config.getNameMapper());
}
if (!(config.getCommandMapper() instanceof DefaultCommandMapper)) {
setCommandMapper(config.getCommandMapper());
}
if (!(config.getCredentialsResolver() instanceof DefaultCredentialsResolver)) {
setCredentialsResolver(config.getCredentialsResolver());
}
if (config.getSslVerificationMode() != SslVerificationMode.STRICT) {
setSslVerificationMode(config.getSslVerificationMode());
}
if (config.getSslKeystoreType() != null) {
setSslKeystoreType(config.getSslKeystoreType());
}
if (config.getSslProvider() != SslProvider.JDK) {
setSslProvider(config.getSslProvider());
}
if (config.getSslTruststore() != null) {
setSslTruststore(config.getSslTruststore());
}
if (config.getSslTruststorePassword() != null) {
setSslTruststorePassword(config.getSslTruststorePassword());
}
if (config.getSslKeystore() != null) {
setSslKeystore(config.getSslKeystore());
}
if (config.getSslKeystorePassword() != null) {
setSslKeystorePassword(config.getSslKeystorePassword());
}
if (config.getSslProtocols() != null) {
setSslProtocols(config.getSslProtocols());
}
if (config.getSslCiphers() != null) {
setSslCiphers(config.getSslCiphers());
}
if (config.getSslKeyManagerFactory() != null) {
setSslKeyManagerFactory(config.getSslKeyManagerFactory());
}
if (config.getSslTrustManagerFactory() != null) {
setSslTrustManagerFactory(config.getSslTrustManagerFactory());
}
if (config.isKeepAlive()) {
setKeepAlive(config.isKeepAlive());
}
if (config.getTcpKeepAliveCount() != 0) {
setTcpKeepAliveCount(config.getTcpKeepAliveCount());
}
if (config.getTcpKeepAliveIdle() != 0) {
setTcpKeepAliveIdle(config.getTcpKeepAliveIdle());
}
if (config.getTcpKeepAliveInterval() != 0) {
setTcpKeepAliveInterval(config.getTcpKeepAliveInterval());
}
if (config.getTcpUserTimeout() != 0) {
setTcpUserTimeout(config.getTcpUserTimeout());
}
if (!config.isTcpNoDelay()) {
setTcpNoDelay(config.isTcpNoDelay());
}
setSubscriptionsPerConnection(config.getSubscriptionsPerConnection());
setRetryAttempts(config.getRetryAttempts());
setRetryDelay(config.getRetryDelay());
setReconnectionDelay(config.getReconnectionDelay());
setTimeout(config.getTimeout());
setClientName(config.getClientName());
setConnectTimeout(config.getConnectTimeout());
setIdleConnectionTimeout(config.getIdleConnectionTimeout());
setPingConnectionInterval(config.getPingConnectionInterval());
setSubscriptionTimeout(config.getSubscriptionTimeout());
}
/**
* Subscriptions per Redis connection limit
* <p>
* Default is <code>5</code>
*
* @param subscriptionsPerConnection amount
* @return config
*/
public T setSubscriptionsPerConnection(int subscriptionsPerConnection) {
this.subscriptionsPerConnection = subscriptionsPerConnection;
return (T) this;
}
public int getSubscriptionsPerConnection() {
return subscriptionsPerConnection;
}
/**
* Use {@link Config#setPassword(String)} instead.
* Password for Redis authentication. Should be null if not needed.
* <p>
* Default is <code>null</code>
*
* @param password for connection
* @return config
*/
@Deprecated
public T setPassword(String password) {
log.warn("password setting is deprecated. Use password setting in Config instead.");
this.password = password;
return (T) this;
}
@Deprecated
public String getPassword() {
return password;
}
/**
* Use {@link Config#setUsername(String)} instead.
* Username for Redis authentication. Should be null if not needed
* <p>
* Default is <code>null</code>
* <p>
* Requires Redis 6.0+
*
* @param username for connection
* @return config
*/
@Deprecated
public T setUsername(String username) {
log.warn("username setting is deprecated. Use username setting in object instead.");
this.username = username;
return (T) this;
}
@Deprecated
public String getUsername() {
return username;
}
/**
* Error will be thrown if Redis command can't be sent to Redis server after <code>retryAttempts</code>.
* But if it sent successfully then <code>timeout</code> will be started.
* <p>
* Default is <code>3</code> attempts
*
* @see #timeout
* @param retryAttempts retry attempts
* @return config
*/
public T setRetryAttempts(int retryAttempts) {
if (retryAttempts < 0 || retryAttempts == Integer.MAX_VALUE) {
throw new IllegalArgumentException("retryAttempts setting can't be negative or MAX_VALUE");
}
this.retryAttempts = retryAttempts;
return (T) this;
}
public int getRetryAttempts() {
return retryAttempts;
}
/**
* Use {@link #setRetryDelay(DelayStrategy)} instead.
*
* @param retryInterval - time in milliseconds
* @return config
*/
@Deprecated
public T setRetryInterval(int retryInterval) {
this.retryInterval = retryInterval;
this.retryDelay = new ConstantDelay(Duration.ofMillis(retryInterval));
return (T) this;
}
@Deprecated
public int getRetryInterval() {
return retryInterval;
}
/**
* Redis server response timeout. Starts to countdown when Redis command has been successfully sent.
* <p>
* Default is <code>3000</code> milliseconds
*
* @param timeout in milliseconds
* @return config
*/
public T setTimeout(int timeout) {
this.timeout = timeout;
return (T) this;
}
public int getTimeout() {
return timeout;
}
public int getSubscriptionTimeout() {
return subscriptionTimeout;
}
/**
* Defines subscription timeout applied per channel subscription.
* <p>
* Default is <code>7500</code> milliseconds.
*
* @param subscriptionTimeout timeout in milliseconds
* @return config
*/
public T setSubscriptionTimeout(int subscriptionTimeout) {
this.subscriptionTimeout = subscriptionTimeout;
return (T) this;
}
/**
* Setup connection name during connection init
* via CLIENT SETNAME command
* <p>
* Default is <code>null</code>
*
* @param clientName name of client
* @return config
*/
public T setClientName(String clientName) {
this.clientName = clientName;
return (T) this;
}
public String getClientName() {
return clientName;
}
/**
* Timeout during connecting to any Redis server.
* <p>
* Default is <code>10000</code> milliseconds.
*
* @param connectTimeout timeout in milliseconds
* @return config
*/
public T setConnectTimeout(int connectTimeout) {
this.connectTimeout = connectTimeout;
return (T) this;
}
public int getConnectTimeout() {
return connectTimeout;
}
/**
* If pooled connection not used for a <code>timeout</code> time
* and current connections amount bigger than minimum idle connections pool size,
* then it will closed and removed from pool.
* <p>
* Default is <code>10000</code> milliseconds.
*
* @param idleConnectionTimeout timeout in milliseconds
* @return config
*/
public T setIdleConnectionTimeout(int idleConnectionTimeout) {
this.idleConnectionTimeout = idleConnectionTimeout;
return (T) this;
}
public int getIdleConnectionTimeout() {
return idleConnectionTimeout;
}
@Deprecated
public boolean isSslEnableEndpointIdentification() {
return this.sslVerificationMode == SslVerificationMode.STRICT;
}
/**
* Use {@link #setSslVerificationMode(SslVerificationMode)} instead.
*
* @param sslEnableEndpointIdentification boolean value
* @return config
*/
@Deprecated
public T setSslEnableEndpointIdentification(boolean sslEnableEndpointIdentification) {
log.warn("sslEnableEndpointIdentification setting is deprecated. Use sslVerificationMode setting instead.");
if (sslEnableEndpointIdentification) {
this.sslVerificationMode = SslVerificationMode.STRICT;
} else {
this.sslVerificationMode = SslVerificationMode.NONE;
}
return (T) this;
}
@Deprecated
public SslProvider getSslProvider() {
return sslProvider;
}
/**
* Use {@link Config#setSslProvider(SslProvider)} instead.
* Defines SSL provider used to handle SSL connections.
* <p>
* Default is <code>JDK</code>
*
* @param sslProvider ssl provider
* @return config
*/
@Deprecated
public T setSslProvider(SslProvider sslProvider) {
log.warn("sslProvider setting is deprecated. Use sslProvider setting in Config instead.");
this.sslProvider = sslProvider;
return (T) this;
}
@Deprecated
public URL getSslTruststore() {
return sslTruststore;
}
/**
* Use {@link Config#setSslTruststore(URL)} instead.
* Defines path to SSL truststore
* <p>
* Default is <code>null</code>
*
* @param sslTruststore truststore path
* @return config
*/
@Deprecated
public T setSslTruststore(URL sslTruststore) {
log.warn("sslTruststore setting is deprecated. Use sslTruststore setting in Config instead.");
this.sslTruststore = sslTruststore;
return (T) this;
}
@Deprecated
public String getSslTruststorePassword() {
return sslTruststorePassword;
}
/**
* Use {@link Config#setSslTruststorePassword(String)} instead.
* Defines password for SSL truststore.
* SSL truststore is read on each new connection creation and can be dynamically reloaded.
* <p>
* Default is <code>null</code>
*
* @param sslTruststorePassword - password
* @return config
*/
@Deprecated
public T setSslTruststorePassword(String sslTruststorePassword) {
log.warn("sslTruststorePassword setting is deprecated. Use sslTruststorePassword setting in Config instead.");
this.sslTruststorePassword = sslTruststorePassword;
return (T) this;
}
@Deprecated
public URL getSslKeystore() {
return sslKeystore;
}
/**
* Use {@link Config#setSslKeystore(URL)} instead.
* Defines path to SSL keystore.
* SSL keystore is read on each new connection creation and can be dynamically reloaded.
* <p>
* Default is <code>null</code>
*
* @param sslKeystore path to keystore
* @return config
*/
@Deprecated
public T setSslKeystore(URL sslKeystore) {
log.warn("sslKeystore setting is deprecated. Use sslKeystore setting in Config instead.");
this.sslKeystore = sslKeystore;
return (T) this;
}
@Deprecated
public String getSslKeystorePassword() {
return sslKeystorePassword;
}
/**
* Use {@link Config#setSslKeystorePassword(String)} instead.
* Defines password for SSL keystore
* <p>
* Default is <code>null</code>
*
* @param sslKeystorePassword password
* @return config
*/
@Deprecated
public T setSslKeystorePassword(String sslKeystorePassword) {
log.warn("sslKeystorePassword setting is deprecated. Use sslKeystorePassword setting in Config instead.");
this.sslKeystorePassword = sslKeystorePassword;
return (T) this;
}
@Deprecated
public String[] getSslProtocols() {
return sslProtocols;
}
/**
* Use {@link Config#setSslProtocols(String[])} instead.
* Defines SSL protocols.
* Example values: TLSv1.3, TLSv1.2, TLSv1.1, TLSv1
* <p>
* Default is <code>null</code>
*
* @param sslProtocols protocols
* @return config
*/
@Deprecated
public T setSslProtocols(String[] sslProtocols) {
log.warn("sslProtocols setting is deprecated. Use sslProtocols setting in Config instead.");
this.sslProtocols = sslProtocols;
return (T) this;
}
public int getPingConnectionInterval() {
return pingConnectionInterval;
}
/**
* Defines PING command sending interval per connection to Redis.
* <code>0</code> means disable.
* <p>
* Default is <code>30000</code>
*
* @param pingConnectionInterval time in milliseconds
* @return config
*/
public T setPingConnectionInterval(int pingConnectionInterval) {
this.pingConnectionInterval = pingConnectionInterval;
return (T) this;
}
@Deprecated
public boolean isKeepAlive() {
return keepAlive;
}
/**
* Use {@link Config#setTcpKeepAlive(boolean)} instead.
* Enables TCP keepAlive for connection
* <p>
* Default is <code>false</code>
*
* @param keepAlive boolean value
* @return config
*/
@Deprecated
public T setKeepAlive(boolean keepAlive) {
log.warn("setKeepAlive setting is deprecated. Use setTcpKeepAlive setting in Config instead.");
this.keepAlive = keepAlive;
return (T) this;
}
@Deprecated
public int getTcpKeepAliveCount() {
return tcpKeepAliveCount;
}
/**
* Use {@link Config#setTcpKeepAliveCount(int)} instead.
* Defines the maximum number of keepalive probes
* TCP should send before dropping the connection.
*
* @param tcpKeepAliveCount maximum number of keepalive probes
* @return config
*/
@Deprecated
public T setTcpKeepAliveCount(int tcpKeepAliveCount) {
log.warn("setTcpKeepAliveCount setting is deprecated. Use setTcpKeepAliveCount setting in Config instead.");
this.tcpKeepAliveCount = tcpKeepAliveCount;
return (T) this;
}
@Deprecated
public int getTcpKeepAliveIdle() {
return tcpKeepAliveIdle;
}
/**
* Use {@link Config#setTcpKeepAliveIdle(int)} instead.
* Defines the time in seconds the connection needs to remain idle
* before TCP starts sending keepalive probes,
*
* @param tcpKeepAliveIdle time in seconds
* @return config
*/
@Deprecated
public T setTcpKeepAliveIdle(int tcpKeepAliveIdle) {
log.warn("setTcpKeepAliveIdle setting is deprecated. Use setTcpKeepAliveIdle setting in Config instead.");
this.tcpKeepAliveIdle = tcpKeepAliveIdle;
return (T) this;
}
@Deprecated
public int getTcpKeepAliveInterval() {
return tcpKeepAliveInterval;
}
/**
* Use {@link Config#setTcpKeepAliveInterval(int)} instead.
* Defines the time in seconds between individual keepalive probes.
*
* @param tcpKeepAliveInterval time in seconds
* @return config
*/
@Deprecated
public T setTcpKeepAliveInterval(int tcpKeepAliveInterval) {
log.warn("setTcpKeepAliveInterval setting is deprecated. Use setTcpKeepAliveInterval setting in Config instead.");
this.tcpKeepAliveInterval = tcpKeepAliveInterval;
return (T) this;
}
@Deprecated
public int getTcpUserTimeout() {
return tcpUserTimeout;
}
/**
* Use {@link Config#setTcpUserTimeout(int)} instead.
* Defines the maximum amount of time in milliseconds that transmitted data may
* remain unacknowledged, or buffered data may remain untransmitted
* (due to zero window size) before TCP will forcibly close the connection.
*
* @param tcpUserTimeout time in milliseconds
* @return config
*/
@Deprecated
public T setTcpUserTimeout(int tcpUserTimeout) {
log.warn("setTcpUserTimeout setting is deprecated. Use setTcpUserTimeout setting in Config instead.");
this.tcpUserTimeout = tcpUserTimeout;
return (T) this;
}
@Deprecated
public boolean isTcpNoDelay() {
return tcpNoDelay;
}
/**
* Use {@link Config#setTcpNoDelay(boolean)} instead.
* Enables TCP noDelay for connection
* <p>
* Default is <code>true</code>
*
* @param tcpNoDelay boolean value
* @return config
*/
@Deprecated
public T setTcpNoDelay(boolean tcpNoDelay) {
log.warn("setTcpNoDelay setting is deprecated. Use setTcpNoDelay setting in Config instead.");
this.tcpNoDelay = tcpNoDelay;
return (T) this;
}
@Deprecated
public NameMapper getNameMapper() {
return nameMapper;
}
/**
* Use {@link Config#setNameMapper(NameMapper)} instead.
* Defines Name mapper which maps Redisson object name.
* Applied to all Redisson objects.
*
* @param nameMapper name mapper object
* @return config
*/
@Deprecated
public T setNameMapper(NameMapper nameMapper) {
log.warn("nameMapper setting is deprecated. Use nameMapper setting in Config instead.");
this.nameMapper = nameMapper;
return (T) this;
}
@Deprecated
public CredentialsResolver getCredentialsResolver() {
return credentialsResolver;
}
/**
* Use {@link Config#setCredentialsResolver(CredentialsResolver)} instead.
* Defines Credentials resolver which is invoked during connection for Redis server authentication.
* It makes possible to specify dynamically changing Redis credentials.
*
* @see EntraIdCredentialsResolver
*
* @param credentialsResolver Credentials resolver object
* @return config
*/
@Deprecated
public T setCredentialsResolver(CredentialsResolver credentialsResolver) {
log.warn("credentialsResolver setting is deprecated. Use credentialsResolver setting in Config instead.");
this.credentialsResolver = credentialsResolver;
return (T) this;
}
@Deprecated
public String getSslKeystoreType() {
return sslKeystoreType;
}
/**
* Use {@link Config#setSslKeystoreType(String)} instead.
* Defines SSL keystore type.
* <p>
* Default is <code>null</code>
*
* @param sslKeystoreType keystore type
* @return config
*/
@Deprecated
public T setSslKeystoreType(String sslKeystoreType) {
log.warn("sslKeystoreType setting is deprecated. Use sslKeystoreType setting in Config instead.");
this.sslKeystoreType = sslKeystoreType;
return (T) this;
}
@Deprecated
public String[] getSslCiphers() {
return sslCiphers;
}
/**
* Use {@link Config#setSslCiphers(String[])} instead.
* Defines SSL ciphers.
* <p>
* Default is <code>null</code>
*
* @param sslCiphers ciphers
* @return config
*/
@Deprecated
public T setSslCiphers(String[] sslCiphers) {
log.warn("sslCiphers setting is deprecated. Use sslCiphers setting in Config instead.");
this.sslCiphers = sslCiphers;
return (T) this;
}
@Deprecated
public TrustManagerFactory getSslTrustManagerFactory() {
return sslTrustManagerFactory;
}
/**
* Use {@link Config#setSslTrustManagerFactory(TrustManagerFactory)} instead.
* Defines SSL TrustManagerFactory.
* <p>
* Default is <code>null</code>
*
* @param trustManagerFactory trust manager value
* @return config
*/
@Deprecated
public T setSslTrustManagerFactory(TrustManagerFactory trustManagerFactory) {
log.warn("trustManagerFactory setting is deprecated. Use trustManagerFactory setting in Config instead.");
this.sslTrustManagerFactory = trustManagerFactory;
return (T) this;
}
@Deprecated
public KeyManagerFactory getSslKeyManagerFactory() {
return sslKeyManagerFactory;
}
/**
* Use {@link Config#setSslKeyManagerFactory(KeyManagerFactory)} instead.
* Defines SSL KeyManagerFactory.
* <p>
* Default is <code>null</code>
*
* @param keyManagerFactory key manager value
* @return config
*/
@Deprecated
public BaseConfig<T> setSslKeyManagerFactory(KeyManagerFactory keyManagerFactory) {
log.warn("keyManagerFactory setting is deprecated. Use keyManagerFactory setting in Config instead.");
this.sslKeyManagerFactory = keyManagerFactory;
return this;
}
@Deprecated
public CommandMapper getCommandMapper() {
return commandMapper;
}
/**
* Use {@link Config#setCommandMapper(CommandMapper)} instead.
* Defines Command mapper which maps Redis command name.
* Applied to all Redis commands.
*
* @param commandMapper Redis command name mapper object
* @return config
*/
@Deprecated
public T setCommandMapper(CommandMapper commandMapper) {
log.warn("commandMapper setting is deprecated. Use commandMapper setting in Config instead.");
this.commandMapper = commandMapper;
return (T) this;
}
@Deprecated
public SslVerificationMode getSslVerificationMode() {
return sslVerificationMode;
}
/**
* Use {@link Config#setSslVerificationMode(SslVerificationMode)} instead.
* Defines SSL verification mode, which prevents man-in-the-middle attacks.
*
* <p>
* Default is <code>SslVerificationMode.STRICT</code>
*
* @param sslVerificationMode mode value
* @return config
*/
@Deprecated
public T setSslVerificationMode(SslVerificationMode sslVerificationMode) {
log.warn("sslVerificationMode setting is deprecated. Use sslVerificationMode setting in Config instead.");
this.sslVerificationMode = sslVerificationMode;
return (T) this;
}
public DelayStrategy getRetryDelay() {
return retryDelay;
}
/**
* Defines the delay strategy for a new attempt to send a command.
* <p>
* Default is <code>EqualJitterDelay(Duration.ofSeconds(1), Duration.ofSeconds(2))</code>
*
* @see DecorrelatedJitterDelay
* @see EqualJitterDelay
* @see FullJitterDelay
* @see ConstantDelay
*
* @param retryDelay delay strategy implementation
* @return options instance
*/
public T setRetryDelay(DelayStrategy retryDelay) {
this.retryDelay = retryDelay;
return (T) this;
}
public DelayStrategy getReconnectionDelay() {
return reconnectionDelay;
}
/**
* Defines the delay strategy for a new attempt to reconnect a connection.
* <p>
* Default is <code>EqualJitterDelay(Duration.ofMillis(100), Duration.ofSeconds(10))</code>
*
* @see DecorrelatedJitterDelay
* @see EqualJitterDelay
* @see FullJitterDelay
* @see ConstantDelay
*
* @param reconnectionDelay delay strategy implementation
* @return options instance
*/
public T setReconnectionDelay(DelayStrategy reconnectionDelay) {
this.reconnectionDelay = reconnectionDelay;
return (T) this;
}
}
| BaseConfig |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/manytomany/defaults/JpaCompliantManyToManyImplicitNamingTest.java | {
"start": 1039,
"end": 1141
} | class ____ extends ManyToManyImplicitNamingTest {
public static | JpaCompliantManyToManyImplicitNamingTest |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/internal/floatarrays/FloatArrays_assertHasSizeGreaterThanOrEqualTo_Test.java | {
"start": 1061,
"end": 2405
} | class ____ extends FloatArraysBaseTest {
@Test
void should_fail_if_actual_is_null() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> arrays.assertHasSizeGreaterThanOrEqualTo(someInfo(), null,
6))
.withMessage(actualIsNull());
}
@Test
void should_fail_if_size_of_actual_is_not_greater_than_or_equal_to_boundary() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> arrays.assertHasSizeGreaterThanOrEqualTo(someInfo(), actual,
6))
.withMessage(shouldHaveSizeGreaterThanOrEqualTo(actual, actual.length,
6).create());
}
@Test
void should_pass_if_size_of_actual_is_greater_than_boundary() {
arrays.assertHasSizeGreaterThanOrEqualTo(someInfo(), actual, 1);
}
@Test
void should_pass_if_size_of_actual_is_equal_to_boundary() {
arrays.assertHasSizeGreaterThanOrEqualTo(someInfo(), actual, actual.length);
}
}
| FloatArrays_assertHasSizeGreaterThanOrEqualTo_Test |
java | apache__flink | flink-python/src/test/java/org/apache/flink/table/utils/TestingSinks.java | {
"start": 1759,
"end": 3009
} | class ____ implements DynamicTableSink {
private final DataType rowDataType;
public TestAppendingSink(DataType rowDataType) {
this.rowDataType = rowDataType;
}
@Override
public ChangelogMode getChangelogMode(ChangelogMode requestedMode) {
return requestedMode;
}
@Override
public SinkRuntimeProvider getSinkRuntimeProvider(Context context) {
final DataStructureConverter converter =
context.createDataStructureConverter(rowDataType);
return new DataStreamSinkProvider() {
@Override
public DataStreamSink<?> consumeDataStream(
ProviderContext providerContext, DataStream<RowData> dataStream) {
return dataStream.addSink(new RowSink(converter));
}
};
}
@Override
public DynamicTableSink copy() {
return new TestAppendingSink(rowDataType);
}
@Override
public String asSummaryString() {
return String.format("TestingAppendSink(%s)", DataType.getFields(rowDataType));
}
}
/** RowSink for testing. */
static | TestAppendingSink |
java | apache__camel | components/camel-pdf/src/main/java/org/apache/camel/component/pdf/PdfProducer.java | {
"start": 1973,
"end": 7973
} | class ____ extends DefaultProducer {
private static final Logger LOG = LoggerFactory.getLogger(PdfProducer.class);
private final WriteStrategy writeStrategy;
private final SplitStrategy splitStrategy;
private final LineBuilderStrategy lineBuilderStrategy;
private final PdfConfiguration pdfConfiguration;
public PdfProducer(PdfEndpoint endpoint) {
super(endpoint);
this.pdfConfiguration = endpoint.getPdfConfiguration();
TextProcessingAbstractFactory textProcessingFactory = createTextProcessingFactory(pdfConfiguration);
this.writeStrategy = textProcessingFactory.createWriteStrategy();
this.splitStrategy = textProcessingFactory.createSplitStrategy();
this.lineBuilderStrategy = textProcessingFactory.createLineBuilderStrategy();
}
@Override
public void process(Exchange exchange) throws Exception {
Object result;
switch (pdfConfiguration.getOperation()) {
case append:
result = doAppend(exchange);
break;
case create:
result = doCreate(exchange);
break;
case extractText:
result = doExtractText(exchange);
break;
case merge:
result = doMerge(exchange);
break;
default:
throw new IllegalArgumentException(String.format("Unknown operation %s", pdfConfiguration.getOperation()));
}
// propagate headers
exchange.getMessage().setHeaders(exchange.getIn().getHeaders());
// and set result
exchange.getMessage().setBody(result);
}
private OutputStream doMerge(Exchange exchange) throws IOException, NoSuchHeaderException {
LOG.debug("Got {} operation, going to merge multiple files into a single pdf document.",
pdfConfiguration.getOperation());
PDFMergerUtility mergerUtility = new PDFMergerUtility();
List<File> files = ExchangeHelper.getMandatoryHeader(exchange, FILES_TO_MERGE_HEADER_NAME, List.class);
if (files.size() < 2) {
throw new IllegalArgumentException("Must provide at least 2 files to merge");
}
for (File file : files) {
mergerUtility.addSource(file);
}
mergerUtility.setDestinationStream(new ByteArrayOutputStream());
mergerUtility.mergeDocuments(RandomAccessStreamCacheImpl::new);
return mergerUtility.getDestinationStream();
}
private Object doAppend(Exchange exchange) throws IOException {
LOG.debug("Got {} operation, going to append text to provided pdf.", pdfConfiguration.getOperation());
String body = exchange.getIn().getBody(String.class);
try (PDDocument document = exchange.getIn().getHeader(PDF_DOCUMENT_HEADER_NAME, PDDocument.class)) {
if (document == null) {
throw new IllegalArgumentException(
String.format("%s header is expected for append operation",
PDF_DOCUMENT_HEADER_NAME));
}
if (document.isEncrypted()) {
document.setAllSecurityToBeRemoved(true);
}
ProtectionPolicy protectionPolicy = exchange.getIn().getHeader(
PROTECTION_POLICY_HEADER_NAME, ProtectionPolicy.class);
appendToPdfDocument(body, document, protectionPolicy);
OutputStream byteArrayOutputStream = new ByteArrayOutputStream();
document.save(byteArrayOutputStream);
return byteArrayOutputStream;
}
}
private String doExtractText(Exchange exchange) throws IOException {
LOG.debug("Got {} operation, going to extract text from provided pdf.", pdfConfiguration.getOperation());
try (PDDocument document = exchange.getIn().getBody(PDDocument.class)) {
PDFTextStripper pdfTextStripper = new PDFTextStripper();
return pdfTextStripper.getText(document);
}
}
private OutputStream doCreate(Exchange exchange) throws IOException {
LOG.debug("Got {} operation, going to create and write provided string to pdf document.",
pdfConfiguration.getOperation());
String body = exchange.getIn().getBody(String.class);
try (PDDocument document = new PDDocument()) {
StandardProtectionPolicy protectionPolicy = exchange.getIn().getHeader(
PROTECTION_POLICY_HEADER_NAME, StandardProtectionPolicy.class);
appendToPdfDocument(body, document, protectionPolicy);
OutputStream byteArrayOutputStream = new ByteArrayOutputStream();
document.save(byteArrayOutputStream);
return byteArrayOutputStream;
}
}
private void appendToPdfDocument(String text, PDDocument document, ProtectionPolicy protectionPolicy) throws IOException {
Collection<String> words = splitStrategy.split(text);
Collection<String> lines = lineBuilderStrategy.buildLines(words);
writeStrategy.write(lines, document);
if (protectionPolicy != null) {
document.protect(protectionPolicy);
}
}
private TextProcessingAbstractFactory createTextProcessingFactory(PdfConfiguration pdfConfiguration) {
TextProcessingAbstractFactory result;
switch (pdfConfiguration.getTextProcessingFactory()) {
case autoFormatting:
result = new AutoFormattedWriterAbstractFactory(pdfConfiguration);
break;
case lineTermination:
result = new LineTerminationWriterAbstractFactory(pdfConfiguration);
break;
default:
throw new IllegalArgumentException(
String.format("Unknown text processing factory %s",
pdfConfiguration.getTextProcessingFactory()));
}
return result;
}
}
| PdfProducer |
java | junit-team__junit5 | junit-jupiter-api/src/main/java/org/junit/jupiter/api/condition/DisabledForJreRange.java | {
"start": 4080,
"end": 4619
} | enum ____ does not exist for a particular JRE
* version, you can specify the minimum version via
* {@link #minVersion() minVersion} instead.
*
* <p>Defaults to {@link JRE#UNDEFINED UNDEFINED}, which will be interpreted
* as {@link JRE#JAVA_17 JAVA_17} if the {@link #minVersion() minVersion} is
* not set.
*
* @see JRE
* @see #minVersion()
*/
JRE min() default JRE.UNDEFINED;
/**
* Java Runtime Environment version which is used as the upper boundary for
* the version range that determines if the annotated | constant |
java | apache__camel | test-infra/camel-test-infra-artemis/src/main/java/org/apache/camel/test/infra/artemis/services/ArtemisAMQPInfraService.java | {
"start": 2024,
"end": 5669
} | class ____ extends AbstractArtemisEmbeddedService {
private String brokerURL;
private int amqpPort;
@Override
protected Configuration configure(Configuration artemisConfiguration, int port, int brokerId) {
amqpPort = port;
String sslEnabled = System.getProperty(ArtemisProperties.ARTEMIS_SSL_ENABLED, "false");
String keyStorePath = System.getProperty(ArtemisProperties.ARTEMIS_SSL_KEYSTORE_PATH, "");
String keyStorePassword = System.getProperty(ArtemisProperties.ARTEMIS_SSL_KEYSTORE_PASSWORD, "");
String trustStorePath = System.getProperty(ArtemisProperties.ARTEMIS_SSL_TRUSTSTORE_PATH, "");
String trustStorePassword = System.getProperty(ArtemisProperties.ARTEMIS_SSL_TRUSTSTORE_PASSWORD, "");
brokerURL = "tcp://0.0.0.0:" + amqpPort
+ "?tcpSendBufferSize=1048576;tcpReceiveBufferSize=1048576;protocols=AMQP;useEpoll=true;amqpCredits=1000;amqpMinCredits=300"
+ String.format(
";sslEnabled=%s;keyStorePath=%s;keyStorePassword=%s;trustStorePath=%s;trustStorePassword=%s",
sslEnabled, keyStorePath, keyStorePassword, trustStorePath, trustStorePassword);
AddressSettings addressSettings = new AddressSettings();
addressSettings.setAddressFullMessagePolicy(AddressFullMessagePolicy.FAIL);
// Disable auto create address to make sure that topic name is correct without prefix
try {
artemisConfiguration.addAcceptorConfiguration("amqp", brokerURL);
} catch (Exception e) {
LOG.warn(e.getMessage(), e);
throw new ArtemisRunException("AMQP acceptor cannot be configured", e);
}
artemisConfiguration.setPersistenceEnabled(false);
artemisConfiguration.addAddressSetting("#", addressSettings);
artemisConfiguration.setSecurityEnabled(
"true".equalsIgnoreCase(System.getProperty(ArtemisProperties.ARTEMIS_AUTHENTICATION_ENABLED)));
if (artemisConfiguration.isSecurityEnabled()) {
SecurityConfiguration sc = new SecurityConfiguration();
String user = System.getProperty(ArtemisProperties.ARTEMIS_USERNAME, "camel");
String pw = System.getProperty(ArtemisProperties.ARTEMIS_PASSWORD, "rider");
sc.addUser(user, pw);
sc.addRole(user, "ALLOW_ALL");
ActiveMQSecurityManager securityManager = new ActiveMQJAASSecurityManager(InVMLoginModule.class.getName(), sc);
embeddedBrokerService.setSecurityManager(securityManager);
// any user can have full control of generic topics
String roleName = "ALLOW_ALL";
Role role = new Role(roleName, true, true, true, true, true, true, true, true, true, true, false, false);
Set<Role> roles = new HashSet<>();
roles.add(role);
artemisConfiguration.putSecurityRoles("#", roles);
}
artemisConfiguration.setMaxDiskUsage(98);
// Set explicit topic name
CoreAddressConfiguration pingTopicConfig = new CoreAddressConfiguration();
pingTopicConfig.setName("topic.ping");
pingTopicConfig.addRoutingType(RoutingType.MULTICAST);
artemisConfiguration.addAddressConfiguration(pingTopicConfig);
return artemisConfiguration;
}
@Override
public String serviceAddress() {
return brokerURL;
}
@Override
public String remoteURI() {
return "amqp://0.0.0.0:" + amqpPort;
}
@Override
public int brokerPort() {
return amqpPort;
}
}
| ArtemisAMQPInfraService |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/converted/converter/onetoone/BidirectionalOneToOneWithConverterEagerTest.java | {
"start": 1368,
"end": 3631
} | class ____ {
@BeforeAll
public void setUp(SessionFactoryScope scope) {
scope.inTransaction( session -> {
BarEntity bar = new BarEntity();
bar.setBusinessId( new BusinessId( SafeRandomUUIDGenerator.safeRandomUUIDAsString() ) );
bar.setaDouble( 0.5 );
FooEntity foo = new FooEntity();
foo.setBusinessId( new BusinessId( SafeRandomUUIDGenerator.safeRandomUUIDAsString() ) );
foo.setName( "foo_name" );
foo.setBar( bar );
bar.setFoo( foo );
session.persist( bar );
session.persist( foo );
} );
}
@AfterAll
public void tearDown(SessionFactoryScope scope) {
scope.inTransaction( session -> {
session.createMutationQuery( "delete from FooEntity" ).executeUpdate();
session.createMutationQuery( "delete from BarEntity" ).executeUpdate();
} );
}
@Test
public void testBidirectionalFetch(SessionFactoryScope scope) {
final SQLStatementInspector statementInspector = scope.getCollectingStatementInspector();
statementInspector.clear();
scope.inTransaction( session -> {
FooEntity foo = session.find( FooEntity.class, 1L );
statementInspector.assertExecutedCount( 1 );
BarEntity bar = foo.getBar();
statementInspector.assertExecutedCount( 1 );
assertEquals( 0.5, bar.getaDouble() );
FooEntity associatedFoo = bar.getFoo();
statementInspector.assertExecutedCount( 1 );
assertEquals( "foo_name", associatedFoo.getName() );
assertEquals( foo, associatedFoo );
assertEquals( bar, associatedFoo.getBar() );
} );
}
@Test
public void testBidirectionalFetchInverse(SessionFactoryScope scope) {
final SQLStatementInspector statementInspector = scope.getCollectingStatementInspector();
statementInspector.clear();
scope.inTransaction( session -> {
BarEntity bar = session.find( BarEntity.class, 1L );
statementInspector.assertExecutedCount( 1 );
FooEntity foo = bar.getFoo();
statementInspector.assertExecutedCount( 1 );
assertEquals( "foo_name", foo.getName() );
BarEntity associatedBar = foo.getBar();
statementInspector.assertExecutedCount( 1 );
assertEquals( 0.5, associatedBar.getaDouble() );
assertEquals( bar, associatedBar );
assertEquals( foo, associatedBar.getFoo() );
} );
}
public static | BidirectionalOneToOneWithConverterEagerTest |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/TestDeprecate.java | {
"start": 311,
"end": 716
} | class ____ {
private int id;
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
/**
* @deprecated
* @return
*/
public int getId2() {
return this.id;
}
@Deprecated
public int getId3() {
return this.id;
}
}
}
| VO |
java | google__guava | guava/src/com/google/common/hash/LittleEndianByteArray.java | {
"start": 9552,
"end": 11705
} | enum ____ implements LittleEndianBytes {
INSTANCE {
@Override
public long getLongLittleEndian(byte[] source, int offset) {
return Longs.fromBytes(
source[offset + 7],
source[offset + 6],
source[offset + 5],
source[offset + 4],
source[offset + 3],
source[offset + 2],
source[offset + 1],
source[offset]);
}
@Override
public void putLongLittleEndian(byte[] sink, int offset, long value) {
long mask = 0xFFL;
for (int i = 0; i < 8; mask <<= 8, i++) {
sink[offset + i] = (byte) ((value & mask) >> (i * 8));
}
}
@Override
public boolean usesFastPath() {
return false;
}
}
}
static LittleEndianBytes makeGetter() {
LittleEndianBytes usingVarHandle =
VarHandleLittleEndianBytesMaker.INSTANCE.tryMakeVarHandleLittleEndianBytes();
if (usingVarHandle != null) {
return usingVarHandle;
}
try {
/*
* UnsafeByteArray uses Unsafe.getLong() in an unsupported way, which is known to cause
* crashes on Android when running in 32-bit mode. For maximum safety, we shouldn't use
* Unsafe.getLong() at all, but the performance benefit on x86_64 is too great to ignore, so
* as a compromise, we enable the optimization only on platforms that we specifically know to
* work.
*
* In the future, the use of Unsafe.getLong() should be replaced by ByteBuffer.getLong(),
* which will have an efficient native implementation in JDK 9.
*
*/
String arch = System.getProperty("os.arch");
if (Objects.equals(arch, "amd64") || Objects.equals(arch, "aarch64")) {
return ByteOrder.nativeOrder().equals(ByteOrder.LITTLE_ENDIAN)
? UnsafeByteArray.UNSAFE_LITTLE_ENDIAN
: UnsafeByteArray.UNSAFE_BIG_ENDIAN;
}
} catch (Throwable t) {
// ensure we really catch *everything*
}
return JavaLittleEndianBytes.INSTANCE;
}
// Compare AbstractFuture.VarHandleAtomicHelperMaker.
private | JavaLittleEndianBytes |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/writer/TimelineEventsWriter.java | {
"start": 1617,
"end": 2521
} | class ____ implements MessageBodyWriter<TimelineEvents> {
private ObjectMapper objectMapper = new ObjectMapper();
@Override
public boolean isWriteable(Class<?> type, Type genericType,
Annotation[] annotations, MediaType mediaType) {
return type == TimelineEvents.class;
}
@Override
public void writeTo(TimelineEvents timelineEvents, Class<?> type,
Type genericType, Annotation[] annotations, MediaType mediaType,
MultivaluedMap<String, Object> httpHeaders, OutputStream entityStream)
throws IOException, WebApplicationException {
String entity = objectMapper.writeValueAsString(timelineEvents);
entityStream.write(entity.getBytes(StandardCharsets.UTF_8));
}
@Override
public long getSize(TimelineEvents timelineEvents, Class<?> type, Type genericType,
Annotation[] annotations, MediaType mediaType) {
return -1L;
}
}
| TimelineEventsWriter |
java | spring-projects__spring-boot | module/spring-boot-integration/src/main/java/org/springframework/boot/integration/autoconfigure/IntegrationPropertiesEnvironmentPostProcessor.java | {
"start": 1707,
"end": 2834
} | class ____ implements EnvironmentPostProcessor, Ordered {
@Override
public int getOrder() {
return Ordered.LOWEST_PRECEDENCE;
}
@Override
public void postProcessEnvironment(ConfigurableEnvironment environment, SpringApplication application) {
Resource resource = new ClassPathResource("META-INF/spring.integration.properties");
if (resource.exists()) {
registerIntegrationPropertiesPropertySource(environment, resource);
}
}
protected void registerIntegrationPropertiesPropertySource(ConfigurableEnvironment environment, Resource resource) {
PropertiesPropertySourceLoader loader = new PropertiesPropertySourceLoader();
try {
OriginTrackedMapPropertySource propertyFileSource = (OriginTrackedMapPropertySource) loader
.load("META-INF/spring.integration.properties", resource)
.get(0);
environment.getPropertySources().addLast(new IntegrationPropertiesPropertySource(propertyFileSource));
}
catch (IOException ex) {
throw new IllegalStateException("Failed to load integration properties from " + resource, ex);
}
}
private static final | IntegrationPropertiesEnvironmentPostProcessor |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/cascade/BidirectionalOneToManyCascadeTest.java | {
"start": 989,
"end": 4071
} | class ____ {
@AfterEach
public void tearDown(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
}
/**
* Saves the parent object with a child when both the one-to-many and
* many-to-one associations use cascade="all"
*/
@Test
public void testSaveParentWithChild(SessionFactoryScope scope) {
Parent parent = new Parent();
scope.inTransaction(
session -> {
Child child = new Child();
child.setParent( parent );
parent.setChildren( Collections.singleton( child ) );
session.persist( parent );
}
);
scope.inTransaction(
session -> {
Parent result = session.get( Parent.class, parent.getId() );
assertEquals( 1, result.getChildren().size() );
assertEquals( 0, result.getDeleteOrphanChildren().size() );
}
);
}
/**
* Saves the child object with the parent when both the one-to-many and
* many-to-one associations use cascade="all"
*/
@Test
public void testSaveChildWithParent(SessionFactoryScope scope) {
Parent parent = new Parent();
scope.inTransaction(
session -> {
Child child = new Child();
child.setParent( parent );
parent.setChildren( Collections.singleton( child ) );
session.persist( child );
}
);
scope.inTransaction(
session -> {
Parent result = session.get( Parent.class, parent.getId() );
assertEquals( 1, result.getChildren().size() );
assertEquals( 0, result.getDeleteOrphanChildren().size() );
}
);
}
/**
* Saves the parent object with a child when the one-to-many association
* uses cascade="all-delete-orphan" and the many-to-one association uses
* cascade="all"
*/
@Test
public void testSaveParentWithOrphanDeleteChild(SessionFactoryScope scope) {
Parent parent = new Parent();
scope.inTransaction(
session -> {
DeleteOrphanChild child = new DeleteOrphanChild();
child.setParent( parent );
parent.setDeleteOrphanChildren( Collections.singleton( child ) );
session.persist( parent );
}
);
scope.inTransaction(
session -> {
Parent result = session.get( Parent.class, parent.getId() );
assertEquals( 0, result.getChildren().size() );
assertEquals( 1, result.getDeleteOrphanChildren().size() );
}
);
}
/**
* Saves the child object with the parent when the one-to-many association
* uses cascade="all-delete-orphan" and the many-to-one association uses
* cascade="all"
*/
@Test
public void testSaveOrphanDeleteChildWithParent(SessionFactoryScope scope) {
Parent parent = new Parent();
scope.inTransaction(
session -> {
DeleteOrphanChild child = new DeleteOrphanChild();
child.setParent( parent );
parent.setDeleteOrphanChildren( Collections.singleton( child ) );
session.persist( child );
}
);
scope.inTransaction(
session -> {
Parent result = session.get( Parent.class, parent.getId() );
assertEquals( 0, result.getChildren().size() );
assertEquals( 1, result.getDeleteOrphanChildren().size() );
}
);
}
}
| BidirectionalOneToManyCascadeTest |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/prefetch/Validate.java | {
"start": 1032,
"end": 1320
} | class ____ Apache commons lang3.
* <p>
* It provides consistent message strings for frequently encountered checks.
* That simplifies callers because they have to supply only the name of the argument
* that failed a check instead of having to supply the entire message.
*/
public final | in |
java | bumptech__glide | library/src/main/java/com/bumptech/glide/load/engine/cache/DiskCache.java | {
"start": 704,
"end": 1616
} | interface ____ {
/**
* Writes data to the file and returns true if the write was successful and should be committed,
* and false if the write should be aborted.
*
* @param file The File the Writer should write to.
*/
boolean write(@NonNull File file);
}
/**
* Get the cache for the value at the given key.
*
* <p>Note - This is potentially dangerous, someone may write a new value to the file at any point
* in time and we won't know about it.
*
* @param key The key in the cache.
* @return An InputStream representing the data at key at the time get is called.
*/
@Nullable
File get(Key key);
/**
* Write to a key in the cache. {@link Writer} is used so that the cache implementation can
* perform actions after the write finishes, like commit (via atomic file rename).
*
* @param key The key to write to.
* @param writer An | Writer |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/scheduling/annotation/EnableAsyncTests.java | {
"start": 15948,
"end": 16094
} | class ____ {
@Bean
@Lazy
public AsyncBean asyncBean() {
return mock();
}
}
@Configuration
@EnableAsync
static | AsyncConfigWithMockito |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/core/publisher/FluxTakeLastOne.java | {
"start": 1000,
"end": 1550
} | class ____<T> extends InternalFluxOperator<T, T> implements Fuseable {
FluxTakeLastOne(Flux<? extends T> source) {
super(source);
}
@Override
public CoreSubscriber<? super T> subscribeOrReturn(CoreSubscriber<? super T> actual) {
return new MonoTakeLastOne.TakeLastOneSubscriber<>(actual, null, false);
}
@Override
public int getPrefetch() {
return Integer.MAX_VALUE;
}
@Override
public @Nullable Object scanUnsafe(Attr key) {
if (key == Attr.RUN_STYLE) return Attr.RunStyle.SYNC;
return super.scanUnsafe(key);
}
}
| FluxTakeLastOne |
java | spring-projects__spring-security | oauth2/oauth2-authorization-server/src/main/java/org/springframework/security/oauth2/server/authorization/jackson2/JsonNodeUtils.java | {
"start": 1178,
"end": 2190
} | class ____ {
static final TypeReference<Set<String>> STRING_SET = new TypeReference<>() {
};
static final TypeReference<Map<String, Object>> STRING_OBJECT_MAP = new TypeReference<>() {
};
static String findStringValue(JsonNode jsonNode, String fieldName) {
if (jsonNode == null) {
return null;
}
JsonNode value = jsonNode.findValue(fieldName);
return (value != null && value.isTextual()) ? value.asText() : null;
}
static <T> T findValue(JsonNode jsonNode, String fieldName, TypeReference<T> valueTypeReference,
ObjectMapper mapper) {
if (jsonNode == null) {
return null;
}
JsonNode value = jsonNode.findValue(fieldName);
return (value != null && value.isContainerNode()) ? mapper.convertValue(value, valueTypeReference) : null;
}
static JsonNode findObjectNode(JsonNode jsonNode, String fieldName) {
if (jsonNode == null) {
return null;
}
JsonNode value = jsonNode.findValue(fieldName);
return (value != null && value.isObject()) ? value : null;
}
}
| JsonNodeUtils |
java | apache__hadoop | hadoop-cloud-storage-project/hadoop-huaweicloud/src/main/java/org/apache/hadoop/fs/obs/OBSPosixBucketUtils.java | {
"start": 1817,
"end": 26026
} | class ____ {
/**
* Class logger.
*/
private static final Logger LOG = LoggerFactory.getLogger(
OBSPosixBucketUtils.class);
private OBSPosixBucketUtils() {
}
/**
* Get the depth of an absolute path, that is the number of '/' in the path.
*
* @param key object key
* @return depth
*/
static int fsGetObjectKeyDepth(final String key) {
int depth = 0;
for (int idx = key.indexOf('/');
idx >= 0; idx = key.indexOf('/', idx + 1)) {
depth++;
}
return key.endsWith("/") ? depth - 1 : depth;
}
/**
* Used to judge that an object is a file or folder.
*
* @param attr posix object attribute
* @return is posix folder
*/
static boolean fsIsFolder(final ObsFSAttribute attr) {
final int ifDir = 0x004000;
int mode = attr.getMode();
// object mode is -1 when the object is migrated from
// object bucket to posix bucket.
// -1 is a file, not folder.
if (mode < 0) {
return false;
}
return (mode & ifDir) != 0;
}
/**
* The inner rename operation based on Posix bucket.
*
* @param owner OBS File System instance
* @param src source path to be renamed from
* @param dst destination path to be renamed to
* @return boolean
* @throws RenameFailedException if some criteria for a state changing rename
* was not met. This means work didn't happen;
* it's not something which is reported upstream
* to the FileSystem APIs, for which the
* semantics of "false" are pretty vague.
* @throws IOException on IO failure.
*/
static boolean renameBasedOnPosix(final OBSFileSystem owner, final Path src,
final Path dst) throws IOException {
Path dstPath = dst;
String srcKey = OBSCommonUtils.pathToKey(owner, src);
String dstKey = OBSCommonUtils.pathToKey(owner, dstPath);
if (srcKey.isEmpty()) {
LOG.error("rename: src [{}] is root directory", src);
return false;
}
try {
FileStatus dstStatus = owner.getFileStatus(dstPath);
if (dstStatus.isDirectory()) {
String newDstString = OBSCommonUtils.maybeAddTrailingSlash(
dstPath.toString());
String filename = srcKey.substring(
OBSCommonUtils.pathToKey(owner, src.getParent())
.length() + 1);
dstPath = new Path(newDstString + filename);
dstKey = OBSCommonUtils.pathToKey(owner, dstPath);
LOG.debug(
"rename: dest is an existing directory and will be "
+ "changed to [{}]", dstPath);
if (owner.exists(dstPath)) {
LOG.error("rename: failed to rename " + src + " to "
+ dstPath
+ " because destination exists");
return false;
}
} else {
if (srcKey.equals(dstKey)) {
LOG.warn(
"rename: src and dest refer to the same "
+ "file or directory: {}", dstPath);
return true;
} else {
LOG.error("rename: failed to rename " + src + " to "
+ dstPath
+ " because destination exists");
return false;
}
}
} catch (FileNotFoundException e) {
// if destination does not exist, do not change the
// destination key, and just do rename.
LOG.debug("rename: dest [{}] does not exist", dstPath);
} catch (FileConflictException e) {
Path parent = dstPath.getParent();
if (!OBSCommonUtils.pathToKey(owner, parent).isEmpty()) {
FileStatus dstParentStatus = owner.getFileStatus(parent);
if (!dstParentStatus.isDirectory()) {
throw new ParentNotDirectoryException(
parent + " is not a directory");
}
}
}
if (dstKey.startsWith(srcKey) && (dstKey.equals(srcKey)
|| dstKey.charAt(srcKey.length()) == Path.SEPARATOR_CHAR)) {
LOG.error("rename: dest [{}] cannot be a descendant of src [{}]",
dstPath, src);
return false;
}
return innerFsRenameWithRetry(owner, src, dstPath, srcKey, dstKey);
}
private static boolean innerFsRenameWithRetry(final OBSFileSystem owner,
final Path src,
final Path dst, final String srcKey, final String dstKey)
throws IOException {
boolean renameResult = true;
int retryTime = 1;
while (retryTime <= OBSCommonUtils.MAX_RETRY_TIME) {
try {
LOG.debug("rename: {}-st rename from [{}] to [{}] ...",
retryTime, srcKey, dstKey);
innerFsRenameFile(owner, srcKey, dstKey);
renameResult = true;
break;
} catch (FileNotFoundException e) {
if (owner.exists(dst)) {
LOG.warn(
"rename: successfully {}-st rename src [{}] "
+ "to dest [{}] with SDK retry",
retryTime, src, dst, e);
renameResult = true;
} else {
LOG.error(
"rename: failed {}-st rename src [{}] to dest [{}]",
retryTime, src, dst, e);
renameResult = false;
}
break;
} catch (IOException e) {
if (retryTime == OBSCommonUtils.MAX_RETRY_TIME) {
LOG.error(
"rename: failed {}-st rename src [{}] to dest [{}]",
retryTime, src, dst, e);
throw e;
} else {
LOG.warn(
"rename: failed {}-st rename src [{}] to dest [{}]",
retryTime, src, dst, e);
if (owner.exists(dst) && owner.exists(src)) {
LOG.warn(
"rename: failed {}-st rename src [{}] to "
+ "dest [{}] with SDK retry", retryTime, src,
dst, e);
renameResult = false;
break;
}
try {
Thread.sleep(OBSCommonUtils.DELAY_TIME);
} catch (InterruptedException ie) {
throw e;
}
}
}
retryTime++;
}
return renameResult;
}
/**
* Used to rename a source folder to a destination folder that is not existed
* before rename.
*
* @param owner OBS File System instance
* @param src source folder key
* @param dst destination folder key that not existed before rename
* @throws IOException any io exception
* @throws ObsException any obs operation exception
*/
static void fsRenameToNewFolder(final OBSFileSystem owner, final String src,
final String dst)
throws IOException, ObsException {
LOG.debug("RenameFolder path {} to {}", src, dst);
try {
RenameRequest renameObjectRequest = new RenameRequest();
renameObjectRequest.setBucketName(owner.getBucket());
renameObjectRequest.setObjectKey(src);
renameObjectRequest.setNewObjectKey(dst);
owner.getObsClient().renameFolder(renameObjectRequest);
owner.getSchemeStatistics().incrementWriteOps(1);
} catch (ObsException e) {
throw OBSCommonUtils.translateException(
"renameFile(" + src + ", " + dst + ")", src, e);
}
}
static void innerFsRenameFile(final OBSFileSystem owner,
final String srcKey,
final String dstKey) throws IOException {
LOG.debug("RenameFile path {} to {}", srcKey, dstKey);
try {
final RenameRequest renameObjectRequest = new RenameRequest();
renameObjectRequest.setBucketName(owner.getBucket());
renameObjectRequest.setObjectKey(srcKey);
renameObjectRequest.setNewObjectKey(dstKey);
owner.getObsClient().renameFile(renameObjectRequest);
owner.getSchemeStatistics().incrementWriteOps(1);
} catch (ObsException e) {
if (e.getResponseCode() == OBSCommonUtils.NOT_FOUND_CODE) {
throw new FileNotFoundException(
"No such file or directory: " + srcKey);
}
if (e.getResponseCode() == OBSCommonUtils.CONFLICT_CODE) {
throw new FileConflictException(
"File conflicts during rename, " + e.getResponseStatus());
}
throw OBSCommonUtils.translateException(
"renameFile(" + srcKey + ", " + dstKey + ")", srcKey, e);
}
}
/**
* Used to rename a source object to a destination object which is not existed
* before rename.
*
* @param owner OBS File System instance
* @param srcKey source object key
* @param dstKey destination object key
* @throws IOException io exception
*/
static void fsRenameToNewObject(final OBSFileSystem owner,
final String srcKey,
final String dstKey) throws IOException {
String newSrcKey = srcKey;
String newdstKey = dstKey;
newSrcKey = OBSCommonUtils.maybeDeleteBeginningSlash(newSrcKey);
newdstKey = OBSCommonUtils.maybeDeleteBeginningSlash(newdstKey);
if (newSrcKey.endsWith("/")) {
// Rename folder.
fsRenameToNewFolder(owner, newSrcKey, newdstKey);
} else {
// Rename file.
innerFsRenameFile(owner, newSrcKey, newdstKey);
}
}
// Delete a file.
private static int fsRemoveFile(final OBSFileSystem owner,
final String sonObjectKey,
final List<KeyAndVersion> files)
throws IOException {
files.add(new KeyAndVersion(sonObjectKey));
if (files.size() == owner.getMaxEntriesToDelete()) {
// batch delete files.
OBSCommonUtils.removeKeys(owner, files, true, false);
return owner.getMaxEntriesToDelete();
}
return 0;
}
// Recursively delete a folder that might be not empty.
static boolean fsDelete(final OBSFileSystem owner, final FileStatus status,
final boolean recursive)
throws IOException, ObsException {
long startTime = System.currentTimeMillis();
long threadId = Thread.currentThread().getId();
Path f = status.getPath();
String key = OBSCommonUtils.pathToKey(owner, f);
if (!status.isDirectory()) {
LOG.debug("delete: Path is a file");
trashObjectIfNeed(owner, key);
} else {
LOG.debug("delete: Path is a directory: {} - recursive {}", f,
recursive);
key = OBSCommonUtils.maybeAddTrailingSlash(key);
boolean isEmptyDir = OBSCommonUtils.isFolderEmpty(owner, key);
if (key.equals("")) {
return OBSCommonUtils.rejectRootDirectoryDelete(
owner.getBucket(), isEmptyDir, recursive);
}
if (!recursive && !isEmptyDir) {
LOG.warn("delete: Path is not empty: {} - recursive {}", f,
recursive);
throw new PathIsNotEmptyDirectoryException(f.toString());
}
if (isEmptyDir) {
LOG.debug(
"delete: Deleting fake empty directory {} - recursive {}",
f, recursive);
OBSCommonUtils.deleteObject(owner, key);
} else {
LOG.debug(
"delete: Deleting objects for directory prefix {} to "
+ "delete - recursive {}", f, recursive);
trashFolderIfNeed(owner, key, f);
}
}
long endTime = System.currentTimeMillis();
LOG.debug("delete Path:{} thread:{}, timeUsedInMilliSec:{}", f,
threadId, endTime - startTime);
return true;
}
private static void trashObjectIfNeed(final OBSFileSystem owner,
final String key)
throws ObsException, IOException {
if (needToTrash(owner, key)) {
mkTrash(owner, key);
StringBuilder sb = new StringBuilder(owner.getTrashDir());
sb.append(key);
if (owner.exists(new Path(sb.toString()))) {
SimpleDateFormat df = new SimpleDateFormat("-yyyyMMddHHmmss");
sb.append(df.format(new Date()));
}
fsRenameToNewObject(owner, key, sb.toString());
LOG.debug("Moved: '" + key + "' to trash at: " + sb.toString());
} else {
OBSCommonUtils.deleteObject(owner, key);
}
}
private static void trashFolderIfNeed(final OBSFileSystem owner,
final String key,
final Path f) throws ObsException, IOException {
if (needToTrash(owner, key)) {
mkTrash(owner, key);
StringBuilder sb = new StringBuilder(owner.getTrashDir());
String subKey = OBSCommonUtils.maybeAddTrailingSlash(key);
sb.append(subKey);
if (owner.exists(new Path(sb.toString()))) {
SimpleDateFormat df = new SimpleDateFormat("-yyyyMMddHHmmss");
sb.insert(sb.length() - 1, df.format(new Date()));
}
String srcKey = OBSCommonUtils.maybeDeleteBeginningSlash(key);
String dstKey = OBSCommonUtils.maybeDeleteBeginningSlash(
sb.toString());
fsRenameToNewFolder(owner, srcKey, dstKey);
LOG.debug("Moved: '" + key + "' to trash at: " + sb.toString());
} else {
if (owner.isEnableMultiObjectDeleteRecursion()) {
long delNum = fsRecursivelyDeleteDir(owner, key, true);
LOG.debug("Recursively delete {} files/dirs when deleting {}",
delNum, key);
} else {
fsNonRecursivelyDelete(owner, f);
}
}
}
static long fsRecursivelyDeleteDir(final OBSFileSystem owner,
final String parentKey,
final boolean deleteParent) throws IOException {
long delNum = 0;
List<KeyAndVersion> subdirList = new ArrayList<>(
owner.getMaxEntriesToDelete());
List<KeyAndVersion> fileList = new ArrayList<>(
owner.getMaxEntriesToDelete());
ListObjectsRequest request = OBSCommonUtils.createListObjectsRequest(
owner, parentKey, "/", owner.getMaxKeys());
ObjectListing objects = OBSCommonUtils.listObjects(owner, request);
while (true) {
for (String commonPrefix : objects.getCommonPrefixes()) {
if (commonPrefix.equals(parentKey)) {
// skip prefix itself
continue;
}
delNum += fsRemoveSubdir(owner, commonPrefix, subdirList);
}
for (ObsObject sonObject : objects.getObjects()) {
String sonObjectKey = sonObject.getObjectKey();
if (sonObjectKey.equals(parentKey)) {
// skip prefix itself
continue;
}
if (!sonObjectKey.endsWith("/")) {
delNum += fsRemoveFile(owner, sonObjectKey, fileList);
} else {
delNum += fsRemoveSubdir(owner, sonObjectKey, subdirList);
}
}
if (!objects.isTruncated()) {
break;
}
objects = OBSCommonUtils.continueListObjects(owner, objects);
}
delNum += fileList.size();
OBSCommonUtils.removeKeys(owner, fileList, true, false);
delNum += subdirList.size();
OBSCommonUtils.removeKeys(owner, subdirList, true, false);
if (deleteParent) {
OBSCommonUtils.deleteObject(owner, parentKey);
delNum++;
}
return delNum;
}
private static boolean needToTrash(final OBSFileSystem owner,
final String key) {
String newKey = key;
newKey = OBSCommonUtils.maybeDeleteBeginningSlash(newKey);
if (owner.isEnableTrash() && newKey.startsWith(owner.getTrashDir())) {
return false;
}
return owner.isEnableTrash();
}
// Delete a sub dir.
private static int fsRemoveSubdir(final OBSFileSystem owner,
final String subdirKey,
final List<KeyAndVersion> subdirList)
throws IOException {
fsRecursivelyDeleteDir(owner, subdirKey, false);
subdirList.add(new KeyAndVersion(subdirKey));
if (subdirList.size() == owner.getMaxEntriesToDelete()) {
// batch delete subdirs.
OBSCommonUtils.removeKeys(owner, subdirList, true, false);
return owner.getMaxEntriesToDelete();
}
return 0;
}
private static void mkTrash(final OBSFileSystem owner, final String key)
throws ObsException, IOException {
String newKey = key;
StringBuilder sb = new StringBuilder(owner.getTrashDir());
newKey = OBSCommonUtils.maybeAddTrailingSlash(newKey);
sb.append(newKey);
sb.deleteCharAt(sb.length() - 1);
sb.delete(sb.lastIndexOf("/"), sb.length());
Path fastDeleteRecycleDirPath = new Path(sb.toString());
// keep the parent directory of the target path exists
if (!owner.exists(fastDeleteRecycleDirPath)) {
owner.mkdirs(fastDeleteRecycleDirPath);
}
}
// List all sub objects at first, delete sub objects in batch secondly.
private static void fsNonRecursivelyDelete(final OBSFileSystem owner,
final Path parent)
throws IOException, ObsException {
// List sub objects sorted by path depth.
FileStatus[] arFileStatus = OBSCommonUtils.innerListStatus(owner,
parent, true);
// Remove sub objects one depth by one depth to avoid that parents and
// children in a same batch.
fsRemoveKeys(owner, arFileStatus);
// Delete parent folder that should has become empty.
OBSCommonUtils.deleteObject(owner,
OBSCommonUtils.pathToKey(owner, parent));
}
// Remove sub objects of each depth one by one to avoid that parents and
// children in a same batch.
private static void fsRemoveKeys(final OBSFileSystem owner,
final FileStatus[] arFileStatus)
throws ObsException, IOException {
if (arFileStatus.length <= 0) {
// exit fast if there are no keys to delete
return;
}
String key;
for (FileStatus fileStatus : arFileStatus) {
key = OBSCommonUtils.pathToKey(owner, fileStatus.getPath());
OBSCommonUtils.blockRootDelete(owner.getBucket(), key);
}
fsRemoveKeysByDepth(owner, arFileStatus);
}
// Batch delete sub objects one depth by one depth to avoid that parents and
// children in a same
// batch.
// A batch deletion might be split into some concurrent deletions to promote
// the performance, but
// it
// can't make sure that an object is deleted before it's children.
private static void fsRemoveKeysByDepth(final OBSFileSystem owner,
final FileStatus[] arFileStatus)
throws ObsException, IOException {
if (arFileStatus.length <= 0) {
// exit fast if there is no keys to delete
return;
}
// Find all leaf keys in the list.
String key;
int depth = Integer.MAX_VALUE;
List<KeyAndVersion> leafKeys = new ArrayList<>(
owner.getMaxEntriesToDelete());
for (int idx = arFileStatus.length - 1; idx >= 0; idx--) {
if (leafKeys.size() >= owner.getMaxEntriesToDelete()) {
OBSCommonUtils.removeKeys(owner, leafKeys, true, false);
}
key = OBSCommonUtils.pathToKey(owner, arFileStatus[idx].getPath());
// Check file.
if (!arFileStatus[idx].isDirectory()) {
// A file must be a leaf.
leafKeys.add(new KeyAndVersion(key, null));
continue;
}
// Check leaf folder at current depth.
int keyDepth = fsGetObjectKeyDepth(key);
if (keyDepth == depth) {
// Any key at current depth must be a leaf.
leafKeys.add(new KeyAndVersion(key, null));
continue;
}
if (keyDepth < depth) {
// The last batch delete at current depth.
OBSCommonUtils.removeKeys(owner, leafKeys, true, false);
// Go on at the upper depth.
depth = keyDepth;
leafKeys.add(new KeyAndVersion(key, null));
continue;
}
LOG.warn(
"The objects list is invalid because it isn't sorted by"
+ " path depth.");
throw new ObsException("System failure");
}
// The last batch delete at the minimum depth of all keys.
OBSCommonUtils.removeKeys(owner, leafKeys, true, false);
}
// Used to create a folder
static void fsCreateFolder(final OBSFileSystem owner,
final String objectName)
throws ObsException {
for (int retryTime = 1;
retryTime < OBSCommonUtils.MAX_RETRY_TIME; retryTime++) {
try {
innerFsCreateFolder(owner, objectName);
return;
} catch (ObsException e) {
LOG.warn("Failed to create folder [{}], retry time [{}], "
+ "exception [{}]", objectName, retryTime, e);
try {
Thread.sleep(OBSCommonUtils.DELAY_TIME);
} catch (InterruptedException ie) {
throw e;
}
}
}
innerFsCreateFolder(owner, objectName);
}
private static void innerFsCreateFolder(final OBSFileSystem owner,
final String objectName)
throws ObsException {
final NewFolderRequest newFolderRequest = new NewFolderRequest(
owner.getBucket(), objectName);
newFolderRequest.setAcl(owner.getCannedACL());
long len = newFolderRequest.getObjectKey().length();
owner.getObsClient().newFolder(newFolderRequest);
owner.getSchemeStatistics().incrementWriteOps(1);
owner.getSchemeStatistics().incrementBytesWritten(len);
}
// Used to get the status of a file or folder in a file-gateway bucket.
static OBSFileStatus innerFsGetObjectStatus(final OBSFileSystem owner,
final Path f) throws IOException {
final Path path = OBSCommonUtils.qualify(owner, f);
String key = OBSCommonUtils.pathToKey(owner, path);
LOG.debug("Getting path status for {} ({})", path, key);
if (key.isEmpty()) {
LOG.debug("Found root directory");
return new OBSFileStatus(path, owner.getUsername());
}
try {
final GetAttributeRequest getAttrRequest = new GetAttributeRequest(
owner.getBucket(), key);
ObsFSAttribute meta = owner.getObsClient()
.getAttribute(getAttrRequest);
owner.getSchemeStatistics().incrementReadOps(1);
if (fsIsFolder(meta)) {
LOG.debug("Found file (with /): fake directory");
return new OBSFileStatus(path,
OBSCommonUtils.dateToLong(meta.getLastModified()),
owner.getUsername());
} else {
LOG.debug(
"Found file (with /): real file? should not happen: {}",
key);
return new OBSFileStatus(
meta.getContentLength(),
OBSCommonUtils.dateToLong(meta.getLastModified()),
path,
owner.getDefaultBlockSize(path),
owner.getUsername());
}
} catch (ObsException e) {
if (e.getResponseCode() == OBSCommonUtils.NOT_FOUND_CODE) {
LOG.debug("Not Found: {}", path);
throw new FileNotFoundException(
"No such file or directory: " + path);
}
if (e.getResponseCode() == OBSCommonUtils.CONFLICT_CODE) {
throw new FileConflictException(
"file conflicts: " + e.getResponseStatus());
}
throw OBSCommonUtils.translateException("getFileStatus", path, e);
}
}
static ContentSummary fsGetDirectoryContentSummary(
final OBSFileSystem owner,
final String key) throws IOException {
String newKey = key;
newKey = OBSCommonUtils.maybeAddTrailingSlash(newKey);
long[] summary = {0, 0, 1};
LOG.debug("Summary key {}", newKey);
ListObjectsRequest request = new ListObjectsRequest();
request.setBucketName(owner.getBucket());
request.setPrefix(newKey);
request.setMaxKeys(owner.getMaxKeys());
ObjectListing objects = OBSCommonUtils.listObjects(owner, request);
while (true) {
if (!objects.getCommonPrefixes().isEmpty() || !objects.getObjects()
.isEmpty()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Found path as directory (with /): {}/{}",
objects.getCommonPrefixes().size(),
objects.getObjects().size());
}
for (String prefix : objects.getCommonPrefixes()) {
if (!prefix.equals(newKey)) {
summary[2]++;
}
}
for (ObsObject obj : objects.getObjects()) {
if (!obj.getObjectKey().endsWith("/")) {
summary[0] += obj.getMetadata().getContentLength();
summary[1] += 1;
} else if (!obj.getObjectKey().equals(newKey)) {
summary[2]++;
}
}
}
if (!objects.isTruncated()) {
break;
}
objects = OBSCommonUtils.continueListObjects(owner, objects);
}
LOG.debug(String.format(
"file size [%d] - file count [%d] - directory count [%d] - "
+ "file path [%s]",
summary[0], summary[1], summary[2], newKey));
return new ContentSummary.Builder().length(summary[0])
.fileCount(summary[1]).directoryCount(summary[2])
.spaceConsumed(summary[0]).build();
}
}
| OBSPosixBucketUtils |
java | spring-projects__spring-framework | spring-websocket/src/main/java/org/springframework/web/socket/sockjs/transport/handler/XhrStreamingTransportHandler.java | {
"start": 2564,
"end": 2935
} | class ____ extends StreamingSockJsSession {
public XhrStreamingSockJsSession(String sessionId, SockJsServiceConfig config,
WebSocketHandler wsHandler, Map<String, Object> attributes) {
super(sessionId, config, wsHandler, attributes);
}
@Override
protected byte[] getPrelude(ServerHttpRequest request) {
return PRELUDE;
}
}
}
| XhrStreamingSockJsSession |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java | {
"start": 19248,
"end": 20231
} | class ____ implements
MultipleArcTransition<RMStateStore, RMStateStoreEvent,
RMStateStoreState> {
@Override
public RMStateStoreState transition(RMStateStore store,
RMStateStoreEvent event) {
if (!(event instanceof RMStateStoreRMDTEvent)) {
// should never happen
LOG.error("Illegal event type: " + event.getClass());
return RMStateStoreState.ACTIVE;
}
boolean isFenced = false;
RMStateStoreRMDTEvent dtEvent = (RMStateStoreRMDTEvent) event;
try {
LOG.info("Storing RMDelegationToken and SequenceNumber");
store.storeRMDelegationTokenState(
dtEvent.getRmDTIdentifier(), dtEvent.getRenewDate());
} catch (Exception e) {
LOG.error("Error While Storing RMDelegationToken and SequenceNumber ",
e);
isFenced = store.notifyStoreOperationFailedInternal(e);
}
return finalState(isFenced);
}
}
private static | StoreRMDTTransition |
java | apache__avro | lang/java/avro/src/main/java/org/apache/avro/path/UnionTypePredicate.java | {
"start": 931,
"end": 1169
} | class ____ implements PositionalPathPredicate {
private final String type;
public UnionTypePredicate(String type) {
this.type = type;
}
@Override
public String toString() {
return "[" + type + "]";
}
}
| UnionTypePredicate |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/aop/framework/AbstractAopProxyTests.java | {
"start": 38839,
"end": 41379
} | class ____ checks proxy is bound before getTarget() call
config.setTargetSource(new TargetSource() {
@Override
public Class<?> getTargetClass() {
return TestBean.class;
}
@Override
public Object getTarget() {
assertThat(AopContext.currentProxy()).isEqualTo(proxy);
return target;
}
});
// Just test anything: it will fail if context wasn't found
assertThat(proxy.getAge()).isEqualTo(0);
}
@Test
void equals() {
IOther a = new AllInstancesAreEqual();
IOther b = new AllInstancesAreEqual();
NopInterceptor i1 = new NopInterceptor();
NopInterceptor i2 = new NopInterceptor();
ProxyFactory pfa = new ProxyFactory(a);
pfa.addAdvice(i1);
ProxyFactory pfb = new ProxyFactory(b);
pfb.addAdvice(i2);
IOther proxyA = (IOther) createProxy(pfa);
IOther proxyB = (IOther) createProxy(pfb);
assertThat(pfb.getAdvisors()).hasSameSizeAs(pfa.getAdvisors());
assertThat(b).isEqualTo(a);
assertThat(i2).isEqualTo(i1);
assertThat(proxyB).isEqualTo(proxyA);
assertThat(proxyB.hashCode()).isEqualTo(proxyA.hashCode());
assertThat(proxyA).isNotEqualTo(a);
// Equality checks were handled by the proxy
assertThat(i1.getCount()).isEqualTo(0);
// When we invoke A, it's NopInterceptor will have count == 1
// and won't think it's equal to B's NopInterceptor
proxyA.absquatulate();
assertThat(i1.getCount()).isEqualTo(1);
assertThat(proxyA).isNotEqualTo(proxyB);
}
@Test
void beforeAdvisorIsInvoked() {
CountingBeforeAdvice cba = new CountingBeforeAdvice();
@SuppressWarnings("serial")
Advisor matchesNoArgs = new StaticMethodMatcherPointcutAdvisor(cba) {
@Override
public boolean matches(Method m, @Nullable Class<?> targetClass) {
return m.getParameterCount() == 0;
}
};
TestBean target = new TestBean();
target.setAge(80);
ProxyFactory pf = new ProxyFactory(target);
pf.addAdvice(new NopInterceptor());
pf.addAdvisor(matchesNoArgs);
assertThat(pf.getAdvisors()[1]).as("Advisor was added").isEqualTo(matchesNoArgs);
ITestBean proxied = (ITestBean) createProxy(pf);
assertThat(cba.getCalls()).isEqualTo(0);
assertThat(cba.getCalls("getAge")).isEqualTo(0);
assertThat(proxied.getAge()).isEqualTo(target.getAge());
assertThat(cba.getCalls()).isEqualTo(1);
assertThat(cba.getCalls("getAge")).isEqualTo(1);
assertThat(cba.getCalls("setAge")).isEqualTo(0);
// Won't be advised
proxied.setAge(26);
assertThat(cba.getCalls()).isEqualTo(1);
assertThat(proxied.getAge()).isEqualTo(26);
}
@Test
void userAttributes() {
| just |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java | {
"start": 3440,
"end": 4164
} | class ____ extends YarnCLI {
private static final String CLUSTER_INFO_URL = "/ws/v1/cluster/info";
private static final Logger LOG = LoggerFactory
.getLogger(TopCLI.class);
private String CLEAR = "\u001b[2J";
private String CLEAR_LINE = "\u001b[2K";
private String SET_CURSOR_HOME = "\u001b[H";
private String CHANGE_BACKGROUND = "\u001b[7m";
private String RESET_BACKGROUND = "\u001b[0m";
private String SET_CURSOR_LINE_7_COLUMN_0 = "\u001b[7;0f";
// guava cache for getapplications call
protected Cache<GetApplicationsRequest, List<ApplicationReport>>
applicationReportsCache = CacheBuilder.newBuilder().maximumSize(1000)
.expireAfterWrite(5, TimeUnit.SECONDS).build();
| TopCLI |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.