language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/DecodingState.java | {
"start": 993,
"end": 1082
} | class ____ maintains decoding state during a decode call.
*/
@InterfaceAudience.Private
| that |
java | apache__camel | core/camel-api/src/main/java/org/apache/camel/component/extension/ComponentVerifierExtension.java | {
"start": 9004,
"end": 9708
} | interface ____ extends Serializable {
/**
* Name of the code. All uppercase for standard codes, all lower case for custom codes. Separator between
* two words is an underscore.
*
* @return code name
*/
String name();
/**
* Bean style accessor to name. This is required for framework like Jackson using bean convention for object
* serialization.
*
* @return code name
*/
default String getName() {
return name();
}
}
/**
* Standard set of error codes
*/
| Code |
java | google__guava | android/guava-testlib/src/com/google/common/collect/testing/testers/MapContainsKeyTester.java | {
"start": 1805,
"end": 3677
} | class ____<K, V> extends AbstractMapTester<K, V> {
@CollectionSize.Require(absent = ZERO)
public void testContains_yes() {
assertTrue("containsKey(present) should return true", getMap().containsKey(k0()));
}
public void testContains_no() {
assertFalse("containsKey(notPresent) should return false", getMap().containsKey(k3()));
}
@MapFeature.Require(ALLOWS_NULL_KEY_QUERIES)
public void testContains_nullNotContainedButAllowed() {
assertFalse("containsKey(null) should return false", getMap().containsKey(null));
}
@MapFeature.Require(absent = ALLOWS_NULL_KEY_QUERIES)
public void testContains_nullNotContainedAndUnsupported() {
expectNullKeyMissingWhenNullKeysUnsupported("containsKey(null) should return false or throw");
}
@MapFeature.Require(ALLOWS_NULL_KEYS)
@CollectionSize.Require(absent = ZERO)
public void testContains_nonNullWhenNullContained() {
initMapWithNullKey();
assertFalse("containsKey(notPresent) should return false", getMap().containsKey(k3()));
}
@MapFeature.Require(ALLOWS_NULL_KEYS)
@CollectionSize.Require(absent = ZERO)
public void testContains_nullContained() {
initMapWithNullKey();
assertTrue("containsKey(null) should return true", getMap().containsKey(null));
}
@MapFeature.Require(ALLOWS_NULL_VALUES)
@CollectionSize.Require(absent = ZERO)
public void testContains_keyWithNullValueContained() {
initMapWithNullValue();
assertTrue(
"containsKey(keyForNullValue) should return true",
getMap().containsKey(getKeyForNullValue()));
}
public void testContains_wrongType() {
try {
// noinspection SuspiciousMethodCalls
assertFalse(
"containsKey(wrongType) should return false or throw",
getMap().containsKey(WrongType.VALUE));
} catch (ClassCastException tolerated) {
}
}
}
| MapContainsKeyTester |
java | apache__spark | common/kvstore/src/test/java/org/apache/spark/util/kvstore/DBIteratorSuite.java | {
"start": 1908,
"end": 16284
} | interface ____ extends Comparator<CustomType1> {
/**
* Returns a comparator that falls back to natural order if this comparator's ordering
* returns equality for two elements. Used to mimic how the index sorts things internally.
*/
default BaseComparator fallback() {
return (t1, t2) -> {
int result = BaseComparator.this.compare(t1, t2);
if (result != 0) {
return result;
}
return t1.key.compareTo(t2.key);
};
}
/** Reverses the order of this comparator. */
default BaseComparator reverse() {
return (t1, t2) -> -BaseComparator.this.compare(t1, t2);
}
}
private static final BaseComparator NATURAL_ORDER = (t1, t2) -> t1.key.compareTo(t2.key);
private static final BaseComparator REF_INDEX_ORDER = (t1, t2) -> t1.id.compareTo(t2.id);
private static final BaseComparator COPY_INDEX_ORDER = (t1, t2) -> t1.name.compareTo(t2.name);
private static final BaseComparator NUMERIC_INDEX_ORDER =
(t1, t2) -> Integer.compare(t1.num, t2.num);
private static final BaseComparator CHILD_INDEX_ORDER = (t1, t2) -> t1.child.compareTo(t2.child);
/**
* Implementations should override this method; it is called only once, before all tests are
* run. Any state can be safely stored in static variables and cleaned up in a @AfterAll
* handler.
*/
protected abstract KVStore createStore() throws Exception;
@BeforeAll
public static void setupClass() {
long seed = RND.nextLong();
LOG.info("Random seed: {}", seed);
RND.setSeed(seed);
}
@AfterAll
public static void cleanupData() {
allEntries = null;
db = null;
}
@BeforeEach
public void setup() throws Exception {
if (db != null) {
return;
}
db = createStore();
int count = RND.nextInt(MAX_ENTRIES) + MIN_ENTRIES;
allEntries = new ArrayList<>(count);
for (int i = 0; i < count; i++) {
CustomType1 t = new CustomType1();
t.key = "key" + i;
t.id = "id" + i;
t.name = "name" + RND.nextInt(MAX_ENTRIES);
// Force one item to have an integer value of zero to test the fix for SPARK-23103.
t.num = (i != 0) ? (int) RND.nextLong() : 0;
t.child = "child" + (i % MIN_ENTRIES);
allEntries.add(t);
}
// Shuffle the entries to avoid the insertion order matching the natural ordering. Just in case.
Collections.shuffle(allEntries, RND);
for (CustomType1 e : allEntries) {
db.write(e);
}
// Pick the first generated value, and forcefully create a few entries that will clash
// with the indexed values (id and name), to make sure the index behaves correctly when
// multiple entities are indexed by the same value.
//
// This also serves as a test for the test code itself, to make sure it's sorting indices
// the same way the store is expected to.
CustomType1 first = allEntries.get(0);
clashingEntries = new ArrayList<>();
int clashCount = RND.nextInt(MIN_ENTRIES) + 1;
for (int i = 0; i < clashCount; i++) {
CustomType1 t = new CustomType1();
t.key = "n-key" + (count + i);
t.id = first.id;
t.name = first.name;
t.num = first.num;
t.child = first.child;
allEntries.add(t);
clashingEntries.add(t);
db.write(t);
}
// Create another entry that could cause problems: take the first entry, and make its indexed
// name be an extension of the existing ones, to make sure the implementation sorts these
// correctly even considering the separator character (shorter strings first).
CustomType1 t = new CustomType1();
t.key = "extended-key-0";
t.id = first.id;
t.name = first.name + "a";
t.num = first.num;
t.child = first.child;
allEntries.add(t);
db.write(t);
}
@Test
public void naturalIndex() throws Exception {
testIteration(NATURAL_ORDER, view(), null, null);
}
@Test
public void refIndex() throws Exception {
testIteration(REF_INDEX_ORDER, view().index("id"), null, null);
}
@Test
public void copyIndex() throws Exception {
testIteration(COPY_INDEX_ORDER, view().index("name"), null, null);
}
@Test
public void numericIndex() throws Exception {
testIteration(NUMERIC_INDEX_ORDER, view().index("int"), null, null);
}
@Test
public void childIndex() throws Exception {
CustomType1 any = pickLimit();
testIteration(CHILD_INDEX_ORDER, view().index("child").parent(any.id), null, null);
}
@Test
public void naturalIndexDescending() throws Exception {
testIteration(NATURAL_ORDER, view().reverse(), null, null);
}
@Test
public void refIndexDescending() throws Exception {
testIteration(REF_INDEX_ORDER, view().index("id").reverse(), null, null);
}
@Test
public void copyIndexDescending() throws Exception {
testIteration(COPY_INDEX_ORDER, view().index("name").reverse(), null, null);
}
@Test
public void numericIndexDescending() throws Exception {
testIteration(NUMERIC_INDEX_ORDER, view().index("int").reverse(), null, null);
}
@Test
public void childIndexDescending() throws Exception {
CustomType1 any = pickLimit();
testIteration(CHILD_INDEX_ORDER, view().index("child").parent(any.id).reverse(), null, null);
}
@Test
public void naturalIndexWithStart() throws Exception {
CustomType1 first = pickLimit();
testIteration(NATURAL_ORDER, view().first(first.key), first, null);
}
@Test
public void refIndexWithStart() throws Exception {
CustomType1 first = pickLimit();
testIteration(REF_INDEX_ORDER, view().index("id").first(first.id), first, null);
}
@Test
public void copyIndexWithStart() throws Exception {
CustomType1 first = pickLimit();
testIteration(COPY_INDEX_ORDER, view().index("name").first(first.name), first, null);
}
@Test
public void numericIndexWithStart() throws Exception {
CustomType1 first = pickLimit();
testIteration(NUMERIC_INDEX_ORDER, view().index("int").first(first.num), first, null);
}
@Test
public void childIndexWithStart() throws Exception {
CustomType1 any = pickLimit();
testIteration(CHILD_INDEX_ORDER, view().index("child").parent(any.id).first(any.child), null,
null);
}
@Test
public void naturalIndexDescendingWithStart() throws Exception {
CustomType1 first = pickLimit();
testIteration(NATURAL_ORDER, view().reverse().first(first.key), first, null);
}
@Test
public void refIndexDescendingWithStart() throws Exception {
CustomType1 first = pickLimit();
testIteration(REF_INDEX_ORDER, view().reverse().index("id").first(first.id), first, null);
}
@Test
public void copyIndexDescendingWithStart() throws Exception {
CustomType1 first = pickLimit();
testIteration(COPY_INDEX_ORDER, view().reverse().index("name").first(first.name), first, null);
}
@Test
public void numericIndexDescendingWithStart() throws Exception {
CustomType1 first = pickLimit();
testIteration(NUMERIC_INDEX_ORDER, view().reverse().index("int").first(first.num), first, null);
}
@Test
public void childIndexDescendingWithStart() throws Exception {
CustomType1 any = pickLimit();
testIteration(CHILD_INDEX_ORDER,
view().index("child").parent(any.id).first(any.child).reverse(), null, null);
}
@Test
public void naturalIndexWithSkip() throws Exception {
testIteration(NATURAL_ORDER, view().skip(pickCount()), null, null);
}
@Test
public void refIndexWithSkip() throws Exception {
testIteration(REF_INDEX_ORDER, view().index("id").skip(pickCount()), null, null);
}
@Test
public void copyIndexWithSkip() throws Exception {
testIteration(COPY_INDEX_ORDER, view().index("name").skip(pickCount()), null, null);
}
@Test
public void childIndexWithSkip() throws Exception {
CustomType1 any = pickLimit();
testIteration(CHILD_INDEX_ORDER, view().index("child").parent(any.id).skip(pickCount()),
null, null);
}
@Test
public void naturalIndexWithMax() throws Exception {
testIteration(NATURAL_ORDER, view().max(pickCount()), null, null);
}
@Test
public void copyIndexWithMax() throws Exception {
testIteration(COPY_INDEX_ORDER, view().index("name").max(pickCount()), null, null);
}
@Test
public void childIndexWithMax() throws Exception {
CustomType1 any = pickLimit();
testIteration(CHILD_INDEX_ORDER, view().index("child").parent(any.id).max(pickCount()), null,
null);
}
@Test
public void naturalIndexWithLast() throws Exception {
CustomType1 last = pickLimit();
testIteration(NATURAL_ORDER, view().last(last.key), null, last);
}
@Test
public void refIndexWithLast() throws Exception {
CustomType1 last = pickLimit();
testIteration(REF_INDEX_ORDER, view().index("id").last(last.id), null, last);
}
@Test
public void copyIndexWithLast() throws Exception {
CustomType1 last = pickLimit();
testIteration(COPY_INDEX_ORDER, view().index("name").last(last.name), null, last);
}
@Test
public void numericIndexWithLast() throws Exception {
CustomType1 last = pickLimit();
testIteration(NUMERIC_INDEX_ORDER, view().index("int").last(last.num), null, last);
}
@Test
public void childIndexWithLast() throws Exception {
CustomType1 any = pickLimit();
testIteration(CHILD_INDEX_ORDER, view().index("child").parent(any.id).last(any.child), null,
null);
}
@Test
public void naturalIndexDescendingWithLast() throws Exception {
CustomType1 last = pickLimit();
testIteration(NATURAL_ORDER, view().reverse().last(last.key), null, last);
}
@Test
public void refIndexDescendingWithLast() throws Exception {
CustomType1 last = pickLimit();
testIteration(REF_INDEX_ORDER, view().reverse().index("id").last(last.id), null, last);
}
@Test
public void copyIndexDescendingWithLast() throws Exception {
CustomType1 last = pickLimit();
testIteration(COPY_INDEX_ORDER, view().reverse().index("name").last(last.name),
null, last);
}
@Test
public void numericIndexDescendingWithLast() throws Exception {
CustomType1 last = pickLimit();
testIteration(NUMERIC_INDEX_ORDER, view().reverse().index("int").last(last.num),
null, last);
}
@Test
public void childIndexDescendingWithLast() throws Exception {
CustomType1 any = pickLimit();
testIteration(CHILD_INDEX_ORDER, view().index("child").parent(any.id).last(any.child).reverse(),
null, null);
}
@Test
public void testRefWithIntNaturalKey() throws Exception {
IntKeyType i = new IntKeyType();
i.key = 1;
i.id = "1";
i.values = Arrays.asList("1");
db.write(i);
try(KVStoreIterator<?> it = db.view(i.getClass()).closeableIterator()) {
Object read = it.next();
assertEquals(i, read);
}
}
private CustomType1 pickLimit() {
// Picks an element that has clashes with other elements in the given index.
return clashingEntries.get(RND.nextInt(clashingEntries.size()));
}
private int pickCount() {
int count = RND.nextInt(allEntries.size() / 2);
return Math.max(count, 1);
}
/**
* Compares the two values and falls back to comparing the natural key of CustomType1
* if they're the same, to mimic the behavior of the indexing code.
*/
private <T extends Comparable<T>> int compareWithFallback(
T v1,
T v2,
CustomType1 ct1,
CustomType1 ct2) {
int result = v1.compareTo(v2);
if (result != 0) {
return result;
}
return ct1.key.compareTo(ct2.key);
}
private void testIteration(
final BaseComparator order,
final KVStoreView<CustomType1> params,
final CustomType1 first,
final CustomType1 last) throws Exception {
List<CustomType1> indexOrder = sortBy(order.fallback());
if (!params.ascending) {
indexOrder = Lists.reverse(indexOrder);
}
Iterable<CustomType1> expected = indexOrder;
BaseComparator expectedOrder = params.ascending ? order : order.reverse();
if (params.parent != null) {
expected = Iterables.filter(expected, v -> params.parent.equals(v.id));
}
if (first != null) {
expected = Iterables.filter(expected, v -> expectedOrder.compare(first, v) <= 0);
}
if (last != null) {
expected = Iterables.filter(expected, v -> expectedOrder.compare(v, last) <= 0);
}
if (params.skip > 0) {
expected = Iterables.skip(expected, (int) params.skip);
}
if (params.max != Long.MAX_VALUE) {
expected = Iterables.limit(expected, (int) params.max);
}
List<CustomType1> actual = collect(params);
compareLists(expected, actual);
}
/** Could use assertEquals(), but that creates hard to read errors for large lists. */
private void compareLists(Iterable<?> expected, List<?> actual) {
Iterator<?> expectedIt = expected.iterator();
Iterator<?> actualIt = actual.iterator();
int count = 0;
while (expectedIt.hasNext()) {
if (!actualIt.hasNext()) {
break;
}
count++;
assertEquals(expectedIt.next(), actualIt.next());
}
String message;
Object[] remaining;
int expectedCount = count;
int actualCount = count;
if (expectedIt.hasNext()) {
remaining = Iterators.toArray(expectedIt, Object.class);
expectedCount += remaining.length;
message = "missing";
} else {
remaining = Iterators.toArray(actualIt, Object.class);
actualCount += remaining.length;
message = "stray";
}
assertEquals(expectedCount, actualCount,
String.format("Found %s elements: %s", message, Arrays.asList(remaining)));
}
private KVStoreView<CustomType1> view() throws Exception {
// SPARK-38896: this `view` will be closed in
// the `collect(KVStoreView<CustomType1> view)` method.
return db.view(CustomType1.class);
}
private List<CustomType1> collect(KVStoreView<CustomType1> view) throws Exception {
try (KVStoreIterator<CustomType1> iterator = view.closeableIterator()) {
List<CustomType1> list = new ArrayList<>();
iterator.forEachRemaining(list::add);
return list;
}
}
private List<CustomType1> sortBy(Comparator<CustomType1> comp) {
List<CustomType1> copy = new ArrayList<>(allEntries);
Collections.sort(copy, comp);
return copy;
}
}
| BaseComparator |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java | {
"start": 4760,
"end": 8891
} | class ____ extends ESTestCase {
private static final List<InternalAggregationTestCase<?>> aggsTests = List.of(
new InternalCardinalityTests(),
new InternalTDigestPercentilesTests(),
new InternalTDigestPercentilesRanksTests(),
new InternalHDRPercentilesTests(),
new InternalHDRPercentilesRanksTests(),
new InternalPercentilesBucketTests(),
new MinTests(),
new MaxTests(),
new InternalAvgTests(),
new InternalWeightedAvgTests(),
new SumTests(),
new InternalValueCountTests(),
new InternalSimpleValueTests(),
new InternalBucketMetricValueTests(),
new InternalStatsTests(),
new InternalStatsBucketTests(),
new InternalExtendedStatsTests(),
new InternalExtendedStatsBucketTests(),
new InternalGeoBoundsTests(),
new InternalGeoCentroidTests(),
new InternalHistogramTests(),
new InternalDateHistogramTests(),
new InternalVariableWidthHistogramTests(),
new LongTermsTests(),
new DoubleTermsTests(),
new StringTermsTests(),
new LongRareTermsTests(),
new StringRareTermsTests(),
new InternalMissingTests(),
new InternalNestedTests(),
new InternalReverseNestedTests(),
new InternalGlobalTests(),
new InternalFilterTests(),
new InternalSamplerTests(),
new GeoHashGridTests(),
new GeoTileGridTests(),
new InternalRangeTests(),
new InternalDateRangeTests(),
new InternalGeoDistanceTests(),
new InternalFiltersTests(),
new SignificantLongTermsTests(),
new SignificantStringTermsTests(),
new InternalScriptedMetricTests(),
new InternalBinaryRangeTests(),
new InternalTopHitsTests(),
new InternalCompositeTests(),
new InternalMedianAbsoluteDeviationTests()
);
@Before
public void init() throws Exception {
for (InternalAggregationTestCase<?> aggsTest : aggsTests) {
if (aggsTest instanceof InternalMultiBucketAggregationTestCase) {
// Lower down the number of buckets generated by multi bucket aggregation tests in
// order to avoid too many aggregations to be created.
((InternalMultiBucketAggregationTestCase<?>) aggsTest).setMaxNumberOfBuckets(3);
}
aggsTest.setUp();
}
}
@After
public void cleanUp() throws Exception {
for (InternalAggregationTestCase<?> aggsTest : aggsTests) {
aggsTest.tearDown();
}
}
public final InternalAggregations createTestInstance() {
return createTestInstance(1, 0, 5);
}
private static InternalAggregations createTestInstance(final int minNumAggs, final int currentDepth, final int maxDepth) {
int numAggs = randomIntBetween(minNumAggs, 4);
List<InternalAggregation> aggs = new ArrayList<>(numAggs);
for (int i = 0; i < numAggs; i++) {
InternalAggregationTestCase<?> testCase = randomFrom(aggsTests);
if (testCase instanceof InternalMultiBucketAggregationTestCase<?> multiBucketAggTestCase) {
if (currentDepth < maxDepth) {
multiBucketAggTestCase.setSubAggregationsSupplier(() -> createTestInstance(0, currentDepth + 1, maxDepth));
} else {
multiBucketAggTestCase.setSubAggregationsSupplier(() -> InternalAggregations.EMPTY);
}
} else if (testCase instanceof InternalSingleBucketAggregationTestCase<?> singleBucketAggTestCase) {
if (currentDepth < maxDepth) {
singleBucketAggTestCase.subAggregationsSupplier = () -> createTestInstance(0, currentDepth + 1, maxDepth);
} else {
singleBucketAggTestCase.subAggregationsSupplier = () -> InternalAggregations.EMPTY;
}
}
aggs.add(testCase.createTestInstanceForXContent());
}
return InternalAggregations.from(aggs);
}
}
| AggregationsTests |
java | quarkusio__quarkus | extensions/websockets-next/deployment/src/test/java/io/quarkus/websockets/next/test/upgrade/AbstractHttpUpgradeCheckTestBase.java | {
"start": 3432,
"end": 3581
} | class ____ {
@OnTextMessage
public String onMessage(String message) {
return message + " Hey";
}
}
}
| Responding |
java | quarkusio__quarkus | independent-projects/arc/processor/src/main/java/io/quarkus/arc/processor/CustomAlterableContextsGenerator.java | {
"start": 533,
"end": 3297
} | class ____ extends AbstractGenerator {
private static final Logger LOGGER = Logger.getLogger(CustomAlterableContextsGenerator.class);
CustomAlterableContextsGenerator(boolean generateSources) {
super(generateSources);
}
/**
* Creator of an {@link CustomAlterableContexts} must call this method at an appropriate point
* in time and write the result to an appropriate output. If not, the bytecode sequences generated
* using the result of {@code CustomAlterableContexts.add()} will refer to non-existing classes.
*
* @return the generated classes, never {@code null}
*/
Collection<Resource> generate(CustomAlterableContexts.CustomAlterableContextInfo info) {
ResourceClassOutput classOutput = new ResourceClassOutput(info.isApplicationClass, generateSources);
createInjectableContextSubclass(classOutput, info);
return classOutput.getResources();
}
private void createInjectableContextSubclass(ClassOutput classOutput, CustomAlterableContextInfo info) {
Gizmo gizmo = gizmo(classOutput);
gizmo.class_(info.generatedName, cc -> {
cc.extends_(info.contextClass);
cc.implements_(InjectableContext.class);
cc.defaultConstructor();
// implement `isNormal()` if needed
if (info.isNormal != null) {
cc.method("isNormal", mc -> {
mc.returning(boolean.class);
mc.body(bc -> {
bc.return_(info.isNormal);
});
});
}
// implement `destroy()`
cc.method("destroy", mc -> {
mc.returning(void.class);
mc.body(bc -> {
bc.throw_(UnsupportedOperationException.class, "Custom AlterableContext cannot destroy all instances");
});
});
// implement `getState()`
cc.method("getState", mc -> {
mc.returning(InjectableContext.ContextState.class);
mc.body(bc -> {
bc.throw_(UnsupportedOperationException.class, "Custom AlterableContext has no state");
});
});
// implement `destroy(ContextState)`
cc.method("destroy", mc -> {
mc.returning(void.class);
mc.parameter("state", InjectableContext.ContextState.class);
mc.body(bc -> {
bc.throw_(UnsupportedOperationException.class, "Custom AlterableContext has no state");
});
});
});
LOGGER.debugf("InjectableContext subclass generated: %s", info.generatedName);
}
}
| CustomAlterableContextsGenerator |
java | reactor__reactor-core | reactor-core/src/test/java/reactor/core/publisher/ContextLossDetectionTest.java | {
"start": 7947,
"end": 8752
} | class ____<T> implements Publisher<T> {
final Publisher<T> source;
final @Nullable Context lossyContext;
final boolean useCoreSubscriber;
ContextLossyPublisher(Publisher<T> source, Context lossyContext) {
this.source = source;
this.lossyContext = lossyContext;
this.useCoreSubscriber = false;
}
ContextLossyPublisher(Publisher<T> source, boolean useCoreSubscriber) {
this.source = source;
this.lossyContext = null;
this.useCoreSubscriber = useCoreSubscriber;
}
@Override
public void subscribe(Subscriber<? super T> subscriber) {
if (lossyContext == null && !useCoreSubscriber) {
source.subscribe(new ForeignOperator<>(subscriber));
}
else {
source.subscribe(new CoreLossyOperator<>(subscriber, lossyContext));
}
}
static | ContextLossyPublisher |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/common/network/ThreadWatchdog.java | {
"start": 4904,
"end": 5245
} | class ____ keeps track of activity on that thread, represented as a {@code long} which is incremented every time an
* activity starts or stops. Thus the parity of its value indicates whether the thread is idle or not. Crucially, the activity tracking
* is very lightweight (on the tracked thread).
*/
public static final | which |
java | spring-projects__spring-boot | build-plugin/spring-boot-maven-plugin/src/main/java/org/springframework/boot/maven/CustomLayersProvider.java | {
"start": 1898,
"end": 6411
} | class ____ {
CustomLayers getLayers(Document document) {
validate(document);
Element root = document.getDocumentElement();
List<ContentSelector<String>> applicationSelectors = getApplicationSelectors(root);
List<ContentSelector<Library>> librarySelectors = getLibrarySelectors(root);
List<Layer> layers = getLayers(root);
return new CustomLayers(layers, applicationSelectors, librarySelectors);
}
private void validate(Document document) {
Schema schema = loadSchema();
try {
Validator validator = schema.newValidator();
validator.validate(new DOMSource(document));
}
catch (SAXException | IOException ex) {
throw new IllegalStateException("Invalid layers.xml configuration", ex);
}
}
private Schema loadSchema() {
try {
SchemaFactory factory = SchemaFactory.newInstance(XMLConstants.W3C_XML_SCHEMA_NS_URI);
return factory.newSchema(getClass().getResource("layers.xsd"));
}
catch (SAXException ex) {
throw new IllegalStateException("Unable to load layers XSD");
}
}
private List<ContentSelector<String>> getApplicationSelectors(Element root) {
return getSelectors(root, "application", (element) -> getSelector(element, ApplicationContentFilter::new));
}
private List<ContentSelector<Library>> getLibrarySelectors(Element root) {
return getSelectors(root, "dependencies", (element) -> getLibrarySelector(element, LibraryContentFilter::new));
}
private List<Layer> getLayers(Element root) {
Element layerOrder = getChildElement(root, "layerOrder");
if (layerOrder == null) {
return Collections.emptyList();
}
return getChildNodeTextContent(layerOrder, "layer").stream().map(Layer::new).toList();
}
private <T> List<ContentSelector<T>> getSelectors(Element root, String elementName,
Function<Element, ContentSelector<T>> selectorFactory) {
Element element = getChildElement(root, elementName);
if (element == null) {
return Collections.emptyList();
}
List<ContentSelector<T>> selectors = new ArrayList<>();
NodeList children = element.getChildNodes();
for (int i = 0; i < children.getLength(); i++) {
Node child = children.item(i);
if (child instanceof Element childElement) {
ContentSelector<T> selector = selectorFactory.apply(childElement);
selectors.add(selector);
}
}
return selectors;
}
private <T> ContentSelector<T> getSelector(Element element, Function<String, ContentFilter<T>> filterFactory) {
Layer layer = new Layer(element.getAttribute("layer"));
List<String> includes = getChildNodeTextContent(element, "include");
List<String> excludes = getChildNodeTextContent(element, "exclude");
return new IncludeExcludeContentSelector<>(layer, includes, excludes, filterFactory);
}
private ContentSelector<Library> getLibrarySelector(Element element,
Function<String, ContentFilter<Library>> filterFactory) {
Layer layer = new Layer(element.getAttribute("layer"));
List<String> includes = getChildNodeTextContent(element, "include");
List<String> excludes = getChildNodeTextContent(element, "exclude");
Element includeModuleDependencies = getChildElement(element, "includeModuleDependencies");
Element excludeModuleDependencies = getChildElement(element, "excludeModuleDependencies");
List<ContentFilter<Library>> includeFilters = includes.stream()
.map(filterFactory)
.collect(Collectors.toCollection(ArrayList::new));
if (includeModuleDependencies != null) {
includeFilters.add(Library::isLocal);
}
List<ContentFilter<Library>> excludeFilters = excludes.stream()
.map(filterFactory)
.collect(Collectors.toCollection(ArrayList::new));
if (excludeModuleDependencies != null) {
excludeFilters.add(Library::isLocal);
}
return new IncludeExcludeContentSelector<>(layer, includeFilters, excludeFilters);
}
private List<String> getChildNodeTextContent(Element element, String tagName) {
List<String> patterns = new ArrayList<>();
NodeList nodes = element.getElementsByTagName(tagName);
for (int i = 0; i < nodes.getLength(); i++) {
Node node = nodes.item(i);
if (node instanceof Element) {
patterns.add(node.getTextContent());
}
}
return patterns;
}
private @Nullable Element getChildElement(Element element, String tagName) {
NodeList nodes = element.getElementsByTagName(tagName);
if (nodes.getLength() == 0) {
return null;
}
if (nodes.getLength() > 1) {
throw new IllegalStateException("Multiple '" + tagName + "' nodes found");
}
return (Element) nodes.item(0);
}
}
| CustomLayersProvider |
java | quarkusio__quarkus | extensions/hibernate-orm/deployment/src/test/java/io/quarkus/hibernate/orm/config/dialect/DialectSpecificSettingsMariaDBIgnoredTest.java | {
"start": 482,
"end": 2215
} | class ____ {
@RegisterExtension
static QuarkusUnitTest runner = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClass(MyEntity.class)
.addAsResource("application-start-offline-mariadb-dialect.properties", "application.properties"))
.setLogRecordPredicate(record -> HibernateProcessorUtil.class.getName().equals(record.getLoggerName()))
.overrideConfigKey("quarkus.datasource.db-kind", "") // This will override to default which is H2
.overrideConfigKey("quarkus.hibernate-orm.dialect.storage-engine", "")
.overrideConfigKey("quarkus.hibernate-orm.dialect.mariadb.bytes-per-character", "8") // This will be ignored
.overrideConfigKey("quarkus.hibernate-orm.dialect.mariadb.no-backslash-escapes", "true") // This will be ignored
.setLogRecordPredicate(record -> HibernateProcessorUtil.class.getName().equals(record.getLoggerName()))
.assertLogRecords(records -> {
assertThat(records)
.extracting(LogRecord::getMessage)
.anyMatch(
l -> l.contains("MariaDB specific settings being ignored because the database is not MariaDB"));
});
@Inject
EntityManagerFactory entityManagerFactory;
@Test
public void applicationStarts() {
assertThat(entityManagerFactory.getProperties().get("hibernate.dialect.mysql.bytes_per_character"))
.isEqualTo(null);
assertThat(entityManagerFactory.getProperties().get("hibernate.dialect.mysql.no_backslash_escapes"))
.isEqualTo(null);
}
}
| DialectSpecificSettingsMariaDBIgnoredTest |
java | apache__camel | components/camel-cxf/camel-cxf-soap/src/test/java/org/apache/camel/component/cxf/jaxws/CxfConsumerClientDisconnectedTest.java | {
"start": 1393,
"end": 3959
} | class ____ extends CamelTestSupport {
private static final int PORT = CXFTestSupport.getPort1();
private static final String CONTEXT = "/CxfConsumerClientDisconnectedTest";
private static final String CXT = PORT + CONTEXT;
private String cxfRsEndpointUri = "cxf://http://localhost:" + CXT + "/rest?synchronous=" + isSynchronous()
+ "&serviceClass=org.apache.camel.component.cxf.jaxws.ServiceProvider&dataFormat=PAYLOAD";
@Override
protected RouteBuilder createRouteBuilder() throws Exception {
return new RouteBuilder() {
public void configure() {
getContext().setStreamCaching(true);
getContext().getStreamCachingStrategy().setSpoolThreshold(1L);
errorHandler(noErrorHandler());
from(cxfRsEndpointUri)
// should be able to convert to Customer
.to("mock:result")
.process(exchange -> {
Thread.sleep(100);
exchange.getExchangeExtension().addOnCompletion(new Synchronization() {
@Override
public void onComplete(Exchange exchange) {
template.sendBody("mock:onComplete", "");
}
@Override
public void onFailure(Exchange exchange) {
}
});
});
}
};
}
@Test
public void testClientDisconnect() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMessageCount(1);
MockEndpoint onComplete = getMockEndpoint("mock:onComplete");
onComplete.expectedMessageCount(1);
TelnetClient telnetClient = new TelnetClient();
telnetClient.connect("localhost", PORT);
telnetClient.setTcpNoDelay(true);
telnetClient.setReceiveBufferSize(1);
BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(telnetClient.getOutputStream()));
writer.write("GET " + CONTEXT + "/rest/customerservice/customers HTTP/1.1\nhost: localhost\n\n");
writer.flush();
telnetClient.disconnect();
mock.assertIsSatisfied();
onComplete.assertIsSatisfied();
}
protected boolean isSynchronous() {
return false;
}
}
| CxfConsumerClientDisconnectedTest |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-common/deployment/src/main/java/io/quarkus/resteasy/reactive/common/deployment/JaxRsResourceIndexBuildItem.java | {
"start": 364,
"end": 642
} | class ____ extends SimpleBuildItem {
private final IndexView indexView;
public JaxRsResourceIndexBuildItem(IndexView indexView) {
this.indexView = indexView;
}
public IndexView getIndexView() {
return indexView;
}
}
| JaxRsResourceIndexBuildItem |
java | apache__camel | components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/spring/config/TwoRouteRefReverseOnExceptionTest.java | {
"start": 1250,
"end": 2422
} | class ____ extends SpringTestSupport {
@Test
public void testTwoRouteRefReverseNoOnException() throws Exception {
getMockEndpoint("mock:foo").expectedMessageCount(1);
getMockEndpoint("mock:handled").expectedMessageCount(0);
try {
template.sendBody("direct:foo", "Hello World");
fail("Should have thrown exception");
} catch (CamelExecutionException e) {
assertIsInstanceOf(IllegalArgumentException.class, e.getCause());
assertEquals("Damn", e.getCause().getMessage());
}
assertMockEndpointsSatisfied();
}
@Test
public void testTwoRouteRefReverseOnException() throws Exception {
getMockEndpoint("mock:bar").expectedMessageCount(1);
getMockEndpoint("mock:handled").expectedMessageCount(1);
template.sendBody("direct:bar", "Hello World");
assertMockEndpointsSatisfied();
}
@Override
protected AbstractXmlApplicationContext createApplicationContext() {
return new ClassPathXmlApplicationContext("org/apache/camel/spring/config/TwoRouteRefReverseOnException.xml");
}
}
| TwoRouteRefReverseOnExceptionTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/nullness/UnnecessaryCheckNotNullTest.java | {
"start": 12081,
"end": 17173
} | class ____ {
private Tester field = new Tester();
public void test() {
Object a = new Object();
Object b = new Object();
byte byte1 = 0;
short short1 = 0;
int int1 = 0, int2 = 0;
long long1 = 0;
float float1 = 0;
double double1 = 0;
boolean boolean1 = false, boolean2 = false;
char char1 = 0;
Tester tester = new Tester();
// Do we detect all primitive types?
// BUG: Diagnostic contains: remove this line
Preconditions.checkNotNull(byte1);
// BUG: Diagnostic contains: remove this line
Preconditions.checkNotNull(short1);
// BUG: Diagnostic contains: remove this line
Preconditions.checkNotNull(int1);
// BUG: Diagnostic contains: remove this line
Preconditions.checkNotNull(long1);
// BUG: Diagnostic contains: remove this line
Preconditions.checkNotNull(float1);
// BUG: Diagnostic contains: remove this line
Preconditions.checkNotNull(double1);
// BUG: Diagnostic contains: remove this line
Preconditions.checkNotNull(boolean1);
// BUG: Diagnostic contains: remove this line
Preconditions.checkNotNull(char1);
// Do we give the right suggested fix?
// BUG: Diagnostic contains: boolean1 = boolean2;
boolean1 = Preconditions.checkNotNull(boolean2);
// BUG: Diagnostic contains: boolean1 = int1 == int2;
boolean1 = Preconditions.checkNotNull(int1 == int2);
// BUG: Diagnostic contains: checkState(tester.hasId())
Preconditions.checkNotNull(tester.hasId());
// BUG: Diagnostic contains: checkState(tester.hasId(), "Must have ID!")
Preconditions.checkNotNull(tester.hasId(), "Must have ID!");
// BUG: Diagnostic contains: checkState(tester.hasId(), "Must have %s!", "ID")
Preconditions.checkNotNull(tester.hasId(), "Must have %s!", "ID");
// Do we handle arguments that evaluate to a primitive type?
// BUG: Diagnostic contains: Preconditions.checkNotNull(a)
Preconditions.checkNotNull(a != null);
// BUG: Diagnostic contains: Preconditions.checkNotNull(a)
Preconditions.checkNotNull(a == null);
// BUG: Diagnostic contains: checkState(int1 == int2)
Preconditions.checkNotNull(int1 == int2);
// BUG: Diagnostic contains: checkState(int1 > int2)
Preconditions.checkNotNull(int1 > int2);
// BUG: Diagnostic contains: remove this line
Preconditions.checkNotNull(boolean1 ? int1 : int2);
// Do we handle static imports?
// BUG: Diagnostic contains: remove this line
checkNotNull(byte1);
// BUG: Diagnostic contains: 'checkState(tester.hasId())
checkNotNull(tester.hasId());
}
public void test2(Tester arg) {
Tester local = new Tester();
// Do we correctly distinguish checkArgument from checkState?
// BUG: Diagnostic contains: checkArgument(arg.hasId())
checkNotNull(arg.hasId());
// BUG: Diagnostic contains: checkState(field.hasId())
checkNotNull(field.hasId());
// BUG: Diagnostic contains: checkState(local.hasId())
checkNotNull(local.hasId());
// BUG: Diagnostic contains: checkState(!local.hasId())
checkNotNull(!local.hasId());
// BUG: Diagnostic contains: checkArgument(!(arg instanceof Tester))
checkNotNull(!(arg instanceof Tester));
// BUG: Diagnostic contains: checkState(getTrue())
checkNotNull(getTrue());
// BUG: Diagnostic contains: remove this line
checkNotNull(arg.getId());
// BUG: Diagnostic contains: id = arg.getId()
int id = checkNotNull(arg.getId());
// BUG: Diagnostic contains: boolean b = arg.hasId();
boolean b = checkNotNull(arg.hasId());
// Do we handle long chains of method calls?
// BUG: Diagnostic contains: checkArgument(arg.getTester().getTester().hasId())
checkNotNull(arg.getTester().getTester().hasId());
// BUG: Diagnostic contains: checkArgument(arg.tester.getTester().hasId())
checkNotNull(arg.tester.getTester().hasId());
}
private boolean getTrue() {
return true;
}
private static | UnnecessaryCheckNotNullPrimitivePositiveCases |
java | apache__kafka | connect/runtime/src/test/java/org/apache/kafka/connect/test/util/ConcurrencyUtils.java | {
"start": 994,
"end": 2330
} | class ____ {
public static final long DEFAULT_LATCH_AWAIT_TIME_MS = TimeUnit.SECONDS.toMillis(5);
/**
* {@link CountDownLatch#await(long, TimeUnit) Await} the given latch, failing if the timeout elapses or the wait is interrupted.
* @param latch the latch to await; may not be null
* @param timeoutMs the maximum amount of time to wait for the latch, in milliseconds
* @param message the failure message to use if the timeout elapses or the wait is interrupted; may be null
*/
public static void awaitLatch(CountDownLatch latch, long timeoutMs, String message) {
try {
assertTrue(latch.await(timeoutMs, TimeUnit.MILLISECONDS), message);
} catch (InterruptedException e) {
throw new AssertionError(message, e);
}
}
/**
* {@link CountDownLatch#await(long, TimeUnit) Await} the given latch, failing if the
* {@link #DEFAULT_LATCH_AWAIT_TIME_MS default timeout} elapses or the wait is interrupted.
* @param latch the latch to await; may not be null
* @param message the failure message to use if the timeout elapses or the wait is interrupted; may be null
*/
public static void awaitLatch(CountDownLatch latch, String message) {
awaitLatch(latch, DEFAULT_LATCH_AWAIT_TIME_MS, message);
}
}
| ConcurrencyUtils |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/net/Facility.java | {
"start": 4843,
"end": 5001
} | enum ____, case-insensitive. If null, returns, defaultFacility
* @param defaultFacility the Facility to return if name is null
* @return a Facility | name |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/query/sqm/tree/expression/SqmModifiedSubQueryExpression.java | {
"start": 779,
"end": 2988
} | enum ____ {
ALL,
ANY,
SOME,
}
private final SqmSubQuery<T> subQuery;
private final Modifier modifier;
public SqmModifiedSubQueryExpression(
SqmSubQuery<T> subquery,
Modifier modifier,
NodeBuilder builder) {
this (
subquery,
modifier,
subquery.getNodeType(),
builder
);
}
public SqmModifiedSubQueryExpression(
SqmSubQuery<T> subQuery,
Modifier modifier,
@Nullable SqmBindableType<T> resultType,
NodeBuilder builder) {
super( resultType, builder );
this.subQuery = subQuery;
this.modifier = modifier;
}
@Override
public SqmModifiedSubQueryExpression<T> copy(SqmCopyContext context) {
final SqmModifiedSubQueryExpression<T> existing = context.getCopy( this );
if ( existing != null ) {
return existing;
}
final SqmModifiedSubQueryExpression<T> expression = context.registerCopy(
this,
new SqmModifiedSubQueryExpression<>(
subQuery.copy( context ),
modifier,
getNodeType(),
nodeBuilder()
)
);
copyTo( expression, context );
return expression;
}
public Modifier getModifier() {
return modifier;
}
public SqmSubQuery<T> getSubQuery() {
return subQuery;
}
@Override
public <X> X accept(SemanticQueryWalker<X> walker) {
return walker.visitModifiedSubQueryExpression( this );
}
@Override
public void appendHqlString(StringBuilder hql, SqmRenderContext context) {
hql.append( modifier );
hql.append( " (" );
subQuery.appendHqlString( hql, context );
hql.append( ')' );
}
@Override
public boolean equals(@Nullable Object object) {
return object instanceof SqmModifiedSubQueryExpression<?> that
&& modifier == that.modifier
&& subQuery.equals( that.subQuery );
}
@Override
public int hashCode() {
int result = subQuery.hashCode();
result = 31 * result + modifier.hashCode();
return result;
}
@Override
public boolean isCompatible(Object object) {
return object instanceof SqmModifiedSubQueryExpression<?> that
&& modifier == that.modifier
&& subQuery.isCompatible( that.subQuery );
}
@Override
public int cacheHashCode() {
int result = subQuery.cacheHashCode();
result = 31 * result + modifier.hashCode();
return result;
}
}
| Modifier |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/OptionalMapToOptionalTest.java | {
"start": 1681,
"end": 2135
} | class ____ {
public boolean test(Optional<Integer> optional) {
// BUG: Diagnostic contains:
return optional.transform(i -> Optional.of(1)).isPresent();
}
}
""")
.doTest();
}
@Test
public void positiveReturned() {
helper
.addSourceLines(
"Test.java",
"""
import com.google.common.base.Optional;
| Test |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/common/lucene/search/XMoreLikeThis.java | {
"start": 6320,
"end": 6469
} | class ____ {
// static {
// assert Version.CURRENT.luceneVersion == org.apache.lucene.util.Version.LUCENE_4_9:
// "Remove this | XMoreLikeThis |
java | playframework__playframework | documentation/manual/working/javaGuide/main/http/code/javaguide/http/full/Application.java | {
"start": 284,
"end": 404
} | class ____ extends Controller {
public Result index() {
return ok("It works!");
}
}
// #full-controller
| Application |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | {
"start": 298476,
"end": 299045
} | class ____ extends SqlShuttle {
public SqlNode go(SqlNode node) {
return requireNonNull(
node.accept(this), () -> "NavigationModifier returned for " + node);
}
}
/**
* Shuttle that expands navigation expressions in a MATCH_RECOGNIZE clause.
*
* <p>Examples:
*
* <ul>
* <li>{@code PREV(A.price + A.amount)} → {@code PREV(A.price) + PREV(A.amount)}
* <li>{@code FIRST(A.price * 2)} → {@code FIRST(A.PRICE) * 2}
* </ul>
*/
private static | NavigationModifier |
java | quarkusio__quarkus | extensions/oidc-db-token-state-manager/deployment/src/test/java/io/quarkus/oidc/db/token/state/manager/GreetingResource.java | {
"start": 328,
"end": 803
} | class ____ {
@Inject
EntityManager em;
@Transactional
@Path("/new")
@GET
public void newGreeting() {
var entity = new GreetingEntity();
entity.greeting = Objects.requireNonNull("Good day");
em.persist(entity);
}
@GET
public Object getGreetings() {
return em
.createNativeQuery("SELECT greeting FROM Greeting")
.getResultList()
.get(0);
}
}
| GreetingResource |
java | apache__flink | flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/synchronization/FutureCompletingBlockingQueueTest.java | {
"start": 1440,
"end": 9478
} | class ____ {
private static final int DEFAULT_CAPACITY = 2;
@Test
void testBasics() throws InterruptedException {
FutureCompletingBlockingQueue<Integer> queue = new FutureCompletingBlockingQueue<>(5);
CompletableFuture<Void> future = queue.getAvailabilityFuture();
assertThat(queue.isEmpty()).isTrue();
assertThat(queue.size()).isEqualTo(0);
queue.put(0, 1234);
assertThat(future.isDone()).isTrue();
assertThat(queue.size()).isEqualTo(1);
assertThat(queue.isEmpty()).isFalse();
assertThat(queue.remainingCapacity()).isEqualTo(4);
assertThat(queue.peek()).isNotNull();
assertThat((int) queue.peek()).isEqualTo(1234);
assertThat((int) queue.poll()).isEqualTo(1234);
assertThat(queue.size()).isEqualTo(0);
assertThat(queue.isEmpty()).isTrue();
assertThat(queue.remainingCapacity()).isEqualTo(5);
}
@Test
void testPoll() throws InterruptedException {
FutureCompletingBlockingQueue<Integer> queue = new FutureCompletingBlockingQueue<>();
queue.put(0, 1234);
Integer value = queue.poll();
assertThat(value).isNotNull();
assertThat((int) value).isEqualTo(1234);
}
@Test
void testPollEmptyQueue() throws InterruptedException {
FutureCompletingBlockingQueue<Integer> queue = new FutureCompletingBlockingQueue<>();
queue.put(0, 1234);
assertThat(queue.poll()).isNotNull();
assertThat(queue.poll()).isNull();
assertThat(queue.poll()).isNull();
}
@Test
void testWakeUpPut() throws InterruptedException {
FutureCompletingBlockingQueue<Integer> queue = new FutureCompletingBlockingQueue<>(1);
CountDownLatch latch = new CountDownLatch(1);
new Thread(
() -> {
try {
assertThat(queue.put(0, 1234)).isTrue();
assertThat(queue.put(0, 1234)).isFalse();
latch.countDown();
} catch (InterruptedException e) {
fail("Interrupted unexpectedly.");
}
})
.start();
queue.wakeUpPuttingThread(0);
latch.await();
assertThat(latch.getCount()).isEqualTo(0);
}
@Test
void testConcurrency() throws InterruptedException {
FutureCompletingBlockingQueue<Integer> queue = new FutureCompletingBlockingQueue<>(5);
final int numValuesPerThread = 10000;
final int numPuttingThreads = 5;
List<Thread> threads = new ArrayList<>();
for (int i = 0; i < numPuttingThreads; i++) {
final int index = i;
Thread t =
new Thread(
() -> {
for (int j = 0; j < numValuesPerThread; j++) {
int base = index * numValuesPerThread;
try {
queue.put(index, base + j);
} catch (InterruptedException e) {
fail("putting thread interrupted.");
}
}
});
t.start();
threads.add(t);
}
BitSet bitSet = new BitSet();
AtomicInteger count = new AtomicInteger(0);
for (int i = 0; i < 5; i++) {
Thread t =
new Thread(
() -> {
while (count.get() < numPuttingThreads * numValuesPerThread) {
Integer value = queue.poll();
if (value == null) {
continue;
}
count.incrementAndGet();
if (bitSet.get(value)) {
fail("Value " + value + " has been consumed before");
}
synchronized (bitSet) {
bitSet.set(value);
}
}
});
t.start();
threads.add(t);
}
for (Thread t : threads) {
t.join();
}
}
@Test
void testSpecifiedQueueCapacity() {
final int capacity = 8_000;
final FutureCompletingBlockingQueue<Object> queue =
new FutureCompletingBlockingQueue<>(capacity);
assertThat(queue.remainingCapacity()).isEqualTo(capacity);
}
@Test
void testQueueDefaultCapacity() {
final FutureCompletingBlockingQueue<Object> queue = new FutureCompletingBlockingQueue<>();
assertThat(queue.remainingCapacity()).isEqualTo(DEFAULT_CAPACITY);
assertThat(SourceReaderOptions.ELEMENT_QUEUE_CAPACITY.defaultValue().intValue())
.isEqualTo(DEFAULT_CAPACITY);
}
@Test
void testUnavailableWhenEmpty() {
final FutureCompletingBlockingQueue<Object> queue = new FutureCompletingBlockingQueue<>();
assertThat(queue.getAvailabilityFuture().isDone()).isFalse();
}
@Test
void testImmediatelyAvailableAfterPut() throws InterruptedException {
final FutureCompletingBlockingQueue<Object> queue = new FutureCompletingBlockingQueue<>();
queue.put(0, new Object());
assertThat(queue.getAvailabilityFuture().isDone()).isTrue();
}
@Test
void testFutureBecomesAvailableAfterPut() throws InterruptedException {
final FutureCompletingBlockingQueue<Object> queue = new FutureCompletingBlockingQueue<>();
final CompletableFuture<?> future = queue.getAvailabilityFuture();
queue.put(0, new Object());
assertThat(future.isDone()).isTrue();
}
@Test
void testUnavailableWhenBecomesEmpty() throws InterruptedException {
final FutureCompletingBlockingQueue<Object> queue = new FutureCompletingBlockingQueue<>();
queue.put(0, new Object());
queue.poll();
assertThat(queue.getAvailabilityFuture().isDone()).isFalse();
}
@Test
void testAvailableAfterNotifyAvailable() throws InterruptedException {
final FutureCompletingBlockingQueue<Object> queue = new FutureCompletingBlockingQueue<>();
queue.notifyAvailable();
assertThat(queue.getAvailabilityFuture().isDone()).isTrue();
}
@Test
void testFutureBecomesAvailableAfterNotifyAvailable() throws InterruptedException {
final FutureCompletingBlockingQueue<Object> queue = new FutureCompletingBlockingQueue<>();
final CompletableFuture<?> future = queue.getAvailabilityFuture();
queue.notifyAvailable();
assertThat(future.isDone()).isTrue();
}
@Test
void testPollResetsAvailability() throws InterruptedException {
final FutureCompletingBlockingQueue<Object> queue = new FutureCompletingBlockingQueue<>();
queue.notifyAvailable();
final CompletableFuture<?> beforePoll = queue.getAvailabilityFuture();
queue.poll();
final CompletableFuture<?> afterPoll = queue.getAvailabilityFuture();
assertThat(beforePoll.isDone()).isTrue();
assertThat(afterPoll.isDone()).isFalse();
}
/**
* This test is to guard that our reflection is not broken and we do not lose the performance
* advantage. This is possible, because the tests depend on the runtime modules while the main
* scope does not.
*/
@Test
void testQueueUsesShortCircuitFuture() {
assertThat(FutureCompletingBlockingQueue.AVAILABLE)
.isSameAs(AvailabilityProvider.AVAILABLE);
}
}
| FutureCompletingBlockingQueueTest |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchAction.java | {
"start": 435,
"end": 708
} | class ____ extends ActionType<AckWatchResponse> {
public static final AckWatchAction INSTANCE = new AckWatchAction();
public static final String NAME = "cluster:admin/xpack/watcher/watch/ack";
private AckWatchAction() {
super(NAME);
}
}
| AckWatchAction |
java | apache__maven | impl/maven-cli/src/test/java/org/apache/maven/cling/invoker/mvn/resident/ResidentMavenInvokerTest.java | {
"start": 1575,
"end": 2545
} | class ____ extends MavenInvokerTestSupport {
@Override
protected Invoker createInvoker(ClassWorld classWorld) {
return new ResidentMavenInvoker(
ProtoLookup.builder().addMapping(ClassWorld.class, classWorld).build(), null);
}
@Override
protected Parser createParser() {
return new MavenParser();
}
@Test
void defaultFs(
@TempDir(cleanup = CleanupMode.ON_SUCCESS) Path cwd,
@TempDir(cleanup = CleanupMode.ON_SUCCESS) Path userHome)
throws Exception {
invoke(cwd, userHome, List.of("verify"), List.of());
}
@Disabled("Enable it when fully moved to NIO2 with Path/Filesystem (ie MavenExecutionRequest)")
@Test
void jimFs() throws Exception {
try (FileSystem fs = Jimfs.newFileSystem(Configuration.unix())) {
invoke(fs.getPath("/cwd"), fs.getPath("/home"), List.of("verify"), List.of());
}
}
}
| ResidentMavenInvokerTest |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/sql/ast/statement/SQLUniqueConstraint.java | {
"start": 693,
"end": 888
} | interface ____ extends SQLConstraint {
List<SQLSelectOrderByItem> getColumns();
boolean containsColumn(String column);
boolean containsColumn(long columnNameHash);
}
| SQLUniqueConstraint |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/test/checkpointing/NotifyCheckpointAbortedITCase.java | {
"start": 16574,
"end": 17259
} | class ____
extends DefaultOperatorStateBackend {
public DeclineSinkFailingOperatorStateBackend(
ExecutionConfig executionConfig,
CloseableRegistry closeStreamOnCancelRegistry,
SnapshotStrategyRunner<OperatorStateHandle, ?> snapshotStrategyRunner) {
super(
executionConfig,
closeStreamOnCancelRegistry,
new HashMap<>(),
new HashMap<>(),
new HashMap<>(),
new HashMap<>(),
snapshotStrategyRunner);
}
}
public static | DeclineSinkFailingOperatorStateBackend |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/error/ShouldContain_create_Test.java | {
"start": 1664,
"end": 15172
} | class ____ {
@Test
void should_create_error_message() {
// GIVEN
ErrorMessageFactory factory = shouldContain(list("Yoda"), list("Luke", "Yoda"), newLinkedHashSet("Luke"));
// WHEN
String message = factory.create(new TextDescription("Test"));
// THEN
then(message).isEqualTo(format("[Test] %n" +
"Expecting ArrayList:%n" +
" [\"Yoda\"]%n" +
"to contain:%n" +
" [\"Luke\", \"Yoda\"]%n" +
"but could not find the following element(s):%n" +
" [\"Luke\"]%n"));
}
@Test
void should_create_error_message_with_custom_comparison_strategy() {
// GIVEN
ErrorMessageFactory factory = shouldContain(list("Yoda"), list("Luke", "Yoda"), newLinkedHashSet("Luke"),
new ComparatorBasedComparisonStrategy(CaseInsensitiveStringComparator.INSTANCE));
// WHEN
String message = factory.create(new TextDescription("Test"));
// THEN
then(message).isEqualTo(format("[Test] %n" +
"Expecting ArrayList:%n" +
" [\"Yoda\"]%n" +
"to contain:%n" +
" [\"Luke\", \"Yoda\"]%n" +
"but could not find the following element(s):%n" +
" [\"Luke\"]%n" +
"when comparing values using CaseInsensitiveStringComparator"));
}
@Test
void should_create_error_message_differentiating_long_from_integer_in_arrays() {
// GIVEN
ErrorMessageFactory factory = shouldContain(list(5L, 7L), list(5, 7), newLinkedHashSet(5, 7));
// WHEN
String message = factory.create(new TextDescription("Test"));
// THEN
then(message).isEqualTo(format("[Test] %n" +
"Expecting ArrayList:%n" +
" [5L, 7L]%n" +
"to contain:%n" +
" [5, 7]%n" +
"but could not find the following element(s):%n" +
" [5, 7]%n"));
}
@Test
void should_create_error_message_differentiating_double_from_float() {
// GIVEN
ErrorMessageFactory factory = shouldContain(list(5d, 7d), list(5f, 7f), newLinkedHashSet(5f, 7f));
// WHEN
String message = factory.create(new TextDescription("Test"));
// THEN
then(message).isEqualTo(format("[Test] %n" +
"Expecting ArrayList:%n" +
" [5.0, 7.0]%n" +
"to contain:%n" +
" [5.0f, 7.0f]%n" +
"but could not find the following element(s):%n" +
" [5.0f, 7.0f]%n"));
}
@Test
void should_create_error_message_for_map() {
// GIVEN
Map<String, Double> map = mapOf(MapEntry.entry("1", 2d));
ErrorMessageFactory factory = shouldContain(map, MapEntry.entry("3", 4d), MapEntry.entry("3", 4d));
// WHEN
String message = factory.create(new TextDescription("Test"));
// THEN
then(message).isEqualTo(format("[Test] %n"
+ "Expecting map:%n"
+ " {\"1\"=2.0}%n"
+ "to contain:%n"
+ " \"3\"=4.0%n"
+ "but could not find the following map entries:%n"
+ " \"3\"=4.0%n"));
}
@Test
void should_create_error_message_for_byte_array() {
// GIVEN
ErrorMessageFactory factory = shouldContain(new byte[] { 2, 3 }, new byte[] { 4 }, new byte[] { 4 });
// WHEN
String message = factory.create(new TextDescription("Test"));
// THEN
then(message).isEqualTo(format("[Test] %n"
+ "Expecting byte[]:%n"
+ " [2, 3]%n"
+ "to contain:%n"
+ " [4]%n"
+ "but could not find the following byte(s):%n"
+ " [4]%n"));
}
@Test
void should_create_error_message_for_float_array() {
// GIVEN
ErrorMessageFactory factory = shouldContain(new float[] { 2f, 3f }, new float[] { 4f }, new float[] { 4f });
// WHEN
String message = factory.create(new TextDescription("Test"));
// THEN
then(message).isEqualTo(format("[Test] %n"
+ "Expecting float[]:%n"
+ " [2.0f, 3.0f]%n"
+ "to contain:%n"
+ " [4.0f]%n"
+ "but could not find the following float(s):%n"
+ " [4.0f]%n"));
}
@Test
void should_create_error_message_for_int_array() {
// GIVEN
ErrorMessageFactory factory = shouldContain(new int[] { 2, 3 }, new int[] { 4 }, new int[] { 4 });
// WHEN
String message = factory.create(new TextDescription("Test"));
// THEN
then(message).isEqualTo(format("[Test] %n"
+ "Expecting int[]:%n"
+ " [2, 3]%n"
+ "to contain:%n"
+ " [4]%n"
+ "but could not find the following int(s):%n"
+ " [4]%n"));
}
@Test
void should_create_error_message_for_char_array() {
// GIVEN
ErrorMessageFactory factory = shouldContain(new char[] { 'a', 'b' }, new char[] { 'c', 'd' }, new char[] { 'c', 'd' });
// WHEN
String message = factory.create(new TextDescription("Test"));
// THEN
then(message).isEqualTo(format("[Test] %n"
+ "Expecting char[]:%n"
+ " ['a', 'b']%n"
+ "to contain:%n"
+ " ['c', 'd']%n"
+ "but could not find the following char(s):%n"
+ " ['c', 'd']%n"));
}
@Test
void should_create_error_message_for_long_array() {
// GIVEN
ErrorMessageFactory factory = shouldContain(new long[] { 6L, 8L }, new long[] { 10L, 9L }, new long[] { 10L, 9L });
// WHEN
String message = factory.create(new TextDescription("Test"));
// THEN
then(message).isEqualTo(format("[Test] %n"
+ "Expecting long[]:%n"
+ " [6L, 8L]%n"
+ "to contain:%n"
+ " [10L, 9L]%n"
+ "but could not find the following long(s):%n"
+ " [10L, 9L]%n"));
}
@Test
void should_create_error_message_for_double_array() {
// GIVEN
ErrorMessageFactory factory = shouldContain(new double[] { 6, 8 }, new double[] { 10, 9 }, new double[] { 10, 9 });
// WHEN
String message = factory.create(new TextDescription("Test"));
// THEN
then(message).isEqualTo(format("[Test] %n"
+ "Expecting double[]:%n"
+ " [6.0, 8.0]%n"
+ "to contain:%n"
+ " [10.0, 9.0]%n"
+ "but could not find the following double(s):%n"
+ " [10.0, 9.0]%n"));
}
@Test
void should_create_error_message_for_boolean_array() {
// GIVEN
ErrorMessageFactory factory = shouldContain(new boolean[] { true }, new boolean[] { true, false }, new boolean[] { false });
// WHEN
String message = factory.create(new TextDescription("Test"));
// THEN
then(message).isEqualTo(format("[Test] %n"
+ "Expecting boolean[]:%n"
+ " [true]%n"
+ "to contain:%n"
+ " [true, false]%n"
+ "but could not find the following boolean(s):%n"
+ " [false]%n"));
}
@Test
void should_create_error_message_for_short_array() {
// GIVEN
ErrorMessageFactory factory = shouldContain(new short[] { 6, 8 }, new short[] { 10, 9 }, new short[] { 10, 9 });
// WHEN
String message = factory.create(new TextDescription("Test"));
// THEN
then(message).isEqualTo(format("[Test] %n"
+ "Expecting short[]:%n"
+ " [6, 8]%n"
+ "to contain:%n"
+ " [10, 9]%n"
+ "but could not find the following short(s):%n"
+ " [10, 9]%n"));
}
@Test
void should_create_error_message_for_String_array() {
// GIVEN
ErrorMessageFactory factory = shouldContain(new String[] { "a" }, new String[] { "b" }, new String[] { "b" });
// WHEN
String message = factory.create(new TextDescription("Test"));
// THEN
then(message).isEqualTo(format("[Test] %n"
+ "Expecting String[]:%n"
+ " [\"a\"]%n"
+ "to contain:%n"
+ " [\"b\"]%n"
+ "but could not find the following string(s):%n"
+ " [\"b\"]%n"));
}
@Test
void should_create_error_message_for_custom_class_array() {
Jedi actual = new Jedi("Yoda", "green");
Jedi expected = new Jedi("Luke", "blue");
// GIVEN
ErrorMessageFactory factory = shouldContain(array(actual), array(expected), array(expected));
// WHEN
String message = factory.create(new TextDescription("Test"));
// THEN
then(message).isEqualTo(format("[Test] %n"
+ "Expecting Jedi[]:%n"
+ " [Yoda the Jedi]%n"
+ "to contain:%n"
+ " [Luke the Jedi]%n"
+ "but could not find the following jedi(s):%n"
+ " [Luke the Jedi]%n"));
}
@Test
void should_create_error_message_for_file_directory() {
// GIVEN
File directory = mock(File.class);
given(directory.getAbsolutePath()).willReturn("root");
List<File> directoryContent = list(new File("root", "foo.txt"), new File("root", "bar.txt"));
ErrorMessageFactory factory = directoryShouldContain(directory, directoryContent, "glob:**.java");
// WHEN
String message = factory.create(new TextDescription("Test"));
// THEN
then(message).isEqualTo(format("[Test] %n" +
"Expecting directory:%n" +
" root%n" +
"to contain at least one file matching glob:**.java but there was none.%n" +
"The directory content was:%n" +
" [foo.txt, bar.txt]"));
}
@Test
void should_create_error_message_for_file_directory_escaping_percent() {
// GIVEN
File directory = mock(File.class);
given(directory.getAbsolutePath()).willReturn("root%dir");
List<File> directoryContent = list(new File("root%dir", "foo%1.txt"), new File("root%dir", "bar%2.txt"));
ErrorMessageFactory factory = directoryShouldContain(directory, directoryContent, "glob:**%Test.java");
// WHEN
String message = factory.create(new TextDescription("Test"));
// THEN
then(message).isEqualTo(format("[Test] %n" +
"Expecting directory:%n" +
" root%%dir%n" +
"to contain at least one file matching glob:**%%Test.java but there was none.%n" +
"The directory content was:%n" +
" [foo%%1.txt, bar%%2.txt]"));
}
@Test
void should_create_error_message_for_path_directory() {
// GIVEN
Path directory = Path.of("root");
List<Path> directoryContent = list(directory.resolve("foo.txt"), directory.resolve("bar.txt"));
ErrorMessageFactory factory = directoryShouldContain(directory, directoryContent, "glob:**.java");
// WHEN
String message = factory.create(new TextDescription("Test"));
// THEN
then(message).isEqualTo(format("[Test] %n" +
"Expecting directory:%n" +
" root%n" +
"to contain at least one file matching glob:**.java but there was none.%n" +
"The directory content was:%n" +
" [%s, %s]",
directory.resolve("foo.txt"), directory.resolve("bar.txt")));
}
}
| ShouldContain_create_Test |
java | apache__kafka | jmh-benchmarks/src/main/java/org/apache/kafka/jmh/log/TestLinearWriteSpeed.java | {
"start": 2838,
"end": 8829
} | class ____ {
public static void main(String[] args) throws Exception {
var parser = new OptionParser();
var option = createOptions(parser);
OptionSet options = parser.parse(args);
CommandLineUtils.checkRequiredArgs(parser, options, option.bytesOpt, option.sizeOpt);
long bytesToWrite = options.valueOf(option.bytesOpt);
int bufferSize = options.valueOf(option.sizeOpt);
int numFiles = options.valueOf(option.filesOpt);
long reportingInterval = options.valueOf(option.reportingIntervalOpt);
String dir = options.valueOf(option.dirOpt);
long maxThroughputBytes = options.valueOf(option.maxThroughputOpt) * 1024L * 1024L;
ByteBuffer buffer = ByteBuffer.allocate(bufferSize);
int messageSize = options.valueOf(option.messageSizeOpt);
long flushInterval = options.valueOf(option.flushIntervalOpt);
CompressionType compressionType = CompressionType.forName(options.valueOf(option.compressionCodecOpt));
Compression.Builder<? extends Compression> compressionBuilder = Compression.of(compressionType);
Integer compressionLevel = options.valueOf(option.compressionLevelOpt);
if (compressionLevel != null) setupCompression(compressionType, compressionBuilder, compressionLevel);
Compression compression = compressionBuilder.build();
ThreadLocalRandom.current().nextBytes(buffer.array());
int numMessages = bufferSize / (messageSize + Records.LOG_OVERHEAD);
long createTime = System.currentTimeMillis();
// Build the record batch once up-front; every timed write re-uses this same in-memory
// batch, so the benchmark measures write-path throughput, not record construction.
List<SimpleRecord> recordsList = new ArrayList<>();
for (int i = 0; i < numMessages; i++) {
    recordsList.add(new SimpleRecord(createTime, null, new byte[messageSize]));
}
MemoryRecords messageSet = MemoryRecords.withRecords(compression, recordsList.toArray(new SimpleRecord[0]));
// One Writable target per file; writes round-robin across them in the loop below.
Writable[] writables = new Writable[numFiles];
KafkaScheduler scheduler = new KafkaScheduler(1);
scheduler.startup();
for (int i = 0; i < numFiles; i++) {
    if (options.has(option.mmapOpt)) {
        writables[i] = new MmapWritable(new File(dir, "kafka-test-" + i + ".dat"), bytesToWrite / numFiles, buffer);
    } else if (options.has(option.channelOpt)) {
        writables[i] = new ChannelWritable(new File(dir, "kafka-test-" + i + ".dat"), buffer);
    } else if (options.has(option.logOpt)) {
        // Randomize the segment size (between 64 MB and ~576 MB) so segment-roll cost
        // is exercised at varying points across runs.
        int segmentSize = ThreadLocalRandom.current().nextInt(512) * 1024 * 1024 + 64 * 1024 * 1024;
        Properties logProperties = new Properties();
        logProperties.put(TopicConfig.SEGMENT_BYTES_CONFIG, Integer.toString(segmentSize));
        logProperties.put(TopicConfig.FLUSH_MESSAGES_INTERVAL_CONFIG, Long.toString(flushInterval));
        LogConfig logConfig = new LogConfig(logProperties);
        writables[i] = new LogWritable(new File(dir, "kafka-test-" + i), logConfig, scheduler, messageSet);
    } else {
        System.err.println("Must specify what to write to with one of --log, --channel, or --mmap");
        Exit.exit(1);
    }
}
// Round bytesToWrite down to a multiple of numFiles so each file gets an equal share.
bytesToWrite = (bytesToWrite / numFiles) * numFiles;
System.out.printf("%10s\t%10s\t%10s%n", "mb_sec", "avg_latency", "max_latency");
long beginTest = System.nanoTime();
long maxLatency = 0L;
long totalLatency = 0L;   // accumulated write latency (ns) since start of run
long count = 0L;          // number of writes performed; also drives round-robin file choice
long written = 0L;        // bytes written since the last progress report
long totalWritten = 0L;
long lastReport = beginTest;
// Main benchmark loop: time each write individually and track latency extremes.
while (totalWritten + bufferSize < bytesToWrite) {
    long start = System.nanoTime();
    int writeSize = writables[(int) (count % numFiles)].write();
    long elapsed = System.nanoTime() - start;
    maxLatency = Math.max(elapsed, maxLatency);
    totalLatency += elapsed;
    written += writeSize;
    count += 1;
    totalWritten += writeSize;
    if ((start - lastReport) / (1000.0 * 1000.0) > reportingInterval) {
        // A reporting interval (ms) has elapsed: print throughput (MB/s),
        // average latency (ms) and max latency (ms), then reset interval stats.
        double elapsedSecs = (start - lastReport) / (1000.0 * 1000.0 * 1000.0);
        double mb = written / (1024.0 * 1024.0);
        System.out.printf("%10.3f\t%10.3f\t%10.3f%n", mb / elapsedSecs, (totalLatency / (double) count) / (1000.0 * 1000.0), maxLatency / (1000.0 * 1000.0));
        lastReport = start;
        written = 0;
        maxLatency = 0L;
        totalLatency = 0L;
    } else if (written > maxThroughputBytes * (reportingInterval / 1000.0)) {
        // Throttle: we are ahead of the requested max throughput for this interval,
        // so sleep until the interval would naturally end.
        long lastReportMs = lastReport / (1000 * 1000);
        long now = System.nanoTime() / (1000 * 1000);
        long sleepMs = lastReportMs + reportingInterval - now;
        if (sleepMs > 0)
            Thread.sleep(sleepMs);
    }
}
// Final summary: overall throughput across the whole run.
double elapsedSecs = (System.nanoTime() - beginTest) / (1000.0 * 1000.0 * 1000.0);
System.out.println((bytesToWrite / (1024.0 * 1024.0 * elapsedSecs)) + " MB per sec");
scheduler.shutdown();
for (Writable writable : writables) {
    writable.close();
}
}
/**
 * Applies the requested compression level to the codec-specific builder.
 * Only the GZIP, LZ4 and ZSTD builders are configured here; for every other
 * compression type this method does nothing.
 */
private static void setupCompression(CompressionType compressionType,
                                     Compression.Builder<? extends Compression> compressionBuilder,
                                     Integer compressionLevel) {
    if (compressionType == CompressionType.GZIP) {
        ((GzipCompression.Builder) compressionBuilder).level(compressionLevel);
    } else if (compressionType == CompressionType.LZ4) {
        ((Lz4Compression.Builder) compressionBuilder).level(compressionLevel);
    } else if (compressionType == CompressionType.ZSTD) {
        ((ZstdCompression.Builder) compressionBuilder).level(compressionLevel);
    }
    // All remaining compression types: no level to configure.
}
| TestLinearWriteSpeed |
java | spring-projects__spring-boot | core/spring-boot-autoconfigure/src/testFixtures/java/org/springframework/boot/autoconfigure/jndi/TestableInitialContextFactory.java | {
"start": 1032,
"end": 1738
} | class ____ implements InitialContextFactory {
private static TestableContext context;
@Override
public Context getInitialContext(Hashtable<?, ?> environment) {
return getContext();
}
public static void bind(String name, Object obj) {
try {
getContext().bind(name, obj);
}
catch (NamingException ex) {
throw new IllegalStateException(ex);
}
}
public static void clearAll() {
getContext().clearAll();
}
private static TestableContext getContext() {
if (context == null) {
try {
context = new TestableContext();
}
catch (NamingException ex) {
throw new IllegalStateException(ex);
}
}
return context;
}
private static final | TestableInitialContextFactory |
java | alibaba__nacos | sys/src/main/java/com/alibaba/nacos/sys/utils/InetUtils.java | {
"start": 10550,
"end": 11557
} | class ____ extends SlowEvent {
private String oldIP;
private String newIP;
public String getOldIP() {
return oldIP;
}
public void setOldIP(String oldIP) {
this.oldIP = oldIP;
}
public String getNewIP() {
return newIP;
}
public void setNewIP(String newIP) {
this.newIP = newIP;
}
@Override
public String toString() {
return "IPChangeEvent{" + "oldIP='" + oldIP + '\'' + ", newIP='" + newIP + '\'' + '}';
}
}
public static String getGrpcListenIp() {
String grpcListenIp = System.getProperty(NACOS_REMOTE_GRPC_LISTEN_IP);
if (StringUtils.isNotBlank(grpcListenIp) && !InternetAddressUtil.isIp(grpcListenIp)) {
throw new RuntimeException("nacos address " + grpcListenIp + " is not ip");
}
return grpcListenIp;
}
}
| IPChangeEvent |
java | spring-projects__spring-boot | module/spring-boot-rsocket/src/main/java/org/springframework/boot/rsocket/netty/NettyRSocketServerFactory.java | {
"start": 9055,
"end": 9653
} | class ____ extends SslCustomizer {
private final SslBundle sslBundle;
private TcpServerSslCustomizer(Ssl.@Nullable ClientAuth clientAuth, SslBundle sslBundle,
Map<String, SslBundle> serverNameSslBundles) {
super(Ssl.ClientAuth.map(clientAuth, ClientAuth.NONE, ClientAuth.OPTIONAL, ClientAuth.REQUIRE));
this.sslBundle = sslBundle;
}
private TcpServer apply(TcpServer server) {
GenericSslContextSpec<?> sslContextSpec = createSslContextSpec(this.sslBundle);
return server.secure((spec) -> spec.sslContext(sslContextSpec));
}
}
private static final | TcpServerSslCustomizer |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/embeddable/strategy/usertype/embedded/record/RecordAsCompositeTypeEmbeddableTest.java | {
"start": 1723,
"end": 3480
} | class ____ implements CompositeUserType<MonetaryAmount> {
public record MonetaryAmountMapper(
BigDecimal amount,
String currency
) {
}
public MonetaryAmountType() {
}
@Override
public Object getPropertyValue(MonetaryAmount component, int property) throws HibernateException {
//Alphabetical
return switch ( property ) {
case 0 -> component.getNumber().numberValueExact( BigDecimal.class );
case 1 -> component.getCurrency().getCurrencyCode();
default -> null;
};
}
@Override
public MonetaryAmount instantiate(ValueAccess values) {
//Alphabetical
BigDecimal amount = values.getValue( 0, BigDecimal.class );
String currency = values.getValue( 1, String.class );
return FastMoney.of( amount, currency );
}
@Override
public Class<?> embeddable() {
return MonetaryAmountMapper.class;
}
@Override
public Class<MonetaryAmount> returnedClass() {
return MonetaryAmount.class;
}
@Override
public boolean equals(MonetaryAmount x, MonetaryAmount y) {
return Objects.equals( x, y );
}
@Override
public int hashCode(MonetaryAmount x) {
return x.hashCode();
}
@Override
public MonetaryAmount deepCopy(MonetaryAmount value) {
return value;
}
@Override
public boolean isMutable() {
return false;
}
@Override
public Serializable disassemble(MonetaryAmount value) {
return (Serializable) value;
}
@Override
public MonetaryAmount assemble(Serializable cached, Object owner) {
return (MonetaryAmount) cached;
}
@Override
public MonetaryAmount replace(MonetaryAmount detached, MonetaryAmount managed, Object owner) {
return detached;
}
}
@Entity(name="RecordAsCompositeTypeEmbeddableEntity")
public static | MonetaryAmountType |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java | {
"start": 1773,
"end": 3563
} | class ____ {
private static final Logger LOG = LoggerFactory.getLogger(TestDNS.class);
private static final String DEFAULT = "default";
// This is not a legal hostname (starts with a hyphen). It will never
// be returned on any test machine.
private static final String DUMMY_HOSTNAME = "-DUMMY_HOSTNAME";
private static final String INVALID_DNS_SERVER = "0.0.0.0";
/**
* Test that asking for the default hostname works
* @throws Exception if hostname lookups fail
*/
@Test
public void testGetLocalHost() throws Exception {
String hostname = DNS.getDefaultHost(DEFAULT);
assertNotNull(hostname);
}
/**
* Test that repeated calls to getting the local host are fairly fast, and
* hence that caching is being used
* @throws Exception if hostname lookups fail
*/
@Test
public void testGetLocalHostIsFast() throws Exception {
String hostname1 = DNS.getDefaultHost(DEFAULT);
assertNotNull(hostname1);
String hostname2 = DNS.getDefaultHost(DEFAULT);
long t1 = Time.now();
String hostname3 = DNS.getDefaultHost(DEFAULT);
long t2 = Time.now();
assertEquals(hostname3, hostname2);
assertEquals(hostname2, hostname1);
long interval = t2 - t1;
assertTrue(interval < 20000,
"Took too long to determine local host - caching is not working");
}
/**
* Test that our local IP address is not null
* @throws Exception if something went wrong
*/
@Test
public void testLocalHostHasAnAddress() throws Exception {
assertNotNull(getLocalIPAddr());
}
private InetAddress getLocalIPAddr() throws UnknownHostException {
String hostname = DNS.getDefaultHost(DEFAULT);
InetAddress localhost = InetAddress.getByName(hostname);
return localhost;
}
/**
* Test null | TestDNS |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/reservedstate/ReservedStateHandler.java | {
"start": 835,
"end": 1040
} | interface ____ for implementing 'operator mode' cluster state updates.
*
* <p>
* Reserving cluster state, for file based settings and modules/plugins, requires
* that we have a separate update handler | used |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/kstream/internals/AbstractConfigurableStoreFactory.java | {
"start": 1088,
"end": 2149
} | class ____ implements StoreFactory {
private final Set<String> connectedProcessorNames = new HashSet<>();
private DslStoreSuppliers dslStoreSuppliers;
public AbstractConfigurableStoreFactory(final DslStoreSuppliers initialStoreSuppliers) {
this.dslStoreSuppliers = initialStoreSuppliers;
}
@Override
public void configure(final StreamsConfig config) {
if (dslStoreSuppliers == null) {
dslStoreSuppliers = config.getConfiguredInstance(
StreamsConfig.DSL_STORE_SUPPLIERS_CLASS_CONFIG,
DslStoreSuppliers.class,
config.originals()
);
}
}
@Override
public Set<String> connectedProcessorNames() {
return connectedProcessorNames;
}
protected DslStoreSuppliers dslStoreSuppliers() {
if (dslStoreSuppliers == null) {
throw new IllegalStateException("Expected configure() to be called before using dslStoreSuppliers");
}
return dslStoreSuppliers;
}
}
| AbstractConfigurableStoreFactory |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/superclass/auditoverride/ClassOverrideNotAuditedEntity.java | {
"start": 581,
"end": 1799
} | class ____ extends AuditedBaseEntity {
@Audited
private String str2;
public ClassOverrideNotAuditedEntity() {
}
public ClassOverrideNotAuditedEntity(String str1, Integer number, String str2) {
super( str1, number );
this.str2 = str2;
}
public ClassOverrideNotAuditedEntity(String str1, Integer number, Integer id, String str2) {
super( str1, number, id );
this.str2 = str2;
}
@Override
public boolean equals(Object o) {
if ( this == o ) {
return true;
}
if ( !(o instanceof ClassOverrideNotAuditedEntity) ) {
return false;
}
if ( !super.equals( o ) ) {
return false;
}
ClassOverrideNotAuditedEntity that = (ClassOverrideNotAuditedEntity) o;
if ( str2 != null ? !str2.equals( that.str2 ) : that.str2 != null ) {
return false;
}
return true;
}
@Override
public int hashCode() {
int result = super.hashCode();
result = 31 * result + (str2 != null ? str2.hashCode() : 0);
return result;
}
@Override
public String toString() {
return "ClassOverrideNotAuditedEntity(" + super.toString() + ", str2 = " + str2 + ")";
}
public String getStr2() {
return str2;
}
public void setStr2(String str2) {
this.str2 = str2;
}
}
| ClassOverrideNotAuditedEntity |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/IntegerParam.java | {
"start": 1930,
"end": 2823
} | class ____ extends Param.Domain<Integer> {
/** The radix of the number. */
final int radix;
Domain(final String paramName) {
this(paramName, 10);
}
Domain(final String paramName, final int radix) {
super(paramName);
this.radix = radix;
}
@Override
public String getDomain() {
return "<" + NULL + " | int in radix " + radix + ">";
}
@Override
Integer parse(final String str) {
try{
return NULL.equals(str) || str == null ? null : Integer.parseInt(str,
radix);
} catch(NumberFormatException e) {
throw new IllegalArgumentException("Failed to parse \"" + str
+ "\" as a radix-" + radix + " integer.", e);
}
}
/** Convert an Integer to a String. */
String toString(final Integer n) {
return n == null? NULL: Integer.toString(n, radix);
}
}
}
| Domain |
java | elastic__elasticsearch | x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/capabilities/Unresolvable.java | {
"start": 317,
"end": 640
} | interface ____ extends Resolvable {
String UNRESOLVED_PREFIX = "?";
@Override
default boolean resolved() {
return false;
}
/**
* NOTE: Any non-null return value from this method indicates that the item in question could not be resolved.
*/
String unresolvedMessage();
}
| Unresolvable |
java | alibaba__nacos | plugin/datasource/src/test/java/com/alibaba/nacos/plugin/datasource/MapperManagerTest.java | {
"start": 1220,
"end": 3041
} | class ____ {
@Test
void testInstance() {
MapperManager instance = MapperManager.instance(false);
assertNotNull(instance);
}
@Test
void testLoadInitial() throws NoSuchFieldException, IllegalAccessException {
MapperManager instance = MapperManager.instance(false);
instance.loadInitial();
Class<MapperManager> mapperManagerClass = MapperManager.class;
Field declaredField = mapperManagerClass.getDeclaredField("MAPPER_SPI_MAP");
declaredField.setAccessible(true);
Map<String, Map<String, Mapper>> map = (Map<String, Map<String, Mapper>>) declaredField.get(instance);
assertEquals(2, map.size());
}
@Test
void testJoin() {
MapperManager.join(new AbstractMapperByMysql() {
@Override
public String getTableName() {
return "test";
}
@Override
public String getDataSource() {
return DataSourceConstant.MYSQL;
}
});
MapperManager instance = MapperManager.instance(false);
Mapper mapper = instance.findMapper(DataSourceConstant.MYSQL, "test");
assertNotNull(mapper);
}
@Test
void testFindMapper() {
testJoin();
MapperManager instance = MapperManager.instance(false);
Mapper mapper = instance.findMapper(DataSourceConstant.MYSQL, "test");
assertNotNull(mapper);
}
@Test
void testEnableDataSourceLogJoin() {
MapperManager.join(new TestMapper());
MapperManager instance = MapperManager.instance(true);
ConfigInfoGrayMapper mapper = instance.findMapper(DataSourceConstant.MYSQL, "enable_data_source_log_test");
assertNotNull(mapper);
}
}
| MapperManagerTest |
java | quarkusio__quarkus | integration-tests/hibernate-search-orm-elasticsearch/src/test/java/io/quarkus/it/hibernate/search/orm/elasticsearch/ElasticsearchClientInGraalIT.java | {
"start": 144,
"end": 216
} | class ____ extends ElasticsearchClientTest {
}
| ElasticsearchClientInGraalIT |
java | quarkusio__quarkus | extensions/smallrye-health/spi/src/main/java/io/quarkus/smallrye/health/deployment/spi/HealthBuildItem.java | {
"start": 113,
"end": 1137
} | class ____ extends MultiBuildItem {
private final String healthCheckClass;
private final boolean enabled;
/**
*
* @param healthCheckClass
* @param enabled
* @param configRootName This parameter is not used
* @deprecated Use {@link #HealthBuildItem(String, boolean)} instead.
*/
@Deprecated
public HealthBuildItem(String healthCheckClass, boolean enabled, String configRootName) {
this(healthCheckClass, enabled);
}
/**
* @param healthCheckClass the name of the health check class, needs to implement
* {@link org.eclipse.microprofile.health.HealthCheck}
* @param enabled whether the check is enabled
*/
public HealthBuildItem(String healthCheckClass, boolean enabled) {
this.healthCheckClass = healthCheckClass;
this.enabled = enabled;
}
public String getHealthCheckClass() {
return this.healthCheckClass;
}
public boolean isEnabled() {
return enabled;
}
}
| HealthBuildItem |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/support/ActionFilter.java | {
"start": 757,
"end": 1433
} | interface ____ {
/**
* The position of the filter in the chain. Execution is done from lowest order to highest.
*/
int order();
/**
* Enables filtering the execution of an action on the request side, either by sending a response through the
* {@link ActionListener} or by continuing the execution through the given {@link ActionFilterChain chain}
*/
<Request extends ActionRequest, Response extends ActionResponse> void apply(
Task task,
String action,
Request request,
ActionListener<Response> listener,
ActionFilterChain<Request, Response> chain
);
/**
* A simple base | ActionFilter |
java | spring-projects__spring-framework | spring-webmvc/src/main/java/org/springframework/web/servlet/view/xslt/XsltViewResolver.java | {
"start": 2506,
"end": 4844
} | interface ____ custom handling of transformation errors and warnings.
* <p>If not set, a default
* {@link org.springframework.util.xml.SimpleTransformErrorListener} is
* used that simply logs warnings using the logger instance of the view class,
* and rethrows errors to discontinue the XML transformation.
* @see org.springframework.util.xml.SimpleTransformErrorListener
*/
public void setErrorListener(ErrorListener errorListener) {
this.errorListener = errorListener;
}
/**
* Set whether the XSLT transformer may add additional whitespace when
* outputting the result tree.
* <p>Default is {@code true} (on); set this to {@code false} (off)
* to not specify an "indent" key, leaving the choice up to the stylesheet.
* @see javax.xml.transform.OutputKeys#INDENT
*/
public void setIndent(boolean indent) {
this.indent = indent;
}
/**
* Set arbitrary transformer output properties to be applied to the stylesheet.
* <p>Any values specified here will override defaults that this view sets
* programmatically.
* @see javax.xml.transform.Transformer#setOutputProperty
*/
public void setOutputProperties(Properties outputProperties) {
this.outputProperties = outputProperties;
}
/**
* Turn on/off the caching of the XSLT templates.
* <p>The default value is "true". Only set this to "false" in development,
* where caching does not seriously impact performance.
*/
public void setCacheTemplates(boolean cacheTemplates) {
this.cacheTemplates = cacheTemplates;
}
@Override
protected Class<?> requiredViewClass() {
return XsltView.class;
}
@Override
protected AbstractUrlBasedView instantiateView() {
return (getViewClass() == XsltView.class ? new XsltView() : super.instantiateView());
}
@Override
protected AbstractUrlBasedView buildView(String viewName) throws Exception {
XsltView view = (XsltView) super.buildView(viewName);
if (this.sourceKey != null) {
view.setSourceKey(this.sourceKey);
}
if (this.uriResolver != null) {
view.setUriResolver(this.uriResolver);
}
if (this.errorListener != null) {
view.setErrorListener(this.errorListener);
}
view.setIndent(this.indent);
if (this.outputProperties != null) {
view.setOutputProperties(this.outputProperties);
}
view.setCacheTemplates(this.cacheTemplates);
return view;
}
}
| for |
java | google__dagger | dagger-grpc-server-processor/main/java/dagger/grpc/server/processor/ServiceDefinitionTypeGenerator.java | {
"start": 1157,
"end": 2620
} | class ____ extends SourceGenerator {
private final GrpcServiceModel grpcServiceModel;
ServiceDefinitionTypeGenerator(GrpcServiceModel grpcServiceModel) {
super(grpcServiceModel.packageName());
this.grpcServiceModel = grpcServiceModel;
}
@Override
protected TypeSpec createType() {
TypeSpec.Builder type =
interfaceBuilder(grpcServiceModel.serviceDefinitionTypeName.simpleName())
.addJavadoc("A component must implement this interface.\n")
.addModifiers(PUBLIC);
grpcServiceModel.generatedAnnotation().ifPresent(type::addAnnotation);
type.addType(
interfaceBuilder(grpcServiceModel.serviceDefinitionTypeFactoryName.simpleName())
.addModifiers(PUBLIC, STATIC)
.addMethod(
methodBuilder("grpcService")
.addModifiers(PUBLIC, ABSTRACT)
.returns(grpcServiceModel.serviceDefinitionTypeName)
.addParameter(
Dagger.GrpcServer.GRPC_CALL_METADATA_MODULE, "grpcCallMetadataModule")
.build())
.build());
type.addMethod(
methodBuilder(grpcServiceModel.subcomponentServiceDefinitionMethodName())
.addModifiers(PUBLIC, ABSTRACT)
.returns(IoGrpc.SERVER_SERVICE_DEFINITION)
.addAnnotation(grpcServiceModel.forGrpcService())
.build());
return type.build();
}
}
| ServiceDefinitionTypeGenerator |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/util/Strings_isEmpty_Test.java | {
"start": 842,
"end": 1225
} | class ____ {
@Test
void should_return_true_if_String_is_empty() {
assertThat(Strings.isNullOrEmpty("")).isTrue();
}
@Test
void should_return_true_if_String_is_null() {
assertThat(Strings.isNullOrEmpty(null)).isTrue();
}
@Test
void should_return_false_if_String_is_not_empty() {
assertThat(Strings.isNullOrEmpty("foo")).isFalse();
}
}
| Strings_isEmpty_Test |
java | apache__hadoop | hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/common/TestHdfsCompatFsCommand.java | {
"start": 4938,
"end": 5341
} | class ____ implements HdfsCompatSuite {
@Override
public String getSuiteName() {
return "Mkdir";
}
@Override
public Class<? extends AbstractHdfsCompatCase>[] getApiCases() {
return new Class[]{
HdfsCompatMkdirTestCases.class,
};
}
@Override
public String[] getShellCases() {
return new String[0];
}
}
private static | MkdirTestSuite |
java | apache__camel | components/camel-activemq/src/generated/java/org/apache/camel/component/activemq/converter/ActiveMQConverterLoader.java | {
"start": 894,
"end": 2682
} | class ____ implements TypeConverterLoader, CamelContextAware {
private CamelContext camelContext;
public ActiveMQConverterLoader() {
}
@Override
public void setCamelContext(CamelContext camelContext) {
this.camelContext = camelContext;
}
@Override
public CamelContext getCamelContext() {
return camelContext;
}
@Override
public void load(TypeConverterRegistry registry) throws TypeConverterLoaderException {
registerConverters(registry);
}
private void registerConverters(TypeConverterRegistry registry) {
addTypeConverter(registry, org.apache.activemq.command.ActiveMQDestination.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = getActiveMQConverter().toDestination((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
}
private static void addTypeConverter(TypeConverterRegistry registry, Class<?> toType, Class<?> fromType, boolean allowNull, SimpleTypeConverter.ConversionMethod method) {
registry.addTypeConverter(toType, fromType, new SimpleTypeConverter(allowNull, method));
}
private volatile org.apache.camel.component.activemq.converter.ActiveMQConverter activeMQConverter;
private org.apache.camel.component.activemq.converter.ActiveMQConverter getActiveMQConverter() {
if (activeMQConverter == null) {
activeMQConverter = new org.apache.camel.component.activemq.converter.ActiveMQConverter();
CamelContextAware.trySetCamelContext(activeMQConverter, camelContext);
}
return activeMQConverter;
}
}
| ActiveMQConverterLoader |
java | elastic__elasticsearch | x-pack/plugin/slm/src/javaRestTest/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java | {
"start": 3141,
"end": 47393
} | class ____ extends ESRestTestCase {
private static final String NEVER_EXECUTE_CRON_SCHEDULE = "* * * 31 FEB ? *";
public static TemporaryFolder repoDir = new TemporaryFolder();
public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
// TODO: Revert to integ-test distro once we sort out issues with usage and info xpack apis
.distribution(DistributionType.DEFAULT)
.nodes(2)
.module("x-pack-slm")
.module("x-pack-ilm")
.module("searchable-snapshots")
.module("data-streams")
.setting("path.repo", () -> repoDir.getRoot().getAbsolutePath())
.setting("xpack.security.enabled", "false")
.setting("xpack.license.self_generated.type", "trial")
.setting("xpack.searchable.snapshot.shared_cache.size", "16MB")
.setting("xpack.searchable.snapshot.shared_cache.region_size", "256KB")
.setting("indices.lifecycle.poll_interval", "1000ms")
.build();
@ClassRule
public static RuleChain rules = RuleChain.outerRule(repoDir).around(cluster);
// as we are testing the SLM history entries we'll preserve the "slm-history-ilm-policy" policy as it'll be associated with the
// .slm-history-* indices and we won't be able to delete it when we wipe out the cluster
@Override
protected boolean preserveILMPoliciesUponCompletion() {
return true;
}
@Override
protected String getTestRestCluster() {
return cluster.getHttpAddresses();
}
public void testMissingRepo() throws Exception {
SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy(
"missing-repo-policy",
"snap",
"0 0/15 * * * ?",
"missing-repo",
Collections.emptyMap(),
SnapshotRetentionConfiguration.EMPTY
);
Request putLifecycle = new Request("PUT", "/_slm/policy/missing-repo-policy");
XContentBuilder lifecycleBuilder = JsonXContent.contentBuilder();
policy.toXContent(lifecycleBuilder, ToXContent.EMPTY_PARAMS);
putLifecycle.setJsonEntity(Strings.toString(lifecycleBuilder));
ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(putLifecycle));
Response resp = e.getResponse();
assertThat(resp.getStatusLine().getStatusCode(), equalTo(400));
String jsonError = EntityUtils.toString(resp.getEntity());
assertThat(jsonError, containsString("\"type\":\"illegal_argument_exception\""));
assertThat(jsonError, containsString("\"reason\":\"no such repository [missing-repo]\""));
}
@SuppressWarnings("unchecked")
public void testFullPolicySnapshot() throws Exception {
final String indexName = "test";
final String policyName = "full-policy";
final String repoId = "full-policy-repo";
int docCount = randomIntBetween(10, 50);
for (int i = 0; i < docCount; i++) {
index(client(), indexName, "" + i, "foo", "bar");
}
// Create a snapshot repo
initializeRepo(repoId);
// allow arbitrarily frequent slm snapshots
disableSLMMinimumIntervalValidation();
var schedule = randomBoolean() ? "*/1 * * * * ?" : "1s";
createSnapshotPolicy(policyName, "snap", schedule, repoId, indexName, true);
// Check that the snapshot was actually taken
assertBusy(() -> {
Response response = client().performRequest(new Request("GET", "/_snapshot/" + repoId + "/_all"));
Map<String, Object> snapshotResponseMap;
try (InputStream is = response.getEntity().getContent()) {
snapshotResponseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true);
}
assertThat(snapshotResponseMap.size(), greaterThan(0));
List<Map<String, Object>> snapResponse = ((List<Map<String, Object>>) snapshotResponseMap.get("snapshots"));
assertThat(snapResponse, not(empty()));
assertThat(snapResponse.get(0).get("indices"), equalTo(Collections.singletonList(indexName)));
assertThat((String) snapResponse.get(0).get("snapshot"), startsWith("snap-"));
Map<String, Object> metadata = (Map<String, Object>) snapResponse.get(0).get("metadata");
assertNotNull(metadata);
assertThat(metadata.get("policy"), equalTo(policyName));
});
assertBusy(() -> assertHistoryIsPresent(policyName, true, repoId, CREATE_OPERATION));
// Check that the last success date was written to the cluster state
assertBusy(() -> {
Request getReq = new Request("GET", "/_slm/policy/" + policyName);
Response policyMetadata = client().performRequest(getReq);
Map<String, Object> policyResponseMap;
try (InputStream is = policyMetadata.getEntity().getContent()) {
policyResponseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true);
}
Map<String, Object> policyMetadataMap = (Map<String, Object>) policyResponseMap.get(policyName);
Map<String, Object> lastSuccessObject = (Map<String, Object>) policyMetadataMap.get("last_success");
assertNotNull(lastSuccessObject);
Long lastSuccess = (Long) lastSuccessObject.get("time");
Long modifiedDate = (Long) policyMetadataMap.get("modified_date_millis");
assertNotNull(lastSuccess);
assertNotNull(modifiedDate);
assertThat(lastSuccess, greaterThan(modifiedDate));
String lastSnapshotName = (String) lastSuccessObject.get("snapshot_name");
assertThat(lastSnapshotName, startsWith("snap-"));
});
// Check that the stats are written
assertBusy(() -> {
Map<String, Object> stats = getSLMStats();
Map<String, Object> policyStats = policyStatsAsMap(stats);
Map<String, Object> policyIdStats = (Map<String, Object>) policyStats.get(policyName);
int snapsTaken = (int) policyIdStats.get(SnapshotLifecycleStats.SnapshotPolicyStats.SNAPSHOTS_TAKEN.getPreferredName());
int totalTaken = (int) stats.get(SnapshotLifecycleStats.TOTAL_TAKEN.getPreferredName());
assertThat(snapsTaken, greaterThanOrEqualTo(1));
assertThat(totalTaken, greaterThanOrEqualTo(1));
});
Request delReq = new Request("DELETE", "/_slm/policy/" + policyName);
assertOK(client().performRequest(delReq));
}
@SuppressWarnings("unchecked")
public void testPolicyFailure() throws Exception {
final String policyName = "failure-policy";
final String repoName = "policy-failure-repo";
final String indexPattern = "index-doesnt-exist";
initializeRepo(repoName);
// allow arbitrarily frequent slm snapshots
disableSLMMinimumIntervalValidation();
// Create a policy with ignore_unavailable: false and an index that doesn't exist
var schedule = randomBoolean() ? "*/1 * * * * ?" : "1s";
createSnapshotPolicy(policyName, "snap", schedule, repoName, indexPattern, false);
assertBusy(() -> {
// Check that the failure is written to the cluster state
Request getReq = new Request("GET", "/_slm/policy/" + policyName);
Response policyMetadata = client().performRequest(getReq);
try (InputStream is = policyMetadata.getEntity().getContent()) {
Map<String, Object> responseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true);
Map<String, Object> policyMetadataMap = (Map<String, Object>) responseMap.get(policyName);
Map<String, Object> lastFailureObject = (Map<String, Object>) policyMetadataMap.get("last_failure");
assertNotNull(lastFailureObject);
Long lastFailure = (Long) lastFailureObject.get("time");
Long modifiedDate = (Long) policyMetadataMap.get("modified_date_millis");
assertNotNull(lastFailure);
assertNotNull(modifiedDate);
assertThat(lastFailure, greaterThan(modifiedDate));
String lastFailureInfo = (String) lastFailureObject.get("details");
assertNotNull(lastFailureInfo);
assertThat(lastFailureInfo, containsString("no such index [index-doesnt-exist]"));
String snapshotName = (String) lastFailureObject.get("snapshot_name");
assertNotNull(snapshotName);
assertThat(snapshotName, startsWith("snap-"));
}
assertHistoryIsPresent(policyName, false, repoName, CREATE_OPERATION);
Map<String, Object> stats = getSLMStats();
Map<String, Object> policyStats = policyStatsAsMap(stats);
Map<String, Object> policyIdStats = (Map<String, Object>) policyStats.get(policyName);
int snapsFailed = (int) policyIdStats.get(SnapshotLifecycleStats.SnapshotPolicyStats.SNAPSHOTS_FAILED.getPreferredName());
int totalFailed = (int) stats.get(SnapshotLifecycleStats.TOTAL_FAILED.getPreferredName());
assertThat(snapsFailed, greaterThanOrEqualTo(1));
assertThat(totalFailed, greaterThanOrEqualTo(1));
});
}
@SuppressWarnings("unchecked")
@TestIssueLogging(
value = "org.elasticsearch.xpack.slm:TRACE,org.elasticsearch.xpack.core.slm:TRACE,org.elasticsearch.snapshots:DEBUG",
issueUrl = "https://github.com/elastic/elasticsearch/issues/48531"
)
/**
 * Verifies manual policy execution: executing an unknown policy id is rejected with a
 * descriptive error, while executing a real policy creates a snapshot whose metadata is
 * stamped with the policy name, records a CREATE history entry, and counts exactly one
 * snapshot in both the per-policy and total SLM stats.
 */
public void testPolicyManualExecution() throws Exception {
    final String indexName = "test";
    final String policyName = "manual-policy";
    final String repoId = "manual-execution-repo";
    // Seed the index so the snapshot has content to capture.
    int docCount = randomIntBetween(10, 50);
    for (int i = 0; i < docCount; i++) {
        index(client(), indexName, "" + i, "foo", "bar");
    }
    logSLMPolicies();
    // Create a snapshot repo
    initializeRepo(repoId);
    // NEVER_EXECUTE_CRON_SCHEDULE ensures the policy only runs when triggered manually.
    createSnapshotPolicy(policyName, "snap", NEVER_EXECUTE_CRON_SCHEDULE, repoId, indexName, true);
    // Executing a nonexistent policy must fail with a clear message.
    ResponseException badResp = expectThrows(
        ResponseException.class,
        () -> client().performRequest(new Request("POST", "/_slm/policy/" + policyName + "-bad/_execute"))
    );
    assertThat(
        EntityUtils.toString(badResp.getResponse().getEntity()),
        containsString("no such snapshot lifecycle policy [" + policyName + "-bad]")
    );
    final String snapshotName = executePolicy(policyName);
    logSLMPolicies();
    // Check that the executed snapshot is created
    assertBusy(() -> {
        try {
            Response response = client().performRequest(new Request("GET", "/_snapshot/" + repoId + "/" + snapshotName));
            Map<String, Object> snapshotResponseMap;
            try (InputStream is = response.getEntity().getContent()) {
                snapshotResponseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true);
            }
            assertThat(snapshotResponseMap.size(), greaterThan(0));
            // The snapshot's metadata must carry the policy that created it.
            final Map<String, Object> metadata = extractMetadata(snapshotResponseMap, snapshotName);
            assertNotNull(metadata);
            assertThat(metadata.get("policy"), equalTo(policyName));
            assertHistoryIsPresent(policyName, true, repoId, CREATE_OPERATION);
        } catch (ResponseException e) {
            fail("expected snapshot to exist but it does not: " + EntityUtils.toString(e.getResponse().getEntity()));
        }
        // Exactly one snapshot should be counted per policy and in the totals.
        Map<String, Object> stats = getSLMStats();
        Map<String, Object> policyStats = policyStatsAsMap(stats);
        Map<String, Object> policyIdStats = (Map<String, Object>) policyStats.get(policyName);
        int snapsTaken = (int) policyIdStats.get(SnapshotLifecycleStats.SnapshotPolicyStats.SNAPSHOTS_TAKEN.getPreferredName());
        int totalTaken = (int) stats.get(SnapshotLifecycleStats.TOTAL_TAKEN.getPreferredName());
        assertThat(snapsTaken, equalTo(1));
        assertThat(totalTaken, equalTo(1));
    });
}
/**
 * Verifies that while SLM is stopped ({@code POST /_slm/stop}) the scheduled policy does not
 * fire, but manual policy execution and manual retention still work and are reflected in the
 * stats (one snapshot taken, one deleted, zero failures). SLM is restarted in a finally block
 * so later tests see a RUNNING service.
 */
@SuppressWarnings("unchecked")
public void testStartStopStatus() throws Exception {
    final String indexName = "test";
    final String policyName = "start-stop-policy";
    final String repoId = "start-stop-repo";
    // Seed the index so the snapshot has content to capture.
    int docCount = randomIntBetween(10, 50);
    for (int i = 0; i < docCount; i++) {
        index(client(), indexName, "" + i, "foo", "bar");
    }
    // Create a snapshot repo
    initializeRepo(repoId);
    // Stop SLM so nothing happens
    client().performRequest(new Request("POST", "/_slm/stop"));
    assertBusy(() -> {
        logger.info("--> waiting for SLM to stop");
        assertThat(
            EntityUtils.toString(client().performRequest(new Request("GET", "/_slm/status")).getEntity()),
            containsString("STOPPED")
        );
    });
    try {
        var schedule = randomBoolean() ? "0 0/15 * * * ?" : "15m";
        // Zero-age retention means any snapshot is immediately eligible for deletion.
        createSnapshotPolicy(
            policyName,
            "snap",
            schedule,
            repoId,
            indexName,
            true,
            new SnapshotRetentionConfiguration(TimeValue.ZERO, null, null)
        );
        long start = System.currentTimeMillis();
        final String snapshotName = executePolicy(policyName);
        // Check that the executed snapshot is created
        assertBusy(() -> {
            try {
                logger.info("--> checking for snapshot creation...");
                Response response = client().performRequest(new Request("GET", "/_snapshot/" + repoId + "/" + snapshotName));
                Map<String, Object> snapshotResponseMap;
                try (InputStream is = response.getEntity().getContent()) {
                    snapshotResponseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true);
                }
                assertThat(snapshotResponseMap.size(), greaterThan(0));
                final Map<String, Object> metadata = extractMetadata(snapshotResponseMap, snapshotName);
                assertNotNull(metadata);
                assertThat(metadata.get("policy"), equalTo(policyName));
                assertHistoryIsPresent(policyName, true, repoId, CREATE_OPERATION);
            } catch (ResponseException e) {
                fail("expected snapshot to exist but it does not: " + EntityUtils.toString(e.getResponse().getEntity()));
            }
        });
        // Sleep until at least one second has passed since the policy was created, so that if
        // SLM were (incorrectly) still running, its schedule would have had a chance to fire.
        // BUG FIX: the original used Math.min(0, ...), which always evaluated to 0 and never slept.
        Thread.sleep(Math.max(0, TimeValue.timeValueSeconds(1).millis() - (System.currentTimeMillis() - start)));
        client().performRequest(new Request("POST", "/_slm/_execute_retention"));
        // Retention and the manually executed policy should still have run,
        // but only the one we manually ran.
        assertBusy(() -> {
            logger.info("--> checking for stats updates...");
            Map<String, Object> stats = getSLMStats();
            Map<String, Object> policyStats = policyStatsAsMap(stats);
            Map<String, Object> policyIdStats = (Map<String, Object>) policyStats.get(policyName);
            int snapsTaken = (int) policyIdStats.get(SnapshotLifecycleStats.SnapshotPolicyStats.SNAPSHOTS_TAKEN.getPreferredName());
            int totalTaken = (int) stats.get(SnapshotLifecycleStats.TOTAL_TAKEN.getPreferredName());
            int totalFailed = (int) stats.get(SnapshotLifecycleStats.TOTAL_FAILED.getPreferredName());
            int totalDeleted = (int) stats.get(SnapshotLifecycleStats.TOTAL_DELETIONS.getPreferredName());
            assertThat(snapsTaken, equalTo(1));
            assertThat(totalTaken, equalTo(1));
            assertThat(totalDeleted, equalTo(1));
            assertThat(totalFailed, equalTo(0));
        });
        // Clean up snapshots; retried because deletion can race with in-flight operations.
        assertBusy(() -> {
            try {
                wipeSnapshots();
            } catch (ResponseException e) {
                logger.error("got exception wiping snapshots", e);
                fail("got exception: " + EntityUtils.toString(e.getResponse().getEntity()));
            }
        });
    } finally {
        // Always restart SLM so subsequent tests are not affected by the stopped service.
        client().performRequest(new Request("POST", "/_slm/start"));
        assertBusy(() -> {
            logger.info("--> waiting for SLM to start");
            assertThat(
                EntityUtils.toString(client().performRequest(new Request("GET", "/_slm/status")).getEntity()),
                containsString("RUNNING")
            );
        });
    }
}
/**
 * Verifies time-based retention: a policy with a 1 ms retention period takes a snapshot, and
 * once the retention scheduler is enabled (running every second), the snapshot is deleted,
 * a DELETE history entry is written, and the SLM stats reflect the take/delete/retention-run
 * counters. The retention schedule setting is unset in a finally block.
 */
@SuppressWarnings("unchecked")
@TestIssueLogging(
    value = "org.elasticsearch.xpack.slm:TRACE,org.elasticsearch.xpack.core.slm:TRACE,org.elasticsearch.snapshots:TRACE",
    issueUrl = "https://github.com/elastic/elasticsearch/issues/48017"
)
public void testBasicTimeBasedRetention() throws Exception {
    final String indexName = "test";
    final String policyName = "basic-time-policy";
    final String repoId = "time-based-retention-repo";
    // Seed the index so the snapshot has content to capture.
    // (Removed an unused local: List<IndexRequestBuilder> indexReqs was never read.)
    int docCount = randomIntBetween(10, 50);
    for (int i = 0; i < docCount; i++) {
        index(client(), indexName, "" + i, "foo", "bar");
    }
    logSLMPolicies();
    // Create a snapshot repo
    initializeRepo(repoId);
    // Create a policy with a retention period of 1 millisecond
    createSnapshotPolicy(
        policyName,
        "snap",
        NEVER_EXECUTE_CRON_SCHEDULE,
        repoId,
        indexName,
        true,
        new SnapshotRetentionConfiguration(TimeValue.timeValueMillis(1), null, null)
    );
    // Manually create a snapshot
    final String snapshotName = executePolicy(policyName);
    // Check that the executed snapshot is created
    assertBusy(() -> {
        try {
            Response response = client().performRequest(new Request("GET", "/_snapshot/" + repoId + "/" + snapshotName));
            Map<String, Object> snapshotResponseMap;
            try (InputStream is = response.getEntity().getContent()) {
                snapshotResponseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true);
            }
            assertThat(snapshotResponseMap.size(), greaterThan(0));
            final Map<String, Object> metadata = extractMetadata(snapshotResponseMap, snapshotName);
            assertNotNull(metadata);
            assertThat(metadata.get("policy"), equalTo(policyName));
            assertHistoryIsPresent(policyName, true, repoId, CREATE_OPERATION);
        } catch (ResponseException e) {
            fail("expected snapshot to exist but it does not: " + EntityUtils.toString(e.getResponse().getEntity()));
        }
    }, 60, TimeUnit.SECONDS);
    // Run retention every second
    ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT);
    req.persistentSettings(Settings.builder().put(LifecycleSettings.SLM_RETENTION_SCHEDULE, "*/1 * * * * ?"));
    try (XContentBuilder builder = jsonBuilder()) {
        req.toXContent(builder, ToXContent.EMPTY_PARAMS);
        Request r = new Request("PUT", "/_cluster/settings");
        r.setJsonEntity(Strings.toString(builder));
        Response updateSettingsResp = client().performRequest(r);
        assertAcked(updateSettingsResp);
    }
    logSLMPolicies();
    try {
        // Check that the snapshot created by the policy has been removed by retention
        assertBusy(() -> {
            // We expect a failed response because the snapshot should not exist
            try {
                logger.info("--> checking to see if snapshot has been deleted...");
                Response response = client().performRequest(new Request("GET", "/_snapshot/" + repoId + "/" + snapshotName));
                assertThat(EntityUtils.toString(response.getEntity()), containsString("snapshot_missing_exception"));
            } catch (ResponseException e) {
                assertThat(EntityUtils.toString(e.getResponse().getEntity()), containsString("snapshot_missing_exception"));
            }
            assertHistoryIsPresent(policyName, true, repoId, DELETE_OPERATION);
            // Counters use greaterThanOrEqualTo because retention may run multiple times.
            Map<String, Object> stats = getSLMStats();
            Map<String, Object> policyStats = policyStatsAsMap(stats);
            Map<String, Object> policyIdStats = (Map<String, Object>) policyStats.get(policyName);
            int snapsTaken = (int) policyIdStats.get(SnapshotLifecycleStats.SnapshotPolicyStats.SNAPSHOTS_TAKEN.getPreferredName());
            int snapsDeleted = (int) policyIdStats.get(SnapshotLifecycleStats.SnapshotPolicyStats.SNAPSHOTS_DELETED.getPreferredName());
            int retentionRun = (int) stats.get(SnapshotLifecycleStats.RETENTION_RUNS.getPreferredName());
            int totalTaken = (int) stats.get(SnapshotLifecycleStats.TOTAL_TAKEN.getPreferredName());
            int totalDeleted = (int) stats.get(SnapshotLifecycleStats.TOTAL_DELETIONS.getPreferredName());
            assertThat(snapsTaken, greaterThanOrEqualTo(1));
            assertThat(totalTaken, greaterThanOrEqualTo(1));
            assertThat(retentionRun, greaterThanOrEqualTo(1));
            assertThat(snapsDeleted, greaterThanOrEqualTo(1));
            assertThat(totalDeleted, greaterThanOrEqualTo(1));
        }, 60, TimeUnit.SECONDS);
    } finally {
        // Unset retention
        ClusterUpdateSettingsRequest unsetRequest = new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT);
        unsetRequest.persistentSettings(Settings.builder().put(LifecycleSettings.SLM_RETENTION_SCHEDULE, (String) null));
        try (XContentBuilder builder = jsonBuilder()) {
            unsetRequest.toXContent(builder, ToXContent.EMPTY_PARAMS);
            Request r = new Request("PUT", "/_cluster/settings");
            r.setJsonEntity(Strings.toString(builder));
            client().performRequest(r);
        }
    }
}
/**
 * Verifies that snapshotting a data stream via an SLM policy records both the data stream name
 * and its backing index name in the resulting snapshot.
 */
@SuppressWarnings("unchecked")
public void testDataStreams() throws Exception {
    String dataStreamName = "ds-test";
    String repoId = "ds-repo";
    String policyName = "ds-policy";
    // Minimal mapping with the @timestamp field required by data streams.
    String mapping = """
        {
          "properties": {
            "@timestamp": {
              "type": "date"
            }
          }
        }""";
    Template template = new Template(null, new CompressedXContent(mapping), null);
    createComposableTemplate(client(), "ds-template", dataStreamName, template);
    client().performRequest(new Request("PUT", "_data_stream/" + dataStreamName));
    /*
     * We make the following request just to get the backing index name (we can't assume we know what it is based on the date because
     * this test could run across midnight on two days.
     */
    Response dataStreamResponse = client().performRequest(new Request("GET", "_data_stream/" + dataStreamName));
    final String dataStreamIndexName;
    try (InputStream is = dataStreamResponse.getEntity().getContent()) {
        Map<String, Object> dataStreamResponseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true);
        List<Map<String, Object>> dataStreams = (List<Map<String, Object>>) dataStreamResponseMap.get("data_streams");
        assertThat(dataStreams.size(), equalTo(1));
        List<Map<String, String>> indices = (List<Map<String, String>>) dataStreams.get(0).get("indices");
        assertThat(indices.size(), equalTo(1));
        dataStreamIndexName = indices.get(0).get("index_name");
        assertNotNull(dataStreamIndexName);
    }
    // Create a snapshot repo
    initializeRepo(repoId);
    createSnapshotPolicy(policyName, "snap", NEVER_EXECUTE_CRON_SCHEDULE, repoId, dataStreamName, true);
    final String snapshotName = executePolicy(policyName);
    // Check that the executed snapshot is created
    assertBusy(() -> {
        try {
            Response response = client().performRequest(new Request("GET", "/_snapshot/" + repoId + "/" + snapshotName));
            Map<String, Object> snapshotResponseMap;
            try (InputStream is = response.getEntity().getContent()) {
                snapshotResponseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true);
            }
            assertThat(snapshotResponseMap.size(), greaterThan(0));
            // The snapshot must list both the data stream and its backing index.
            final Map<String, Object> snapshot = extractSnapshot(snapshotResponseMap, snapshotName);
            assertEquals(Collections.singletonList(dataStreamName), snapshot.get("data_streams"));
            assertEquals(Collections.singletonList(dataStreamIndexName), snapshot.get("indices"));
        } catch (ResponseException e) {
            fail("expected snapshot to exist but it does not: " + EntityUtils.toString(e.getResponse().getEntity()));
        }
    });
}
/**
 * Checks that the {@code /_xpack} info endpoint reports the SLM feature as both available
 * and enabled.
 */
@SuppressWarnings("unchecked")
public void testSLMXpackInfo() {
    Map<String, Object> xpackFeatures = (Map<String, Object>) getLocation("/_xpack").get("features");
    assertNotNull(xpackFeatures);
    Map<String, Object> slmFeature = (Map<String, Object>) xpackFeatures.get("slm");
    assertNotNull(slmFeature);
    assertTrue((boolean) slmFeature.get("available"));
    assertTrue((boolean) slmFeature.get("enabled"));
}
/**
 * Checks that the {@code /_xpack/usage} endpoint reflects SLM activity: initially zero (or
 * absent) policy count, and after creating and executing one policy, a policy_count of 1 and
 * populated policy_stats.
 */
@SuppressWarnings("unchecked")
public void testSLMXpackUsage() throws Exception {
    Map<String, Object> slm = (Map<String, Object>) getLocation("/_xpack/usage").get("slm");
    assertNotNull(slm);
    assertTrue((boolean) slm.get("available"));
    assertTrue((boolean) slm.get("enabled"));
    // Before any policy exists, the count is either absent (null) or 0.
    assertThat(slm.get("policy_count"), anyOf(equalTo(null), equalTo(0)));
    // Create a snapshot repo
    initializeRepo("repo");
    // Create a policy with a retention period of 1 millisecond
    createSnapshotPolicy(
        "policy",
        "snap",
        NEVER_EXECUTE_CRON_SCHEDULE,
        "repo",
        "*",
        true,
        new SnapshotRetentionConfiguration(TimeValue.timeValueMillis(1), null, null)
    );
    final String snapshotName = executePolicy("policy");
    // Check that the executed snapshot is created
    assertBusy(() -> {
        try {
            logger.info("--> checking for snapshot creation...");
            Response response = client().performRequest(new Request("GET", "/_snapshot/repo/" + snapshotName));
            Map<String, Object> snapshotResponseMap;
            try (InputStream is = response.getEntity().getContent()) {
                snapshotResponseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true);
            }
            assertThat(snapshotResponseMap.size(), greaterThan(0));
        } catch (ResponseException e) {
            fail("expected snapshot to exist but it does not: " + EntityUtils.toString(e.getResponse().getEntity()));
        }
    });
    // Wait for stats to be updated
    assertBusy(() -> {
        logger.info("--> checking for stats to be updated...");
        Map<String, Object> stats = getSLMStats();
        Map<String, Object> policyStats = policyStatsAsMap(stats);
        Map<String, Object> policyIdStats = (Map<String, Object>) policyStats.get("policy");
        assertNotNull(policyIdStats);
    });
    // Re-fetch usage; it should now report exactly one policy with stats attached.
    slm = (Map<String, Object>) getLocation("/_xpack/usage").get("slm");
    assertNotNull(slm);
    assertTrue((boolean) slm.get("available"));
    assertTrue((boolean) slm.get("enabled"));
    assertThat("got: " + slm, slm.get("policy_count"), equalTo(1));
    assertNotNull(slm.get("policy_stats"));
}
/**
 * Verifies that retention still processes healthy policies even when another policy points at
 * a repository that has been deleted: the snapshot from the healthy policy is created, then
 * removed by a manual retention run, and a DELETE history entry is written.
 */
public void testSnapshotRetentionWithMissingRepo() throws Exception {
    // Create two snapshot repositories
    String repo = "test-repo";
    initializeRepo(repo);
    String missingRepo = "missing-repo";
    initializeRepo(missingRepo);
    // Create a policy per repository
    final String indexName = "test";
    final String policyName = "policy-1";
    createSnapshotPolicy(
        policyName,
        "snap",
        NEVER_EXECUTE_CRON_SCHEDULE,
        repo,
        indexName,
        true,
        new SnapshotRetentionConfiguration(TimeValue.timeValueMillis(1), null, null)
    );
    final String policyWithMissingRepo = "policy-2";
    createSnapshotPolicy(
        policyWithMissingRepo,
        "snap",
        NEVER_EXECUTE_CRON_SCHEDULE,
        missingRepo,
        indexName,
        true,
        new SnapshotRetentionConfiguration(TimeValue.timeValueMillis(1), null, null)
    );
    // Delete the repo of one of the policies
    deleteRepository(missingRepo);
    // Manually create a snapshot based on the "correct" policy
    final String snapshotName = executePolicy(policyName);
    // Check that the executed snapshot is created
    assertBusy(() -> {
        try {
            Response response = client().performRequest(new Request("GET", "/_snapshot/" + repo + "/" + snapshotName));
            Map<String, Object> snapshotResponseMap;
            try (InputStream is = response.getEntity().getContent()) {
                snapshotResponseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true);
            }
            assertThat(snapshotResponseMap.size(), greaterThan(0));
            final Map<String, Object> metadata = extractMetadata(snapshotResponseMap, snapshotName);
            assertNotNull(metadata);
            assertThat(metadata.get("policy"), equalTo(policyName));
            assertHistoryIsPresent(policyName, true, repo, CREATE_OPERATION);
        } catch (ResponseException e) {
            fail("expected snapshot to exist but it does not: " + EntityUtils.toString(e.getResponse().getEntity()));
        }
    }, 60, TimeUnit.SECONDS);
    // Retention must not be blocked by the policy whose repository no longer exists.
    execute_retention(client());
    // Check that the snapshot created by the policy has been removed by retention
    assertBusy(() -> {
        try {
            Response response = client().performRequest(new Request("GET", "/_snapshot/" + repo + "/" + snapshotName));
            assertThat(EntityUtils.toString(response.getEntity()), containsString("snapshot_missing_exception"));
        } catch (ResponseException e) {
            assertThat(EntityUtils.toString(e.getResponse().getEntity()), containsString("snapshot_missing_exception"));
        }
        assertHistoryIsPresent(policyName, true, repo, DELETE_OPERATION);
    }, 60, TimeUnit.SECONDS);
}
/**
 * Verifies that a policy created with an interval-style schedule ("30m") reports a
 * {@code next_execution_millis} roughly 30 minutes in the future (between +29m and +31m of
 * the creation time, allowing for request latency).
 */
@SuppressWarnings("unchecked")
public void testGetIntervalSchedule() throws Exception {
    final String indexName = "index-1";
    final String policyName = "policy-1";
    final String repoId = "repo-1";
    initializeRepo(repoId);
    var schedule = "30m";
    // Capture "now" before creating the policy so the expected window brackets it.
    var now = Instant.now();
    createSnapshotPolicy(policyName, "snap", schedule, repoId, indexName, true);
    assertBusy(() -> {
        Request getReq = new Request("GET", "/_slm/policy/" + policyName);
        Response policyMetadata = client().performRequest(getReq);
        Map<String, Object> policyResponseMap;
        try (InputStream is = policyMetadata.getEntity().getContent()) {
            policyResponseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true);
        }
        Map<String, Object> policyMetadataMap = (Map<String, Object>) policyResponseMap.get(policyName);
        Long nextExecutionMillis = (Long) policyMetadataMap.get("next_execution_millis");
        assertNotNull(nextExecutionMillis);
        Instant nextExecution = Instant.ofEpochMilli(nextExecutionMillis);
        // 29-31 minute window tolerates clock skew and request round-trip time.
        assertTrue(nextExecution.isAfter(now.plus(Duration.ofMinutes(29))));
        assertTrue(nextExecution.isBefore(now.plus(Duration.ofMinutes(31))));
    });
}
/**
 * Performs a GET against {@code path} and returns the parsed JSON response body as a map.
 * Fails the test if the request cannot be executed.
 */
public Map<String, Object> getLocation(String path) {
    try {
        Response getResponse = client().performRequest(new Request("GET", path));
        byte[] responseBody = EntityUtils.toByteArray(getResponse.getEntity());
        try (XContentParser parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, responseBody)) {
            return parser.map();
        }
    } catch (Exception e) {
        fail("failed to execute GET request to " + path + " - got: " + e);
        // Unreachable: fail() always throws, but the compiler needs a terminating statement.
        throw new RuntimeException(e);
    }
}
/**
 * PUTs a composable index template named {@code templateName} that matches
 * {@code indexPattern}, declares a data stream, and embeds the given {@code template}
 * (mappings/settings) as the template body.
 */
private static void createComposableTemplate(RestClient client, String templateName, String indexPattern, Template template)
    throws IOException {
    XContentBuilder builder = jsonBuilder();
    template.toXContent(builder, ToXContent.EMPTY_PARAMS);
    // The template body is rendered to JSON and spliced into the request payload.
    StringEntity templateJSON = new StringEntity(String.format(Locale.ROOT, """
        {
          "index_patterns": "%s",
          "data_stream": {},
          "template": %s
        }""", indexPattern, Strings.toString(builder)), ContentType.APPLICATION_JSON);
    Request createIndexTemplateRequest = new Request("PUT", "_index_template/" + templateName);
    createIndexTemplateRequest.setEntity(templateJSON);
    client.performRequest(createIndexTemplateRequest);
}
/**
 * Executes the given SLM policy via {@code POST /_slm/policy/<id>/_execute} and returns the
 * name of the snapshot that was started. Fails the test if the request cannot be executed.
 */
private String executePolicy(String policyId) {
    try {
        Response executeResponse = client().performRequest(new Request("POST", "/_slm/policy/" + policyId + "/_execute"));
        byte[] responseBody = EntityUtils.toByteArray(executeResponse.getEntity());
        try (XContentParser parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, responseBody)) {
            return parser.mapStrings().get("snapshot_name");
        }
    } catch (Exception e) {
        fail("failed to execute policy " + policyId + " - got: " + e);
        // Unreachable: fail() always throws, but the compiler needs a terminating statement.
        throw new RuntimeException(e);
    }
}
/**
 * Returns the "metadata" map of the first snapshot in the response whose name starts with
 * {@code snapshotPrefix}. Throws NPE if no such snapshot exists (callers assert presence).
 */
@SuppressWarnings("unchecked")
private static Map<String, Object> extractMetadata(Map<String, Object> snapshotResponseMap, String snapshotPrefix) {
    Map<String, Object> snapshot = extractSnapshot(snapshotResponseMap, snapshotPrefix);
    return (Map<String, Object>) snapshot.get("metadata");
}
/**
 * Returns the first snapshot entry from a GET-snapshot response whose "snapshot" name starts
 * with {@code snapshotPrefix}, or {@code null} if none matches.
 */
@SuppressWarnings("unchecked")
private static Map<String, Object> extractSnapshot(Map<String, Object> snapshotResponseMap, String snapshotPrefix) {
    List<Map<String, Object>> snapshots = (List<Map<String, Object>>) snapshotResponseMap.get("snapshots");
    for (Map<String, Object> snapshot : snapshots) {
        String name = (String) snapshot.get("snapshot");
        if (name.startsWith(snapshotPrefix)) {
            return snapshot;
        }
    }
    return null;
}
/**
 * Fetches {@code GET /_slm/stats} and returns the parsed JSON body as a map.
 * Fails the test if the request cannot be executed.
 */
private Map<String, Object> getSLMStats() {
    try {
        Response statsResponse = client().performRequest(new Request("GET", "/_slm/stats"));
        try (InputStream body = statsResponse.getEntity().getContent()) {
            return XContentHelper.convertToMap(XContentType.JSON.xContent(), body, true);
        }
    } catch (Exception e) {
        fail("exception retrieving stats: " + e);
        // Unreachable: fail() always throws, but the compiler needs a terminating statement.
        throw new ElasticsearchException(e);
    }
}
/**
 * Asserts that at least one SLM history document exists matching the given policy, success
 * flag, repository, and operation (CREATE/DELETE). Queries the {@code .slm-history*} indices.
 * <p>
 * This method should be called inside an assertBusy: it has no retry logic of its own, and a
 * failed search is converted to an AssertionError (via fail) so assertBusy will retry it.
 */
@SuppressWarnings("unchecked")
private void assertHistoryIsPresent(String policyName, boolean success, String repository, String operation) throws IOException {
    final Request historySearchRequest = new Request("GET", ".slm-history*/_search");
    historySearchRequest.setJsonEntity(Strings.format("""
        {
          "query": {
            "bool": {
              "must": [
                {
                  "term": {
                    "policy": "%s"
                  }
                },
                {
                  "term": {
                    "success": %s
                  }
                },
                {
                  "term": {
                    "repository": "%s"
                  }
                },
                {
                  "term": {
                    "operation": "%s"
                  }
                }
              ]
            }
          }
        }""", policyName, success, repository, operation));
    Response historyResponse;
    try {
        historyResponse = client().performRequest(historySearchRequest);
        Map<String, Object> historyResponseMap;
        try (InputStream is = historyResponse.getEntity().getContent()) {
            historyResponseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true);
        }
        // hits.total.value >= 1 means at least one matching history document was indexed.
        assertThat(
            (int) ((Map<String, Object>) ((Map<String, Object>) historyResponseMap.get("hits")).get("total")).get("value"),
            greaterThanOrEqualTo(1)
        );
    } catch (ResponseException e) {
        // Throw AssertionError instead of an exception if the search fails so that assertBusy works as expected
        logger.error(e);
        fail("failed to perform search:" + e.getMessage());
    }
}
/**
 * Creates (PUTs) an SLM policy with no retention configuration; delegates to the
 * retention-aware overload with {@link SnapshotRetentionConfiguration#EMPTY}.
 */
private void createSnapshotPolicy(
    String policyName,
    String snapshotNamePattern,
    String schedule,
    String repoId,
    String indexPattern,
    boolean ignoreUnavailable
) throws IOException {
    createSnapshotPolicy(policyName, snapshotNamePattern, schedule, repoId, indexPattern, ignoreUnavailable, SnapshotRetentionConfiguration.EMPTY);
}
/**
 * Creates (PUTs) an SLM policy that snapshots {@code indexPattern} into {@code repoId} on the
 * given schedule with the given retention configuration. Randomly attaches some snapshot
 * metadata (never using the reserved "policy" key) to exercise the metadata round-trip.
 * Asserts the PUT is acknowledged.
 */
private void createSnapshotPolicy(
    String policyName,
    String snapshotNamePattern,
    String schedule,
    String repoId,
    String indexPattern,
    boolean ignoreUnavailable,
    SnapshotRetentionConfiguration retention
) throws IOException {
    Map<String, Object> snapConfig = new HashMap<>();
    snapConfig.put("indices", Collections.singletonList(indexPattern));
    snapConfig.put("ignore_unavailable", ignoreUnavailable);
    if (randomBoolean()) {
        Map<String, Object> metadata = new HashMap<>();
        int fieldCount = randomIntBetween(2, 5);
        for (int i = 0; i < fieldCount; i++) {
            // "policy" is reserved (SLM stamps it itself), and keys must be unique.
            metadata.put(
                randomValueOtherThanMany(key -> "policy".equals(key) || metadata.containsKey(key), () -> randomAlphaOfLength(5)),
                randomAlphaOfLength(4)
            );
        }
        // BUG FIX: the generated metadata was previously built but never used; attach it to the
        // snapshot config so it is actually sent with the policy.
        snapConfig.put("metadata", metadata);
    }
    SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy(
        policyName,
        snapshotNamePattern,
        schedule,
        repoId,
        snapConfig,
        retention
    );
    Request putLifecycle = new Request("PUT", "/_slm/policy/" + policyName);
    XContentBuilder lifecycleBuilder = JsonXContent.contentBuilder();
    policy.toXContent(lifecycleBuilder, ToXContent.EMPTY_PARAMS);
    putLifecycle.setJsonEntity(Strings.toString(lifecycleBuilder));
    final Response response = client().performRequest(putLifecycle);
    assertAcked(response);
}
/**
 * Sets the persistent cluster setting {@code slm.minimum_interval} to "0s" so tests may use
 * very short interval schedules; asserts the settings update is acknowledged.
 */
private void disableSLMMinimumIntervalValidation() throws IOException {
    ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT);
    settingsRequest.persistentSettings(Settings.builder().put(LifecycleSettings.SLM_MINIMUM_INTERVAL, "0s"));
    try (XContentBuilder payload = jsonBuilder()) {
        settingsRequest.toXContent(payload, ToXContent.EMPTY_PARAMS);
        Request putSettings = new Request("PUT", "/_cluster/settings");
        putSettings.setJsonEntity(Strings.toString(payload));
        Response settingsResponse = client().performRequest(putSettings);
        assertAcked(settingsResponse);
    }
}
/** Registers an fs snapshot repository with the default 40mb snapshot throttle. */
private void initializeRepo(String repoName) throws IOException {
    initializeRepo(repoName, "40mb");
}
/**
 * Registers an fs snapshot repository named {@code repoName}, rooted at the shared repo
 * directory, with random compression and the given {@code max_snapshot_bytes_per_sec} limit.
 */
private void initializeRepo(String repoName, String maxBytesPerSecond) throws IOException {
    XContentBuilder repoSettings = JsonXContent.contentBuilder()
        .startObject()
        .field("type", "fs")
        .startObject("settings")
        .field("compress", randomBoolean())
        .field("location", repoDir.getRoot().getAbsolutePath())
        .field("max_snapshot_bytes_per_sec", maxBytesPerSecond)
        .endObject()
        .endObject();
    Request putRepo = new Request("PUT", "/_snapshot/" + repoName);
    putRepo.setJsonEntity(Strings.toString(repoSettings));
    assertOK(client().performRequest(putRepo));
}
/**
 * Indexes a document with the given id into {@code index}. {@code fields} is a flat sequence
 * of alternating field-name / field-value pairs.
 */
private static void index(RestClient client, String index, String id, Object... fields) throws IOException {
    XContentBuilder body = jsonBuilder().startObject();
    // fields come in (name, value) pairs, hence the step of 2.
    for (int i = 0; i < fields.length; i += 2) {
        String fieldName = (String) fields[i];
        body.field(fieldName, fields[i + 1]);
    }
    body.endObject();
    Request indexRequest = new Request("POST", "/" + index + "/_doc/" + id);
    indexRequest.setJsonEntity(Strings.toString(body));
    assertOK(client.performRequest(indexRequest));
}
/** Triggers an immediate SLM retention run via {@code POST /_slm/_execute_retention}. */
private static void execute_retention(RestClient client) throws IOException {
    assertOK(client.performRequest(new Request("POST", "/_slm/_execute_retention")));
}
/**
 * Re-keys the "policy_stats" list from an SLM stats response into a map from policy id to its
 * per-policy stats entry.
 */
@SuppressWarnings("unchecked")
private static Map<String, Object> policyStatsAsMap(Map<String, Object> stats) {
    List<Map<String, Object>> perPolicyStats =
        (List<Map<String, Object>>) stats.get(SnapshotLifecycleStats.POLICY_STATS.getPreferredName());
    String policyIdField = SnapshotLifecycleStats.SnapshotPolicyStats.POLICY_ID.getPreferredName();
    return perPolicyStats.stream()
        .collect(Collectors.toMap(entry -> (String) entry.get(policyIdField), Function.identity()));
}
/** Asserts the response is 200 OK and its JSON body contains {@code "acknowledged": true}. */
private void assertAcked(Response response) throws IOException {
    assertOK(response);
    Map<String, Object> responseBody;
    try (InputStream is = response.getEntity().getContent()) {
        responseBody = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true);
    }
    assertThat(responseBody.get("acknowledged"), equalTo(true));
}
/** Fetches all SLM policies (human-readable form) and logs them, as a debugging aid. */
private void logSLMPolicies() throws IOException {
    Response policiesResponse = client().performRequest(new Request("GET", "/_slm/policy?human"));
    assertOK(policiesResponse);
    logger.info("SLM policies: {}", EntityUtils.toString(policiesResponse.getEntity()));
}
}
| SnapshotLifecycleRestIT |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/insertordering/InsertOrderingTest.java | {
"start": 1410,
"end": 2401
} | class ____ {
@Test
public void testBatchOrdering(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
int iterations = 12;
for ( int i = 0; i < iterations; i++ ) {
User user = new User( "user-" + i );
Group group = new Group( "group-" + i );
session.persist( user );
session.persist( group );
user.addMembership( group );
}
StatsBatch.reset();
}
);
// 1 for first 10 User (1)
// 1 for final 2 User (2)
// 1 for first 10 Group (3)
// 1 for last 2 Group (4)
// 1 for first 10 Membership (5)
// 1 for last 2 Membership (6)
assertEquals( 6, StatsBatch.numberOfBatches );
scope.inTransaction(
session -> {
Iterator users = session.createQuery(
"from User u left join fetch u.memberships m left join fetch m.group" ).list().iterator();
while ( users.hasNext() ) {
session.remove( users.next() );
}
}
);
}
@SuppressWarnings("unused")
public static | InsertOrderingTest |
java | mockito__mockito | mockito-core/src/test/java/org/mockitousage/constructor/CreatingMocksWithConstructorTest.java | {
"start": 4713,
"end": 5350
} | class ____ {
public ThrowingConstructorClass() {
throw new RuntimeException();
}
}
@Test
public void explains_constructor_exceptions() {
try {
mock(
ThrowingConstructorClass.class,
withSettings().useConstructor().defaultAnswer(CALLS_REAL_METHODS));
fail();
} catch (MockitoException e) {
assertThat(e).hasRootCauseInstanceOf(RuntimeException.class);
assertThat(e.getCause())
.hasMessageContaining(
"Please ensure the target | ThrowingConstructorClass |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/inject/guice/AssistedInjectScoping.java | {
"start": 2887,
"end": 2965
} | class ____ AssistedInject factory is not allowed",
severity = ERROR)
public | of |
java | square__retrofit | retrofit/java-test/src/test/java/retrofit2/RequestFactoryTest.java | {
"start": 9819,
"end": 10326
} | class ____ {
@POST("/") //
Call<ResponseBody> method(@FieldMap Map<String, String> a) {
return null;
}
}
try {
buildRequest(Example.class);
fail();
} catch (IllegalArgumentException e) {
assertThat(e)
.hasMessageThat()
.isEqualTo(
"@FieldMap parameters can only be used with form encoding. (parameter 'a')\n for method Example.method");
}
}
@Test
public void formEncodingFailsOnNonBodyMethod() {
| Example |
java | apache__rocketmq | tools/src/main/java/org/apache/rocketmq/tools/command/export/ExportMetadataCommand.java | {
"start": 1731,
"end": 8236
} | class ____ implements SubCommand {
private static final String DEFAULT_FILE_PATH = "/tmp/rocketmq/export";
@Override
public String commandName() {
return "exportMetadata";
}
@Override
public String commandDesc() {
return "Export metadata.";
}
@Override
public Options buildCommandlineOptions(Options options) {
Option opt = new Option("c", "clusterName", true, "choose a cluster to export");
opt.setRequired(false);
options.addOption(opt);
opt = new Option("b", "brokerAddr", true, "choose a broker to export");
opt.setRequired(false);
options.addOption(opt);
opt = new Option("f", "filePath", true, "export metadata.json path | default /tmp/rocketmq/export");
opt.setRequired(false);
options.addOption(opt);
opt = new Option("t", "topic", false, "only export topic metadata");
opt.setRequired(false);
options.addOption(opt);
opt = new Option("g", "subscriptionGroup", false, "only export subscriptionGroup metadata");
opt.setRequired(false);
options.addOption(opt);
opt = new Option("s", "specialTopic", false, "need retryTopic and dlqTopic");
opt.setRequired(false);
options.addOption(opt);
return options;
}
@Override
public void execute(CommandLine commandLine, Options options, RPCHook rpcHook)
throws SubCommandException {
DefaultMQAdminExt defaultMQAdminExt = new DefaultMQAdminExt(rpcHook);
defaultMQAdminExt.setInstanceName(Long.toString(System.currentTimeMillis()));
try {
defaultMQAdminExt.start();
String filePath = !commandLine.hasOption('f') ? DEFAULT_FILE_PATH : commandLine.getOptionValue('f')
.trim();
boolean specialTopic = commandLine.hasOption('s');
if (commandLine.hasOption('b')) {
final String brokerAddr = commandLine.getOptionValue('b').trim();
if (commandLine.hasOption('t')) {
filePath = filePath + "/topic.json";
TopicConfigSerializeWrapper topicConfigSerializeWrapper = defaultMQAdminExt.getUserTopicConfig(
brokerAddr, specialTopic, 10000L);
MixAll.string2FileNotSafe(JSON.toJSONString(topicConfigSerializeWrapper, true), filePath);
System.out.printf("export %s success", filePath);
} else if (commandLine.hasOption('g')) {
filePath = filePath + "/subscriptionGroup.json";
SubscriptionGroupWrapper subscriptionGroupWrapper = defaultMQAdminExt.getUserSubscriptionGroup(
brokerAddr, 10000L);
MixAll.string2FileNotSafe(JSON.toJSONString(subscriptionGroupWrapper, true), filePath);
System.out.printf("export %s success", filePath);
}
} else if (commandLine.hasOption('c')) {
String clusterName = commandLine.getOptionValue('c').trim();
Set<String> masterSet =
CommandUtil.fetchMasterAddrByClusterName(defaultMQAdminExt, clusterName);
Map<String, TopicConfig> topicConfigMap = new HashMap<>();
Map<String, SubscriptionGroupConfig> subGroupConfigMap = new HashMap<>();
Map<String, Object> result = new HashMap<>();
for (String addr : masterSet) {
TopicConfigSerializeWrapper topicConfigSerializeWrapper = defaultMQAdminExt.getUserTopicConfig(
addr, specialTopic, 10000L);
SubscriptionGroupWrapper subscriptionGroupWrapper = defaultMQAdminExt.getUserSubscriptionGroup(
addr, 10000);
for (Map.Entry<String, TopicConfig> entry : topicConfigSerializeWrapper.getTopicConfigTable()
.entrySet()) {
TopicConfig topicConfig = topicConfigMap.get(entry.getKey());
if (null != topicConfig) {
entry.getValue().setWriteQueueNums(
topicConfig.getWriteQueueNums() + entry.getValue().getWriteQueueNums());
entry.getValue().setReadQueueNums(
topicConfig.getReadQueueNums() + entry.getValue().getReadQueueNums());
}
topicConfigMap.put(entry.getKey(), entry.getValue());
}
for (Map.Entry<String, SubscriptionGroupConfig> entry : subscriptionGroupWrapper.getSubscriptionGroupTable()
.entrySet()) {
SubscriptionGroupConfig subscriptionGroupConfig = subGroupConfigMap.get(entry.getKey());
if (null != subscriptionGroupConfig) {
entry.getValue().setRetryQueueNums(
subscriptionGroupConfig.getRetryQueueNums() + entry.getValue().getRetryQueueNums());
}
subGroupConfigMap.put(entry.getKey(), entry.getValue());
}
}
String exportPath;
if (commandLine.hasOption('t')) {
result.put("topicConfigTable", topicConfigMap);
exportPath = filePath + "/topic.json";
} else if (commandLine.hasOption('g')) {
result.put("subscriptionGroupTable", subGroupConfigMap);
exportPath = filePath + "/subscriptionGroup.json";
} else {
result.put("topicConfigTable", topicConfigMap);
result.put("subscriptionGroupTable", subGroupConfigMap);
exportPath = filePath + "/metadata.json";
}
result.put("exportTime", System.currentTimeMillis());
MixAll.string2FileNotSafe(JSON.toJSONString(result, true), exportPath);
System.out.printf("export %s success%n", exportPath);
} else {
ServerUtil.printCommandLineHelp("mqadmin " + this.commandName(), options);
}
} catch (Exception e) {
throw new SubCommandException(this.getClass().getSimpleName() + " command failed", e);
} finally {
defaultMQAdminExt.shutdown();
}
}
}
| ExportMetadataCommand |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/common/typeutils/base/array/StringArraySerializer.java | {
"start": 3718,
"end": 3917
} | class ____
extends SimpleTypeSerializerSnapshot<String[]> {
public StringArraySerializerSnapshot() {
super(() -> INSTANCE);
}
}
}
| StringArraySerializerSnapshot |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/inheritance/EmbeddableInheritanceHierarchyOrderTest.java | {
"start": 3927,
"end": 4123
} | class ____ extends Mammal {
//private int bone;
// [...]
public Dog() {
}
public Dog(int age, String name, String mother) {
super( age, name, mother );
}
}
@Embeddable
static
| Dog |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/async/AsyncErasureCoding.java | {
"start": 2682,
"end": 10754
} | class ____ extends ErasureCoding {
/** RPC server to receive client calls. */
private final RouterRpcServer rpcServer;
/** RPC clients to connect to the Namenodes. */
private final RouterRpcClient rpcClient;
/** Interface to identify the active NN for a nameservice or blockpool ID. */
private final ActiveNamenodeResolver namenodeResolver;
public AsyncErasureCoding(RouterRpcServer server) {
super(server);
this.rpcServer = server;
this.rpcClient = this.rpcServer.getRPCClient();
this.namenodeResolver = this.rpcClient.getNamenodeResolver();
}
/**
* Asynchronously get an array of all erasure coding policies.
* This method checks the operation category and then invokes the
* getErasureCodingPolicies method concurrently across all namespaces.
* <p>
* The results are merged and returned as an array of ErasureCodingPolicyInfo.
*
* @return Array of ErasureCodingPolicyInfo.
* @throws IOException If an I/O error occurs.
*/
@Override
public ErasureCodingPolicyInfo[] getErasureCodingPolicies()
throws IOException {
rpcServer.checkOperation(NameNode.OperationCategory.READ);
RemoteMethod method = new RemoteMethod("getErasureCodingPolicies");
Set<FederationNamespaceInfo> nss = namenodeResolver.getNamespaces();
rpcClient.invokeConcurrent(
nss, method, true, false, ErasureCodingPolicyInfo[].class);
asyncApply(
(ApplyFunction<Map<FederationNamespaceInfo, ErasureCodingPolicyInfo[]>,
ErasureCodingPolicyInfo[]>) ret -> merge(ret, ErasureCodingPolicyInfo.class));
return asyncReturn(ErasureCodingPolicyInfo[].class);
}
/**
* Asynchronously get the erasure coding codecs available.
* This method checks the operation category and then invokes the
* getErasureCodingCodecs method concurrently across all namespaces.
* <p>
* The results are merged into a single map of codec names to codec properties.
*
* @return Map of erasure coding codecs.
* @throws IOException If an I/O error occurs.
*/
@Override
public Map<String, String> getErasureCodingCodecs() throws IOException {
rpcServer.checkOperation(NameNode.OperationCategory.READ);
RemoteMethod method = new RemoteMethod("getErasureCodingCodecs");
Set<FederationNamespaceInfo> nss = namenodeResolver.getNamespaces();
rpcClient.invokeConcurrent(
nss, method, true, false, Map.class);
asyncApply((ApplyFunction<Map<FederationNamespaceInfo,
Map<String, String>>, Map<String, String>>) retCodecs -> {
Map<String, String> ret = new HashMap<>();
Object obj = retCodecs;
@SuppressWarnings("unchecked")
Map<FederationNamespaceInfo, Map<String, String>> results =
(Map<FederationNamespaceInfo, Map<String, String>>)obj;
Collection<Map<String, String>> allCodecs = results.values();
for (Map<String, String> codecs : allCodecs) {
ret.putAll(codecs);
}
return ret;
});
return asyncReturn(Map.class);
}
/**
* Asynchronously add an array of erasure coding policies.
* This method checks the operation category and then invokes the
* addErasureCodingPolicies method concurrently across all namespaces.
* <p>
* The results are merged and returned as an array of AddErasureCodingPolicyResponse.
*
* @param policies Array of erasure coding policies to add.
* @return Array of AddErasureCodingPolicyResponse.
* @throws IOException If an I/O error occurs.
*/
@Override
public AddErasureCodingPolicyResponse[] addErasureCodingPolicies(
ErasureCodingPolicy[] policies) throws IOException {
rpcServer.checkOperation(NameNode.OperationCategory.WRITE);
RemoteMethod method = new RemoteMethod("addErasureCodingPolicies",
new Class<?>[] {ErasureCodingPolicy[].class}, new Object[] {policies});
Set<FederationNamespaceInfo> nss = namenodeResolver.getNamespaces();
rpcClient.invokeConcurrent(
nss, method, true, false, AddErasureCodingPolicyResponse[].class);
asyncApply(
(ApplyFunction<Map<FederationNamespaceInfo, AddErasureCodingPolicyResponse[]>,
AddErasureCodingPolicyResponse[]>) ret -> {
return merge(ret, AddErasureCodingPolicyResponse.class);
});
return asyncReturn(AddErasureCodingPolicyResponse[].class);
}
/**
* Asynchronously get the erasure coding policy for a given source path.
* This method checks the operation category and then invokes the
* getErasureCodingPolicy method sequentially for the given path.
* <p>
* The result is returned as an ErasureCodingPolicy object.
*
* @param src Source path to get the erasure coding policy for.
* @return ErasureCodingPolicy for the given path.
* @throws IOException If an I/O error occurs.
*/
@Override
public ErasureCodingPolicy getErasureCodingPolicy(String src)
throws IOException {
rpcServer.checkOperation(NameNode.OperationCategory.READ);
final List<RemoteLocation> locations =
rpcServer.getLocationsForPath(src, false, false);
RemoteMethod remoteMethod = new RemoteMethod("getErasureCodingPolicy",
new Class<?>[] {String.class}, new RemoteParam());
rpcClient.invokeSequential(
locations, remoteMethod, null, null);
asyncApply(ret -> {
return (ErasureCodingPolicy) ret;
});
return asyncReturn(ErasureCodingPolicy.class);
}
/**
* Asynchronously get the EC topology result for the given policies.
* This method checks the operation category and then invokes the
* getECTopologyResultForPolicies method concurrently across all namespaces.
* <p>
* The results are merged and the first unsupported result is returned.
*
* @param policyNames Array of policy names to check.
* @return ECTopologyVerifierResult for the policies.
* @throws IOException If an I/O error occurs.
*/
@Override
public ECTopologyVerifierResult getECTopologyResultForPolicies(
String[] policyNames) throws IOException {
RemoteMethod method = new RemoteMethod("getECTopologyResultForPolicies",
new Class<?>[] {String[].class}, new Object[] {policyNames});
Set<FederationNamespaceInfo> nss = namenodeResolver.getNamespaces();
if (nss.isEmpty()) {
throw new IOException("No namespace availaible.");
}
rpcClient.invokeConcurrent(nss, method, true, false,
ECTopologyVerifierResult.class);
asyncApply((ApplyFunction<Map<FederationNamespaceInfo, ECTopologyVerifierResult>,
ECTopologyVerifierResult>) ret -> {
for (Map.Entry<FederationNamespaceInfo, ECTopologyVerifierResult> entry :
ret.entrySet()) {
if (!entry.getValue().isSupported()) {
return entry.getValue();
}
}
// If no negative result, return the result from the first namespace.
return ret.get(nss.iterator().next());
});
return asyncReturn(ECTopologyVerifierResult.class);
}
/**
* Asynchronously get the erasure coding block group statistics.
* This method checks the operation category and then invokes the
* getECBlockGroupStats method concurrently across all namespaces.
* <p>
* The results are merged and returned as an ECBlockGroupStats object.
*
* @return ECBlockGroupStats for the erasure coding block groups.
* @throws IOException If an I/O error occurs.
*/
@Override
public ECBlockGroupStats getECBlockGroupStats() throws IOException {
rpcServer.checkOperation(NameNode.OperationCategory.READ);
RemoteMethod method = new RemoteMethod("getECBlockGroupStats");
Set<FederationNamespaceInfo> nss = namenodeResolver.getNamespaces();
rpcClient.invokeConcurrent(
nss, method, true, false, ECBlockGroupStats.class);
asyncApply((ApplyFunction<Map<FederationNamespaceInfo, ECBlockGroupStats>,
ECBlockGroupStats>) allStats -> {
return ECBlockGroupStats.merge(allStats.values());
});
return asyncReturn(ECBlockGroupStats.class);
}
} | AsyncErasureCoding |
java | spring-projects__spring-boot | build-plugin/spring-boot-gradle-plugin/src/test/java/com/example/boottestrun/classpath/BootTestRunClasspathApplication.java | {
"start": 857,
"end": 1019
} | class ____ {
protected BootTestRunClasspathApplication() {
}
public static void main(String[] args) {
System.out.println("Main | BootTestRunClasspathApplication |
java | apache__kafka | connect/runtime/src/test/resources/test-plugins/bad-packaging/test/plugins/DefaultConstructorThrowsConnector.java | {
"start": 1235,
"end": 1887
} | class ____ extends SinkConnector {
public DefaultConstructorThrowsConnector() {
throw new RuntimeException("I always throw an exception");
}
@Override
public String version() {
return null;
}
@Override
public void start(Map<String, String> props) {
}
@Override
public Class<? extends Task> taskClass() {
return null;
}
@Override
public List<Map<String, String>> taskConfigs(int maxTasks) {
return null;
}
@Override
public void stop() {
}
@Override
public ConfigDef config() {
return null;
}
}
| DefaultConstructorThrowsConnector |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/exc/PropertyBindingException.java | {
"start": 222,
"end": 402
} | class ____ {@link MismatchedInputException}s that are specifically related
* to problems related to binding an individual property.
*/
@SuppressWarnings("serial")
public abstract | for |
java | apache__flink | flink-table/flink-table-runtime/src/test/java/org/apache/flink/table/runtime/typeutils/SerializerTestUtil.java | {
"start": 1693,
"end": 3675
} | class ____ {
/** Snapshot and restore the given serializer. Returns the restored serializer. */
public static <T> TypeSerializer<T> snapshotAndReconfigure(
TypeSerializer<T> serializer, SerializerGetter<T> serializerGetter) throws IOException {
TypeSerializerSnapshot<T> configSnapshot = serializer.snapshotConfiguration();
byte[] serializedConfig;
try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
TypeSerializerSnapshotSerializationUtil.writeSerializerSnapshot(
new DataOutputViewStreamWrapper(out), configSnapshot);
serializedConfig = out.toByteArray();
}
TypeSerializerSnapshot<T> restoredConfig;
try (ByteArrayInputStream in = new ByteArrayInputStream(serializedConfig)) {
restoredConfig =
TypeSerializerSnapshotSerializationUtil.readSerializerSnapshot(
new DataInputViewStreamWrapper(in),
Thread.currentThread().getContextClassLoader());
}
TypeSerializerSchemaCompatibility<T> strategy =
serializerGetter
.getSerializer()
.snapshotConfiguration()
.resolveSchemaCompatibility(restoredConfig);
final TypeSerializer<T> restoredSerializer;
if (strategy.isCompatibleAsIs()) {
restoredSerializer = restoredConfig.restoreSerializer();
} else if (strategy.isCompatibleWithReconfiguredSerializer()) {
restoredSerializer = strategy.getReconfiguredSerializer();
} else {
throw new AssertionError("Unable to restore serializer with " + strategy);
}
assertThat(restoredSerializer.getClass()).isEqualTo(serializer.getClass());
return restoredSerializer;
}
/** Used for snapshotAndReconfigure method to provide serializers when restoring. */
public | SerializerTestUtil |
java | apache__camel | components/camel-ai/camel-djl/src/main/java/org/apache/camel/component/djl/model/nlp/CustomQuestionAnswerPredictor.java | {
"start": 1284,
"end": 3183
} | class ____ extends AbstractPredictor {
private final String modelName;
private final String translatorName;
public CustomQuestionAnswerPredictor(DJLEndpoint endpoint) {
super(endpoint);
this.modelName = endpoint.getModel();
this.translatorName = endpoint.getTranslator();
}
@Override
public void process(Exchange exchange) throws Exception {
Object body = exchange.getIn().getBody();
String result;
if (body instanceof QAInput) {
QAInput input = exchange.getIn().getBody(QAInput.class);
result = predict(exchange, input);
} else if (body instanceof String[]) {
String[] strs = exchange.getIn().getBody(String[].class);
if (strs.length < 2) {
throw new RuntimeCamelException("Input String[] should have two elements");
}
QAInput input = new QAInput(strs[0], strs[1]);
result = predict(exchange, input);
} else {
throw new RuntimeCamelException("Data type is not supported. Body should be String[] or QAInput");
}
exchange.getIn().setBody(result);
}
protected String predict(Exchange exchange, QAInput input) {
Model model = exchange.getContext().getRegistry().lookupByNameAndType(modelName, Model.class);
@SuppressWarnings("unchecked")
Translator<QAInput, String> translator
= exchange.getContext().getRegistry().lookupByNameAndType(translatorName, Translator.class);
exchange.getIn().setHeader(DJLConstants.INPUT, input);
try (Predictor<QAInput, String> predictor = model.newPredictor(translator)) {
return predictor.predict(input);
} catch (TranslateException e) {
throw new RuntimeCamelException("Could not process input or output", e);
}
}
}
| CustomQuestionAnswerPredictor |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/engine/spi/SharedSessionContractImplementor.java | {
"start": 12205,
"end": 20273
} | class ____ the given {@link EntityPersister},
* initializing the new instance with the given identifier.
* <p>
* This is more efficient than {@link #instantiate(String, Object)},
* but not always interchangeable, since a single persister might be
* responsible for multiple types.
*/
Object instantiate(EntityPersister persister, Object id) throws HibernateException;
/**
* Are entities and proxies loaded by this session read-only by default?
*/
boolean isDefaultReadOnly();
boolean isIdentifierRollbackEnabled();
void setCriteriaCopyTreeEnabled(boolean jpaCriteriaCopyComplianceEnabled);
boolean isCriteriaCopyTreeEnabled();
void setCriteriaPlanCacheEnabled(boolean jpaCriteriaCacheEnabled);
boolean isCriteriaPlanCacheEnabled();
boolean getNativeJdbcParametersIgnored();
void setNativeJdbcParametersIgnored(boolean nativeJdbcParametersIgnored);
/**
* Get the current {@link FlushMode} for this session.
*
* @return The flush mode
*/
@Override
FlushMode getHibernateFlushMode();
/**
* Flush this session.
*/
void flush();
/**
* Determines if this session implements {@link EventSource}.
* <p>
* Only stateful session are sources of events. If this object is
* a stateless session, this method return {@code false}.
*/
default boolean isEventSource() {
return false;
}
/**
* Cast this session to {@link EventSource} if possible.
* <p>
* Only stateful session are sources of events. If this object is
* a stateless session, this method throws.
*
* @throws ClassCastException if the cast is not possible
*/
default EventSource asEventSource() {
throw new ClassCastException( "session is not an EventSource" );
}
/**
* Whether the session {@linkplain StatelessSessionImplementor stateless}, as opposed tp
* {@linkplain SessionImplementor stateful}.
*
* @apiNote Essentially, whether casting this session to {@linkplain StatelessSessionImplementor} will succeed.
*/
default boolean isStateless() {
return false;
}
/**
* Called after each operation on a {@link org.hibernate.ScrollableResults},
* providing an opportunity for a stateless session to clear its
* temporary persistence context. For a stateful session, this method
* does nothing.
*/
void afterScrollOperation();
/**
* Get the {@link LoadQueryInfluencers} associated with this session.
*
* @return the {@link LoadQueryInfluencers} associated with this session;
* should never be null.
*/
LoadQueryInfluencers getLoadQueryInfluencers();
/**
* Obtain an {@link ExceptionConverter} for reporting an error.
* <p>
* The converter associated to a session might be lazily initialized,
* so only invoke this getter when there's an actual need to use it.
*
* @return the ExceptionConverter for this Session.
*/
ExceptionConverter getExceptionConverter();
/**
* Get the currently configured JDBC batch size, which might have
* been specified at either the session or factory level.
*
* @return the session-level JDBC batch size is set, or the
* factory-level setting otherwise
*
* @since 5.2
*
* @see org.hibernate.boot.spi.SessionFactoryOptions#getJdbcBatchSize
* @see org.hibernate.boot.SessionFactoryBuilder#applyJdbcBatchSize
*/
default Integer getConfiguredJdbcBatchSize() {
final Integer sessionJdbcBatchSize = getJdbcBatchSize();
return sessionJdbcBatchSize == null
? getFactory().getSessionFactoryOptions().getJdbcBatchSize()
: sessionJdbcBatchSize;
}
/**
* Similar to {@link #getPersistenceContext()}, with two differences:
* <ol>
* <li>this version performs better as it allows for inlining
* and probably better prediction, and
* <li>it skips some checks of the current state of the session.
* </ol>
* Choose wisely: performance is important, but correctness comes first.
*
* @return the {@link PersistenceContext} associated to this session.
*/
PersistenceContext getPersistenceContextInternal();
/**
* Is the given entity managed by this session?
*
* @return true if this is a stateful session and
* the entity belongs to its persistence
* context and was not removed
*/
boolean isManaged(Object entity);
/**
* detect in-memory changes, determine if the changes are to tables
* named in the query and, if so, complete execution the flush
*
* @param querySpaces the tables named in the query.
*
* @return true if flush is required, false otherwise.
*/
default boolean autoFlushIfRequired(Set<String> querySpaces) {
return autoFlushIfRequired( querySpaces, false );
}
/**
* detect in-memory changes, determine if the changes are to tables
* named in the query and, if so, complete execution the flush
*
* @param querySpaces the tables named in the query.
* @param skipPreFlush see {@link org.hibernate.event.spi.AutoFlushEvent#isSkipPreFlush}
*
* @return true if flush is required, false otherwise.
*/
boolean autoFlushIfRequired(Set<String> querySpaces, boolean skipPreFlush);
boolean autoPreFlushIfRequired(QueryParameterBindings parameterBindings);
/**
* Check if there is a Hibernate or JTA transaction in progress and,
* if there is not, flush if necessary, making sure that the connection
* has been committed (if it is not in autocommit mode), and finally
* run the after completion processing.
*
* @param success {@code true} if the operation a success
*/
void afterOperation(boolean success);
/**
* Cast this object to {@link SessionImplementor}, if possible.
*
* @throws ClassCastException if the cast is not possible
*
* @deprecated No longer useful, since Java made downcasting safer
*/
@Deprecated(since = "7.0", forRemoval = true)
default SessionImplementor asSessionImplementor() {
throw new ClassCastException( "session is not a SessionImplementor" );
}
/**
* Does this object implement {@link SessionImplementor}?
*
* @deprecated No longer useful, since Java made downcasting safer
*/
@Deprecated(since = "7.0", forRemoval = true)
default boolean isSessionImplementor() {
return this instanceof SessionImplementor;
}
/**
* Cast this object to {@link StatelessSession}, if possible.
*
* @throws ClassCastException if the cast is not possible
*
* @deprecated No longer useful, since Java made downcasting safer
*/
@Deprecated(since = "7.0", forRemoval = true)
default StatelessSession asStatelessSession() {
return (StatelessSession) this;
}
/**
* Does this object implement {@link StatelessSession}?
*
* @deprecated No longer useful, since Java made downcasting safer
*/
@Deprecated(since = "7.0", forRemoval = true)
default boolean isStatelessSession() {
return this instanceof StatelessSession;
}
/**
* Cascade the lock operation to the given child entity.
*/
void lock(String entityName, Object child, LockOptions lockOptions);
/**
* Attempts to load the entity from the second-level cache.
*
* @param persister The persister for the entity being requested for load
* @param entityKey The entity key
* @param instanceToLoad The instance that is being initialized, or null
* @param lockMode The lock mode
*
* @return The entity from the second-level cache, or null.
*
* @since 7.0
*/
@Incubating
Object loadFromSecondLevelCache(EntityPersister persister, EntityKey entityKey, Object instanceToLoad, LockMode lockMode);
/**
* Wrap all state that lazy loading interceptors might need to
* manage association with this session, or to handle lazy loading
* after detachment via the UUID of the SessionFactory.
* N.B. this captures the current Session, however it can get
* updated to a null session (for detached entities) or updated to
* a different Session.
*/
@Incubating
SessionAssociationMarkers getSessionAssociationMarkers();
@Override
<T> RootGraphImplementor<T> createEntityGraph(Class<T> rootType);
@Override
RootGraphImplementor<?> createEntityGraph(String graphName);
@Override
RootGraphImplementor<?> getEntityGraph(String graphName);
}
| of |
java | apache__flink | flink-core/src/test/java/org/apache/flink/util/WrappingProxyUtilTest.java | {
"start": 1046,
"end": 1891
} | class ____ {
@Test
void testThrowsExceptionIfTooManyProxies() {
assertThatThrownBy(
() ->
WrappingProxyUtil.stripProxy(
new SelfWrappingProxy(
WrappingProxyUtil.SAFETY_NET_MAX_ITERATIONS)))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("Are there loops in the object graph?");
}
@Test
void testStripsAllProxies() {
final SelfWrappingProxy wrappingProxy =
new SelfWrappingProxy(WrappingProxyUtil.SAFETY_NET_MAX_ITERATIONS - 1);
assertThat(WrappingProxyUtil.stripProxy(wrappingProxy))
.isNotInstanceOf(SelfWrappingProxy.class);
}
private static | WrappingProxyUtilTest |
java | quarkusio__quarkus | extensions/smallrye-graphql/deployment/src/test/java/io/quarkus/smallrye/graphql/deployment/UniTest.java | {
"start": 4543,
"end": 5189
} | class ____ {
public String isbn;
public String title;
public LocalDate published;
public List<String> authors;
public Book() {
}
public Book(String isbn, String title, LocalDate published, String... authors) {
this.isbn = isbn;
this.title = title;
this.published = published;
this.authors = Arrays.asList(authors);
}
@Override
public String toString() {
return "Book{" + "isbn=" + isbn + ", title=" + title + ", published=" + published + ", authors=" + authors + '}';
}
}
public static | Book |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/junit/jupiter/SpringJUnitJupiterAutowiredConstructorInjectionTests.java | {
"start": 1538,
"end": 1772
} | class ____ extends BaseClass {
@Autowired
SpringAutowiredTests(ApplicationContext context, Person dilbert, Dog dog, @Value("${enigma}") Integer enigma) {
super(context, dilbert, dog, enigma);
}
}
@Nested
| SpringAutowiredTests |
java | micronaut-projects__micronaut-core | test-suite/src/test/java/io/micronaut/docs/propagation/MdcFilter.java | {
"start": 491,
"end": 937
} | class ____ {
@RequestFilter
public void myRequestFilter(HttpRequest<?> request, MutablePropagatedContext mutablePropagatedContext) {
try {
String trackingId = request.getHeaders().get("X-TrackingId");
MDC.put("trackingId", trackingId);
mutablePropagatedContext.add(new MdcPropagationContext());
} finally {
MDC.remove("trackingId");
}
}
}
// end::class[]
| MdcFilter |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/Parser.java | {
"start": 4621,
"end": 5846
} | class ____ {
private StreamTokenizer tok;
Lexer(String s) {
tok = new StreamTokenizer(new CharArrayReader(s.toCharArray()));
tok.quoteChar('"');
tok.parseNumbers();
tok.ordinaryChar(',');
tok.ordinaryChar('(');
tok.ordinaryChar(')');
tok.wordChars('$','$');
tok.wordChars('_','_');
}
Token next() throws IOException {
int type = tok.nextToken();
switch (type) {
case StreamTokenizer.TT_EOF:
case StreamTokenizer.TT_EOL:
return null;
case StreamTokenizer.TT_NUMBER:
return new NumToken(tok.nval);
case StreamTokenizer.TT_WORD:
return new StrToken(TType.IDENT, tok.sval);
case '"':
return new StrToken(TType.QUOT, tok.sval);
default:
switch (type) {
case ',':
return new Token(TType.COMMA);
case '(':
return new Token(TType.LPAREN);
case ')':
return new Token(TType.RPAREN);
default:
throw new IOException("Unexpected: " + type);
}
}
}
}
@InterfaceAudience.Public
@InterfaceStability.Evolving
public abstract static | Lexer |
java | apache__kafka | tools/src/main/java/org/apache/kafka/tools/VerifiableConsumer.java | {
"start": 11809,
"end": 11980
} | class ____ extends ConsumerEvent {
@Override
public String name() {
return "startup_complete";
}
}
private static | StartupComplete |
java | bumptech__glide | library/src/main/java/com/bumptech/glide/load/Key.java | {
"start": 675,
"end": 1405
} | interface ____ {
String STRING_CHARSET_NAME = "UTF-8";
Charset CHARSET = Charset.forName(STRING_CHARSET_NAME);
/**
* Adds all uniquely identifying information to the given digest.
*
* <p>Note - Using {@link java.security.MessageDigest#reset()} inside of this method will result
* in undefined behavior.
*/
void updateDiskCacheKey(@NonNull MessageDigest messageDigest);
/**
* For caching to work correctly, implementations <em>must</em> implement this method and {@link
* #hashCode()}.
*/
@Override
boolean equals(Object o);
/**
* For caching to work correctly, implementations <em>must</em> implement this method and {@link
* #equals(Object)}.
*/
@Override
int hashCode();
}
| Key |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/context/properties/bind/JavaBeanBinderTests.java | {
"start": 31388,
"end": 31573
} | class ____ {
private int value;
String getValue() {
return String.valueOf(this.value);
}
void setValue(int value) {
this.value = value;
}
}
static | ExampleMismatchBean |
java | apache__kafka | tools/src/test/java/org/apache/kafka/tools/consumer/group/DeleteConsumerGroupsTest.java | {
"start": 3872,
"end": 19075
} | class ____ {
@Test
public void testDeleteWithTopicOption() {
String[] cgcArgs = new String[]{"--bootstrap-server", "localhost:62241", "--delete", "--group", getDummyGroupId(), "--topic"};
assertThrows(OptionException.class, () -> ConsumerGroupCommandOptions.fromArgs(cgcArgs));
}
@ClusterTest
public void testDeleteCmdNonExistingGroup(ClusterInstance cluster) {
String missingGroupId = getDummyGroupId();
String[] cgcArgs = new String[]{"--bootstrap-server", cluster.bootstrapServers(), "--delete", "--group", missingGroupId};
try (ConsumerGroupCommand.ConsumerGroupService service = getConsumerGroupService(cgcArgs)) {
String output = ToolsTestUtils.grabConsoleOutput(service::deleteGroups);
assertTrue(output.contains("Group '" + missingGroupId + "' could not be deleted due to:") && output.contains(Errors.GROUP_ID_NOT_FOUND.message()),
"The expected error (" + Errors.GROUP_ID_NOT_FOUND + ") was not detected while deleting consumer group");
}
}
@ClusterTest
public void testDeleteNonExistingGroup(ClusterInstance cluster) {
String missingGroupId = getDummyGroupId();
String[] cgcArgs = new String[]{"--bootstrap-server", cluster.bootstrapServers(), "--delete", "--group", missingGroupId};
try (ConsumerGroupCommand.ConsumerGroupService service = getConsumerGroupService(cgcArgs)) {
Map<String, Throwable> result = service.deleteGroups();
assertEquals(1, result.size());
assertNotNull(result.get(missingGroupId));
assertInstanceOf(GroupIdNotFoundException.class,
result.get(missingGroupId),
"The expected error (" + Errors.GROUP_ID_NOT_FOUND + ") was not detected while deleting consumer group");
}
}
@ClusterTest
public void testDeleteNonEmptyGroup(ClusterInstance cluster) throws Exception {
for (GroupProtocol groupProtocol : cluster.supportedGroupProtocols()) {
String groupId = composeGroupId(groupProtocol);
String topicName = composeTopicName(groupProtocol);
String[] cgcArgs = new String[]{"--bootstrap-server", cluster.bootstrapServers(), "--delete", "--group", groupId};
try (
AutoCloseable consumerGroupCloseable = consumerGroupClosable(cluster, groupProtocol, groupId, topicName);
ConsumerGroupCommand.ConsumerGroupService service = getConsumerGroupService(cgcArgs)
) {
TestUtils.waitForCondition(
() -> service.collectGroupMembers(groupId).getValue().get().size() == 1,
"The group did not initialize as expected."
);
String output = ToolsTestUtils.grabConsoleOutput(service::deleteGroups);
Map<String, Throwable> result = service.deleteGroups();
assertTrue(output.contains("Group '" + groupId + "' could not be deleted due to:") && output.contains(Errors.NON_EMPTY_GROUP.message()),
"The expected error (" + Errors.NON_EMPTY_GROUP + ") was not detected while deleting consumer group. Output was: (" + output + ")");
assertNotNull(result.get(groupId),
"Group was deleted successfully, but it shouldn't have been. Result was:(" + result + ")");
assertEquals(1, result.size());
assertNotNull(result.get(groupId));
assertInstanceOf(GroupNotEmptyException.class,
result.get(groupId),
"The expected error (" + Errors.NON_EMPTY_GROUP + ") was not detected while deleting consumer group. Result was:(" + result + ")");
}
}
}
@ClusterTest
void testDeleteEmptyGroup(ClusterInstance cluster) throws Exception {
for (GroupProtocol groupProtocol : cluster.supportedGroupProtocols()) {
String groupId = composeGroupId(groupProtocol);
String topicName = composeTopicName(groupProtocol);
String[] cgcArgs = new String[]{"--bootstrap-server", cluster.bootstrapServers(), "--delete", "--group", groupId};
try (
AutoCloseable consumerGroupCloseable = consumerGroupClosable(cluster, groupProtocol, groupId, topicName);
ConsumerGroupCommand.ConsumerGroupService service = getConsumerGroupService(cgcArgs)
) {
TestUtils.waitForCondition(
() -> service.listConsumerGroups().contains(groupId) && checkGroupState(service, groupId, STABLE),
"The group did not initialize as expected."
);
consumerGroupCloseable.close();
TestUtils.waitForCondition(
() -> checkGroupState(service, groupId, EMPTY),
"The group did not become empty as expected."
);
Map<String, Throwable> result = new HashMap<>();
String output = ToolsTestUtils.grabConsoleOutput(() -> result.putAll(service.deleteGroups()));
assertTrue(output.contains("Deletion of requested consumer groups ('" + groupId + "') was successful."),
"The consumer group could not be deleted as expected");
assertEquals(1, result.size());
assertTrue(result.containsKey(groupId));
assertNull(result.get(groupId), "The consumer group could not be deleted as expected");
}
}
}
@ClusterTest
public void testDeleteCmdAllGroups(ClusterInstance cluster) throws Exception {
for (GroupProtocol groupProtocol : cluster.supportedGroupProtocols()) {
String topicName = composeTopicName(groupProtocol);
// Create 3 groups with 1 consumer each
Map<String, AutoCloseable> groupIdToExecutor = IntStream.rangeClosed(1, 3)
.mapToObj(i -> composeGroupId(groupProtocol) + i)
.collect(Collectors.toMap(Function.identity(), group -> consumerGroupClosable(cluster, groupProtocol, group, topicName)));
String[] cgcArgs = new String[]{"--bootstrap-server", cluster.bootstrapServers(), "--delete", "--all-groups"};
try (ConsumerGroupCommand.ConsumerGroupService service = getConsumerGroupService(cgcArgs)) {
TestUtils.waitForCondition(() ->
new HashSet<>(service.listConsumerGroups()).equals(groupIdToExecutor.keySet()) &&
groupIdToExecutor.keySet().stream().allMatch(groupId -> assertDoesNotThrow(() -> checkGroupState(service, groupId, STABLE))),
"The group did not initialize as expected.");
// Shutdown consumers to empty out groups
for (AutoCloseable consumerGroupExecutor : groupIdToExecutor.values()) {
consumerGroupExecutor.close();
}
TestUtils.waitForCondition(() ->
groupIdToExecutor.keySet().stream().allMatch(groupId -> assertDoesNotThrow(() -> checkGroupState(service, groupId, EMPTY))),
"The group did not become empty as expected.");
String output = ToolsTestUtils.grabConsoleOutput(service::deleteGroups).trim();
Set<String> expectedGroupsForDeletion = groupIdToExecutor.keySet();
Set<String> deletedGroupsGrepped = Arrays.stream(output.substring(output.indexOf('(') + 1, output.indexOf(')')).split(","))
.map(str -> str.replaceAll("'", "").trim())
.collect(Collectors.toSet());
assertTrue(output.matches("Deletion of requested consumer groups (.*) was successful.")
&& Objects.equals(deletedGroupsGrepped, expectedGroupsForDeletion),
"The consumer group(s) could not be deleted as expected");
}
}
}
@ClusterTest
public void testDeleteCmdWithMixOfSuccessAndError(ClusterInstance cluster) throws Exception {
    // Deletes one existing (emptied) group together with one that never existed, and
    // asserts on the console output: a GROUP_ID_NOT_FOUND error for the missing group
    // plus a success line for the real one. Companion test
    // testDeleteWithMixOfSuccessAndError checks the same scenario via the returned map.
    for (GroupProtocol groupProtocol : cluster.supportedGroupProtocols()) {
        String groupId = composeGroupId(groupProtocol);
        String topicName = composeTopicName(groupProtocol);
        String missingGroupId = composeMissingGroupId(groupProtocol);
        String[] cgcArgs = new String[]{"--bootstrap-server", cluster.bootstrapServers(), "--delete", "--group", groupId};
        try (
            AutoCloseable consumerGroupClosable = consumerGroupClosable(cluster, groupProtocol, groupId, topicName);
            ConsumerGroupCommand.ConsumerGroupService service = getConsumerGroupService(cgcArgs)
        ) {
            // Wait for the real group to come up STABLE before shutting its consumer down.
            TestUtils.waitForCondition(
                () -> service.listConsumerGroups().contains(groupId) && checkGroupState(service, groupId, STABLE),
                "The group did not initialize as expected.");
            consumerGroupClosable.close();
            // The group must be EMPTY for deletion to succeed.
            TestUtils.waitForCondition(
                () -> checkGroupState(service, groupId, EMPTY),
                "The group did not become empty as expected.");
            // Re-run the command targeting both the real and the missing group.
            cgcArgs = new String[]{"--bootstrap-server", cluster.bootstrapServers(), "--delete", "--group", groupId, "--group", missingGroupId};
            try (ConsumerGroupCommand.ConsumerGroupService service2 = getConsumerGroupService(cgcArgs)) {
                String output = ToolsTestUtils.grabConsoleOutput(service2::deleteGroups);
                assertTrue(output.contains("Group '" + missingGroupId + "' could not be deleted due to:")
                        && output.contains(Errors.GROUP_ID_NOT_FOUND.message())
                        && output.contains("These consumer groups were deleted successfully: '" + groupId + "'"),
                    "The consumer group deletion did not work as expected");
            }
        }
    }
}
@ClusterTest
public void testDeleteWithMixOfSuccessAndError(ClusterInstance cluster) throws Exception {
    // Same mixed-result scenario as testDeleteCmdWithMixOfSuccessAndError, but asserts
    // on the Map<groupId, Throwable> returned by deleteGroups(): a null value for the
    // successfully deleted group and a GROUP_ID_NOT_FOUND error for the missing one.
    for (GroupProtocol groupProtocol : cluster.supportedGroupProtocols()) {
        String groupId = composeGroupId(groupProtocol);
        String topicName = composeTopicName(groupProtocol);
        String missingGroupId = composeMissingGroupId(groupProtocol);
        String[] cgcArgs = new String[]{"--bootstrap-server", cluster.bootstrapServers(), "--delete", "--group", groupId};
        try (
            AutoCloseable executor = consumerGroupClosable(cluster, groupProtocol, groupId, topicName);
            ConsumerGroupCommand.ConsumerGroupService service = getConsumerGroupService(cgcArgs)
        ) {
            // Wait for the real group to come up STABLE before shutting its consumer down.
            TestUtils.waitForCondition(
                () -> service.listConsumerGroups().contains(groupId) && checkGroupState(service, groupId, STABLE),
                "The group did not initialize as expected.");
            executor.close();
            // The group must be EMPTY for deletion to succeed.
            TestUtils.waitForCondition(
                () -> checkGroupState(service, groupId, EMPTY),
                "The group did not become empty as expected.");
            // Re-run the command targeting both the real and the missing group.
            cgcArgs = new String[]{"--bootstrap-server", cluster.bootstrapServers(), "--delete", "--group", groupId, "--group", missingGroupId};
            try (ConsumerGroupCommand.ConsumerGroupService service2 = getConsumerGroupService(cgcArgs)) {
                Map<String, Throwable> result = service2.deleteGroups();
                // Exactly two entries: success maps to null, failure carries the error message.
                assertTrue(result.size() == 2 &&
                        result.containsKey(groupId) && result.get(groupId) == null &&
                        result.containsKey(missingGroupId) &&
                        result.get(missingGroupId).getMessage().contains(Errors.GROUP_ID_NOT_FOUND.message()),
                    "The consumer group deletion did not work as expected");
            }
        }
    }
}
@Test
public void testDeleteWithUnrecognizedNewConsumerOption() {
    // The long-removed "--new-consumer" flag must be rejected during option parsing,
    // before any broker connection is attempted.
    final String[] args = {
        "--new-consumer",
        "--bootstrap-server", "localhost:62241",
        "--delete",
        "--group", getDummyGroupId()
    };
    assertThrows(OptionException.class, () -> ConsumerGroupCommandOptions.fromArgs(args));
}
/** Returns a protocol-agnostic placeholder group id ("test.dummy") for negative tests. */
private String getDummyGroupId() {
    return composeGroupId(null);
}
/**
 * Builds a per-protocol group id: {@code "test.<protocol>"} for a known protocol,
 * {@code "test.dummy"} when no protocol is given.
 */
private String composeGroupId(GroupProtocol protocol) {
    final String prefix = "test.";
    if (protocol == null) {
        return prefix + "dummy";
    }
    return prefix + protocol.name;
}
/**
 * Builds a per-protocol topic name: {@code "foo.<protocol>"} for a known protocol,
 * {@code "foo.dummy"} when no protocol is given.
 */
private String composeTopicName(GroupProtocol protocol) {
    final String prefix = "foo.";
    if (protocol == null) {
        return prefix + "dummy";
    }
    return prefix + protocol.name;
}
/**
 * Builds the id of a group that intentionally does not exist:
 * {@code "missing.<protocol>"}, or {@code "missing.dummy"} when no protocol is given.
 */
private String composeMissingGroupId(GroupProtocol protocol) {
    final String prefix = "missing.";
    if (protocol == null) {
        return prefix + "dummy";
    }
    return prefix + protocol.name;
}
/**
 * Starts a single consumer subscribed to {@code topicName} under {@code groupId},
 * using the given group protocol. The returned handle stops the consumer on close.
 */
private AutoCloseable consumerGroupClosable(ClusterInstance cluster, GroupProtocol protocol, String groupId, String topicName) {
    // Base consumer configuration for this protocol; no per-test overrides.
    final Map<String, Object> consumerConfigs = composeConfigs(cluster, groupId, protocol.name, Map.of());
    return ConsumerGroupCommandTestUtils.buildConsumers(
        1,                 // one consumer per group
        false,             // do not wait synchronously for assignment
        topicName,
        () -> new KafkaConsumer<String, String>(consumerConfigs)
    );
}
/** Returns true when the group's current state (as reported by the service) equals {@code state}. */
private boolean checkGroupState(ConsumerGroupCommand.ConsumerGroupService service, String groupId, GroupState state) throws Exception {
    final GroupState current = service.collectGroupState(groupId).groupState();
    return Objects.equals(current, state);
}
/**
 * Parses the CLI arguments and creates a ConsumerGroupService whose admin client
 * retries (effectively) forever, so transient broker unavailability cannot fail a test.
 */
private ConsumerGroupCommand.ConsumerGroupService getConsumerGroupService(String[] args) {
    final ConsumerGroupCommandOptions opts = ConsumerGroupCommandOptions.fromArgs(args);
    final Map<String, String> adminOverrides =
        Map.of(AdminClientConfig.RETRIES_CONFIG, Integer.toString(Integer.MAX_VALUE));
    return new ConsumerGroupCommand.ConsumerGroupService(opts, adminOverrides);
}
/**
 * Assembles a consumer configuration map for the given group and protocol.
 * {@code customConfigs} is applied last so callers can override any default.
 */
private Map<String, Object> composeConfigs(ClusterInstance cluster, String groupId, String groupProtocol, Map<String, Object> customConfigs) {
    final Map<String, Object> props = new HashMap<>();
    props.put(BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers());
    props.put(GROUP_ID_CONFIG, groupId);
    props.put(GROUP_PROTOCOL_CONFIG, groupProtocol);
    props.put(KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    props.put(VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    // Only the classic protocol takes a client-side partition assignor.
    if (GroupProtocol.CLASSIC.name.equalsIgnoreCase(groupProtocol)) {
        props.put(PARTITION_ASSIGNMENT_STRATEGY_CONFIG, RangeAssignor.class.getName());
    }
    // Caller-supplied overrides win over the defaults above.
    props.putAll(customConfigs);
    return props;
}
}
| DeleteConsumerGroupsTest |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/synonyms/TransportDeleteSynonymsAction.java | {
"start": 1070,
"end": 1955
} | class ____ extends HandledTransportAction<DeleteSynonymsAction.Request, AcknowledgedResponse> {
private final SynonymsManagementAPIService synonymsManagementAPIService;
@Inject
public TransportDeleteSynonymsAction(TransportService transportService, ActionFilters actionFilters, Client client) {
super(
DeleteSynonymsAction.NAME,
transportService,
actionFilters,
DeleteSynonymsAction.Request::new,
EsExecutors.DIRECT_EXECUTOR_SERVICE
);
this.synonymsManagementAPIService = new SynonymsManagementAPIService(client);
}
@Override
protected void doExecute(Task task, DeleteSynonymsAction.Request request, ActionListener<AcknowledgedResponse> listener) {
synonymsManagementAPIService.deleteSynonymsSet(request.synonymsSetId(), listener);
}
}
| TransportDeleteSynonymsAction |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest/runtime/src/main/java/io/quarkus/resteasy/reactive/server/runtime/exceptionmappers/AuthenticationCompletionExceptionMapper.java | {
"start": 258,
"end": 712
} | class ____ implements ExceptionMapper<AuthenticationCompletionException> {
@Override
public Response toResponse(AuthenticationCompletionException ex) {
if (LaunchMode.current().isDev() && ex.getMessage() != null) {
return Response.status(Response.Status.UNAUTHORIZED).entity(ex.getMessage()).build();
}
return Response.status(Response.Status.UNAUTHORIZED).build();
}
}
| AuthenticationCompletionExceptionMapper |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/cluster/api/NodeSelectionSupport.java | {
"start": 465,
"end": 1190
} | interface ____<API, CMD> {
/**
* @return number of nodes.
*/
int size();
/**
* @return commands API to run on this node selection.
*/
CMD commands();
/**
* Obtain the connection/commands to a particular node.
*
* @param index index of the node
* @return the connection/commands object
*/
API commands(int index);
/**
* Get the {@link RedisClusterNode}.
*
* @param index index of the cluster node
* @return the cluster node
*/
RedisClusterNode node(int index);
/**
* @return map of {@link RedisClusterNode} and the connection/commands objects
*/
Map<RedisClusterNode, API> asMap();
}
| NodeSelectionSupport |
java | google__auto | factory/src/test/resources/bad/FactoryExtendingAbstractClassWithConstructorParams.java | {
"start": 863,
"end": 1018
} | class ____ {
protected AbstractFactory(Object obj) {}
abstract FactoryExtendingAbstractClassWithConstructorParams newInstance();
}
}
| AbstractFactory |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/type/descriptor/java/ObjectJavaType.java | {
"start": 234,
"end": 829
} | class ____ extends AbstractClassJavaType<Object> {
/**
* Singleton access
*/
public static final ObjectJavaType INSTANCE = new ObjectJavaType();
public ObjectJavaType() {
super( Object.class );
}
@Override
public boolean useObjectEqualsHashCode() {
return true;
}
@Override
public boolean isInstance(Object value) {
return true;
}
@Override
public <X> X unwrap(Object value, Class<X> type, WrapperOptions options) {
//noinspection unchecked
return (X) value;
}
@Override
public <X> Object wrap(X value, WrapperOptions options) {
return value;
}
}
| ObjectJavaType |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineEntityFilters.java | {
"start": 9270,
"end": 9345
} | class ____ build an instance of TimelineEntityFilters.
*/
public static | to |
java | grpc__grpc-java | netty/src/test/java/io/grpc/netty/NettyHandlerTestBase.java | {
"start": 5960,
"end": 22632
} | class ____ extends DefaultPromise<Void>
implements ScheduledFuture<Void> {
final java.util.concurrent.ScheduledFuture<?> future;
FakeClockScheduledNettyFuture(
EventLoop eventLoop, final Runnable command, long delay, TimeUnit timeUnit) {
super(eventLoop);
Runnable wrap = new Runnable() {
@Override
public void run() {
try {
command.run();
} catch (Throwable t) {
setFailure(t);
return;
}
if (!isDone()) {
Promise<Void> unused = setSuccess(null);
}
// else: The command itself, such as a shutdown task, might have cancelled all the
// scheduled tasks already.
}
};
future = fakeClock.getScheduledExecutorService().schedule(wrap, delay, timeUnit);
}
@Override
public boolean cancel(boolean mayInterruptIfRunning) {
if (future.cancel(mayInterruptIfRunning)) {
return super.cancel(mayInterruptIfRunning);
}
return false;
}
@Override
public long getDelay(TimeUnit unit) {
return Math.max(future.getDelay(unit), 1L); // never return zero or negative delay.
}
@Override
public int compareTo(Delayed o) {
return future.compareTo(o);
}
}
protected final WriteQueue writeQueue() {
return writeQueue;
}
protected final T handler() {
return handler;
}
protected final EmbeddedChannel channel() {
return channel;
}
protected final ChannelHandlerContext ctx() {
return ctx;
}
protected final Http2FrameWriter frameWriter() {
return frameWriter;
}
protected final Http2FrameReader frameReader() {
return frameReader;
}
protected final ByteBuf content() {
return Unpooled.copiedBuffer(contentAsArray());
}
protected final byte[] contentAsArray() {
return "\000\000\000\000\rhello world".getBytes(UTF_8);
}
protected final Http2FrameWriter verifyWrite() {
return verify(frameWriter);
}
protected final Http2FrameWriter verifyWrite(VerificationMode verificationMode) {
return verify(frameWriter, verificationMode);
}
protected final void channelRead(Object obj) throws Exception {
channel.writeInbound(obj);
}
protected ByteBuf grpcFrame(byte[] message) {
final ByteBuf compressionFrame = Unpooled.buffer(message.length);
MessageFramer framer = new MessageFramer(
new MessageFramer.Sink() {
@Override
public void deliverFrame(
WritableBuffer frame, boolean endOfStream, boolean flush, int numMessages) {
if (frame != null) {
ByteBuf bytebuf = ((NettyWritableBuffer) frame).bytebuf();
compressionFrame.writeBytes(bytebuf);
bytebuf.release();
}
}
},
new NettyWritableBufferAllocator(ByteBufAllocator.DEFAULT),
StatsTraceContext.NOOP);
framer.writePayload(new ByteArrayInputStream(message));
framer.close();
return compressionFrame;
}
protected final ByteBuf grpcDataFrame(int streamId, boolean endStream, byte[] content) {
return dataFrame(streamId, endStream, grpcFrame(content));
}
protected final ByteBuf dataFrame(int streamId, boolean endStream, ByteBuf content) {
ChannelHandlerContext ctx = newMockContext();
new DefaultHttp2FrameWriter().writeData(ctx, streamId, content, 0, endStream, newPromise());
return captureWrite(ctx);
}
protected final ByteBuf pingFrame(boolean ack, long payload) {
ChannelHandlerContext ctx = newMockContext();
new DefaultHttp2FrameWriter().writePing(ctx, ack, payload, newPromise());
return captureWrite(ctx);
}
protected final ByteBuf headersFrame(int streamId, Http2Headers headers) {
ChannelHandlerContext ctx = newMockContext();
new DefaultHttp2FrameWriter().writeHeaders(ctx, streamId, headers, 0, false, newPromise());
return captureWrite(ctx);
}
protected final ByteBuf goAwayFrame(int lastStreamId) {
return goAwayFrame(lastStreamId, 0, Unpooled.EMPTY_BUFFER);
}
protected final ByteBuf goAwayFrame(int lastStreamId, int errorCode, ByteBuf data) {
ChannelHandlerContext ctx = newMockContext();
new DefaultHttp2FrameWriter().writeGoAway(ctx, lastStreamId, errorCode, data, newPromise());
return captureWrite(ctx);
}
protected final ByteBuf rstStreamFrame(int streamId, int errorCode) {
ChannelHandlerContext ctx = newMockContext();
new DefaultHttp2FrameWriter().writeRstStream(ctx, streamId, errorCode, newPromise());
return captureWrite(ctx);
}
protected final ByteBuf serializeSettings(Http2Settings settings) {
ChannelHandlerContext ctx = newMockContext();
new DefaultHttp2FrameWriter().writeSettings(ctx, settings, newPromise());
return captureWrite(ctx);
}
protected final ByteBuf windowUpdate(int streamId, int delta) {
ChannelHandlerContext ctx = newMockContext();
new DefaultHttp2FrameWriter().writeWindowUpdate(ctx, streamId, delta, newPromise());
return captureWrite(ctx);
}
protected final ChannelPromise newPromise() {
return channel.newPromise();
}
protected final Http2Connection connection() {
return handler().connection();
}
protected abstract AbstractStream stream() throws Exception;
@CanIgnoreReturnValue
protected final ChannelFuture enqueue(WriteQueue.QueuedCommand command) {
ChannelFuture future = writeQueue.enqueue(command, true);
channel.runPendingTasks();
return future;
}
protected final ChannelHandlerContext newMockContext() {
ChannelHandlerContext ctx = mock(ChannelHandlerContext.class);
when(ctx.alloc()).thenReturn(UnpooledByteBufAllocator.DEFAULT);
EventLoop eventLoop = mock(EventLoop.class);
when(ctx.executor()).thenReturn(eventLoop);
when(ctx.channel()).thenReturn(channel);
return ctx;
}
protected final ByteBuf captureWrite(ChannelHandlerContext ctx) {
ArgumentCaptor<ByteBuf> captor = ArgumentCaptor.forClass(ByteBuf.class);
verify(ctx, atLeastOnce()).write(captor.capture(), any(ChannelPromise.class));
CompositeByteBuf composite = Unpooled.compositeBuffer();
for (ByteBuf buf : captor.getAllValues()) {
composite.addComponent(buf);
composite.writerIndex(composite.writerIndex() + buf.readableBytes());
}
return composite;
}
protected abstract T newHandler() throws Http2Exception;
protected abstract WriteQueue initWriteQueue();
protected abstract void makeStream() throws Exception;
@Test
public void dataPingSentOnHeaderRecieved() throws Exception {
manualSetUp();
makeStream();
AbstractNettyHandler handler = (AbstractNettyHandler) handler();
handler.setAutoTuneFlowControl(true);
channelRead(dataFrame(3, false, content()));
assertEquals(1, handler.flowControlPing().getPingCount());
}
@Test
public void dataPingAckIsRecognized() throws Exception {
manualSetUp();
makeStream();
AbstractNettyHandler handler = (AbstractNettyHandler) handler();
handler.setAutoTuneFlowControl(true);
channelRead(dataFrame(3, false, content()));
long pingData = handler.flowControlPing().payload();
channelRead(pingFrame(true, pingData));
assertEquals(1, handler.flowControlPing().getPingCount());
assertEquals(1, handler.flowControlPing().getPingReturn());
}
@Test
public void dataSizeSincePingAccumulates() throws Exception {
manualSetUp();
makeStream();
AbstractNettyHandler handler = (AbstractNettyHandler) handler();
handler.setAutoTuneFlowControl(true);
long frameData = 123456;
ByteBuf buff = ctx().alloc().buffer(16);
buff.writeLong(frameData);
int length = buff.readableBytes();
channelRead(dataFrame(3, false, buff.copy()));
channelRead(dataFrame(3, false, buff.copy()));
channelRead(dataFrame(3, false, buff.copy()));
assertEquals(length * 3, handler.flowControlPing().getDataSincePing());
buff.release();
}
@Test
public void windowUpdateMatchesTarget() throws Exception {
manualSetUp();
Http2Stream connectionStream = connection().connectionStream();
Http2LocalFlowController localFlowController = connection().local().flowController();
makeStream();
AbstractNettyHandler handler = (AbstractNettyHandler) handler();
handler.setAutoTuneFlowControl(true);
byte[] data = initXkbBuffer(1);
int wireSize = data.length + 5; // 5 is the size of the header
ByteBuf frame = grpcDataFrame(3, false, data);
channelRead(frame);
int accumulator = wireSize;
// 40 is arbitrary, any number large enough to trigger a window update would work
for (int i = 0; i < 40; i++) {
channelRead(grpcDataFrame(3, false, data));
accumulator += wireSize;
}
long pingData = handler.flowControlPing().payload();
channelRead(pingFrame(true, pingData));
assertEquals(accumulator, handler.flowControlPing().getDataSincePing());
assertEquals(2 * accumulator, localFlowController.initialWindowSize(connectionStream));
}
@Test
public void windowShouldNotExceedMaxWindowSize() throws Exception {
manualSetUp();
makeStream();
AbstractNettyHandler handler = (AbstractNettyHandler) handler();
handler.setAutoTuneFlowControl(true);
Http2Stream connectionStream = connection().connectionStream();
Http2LocalFlowController localFlowController = connection().local().flowController();
int maxWindow = handler.flowControlPing().maxWindow();
fakeClock.forwardTime(10, TimeUnit.SECONDS);
handler.flowControlPing().setDataSizeAndSincePing(maxWindow);
fakeClock.forwardTime(1, TimeUnit.SECONDS);
long payload = handler.flowControlPing().payload();
channelRead(pingFrame(true, payload));
assertEquals(maxWindow, localFlowController.initialWindowSize(connectionStream));
}
@Test
public void transportTracer_windowSizeDefault() throws Exception {
manualSetUp();
TransportStats transportStats = transportTracer.getStats();
assertEquals(Http2CodecUtil.DEFAULT_WINDOW_SIZE, transportStats.localFlowControlWindow);
assertEquals(flowControlWindow, transportStats.remoteFlowControlWindow);
}
@Test
public void transportTracer_windowSize() throws Exception {
flowControlWindow = 1024 * 1024;
manualSetUp();
TransportStats transportStats = transportTracer.getStats();
assertEquals(Http2CodecUtil.DEFAULT_WINDOW_SIZE, transportStats.localFlowControlWindow);
assertEquals(flowControlWindow, transportStats.remoteFlowControlWindow);
}
@Test
public void transportTracer_windowUpdate_remote() throws Exception {
manualSetUp();
TransportStats before = transportTracer.getStats();
assertEquals(Http2CodecUtil.DEFAULT_WINDOW_SIZE, before.localFlowControlWindow);
assertEquals(Http2CodecUtil.DEFAULT_WINDOW_SIZE, before.remoteFlowControlWindow);
ByteBuf serializedSettings = windowUpdate(0, 1000);
channelRead(serializedSettings);
TransportStats after = transportTracer.getStats();
assertEquals(Http2CodecUtil.DEFAULT_WINDOW_SIZE + 1000,
after.localFlowControlWindow);
assertEquals(flowControlWindow, after.remoteFlowControlWindow);
}
@Test
public void transportTracer_windowUpdate_local() throws Exception {
manualSetUp();
TransportStats before = transportTracer.getStats();
assertEquals(Http2CodecUtil.DEFAULT_WINDOW_SIZE, before.localFlowControlWindow);
assertEquals(flowControlWindow, before.remoteFlowControlWindow);
// If the window size is below a certain threshold, netty will wait to apply the update.
// Use a large increment to be sure that it exceeds the threshold.
connection().local().flowController().incrementWindowSize(
connection().connectionStream(), 8 * Http2CodecUtil.DEFAULT_WINDOW_SIZE);
TransportStats after = transportTracer.getStats();
assertEquals(Http2CodecUtil.DEFAULT_WINDOW_SIZE, after.localFlowControlWindow);
assertEquals(flowControlWindow + 8 * Http2CodecUtil.DEFAULT_WINDOW_SIZE,
connection().local().flowController().windowSize(connection().connectionStream()));
}
private AbstractNettyHandler setupPingTest() throws Exception {
this.flowControlWindow = 1024 * 64;
manualSetUp();
makeStream();
AbstractNettyHandler handler = (AbstractNettyHandler) handler();
handler.setAutoTuneFlowControl(true);
return handler;
}
@Test
public void bdpPingLimitOutstanding() throws Exception {
AbstractNettyHandler handler = setupPingTest();
long pingData = handler.flowControlPing().payload();
byte[] data1KbBuf = initXkbBuffer(1);
byte[] data40KbBuf = initXkbBuffer(40);
readXCopies(1, data1KbBuf); // should initiate a ping
readXCopies(1, data40KbBuf); // no ping, already active
fakeClock().forwardTime(20, TimeUnit.MILLISECONDS);
readPingAck(pingData);
assertEquals(1, handler.flowControlPing().getPingCount());
assertEquals(1, handler.flowControlPing().getPingReturn());
readXCopies(4, data40KbBuf); // initiate ping
assertEquals(2, handler.flowControlPing().getPingCount());
fakeClock.forwardTime(1, TimeUnit.MILLISECONDS);
readPingAck(pingData);
readXCopies(1, data1KbBuf); // ping again since had 160K data since last ping started
assertEquals(3, handler.flowControlPing().getPingCount());
fakeClock.forwardTime(1, TimeUnit.MILLISECONDS);
readPingAck(pingData);
fakeClock.forwardTime(1, TimeUnit.MILLISECONDS);
readXCopies(1, data1KbBuf); // no ping, too little data
assertEquals(3, handler.flowControlPing().getPingCount());
}
@Test
public void testPingBackoff() throws Exception {
AbstractNettyHandler handler = setupPingTest();
long pingData = handler.flowControlPing().payload();
byte[] data40KbBuf = initXkbBuffer(40);
handler.flowControlPing().setDataSizeAndSincePing(200000);
for (int i = 0; i <= 10; i++) {
int beforeCount = handler.flowControlPing().getPingCount();
// should resize on 0
readXCopies(6, data40KbBuf); // initiate ping on i= {0, 1, 3, 6, 10}
int afterCount = handler.flowControlPing().getPingCount();
fakeClock().forwardNanos(200);
if (afterCount > beforeCount) {
readPingAck(pingData); // should increase backoff multiplier
}
}
assertEquals(6, handler.flowControlPing().getPingCount());
}
@Test
public void bdpPingWindowResizing() throws Exception {
this.flowControlWindow = 1024 * 8;
manualSetUp();
makeStream();
AbstractNettyHandler handler = (AbstractNettyHandler) handler();
handler.setAutoTuneFlowControl(true);
Http2LocalFlowController localFlowController = connection().local().flowController();
long pingData = handler.flowControlPing().payload();
int initialWindowSize = localFlowController.initialWindowSize();
byte[] data1Kb = initXkbBuffer(1);
byte[] data10Kb = initXkbBuffer(10);
readXCopies(1, data1Kb); // initiate ping
fakeClock().forwardNanos(2);
readPingAck(pingData); // should not resize window because of small target window
assertEquals(initialWindowSize, localFlowController.initialWindowSize());
readXCopies(2, data10Kb); // initiate ping on first
fakeClock().forwardNanos(200);
readPingAck(pingData); // should resize window
int windowSizeA = localFlowController.initialWindowSize();
Assert.assertNotEquals(initialWindowSize, windowSizeA);
readXCopies(3, data10Kb); // initiate ping w/ first 10K packet
fakeClock().forwardNanos(5000);
readPingAck(pingData); // should not resize window as bandwidth didn't increase
Assert.assertEquals(windowSizeA, localFlowController.initialWindowSize());
readXCopies(6, data10Kb); // initiate ping with fist packet
fakeClock().forwardNanos(100);
readPingAck(pingData); // should resize window
int windowSizeB = localFlowController.initialWindowSize();
Assert.assertNotEquals(windowSizeA, windowSizeB);
}
private void readPingAck(long pingData) throws Exception {
channelRead(pingFrame(true, pingData));
channel().releaseOutbound();
}
private void readXCopies(int copies, byte[] data) throws Exception {
for (int i = 0; i < copies; i++) {
channelRead(grpcDataFrame(STREAM_ID, false, data)); // buffer it
stream().request(1); // consume it
channel().releaseOutbound();
}
}
private byte[] initXkbBuffer(int multiple) {
ByteBuffer data = ByteBuffer.allocate(1024 * multiple);
for (int i = 0; i < multiple * 1024 / 4; i++) {
data.putInt(4 * i, 1111);
}
return data.array();
}
}
| FakeClockScheduledNettyFuture |
java | apache__flink | flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/types/PojoTestUtils.java | {
"start": 2930,
"end": 3267
} | class ____ all conditions to be serialized with the
* {@link PojoSerializer}, as documented <a
* href="https://nightlies.apache.org/flink/flink-docs-stable/docs/dev/datastream/fault-tolerance/serialization/types_serialization/#pojos">here</a>,
* without any field being serialized with Kryo.
*
* @param clazz | fulfill |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java | {
"start": 31579,
"end": 31892
} | class ____ the job output data.
* @throws IllegalStateException if the job is submitted
*/
public void setOutputKeyClass(Class<?> theClass
) throws IllegalStateException {
ensureState(JobState.DEFINE);
conf.setOutputKeyClass(theClass);
}
/**
* Set the value | for |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/transport/TransportInterceptor.java | {
"start": 2186,
"end": 2347
} | interface ____ decorate
* {@link #sendRequest(Transport.Connection, String, TransportRequest, TransportRequestOptions, TransportResponseHandler)}
*/
| to |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_1541/Issue1541Mapper.java | {
"start": 682,
"end": 3570
} | class ____ {
public static final Issue1541Mapper INSTANCE = Mappers.getMapper( Issue1541Mapper.class );
public abstract Target mapWithVarArgs(String code, String... parameters);
public abstract Target mapWithArray(String code, String[] parameters);
@Mapping(target = "parameters2", source = "parameters")
public abstract Target mapWithReassigningVarArgs(String code, String... parameters);
public abstract Target mapWithArrayAndVarArgs(String code, String[] parameters, String... parameters2);
@Mappings({
@Mapping(target = "parameters", ignore = true)
})
@BeanMapping(qualifiedByName = "afterMappingParametersAsArray")
public abstract Target mapParametersAsArrayInAfterMapping(String code, String... parameters);
@AfterMapping
@Named( "afterMappingParametersAsArray" )
protected void afterMappingParametersAsArray(@MappingTarget Target target, String[] parameters) {
target.setAfterMappingWithArrayCalled( true );
target.setParameters( parameters );
}
@Mappings({
@Mapping(target = "parameters", ignore = true)
})
@BeanMapping(qualifiedByName = "afterMappingParametersAsVarArgs")
public abstract Target mapParametersAsVarArgsInAfterMapping(String code, String... parameters);
@AfterMapping
@Named( "afterMappingParametersAsVarArgs" )
protected void afterMappingParametersAsVarArgs(@MappingTarget Target target, String... parameters) {
target.setAfterMappingWithVarArgsCalled( true );
target.setParameters( parameters );
}
@Mapping(target = "parameters2", ignore = true)
@BeanMapping(qualifiedByName = "afterMappingContextAsVarArgsUsingVarArgs")
public abstract Target mapContextWithVarArgsInAfterMappingWithVarArgs(String code, String[] parameters,
@Context String... context);
@AfterMapping
@Named( "afterMappingContextAsVarArgsUsingVarArgs" )
protected void afterMappingContextAsVarArgsUsingVarArgs(@MappingTarget Target target, @Context String... context) {
target.setAfterMappingContextWithVarArgsAsVarArgsCalled( true );
target.setParameters2( context );
}
@Mapping(target = "parameters2", ignore = true)
@BeanMapping(qualifiedByName = "afterMappingContextAsVarArgsUsingArray")
public abstract Target mapContextWithVarArgsInAfterMappingWithArray(String code, String[] parameters,
@Context String... context);
@AfterMapping
@Named( "afterMappingContextAsVarArgsUsingArray" )
protected void afterMappingContextAsVarArgsUsingArray(@MappingTarget Target target, @Context String[] context) {
target.setAfterMappingContextWithVarArgsAsArrayCalled( true );
target.setParameters2( context );
}
}
| Issue1541Mapper |
java | apache__logging-log4j2 | log4j-api/src/main/java/org/apache/logging/log4j/util/IndexedStringMap.java | {
"start": 1154,
"end": 1384
} | interface ____ all key-value pairs as a sequence ordered by key, and allows
* keys and values to be accessed by their index in the sequence.
* </p>
*
* @see IndexedReadOnlyStringMap
* @see StringMap
* @since 2.8
*/
public | views |
java | google__truth | core/src/main/java/com/google/common/truth/Platform.java | {
"start": 8564,
"end": 8685
} | interface ____ to allow it to be swapped out for platforms
* that don't include JUnit {@link TestRule} support.
*/
| is |
java | elastic__elasticsearch | x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/boxplot/BoxplotAggregatorSupplier.java | {
"start": 712,
"end": 1063
} | interface ____ {
Aggregator build(
String name,
ValuesSourceConfig config,
DocValueFormat formatter,
double compression,
TDigestExecutionHint executionHint,
AggregationContext context,
Aggregator parent,
Map<String, Object> metadata
) throws IOException;
}
| BoxplotAggregatorSupplier |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhancement/lazy/proxy/ProxyDeletionTest.java | {
"start": 1819,
"end": 5244
} | class ____ {
@Test
public void testGetAndDeleteEEntity(SessionFactoryScope scope) {
scope.inTransaction( session -> {
EEntity entity = session.get( EEntity.class, 17L );
session.remove( entity );
session.remove( entity.getD() );
}
);
}
@BeforeEach
public void prepareTestData(SessionFactoryScope scope) {
scope.inTransaction( session -> {
DEntity d = new DEntity();
d.setD( "bla" );
d.setOid( 1 );
byte[] lBytes = "agdfagdfagfgafgsfdgasfdgfgasdfgadsfgasfdgasfdgasdasfdg".getBytes();
Blob lBlob = session.getLobCreator().createBlob( lBytes );
d.setBlob( lBlob );
BEntity b1 = new BEntity();
b1.setOid( 1 );
b1.setB1( 34 );
b1.setB2( "huhu" );
BEntity b2 = new BEntity();
b2.setOid( 2 );
b2.setB1( 37 );
b2.setB2( "haha" );
Set<BEntity> lBs = new HashSet<>();
lBs.add( b1 );
lBs.add( b2 );
d.setBs( lBs );
AEntity a = new AEntity();
a.setOid( 1 );
a.setA( "hihi" );
d.setA( a );
EEntity e = new EEntity();
e.setOid( 17 );
e.setE1( "Balu" );
e.setE2( "Bär" );
e.setD( d );
d.setE( e );
CEntity c = new CEntity();
c.setOid( 1 );
c.setC1( "ast" );
c.setC2( "qwert" );
c.setC3( "yxcv" );
d.setC( c );
GEntity g = new GEntity();
g.setOid( 1 );
g.getdEntities().add( d );
d.setG( g );
session.persist( b1 );
session.persist( b2 );
session.persist( a );
session.persist( c );
session.persist( g );
session.persist( d );
session.persist( e );
// create a slew of Activity objects, some with Instruction reference
// some without.
for ( int i = 0; i < 30; i++ ) {
final Activity activity = new Activity( i, "Activity #" + i, null );
if ( i % 2 == 0 ) {
final Instruction instr = new Instruction( i, "Instruction #" + i );
activity.setInstruction( instr );
session.persist( instr );
}
else {
final WebApplication webApplication = new WebApplication( i, "http://" + i + ".com" );
webApplication.setName( "name #" + i );
activity.setWebApplication( webApplication );
webApplication.getActivities().add( activity );
session.persist( webApplication );
}
session.persist( activity );
}
RoleEntity roleEntity = new RoleEntity();
roleEntity.setOid( 1L );
SpecializedKey specializedKey = new SpecializedKey();
specializedKey.setOid( 1L );
MoreSpecializedKey moreSpecializedKey = new MoreSpecializedKey();
moreSpecializedKey.setOid( 3L );
SpecializedEntity specializedEntity = new SpecializedEntity();
specializedEntity.setId( 2L );
specializedKey.addSpecializedEntity( specializedEntity );
specializedEntity.setSpecializedKey( specializedKey );
specializedKey.addRole( roleEntity );
roleEntity.setKey( specializedKey );
roleEntity.setSpecializedKey( moreSpecializedKey );
moreSpecializedKey.addRole( roleEntity );
session.persist( specializedEntity );
session.persist( roleEntity );
session.persist( specializedKey );
session.persist( moreSpecializedKey );
}
);
}
@AfterEach
public void cleanUpTestData(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncateMappedObjects();
}
@MappedSuperclass
public static | ProxyDeletionTest |
java | apache__camel | components/camel-huawei/camel-huaweicloud-frs/src/test/java/org/apache/camel/component/huaweicloud/frs/real/LiveDetectionWithVideoFileTest.java | {
"start": 1412,
"end": 3489
} | class ____ extends CamelTestSupport {
TestConfiguration testConfiguration = new TestConfiguration();
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from("direct:trigger_route")
.setProperty(FaceRecognitionProperties.FACE_VIDEO_FILE_PATH,
constant(testConfiguration.getProperty("videoFilePath")))
.to("hwcloud-frs:faceLiveDetection?"
+ "accessKey=" + testConfiguration.getProperty("accessKey")
+ "&secretKey=" + testConfiguration.getProperty("secretKey")
+ "&projectId=" + testConfiguration.getProperty("projectId")
+ "®ion=" + testConfiguration.getProperty("region")
+ "&ignoreSslVerification=true")
.log("perform faceLiveDetection successful")
.to("mock:perform_live_detection_result");
}
};
}
/**
* Following test cases should be manually enabled to perform test against the actual Huawei Cloud Face Recognition
* service with real user credentials. To perform this test, manually comment out the @Disabled annotation and enter
* relevant service parameters in the placeholders above (static variables of this test class)
*
* @throws Exception Exception
*/
@Test
@Disabled("Manually comment out this line once you configure service parameters in placeholders above")
public void testCelebrityRecognition() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:perform_live_detection_result");
mock.expectedMinimumMessageCount(1);
template.sendBody("direct:trigger_route", "");
Exchange responseExchange = mock.getExchanges().get(0);
mock.assertIsSatisfied();
assertTrue(responseExchange.getIn().getBody() instanceof DetectLiveByFileResponse);
}
}
| LiveDetectionWithVideoFileTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/inheritance/join/ReloadMultipleCollectionElementsTest.java | {
"start": 3689,
"end": 4660
} | class ____ {
private Long id;
private String name;
private Company company;
private Set<Customer> customers;
public Flight() {
}
@Id
@Column(name = "flight_id")
public Long getId() {
return id;
}
public void setId(Long long1) {
id = long1;
}
@Column(updatable = false, name = "flight_name", nullable = false, length = 50)
public String getName() {
return name;
}
public void setName(String string) {
name = string;
}
@ManyToOne(cascade = {CascadeType.ALL})
@JoinColumn(name = "comp_id")
public Company getCompany() {
return company;
}
public void setCompany(Company company) {
this.company = company;
}
@ManyToMany(cascade = {CascadeType.PERSIST, CascadeType.MERGE}, fetch = FetchType.EAGER)
public Set<Customer> getCustomers() {
return customers;
}
public void setCustomers(Set<Customer> customers) {
this.customers = customers;
}
}
@Entity( name = "Ticket" )
public static | Flight |
java | resilience4j__resilience4j | resilience4j-core/src/main/java/io/github/resilience4j/core/ContextPropagator.java | {
"start": 8986,
"end": 9408
} | class ____<T> implements ContextPropagator<T> {
@Override
public Supplier<Optional<T>> retrieve() {
return Optional::empty;
}
@Override
public Consumer<Optional<T>> copy() {
return t -> {
};
}
@Override
public Consumer<Optional<T>> clear() {
return t -> {
};
}
}
}
| EmptyContextPropagator |
java | apache__camel | core/camel-core-model/src/main/java/org/apache/camel/model/BeanFactoryDefinition.java | {
"start": 5589,
"end": 8155
} | class ____ use for creating and configuring the bean. The builder will use
* the properties values to configure the bean.
*/
public void setBuilderClass(String builderClass) {
this.builderClass = builderClass;
}
public String getBuilderMethod() {
return builderMethod;
}
/**
* Name of method when using builder class. This method is invoked after configuring to create the actual bean. This
* method is often named build (used by default).
*/
public void setBuilderMethod(String builderMethod) {
this.builderMethod = builderMethod;
}
public Map<Integer, Object> getConstructors() {
return constructors;
}
/**
* Optional constructor arguments for creating the bean. Arguments correspond to specific index of the constructor
* argument list, starting from zero.
*/
public void setConstructors(Map<Integer, Object> constructors) {
this.constructors = constructors;
}
public Map<String, Object> getProperties() {
return properties;
}
/**
* Optional properties to set on the created bean.
*/
public void setProperties(Map<String, Object> properties) {
this.properties = properties;
}
public String getScriptLanguage() {
return scriptLanguage;
}
/**
* The script language to use when using inlined script for creating the bean, such as groovy, java, javascript etc.
*/
public void setScriptLanguage(String scriptLanguage) {
this.scriptLanguage = scriptLanguage;
}
public String getScriptPropertyPlaceholders() {
return scriptPropertyPlaceholders;
}
/**
* Whether the script should support using Camel property placeholder syntax {{ }}.
*/
public void setScriptPropertyPlaceholders(String scriptPropertyPlaceholders) {
this.scriptPropertyPlaceholders = scriptPropertyPlaceholders;
}
/**
* The script to execute that creates the bean when using scripting languages.
*
* If the script use the prefix <tt>resource:</tt> such as <tt>resource:classpath:com/foo/myscript.groovy</tt>,
* <tt>resource:file:/var/myscript.groovy</tt>, then its loaded from the external resource.
*/
public void setScript(String script) {
this.script = script;
}
public String getScript() {
return script;
}
// fluent builders
// ----------------------------------------------------
/**
* What type to use for creating the bean. Can be one of: # | to |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/AsyncFunctionReturnsNullTest.java | {
"start": 876,
"end": 1599
} | class ____ {
private final CompilationTestHelper compilationHelper =
CompilationTestHelper.newInstance(AsyncFunctionReturnsNull.class, getClass());
@Test
public void positiveCase() {
compilationHelper
.addSourceLines(
"AsyncFunctionReturnsNullPositiveCases.java",
"""
package com.google.errorprone.bugpatterns.testdata;
import static com.google.common.util.concurrent.Futures.immediateFuture;
import com.google.common.util.concurrent.AsyncFunction;
import com.google.common.util.concurrent.ListenableFuture;
/** Positive cases for {@link AsyncFunctionReturnsNull}. */
public | AsyncFunctionReturnsNullTest |
java | quarkusio__quarkus | extensions/grpc/deployment/src/test/java/io/quarkus/grpc/server/blocking/inheritance/BlockingFromTransactionalTest.java | {
"start": 792,
"end": 2303
} | class ____ {
private static final String BLOCKING = "blocking";
private static final String NON_BLOCKING = "nonblocking";
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.setArchiveProducer(
() -> ShrinkWrap.create(JavaArchive.class)
.addPackage(BlockingTestServiceGrpc.class.getPackage())
.addClasses(BlockingTestService.class));
@GrpcClient
BlockingTestService client;
@Test
@Timeout(5)
void shouldBlockOnInheritedClassLevelTransactional() {
assertThat(client.notOverridden1(message()).await().indefinitely().getText()).isEqualTo(BLOCKING);
}
@Test
@Timeout(5)
void shouldNotBlockOnInheritedTransactionalMarkedNonBlocking() {
assertThat(client.notOverridden2(message()).await().indefinitely().getText()).isEqualTo(NON_BLOCKING);
}
@Test
@Timeout(5)
void shouldNotBlockOnOverriddenTransactionalMarkedNonBlocking() {
assertThat(client.overridden1(message()).await().indefinitely().getText()).isEqualTo(NON_BLOCKING);
}
@Test
@Timeout(5)
void shouldBlockOnOverriddenTransactional() {
assertThat(client.overridden2(message()).await().indefinitely().getText()).isEqualTo(BLOCKING);
}
private InheritenceTest.Msg message() {
return InheritenceTest.Msg.newBuilder().setText("foo").build();
}
@Transactional
public static | BlockingFromTransactionalTest |
java | elastic__elasticsearch | x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/NeverConditionTests.java | {
"start": 708,
"end": 2076
} | class ____ extends ESTestCase {
public void testExecute() throws Exception {
ExecutableCondition executable = NeverCondition.INSTANCE;
assertFalse(executable.execute(null).met());
}
public void testParserValid() throws Exception {
XContentBuilder builder = jsonBuilder();
builder.startObject();
builder.endObject();
XContentParser parser = createParser(builder);
parser.nextToken();
ExecutableCondition executable = NeverCondition.parse("_id", parser);
assertFalse(executable.execute(null).met());
}
public void testParserInvalid() throws Exception {
XContentBuilder builder = jsonBuilder();
builder.startObject();
builder.field("foo", "bar");
builder.endObject();
XContentParser parser = createParser(builder);
parser.nextToken();
try {
NeverCondition.parse("_id", parser);
fail(
"expected a condition exception trying to parse an invalid condition XContent, ["
+ InternalAlwaysCondition.TYPE
+ "] condition should not parse with a body"
);
} catch (ElasticsearchParseException e) {
assertThat(e.getMessage(), containsString("expected an empty object but found [foo]"));
}
}
}
| NeverConditionTests |
java | apache__rocketmq | broker/src/main/java/org/apache/rocketmq/broker/metrics/ConsumerLagCalculator.java | {
"start": 5997,
"end": 25228
} | class ____ extends BaseCalculateResult {
public long available;
public CalculateAvailableResult(String group, String topic, boolean isRetry) {
super(group, topic, isRetry);
}
}
private void processAllGroup(Consumer<ProcessGroupInfo> consumer) {
for (Map.Entry<String, SubscriptionGroupConfig> subscriptionEntry :
subscriptionGroupManager.getSubscriptionGroupTable().entrySet()) {
String group = subscriptionEntry.getKey();
ConsumerGroupInfo consumerGroupInfo = consumerManager.getConsumerGroupInfo(group, true);
boolean isPop = false;
if (consumerGroupInfo != null) {
isPop = consumerGroupInfo.getConsumeType() == ConsumeType.CONSUME_POP;
}
Set<String> topics;
if (brokerConfig.isUseStaticSubscription()) {
SubscriptionGroupConfig subscriptionGroupConfig = subscriptionEntry.getValue();
if (subscriptionGroupConfig.getSubscriptionDataSet() == null ||
subscriptionGroupConfig.getSubscriptionDataSet().isEmpty()) {
continue;
}
topics = subscriptionGroupConfig.getSubscriptionDataSet()
.stream()
.map(SimpleSubscriptionData::getTopic)
.collect(Collectors.toSet());
} else {
if (consumerGroupInfo == null) {
continue;
}
topics = consumerGroupInfo.getSubscribeTopics();
}
if (null == topics || topics.isEmpty()) {
continue;
}
for (String topic : topics) {
// skip retry topic
if (topic.startsWith(MixAll.RETRY_GROUP_TOPIC_PREFIX)) {
continue;
}
TopicConfig topicConfig = topicConfigManager.selectTopicConfig(topic);
if (topicConfig == null) {
continue;
}
// skip no perm topic
int topicPerm = topicConfig.getPerm() & brokerConfig.getBrokerPermission();
if (!PermName.isReadable(topicPerm) && !PermName.isWriteable(topicPerm)) {
continue;
}
if (isPop) {
String retryTopic = KeyBuilder.buildPopRetryTopic(topic, group, brokerConfig.isEnableRetryTopicV2());
TopicConfig retryTopicConfig = topicConfigManager.selectTopicConfig(retryTopic);
if (retryTopicConfig != null) {
int retryTopicPerm = retryTopicConfig.getPerm() & brokerConfig.getBrokerPermission();
if (PermName.isReadable(retryTopicPerm) || PermName.isWriteable(retryTopicPerm)) {
consumer.accept(new ProcessGroupInfo(group, topic, true, retryTopic));
continue;
}
}
if (brokerConfig.isEnableRetryTopicV2() && brokerConfig.isRetrieveMessageFromPopRetryTopicV1()) {
String retryTopicV1 = KeyBuilder.buildPopRetryTopicV1(topic, group);
TopicConfig retryTopicConfigV1 = topicConfigManager.selectTopicConfig(retryTopicV1);
if (retryTopicConfigV1 != null) {
int retryTopicPerm = retryTopicConfigV1.getPerm() & brokerConfig.getBrokerPermission();
if (PermName.isReadable(retryTopicPerm) || PermName.isWriteable(retryTopicPerm)) {
consumer.accept(new ProcessGroupInfo(group, topic, true, retryTopicV1));
continue;
}
}
}
consumer.accept(new ProcessGroupInfo(group, topic, true, null));
} else {
consumer.accept(new ProcessGroupInfo(group, topic, false, null));
}
}
}
}
public void calculateLag(Consumer<CalculateLagResult> lagRecorder) {
List<CompletableFuture<CalculateLagResult>> futures = new ArrayList<>();
BiConsumer<ConsumerLagCalculator.ProcessGroupInfo,
CompletableFuture<ConsumerLagCalculator.CalculateLagResult>> biConsumer =
(info, future) -> calculate(info, future::complete);
processAllGroup(info -> {
if (info.group == null || info.topic == null) {
return;
}
CompletableFuture<CalculateLagResult> future = new CompletableFuture<>();
if (info.isPop && brokerConfig.isEnableNotifyBeforePopCalculateLag()) {
if (popLongPollingService.notifyMessageArriving(info.topic, -1, info.group,
true, null, 0, null, null,
new PopCommandCallback(biConsumer, info, future))) {
futures.add(future);
return;
}
}
calculate(info, lagRecorder);
});
// Set the maximum wait time to 10 seconds to avoid indefinite blocking
// in case of a fast fail that causes the future to not complete its execution.
try {
CompletableFuture.allOf(futures.toArray(
new CompletableFuture[0])).get(10, TimeUnit.SECONDS);
futures.forEach(future -> {
if (future.isDone() && !future.isCompletedExceptionally()) {
lagRecorder.accept(future.join());
}
});
} catch (Exception e) {
LOGGER.error("Calculate lag timeout after 10 seconds", e);
}
}
public void calculate(ProcessGroupInfo info, Consumer<CalculateLagResult> lagRecorder) {
CalculateLagResult result = new CalculateLagResult(info.group, info.topic, false);
try {
Pair<Long, Long> lag = getConsumerLagStats(info.group, info.topic, info.isPop);
if (lag != null) {
result.lag = lag.getObject1();
result.earliestUnconsumedTimestamp = lag.getObject2();
}
lagRecorder.accept(result);
} catch (ConsumeQueueException e) {
LOGGER.error("Failed to get lag stats", e);
}
if (info.isPop) {
try {
Pair<Long, Long> retryLag = getConsumerLagStats(info.group, info.retryTopic, true);
result = new CalculateLagResult(info.group, info.topic, true);
if (retryLag != null) {
result.lag = retryLag.getObject1();
result.earliestUnconsumedTimestamp = retryLag.getObject2();
}
lagRecorder.accept(result);
} catch (ConsumeQueueException e) {
LOGGER.error("Failed to get lag stats", e);
}
}
}
public void calculateInflight(Consumer<CalculateInflightResult> inflightRecorder) {
processAllGroup(info -> {
CalculateInflightResult result = new CalculateInflightResult(info.group, info.topic, false);
try {
Pair<Long, Long> inFlight = getInFlightMsgStats(info.group, info.topic, info.isPop);
if (inFlight != null) {
result.inFlight = inFlight.getObject1();
result.earliestUnPulledTimestamp = inFlight.getObject2();
}
inflightRecorder.accept(result);
} catch (ConsumeQueueException e) {
LOGGER.error("Failed to get inflight message stats", e);
}
if (info.isPop) {
try {
Pair<Long, Long> retryInFlight = getInFlightMsgStats(info.group, info.retryTopic, true);
result = new CalculateInflightResult(info.group, info.topic, true);
if (retryInFlight != null) {
result.inFlight = retryInFlight.getObject1();
result.earliestUnPulledTimestamp = retryInFlight.getObject2();
}
inflightRecorder.accept(result);
} catch (ConsumeQueueException e) {
LOGGER.error("Failed to get inflight message stats", e);
}
}
});
}
public void calculateAvailable(Consumer<CalculateAvailableResult> availableRecorder) {
processAllGroup(info -> {
CalculateAvailableResult result = new CalculateAvailableResult(info.group, info.topic, false);
try {
result.available = getAvailableMsgCount(info.group, info.topic, info.isPop);
availableRecorder.accept(result);
} catch (ConsumeQueueException e) {
LOGGER.error("Failed to get available message count", e);
}
if (info.isPop) {
try {
long retryAvailable = getAvailableMsgCount(info.group, info.retryTopic, true);
result = new CalculateAvailableResult(info.group, info.topic, true);
result.available = retryAvailable;
availableRecorder.accept(result);
} catch (ConsumeQueueException e) {
LOGGER.error("Failed to get available message count", e);
}
}
});
}
public Pair<Long, Long> getConsumerLagStats(String group, String topic, boolean isPop) throws ConsumeQueueException {
long total = 0L;
long earliestUnconsumedTimestamp = Long.MAX_VALUE;
if (group == null || topic == null) {
return new Pair<>(total, earliestUnconsumedTimestamp);
}
TopicConfig topicConfig = topicConfigManager.selectTopicConfig(topic);
if (topicConfig != null) {
for (int queueId = 0; queueId < topicConfig.getWriteQueueNums(); queueId++) {
Pair<Long, Long> pair = getConsumerLagStats(group, topic, queueId, isPop);
total += pair.getObject1();
earliestUnconsumedTimestamp = Math.min(earliestUnconsumedTimestamp, pair.getObject2());
}
} else {
LOGGER.warn("failed to get config of topic {}", topic);
}
if (earliestUnconsumedTimestamp < 0 || earliestUnconsumedTimestamp == Long.MAX_VALUE) {
earliestUnconsumedTimestamp = 0L;
}
LOGGER.debug("GetConsumerLagStats, topic={}, group={}, lag={}, latency={}", topic, group, total,
earliestUnconsumedTimestamp > 0 ? System.currentTimeMillis() - earliestUnconsumedTimestamp : 0);
return new Pair<>(total, earliestUnconsumedTimestamp);
}
public Pair<Long, Long> getConsumerLagStats(String group, String topic, int queueId, boolean isPop)
throws ConsumeQueueException {
long brokerOffset = messageStore.getMaxOffsetInQueue(topic, queueId);
if (brokerOffset < 0) {
brokerOffset = 0;
}
if (isPop && !brokerConfig.isPopConsumerKVServiceEnable()) {
long pullOffset = popBufferMergeService.getLatestOffset(topic, group, queueId);
if (pullOffset < 0) {
pullOffset = offsetManager.queryOffset(group, topic, queueId);
}
if (pullOffset < 0) {
pullOffset = brokerOffset;
}
long inFlightNum = popInflightMessageCounter.getGroupPopInFlightMessageNum(topic, group, queueId);
long lag = calculateMessageCount(group, topic, queueId, pullOffset, brokerOffset) + inFlightNum;
long consumerOffset = pullOffset - inFlightNum;
long consumerStoreTimeStamp = getStoreTimeStamp(topic, queueId, consumerOffset);
return new Pair<>(lag, consumerStoreTimeStamp);
}
long consumerOffset = offsetManager.queryOffset(group, topic, queueId);
if (consumerOffset < 0) {
consumerOffset = brokerOffset;
}
long lag = calculateMessageCount(group, topic, queueId, consumerOffset, brokerOffset);
long consumerStoreTimeStamp = getStoreTimeStamp(topic, queueId, consumerOffset);
return new Pair<>(lag, consumerStoreTimeStamp);
}
public Pair<Long, Long> getInFlightMsgStats(String group, String topic, boolean isPop) throws ConsumeQueueException {
long total = 0L;
long earliestUnPulledTimestamp = Long.MAX_VALUE;
if (group == null || topic == null) {
return new Pair<>(total, earliestUnPulledTimestamp);
}
TopicConfig topicConfig = topicConfigManager.selectTopicConfig(topic);
if (topicConfig != null) {
for (int queueId = 0; queueId < topicConfig.getWriteQueueNums(); queueId++) {
Pair<Long, Long> pair = getInFlightMsgStats(group, topic, queueId, isPop);
total += pair.getObject1();
earliestUnPulledTimestamp = Math.min(earliestUnPulledTimestamp, pair.getObject2());
}
} else {
LOGGER.warn("failed to get config of topic {}", topic);
}
if (earliestUnPulledTimestamp < 0 || earliestUnPulledTimestamp == Long.MAX_VALUE) {
earliestUnPulledTimestamp = 0L;
}
return new Pair<>(total, earliestUnPulledTimestamp);
}
public Pair<Long, Long> getInFlightMsgStats(String group, String topic, int queueId, boolean isPop)
throws ConsumeQueueException {
if (isPop && !brokerConfig.isPopConsumerKVServiceEnable()) {
long inflight = popInflightMessageCounter.getGroupPopInFlightMessageNum(topic, group, queueId);
long pullOffset = popBufferMergeService.getLatestOffset(topic, group, queueId);
if (pullOffset < 0) {
pullOffset = offsetManager.queryOffset(group, topic, queueId);
}
if (pullOffset < 0) {
pullOffset = messageStore.getMaxOffsetInQueue(topic, queueId);
}
long pullStoreTimeStamp = getStoreTimeStamp(topic, queueId, pullOffset);
return new Pair<>(inflight, pullStoreTimeStamp);
}
long pullOffset = offsetManager.queryPullOffset(group, topic, queueId);
if (pullOffset < 0) {
pullOffset = 0;
}
long commitOffset = offsetManager.queryOffset(group, topic, queueId);
if (commitOffset < 0) {
commitOffset = pullOffset;
}
long inflight = calculateMessageCount(group, topic, queueId, commitOffset, pullOffset);
long pullStoreTimeStamp = getStoreTimeStamp(topic, queueId, pullOffset);
return new Pair<>(inflight, pullStoreTimeStamp);
}
public long getAvailableMsgCount(String group, String topic, boolean isPop) throws ConsumeQueueException {
long total = 0L;
if (group == null || topic == null) {
return total;
}
TopicConfig topicConfig = topicConfigManager.selectTopicConfig(topic);
if (topicConfig != null) {
for (int queueId = 0; queueId < topicConfig.getWriteQueueNums(); queueId++) {
total += getAvailableMsgCount(group, topic, queueId, isPop);
}
} else {
LOGGER.warn("failed to get config of topic {}", topic);
}
return total;
}
public long getAvailableMsgCount(String group, String topic, int queueId, boolean isPop)
throws ConsumeQueueException {
long brokerOffset = messageStore.getMaxOffsetInQueue(topic, queueId);
if (brokerOffset < 0) {
brokerOffset = 0;
}
long pullOffset;
if (isPop && !brokerConfig.isPopConsumerKVServiceEnable()) {
pullOffset = popBufferMergeService.getLatestOffset(topic, group, queueId);
if (pullOffset < 0) {
pullOffset = offsetManager.queryOffset(group, topic, queueId);
}
} else {
pullOffset = offsetManager.queryPullOffset(group, topic, queueId);
}
if (pullOffset < 0) {
pullOffset = brokerOffset;
}
return calculateMessageCount(group, topic, queueId, pullOffset, brokerOffset);
}
public long getStoreTimeStamp(String topic, int queueId, long offset) {
long storeTimeStamp = Long.MAX_VALUE;
if (offset >= 0) {
storeTimeStamp = messageStore.getMessageStoreTimeStamp(topic, queueId, offset);
storeTimeStamp = storeTimeStamp > 0 ? storeTimeStamp : Long.MAX_VALUE;
}
return storeTimeStamp;
}
public long calculateMessageCount(String group, String topic, int queueId, long from, long to) {
long count = to - from;
if (brokerConfig.isEstimateAccumulation() && to > from) {
SubscriptionData subscriptionData = null;
if (brokerConfig.isUseStaticSubscription()) {
SubscriptionGroupConfig subscriptionGroupConfig = subscriptionGroupManager.findSubscriptionGroupConfig(group);
if (subscriptionGroupConfig != null) {
for (SimpleSubscriptionData simpleSubscriptionData : subscriptionGroupConfig.getSubscriptionDataSet()) {
if (topic.equals(simpleSubscriptionData.getTopic())) {
try {
subscriptionData = FilterAPI.buildSubscriptionData(simpleSubscriptionData.getTopic(),
simpleSubscriptionData.getExpression(), simpleSubscriptionData.getExpressionType());
} catch (Exception e) {
LOGGER.error("Try to build subscription for group:{}, topic:{} exception.", group, topic, e);
}
break;
}
}
}
} else {
ConsumerGroupInfo consumerGroupInfo = consumerManager.getConsumerGroupInfo(group, true);
if (consumerGroupInfo != null) {
subscriptionData = consumerGroupInfo.findSubscriptionData(topic);
}
}
if (null != subscriptionData) {
if (ExpressionType.TAG.equalsIgnoreCase(subscriptionData.getExpressionType())
&& !SubscriptionData.SUB_ALL.equals(subscriptionData.getSubString())) {
count = messageStore.estimateMessageCount(topic, queueId, from, to,
new DefaultMessageFilter(subscriptionData));
} else if (ExpressionType.SQL92.equalsIgnoreCase(subscriptionData.getExpressionType())) {
ConsumerFilterData consumerFilterData = consumerFilterManager.get(topic, group);
count = messageStore.estimateMessageCount(topic, queueId, from, to,
new ExpressionMessageFilter(subscriptionData,
consumerFilterData,
consumerFilterManager));
}
}
}
return count < 0 ? 0 : count;
}
}
| CalculateAvailableResult |
java | apache__flink | flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/join/deltajoin/StreamingDeltaJoinOperator.java | {
"start": 27792,
"end": 29060
} | class ____
implements StreamElementQueueEntry<RowData> {
private StreamRecord<?> inputRecord;
private Collection<RowData> completedElements;
private int inputIndex;
public InputIndexAwareStreamRecordQueueEntry() {}
public void reset(StreamRecord<?> inputRecord, int inputIndex) {
this.inputRecord = Preconditions.checkNotNull(inputRecord);
this.inputIndex = inputIndex;
this.completedElements = null;
}
@Override
public boolean isDone() {
return completedElements != null;
}
@Nonnull
@Override
public StreamRecord<?> getInputElement() {
return inputRecord;
}
@Override
public void emitResult(TimestampedCollector<RowData> output) {
output.setTimestamp(inputRecord);
for (RowData r : completedElements) {
output.collect(r);
}
}
@Override
public void complete(Collection<RowData> result) {
this.completedElements = Preconditions.checkNotNull(result);
}
public int getInputIndex() {
return inputIndex;
}
}
}
| InputIndexAwareStreamRecordQueueEntry |
java | apache__camel | components/camel-slack/src/main/java/org/apache/camel/component/slack/helper/SlackMessage.java | {
"start": 2194,
"end": 5327
} | class ____ {
private String fallback;
private String color;
private String pretext;
private String authorName;
private String authorLink;
private String authorIcon;
private String title;
private String titleLink;
private String text;
private String imageUrl;
private String thumbUrl;
private String footer;
private String footerIcon;
private Long ts;
private List<Field> fields;
public String getFallback() {
return fallback;
}
public void setFallback(String fallback) {
this.fallback = fallback;
}
public String getColor() {
return color;
}
public void setColor(String color) {
this.color = color;
}
public String getPretext() {
return pretext;
}
public void setPretext(String pretext) {
this.pretext = pretext;
}
public String getAuthorName() {
return authorName;
}
public void setAuthorName(String authorName) {
this.authorName = authorName;
}
public String getAuthorLink() {
return authorLink;
}
public void setAuthorLink(String authorLink) {
this.authorLink = authorLink;
}
public String getAuthorIcon() {
return authorIcon;
}
public void setAuthorIcon(String authorIcon) {
this.authorIcon = authorIcon;
}
public String getTitle() {
return title;
}
public void setTitle(String title) {
this.title = title;
}
public String getTitleLink() {
return titleLink;
}
public void setTitleLink(String titleLink) {
this.titleLink = titleLink;
}
public String getText() {
return text;
}
public void setText(String text) {
this.text = text;
}
public String getImageUrl() {
return imageUrl;
}
public void setImageUrl(String imageUrl) {
this.imageUrl = imageUrl;
}
public String getThumbUrl() {
return thumbUrl;
}
public void setThumbUrl(String thumbUrl) {
this.thumbUrl = thumbUrl;
}
public String getFooter() {
return footer;
}
public void setFooter(String footer) {
this.footer = footer;
}
public String getFooterIcon() {
return footerIcon;
}
public void setFooterIcon(String footerIcon) {
this.footerIcon = footerIcon;
}
public Long getTs() {
return ts;
}
public void setTs(Long ts) {
this.ts = ts;
}
public List<Field> getFields() {
return fields;
}
public void setFields(List<Field> fields) {
this.fields = fields;
}
public static | Attachment |
java | spring-projects__spring-boot | core/spring-boot/src/main/java/org/springframework/boot/logging/logback/LogbackRuntimeHints.java | {
"start": 1345,
"end": 2933
} | class ____ implements RuntimeHintsRegistrar {
@Override
public void registerHints(RuntimeHints hints, @Nullable ClassLoader classLoader) {
if (!ClassUtils.isPresent("ch.qos.logback.classic.LoggerContext", classLoader)) {
return;
}
ReflectionHints reflection = hints.reflection();
registerHintsForLogbackLoggingSystemTypeChecks(reflection, classLoader);
registerHintsForBuiltInLogbackConverters(reflection);
registerHintsForSpringBootConverters(reflection);
}
private void registerHintsForLogbackLoggingSystemTypeChecks(ReflectionHints reflection,
@Nullable ClassLoader classLoader) {
reflection.registerType(LoggerContext.class);
reflection.registerTypeIfPresent(classLoader, "org.slf4j.bridge.SLF4JBridgeHandler", (typeHint) -> {
});
}
private void registerHintsForBuiltInLogbackConverters(ReflectionHints reflection) {
registerForPublicConstructorInvocation(reflection, DateTokenConverter.class, IntegerTokenConverter.class,
SyslogStartConverter.class);
}
private void registerHintsForSpringBootConverters(ReflectionHints reflection) {
registerForPublicConstructorInvocation(reflection, ColorConverter.class,
EnclosedInSquareBracketsConverter.class, ExtendedWhitespaceThrowableProxyConverter.class,
WhitespaceThrowableProxyConverter.class, CorrelationIdConverter.class);
}
private void registerForPublicConstructorInvocation(ReflectionHints reflection, Class<?>... classes) {
reflection.registerTypes(TypeReference.listOf(classes),
(hint) -> hint.withMembers(MemberCategory.INVOKE_PUBLIC_CONSTRUCTORS));
}
}
| LogbackRuntimeHints |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerState.java | {
"start": 758,
"end": 1803
} | enum ____ implements Writeable {
// Indexer is running, but not actively indexing data (e.g. it's idle)
STARTED,
// Indexer is actively indexing data
INDEXING,
// Transition state to where an indexer has acknowledged the stop
// but is still in process of halting
STOPPING,
// Indexer is "paused" and ignoring scheduled triggers
STOPPED,
// Something (internal or external) has requested the indexer abort and shutdown
ABORTING;
public final ParseField STATE = new ParseField("job_state");
public static IndexerState fromString(String name) {
return valueOf(name.trim().toUpperCase(Locale.ROOT));
}
public static IndexerState fromStream(StreamInput in) throws IOException {
return in.readEnum(IndexerState.class);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
IndexerState state = this;
out.writeEnum(state);
}
public String value() {
return name().toLowerCase(Locale.ROOT);
}
}
| IndexerState |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.