language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | apache__camel | components/camel-bindy/src/main/java/org/apache/camel/dataformat/bindy/annotation/CsvRecord.java | {
"start": 3680,
"end": 5278
} | class ____ the model and must be declared
* one time.
*/
String quote() default "\"";
/**
* Indicate if the values (and headers) must be quoted when marshaling (optional)
*/
boolean quoting() default false;
/**
* Indicate if the values must be escaped when quoting (optional)
*/
boolean quotingEscaped() default false;
/**
* Indicate if the values should be quoted only when needed (optional) - if enabled then the value is only quoted
* when it contains the configured separator, quote, or crlf characters. The quoting option must also be enabled.
*/
boolean quotingOnlyWhenNeeded() default false;
/**
* Last record spans rest of line (optional) - if enabled then the last column is auto spanned to end of line, for
* example if it is a comment, etc this allows the line to contain all characters, also the delimiter char.
*/
boolean autospanLine() default false;
/**
* The allowEmptyStream parameter will allow to prcoess the unavaiable stream for CSV file.
*/
boolean allowEmptyStream() default false;
/**
* The endWithLineBreak parameter flags if the CSV file should end with a line break or not (optional)
*/
boolean endWithLineBreak() default true;
/**
* The remove quotes parameter flags if unmarshalling should try to remove quotes for each field
*/
boolean removeQuotes() default true;
/**
* Whether to trim each line (stand and end) before parsing the line into data fields.
*/
boolean trimLine() default true;
}
| of |
java | apache__dubbo | dubbo-metrics/dubbo-metrics-api/src/main/java/org/apache/dubbo/metrics/data/RtStatComposite.java | {
"start": 2424,
"end": 10989
} | class ____ extends AbstractMetricsExport {
private final AtomicBoolean samplesChanged = new AtomicBoolean(true);
public RtStatComposite(ApplicationModel applicationModel) {
super(applicationModel);
}
private final ConcurrentHashMap<String, List<LongContainer<? extends Number>>> rtStats = new ConcurrentHashMap<>();
public void init(MetricsPlaceValue... placeValues) {
if (placeValues == null) {
return;
}
for (MetricsPlaceValue placeValue : placeValues) {
List<LongContainer<? extends Number>> containers = initStats(placeValue);
for (LongContainer<? extends Number> container : containers) {
ConcurrentHashMapUtils.computeIfAbsent(
rtStats, container.getMetricsKeyWrapper().getType(), k -> new ArrayList<>())
.add(container);
}
}
samplesChanged.set(true);
}
private List<LongContainer<? extends Number>> initStats(MetricsPlaceValue placeValue) {
List<LongContainer<? extends Number>> singleRtStats = new ArrayList<>();
singleRtStats.add(new AtomicLongContainer(new MetricsKeyWrapper(MetricsKey.METRIC_RT_LAST, placeValue)));
singleRtStats.add(new LongAccumulatorContainer(
new MetricsKeyWrapper(MetricsKey.METRIC_RT_MIN, placeValue),
new LongAccumulator(Long::min, Long.MAX_VALUE)));
singleRtStats.add(new LongAccumulatorContainer(
new MetricsKeyWrapper(MetricsKey.METRIC_RT_MAX, placeValue),
new LongAccumulator(Long::max, Long.MIN_VALUE)));
singleRtStats.add(new AtomicLongContainer(
new MetricsKeyWrapper(MetricsKey.METRIC_RT_SUM, placeValue),
(responseTime, longAccumulator) -> longAccumulator.addAndGet(responseTime)));
// AvgContainer is a special counter that stores the number of times but outputs function of sum/times
AtomicLongContainer avgContainer = new AtomicLongContainer(
new MetricsKeyWrapper(MetricsKey.METRIC_RT_AVG, placeValue), (k, v) -> v.incrementAndGet());
avgContainer.setValueSupplier(applicationName -> {
LongContainer<? extends Number> totalContainer = rtStats.values().stream()
.flatMap(List::stream)
.filter(longContainer -> longContainer.isKeyWrapper(MetricsKey.METRIC_RT_SUM, placeValue.getType()))
.findFirst()
.get();
AtomicLong totalRtTimes = avgContainer.get(applicationName);
AtomicLong totalRtSum = (AtomicLong) totalContainer.get(applicationName);
return totalRtSum.get() / totalRtTimes.get();
});
singleRtStats.add(avgContainer);
return singleRtStats;
}
public void calcServiceKeyRt(String registryOpType, Long responseTime, Metric key) {
for (LongContainer container : rtStats.get(registryOpType)) {
Number current = (Number) container.get(key);
if (current == null) {
container.putIfAbsent(key, container.getInitFunc().apply(key));
samplesChanged.set(true);
current = (Number) container.get(key);
}
container.getConsumerFunc().accept(responseTime, current);
}
}
public void calcServiceKeyRt(Invocation invocation, String registryOpType, Long responseTime) {
List<Action> actions;
if (invocation.getServiceModel() != null && invocation.getServiceModel().getServiceKey() != null) {
Map<String, Object> attributeMap =
invocation.getServiceModel().getServiceMetadata().getAttributeMap();
Map<String, List<Action>> cache = (Map<String, List<Action>>) attributeMap.get("ServiceKeyRt");
if (cache == null) {
attributeMap.putIfAbsent("ServiceKeyRt", new ConcurrentHashMap<>(32));
cache = (Map<String, List<Action>>) attributeMap.get("ServiceKeyRt");
}
actions = cache.get(registryOpType);
if (actions == null) {
actions = calServiceRtActions(invocation, registryOpType);
cache.putIfAbsent(registryOpType, actions);
samplesChanged.set(true);
actions = cache.get(registryOpType);
}
} else {
actions = calServiceRtActions(invocation, registryOpType);
}
for (Action action : actions) {
action.run(responseTime);
}
}
private List<Action> calServiceRtActions(Invocation invocation, String registryOpType) {
List<Action> actions;
actions = new LinkedList<>();
ServiceKeyMetric key = new ServiceKeyMetric(getApplicationModel(), invocation.getTargetServiceUniqueName());
for (LongContainer container : rtStats.get(registryOpType)) {
Number current = (Number) container.get(key);
if (current == null) {
container.putIfAbsent(key, container.getInitFunc().apply(key));
samplesChanged.set(true);
current = (Number) container.get(key);
}
actions.add(new Action(container.getConsumerFunc(), current));
}
return actions;
}
public void calcMethodKeyRt(Invocation invocation, String registryOpType, Long responseTime) {
List<Action> actions;
if (getServiceLevel()
&& invocation.getServiceModel() != null
&& invocation.getServiceModel().getServiceMetadata() != null) {
Map<String, Object> attributeMap =
invocation.getServiceModel().getServiceMetadata().getAttributeMap();
Map<String, List<Action>> cache = (Map<String, List<Action>>) attributeMap.get("MethodKeyRt");
if (cache == null) {
attributeMap.putIfAbsent("MethodKeyRt", new ConcurrentHashMap<>(32));
cache = (Map<String, List<Action>>) attributeMap.get("MethodKeyRt");
}
actions = cache.get(registryOpType);
if (actions == null) {
actions = calMethodRtActions(invocation, registryOpType);
cache.putIfAbsent(registryOpType, actions);
samplesChanged.set(true);
actions = cache.get(registryOpType);
}
} else {
actions = calMethodRtActions(invocation, registryOpType);
}
for (Action action : actions) {
action.run(responseTime);
}
}
private List<Action> calMethodRtActions(Invocation invocation, String registryOpType) {
List<Action> actions;
actions = new LinkedList<>();
for (LongContainer container : rtStats.get(registryOpType)) {
MethodMetric key = new MethodMetric(getApplicationModel(), invocation, getServiceLevel());
Number current = (Number) container.get(key);
if (current == null) {
container.putIfAbsent(key, container.getInitFunc().apply(key));
samplesChanged.set(true);
current = (Number) container.get(key);
}
actions.add(new Action(container.getConsumerFunc(), current));
}
return actions;
}
public List<MetricSample> export(MetricsCategory category) {
List<MetricSample> list = new ArrayList<>();
for (List<LongContainer<? extends Number>> containers : rtStats.values()) {
for (LongContainer<? extends Number> container : containers) {
MetricsKeyWrapper metricsKeyWrapper = container.getMetricsKeyWrapper();
for (Metric key : container.keySet()) {
// Use keySet to obtain the original key instance reference of ConcurrentHashMap to avoid early
// recycling of the micrometer
list.add(new GaugeMetricSample<>(
metricsKeyWrapper.targetKey(),
metricsKeyWrapper.targetDesc(),
key.getTags(),
category,
key,
value -> container.getValueSupplier().apply(value)));
}
}
}
return list;
}
public List<LongContainer<? extends Number>> getRtStats() {
return rtStats.values().stream().flatMap(List::stream).collect(Collectors.toList());
}
private static | RtStatComposite |
java | google__guava | android/guava/src/com/google/common/collect/MinMaxPriorityQueue.java | {
"start": 7649,
"end": 17605
} | class ____<B> {
/*
* TODO(kevinb): when the dust settles, see if we still need this or can
* just default to DEFAULT_CAPACITY.
*/
private static final int UNSET_EXPECTED_SIZE = -1;
private final Comparator<B> comparator;
private int expectedSize = UNSET_EXPECTED_SIZE;
private int maximumSize = Integer.MAX_VALUE;
private Builder(Comparator<B> comparator) {
this.comparator = checkNotNull(comparator);
}
/**
* Configures this builder to build min-max priority queues with an initial expected size of
* {@code expectedSize}.
*/
@CanIgnoreReturnValue
public Builder<B> expectedSize(int expectedSize) {
checkArgument(expectedSize >= 0);
this.expectedSize = expectedSize;
return this;
}
/**
* Configures this builder to build {@code MinMaxPriorityQueue} instances that are limited to
* {@code maximumSize} elements. Each time a queue grows beyond this bound, it immediately
* removes its greatest element (according to its comparator), which might be the element that
* was just added.
*/
@CanIgnoreReturnValue
public Builder<B> maximumSize(int maximumSize) {
checkArgument(maximumSize > 0);
this.maximumSize = maximumSize;
return this;
}
/**
* Builds a new min-max priority queue using the previously specified options, and having no
* initial contents.
*/
public <T extends B> MinMaxPriorityQueue<T> create() {
return create(Collections.emptySet());
}
/**
* Builds a new min-max priority queue using the previously specified options, and having the
* given initial elements.
*/
public <T extends B> MinMaxPriorityQueue<T> create(Iterable<? extends T> initialContents) {
MinMaxPriorityQueue<T> queue =
new MinMaxPriorityQueue<>(
this, initialQueueSize(expectedSize, maximumSize, initialContents));
for (T element : initialContents) {
queue.offer(element);
}
return queue;
}
@SuppressWarnings("unchecked") // safe "contravariant cast"
private <T extends B> Ordering<T> ordering() {
return Ordering.from((Comparator<T>) comparator);
}
}
private final Heap minHeap;
private final Heap maxHeap;
@VisibleForTesting final int maximumSize;
private @Nullable Object[] queue;
private int size;
private int modCount;
private MinMaxPriorityQueue(Builder<? super E> builder, int queueSize) {
Ordering<E> ordering = builder.ordering();
this.minHeap = new Heap(ordering);
this.maxHeap = new Heap(ordering.reverse());
minHeap.otherHeap = maxHeap;
maxHeap.otherHeap = minHeap;
this.maximumSize = builder.maximumSize;
// TODO(kevinb): pad?
this.queue = new Object[queueSize];
}
@Override
public int size() {
return size;
}
/**
* Adds the given element to this queue. If this queue has a maximum size, after adding {@code
* element} the queue will automatically evict its greatest element (according to its comparator),
* which may be {@code element} itself.
*
* @return {@code true} always
*/
@CanIgnoreReturnValue
@Override
public boolean add(E element) {
offer(element);
return true;
}
@CanIgnoreReturnValue
@Override
public boolean addAll(Collection<? extends E> newElements) {
boolean modified = false;
for (E element : newElements) {
offer(element);
modified = true;
}
return modified;
}
/**
* Adds the given element to this queue. If this queue has a maximum size, after adding {@code
* element} the queue will automatically evict its greatest element (according to its comparator),
* which may be {@code element} itself.
*/
@CanIgnoreReturnValue
@Override
public boolean offer(E element) {
checkNotNull(element);
modCount++;
int insertIndex = size++;
growIfNeeded();
// Adds the element to the end of the heap and bubbles it up to the correct
// position.
heapForIndex(insertIndex).bubbleUp(insertIndex, element);
return size <= maximumSize || pollLast() != element;
}
@CanIgnoreReturnValue
@Override
public @Nullable E poll() {
return isEmpty() ? null : removeAndGet(0);
}
@SuppressWarnings("unchecked") // we must carefully only allow Es to get in
E elementData(int index) {
/*
* requireNonNull is safe as long as we're careful to call this method only with populated
* indexes.
*/
return (E) requireNonNull(queue[index]);
}
@Override
public @Nullable E peek() {
return isEmpty() ? null : elementData(0);
}
/** Returns the index of the max element. */
private int getMaxElementIndex() {
switch (size) {
case 1:
return 0; // The lone element in the queue is the maximum.
case 2:
return 1; // The lone element in the maxHeap is the maximum.
default:
// The max element must sit on the first level of the maxHeap. It is
// actually the *lesser* of the two from the maxHeap's perspective.
return (maxHeap.compareElements(1, 2) <= 0) ? 1 : 2;
}
}
/**
* Removes and returns the least element of this queue, or returns {@code null} if the queue is
* empty.
*/
@CanIgnoreReturnValue
public @Nullable E pollFirst() {
return poll();
}
/**
* Removes and returns the least element of this queue.
*
* @throws NoSuchElementException if the queue is empty
*/
@CanIgnoreReturnValue
public E removeFirst() {
return remove();
}
/**
* Retrieves, but does not remove, the least element of this queue, or returns {@code null} if the
* queue is empty.
*/
public @Nullable E peekFirst() {
return peek();
}
/**
* Removes and returns the greatest element of this queue, or returns {@code null} if the queue is
* empty.
*/
@CanIgnoreReturnValue
public @Nullable E pollLast() {
return isEmpty() ? null : removeAndGet(getMaxElementIndex());
}
/**
* Removes and returns the greatest element of this queue.
*
* @throws NoSuchElementException if the queue is empty
*/
@CanIgnoreReturnValue
public E removeLast() {
if (isEmpty()) {
throw new NoSuchElementException();
}
return removeAndGet(getMaxElementIndex());
}
/**
* Retrieves, but does not remove, the greatest element of this queue, or returns {@code null} if
* the queue is empty.
*/
public @Nullable E peekLast() {
return isEmpty() ? null : elementData(getMaxElementIndex());
}
/**
* Removes the element at position {@code index}.
*
* <p>Normally this method leaves the elements at up to {@code index - 1}, inclusive, untouched.
* Under these circumstances, it returns {@code null}.
*
* <p>Occasionally, in order to maintain the heap invariant, it must swap a later element of the
* list with one before {@code index}. Under these circumstances it returns a pair of elements as
* a {@link MoveDesc}. The first one is the element that was previously at the end of the heap and
* is now at some position before {@code index}. The second element is the one that was swapped
* down to replace the element at {@code index}. This fact is used by iterator.remove so as to
* visit elements during a traversal once and only once.
*/
@VisibleForTesting
@CanIgnoreReturnValue
@Nullable MoveDesc<E> removeAt(int index) {
checkPositionIndex(index, size);
modCount++;
size--;
if (size == index) {
queue[size] = null;
return null;
}
E actualLastElement = elementData(size);
int lastElementAt = heapForIndex(size).swapWithConceptuallyLastElement(actualLastElement);
if (lastElementAt == index) {
// 'actualLastElement' is now at 'lastElementAt', and the element that was at 'lastElementAt'
// is now at the end of queue. If that's the element we wanted to remove in the first place,
// don't try to (incorrectly) trickle it. Instead, just delete it and we're done.
queue[size] = null;
return null;
}
E toTrickle = elementData(size);
queue[size] = null;
MoveDesc<E> changes = fillHole(index, toTrickle);
if (lastElementAt < index) {
// Last element is moved to before index, swapped with trickled element.
if (changes == null) {
// The trickled element is still after index.
return new MoveDesc<>(actualLastElement, toTrickle);
} else {
// The trickled element is back before index, but the replaced element
// has now been moved after index.
return new MoveDesc<>(actualLastElement, changes.replaced);
}
}
// Trickled element was after index to begin with, no adjustment needed.
return changes;
}
private @Nullable MoveDesc<E> fillHole(int index, E toTrickle) {
Heap heap = heapForIndex(index);
// We consider elementData(index) a "hole", and we want to fill it
// with the last element of the heap, toTrickle.
// Since the last element of the heap is from the bottom level, we
// optimistically fill index position with elements from lower levels,
// moving the hole down. In most cases this reduces the number of
// comparisons with toTrickle, but in some cases we will need to bubble it
// all the way up again.
int vacated = heap.fillHoleAt(index);
// Try to see if toTrickle can be bubbled up min levels.
int bubbledTo = heap.bubbleUpAlternatingLevels(vacated, toTrickle);
if (bubbledTo == vacated) {
// Could not bubble toTrickle up min levels, try moving
// it from min level to max level (or max to min level) and bubble up
// there.
return heap.tryCrossOverAndBubbleUp(index, vacated, toTrickle);
} else {
return (bubbledTo < index) ? new MoveDesc<E>(toTrickle, elementData(index)) : null;
}
}
// Returned from removeAt() to iterator.remove()
private static final | Builder |
java | apache__camel | components/camel-vertx/camel-vertx-websocket/src/main/java/org/apache/camel/component/vertx/websocket/VertxWebsocketConsumer.java | {
"start": 1274,
"end": 5395
} | class ____ extends DefaultConsumer {
private final VertxWebsocketEndpoint endpoint;
public VertxWebsocketConsumer(VertxWebsocketEndpoint endpoint, Processor processor) {
super(endpoint, processor);
this.endpoint = endpoint;
}
@Override
public boolean isHostedService() {
return true;
}
@Override
protected void doStart() throws Exception {
getComponent().connectConsumer(this);
super.doStart();
}
@Override
protected void doStop() throws Exception {
getComponent().disconnectConsumer(this);
super.doStop();
}
@Override
public VertxWebsocketEndpoint getEndpoint() {
return endpoint;
}
public VertxWebsocketComponent getComponent() {
return endpoint.getComponent();
}
public void onMessage(String connectionKey, Object message, SocketAddress remote, RoutingContext routingContext) {
Exchange exchange = createExchange(true);
exchange.getMessage().setBody(message);
populateExchangeHeaders(exchange, connectionKey, remote, routingContext, VertxWebsocketEvent.MESSAGE);
processExchange(exchange, routingContext);
}
public void onException(String connectionKey, Throwable cause, SocketAddress remote, RoutingContext routingContext) {
if (cause == ConnectionBase.CLOSED_EXCEPTION) {
// Ignore as VertxWebsocketHost registers a closeHandler to trap WebSocket close events
return;
}
Exchange exchange = createExchange(false);
populateExchangeHeaders(exchange, connectionKey, remote, routingContext, VertxWebsocketEvent.ERROR);
getExceptionHandler().handleException("Error processing exchange", exchange, cause);
releaseExchange(exchange, false);
}
public void onOpen(String connectionKey, SocketAddress remote, RoutingContext routingContext, ServerWebSocket webSocket) {
Exchange exchange = createExchange(true);
populateExchangeHeaders(exchange, connectionKey, remote, routingContext, VertxWebsocketEvent.OPEN);
exchange.getMessage().setBody(webSocket);
processExchange(exchange, routingContext);
}
public void onClose(String connectionKey, SocketAddress remote, RoutingContext routingContext) {
Exchange exchange = createExchange(true);
populateExchangeHeaders(exchange, connectionKey, remote, routingContext, VertxWebsocketEvent.CLOSE);
processExchange(exchange, routingContext);
}
protected void populateExchangeHeaders(
Exchange exchange, String connectionKey, SocketAddress remote, RoutingContext routingContext,
VertxWebsocketEvent event) {
Message message = exchange.getMessage();
Map<String, Object> headers = message.getHeaders();
message.setHeader(VertxWebsocketConstants.REMOTE_ADDRESS, remote);
message.setHeader(VertxWebsocketConstants.CONNECTION_KEY, connectionKey);
message.setHeader(VertxWebsocketConstants.EVENT, event);
routingContext.queryParams()
.forEach((name, value) -> VertxWebsocketHelper.appendHeader(headers, name, value));
routingContext.pathParams()
.forEach((name, value) -> VertxWebsocketHelper.appendHeader(headers, name, value));
}
protected void processExchange(Exchange exchange, RoutingContext routingContext) {
routingContext.vertx().executeBlocking(() -> {
createUoW(exchange);
getProcessor().process(exchange);
return null;
}, false)
.onComplete(result -> {
try {
if (result.failed()) {
Throwable cause = result.cause();
getExceptionHandler().handleException(cause);
routingContext.fail(cause);
}
} finally {
doneUoW(exchange);
releaseExchange(exchange, false);
}
});
}
}
| VertxWebsocketConsumer |
java | elastic__elasticsearch | x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/CCMPersistentStorageServiceIT.java | {
"start": 607,
"end": 1585
} | class ____ extends CCMSingleNodeIT {
private static final AtomicReference<CCMPersistentStorageService> ccmPersistentStorageService = new AtomicReference<>();
public CCMPersistentStorageServiceIT() {
super(new Provider() {
@Override
public void store(CCMModel ccmModel, ActionListener<Void> listener) {
ccmPersistentStorageService.get().store(ccmModel, listener);
}
@Override
public void get(ActionListener<CCMModel> listener) {
ccmPersistentStorageService.get().get(listener);
}
@Override
public void delete(ActionListener<Void> listener) {
ccmPersistentStorageService.get().delete(listener);
}
});
}
@Before
public void createComponents() {
ccmPersistentStorageService.set(node().injector().getInstance(CCMPersistentStorageService.class));
}
}
| CCMPersistentStorageServiceIT |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/mapper/NestedPathFieldMapper.java | {
"start": 1977,
"end": 3052
} | class ____ extends StringFieldType {
private NestedPathFieldType(String name) {
super(name, IndexType.terms(true, false), false, TextSearchInfo.SIMPLE_MATCH_ONLY, Collections.emptyMap());
}
@Override
public String typeName() {
return NAME;
}
@Override
public Query existsQuery(SearchExecutionContext context) {
throw new UnsupportedOperationException("Cannot run exists() query against the nested field path");
}
@Override
public ValueFetcher valueFetcher(SearchExecutionContext context, String format) {
throw new IllegalArgumentException("Cannot fetch values for internal field [" + name() + "].");
}
@Override
public boolean mayExistInIndex(SearchExecutionContext context) {
return false;
}
}
private NestedPathFieldMapper(String name) {
super(new NestedPathFieldType(name));
}
@Override
protected String contentType() {
return NAME;
}
}
| NestedPathFieldType |
java | elastic__elasticsearch | modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/UniqueTokenFilterFactory.java | {
"start": 821,
"end": 1712
} | class ____ extends AbstractTokenFilterFactory {
static final String ONLY_ON_SAME_POSITION = "only_on_same_position";
private final boolean onlyOnSamePosition;
private final boolean correctPositionIncrement;
UniqueTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(name);
this.onlyOnSamePosition = settings.getAsBoolean(ONLY_ON_SAME_POSITION, false);
this.correctPositionIncrement = indexSettings.getIndexVersionCreated().onOrAfter(IndexVersions.UNIQUE_TOKEN_FILTER_POS_FIX);
}
@Override
public TokenStream create(TokenStream tokenStream) {
if (correctPositionIncrement == false) {
return new XUniqueTokenFilter(tokenStream, onlyOnSamePosition);
}
return new UniqueTokenFilter(tokenStream, onlyOnSamePosition);
}
}
| UniqueTokenFilterFactory |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/ignored/AnimalMapper.java | {
"start": 301,
"end": 517
} | interface ____ {
AnimalMapper INSTANCE = Mappers.getMapper( AnimalMapper.class );
@Ignored( targets = { "publicAge", "age", "publicColor", "color" } )
AnimalDto animalToDto( Animal animal );
}
| AnimalMapper |
java | alibaba__nacos | console/src/main/java/com/alibaba/nacos/console/config/NacosConsoleBeanPostProcessorConfiguration.java | {
"start": 1532,
"end": 2055
} | class ____ {
@Bean
public InstantiationAwareBeanPostProcessor nacosDuplicateSpringBeanPostProcessor(
ConfigurableApplicationContext context) {
return new NacosDuplicateSpringBeanPostProcessor(context);
}
@Bean
public InstantiationAwareBeanPostProcessor nacosDuplicateConfigurationBeanPostProcessor(
ConfigurableApplicationContext context) {
return new NacosDuplicateConfigurationBeanPostProcessor(context);
}
}
| NacosConsoleBeanPostProcessorConfiguration |
java | apache__camel | components/camel-metrics/src/test/java/org/apache/camel/component/metrics/MeterEndpointTest.java | {
"start": 1508,
"end": 2837
} | class ____ {
private static final String METRICS_NAME = "metrics.name";
private static final Long VALUE = System.currentTimeMillis();
@Mock
private MetricRegistry registry;
private MetricsEndpoint endpoint;
private InOrder inOrder;
@BeforeEach
public void setUp() {
endpoint = new MetricsEndpoint(null, null, registry, MetricsType.METER, METRICS_NAME);
inOrder = Mockito.inOrder(registry);
}
@AfterEach
public void tearDown() {
inOrder.verifyNoMoreInteractions();
}
@Test
public void testMeterEndpoint() {
assertThat(endpoint, is(notNullValue()));
assertThat(endpoint.getRegistry(), is(registry));
assertThat(endpoint.getMetricsName(), is(METRICS_NAME));
}
@Test
public void testCreateProducer() throws Exception {
Producer producer = endpoint.createProducer();
assertThat(producer, is(notNullValue()));
assertThat(producer, is(instanceOf(MeterProducer.class)));
}
@Test
public void testGetMark() {
assertThat(endpoint.getMark(), is(nullValue()));
}
@Test
public void testSetMark() {
assertThat(endpoint.getMark(), is(nullValue()));
endpoint.setMark(VALUE);
assertThat(endpoint.getMark(), is(VALUE));
}
}
| MeterEndpointTest |
java | apache__dubbo | dubbo-metadata/dubbo-metadata-definition-protobuf/src/test/java/org/apache/dubbo/metadata/definition/protobuf/model/GooglePB.java | {
"start": 24471,
"end": 29296
} | class ____ {
static final com.google.protobuf.MapEntry<String, PhoneNumber> defaultEntry = com.google.protobuf.MapEntry
.<java.lang.String, org.apache.dubbo.metadata.definition.protobuf.model.GooglePB.PhoneNumber>
newDefaultInstance(
org.apache.dubbo.metadata.definition.protobuf.model.GooglePB
.internal_static_org_apache_dubbo_metadata_definition_protobuf_model_PBRequestType_DoubleMapEntry_descriptor,
com.google.protobuf.WireFormat.FieldType.STRING,
"",
com.google.protobuf.WireFormat.FieldType.MESSAGE,
org.apache.dubbo.metadata.definition.protobuf.model.GooglePB.PhoneNumber
.getDefaultInstance());
}
private com.google.protobuf.MapField<String, PhoneNumber> doubleMap_;
private com.google.protobuf.MapField<String, PhoneNumber> internalGetDoubleMap() {
if (doubleMap_ == null) {
return com.google.protobuf.MapField.emptyMapField(DoubleMapDefaultEntryHolder.defaultEntry);
}
return doubleMap_;
}
public int getDoubleMapCount() {
return internalGetDoubleMap().getMap().size();
}
/**
* <code>map<string, .org.apache.dubbo.metadata.definition.protobuf.model.PhoneNumber> doubleMap = 9;</code>
*/
public boolean containsDoubleMap(java.lang.String key) {
if (key == null) {
throw new java.lang.NullPointerException();
}
return internalGetDoubleMap().getMap().containsKey(key);
}
/**
* Use {@link #getDoubleMapMap()} instead.
*/
@java.lang.Deprecated
public java.util.Map<java.lang.String, org.apache.dubbo.metadata.definition.protobuf.model.GooglePB.PhoneNumber>
getDoubleMap() {
return getDoubleMapMap();
}
/**
* <code>map<string, .org.apache.dubbo.metadata.definition.protobuf.model.PhoneNumber> doubleMap = 9;</code>
*/
public java.util.Map<java.lang.String, org.apache.dubbo.metadata.definition.protobuf.model.GooglePB.PhoneNumber>
getDoubleMapMap() {
return internalGetDoubleMap().getMap();
}
/**
* <code>map<string, .org.apache.dubbo.metadata.definition.protobuf.model.PhoneNumber> doubleMap = 9;</code>
*/
public org.apache.dubbo.metadata.definition.protobuf.model.GooglePB.PhoneNumber getDoubleMapOrDefault(
java.lang.String key,
org.apache.dubbo.metadata.definition.protobuf.model.GooglePB.PhoneNumber defaultValue) {
if (key == null) {
throw new java.lang.NullPointerException();
}
java.util.Map<java.lang.String, org.apache.dubbo.metadata.definition.protobuf.model.GooglePB.PhoneNumber>
map = internalGetDoubleMap().getMap();
return map.containsKey(key) ? map.get(key) : defaultValue;
}
/**
* <code>map<string, .org.apache.dubbo.metadata.definition.protobuf.model.PhoneNumber> doubleMap = 9;</code>
*/
public org.apache.dubbo.metadata.definition.protobuf.model.GooglePB.PhoneNumber getDoubleMapOrThrow(
java.lang.String key) {
if (key == null) {
throw new java.lang.NullPointerException();
}
java.util.Map<java.lang.String, org.apache.dubbo.metadata.definition.protobuf.model.GooglePB.PhoneNumber>
map = internalGetDoubleMap().getMap();
if (!map.containsKey(key)) {
throw new java.lang.IllegalArgumentException();
}
return map.get(key);
}
public static final int BYTESLIST_FIELD_NUMBER = 10;
private java.util.List<com.google.protobuf.ByteString> bytesList_;
/**
* <code>repeated bytes bytesList = 10;</code>
*/
public java.util.List<com.google.protobuf.ByteString> getBytesListList() {
return bytesList_;
}
/**
* <code>repeated bytes bytesList = 10;</code>
*/
public int getBytesListCount() {
return bytesList_.size();
}
/**
* <code>repeated bytes bytesList = 10;</code>
*/
public com.google.protobuf.ByteString getBytesList(int index) {
return bytesList_.get(index);
}
public static final int BYTESMAP_FIELD_NUMBER = 11;
private static final | DoubleMapDefaultEntryHolder |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/KeyVaultEndpointBuilderFactory.java | {
"start": 1576,
"end": 6406
} | interface ____
extends
EndpointProducerBuilder {
default AdvancedKeyVaultEndpointBuilder advanced() {
return (AdvancedKeyVaultEndpointBuilder) this;
}
/**
* Determines the credential strategy to adopt.
*
* The option is a:
* <code>org.apache.camel.component.azure.key.vault.CredentialType</code> type.
*
* Default: CLIENT_SECRET
* Group: common
*
* @param credentialType the value to set
* @return the dsl builder
*/
default KeyVaultEndpointBuilder credentialType(org.apache.camel.component.azure.key.vault.CredentialType credentialType) {
doSetProperty("credentialType", credentialType);
return this;
}
/**
* Determines the credential strategy to adopt.
*
* The option will be converted to a
* <code>org.apache.camel.component.azure.key.vault.CredentialType</code> type.
*
* Default: CLIENT_SECRET
* Group: common
*
* @param credentialType the value to set
* @return the dsl builder
*/
default KeyVaultEndpointBuilder credentialType(String credentialType) {
doSetProperty("credentialType", credentialType);
return this;
}
/**
* Operation to be performed.
*
* The option is a:
* <code>org.apache.camel.component.azure.key.vault.KeyVaultOperation</code> type.
*
* Group: producer
*
* @param operation the value to set
* @return the dsl builder
*/
default KeyVaultEndpointBuilder operation(org.apache.camel.component.azure.key.vault.KeyVaultOperation operation) {
doSetProperty("operation", operation);
return this;
}
/**
* Operation to be performed.
*
* The option will be converted to a
* <code>org.apache.camel.component.azure.key.vault.KeyVaultOperation</code> type.
*
* Group: producer
*
* @param operation the value to set
* @return the dsl builder
*/
default KeyVaultEndpointBuilder operation(String operation) {
doSetProperty("operation", operation);
return this;
}
/**
* Instance of Secret client.
*
* The option is a:
* <code>com.azure.security.keyvault.secrets.SecretClient</code> type.
*
* Group: producer
*
* @param secretClient the value to set
* @return the dsl builder
*/
default KeyVaultEndpointBuilder secretClient(com.azure.security.keyvault.secrets.SecretClient secretClient) {
doSetProperty("secretClient", secretClient);
return this;
}
/**
* Instance of Secret client.
*
* The option will be converted to a
* <code>com.azure.security.keyvault.secrets.SecretClient</code> type.
*
* Group: producer
*
* @param secretClient the value to set
* @return the dsl builder
*/
default KeyVaultEndpointBuilder secretClient(String secretClient) {
doSetProperty("secretClient", secretClient);
return this;
}
/**
* Client Id to be used.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param clientId the value to set
* @return the dsl builder
*/
default KeyVaultEndpointBuilder clientId(String clientId) {
doSetProperty("clientId", clientId);
return this;
}
/**
* Client Secret to be used.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param clientSecret the value to set
* @return the dsl builder
*/
default KeyVaultEndpointBuilder clientSecret(String clientSecret) {
doSetProperty("clientSecret", clientSecret);
return this;
}
/**
* Tenant Id to be used.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param tenantId the value to set
* @return the dsl builder
*/
default KeyVaultEndpointBuilder tenantId(String tenantId) {
doSetProperty("tenantId", tenantId);
return this;
}
}
/**
* Advanced builder for endpoint for the Azure Key Vault component.
*/
public | KeyVaultEndpointBuilder |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java | {
"start": 1698,
"end": 6446
} | class ____ {
private static final Logger LOG =
LoggerFactory.getLogger(CodecRegistry.class);
private static CodecRegistry instance = new CodecRegistry();
public static CodecRegistry getInstance() {
return instance;
}
private Map<String, List<RawErasureCoderFactory>> coderMap;
private Map<String, String[]> coderNameMap;
// Protobuffer 2.5.0 doesn't support map<String, String[]> type well, so use
// the compact value instead
private HashMap<String, String> coderNameCompactMap;
private CodecRegistry() {
coderMap = new HashMap<>();
coderNameMap = new HashMap<>();
coderNameCompactMap = new HashMap<>();
final ServiceLoader<RawErasureCoderFactory> coderFactories =
ServiceLoader.load(RawErasureCoderFactory.class);
updateCoders(coderFactories);
}
/**
* Update coderMap and coderNameMap with iterable type of coder factories.
* @param coderFactories
*/
@VisibleForTesting
void updateCoders(Iterable<RawErasureCoderFactory> coderFactories) {
for (RawErasureCoderFactory coderFactory : coderFactories) {
String codecName = coderFactory.getCodecName();
List<RawErasureCoderFactory> coders = coderMap.get(codecName);
if (coders == null) {
coders = new ArrayList<>();
coders.add(coderFactory);
coderMap.put(codecName, coders);
LOG.debug("Codec registered: codec = {}, coder = {}",
coderFactory.getCodecName(), coderFactory.getCoderName());
} else {
Boolean hasConflit = false;
for (RawErasureCoderFactory coder : coders) {
if (coder.getCoderName().equals(coderFactory.getCoderName())) {
hasConflit = true;
LOG.error("Coder {} cannot be registered because its coder name " +
"{} has conflict with {}", coderFactory.getClass().getName(),
coderFactory.getCoderName(), coder.getClass().getName());
break;
}
}
if (!hasConflit) {
// set native coders as default if user does not
// specify a fallback order
if (coderFactory instanceof NativeRSRawErasureCoderFactory ||
coderFactory instanceof NativeXORRawErasureCoderFactory) {
coders.add(0, coderFactory);
} else {
coders.add(coderFactory);
}
LOG.debug("Codec registered: codec = {}, coder = {}",
coderFactory.getCodecName(), coderFactory.getCoderName());
}
}
}
// update coderNameMap accordingly
coderNameMap.clear();
for (Map.Entry<String, List<RawErasureCoderFactory>> entry :
coderMap.entrySet()) {
String codecName = entry.getKey();
List<RawErasureCoderFactory> coders = entry.getValue();
coderNameMap.put(codecName, coders.stream().
map(RawErasureCoderFactory::getCoderName).
collect(Collectors.toList()).toArray(new String[0]));
coderNameCompactMap.put(codecName, coders.stream().
map(RawErasureCoderFactory::getCoderName)
.collect(Collectors.joining(", ")));
}
}
/**
* Get all coder names of the given codec.
* @param codecName the name of codec
* @return an array of all coder names, null if not exist
*/
public String[] getCoderNames(String codecName) {
String[] coderNames = coderNameMap.get(codecName);
return coderNames;
}
/**
* Get all coder factories of the given codec.
* @param codecName the name of codec
* @return a list of all coder factories, null if not exist
*/
public List<RawErasureCoderFactory> getCoders(String codecName) {
List<RawErasureCoderFactory> coders = coderMap.get(codecName);
return coders;
}
/**
* Get all codec names.
* @return a set of all codec names
*/
public Set<String> getCodecNames() {
return coderMap.keySet();
}
/**
* Get a specific coder factory defined by codec name and coder name.
* @param codecName name of the codec
* @param coderName name of the coder
* @return the specific coder, null if not exist
*/
public RawErasureCoderFactory getCoderByName(
String codecName, String coderName) {
List<RawErasureCoderFactory> coders = getCoders(codecName);
// find the RawErasureCoderFactory with the name of coderName
for (RawErasureCoderFactory coder : coders) {
if (coder.getCoderName().equals(coderName)) {
return coder;
}
}
return null;
}
/**
* Get all codec names and their corresponding coder list.
* @return a map of all codec names, and their corresponding code list
* separated by ','.
*/
public Map<String, String> getCodec2CoderCompactMap() {
return coderNameCompactMap;
}
}
| CodecRegistry |
java | mockito__mockito | mockito-core/src/main/java/org/mockito/internal/creation/bytebuddy/SubclassBytecodeGenerator.java | {
"start": 7272,
"end": 7456
} | class ____ the user runtime package to allow for
// mocking package private types and methods.
// This also requires that we are able to access the package of the mocked | in |
java | micronaut-projects__micronaut-core | core-processor/src/main/java/io/micronaut/inject/writer/BeanDefinitionWriter.java | {
"start": 10777,
"end": 39829
} | class ____"));
private static final Method POST_CONSTRUCT_METHOD = ReflectionUtils.getRequiredInternalMethod(AbstractInitializableBeanDefinition.class, "postConstruct", BeanResolutionContext.class, BeanContext.class, Object.class);
private static final Method INJECT_BEAN_METHOD =
ReflectionUtils.getRequiredInternalMethod(InjectableBeanDefinition.class, "inject", BeanResolutionContext.class, BeanContext.class, Object.class);
private static final Method PRE_DESTROY_METHOD = ReflectionUtils.getRequiredInternalMethod(AbstractInitializableBeanDefinition.class, "preDestroy", BeanResolutionContext.class, BeanContext.class, Object.class);
private static final Method GET_BEAN_FOR_CONSTRUCTOR_ARGUMENT = getBeanLookupMethod("getBeanForConstructorArgument", false);
private static final Method GET_BEAN_REGISTRATIONS_FOR_CONSTRUCTOR_ARGUMENT = getBeanLookupMethod("getBeanRegistrationsForConstructorArgument", true);
private static final Method GET_BEAN_REGISTRATION_FOR_CONSTRUCTOR_ARGUMENT = getBeanLookupMethod("getBeanRegistrationForConstructorArgument", true);
private static final Method GET_BEANS_OF_TYPE_FOR_CONSTRUCTOR_ARGUMENT = getBeanLookupMethod("getBeansOfTypeForConstructorArgument", true);
private static final Method GET_STREAM_OF_TYPE_FOR_CONSTRUCTOR_ARGUMENT = getBeanLookupMethod("getStreamOfTypeForConstructorArgument", true);
private static final Method GET_MAP_OF_TYPE_FOR_CONSTRUCTOR_ARGUMENT = getBeanLookupMethod("getMapOfTypeForConstructorArgument", true);
private static final Method FIND_BEAN_FOR_CONSTRUCTOR_ARGUMENT = getBeanLookupMethod("findBeanForConstructorArgument", true);
private static final Method GET_BEAN_FOR_FIELD = getBeanLookupMethod("getBeanForField", false);
private static final Method GET_BEAN_FOR_ANNOTATION = getBeanLookupMethod("getBeanForAnnotation", false);
private static final Method GET_BEAN_REGISTRATIONS_FOR_FIELD = getBeanLookupMethod("getBeanRegistrationsForField", true);
private static final Method GET_BEAN_REGISTRATION_FOR_FIELD = getBeanLookupMethod("getBeanRegistrationForField", true);
private static final Method GET_BEANS_OF_TYPE_FOR_FIELD = getBeanLookupMethod("getBeansOfTypeForField", true);
private static final Method GET_VALUE_FOR_FIELD = getBeanLookupMethod("getValueForField", false);
private static final Method GET_STREAM_OF_TYPE_FOR_FIELD = getBeanLookupMethod("getStreamOfTypeForField", true);
private static final Method GET_MAP_OF_TYPE_FOR_FIELD = getBeanLookupMethod("getMapOfTypeForField", true);
private static final Method FIND_BEAN_FOR_FIELD = getBeanLookupMethod("findBeanForField", true);
private static final Method GET_VALUE_FOR_PATH = ReflectionUtils.getRequiredInternalMethod(AbstractInitializableBeanDefinition.class, "getValueForPath", BeanResolutionContext.class, BeanContext.class, Argument.class, String.class);
private static final Method CONTAINS_PROPERTIES_METHOD = ReflectionUtils.getRequiredInternalMethod(AbstractInitializableBeanDefinition.class, "containsProperties", BeanResolutionContext.class, BeanContext.class);
private static final Method GET_BEAN_FOR_METHOD_ARGUMENT = getBeanLookupMethodForArgument("getBeanForMethodArgument", false);
private static final Method GET_BEAN_REGISTRATIONS_FOR_METHOD_ARGUMENT = getBeanLookupMethodForArgument("getBeanRegistrationsForMethodArgument", true);
private static final Method GET_BEAN_REGISTRATION_FOR_METHOD_ARGUMENT = getBeanLookupMethodForArgument("getBeanRegistrationForMethodArgument", true);
private static final Method GET_BEANS_OF_TYPE_FOR_METHOD_ARGUMENT = getBeanLookupMethodForArgument("getBeansOfTypeForMethodArgument", true);
private static final Method GET_STREAM_OF_TYPE_FOR_METHOD_ARGUMENT = getBeanLookupMethodForArgument("getStreamOfTypeForMethodArgument", true);
private static final Method GET_MAP_OF_TYPE_FOR_METHOD_ARGUMENT = getBeanLookupMethodForArgument("getMapOfTypeForMethodArgument", true);
private static final Method FIND_BEAN_FOR_METHOD_ARGUMENT = getBeanLookupMethodForArgument("findBeanForMethodArgument", true);
private static final Method CHECK_INJECTED_BEAN_PROPERTY_VALUE = ReflectionUtils.getRequiredInternalMethod(
AbstractInitializableBeanDefinition.class,
"checkInjectedBeanPropertyValue",
String.class,
Object.class,
String.class,
String.class);
private static final Method GET_PROPERTY_VALUE_FOR_METHOD_ARGUMENT = ReflectionUtils.getRequiredInternalMethod(
AbstractInitializableBeanDefinition.class,
"getPropertyValueForMethodArgument",
BeanResolutionContext.class,
BeanContext.class,
int.class,
int.class,
String.class,
String.class);
private static final Method GET_PROPERTY_PLACEHOLDER_VALUE_FOR_METHOD_ARGUMENT = ReflectionUtils.getRequiredInternalMethod(
AbstractInitializableBeanDefinition.class,
"getPropertyPlaceholderValueForMethodArgument",
BeanResolutionContext.class,
BeanContext.class,
int.class,
int.class,
String.class);
private static final Method GET_EVALUATED_EXPRESSION_VALUE_FOR_METHOD_ARGUMENT = ReflectionUtils.getRequiredInternalMethod(
AbstractInitializableBeanDefinition.class,
"getEvaluatedExpressionValueForMethodArgument",
int.class,
int.class);
private static final Method GET_BEAN_FOR_SETTER = ReflectionUtils.getRequiredInternalMethod(
AbstractInitializableBeanDefinition.class,
"getBeanForSetter",
BeanResolutionContext.class,
BeanContext.class,
String.class,
Argument.class,
Qualifier.class);
private static final Method GET_BEANS_OF_TYPE_FOR_SETTER = ReflectionUtils.getRequiredInternalMethod(
AbstractInitializableBeanDefinition.class,
"getBeansOfTypeForSetter",
BeanResolutionContext.class,
BeanContext.class,
String.class,
Argument.class,
Argument.class,
Qualifier.class);
private static final Method GET_PROPERTY_VALUE_FOR_SETTER = ReflectionUtils.getRequiredInternalMethod(
AbstractInitializableBeanDefinition.class,
"getPropertyValueForSetter",
BeanResolutionContext.class,
BeanContext.class,
String.class,
Argument.class,
String.class,
String.class);
private static final Method GET_PROPERTY_PLACEHOLDER_VALUE_FOR_SETTER = ReflectionUtils.getRequiredInternalMethod(
AbstractInitializableBeanDefinition.class,
"getPropertyPlaceholderValueForSetter",
BeanResolutionContext.class,
BeanContext.class,
String.class,
Argument.class,
String.class);
private static final Method GET_PROPERTY_VALUE_FOR_CONSTRUCTOR_ARGUMENT = ReflectionUtils.getRequiredInternalMethod(
AbstractInitializableBeanDefinition.class,
"getPropertyValueForConstructorArgument",
BeanResolutionContext.class,
BeanContext.class,
int.class,
String.class,
String.class);
private static final Method GET_PROPERTY_PLACEHOLDER_VALUE_FOR_CONSTRUCTOR_ARGUMENT = ReflectionUtils.getRequiredInternalMethod(
AbstractInitializableBeanDefinition.class,
"getPropertyPlaceholderValueForConstructorArgument",
BeanResolutionContext.class,
BeanContext.class,
int.class,
String.class);
private static final Method GET_EVALUATED_EXPRESSION_VALUE_FOR_CONSTRUCTOR_ARGUMENT = ReflectionUtils.getRequiredInternalMethod(
AbstractInitializableBeanDefinition.class,
"getEvaluatedExpressionValueForConstructorArgument",
int.class);
private static final Method GET_PROPERTY_VALUE_FOR_FIELD = ReflectionUtils.getRequiredInternalMethod(
AbstractInitializableBeanDefinition.class,
"getPropertyValueForField",
BeanResolutionContext.class,
BeanContext.class,
Argument.class,
String.class,
String.class);
private static final Method GET_PROPERTY_PLACEHOLDER_VALUE_FOR_FIELD = ReflectionUtils.getRequiredInternalMethod(
AbstractInitializableBeanDefinition.class,
"getPropertyPlaceholderValueForField",
BeanResolutionContext.class,
BeanContext.class,
Argument.class,
String.class);
private static final Method CONTAINS_PROPERTIES_VALUE_METHOD = ReflectionUtils.getRequiredInternalMethod(
AbstractInitializableBeanDefinition.class,
"containsPropertiesValue",
BeanResolutionContext.class,
BeanContext.class,
String.class);
private static final Method CONTAINS_PROPERTY_VALUE_METHOD = ReflectionUtils.getRequiredInternalMethod(
AbstractInitializableBeanDefinition.class,
"containsPropertyValue",
BeanResolutionContext.class,
BeanContext.class,
String.class);
private static final ClassTypeDef TYPE_ABSTRACT_BEAN_DEFINITION_AND_REFERENCE = ClassTypeDef.of(AbstractInitializableBeanDefinitionAndReference.class);
private static final Method METHOD_OPTIONAL_EMPTY = ReflectionUtils.getRequiredMethod(Optional.class, "empty");
private static final ClassTypeDef TYPE_OPTIONAL = ClassTypeDef.of(Optional.class);
private static final Method METHOD_OPTIONAL_OF = ReflectionUtils.getRequiredMethod(Optional.class, "of", Object.class);
private static final String METHOD_NAME_INSTANTIATE = "instantiate";
private static final Method METHOD_BEAN_CONSTRUCTOR_INSTANTIATE = ReflectionUtils.getRequiredMethod(
BeanConstructor.class,
METHOD_NAME_INSTANTIATE,
Object[].class
);
private static final Method METHOD_DESCRIPTOR_CONSTRUCTOR_INSTANTIATE = ReflectionUtils.getRequiredMethod(ConstructorInterceptorChain.class, METHOD_NAME_INSTANTIATE,
BeanResolutionContext.class,
BeanContext.class,
List.class,
BeanDefinition.class,
BeanConstructor.class,
int.class,
Object[].class
);
private static final Method BEAN_LOCATOR_METHOD_GET_BEAN = ReflectionUtils.getRequiredInternalMethod(BeanLocator.class, "getBean", Class.class, Qualifier.class);
private static final Method COLLECTION_TO_ARRAY = ReflectionUtils.getRequiredInternalMethod(Collection.class, "toArray", Object[].class);
private static final Method DISPOSE_INTERCEPTOR_METHOD =
ReflectionUtils.getRequiredInternalMethod(MethodInterceptorChain.class, "dispose",
BeanResolutionContext.class,
BeanContext.class,
BeanDefinition.class,
ExecutableMethod.class,
Object.class);
private static final Method INITIALIZE_INTERCEPTOR_METHOD =
ReflectionUtils.getRequiredInternalMethod(MethodInterceptorChain.class, "initialize",
BeanResolutionContext.class,
BeanContext.class,
BeanDefinition.class,
ExecutableMethod.class,
Object.class);
private static final Method SET_FIELD_WITH_REFLECTION_METHOD =
ReflectionUtils.getRequiredMethod(AbstractInitializableBeanDefinition.class, "setFieldWithReflection", BeanResolutionContext.class, BeanContext.class, int.class, Object.class, Object.class);
private static final Method INVOKE_WITH_REFLECTION_METHOD =
ReflectionUtils.getRequiredMethod(AbstractInitializableBeanDefinition.class, "invokeMethodWithReflection", BeanResolutionContext.class, BeanContext.class, int.class, Object.class, Object[].class);
private static final Method IS_METHOD_RESOLVED =
ReflectionUtils.getRequiredMethod(AbstractInitializableBeanDefinition.class, "isMethodResolved", int.class, Object[].class);
private static final ClassTypeDef TYPE_REFLECTION_UTILS = ClassTypeDef.of(ReflectionUtils.class);
private static final Method GET_FIELD_WITH_REFLECTION_METHOD =
ReflectionUtils.getRequiredInternalMethod(ReflectionUtils.class, "getField", Class.class, String.class, Object.class);
private static final Method METHOD_INVOKE_INACCESSIBLE_METHOD =
ReflectionUtils.getRequiredInternalMethod(ReflectionUtils.class, "invokeInaccessibleMethod", Object.class, Method.class, Object[].class);
private static final Method METHOD_GET_DEFAULT_IMPLEMENTATION =
ReflectionUtils.getRequiredInternalMethod(BeanDefinition.class, "getDefaultImplementation");
private static final Method METHOD_IS_CAN_BE_REPLACED =
ReflectionUtils.getRequiredInternalMethod(BeanDefinition.class, "isCanBeReplaced");
private static final Method METHOD_GET_REPLACES_DEFINITION =
ReflectionUtils.getRequiredInternalMethod(BeanDefinition.class, "getReplacesDefinition");
private static final Constructor<?> CONSTRUCTOR_DEFAULT_REPLACES_DEFINITION =
ReflectionUtils.getRequiredInternalConstructor(DefaultReplacesDefinition.class, Class.class, Class.class, Qualifier.class, Class.class);
private static final Optional<Constructor<AbstractInitializableBeanDefinitionAndReference>> BEAN_DEFINITION_CLASS_CONSTRUCTOR1 = ReflectionUtils.findConstructor(
AbstractInitializableBeanDefinitionAndReference.class,
Class.class, // beanType
AbstractInitializableBeanDefinition.MethodOrFieldReference.class, // constructor
AnnotationMetadata.class, // annotationMetadata
AbstractInitializableBeanDefinition.MethodReference[].class, // methodInjection
AbstractInitializableBeanDefinition.FieldReference[].class, // fieldInjection
AbstractInitializableBeanDefinition.AnnotationReference[].class, // annotationInjection
ExecutableMethodsDefinition.class, // executableMethodsDefinition
Map.class, // typeArgumentsMap
AbstractInitializableBeanDefinition.PrecalculatedInfo.class // precalculated info
);
private static final Optional<Constructor<AbstractInitializableBeanDefinitionAndReference>> BEAN_DEFINITION_CLASS_CONSTRUCTOR2 = ReflectionUtils.findConstructor(
AbstractInitializableBeanDefinitionAndReference.class,
Class.class, // beanType
AbstractInitializableBeanDefinition.MethodOrFieldReference.class, // constructor
AnnotationMetadata.class, // annotationMetadata
AbstractInitializableBeanDefinition.MethodReference[].class, // methodInjection
AbstractInitializableBeanDefinition.FieldReference[].class, // fieldInjection
AbstractInitializableBeanDefinition.AnnotationReference[].class, // annotationInjection
ExecutableMethodsDefinition.class, // executableMethodsDefinition
Map.class, // typeArgumentsMap
AbstractInitializableBeanDefinition.PrecalculatedInfo.class, // precalculated info
Condition[].class, // pre conditions
Condition[].class, // post conditions
Throwable.class // failed initialization
);
private static final Constructor<?> PRECALCULATED_INFO_CONSTRUCTOR = ReflectionUtils.getRequiredInternalConstructor(AbstractInitializableBeanDefinition.PrecalculatedInfo.class,
Optional.class, // scope
boolean.class, // isAbstract
boolean.class, // isIterable
boolean.class, // isSingleton
boolean.class, // isPrimary
boolean.class, // isConfigurationProperties
boolean.class, // isContainerType
boolean.class, // requiresMethodProcessing,
boolean.class // hasEvaluatedExpressions
);
private static final String FIELD_CONSTRUCTOR = "$CONSTRUCTOR";
private static final String FIELD_EXECUTABLE_METHODS = "$EXEC";
private static final String FIELD_INJECTION_METHODS = "$INJECTION_METHODS";
private static final String FIELD_INJECTION_FIELDS = "$INJECTION_FIELDS";
private static final String FIELD_ANNOTATION_INJECTIONS = "$ANNOTATION_INJECTIONS";
private static final String FIELD_TYPE_ARGUMENTS = "$TYPE_ARGUMENTS";
private static final String FIELD_INNER_CLASSES = "$INNER_CONFIGURATION_CLASSES";
private static final String FIELD_EXPOSED_TYPES = "$EXPOSED_TYPES";
private static final String FIELD_REPLACES = "$REPLACES";
private static final String FIELD_FAILED_INITIALIZATION = "$FAILURE";
private static final String FIELD_PRECALCULATED_INFO = "$INFO";
private static final String FIELD_PRE_START_CONDITIONS = "$PRE_CONDITIONS";
private static final String FIELD_POST_START_CONDITIONS = "$POST_CONDITIONS";
private static final Constructor<?> METHOD_REFERENCE_CONSTRUCTOR = ReflectionUtils.getRequiredInternalConstructor(AbstractInitializableBeanDefinition.MethodReference.class,
Class.class, // declaringType,
String.class, // methodName
Argument[].class, // arguments
AnnotationMetadata.class// annotationMetadata
);
private static final Constructor<?> METHOD_REFERENCE_CONSTRUCTOR_POST_PRE = ReflectionUtils.getRequiredInternalConstructor(AbstractInitializableBeanDefinition.MethodReference.class,
Class.class, // declaringType,
String.class, // methodName
Argument[].class, // arguments
AnnotationMetadata.class, // annotationMetadata
boolean.class, // isPostConstructMethod
boolean.class // isPreDestroyMethod,
);
private static final Constructor<?> FIELD_REFERENCE_CONSTRUCTOR = ReflectionUtils.getRequiredInternalConstructor(AbstractInitializableBeanDefinition.FieldReference.class, Class.class, Argument.class);
private static final Constructor<?> ANNOTATION_REFERENCE_CONSTRUCTOR = ReflectionUtils.getRequiredInternalConstructor(AbstractInitializableBeanDefinition.AnnotationReference.class, Argument.class);
private static final Method METHOD_QUALIFIER_FOR_ARGUMENT =
ReflectionUtils.getRequiredMethod(Qualifiers.class, "forArgument", Argument.class);
private static final Method METHOD_QUALIFIER_BY_NAME = ReflectionUtils.getRequiredMethod(Qualifiers.class, "byName", String.class);
private static final Method METHOD_QUALIFIER_BY_STEREOTYPE = ReflectionUtils.getRequiredMethod(Qualifiers.class, "byStereotype", Class.class);
private static final Method METHOD_QUALIFIER_BY_ANNOTATION =
ReflectionUtils.getRequiredMethod(Qualifiers.class, "byAnnotationSimple", AnnotationMetadata.class, String.class);
private static final Method METHOD_QUALIFIER_BY_REPEATABLE_ANNOTATION =
ReflectionUtils.getRequiredMethod(Qualifiers.class, "byRepeatableAnnotation", AnnotationMetadata.class, String.class);
private static final Method METHOD_QUALIFIER_BY_QUALIFIERS =
ReflectionUtils.getRequiredMethod(Qualifiers.class, "byQualifiers", Qualifier[].class);
private static final Method METHOD_QUALIFIER_BY_INTERCEPTOR_BINDING =
ReflectionUtils.getRequiredMethod(Qualifiers.class, "byInterceptorBinding", AnnotationMetadata.class);
private static final Method METHOD_QUALIFIER_BY_TYPE = ReflectionUtils.getRequiredMethod(Qualifiers.class, "byType", Class[].class);
private static final Method METHOD_BEAN_RESOLUTION_CONTEXT_MARK_FACTORY = ReflectionUtils.getRequiredMethod(BeanResolutionContext.class, "markDependentAsFactory");
private static final Method METHOD_PROXY_TARGET_TYPE = ReflectionUtils.getRequiredInternalMethod(ProxyBeanDefinition.class, "getTargetDefinitionType");
private static final Method METHOD_PROXY_TARGET_CLASS = ReflectionUtils.getRequiredInternalMethod(ProxyBeanDefinition.class, "getTargetType");
private static final ClassTypeDef TYPE_QUALIFIERS = ClassTypeDef.of(Qualifiers.class);
private static final ClassTypeDef TYPE_QUALIFIER = ClassTypeDef.of(Qualifier.class);
private static final String MESSAGE_ONLY_SINGLE_CALL_PERMITTED = "Only a single call to visitBeanFactoryMethod(..) is permitted";
private static final int INJECT_METHOD_BEAN_RESOLUTION_CONTEXT_PARAM = 0;
private static final int INJECT_METHOD_BEAN_CONTEXT_PARAM = 1;
private static final int INSTANTIATE_METHOD_BEAN_RESOLUTION_CONTEXT_PARAM = 0;
private static final int INSTANTIATE_METHOD_BEAN_CONTEXT_PARAM = 1;
private static final Method METHOD_BEAN_CONTEXT_GET_CONVERSION_SERVICE = ReflectionUtils.getRequiredMethod(ConversionServiceProvider.class, "getConversionService");
private static final Method METHOD_INVOKE_INTERNAL =
ReflectionUtils.getRequiredInternalMethod(AbstractExecutableMethod.class, "invokeInternal", Object.class, Object[].class);
private static final Method METHOD_INITIALIZE =
ReflectionUtils.getRequiredInternalMethod(InitializingBeanDefinition.class, "initialize", BeanResolutionContext.class, BeanContext.class, Object.class);
private static final Method METHOD_DISPOSE =
ReflectionUtils.getRequiredInternalMethod(DisposableBeanDefinition.class, "dispose", BeanResolutionContext.class, BeanContext.class, Object.class);
private static final Method DESTROY_INJECT_SCOPED_BEANS_METHOD = ReflectionUtils.getRequiredInternalMethod(BeanResolutionContext.class, "destroyInjectScopedBeans");
private static final Method CHECK_IF_SHOULD_LOAD_METHOD = ReflectionUtils.getRequiredMethod(AbstractInitializableBeanDefinition.class,
"checkIfShouldLoad",
BeanResolutionContext.class,
BeanContext.class);
private static final Method GET_MAP_METHOD = ReflectionUtils.getRequiredMethod(Map.class, "get", Object.class);
private static final Method LOAD_REFERENCE_METHOD = ReflectionUtils.getRequiredMethod(BeanDefinitionReference.class, "load");
private static final Method IS_CONTEXT_SCOPE_METHOD = ReflectionUtils.getRequiredMethod(BeanDefinitionReference.class, "isContextScope");
private static final Method IS_PROXIED_BEAN_METHOD = ReflectionUtils.getRequiredMethod(BeanDefinitionReference.class, "isProxiedBean");
private static final Method IS_ENABLED_METHOD = ReflectionUtils.getRequiredMethod(BeanContextConditional.class, "isEnabled", BeanContext.class);
private static final Method IS_ENABLED2_METHOD = ReflectionUtils.getRequiredMethod(BeanContextConditional.class, "isEnabled", BeanContext.class, BeanResolutionContext.class);
private static final Method GET_INTERCEPTED_TYPE_METHOD = ReflectionUtils.getRequiredMethod(AdvisedBeanType.class, "getInterceptedType");
private static final Method DO_INSTANTIATE_METHOD = ReflectionUtils.getRequiredMethod(AbstractInitializableBeanDefinition.class, "doInstantiate", BeanResolutionContext.class, BeanContext.class, Map.class);
private static final Method INSTANTIATE_METHOD = ReflectionUtils.getRequiredMethod(InstantiatableBeanDefinition.class, "instantiate", BeanResolutionContext.class, BeanContext.class);
private static final Method COLLECTION_UTILS_ENUM_SET_METHOD = ReflectionUtils.getRequiredMethod(CollectionUtils.class, "enumSet", Enum[].class);
private static final Method IS_INNER_CONFIGURATION_METHOD = ReflectionUtils.getRequiredMethod(AbstractInitializableBeanDefinition.class, "isInnerConfiguration", Class.class);
private static final Method CONTAINS_METHOD = ReflectionUtils.getRequiredMethod(Collection.class, "contains", Object.class);
private static final Method GET_EXPOSED_TYPES_METHOD = ReflectionUtils.getRequiredMethod(AbstractInitializableBeanDefinition.class, "getExposedTypes");
private static final Method IS_CANDIDATE_BEAN_METHOD = ReflectionUtils.getRequiredMethod(BeanDefinition.class, "isCandidateBean", Argument.class);
private static final Method GET_ORDER_METHOD = ReflectionUtils.getRequiredMethod(Ordered.class, "getOrder");
private static final Constructor<HashSet> HASH_SET_COLLECTION_CONSTRUCTOR = ReflectionUtils.getRequiredInternalConstructor(HashSet.class, Collection.class);
private static final Method ARRAYS_AS_LIST_METHOD = ReflectionUtils.getRequiredMethod(Arrays.class, "asList", Object[].class);
private static final Method COLLECTIONS_SINGLETON_METHOD = ReflectionUtils.getRequiredMethod(Collections.class, "singleton", Object.class);
private static final Method OPTIONAL_IS_PRESENT_METHOD = ReflectionUtils.getRequiredMethod(Optional.class, "isPresent");
private static final Method OPTIONAL_GET_METHOD = ReflectionUtils.getRequiredMethod(Optional.class, "get");
private static final Method DURATION_TO_MILLIS_METHOD = ReflectionUtils.getRequiredMethod(Duration.class, "toMillis");
private static final Method PROVIDER_GET_ANNOTATION_METADATA_METHOD = ReflectionUtils.getRequiredMethod(AnnotationMetadataProvider.class, "getAnnotationMetadata");
private static final Method IS_PROXY_TARGET_METHOD = ReflectionUtils.getRequiredMethod(BeanDefinitionReference.class, "isProxyTarget");
private static final Method GET_CONFIGURATION_PATH_METHOD = ReflectionUtils.getRequiredInternalMethod(BeanResolutionContext.class, "getConfigurationPath");
private static final Constructor<AbstractExecutableMethod> ABSTRACT_EXECUTABLE_METHOD_CONSTRUCTOR = ReflectionUtils.getRequiredInternalConstructor(AbstractExecutableMethod.class, Class.class, String.class);
private static final Method GET_TYPE_PARAMETERS_METHOD = ReflectionUtils.getRequiredInternalMethod(TypeVariableResolver.class, "getTypeParameters");
private static final Method ARGUMENT_OF_METHOD = ReflectionUtils.getRequiredInternalMethod(Argument.class, "of", Class.class);
private static final Method BD_GET_INDEXES_OF_EXECUTABLE_METHODS_FOR_PROCESSING = ReflectionUtils.getRequiredInternalMethod(AbstractInitializableBeanDefinition.class, "getIndexesOfExecutableMethodsForProcessing");
private static final Method GET_INDEXES_METHOD = ReflectionUtils.getRequiredMethod(BeanDefinitionReference.class, "getIndexes");
private static final Method IS_PARALLEL_METHOD = ReflectionUtils.getRequiredMethod(BeanDefinitionReference.class, "isParallel");
private static final Method IS_ASSIGNABLE_METHOD = ReflectionUtils.getRequiredMethod(Class.class, "isAssignableFrom", Class.class);
private static final Set<String> IGNORED_EXPOSED_INTERFACES = Set.of(
AutoCloseable.class.getName(), LifeCycle.class.getName(), Ordered.class.getName(), Closeable.class.getName(),
Named.class.getName(), Described.class.getName(),
Record.class.getName(), Enum.class.getName(), Toggleable.class.getName(), Iterable.class.getName(),
Serializable.class.getName()
);
private final String beanFullClassName;
private final String beanDefinitionName;
private final TypeDef beanTypeDef;
private final Map<String, MethodDef> loadTypeMethods = new LinkedHashMap<>();
private final String packageName;
private final String beanSimpleClassName;
private final ClassTypeDef beanDefinitionTypeDef;
private final boolean isInterface;
private final boolean isAbstract;
private final boolean isConfigurationProperties;
private final Element beanProducingElement;
private final ClassElement beanTypeElement;
private final VisitorContext visitorContext;
private final List<String> beanTypeInnerClasses;
private final EvaluatedExpressionProcessor evaluatedExpressionProcessor;
private ClassTypeDef superType = TYPE_ABSTRACT_BEAN_DEFINITION_AND_REFERENCE;
private boolean superBeanDefinition = false;
private boolean isSuperFactory = false;
private final AnnotationMetadata annotationMetadata;
private boolean preprocessMethods = false;
private Map<String, Map<String, ClassElement>> typeArguments;
@Nullable
private String interceptedType;
@Nullable
private Set<ClassElement> exposes;
private final List<FieldVisitData> fieldInjectionPoints = new ArrayList<>(2);
private final List<MethodVisitData> methodInjectionPoints = new ArrayList<>(2);
private final List<MethodVisitData> postConstructMethodVisits = new ArrayList<>(2);
private final List<MethodVisitData> preDestroyMethodVisits = new ArrayList<>(2);
private final List<MethodVisitData> allMethodVisits = new ArrayList<>(2);
private final Map<ClassElement, List<AnnotationVisitData>> annotationInjectionPoints = new LinkedHashMap<>(2);
private final Map<String, Boolean> isLifeCycleCache = new HashMap<>(2);
private ExecutableMethodsDefinitionWriter executableMethodsDefinitionWriter;
private Object constructor; // MethodElement or FieldElement
private boolean disabled = false;
private final boolean keepConfPropInjectPoints;
private boolean proxiedBean = false;
private boolean isProxyTarget = false;
private String proxyBeanDefinitionName, proxyBeanTypeName;
private final OriginatingElements originatingElements;
private ClassDef.ClassDefBuilder classDefBuilder;
private BuildMethodDefinition buildMethodDefinition;
private final List<InjectMethodCommand> injectCommands = new ArrayList<>();
private boolean validated;
private final Function<String, ExpressionDef> loadClassValueExpressionFn;
private Map<String, byte[]> output;
/**
* Creates a bean definition writer.
*
* @param classElement The | path |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/query/hhh14112/HHH14112Test.java | {
"start": 1397,
"end": 1560
} | class ____ {
@Id
@GeneratedValue(strategy = GenerationType.AUTO)
public Long id;
public boolean deleted;
}
@Entity(name = "SubObject")
public static | Super |
java | netty__netty | codec-classes-quic/src/main/java/io/netty/handler/codec/quic/EpollQuicUtils.java | {
"start": 1759,
"end": 2374
} | class ____ implements SegmentedDatagramPacketAllocator {
private final int maxNumSegments;
EpollSegmentedDatagramPacketAllocator(int maxNumSegments) {
this.maxNumSegments = maxNumSegments;
}
@Override
public int maxNumSegments() {
return maxNumSegments;
}
@Override
public DatagramPacket newPacket(ByteBuf buffer, int segmentSize, InetSocketAddress remoteAddress) {
return new io.netty.channel.unix.SegmentedDatagramPacket(buffer, segmentSize, remoteAddress);
}
}
}
| EpollSegmentedDatagramPacketAllocator |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/StreamOperatorStateHandlerTest.java | {
"start": 10424,
"end": 11098
} | class ____
extends StateSnapshotContextSynchronousImpl {
public TestStateSnapshotContextSynchronousImpl(
long checkpointId, long timestamp, CloseableRegistry closeableRegistry) {
super(
checkpointId,
timestamp,
new MemCheckpointStreamFactory(1024),
new KeyGroupRange(0, 2),
closeableRegistry);
this.keyedStateCheckpointClosingFuture = new CancelableFuture<>();
this.operatorStateCheckpointClosingFuture = new CancelableFuture<>();
}
}
private static | TestStateSnapshotContextSynchronousImpl |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/embedded/one2many/EmbeddableWithOne2ManyTest.java | {
"start": 627,
"end": 1731
} | class ____ {
@Test
@FailureExpected(jiraKey = "HHH-4883")
public void testJoinAcrossEmbedded(SessionFactoryScope scope) {
// NOTE : this may or may not work now with HHH-4883 fixed,
// but i cannot do this checking until HHH-4599 is done.
scope.inTransaction(
session -> {
session.createQuery( "from Person p join p.name.aliases a where a.source = 'FBI'", Person.class )
.list();
}
);
}
@Test
@FailureExpected(jiraKey = "HHH-4599")
public void testBasicOps(SessionFactoryScope scope) {
Person person = new Person( "John", "Dillinger" );
scope.inTransaction(
session -> {
Alias alias = new Alias( "Public Enemy", "Number 1", "FBI" );
session.persist( alias );
person.getName().getAliases().add( alias );
session.persist( person );
}
);
scope.inTransaction(
session -> {
Person p = session.getReference( Person.class, person.getId() );
session.remove( p );
List<Alias> aliases = session.createQuery( "from Alias", Alias.class ).list();
assertEquals( 0, aliases.size() );
}
);
}
}
| EmbeddableWithOne2ManyTest |
java | google__guava | android/guava/src/com/google/common/base/Converter.java | {
"start": 16114,
"end": 20631
} | class ____ neither legacy nor
* non-legacy behavior; it just needs to let the behaviors of the backing converters shine
* through (which might even differ from each other!). So, we override the correctedDo* methods,
* after which the do* methods should never be reached.
*/
@Override
protected C doForward(A a) {
throw new AssertionError();
}
@Override
protected A doBackward(C c) {
throw new AssertionError();
}
@Override
@Nullable C correctedDoForward(@Nullable A a) {
return second.correctedDoForward(first.correctedDoForward(a));
}
@Override
@Nullable A correctedDoBackward(@Nullable C c) {
return first.correctedDoBackward(second.correctedDoBackward(c));
}
@Override
public boolean equals(@Nullable Object object) {
if (object instanceof ConverterComposition) {
ConverterComposition<?, ?, ?> that = (ConverterComposition<?, ?, ?>) object;
return this.first.equals(that.first) && this.second.equals(that.second);
}
return false;
}
@Override
public int hashCode() {
return 31 * first.hashCode() + second.hashCode();
}
@Override
public String toString() {
return first + ".andThen(" + second + ")";
}
@GwtIncompatible @J2ktIncompatible private static final long serialVersionUID = 0L;
}
/**
* @deprecated Provided to satisfy the {@code Function} interface; use {@link #convert} instead.
*/
@Deprecated
@Override
@InlineMe(replacement = "this.convert(a)")
public final B apply(A a) {
/*
* Given that we declare this method as accepting and returning non-nullable values (because we
* implement Function<A, B>, as discussed in a class-level comment), it would make some sense to
* perform runtime null checks on the input and output. (That would also make NullPointerTester
* happy!) However, since we didn't do that for many years, we're not about to start now.
* (Runtime checks could be particularly bad for users of LegacyConverter.)
*
* Luckily, our nullness checker is smart enough to realize that `convert` has @PolyNull-like
* behavior, so it knows that `convert(a)` returns a non-nullable value, and we don't need to
* perform even a cast, much less a runtime check.
*
* All that said, don't forget that everyone should call converter.convert() instead of
* converter.apply(), anyway. If clients use only converter.convert(), then their nullness
* checkers are unlikely to ever look at the annotations on this declaration.
*
* Historical note: At one point, we'd declared this method as accepting and returning nullable
* values. For details on that, see earlier revisions of this file.
*/
return convert(a);
}
/**
* <i>May</i> return {@code true} if {@code object} is a {@code Converter} that behaves
* identically to this converter.
*
* <p><b>Warning: do not depend</b> on the behavior of this method.
*
* <p>Historically, {@code Converter} instances in this library have implemented this method to
* recognize certain cases where distinct {@code Converter} instances would in fact behave
* identically. However, this is not true of {@code Converter} implementations in general. It is
* best not to depend on it.
*/
@Override
public boolean equals(@Nullable Object object) {
return super.equals(object);
}
// Static converters
/**
* Returns a converter based on separate forward and backward functions. This is useful if the
* function instances already exist, or so that you can supply lambda expressions. If those
* circumstances don't apply, you probably don't need to use this; subclass {@code Converter} and
* implement its {@link #doForward} and {@link #doBackward} methods directly.
*
* <p>These functions will never be passed {@code null} and must not under any circumstances
* return {@code null}. If a value cannot be converted, the function should throw an unchecked
* exception (typically, but not necessarily, {@link IllegalArgumentException}).
*
* <p>The returned converter is serializable if both provided functions are.
*
* @since 17.0
*/
public static <A, B> Converter<A, B> from(
Function<? super A, ? extends B> forwardFunction,
Function<? super B, ? extends A> backwardFunction) {
return new FunctionBasedConverter<>(forwardFunction, backwardFunction);
}
private static final | has |
java | elastic__elasticsearch | x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityRCS2FailureStoreRestIT.java | {
"start": 957,
"end": 9741
} | class ____ extends AbstractRemoteClusterSecurityFailureStoreRestIT {
private static final AtomicReference<Map<String, Object>> API_KEY_MAP_REF = new AtomicReference<>();
static {
fulfillingCluster = ElasticsearchCluster.local()
.distribution(DistributionType.DEFAULT)
.name("fulfilling-cluster")
.apply(commonClusterConfig)
.setting("remote_cluster_server.enabled", "true")
.setting("remote_cluster.port", "0")
.setting("xpack.security.remote_cluster_server.ssl.enabled", "true")
.setting("xpack.security.remote_cluster_server.ssl.key", "remote-cluster.key")
.setting("xpack.security.remote_cluster_server.ssl.certificate", "remote-cluster.crt")
.setting("xpack.security.authc.token.enabled", "true")
.keystore("xpack.security.remote_cluster_server.ssl.secure_key_passphrase", "remote-cluster-password")
.build();
queryCluster = ElasticsearchCluster.local()
.distribution(DistributionType.DEFAULT)
.name("query-cluster")
.apply(commonClusterConfig)
.setting("xpack.security.remote_cluster_client.ssl.enabled", "true")
.setting("xpack.security.remote_cluster_client.ssl.certificate_authorities", "remote-cluster-ca.crt")
.setting("xpack.security.authc.token.enabled", "true")
.keystore("cluster.remote.my_remote_cluster.credentials", () -> {
API_KEY_MAP_REF.compareAndSet(null, createCrossClusterAccessApiKey("""
{
"search": [
{
"names": ["test*"]
}
]
}"""));
return (String) API_KEY_MAP_REF.get().get("encoded");
})
.rolesFile(Resource.fromClasspath("roles.yml"))
.build();
}
@ClassRule
// Use a RuleChain to ensure that fulfilling cluster is started before query cluster
public static TestRule clusterRule = RuleChain.outerRule(fulfillingCluster).around(queryCluster);
public void testRCS2CrossClusterSearch() throws Exception {
// configure remote cluster using API Key-based authentication
configureRemoteCluster();
final String crossClusterAccessApiKeyId = (String) API_KEY_MAP_REF.get().get("id");
final boolean ccsMinimizeRoundtrips = randomBoolean();
// fulfilling cluster setup
setupTestDataStreamOnFulfillingCluster();
// query cluster setup
setupLocalDataOnQueryCluster();
setupUserAndRoleOnQueryCluster();
final Tuple<String, String> backingIndices = getSingleDataAndFailureIndices("test1");
final String backingDataIndexName = backingIndices.v1();
final String backingFailureIndexName = backingIndices.v2();
{
// query remote cluster without selectors should succeed
final boolean alsoSearchLocally = randomBoolean();
final Request dataSearchRequest = new Request(
"GET",
String.format(
Locale.ROOT,
"/%s%s:%s/_search?ccs_minimize_roundtrips=%s&ignore_unavailable=false",
alsoSearchLocally ? "local_index," : "",
randomFrom("my_remote_cluster", "*", "my_remote_*"),
randomFrom("test1", "test*", "*", backingDataIndexName),
ccsMinimizeRoundtrips
)
);
final String[] expectedIndices = alsoSearchLocally
? new String[] { "local_index", backingDataIndexName }
: new String[] { backingDataIndexName };
assertSearchResponseContainsIndices(performRequestWithRemoteSearchUser(dataSearchRequest), expectedIndices);
}
{
// query remote cluster using ::data selector should fail
final boolean alsoSearchLocally = randomBoolean();
final Request dataSearchRequest = new Request(
"GET",
String.format(
Locale.ROOT,
"/%s:%s/_search?ccs_minimize_roundtrips=%s&ignore_unavailable=false",
randomFrom("my_remote_cluster", "*", "my_remote_*"),
randomFrom("test1::data", "test*::data", "*::data", "non-existing::data"),
ccsMinimizeRoundtrips
)
);
final ResponseException exception = expectThrows(
ResponseException.class,
() -> performRequestWithRemoteSearchUser(dataSearchRequest)
);
assertSelectorsNotSupported(exception);
}
{
// query remote cluster using ::failures selector should fail
final ResponseException exception = expectThrows(
ResponseException.class,
() -> performRequestWithRemoteSearchUser(
new Request(
"GET",
String.format(
Locale.ROOT,
"/my_remote_cluster:%s/_search?ccs_minimize_roundtrips=%s",
randomFrom("test1::failures", "test*::failures", "*::failures", "non-existing::failures"),
ccsMinimizeRoundtrips
)
)
)
);
assertSelectorsNotSupported(exception);
}
{
// direct access to backing failure index is not allowed - no explicit read privileges over .fs-* indices
Request failureIndexSearchRequest = new Request(
"GET",
String.format(
Locale.ROOT,
"/my_remote_cluster:%s/_search?ccs_minimize_roundtrips=%s",
backingFailureIndexName,
ccsMinimizeRoundtrips
)
);
final ResponseException exception = expectThrows(
ResponseException.class,
() -> performRequestWithRemoteSearchUser(failureIndexSearchRequest)
);
assertThat(exception.getResponse().getStatusLine().getStatusCode(), equalTo(403));
assertThat(
exception.getMessage(),
containsString(
"action ["
+ (ccsMinimizeRoundtrips ? "indices:data/read/search" : "indices:admin/search/search_shards")
+ "] towards remote cluster is unauthorized for user [remote_search_user] "
+ "with assigned roles [remote_search] authenticated by API key id ["
+ crossClusterAccessApiKeyId
+ "] of user [test_user] on indices ["
+ backingFailureIndexName
+ "], this action is granted by the index privileges ["
+ (ccsMinimizeRoundtrips ? "read,all" : "view_index_metadata,manage,read_cross_cluster,all")
+ "]"
)
);
}
}
private static void setupLocalDataOnQueryCluster() throws IOException {
// Index some documents, to use them in a mixed-cluster search
final var indexDocRequest = new Request("POST", "/local_index/_doc?refresh=true");
indexDocRequest.setJsonEntity("{\"local_foo\": \"local_bar\"}");
assertOK(client().performRequest(indexDocRequest));
}
private static void setupUserAndRoleOnQueryCluster() throws IOException {
final var putRoleRequest = new Request("PUT", "/_security/role/" + REMOTE_SEARCH_ROLE);
putRoleRequest.setJsonEntity("""
{
"description": "Role with privileges for remote and local indices.",
"indices": [
{
"names": ["local_index"],
"privileges": ["read"]
}
],
"remote_indices": [
{
"names": ["test*"],
"privileges": ["read", "read_cross_cluster"],
"clusters": ["my_remote_cluster"]
}
]
}""");
assertOK(adminClient().performRequest(putRoleRequest));
final var putUserRequest = new Request("PUT", "/_security/user/" + REMOTE_SEARCH_USER);
putUserRequest.setJsonEntity("""
{
"password": "x-pack-test-password",
"roles" : ["remote_search"]
}""");
assertOK(adminClient().performRequest(putUserRequest));
}
}
| RemoteClusterSecurityRCS2FailureStoreRestIT |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/naming/VariableNamingTest.java | {
"start": 905,
"end": 1927
} | class ____ {
@ProcessorTest
public void shouldGenerateImplementationsOfMethodsWithProblematicVariableNmes() {
Source source = new Source();
source.setSomeNumber( 42 );
source.setValues( Arrays.asList( 42L, 121L ) );
Map<Long, Date> map = new HashMap<>();
map.put( 42L, new GregorianCalendar( 1980, Calendar.JANUARY, 1 ).getTime() );
map.put( 121L, new GregorianCalendar( 2013, Calendar.JULY, 20 ).getTime() );
source.setMap( map );
Break target = SourceTargetMapper.INSTANCE.sourceToBreak( source );
assertThat( target ).isNotNull();
assertThat( target.getValues() ).isNotNull();
assertThat( target.getValues() ).containsOnly( "42", "121" );
assertThat( target.getSomeNumber() ).isEqualTo( "42" );
assertThat( target.getMap() ).hasSize( 2 );
assertThat( target.getMap() ).contains(
entry( "42", "01.01.1980" ),
entry( "121", "20.07.2013" )
);
}
}
| VariableNamingTest |
java | apache__kafka | connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/IncrementalCooperativeConnectProtocol.java | {
"start": 3216,
"end": 12935
} | class ____ {
public static final String ALLOCATION_KEY_NAME = "allocation";
public static final String REVOKED_KEY_NAME = "revoked";
public static final String SCHEDULED_DELAY_KEY_NAME = "delay";
public static final short CONNECT_PROTOCOL_V1 = 1;
public static final short CONNECT_PROTOCOL_V2 = 2;
public static final boolean TOLERATE_MISSING_FIELDS_WITH_DEFAULTS = true;
/**
* Connect Protocol Header V1:
* <pre>
* Version => Int16
* </pre>
*/
private static final Struct CONNECT_PROTOCOL_HEADER_V1 = new Struct(CONNECT_PROTOCOL_HEADER_SCHEMA)
.set(VERSION_KEY_NAME, CONNECT_PROTOCOL_V1);
/**
* Connect Protocol Header V2:
* <pre>
* Version => Int16
* </pre>
* The V2 protocol is schematically identical to V1, but is used to signify that internal request
* verification and distribution of session keys is enabled (for more information, see
* <a href="https://cwiki.apache.org/confluence/display/KAFKA/KIP-507%3A+Securing+Internal+Connect+REST+Endpoints">KIP-507</a>)
*/
private static final Struct CONNECT_PROTOCOL_HEADER_V2 = new Struct(CONNECT_PROTOCOL_HEADER_SCHEMA)
.set(VERSION_KEY_NAME, CONNECT_PROTOCOL_V2);
/**
* Config State V1:
* <pre>
* Url => [String]
* ConfigOffset => Int64
* </pre>
*/
public static final Schema CONFIG_STATE_V1 = CONFIG_STATE_V0;
/**
* Allocation V1
* <pre>
* Current Assignment => [Byte]
* </pre>
*/
public static final Schema ALLOCATION_V1 = new Schema(
TOLERATE_MISSING_FIELDS_WITH_DEFAULTS,
new Field(ALLOCATION_KEY_NAME, NULLABLE_BYTES, null, true, null));
/**
*
* Connector Assignment V1:
* <pre>
* Connector => [String]
* Tasks => [Int32]
* </pre>
*
* <p>Assignments for each worker are a set of connectors and tasks. These are categorized by
* connector ID. A sentinel task ID (CONNECTOR_TASK) is used to indicate the connector itself
* (i.e. that the assignment includes responsibility for running the Connector instance in
* addition to any tasks it generates).</p>
*/
public static final Schema CONNECTOR_ASSIGNMENT_V1 = CONNECTOR_ASSIGNMENT_V0;
/**
* Raw (non versioned) assignment V1:
* <pre>
* Error => Int16
* Leader => [String]
* LeaderUrl => [String]
* ConfigOffset => Int64
* Assignment => [Connector Assignment]
* Revoked => [Connector Assignment]
* ScheduledDelay => Int32
* </pre>
*/
public static final Schema ASSIGNMENT_V1 = new Schema(
TOLERATE_MISSING_FIELDS_WITH_DEFAULTS,
new Field(ERROR_KEY_NAME, Type.INT16),
new Field(LEADER_KEY_NAME, Type.STRING),
new Field(LEADER_URL_KEY_NAME, Type.STRING),
new Field(CONFIG_OFFSET_KEY_NAME, Type.INT64),
new Field(ASSIGNMENT_KEY_NAME, ArrayOf.nullable(CONNECTOR_ASSIGNMENT_V1), null, true, null),
new Field(REVOKED_KEY_NAME, ArrayOf.nullable(CONNECTOR_ASSIGNMENT_V1), null, true, null),
new Field(SCHEDULED_DELAY_KEY_NAME, Type.INT32, null, 0));
/**
* The fields are serialized in sequence as follows:
* Subscription V1:
* <pre>
* Version => Int16
* Url => [String]
* ConfigOffset => Int64
* Current Assignment => [Byte]
* </pre>
*/
public static ByteBuffer serializeMetadata(ExtendedWorkerState workerState, boolean sessioned) {
Struct configState = new Struct(CONFIG_STATE_V1)
.set(URL_KEY_NAME, workerState.url())
.set(CONFIG_OFFSET_KEY_NAME, workerState.offset());
// Not a big issue if we embed the protocol version with the assignment in the metadata
Struct allocation = new Struct(ALLOCATION_V1)
.set(ALLOCATION_KEY_NAME, serializeAssignment(workerState.assignment(), sessioned));
Struct connectProtocolHeader = sessioned ? CONNECT_PROTOCOL_HEADER_V2 : CONNECT_PROTOCOL_HEADER_V1;
ByteBuffer buffer = ByteBuffer.allocate(connectProtocolHeader.sizeOf()
+ CONFIG_STATE_V1.sizeOf(configState)
+ ALLOCATION_V1.sizeOf(allocation));
connectProtocolHeader.writeTo(buffer);
CONFIG_STATE_V1.write(buffer, configState);
ALLOCATION_V1.write(buffer, allocation);
buffer.flip();
return buffer;
}
/**
* Returns the collection of Connect protocols that are supported by this version along
* with their serialized metadata. The protocols are ordered by preference.
*
* @param workerState the current state of the worker metadata
* @param sessioned whether the {@link ConnectProtocolCompatibility#SESSIONED} protocol should
* be included in the collection of supported protocols
* @return the collection of Connect protocol metadata
*/
public static JoinGroupRequestProtocolCollection metadataRequest(ExtendedWorkerState workerState, boolean sessioned) {
// Order matters in terms of protocol preference
List<JoinGroupRequestProtocol> joinGroupRequestProtocols = new ArrayList<>();
if (sessioned) {
joinGroupRequestProtocols.add(new JoinGroupRequestProtocol()
.setName(SESSIONED.protocol())
.setMetadata(IncrementalCooperativeConnectProtocol.serializeMetadata(workerState, true).array())
);
}
joinGroupRequestProtocols.add(new JoinGroupRequestProtocol()
.setName(COMPATIBLE.protocol())
.setMetadata(IncrementalCooperativeConnectProtocol.serializeMetadata(workerState, false).array())
);
joinGroupRequestProtocols.add(new JoinGroupRequestProtocol()
.setName(EAGER.protocol())
.setMetadata(ConnectProtocol.serializeMetadata(workerState).array())
);
return new JoinGroupRequestProtocolCollection(joinGroupRequestProtocols.iterator());
}
/**
* Given a byte buffer that contains protocol metadata return the deserialized form of the
* metadata.
*
* @param buffer A buffer containing the protocols metadata
* @return the deserialized metadata
* @throws SchemaException on incompatible Connect protocol version
*/
public static ExtendedWorkerState deserializeMetadata(ByteBuffer buffer) {
Struct header = CONNECT_PROTOCOL_HEADER_SCHEMA.read(buffer);
Short version = header.getShort(VERSION_KEY_NAME);
checkVersionCompatibility(version);
Struct configState = CONFIG_STATE_V1.read(buffer);
long configOffset = configState.getLong(CONFIG_OFFSET_KEY_NAME);
String url = configState.getString(URL_KEY_NAME);
Struct allocation = ALLOCATION_V1.read(buffer);
// Protocol version is embedded with the assignment in the metadata
ExtendedAssignment assignment = deserializeAssignment(allocation.getBytes(ALLOCATION_KEY_NAME));
return new ExtendedWorkerState(url, configOffset, assignment);
}
/**
* The fields are serialized in sequence as follows:
* Complete Assignment V1:
* <pre>
* Version => Int16
* Error => Int16
* Leader => [String]
* LeaderUrl => [String]
* ConfigOffset => Int64
* Assignment => [Connector Assignment]
* Revoked => [Connector Assignment]
* ScheduledDelay => Int32
* </pre>
*/
public static ByteBuffer serializeAssignment(ExtendedAssignment assignment, boolean sessioned) {
// comparison depends on reference equality for now
if (assignment == null || ExtendedAssignment.empty().equals(assignment)) {
return null;
}
Struct struct = assignment.toStruct();
Struct protocolHeader = sessioned ? CONNECT_PROTOCOL_HEADER_V2 : CONNECT_PROTOCOL_HEADER_V1;
ByteBuffer buffer = ByteBuffer.allocate(protocolHeader.sizeOf()
+ ASSIGNMENT_V1.sizeOf(struct));
protocolHeader.writeTo(buffer);
ASSIGNMENT_V1.write(buffer, struct);
buffer.flip();
return buffer;
}
/**
* Given a byte buffer that contains an assignment as defined by this protocol, return the
* deserialized form of the assignment.
*
* @param buffer the buffer containing a serialized assignment
* @return the deserialized assignment
* @throws SchemaException on incompatible Connect protocol version
*/
public static ExtendedAssignment deserializeAssignment(ByteBuffer buffer) {
if (buffer == null) {
return null;
}
Struct header = CONNECT_PROTOCOL_HEADER_SCHEMA.read(buffer);
Short version = header.getShort(VERSION_KEY_NAME);
checkVersionCompatibility(version);
Struct struct = ASSIGNMENT_V1.read(buffer);
return ExtendedAssignment.fromStruct(version, struct);
}
private static void checkVersionCompatibility(short version) {
// check for invalid versions
if (version < CONNECT_PROTOCOL_V0)
throw new SchemaException("Unsupported subscription version: " + version);
// otherwise, assume versions can be parsed
}
}
| IncrementalCooperativeConnectProtocol |
java | elastic__elasticsearch | libs/lz4/src/test/java/org/elasticsearch/lz4/ESLZ4CompressorTests.java | {
"start": 801,
"end": 3749
} | class ____ extends ESTestCase {
public void testCompressRealisticUnicode() {
for (int i = 0; i < 15; ++i) {
int stringLengthMultiplier = randomFrom(5, 10, 20, 40, 80, 160, 320);
final String uncompressedString = randomRealisticUnicodeOfCodepointLength(stringLengthMultiplier * 1024);
byte[] uncompressed = uncompressedString.getBytes(StandardCharsets.UTF_8);
byte[] compressed = new byte[uncompressed.length + uncompressed.length / 255 + 16];
byte[] unForkedCompressed = new byte[uncompressed.length + uncompressed.length / 255 + 16];
LZ4Compressor compressor = ESLZ4Compressor.INSTANCE;
int forkedCompressedSize = compressor.compress(uncompressed, compressed);
LZ4Compressor unForkedCompressor = LZ4Factory.safeInstance().fastCompressor();
int unForkedCompressedSize = unForkedCompressor.compress(uncompressed, unForkedCompressed);
assertEquals(unForkedCompressedSize, forkedCompressedSize);
assertArrayEquals(compressed, unForkedCompressed);
LZ4FastDecompressor decompressor = LZ4Factory.safeInstance().fastDecompressor();
byte[] output = new byte[uncompressed.length];
decompressor.decompress(compressed, output);
assertArrayEquals(uncompressed, output);
}
}
public void testCompressRandomIntBytes() throws IOException {
for (int i = 0; i < 15; ++i) {
int uncompressedBytesLength = randomFrom(16, 32, 64, 128, 256, 512, 1024) * 1024;
BytesStreamOutput bytesStreamOutput = new BytesStreamOutput(uncompressedBytesLength);
for (int j = 0; j < uncompressedBytesLength / 4; ++j) {
bytesStreamOutput.writeInt(randomFrom(0, 1, randomInt()));
}
byte[] uncompressed = new byte[uncompressedBytesLength];
bytesStreamOutput.bytes().streamInput().read(uncompressed);
byte[] compressed = new byte[uncompressed.length + uncompressed.length / 255 + 16];
byte[] unForkedCompressed = new byte[uncompressed.length + uncompressed.length / 255 + 16];
LZ4Compressor compressor = ESLZ4Compressor.INSTANCE;
int forkedCompressedSize = compressor.compress(uncompressed, compressed);
LZ4Compressor unForkedCompressor = LZ4Factory.safeInstance().fastCompressor();
int unForkedCompressedSize = unForkedCompressor.compress(uncompressed, unForkedCompressed);
assertEquals(unForkedCompressedSize, forkedCompressedSize);
assertArrayEquals(unForkedCompressed, compressed);
LZ4FastDecompressor decompressor = LZ4Factory.safeInstance().fastDecompressor();
byte[] output = new byte[uncompressed.length];
decompressor.decompress(compressed, output);
assertArrayEquals(uncompressed, output);
}
}
}
| ESLZ4CompressorTests |
java | mockito__mockito | mockito-core/src/main/java/org/mockito/stubbing/Stubber.java | {
"start": 1133,
"end": 1962
} | interface ____ extends BaseStubber {
/**
* Allows to choose a method when stubbing in doThrow()|doAnswer()|doNothing()|doReturn() style
* <p>
* Example:
* <pre class="code"><code class="java">
* doThrow(new RuntimeException())
* .when(mockedList).clear();
*
* //following throws RuntimeException:
* mockedList.clear();
* </code></pre>
*
* Read more about those methods:
* <p>
* {@link Mockito#doThrow(Throwable[])}
* <p>
* {@link Mockito#doAnswer(Answer)}
* <p>
* {@link Mockito#doNothing()}
* <p>
* {@link Mockito#doReturn(Object)}
* <p>
*
* See examples in javadoc for {@link Mockito}
*
* @param mock The mock
* @return select method for stubbing
*/
<T> T when(T mock);
}
| Stubber |
java | grpc__grpc-java | benchmarks/src/generated/main/grpc/io/grpc/benchmarks/proto/BenchmarkServiceGrpc.java | {
"start": 13244,
"end": 15809
} | interface ____ {
/**
* <pre>
* One request followed by one response.
* The server returns the client payload as-is.
* </pre>
*/
default void unaryCall(io.grpc.benchmarks.proto.Messages.SimpleRequest request,
io.grpc.stub.StreamObserver<io.grpc.benchmarks.proto.Messages.SimpleResponse> responseObserver) {
io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getUnaryCallMethod(), responseObserver);
}
/**
* <pre>
* Repeated sequence of one request followed by one response.
* Should be called streaming ping-pong
* The server returns the client payload as-is on each response
* </pre>
*/
default io.grpc.stub.StreamObserver<io.grpc.benchmarks.proto.Messages.SimpleRequest> streamingCall(
io.grpc.stub.StreamObserver<io.grpc.benchmarks.proto.Messages.SimpleResponse> responseObserver) {
return io.grpc.stub.ServerCalls.asyncUnimplementedStreamingCall(getStreamingCallMethod(), responseObserver);
}
/**
* <pre>
* Single-sided unbounded streaming from client to server
* The server returns the client payload as-is once the client does WritesDone
* </pre>
*/
default io.grpc.stub.StreamObserver<io.grpc.benchmarks.proto.Messages.SimpleRequest> streamingFromClient(
io.grpc.stub.StreamObserver<io.grpc.benchmarks.proto.Messages.SimpleResponse> responseObserver) {
return io.grpc.stub.ServerCalls.asyncUnimplementedStreamingCall(getStreamingFromClientMethod(), responseObserver);
}
/**
* <pre>
* Single-sided unbounded streaming from server to client
* The server repeatedly returns the client payload as-is
* </pre>
*/
default void streamingFromServer(io.grpc.benchmarks.proto.Messages.SimpleRequest request,
io.grpc.stub.StreamObserver<io.grpc.benchmarks.proto.Messages.SimpleResponse> responseObserver) {
io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getStreamingFromServerMethod(), responseObserver);
}
/**
* <pre>
* Two-sided unbounded streaming between server to client
* Both sides send the content of their own choice to the other
* </pre>
*/
default io.grpc.stub.StreamObserver<io.grpc.benchmarks.proto.Messages.SimpleRequest> streamingBothWays(
io.grpc.stub.StreamObserver<io.grpc.benchmarks.proto.Messages.SimpleResponse> responseObserver) {
return io.grpc.stub.ServerCalls.asyncUnimplementedStreamingCall(getStreamingBothWaysMethod(), responseObserver);
}
}
/**
* Base | AsyncService |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/runtime/io/checkpointing/AlternatingWaitingForFirstBarrierUnaligned.java | {
"start": 1329,
"end": 4399
} | class ____ implements BarrierHandlerState {
private final boolean alternating;
private final ChannelState channelState;
AlternatingWaitingForFirstBarrierUnaligned(boolean alternating, ChannelState channelState) {
this.alternating = alternating;
this.channelState = channelState;
}
@Override
public BarrierHandlerState alignedCheckpointTimeout(
Controller controller, CheckpointBarrier checkpointBarrier) {
// ignore already processing unaligned checkpoints
return this;
}
@Override
public BarrierHandlerState announcementReceived(
Controller controller, InputChannelInfo channelInfo, int sequenceNumber)
throws IOException {
channelState.getInputs()[channelInfo.getGateIdx()].convertToPriorityEvent(
channelInfo.getInputChannelIdx(), sequenceNumber);
return this;
}
@Override
public BarrierHandlerState barrierReceived(
Controller controller,
InputChannelInfo channelInfo,
CheckpointBarrier checkpointBarrier,
boolean markChannelBlocked)
throws CheckpointException, IOException {
// we received an out of order aligned barrier, we should book keep this channel as blocked,
// as it is being blocked by the credit-based network
if (markChannelBlocked
&& !checkpointBarrier.getCheckpointOptions().isUnalignedCheckpoint()) {
channelState.blockChannel(channelInfo);
}
CheckpointBarrier unalignedBarrier = checkpointBarrier.asUnaligned();
controller.initInputsCheckpoint(unalignedBarrier);
for (CheckpointableInput input : channelState.getInputs()) {
input.checkpointStarted(unalignedBarrier);
}
controller.triggerGlobalCheckpoint(unalignedBarrier);
if (controller.allBarriersReceived()) {
for (CheckpointableInput input : channelState.getInputs()) {
input.checkpointStopped(unalignedBarrier.getId());
}
return stopCheckpoint();
}
return new AlternatingCollectingBarriersUnaligned(alternating, channelState);
}
@Override
public BarrierHandlerState abort(long cancelledId) throws IOException {
return stopCheckpoint();
}
@Override
public BarrierHandlerState endOfPartitionReceived(
Controller controller, InputChannelInfo channelInfo)
throws IOException, CheckpointException {
channelState.channelFinished(channelInfo);
// Do nothing since we have no pending checkpoint.
return this;
}
private BarrierHandlerState stopCheckpoint() throws IOException {
channelState.unblockAllChannels();
if (alternating) {
return new AlternatingWaitingForFirstBarrier(channelState.emptyState());
} else {
return new AlternatingWaitingForFirstBarrierUnaligned(false, channelState.emptyState());
}
}
}
| AlternatingWaitingForFirstBarrierUnaligned |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/hybrid/index/TestingFileDataIndexRegionHelper.java | {
"start": 2540,
"end": 3810
} | class ____ {
private BiConsumerWithException<FileChannel, TestingFileDataIndexRegion, IOException>
writeRegionToFileConsumer = (fileChannel, testRegion) -> {};
private BiFunctionWithException<FileChannel, Long, TestingFileDataIndexRegion, IOException>
readRegionFromFileFunction = (fileChannel, fileOffset) -> null;
public TestingFileDataIndexRegionHelper.Builder setWriteRegionToFileConsumer(
BiConsumerWithException<FileChannel, TestingFileDataIndexRegion, IOException>
writeRegionToFileConsumer) {
this.writeRegionToFileConsumer = writeRegionToFileConsumer;
return this;
}
public TestingFileDataIndexRegionHelper.Builder setReadRegionFromFileFunction(
BiFunctionWithException<FileChannel, Long, TestingFileDataIndexRegion, IOException>
readRegionFromFileFunction) {
this.readRegionFromFileFunction = readRegionFromFileFunction;
return this;
}
public TestingFileDataIndexRegionHelper build() {
return new TestingFileDataIndexRegionHelper(
writeRegionToFileConsumer, readRegionFromFileFunction);
}
}
}
| Builder |
java | micronaut-projects__micronaut-core | http-server-netty/src/test/groovy/io/micronaut/http/server/netty/routing/RootRoutingTest.java | {
"start": 346,
"end": 606
} | class ____ {
@Inject
MyClient client;
@Test
void testRootEndpoint() {
KeyValue kv = client.getRoot();
Assertions.assertEquals("hello", kv.getKey());
Assertions.assertEquals("world", kv.getValue());
}
}
| RootRoutingTest |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/TestSupportJmxCleanup.java | {
"start": 1045,
"end": 2332
} | class ____ {
public static final String DEFAULT_DOMAIN = "org.apache.camel";
private static final Logger LOG = LoggerFactory.getLogger(TestSupportJmxCleanup.class);
private TestSupportJmxCleanup() {
// no instances
}
public static void removeMBeans(String domain) throws Exception {
MBeanServer mbsc = ManagementFactory.getPlatformMBeanServer();
Set<ObjectName> s = mbsc.queryNames(new ObjectName(getDomainName(domain) + ":*"), null);
for (ObjectName on : s) {
mbsc.unregisterMBean(on);
}
}
// useful helper to invoke in TestSupport to figure out what test leave junk
// behind
public static void traceMBeans(String domain) throws Exception {
MBeanServer mbsc = ManagementFactory.getPlatformMBeanServer();
String d = getDomainName(domain);
Set<ObjectName> s = mbsc.queryNames(new ObjectName(d + ":*"), null);
if (!s.isEmpty()) {
LOG.warn(" + {} ObjectNames registered in domain \"{}\"", s.size(), d);
for (ObjectName on : s) {
LOG.warn(" | {}", on);
}
}
}
private static String getDomainName(String domain) {
return domain == null ? DEFAULT_DOMAIN : domain;
}
}
| TestSupportJmxCleanup |
java | micronaut-projects__micronaut-core | http/src/main/java/io/micronaut/http/form/FormConfigurationProperties.java | {
"start": 897,
"end": 2872
} | class ____ implements FormConfiguration {
/**
* Prefix for Micronaut form settings.
*/
public static final String PREFIX = "micronaut.http.forms";
/**
* The default maximum of decoded key value parameters used in {@link io.micronaut.http.form.FormUrlEncodedDecoder}.
*/
@SuppressWarnings("WeakerAccess")
private static final int DEFAULT_MAX_DECODED_KEY_VALUE_PARAMETERS = 1024;
/**
* Default value indicating whether the semicolon is treated as a normal character
* used in {@link io.micronaut.http.form.FormUrlEncodedDecoder}.
*/
@SuppressWarnings("WeakerAccess")
private static final boolean DEFAULT_SEMICOLON_IS_NORMAL_CHAR = false;
private int maxDecodedKeyValueParameters = DEFAULT_MAX_DECODED_KEY_VALUE_PARAMETERS;
private boolean semicolonIsNormalChar = DEFAULT_SEMICOLON_IS_NORMAL_CHAR;
/**
*
* @return default maximum of decoded key value parameters
*/
@Override
public int getMaxDecodedKeyValueParameters() {
return maxDecodedKeyValueParameters;
}
/**
* @return true if the semicolon is treated as a normal character, false otherwise
*/
@Override
public boolean isSemicolonIsNormalChar() {
return semicolonIsNormalChar;
}
/**
* default maximum of decoded key value parameters. Default value {@link #DEFAULT_MAX_DECODED_KEY_VALUE_PARAMETERS}.
* @param maxDecodedKeyValueParameters default maximum of decoded key value parameters
*/
public void setMaxDecodedKeyValueParameters(int maxDecodedKeyValueParameters) {
this.maxDecodedKeyValueParameters = maxDecodedKeyValueParameters;
}
/**
* @param semicolonIsNormalChar true if the semicolon should be treated as a normal character, false otherwise
*/
public void setSemicolonIsNormalChar(boolean semicolonIsNormalChar) {
this.semicolonIsNormalChar = semicolonIsNormalChar;
}
}
| FormConfigurationProperties |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/internal/strings/Strings_assertEqualsNormalizingUnicode_Test.java | {
"start": 1692,
"end": 3564
} | class ____ extends StringsBaseTest {
@Test
void should_fail_if_actual_is_not_null_and_expected_is_null() {
assertThatNullPointerException().isThrownBy(() -> strings.assertEqualsToNormalizingUnicode(someInfo(), "\u0041", null))
.withMessage(charSequenceToLookForIsNull());
}
@Test
void should_fail_if_both_Strings_are_not_equal_after_unicode_is_normalized() {
// GIVEN
String actual = "\u00C4";
String expected = "\u0062";
AssertionInfo info = someInfo();
// WHEN
expectAssertionError(() -> strings.assertEqualsToNormalizingUnicode(info, actual, expected));
// THEN
verify(failures).failure(info, shouldBeEqualNormalizingUnicode(actual, expected, "Ä", expected), "Ä", expected);
}
@ParameterizedTest
@MethodSource("equalNormalizingUnicodeGenerator")
void should_pass_if_both_Strings_are_equal_after_unicode_is_normalized(String actual, String expected) {
strings.assertEqualsToNormalizingUnicode(someInfo(), actual, expected);
}
public static Stream<Arguments> equalNormalizingUnicodeGenerator() {
return Stream.of(
Arguments.of("A", "A"),
Arguments.of("", ""),
// Ä, Ä
Arguments.of("\u00C4", "\u0041\u0308"),
// Amélie, Amélie
Arguments.of("\u0041\u006d\u00e9\u006c\u0069\u0065", "\u0041\u006d\u0065\u0301\u006c\u0069\u0065"),
// ñ, ñ
Arguments.of("\u00F1", "\u006E\u0303"),
Arguments.of("Zoë", "Zoë"),
Arguments.of("sabiá", "sabiá"),
// ffi, ffi
Arguments.of("ffi", "\uFB03"),
// schön, schön
Arguments.of("schön", "scho\u0308n"));
}
}
| Strings_assertEqualsNormalizingUnicode_Test |
java | elastic__elasticsearch | server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayServiceIT.java | {
"start": 1568,
"end": 1911
} | class ____ extends ESIntegTestCase {
public static final Setting<Boolean> TEST_SETTING = Setting.boolSetting(
"gateway.test.setting",
false,
Setting.Property.NodeScope,
Setting.Property.Dynamic
);
public static final String ALLOCATOR_NAME = "test-shards-allocator";
public static | GatewayServiceIT |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/util/TaskConfig.java | {
"start": 53545,
"end": 54531
} | class ____
final String className = this.config.getString(classNameKey, null);
if (className == null) {
return null;
}
// instantiate the class
@SuppressWarnings("unchecked")
final Class<TypeComparatorFactory<T>> superClass =
(Class<TypeComparatorFactory<T>>) (Class<?>) TypeComparatorFactory.class;
final TypeComparatorFactory<T> factory;
try {
Class<? extends TypeComparatorFactory<T>> clazz =
Class.forName(className, true, cl).asSubclass(superClass);
factory = InstantiationUtil.instantiate(clazz, superClass);
} catch (ClassNotFoundException cnfex) {
throw new RuntimeException(
"The class '"
+ className
+ "', noted in the configuration as "
+ "comparator factory, could not be found. It is not part of the user code's | name |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveIndexActionTests.java | {
"start": 1634,
"end": 4469
} | class ____ extends ESTestCase {
private final ThreadPool threadPool = new TestThreadPool(getClass().getName());
@Override
public void tearDown() throws Exception {
super.tearDown();
ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS);
}
public void testCCSCompatibilityCheck() throws Exception {
Settings settings = Settings.builder()
.put("node.name", TransportResolveIndexActionTests.class.getSimpleName())
.put(SearchService.CCS_VERSION_CHECK_SETTING.getKey(), "true")
.build();
ActionFilters actionFilters = mock(ActionFilters.class);
when(actionFilters.filters()).thenReturn(new ActionFilter[0]);
TransportVersion transportVersion = TransportVersionUtils.getNextVersion(TransportVersion.minimumCCSVersion(), true);
try {
TransportService transportService = MockTransportService.createNewService(
Settings.EMPTY,
VersionInformation.CURRENT,
transportVersion,
threadPool
);
ResolveIndexAction.Request request = new ResolveIndexAction.Request(new String[] { "test" }) {
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
if (out.getTransportVersion().before(transportVersion)) {
throw new IllegalArgumentException("This request isn't serializable before transport version " + transportVersion);
}
}
};
ClusterService clusterService = new ClusterService(
settings,
new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
threadPool,
null
);
ResolveIndexAction.TransportAction action = new ResolveIndexAction.TransportAction(
transportService,
clusterService,
actionFilters,
TestProjectResolvers.DEFAULT_PROJECT_ONLY,
Settings.EMPTY,
null
);
IllegalArgumentException ex = expectThrows(
IllegalArgumentException.class,
() -> action.doExecute(null, request, ActionListener.noop())
);
assertThat(ex.getMessage(), containsString("not compatible with version"));
assertThat(ex.getMessage(), containsString("and the 'search.check_ccs_compatibility' setting is enabled."));
assertEquals("This request isn't serializable before transport version " + transportVersion, ex.getCause().getMessage());
} finally {
assertTrue(ESTestCase.terminate(threadPool));
}
}
}
| TransportResolveIndexActionTests |
java | apache__kafka | streams/src/test/java/org/apache/kafka/streams/processor/internals/assignment/CustomStickyTaskAssignorTest.java | {
"start": 4860,
"end": 44342
} | class ____ {
private TaskAssignor assignor;
@BeforeEach
public void setUp() {
assignor = new StickyTaskAssignor();
}
@Timeout(value = 3, unit = TimeUnit.MINUTES)
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY,
})
public void shouldAssignOneActiveTaskToEachProcessWhenTaskCountSameAsProcessCount(final String rackAwareStrategy) {
final Map<ProcessId, KafkaStreamsState> streamStates = mkMap(
mkStreamState(1, 1, Optional.empty()),
mkStreamState(2, 1, Optional.empty()),
mkStreamState(3, 1, Optional.empty())
);
final Map<TaskId, TaskInfo> tasks = mkMap(
mkTaskInfo(TASK_0_0, false),
mkTaskInfo(TASK_0_1, false),
mkTaskInfo(TASK_0_2, false)
);
final Map<ProcessId, KafkaStreamsAssignment> assignments = assign(streamStates, tasks, rackAwareStrategy);
for (final KafkaStreamsAssignment assignment : assignments.values()) {
assertThat(assignment.tasks().size(), equalTo(1));
}
}
@Timeout(value = 3, unit = TimeUnit.MINUTES)
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY,
})
public void shouldAssignTopicGroupIdEvenlyAcrossClientsWithNoStandByTasks(final String rackAwareStrategy) {
final Map<ProcessId, KafkaStreamsState> streamStates = mkMap(
mkStreamState(1, 2, Optional.empty()),
mkStreamState(2, 2, Optional.empty()),
mkStreamState(3, 2, Optional.empty())
);
final Map<TaskId, TaskInfo> tasks = mkMap(
mkTaskInfo(TASK_1_0, false),
mkTaskInfo(TASK_1_1, false),
mkTaskInfo(TASK_2_2, false),
mkTaskInfo(TASK_2_0, false),
mkTaskInfo(TASK_2_1, false),
mkTaskInfo(TASK_1_2, false)
);
final Map<ProcessId, KafkaStreamsAssignment> assignments = assign(streamStates, tasks, rackAwareStrategy);
assertActiveTaskTopicGroupIdsEvenlyDistributed(assignments);
}
@Timeout(value = 3, unit = TimeUnit.MINUTES)
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY,
})
public void shouldAssignTopicGroupIdEvenlyAcrossClientsWithStandByTasks(final String rackAwareStrategy) {
final Map<ProcessId, KafkaStreamsState> streamStates = mkMap(
mkStreamState(1, 2, Optional.empty()),
mkStreamState(2, 2, Optional.empty()),
mkStreamState(3, 2, Optional.empty())
);
final Map<TaskId, TaskInfo> tasks = mkMap(
mkTaskInfo(TASK_2_0, false),
mkTaskInfo(TASK_1_1, false),
mkTaskInfo(TASK_1_2, false),
mkTaskInfo(TASK_1_0, false),
mkTaskInfo(TASK_2_1, false),
mkTaskInfo(TASK_2_2, false)
);
final Map<ProcessId, KafkaStreamsAssignment> assignments = assign(streamStates, tasks, 1, rackAwareStrategy);
assertActiveTaskTopicGroupIdsEvenlyDistributed(assignments);
}
@Timeout(value = 3, unit = TimeUnit.MINUTES)
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY,
})
public void shouldNotMigrateActiveTaskToOtherProcess(final String rackAwareStrategy) {
final Map<ProcessId, KafkaStreamsState> streamStates = mkMap(
mkStreamState(1, 1, Optional.empty(), Set.of(TASK_0_0), Set.of()),
mkStreamState(2, 1, Optional.empty(), Set.of(TASK_0_1), Set.of())
);
final Map<TaskId, TaskInfo> tasks = mkMap(
mkTaskInfo(TASK_0_0, false),
mkTaskInfo(TASK_0_1, false),
mkTaskInfo(TASK_0_2, false)
);
final Map<ProcessId, KafkaStreamsAssignment> assignments = assign(streamStates, tasks, rackAwareStrategy);
assertHasAssignment(assignments, 1, TASK_0_0, ACTIVE);
assertHasAssignment(assignments, 2, TASK_0_1, ACTIVE);
final Map<ProcessId, KafkaStreamsState> streamStates2 = mkMap(
mkStreamState(1, 1, Optional.empty(), Set.of(TASK_0_1), Set.of()),
mkStreamState(2, 1, Optional.empty(), Set.of(TASK_0_0), Set.of())
);
final Map<ProcessId, KafkaStreamsAssignment> assignments2 = assign(streamStates2, tasks, rackAwareStrategy);
assertHasAssignment(assignments2, 1, TASK_0_1, ACTIVE);
assertHasAssignment(assignments2, 2, TASK_0_0, ACTIVE);
}
@Timeout(value = 3, unit = TimeUnit.MINUTES)
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY,
})
public void shouldMigrateActiveTasksToNewProcessWithoutChangingAllAssignments(final String rackAwareStrategy) {
final Map<TaskId, TaskInfo> tasks = mkMap(
mkTaskInfo(TASK_0_0, false),
mkTaskInfo(TASK_0_1, false),
mkTaskInfo(TASK_0_2, false)
);
final Map<ProcessId, KafkaStreamsState> streamStates = mkMap(
mkStreamState(1, 1, Optional.empty(), Set.of(TASK_0_0, TASK_0_2), Set.of()),
mkStreamState(2, 1, Optional.empty(), Set.of(TASK_0_1), Set.of()),
mkStreamState(3, 1, Optional.empty())
);
final Map<ProcessId, KafkaStreamsAssignment> assignments = assign(streamStates, tasks, rackAwareStrategy);
assertThat(assignments.get(processId(1)).tasks().values().size(), equalTo(1));
assertThat(assignments.get(processId(2)).tasks().values().size(), equalTo(1));
assertThat(assignments.get(processId(3)).tasks().values().size(), equalTo(1));
assertHasAssignment(assignments, 2, TASK_0_1, ACTIVE);
}
@Timeout(value = 3, unit = TimeUnit.MINUTES)
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY,
})
public void shouldAssignBasedOnCapacity(final String rackAwareStrategy) {
final Map<TaskId, TaskInfo> tasks = mkMap(
mkTaskInfo(TASK_0_0, false),
mkTaskInfo(TASK_0_1, false),
mkTaskInfo(TASK_0_2, false)
);
final Map<ProcessId, KafkaStreamsState> streamStates = mkMap(
mkStreamState(1, 1, Optional.empty()),
mkStreamState(2, 2, Optional.empty())
);
final Map<ProcessId, KafkaStreamsAssignment> assignments = assign(streamStates, tasks, rackAwareStrategy);
assertThat(assignments.get(processId(1)).tasks().values().size(), equalTo(1));
assertThat(assignments.get(processId(2)).tasks().values().size(), equalTo(2));
}
@Timeout(value = 3, unit = TimeUnit.MINUTES)
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY,
})
public void shouldAssignTasksEvenlyWithUnequalTopicGroupSizes(final String rackAwareStrategy) {
final Map<TaskId, TaskInfo> tasks = mkMap(
mkTaskInfo(TASK_1_0, false),
mkTaskInfo(TASK_0_0, false),
mkTaskInfo(TASK_0_1, false),
mkTaskInfo(TASK_0_2, false),
mkTaskInfo(TASK_0_3, false),
mkTaskInfo(TASK_0_4, false),
mkTaskInfo(TASK_0_5, false)
);
final Map<ProcessId, KafkaStreamsState> streamStates = mkMap(
mkStreamState(1, 1, Optional.empty(), Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3, TASK_0_4, TASK_0_5, TASK_1_0), Set.of()),
mkStreamState(2, 1, Optional.empty())
);
final Map<ProcessId, KafkaStreamsAssignment> assignments = assign(streamStates, tasks, rackAwareStrategy);
final Set<TaskId> client1Tasks = assignments.get(processId(1)).tasks().values().stream()
.filter(t -> t.type() == ACTIVE)
.map(AssignedTask::id)
.collect(Collectors.toSet());
final Set<TaskId> client2Tasks = assignments.get(processId(2)).tasks().values().stream()
.filter(t -> t.type() == ACTIVE)
.map(AssignedTask::id)
.collect(Collectors.toSet());
final Set<TaskId> allTasks = tasks.keySet();
// one client should get 3 tasks and the other should have 4
assertThat(
(client1Tasks.size() == 3 && client2Tasks.size() == 4) ||
(client1Tasks.size() == 4 && client2Tasks.size() == 3),
is(true));
allTasks.removeAll(client1Tasks);
// client2 should have all the remaining tasks not assigned to client 1
assertThat(client2Tasks, equalTo(allTasks));
}
@Timeout(value = 3, unit = TimeUnit.MINUTES)
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY,
})
public void shouldKeepActiveTaskStickinessWhenMoreClientThanActiveTasks(final String rackAwareStrategy) {
final Map<TaskId, TaskInfo> tasks = mkMap(
mkTaskInfo(TASK_0_0, false),
mkTaskInfo(TASK_0_1, false),
mkTaskInfo(TASK_0_2, false)
);
final Map<ProcessId, KafkaStreamsState> streamStates = mkMap(
mkStreamState(1, 1, Optional.empty(), Set.of(TASK_0_0), Set.of()),
mkStreamState(2, 1, Optional.empty(), Set.of(TASK_0_2), Set.of()),
mkStreamState(3, 1, Optional.empty(), Set.of(TASK_0_1), Set.of()),
mkStreamState(4, 1, Optional.empty()),
mkStreamState(5, 1, Optional.empty())
);
final Map<ProcessId, KafkaStreamsAssignment> assignments = assign(streamStates, tasks, rackAwareStrategy);
assertThat(assignments.get(processId(1)).tasks().size(), is(1));
assertThat(assignments.get(processId(2)).tasks().size(), is(1));
assertThat(assignments.get(processId(3)).tasks().size(), is(1));
assertThat(assignments.get(processId(4)).tasks().size(), is(0));
assertThat(assignments.get(processId(5)).tasks().size(), is(0));
assertHasAssignment(assignments, 1, TASK_0_0, ACTIVE);
assertHasAssignment(assignments, 2, TASK_0_2, ACTIVE);
assertHasAssignment(assignments, 3, TASK_0_1, ACTIVE);
final Map<ProcessId, KafkaStreamsState> streamStates2 = mkMap(
mkStreamState(1, 1, Optional.empty()),
mkStreamState(2, 1, Optional.empty()),
mkStreamState(3, 1, Optional.empty(), Set.of(TASK_0_1), Set.of()),
mkStreamState(4, 1, Optional.empty(), Set.of(TASK_0_0), Set.of()),
mkStreamState(5, 1, Optional.empty(), Set.of(TASK_0_2), Set.of())
);
final Map<ProcessId, KafkaStreamsAssignment> assignments2 = assign(streamStates2, tasks, rackAwareStrategy);
assertThat(assignments2.get(processId(1)).tasks().size(), is(0));
assertThat(assignments2.get(processId(2)).tasks().size(), is(0));
assertThat(assignments2.get(processId(3)).tasks().size(), is(1));
assertThat(assignments2.get(processId(4)).tasks().size(), is(1));
assertThat(assignments2.get(processId(5)).tasks().size(), is(1));
assertHasAssignment(assignments2, 3, TASK_0_1, ACTIVE);
assertHasAssignment(assignments2, 4, TASK_0_0, ACTIVE);
assertHasAssignment(assignments2, 5, TASK_0_2, ACTIVE);
}
@Timeout(value = 3, unit = TimeUnit.MINUTES)
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY,
})
public void shouldAssignTasksToClientWithPreviousStandbyTasks(final String rackAwareStrategy) {
final Map<TaskId, TaskInfo> tasks = mkMap(
mkTaskInfo(TASK_0_0, false),
mkTaskInfo(TASK_0_1, false),
mkTaskInfo(TASK_0_2, false)
);
final Map<ProcessId, KafkaStreamsState> streamStates = mkMap(
mkStreamState(1, 1, Optional.empty(), Set.of(), Set.of(TASK_0_2)),
mkStreamState(2, 1, Optional.empty(), Set.of(), Set.of(TASK_0_1)),
mkStreamState(3, 1, Optional.empty(), Set.of(), Set.of(TASK_0_0))
);
final Map<ProcessId, KafkaStreamsAssignment> assignments = assign(streamStates, tasks, rackAwareStrategy);
assertHasAssignment(assignments, 1, TASK_0_2, ACTIVE);
assertHasAssignment(assignments, 2, TASK_0_1, ACTIVE);
assertHasAssignment(assignments, 3, TASK_0_0, ACTIVE);
}
@Timeout(value = 3, unit = TimeUnit.MINUTES)
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY,
})
public void shouldAssignBasedOnCapacityWhenMultipleClientHaveStandbyTasks(final String rackAwareStrategy) {
final Map<TaskId, TaskInfo> tasks = mkMap(
mkTaskInfo(TASK_0_0, false),
mkTaskInfo(TASK_0_1, false),
mkTaskInfo(TASK_0_2, false)
);
final Map<ProcessId, KafkaStreamsState> streamStates = mkMap(
mkStreamState(1, 1, Optional.empty(), Set.of(TASK_0_0), Set.of(TASK_0_1)),
mkStreamState(2, 2, Optional.empty(), Set.of(TASK_0_2), Set.of(TASK_0_1))
);
final Map<ProcessId, KafkaStreamsAssignment> assignments = assign(streamStates, tasks, rackAwareStrategy);
assertThat(assignments.get(processId(1)).tasks().size(), is(1));
assertThat(assignments.get(processId(2)).tasks().size(), is(2));
assertHasAssignment(assignments, 1, TASK_0_0, ACTIVE);
assertHasAssignment(assignments, 2, TASK_0_1, ACTIVE);
assertHasAssignment(assignments, 2, TASK_0_2, ACTIVE);
}
@Timeout(value = 3, unit = TimeUnit.MINUTES)
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY,
})
public void shouldAssignStandbyTasksToDifferentClientThanCorrespondingActiveTaskIsAssignedTo(final String rackAwareStrategy) {
final Map<TaskId, TaskInfo> tasks = mkMap(
mkTaskInfo(TASK_0_0, true),
mkTaskInfo(TASK_0_1, true),
mkTaskInfo(TASK_0_2, true),
mkTaskInfo(TASK_0_3, true)
);
final Map<ProcessId, KafkaStreamsState> streamStates = mkMap(
mkStreamState(1, 1, Optional.empty(), Set.of(TASK_0_0), Set.of()),
mkStreamState(2, 1, Optional.empty(), Set.of(TASK_0_1), Set.of()),
mkStreamState(3, 1, Optional.empty(), Set.of(TASK_0_2), Set.of()),
mkStreamState(4, 1, Optional.empty(), Set.of(TASK_0_3), Set.of())
);
final Map<ProcessId, KafkaStreamsAssignment> assignments = assign(streamStates, tasks, 1, rackAwareStrategy);
assertThat(standbyTasks(assignments, 1).size(), lessThanOrEqualTo(2));
assertThat(standbyTasks(assignments, 2).size(), lessThanOrEqualTo(2));
assertThat(standbyTasks(assignments, 3).size(), lessThanOrEqualTo(2));
assertThat(standbyTasks(assignments, 4).size(), lessThanOrEqualTo(2));
assertThat(standbyTasks(assignments, 1), not(hasItems(TASK_0_0)));
assertThat(standbyTasks(assignments, 2), not(hasItems(TASK_0_1)));
assertThat(standbyTasks(assignments, 3), not(hasItems(TASK_0_2)));
assertThat(standbyTasks(assignments, 4), not(hasItems(TASK_0_3)));
assertThat(activeTasks(assignments, 1), hasItems(TASK_0_0));
assertThat(activeTasks(assignments, 2), hasItems(TASK_0_1));
assertThat(activeTasks(assignments, 3), hasItems(TASK_0_2));
assertThat(activeTasks(assignments, 4), hasItems(TASK_0_3));
int nonEmptyStandbyTaskCount = 0;
for (int i = 1; i <= 4; i++) {
nonEmptyStandbyTaskCount += standbyTasks(assignments, i).isEmpty() ? 0 : 1;
}
assertThat(nonEmptyStandbyTaskCount, greaterThanOrEqualTo(3));
final Set<TaskId> allStandbyTasks = allTasks(assignments).stream()
.filter(t -> t.type() == STANDBY)
.map(AssignedTask::id)
.collect(Collectors.toSet());
assertThat(allStandbyTasks, equalTo(Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3)));
}
@Timeout(value = 3, unit = TimeUnit.MINUTES)
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY,
})
public void shouldAssignMultipleReplicasOfStandbyTask(final String rackAwareStrategy) {
final Map<TaskId, TaskInfo> tasks = mkMap(
mkTaskInfo(TASK_0_0, true),
mkTaskInfo(TASK_0_1, true),
mkTaskInfo(TASK_0_2, true)
);
final Map<ProcessId, KafkaStreamsState> streamStates = mkMap(
mkStreamState(1, 1, Optional.empty(), Set.of(TASK_0_0), Set.of()),
mkStreamState(2, 1, Optional.empty(), Set.of(TASK_0_1), Set.of()),
mkStreamState(3, 1, Optional.empty(), Set.of(TASK_0_2), Set.of())
);
final Map<ProcessId, KafkaStreamsAssignment> assignments = assign(streamStates, tasks, 2, rackAwareStrategy);
assertThat(activeTasks(assignments, 1), equalTo(Set.of(TASK_0_0)));
assertThat(activeTasks(assignments, 2), equalTo(Set.of(TASK_0_1)));
assertThat(activeTasks(assignments, 3), equalTo(Set.of(TASK_0_2)));
assertThat(standbyTasks(assignments, 1), equalTo(Set.of(TASK_0_1, TASK_0_2)));
assertThat(standbyTasks(assignments, 2), equalTo(Set.of(TASK_0_0, TASK_0_2)));
assertThat(standbyTasks(assignments, 3), equalTo(Set.of(TASK_0_0, TASK_0_1)));
}
@Timeout(value = 3, unit = TimeUnit.MINUTES)
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY,
})
public void shouldNotAssignStandbyTaskReplicasWhenNoClientAvailableWithoutHavingTheTaskAssigned(final String rackAwareStrategy) {
final Map<TaskId, TaskInfo> tasks = mkMap(
mkTaskInfo(TASK_0_0, true)
);
final Map<ProcessId, KafkaStreamsState> streamStates = mkMap(
mkStreamState(1, 1, Optional.empty(), Set.of(TASK_0_0), Set.of())
);
final Map<ProcessId, KafkaStreamsAssignment> assignments = assign(streamStates, tasks, 2, rackAwareStrategy);
assertThat(activeTasks(assignments, 1), equalTo(Set.of(TASK_0_0)));
assertThat(standbyTasks(assignments, 1), equalTo(Set.of()));
}
@Timeout(value = 3, unit = TimeUnit.MINUTES)
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY,
})
public void shouldAssignActiveAndStandbyTasks(final String rackAwareStrategy) {
final Map<TaskId, TaskInfo> tasks = mkMap(
mkTaskInfo(TASK_0_0, true),
mkTaskInfo(TASK_0_1, true),
mkTaskInfo(TASK_0_2, true)
);
final Map<ProcessId, KafkaStreamsState> streamStates = mkMap(
mkStreamState(1, 1, Optional.empty()),
mkStreamState(2, 1, Optional.empty()),
mkStreamState(3, 1, Optional.empty())
);
final Map<ProcessId, KafkaStreamsAssignment> assignments = assign(streamStates, tasks, 1, rackAwareStrategy);
final List<AssignedTask> allTasks = allTasks(assignments);
assertThat(allTasks.stream().filter(t -> t.type() == ACTIVE).map(AssignedTask::id).collect(
Collectors.toSet()), equalTo(Set.of(TASK_0_0, TASK_0_1, TASK_0_2)));
assertThat(allTasks.stream().filter(t -> t.type() == STANDBY).map(AssignedTask::id).collect(
Collectors.toSet()), equalTo(Set.of(TASK_0_0, TASK_0_1, TASK_0_2)));
}
@Timeout(value = 3, unit = TimeUnit.MINUTES)
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY,
})
public void shouldAssignAtLeastOneTaskToEachClientIfPossible(final String rackAwareStrategy) {
final Map<TaskId, TaskInfo> tasks = mkMap(
mkTaskInfo(TASK_0_0, false),
mkTaskInfo(TASK_0_1, false),
mkTaskInfo(TASK_0_2, false)
);
final Map<ProcessId, KafkaStreamsState> streamStates = mkMap(
mkStreamState(1, 3, Optional.empty()),
mkStreamState(2, 1, Optional.empty()),
mkStreamState(3, 1, Optional.empty())
);
final Map<ProcessId, KafkaStreamsAssignment> assignments = assign(streamStates, tasks, rackAwareStrategy);
assertThat(activeTasks(assignments, 1).size(), is(1));
assertThat(activeTasks(assignments, 2).size(), is(1));
assertThat(activeTasks(assignments, 3).size(), is(1));
}
    /**
     * With more clients (6) than stateless tasks (3) and no standbys configured:
     * every task is assigned exactly once as active, no standby tasks exist, and
     * at least three distinct clients end up with a task.
     */
    @Timeout(value = 3, unit = TimeUnit.MINUTES)
    @ParameterizedTest
    @ValueSource(strings = {
        StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
        StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
        StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY,
    })
    public void shouldAssignEachActiveTaskToOneClientWhenMoreClientsThanTasks(final String rackAwareStrategy) {
        final Map<TaskId, TaskInfo> tasks = mkMap(
            mkTaskInfo(TASK_0_0, false),
            mkTaskInfo(TASK_0_1, false),
            mkTaskInfo(TASK_0_2, false)
        );
        final Map<ProcessId, KafkaStreamsState> streamStates = mkMap(
            mkStreamState(1, 1, Optional.empty()),
            mkStreamState(2, 1, Optional.empty()),
            mkStreamState(3, 1, Optional.empty()),
            mkStreamState(4, 1, Optional.empty()),
            mkStreamState(5, 1, Optional.empty()),
            mkStreamState(6, 1, Optional.empty())
        );
        final Map<ProcessId, KafkaStreamsAssignment> assignments = assign(streamStates, tasks, rackAwareStrategy);
        final List<AssignedTask> allTasks = allTasks(assignments);
        // All three tasks appear as active; the standby set is empty (numStandbys defaults to 0 here).
        assertThat(allTasks.stream().filter(t -> t.type() == ACTIVE).map(AssignedTask::id).collect(
            Collectors.toSet()), equalTo(Set.of(TASK_0_0, TASK_0_1, TASK_0_2)));
        assertThat(allTasks.stream().filter(t -> t.type() == STANDBY).map(AssignedTask::id).collect(
            Collectors.toSet()), equalTo(Set.of()));
        // Each task landed on a different client (tasks aren't piled onto fewer than 3 clients).
        final int clientsWithATask = assignments.values().stream().mapToInt(assignment -> assignment.tasks().isEmpty() ? 0 : 1).sum();
        assertThat(clientsWithATask, greaterThanOrEqualTo(3));
    }
    /**
     * Three stateful tasks with one standby replica each (6 task copies total) over
     * six clients: the copies should spread so that no client is left idle.
     */
    @Timeout(value = 3, unit = TimeUnit.MINUTES)
    @ParameterizedTest
    @ValueSource(strings = {
        StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
        StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
        StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY,
    })
    public void shouldBalanceActiveAndStandbyTasksAcrossAvailableClients(final String rackAwareStrategy) {
        final Map<TaskId, TaskInfo> tasks = mkMap(
            mkTaskInfo(TASK_0_0, true),
            mkTaskInfo(TASK_0_1, true),
            mkTaskInfo(TASK_0_2, true)
        );
        final Map<ProcessId, KafkaStreamsState> streamStates = mkMap(
            mkStreamState(1, 1, Optional.empty()),
            mkStreamState(2, 1, Optional.empty()),
            mkStreamState(3, 1, Optional.empty()),
            mkStreamState(4, 1, Optional.empty()),
            mkStreamState(5, 1, Optional.empty()),
            mkStreamState(6, 1, Optional.empty())
        );
        // numStandbys = 1, so 3 active + 3 standby copies must cover 6 clients.
        final Map<ProcessId, KafkaStreamsAssignment> assignments = assign(streamStates, tasks, 1, rackAwareStrategy);
        for (final KafkaStreamsAssignment assignment : assignments.values()) {
            assertThat(assignment.tasks().values(), not(hasSize(0)));
        }
    }
    /**
     * Twelve stateless tasks over two clients with capacities 1 and 2: the task
     * counts should be proportional to capacity, i.e. a 4 / 8 split.
     */
    @Timeout(value = 3, unit = TimeUnit.MINUTES)
    @ParameterizedTest
    @ValueSource(strings = {
        StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
        StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
        StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY,
    })
    public void shouldAssignMoreTasksToClientWithMoreCapacity(final String rackAwareStrategy) {
        final Map<TaskId, TaskInfo> tasks = mkMap(
            mkTaskInfo(TASK_0_0, false),
            mkTaskInfo(TASK_0_1, false),
            mkTaskInfo(TASK_0_2, false),
            mkTaskInfo(TASK_1_0, false),
            mkTaskInfo(TASK_1_1, false),
            mkTaskInfo(TASK_1_2, false),
            mkTaskInfo(TASK_2_0, false),
            mkTaskInfo(TASK_2_1, false),
            mkTaskInfo(TASK_2_2, false),
            mkTaskInfo(TASK_3_0, false),
            mkTaskInfo(TASK_3_1, false),
            mkTaskInfo(TASK_3_2, false)
        );
        final Map<ProcessId, KafkaStreamsState> streamStates = mkMap(
            mkStreamState(1, 1, Optional.empty()),
            mkStreamState(2, 2, Optional.empty())
        );
        final Map<ProcessId, KafkaStreamsAssignment> assignments = assign(streamStates, tasks, rackAwareStrategy);
        // Client 2 has double the capacity of client 1, so it takes double the tasks.
        assertThat(activeTasks(assignments, 1).size(), equalTo(4));
        assertThat(activeTasks(assignments, 2).size(), equalTo(8));
    }
    /**
     * Placeholder: intentionally empty until the corresponding scenario is ported
     * from StickyTaskAssignorTest (see TODO below).
     */
    @Timeout(value = 3, unit = TimeUnit.MINUTES)
    @Test
    public void shouldEvenlyDistributeByTaskIdAndPartition() {
        // TODO: port shouldEvenlyDistributeByTaskIdAndPartition from StickyTaskAssignorTest
    }
    /**
     * Four stateful tasks with one standby each over four clients: no two clients
     * may end up with an identical set of task ids (pairwise-distinct assignments).
     */
    @Timeout(value = 3, unit = TimeUnit.MINUTES)
    @ParameterizedTest
    @ValueSource(strings = {
        StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
        StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
        StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY,
    })
    public void shouldNotHaveSameAssignmentOnAnyTwoHosts(final String rackAwareStrategy) {
        final Map<TaskId, TaskInfo> tasks = mkMap(
            mkTaskInfo(TASK_0_0, true),
            mkTaskInfo(TASK_0_1, true),
            mkTaskInfo(TASK_0_2, true),
            mkTaskInfo(TASK_0_3, true)
        );
        final Map<ProcessId, KafkaStreamsState> streamStates = mkMap(
            mkStreamState(1, 1, Optional.empty()),
            mkStreamState(2, 1, Optional.empty()),
            mkStreamState(3, 1, Optional.empty()),
            mkStreamState(4, 1, Optional.empty())
        );
        final Map<ProcessId, KafkaStreamsAssignment> assignments = assign(streamStates, tasks, 1, rackAwareStrategy);
        // Compare every ordered pair of distinct clients; task-id sets must differ.
        for (final KafkaStreamsState client1: streamStates.values()) {
            for (final KafkaStreamsState client2: streamStates.values()) {
                if (!client1.processId().equals(client2.processId())) {
                    final Set<TaskId> assignedTasks1 = assignments.get(client1.processId()).tasks().keySet();
                    final Set<TaskId> assignedTasks2 = assignments.get(client2.processId()).tasks().keySet();
                    assertThat("clients shouldn't have same task assignment", assignedTasks1,
                        not(equalTo(assignedTasks2)));
                }
            }
        }
    }
    /**
     * Same pairwise-distinct-assignment property as above, but with stickiness in
     * play: three of the four clients report previously-owned active tasks.
     */
    @Timeout(value = 3, unit = TimeUnit.MINUTES)
    @ParameterizedTest
    @ValueSource(strings = {
        StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
        StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
        StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY,
    })
    public void shouldNotHaveSameAssignmentOnAnyTwoHostsWhenThereArePreviousActiveTasks(final String rackAwareStrategy) {
        final Map<TaskId, TaskInfo> tasks = mkMap(
            mkTaskInfo(TASK_0_0, true),
            mkTaskInfo(TASK_0_1, true),
            mkTaskInfo(TASK_0_2, true),
            mkTaskInfo(TASK_0_3, true)
        );
        // Clients 1-3 carry previous active tasks; client 4 is fresh.
        final Map<ProcessId, KafkaStreamsState> streamStates = mkMap(
            mkStreamState(1, 1, Optional.empty(), Set.of(TASK_0_1, TASK_0_2), Set.of()),
            mkStreamState(2, 1, Optional.empty(), Set.of(TASK_0_3), Set.of()),
            mkStreamState(3, 1, Optional.empty(), Set.of(TASK_0_0), Set.of()),
            mkStreamState(4, 1, Optional.empty())
        );
        final Map<ProcessId, KafkaStreamsAssignment> assignments = assign(streamStates, tasks, 1, rackAwareStrategy);
        for (final KafkaStreamsState client1: streamStates.values()) {
            for (final KafkaStreamsState client2: streamStates.values()) {
                if (!client1.processId().equals(client2.processId())) {
                    final Set<TaskId> assignedTasks1 = assignments.get(client1.processId()).tasks().keySet();
                    final Set<TaskId> assignedTasks2 = assignments.get(client2.processId()).tasks().keySet();
                    assertThat("clients shouldn't have same task assignment", assignedTasks1,
                        not(equalTo(assignedTasks2)));
                }
            }
        }
    }
    /**
     * One stateful task (TASK_0_0) with three standby replicas over four clients:
     * the three standbys must land on the three clients not hosting the active copy.
     * NOTE(review): the assertions assume the active copy deterministically lands on
     * client 1 (hence its empty standby set) — TODO confirm this is guaranteed by the
     * assignor for all three rack-aware strategies.
     */
    @Timeout(value = 3, unit = TimeUnit.MINUTES)
    @ParameterizedTest
    @ValueSource(strings = {
        StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
        StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
        StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY,
    })
    public void shouldAssignMultipleStandbys(final String rackAwareStrategy) {
        final Map<TaskId, TaskInfo> tasks = mkMap(
            mkTaskInfo(TASK_0_0, true),
            mkTaskInfo(TASK_0_1, false),
            mkTaskInfo(TASK_0_2, false)
        );
        final Map<ProcessId, KafkaStreamsState> streamStates = mkMap(
            mkStreamState(1, 1, Optional.empty()),
            mkStreamState(2, 1, Optional.empty()),
            mkStreamState(3, 1, Optional.empty()),
            mkStreamState(4, 1, Optional.empty())
        );
        final Map<ProcessId, KafkaStreamsAssignment> assignments = assign(streamStates, tasks, 3, rackAwareStrategy);
        assertThat(standbyTasks(assignments, 1), equalTo(Set.of()));
        assertThat(standbyTasks(assignments, 2), equalTo(Set.of(TASK_0_0)));
        assertThat(standbyTasks(assignments, 3), equalTo(Set.of(TASK_0_0)));
        assertThat(standbyTasks(assignments, 4), equalTo(Set.of(TASK_0_0)));
    }
    /**
     * Scale / termination test: 10 subtopologies x 30 partitions = 300 stateful tasks
     * with 2 standbys each, spread over 20 clients of capacity 50, all with rack ids.
     * The assignment must finish inside the 3-minute class timeout and produce the
     * full complement of active (300) and standby (600) task copies.
     */
    @Timeout(value = 3, unit = TimeUnit.MINUTES)
    @ParameterizedTest
    @ValueSource(strings = {
        StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
        StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
        StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY,
    })
    public void largeAssignmentShouldTerminateWithinAcceptableTime(final String rackAwareStrategy) {
        final int topicCount = 10;
        final int taskPerTopic = 30;
        final int numStandbys = 2;
        final int clientCount = 20;
        final int clientCapacity = 50;
        final Map<TaskId, TaskInfo> tasks = mkMap();
        for (int i = 0; i < topicCount; i++) {
            for (int j = 0; j < taskPerTopic; j++) {
                final TaskId newTaskId = new TaskId(i, j);
                // Scatter partitions over 31 synthetic racks (31 is coprime with the loop bounds).
                final Set<String> partitionRacks = Set.of(
                    String.format("rack-%d", (i * j) % 31)
                );
                final Map.Entry<TaskId, TaskInfo> newTask = mkTaskInfo(newTaskId, true, partitionRacks);
                tasks.put(newTask.getKey(), newTask.getValue());
            }
        }
        final Map<ProcessId, KafkaStreamsState> streamStates = mkMap();
        for (int i = 0; i < clientCount; i++) {
            final Map.Entry<ProcessId, KafkaStreamsState> newClient = mkStreamState(
                i + 1,
                clientCapacity,
                Optional.of(String.format("rack-%d", i % 31)),
                Set.of(),
                Set.of()
            );
            streamStates.put(newClient.getKey(), newClient.getValue());
        }
        // Non-default traffic/move costs exercise the rack-aware optimizer paths.
        final AssignmentConfigs assignmentConfigs = new AssignmentConfigs(
            0L,
            1,
            numStandbys,
            60_000L,
            Collections.emptyList(),
            OptionalInt.of(1),
            OptionalInt.of(2),
            rackAwareStrategy
        );
        final Map<ProcessId, KafkaStreamsAssignment> assignments = assign(streamStates, tasks, assignmentConfigs);
        final List<TaskId> allActiveTasks = allTasks(assignments).stream().filter(t -> t.type() == ACTIVE)
            .map(AssignedTask::id)
            .collect(Collectors.toList());
        assertThat(allActiveTasks.size(), equalTo(topicCount * taskPerTopic));
        final List<TaskId> allStandbyTasks = allTasks(assignments).stream().filter(t -> t.type() == STANDBY)
            .map(AssignedTask::id)
            .collect(Collectors.toList());
        assertThat(allStandbyTasks.size(), equalTo(topicCount * taskPerTopic * numStandbys));
    }
    /**
     * Convenience overload: runs the assignor with zero standby replicas.
     */
    private Map<ProcessId, KafkaStreamsAssignment> assign(final Map<ProcessId, KafkaStreamsState> streamStates,
                                                          final Map<TaskId, TaskInfo> tasks,
                                                          final String rackAwareStrategy) {
        return assign(streamStates, tasks, 0, rackAwareStrategy);
    }
    /**
     * Convenience overload: builds default {@code AssignmentConfigs} for the given
     * standby count and rack-aware strategy, then runs the assignor.
     */
    private Map<ProcessId, KafkaStreamsAssignment> assign(final Map<ProcessId, KafkaStreamsState> streamStates,
                                                          final Map<TaskId, TaskInfo> tasks,
                                                          final int numStandbys,
                                                          final String rackAwareStrategy) {
        return assign(streamStates, tasks, defaultAssignmentConfigs(numStandbys, rackAwareStrategy));
    }
    /**
     * Core assignment driver: wraps the inputs in a synthetic {@code ApplicationState},
     * runs the assignor under test, asserts the produced assignment is valid (no
     * assignment errors), and indexes the result by process id for the assertions.
     */
    private Map<ProcessId, KafkaStreamsAssignment> assign(final Map<ProcessId, KafkaStreamsState> streamStates,
                                                          final Map<TaskId, TaskInfo> tasks,
                                                          final AssignmentConfigs assignmentConfigs) {
        final ApplicationState applicationState = new TaskAssignmentUtilsTest.TestApplicationState(
            assignmentConfigs,
            streamStates,
            tasks
        );
        final TaskAssignment taskAssignment = assignor.assign(applicationState);
        // Every test implicitly checks that the assignment passes structural validation.
        final TaskAssignor.AssignmentError assignmentError = TaskAssignmentUtils.validateTaskAssignment(applicationState, taskAssignment);
        assertThat(assignmentError, equalTo(TaskAssignor.AssignmentError.NONE));
        return indexAssignment(taskAssignment.assignment());
    }
    /**
     * Builds {@code AssignmentConfigs} with test defaults; only the standby count and
     * rack-aware strategy vary per test. (Positional constructor args — presumably
     * acceptable recovery lag, max warmup replicas, standbys, probing rebalance
     * interval, tags, and rack-aware traffic/move costs; TODO confirm against the
     * AssignmentConfigs constructor signature.)
     */
    public AssignmentConfigs defaultAssignmentConfigs(final int numStandbys, final String rackAwareStrategy) {
        return new AssignmentConfigs(
            0L,
            1,
            numStandbys,
            60_000L,
            Collections.emptyList(),
            OptionalInt.empty(),
            OptionalInt.empty(),
            rackAwareStrategy
        );
    }
private Map<ProcessId, KafkaStreamsAssignment> indexAssignment(final Collection<KafkaStreamsAssignment> assignments) {
return assignments.stream().collect(Collectors.toMap(KafkaStreamsAssignment::processId, assignment -> assignment));
}
private Set<TaskId> activeTasks(final Map<ProcessId, KafkaStreamsAssignment> assignments,
final int client) {
final KafkaStreamsAssignment assignment = assignments.getOrDefault(processId(client), null);
if (assignment == null) {
return Set.of();
}
return assignment.tasks().values().stream().filter(t -> t.type() == ACTIVE)
.map(AssignedTask::id)
.collect(Collectors.toSet());
}
private Set<TaskId> standbyTasks(final Map<ProcessId, KafkaStreamsAssignment> assignments,
final int client) {
final KafkaStreamsAssignment assignment = assignments.getOrDefault(processId(client), null);
if (assignment == null) {
return Set.of();
}
return assignment.tasks().values().stream().filter(t -> t.type() == STANDBY)
.map(AssignedTask::id)
.collect(Collectors.toSet());
}
private List<AssignedTask> allTasks(final Map<ProcessId, KafkaStreamsAssignment> assignments) {
final List<AssignedTask> allTasks = new ArrayList<>();
assignments.values().forEach(assignment -> allTasks.addAll(assignment.tasks().values()));
return allTasks;
}
private void assertHasAssignment(final Map<ProcessId, KafkaStreamsAssignment> assignments,
final int client,
final TaskId taskId,
final AssignedTask.Type taskType) {
final KafkaStreamsAssignment assignment = assignments.getOrDefault(processId(client), null);
assertThat(assignment, notNullValue());
final AssignedTask assignedTask = assignment.tasks().getOrDefault(taskId, null);
assertThat(assignedTask, notNullValue());
assertThat(assignedTask.id().equals(taskId), is(true));
assertThat(assignedTask.type().equals(taskType), is(true));
}
    /**
     * Asserts each client's assigned tasks cover exactly subtopologies 1 and 2.
     * NOTE(review): despite the local name {@code activeTasks}, this collects ALL
     * assigned tasks without filtering for {@code type() == ACTIVE} — presumably the
     * callers only use it when no standbys are configured; TODO confirm, or add the
     * ACTIVE filter.
     */
    private void assertActiveTaskTopicGroupIdsEvenlyDistributed(final Map<ProcessId, KafkaStreamsAssignment> assignments) {
        for (final KafkaStreamsAssignment assignment : assignments.values()) {
            final List<Integer> topicGroupIds = new ArrayList<>();
            final Set<TaskId> activeTasks = assignment.tasks().values().stream()
                .map(AssignedTask::id)
                .collect(Collectors.toSet());
            for (final TaskId activeTask : activeTasks) {
                topicGroupIds.add(activeTask.subtopology());
            }
            // Sorted so the expectation below is order-independent.
            Collections.sort(topicGroupIds);
            assertThat(topicGroupIds, equalTo(asList(1, 2)));
        }
    }
}
| CustomStickyTaskAssignorTest |
java | apache__kafka | server/src/main/java/org/apache/kafka/server/metrics/DefaultClientTelemetryPayload.java | {
"start": 1088,
"end": 1153
} | interface ____ the metrics payload sent by the client.
*/
public | for |
java | apache__maven | impl/maven-impl/src/main/java/org/apache/maven/impl/di/SessionScope.java | {
"start": 4690,
"end": 5980
} | class
____ = superType.getInterfaces();
}
List<Class<?>> nonInterfaces =
Stream.of(value).filter(c -> !c.isInterface()).toList();
if (!nonInterfaces.isEmpty()) {
throw new IllegalArgumentException(
"The Typed annotation must contain only interfaces but the following types are not: "
+ nonInterfaces);
}
return value;
} catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException e) {
throw new IllegalStateException(e);
}
}
}
throw new IllegalArgumentException(
"The use of session scoped proxies require a org.apache.maven.api.di.Typed, org.eclipse.sisu.Typed or javax.enterprise.inject.Typed annotation");
}
protected boolean isTypeAnnotation(Class<? extends Annotation> annotationType) {
return "org.apache.maven.api.di.Typed".equals(annotationType.getName());
}
/**
* A provider wrapping an existing provider with a cache
* @param <T> the provided type
*/
protected static | value |
java | google__dagger | javatests/dagger/internal/codegen/ComponentShardTest.java | {
"start": 7231,
"end": 7661
} | interface ____ {",
" TestSubcomponent subcomponent();",
"}"),
CompilerTests.javaSource(
"dagger.internal.codegen.TestSubcomponent",
"package dagger.internal.codegen;",
"",
"import dagger.Subcomponent;",
"import javax.inject.Provider;",
"",
"@SubcomponentScope",
"@Subcomponent",
" | TestComponent |
java | elastic__elasticsearch | server/src/internalClusterTest/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationIT.java | {
"start": 1192,
"end": 5190
} | class ____ extends ESIntegTestCase {
@Override
protected boolean forbidPrivateIndexSettings() {
return false;
}
public void testThatPreBuiltAnalyzersAreNotClosedOnIndexClose() throws Exception {
Map<PreBuiltAnalyzers, List<IndexVersion>> loadedAnalyzers = new HashMap<>();
List<String> indexNames = new ArrayList<>();
final int numIndices = scaledRandomIntBetween(2, 4);
for (int i = 0; i < numIndices; i++) {
String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
indexNames.add(indexName);
int randomInt = randomInt(PreBuiltAnalyzers.values().length - 1);
PreBuiltAnalyzers preBuiltAnalyzer = PreBuiltAnalyzers.values()[randomInt];
String name = preBuiltAnalyzer.name().toLowerCase(Locale.ROOT);
IndexVersion randomVersion = IndexVersionUtils.randomWriteVersion();
if (loadedAnalyzers.containsKey(preBuiltAnalyzer) == false) {
loadedAnalyzers.put(preBuiltAnalyzer, new ArrayList<>());
}
loadedAnalyzers.get(preBuiltAnalyzer).add(randomVersion);
final XContentBuilder mapping = jsonBuilder().startObject()
.startObject("_doc")
.startObject("properties")
.startObject("foo")
.field("type", "text")
.field("analyzer", name)
.endObject()
.endObject()
.endObject()
.endObject();
indicesAdmin().prepareCreate(indexName).setMapping(mapping).setSettings(settings(randomVersion)).get();
}
ensureGreen();
final int numDocs = randomIntBetween(10, 100);
// index some amount of data
for (int i = 0; i < numDocs; i++) {
String randomIndex = indexNames.get(randomInt(indexNames.size() - 1));
String randomId = randomInt() + "";
Map<String, Object> data = new HashMap<>();
data.put("foo", randomAlphaOfLength(scaledRandomIntBetween(5, 50)));
index(randomIndex, randomId, data);
}
refresh();
// close some of the indices
int amountOfIndicesToClose = randomInt(numIndices - 1);
for (int i = 0; i < amountOfIndicesToClose; i++) {
String indexName = indexNames.get(i);
indicesAdmin().prepareClose(indexName).get();
}
ensureGreen();
// check that all above configured analyzers have been loaded
assertThatAnalyzersHaveBeenLoaded(loadedAnalyzers);
// check that all of the prebuilt analyzers are still open
assertLuceneAnalyzersAreNotClosed(loadedAnalyzers);
}
private void assertThatAnalyzersHaveBeenLoaded(Map<PreBuiltAnalyzers, List<IndexVersion>> expectedLoadedAnalyzers) {
for (Map.Entry<PreBuiltAnalyzers, List<IndexVersion>> entry : expectedLoadedAnalyzers.entrySet()) {
for (IndexVersion version : entry.getValue()) {
// if it is not null in the cache, it has been loaded
assertThat(entry.getKey().getCache().get(version), is(notNullValue()));
}
}
}
// ensure analyzers are still open by checking there is no ACE
private void assertLuceneAnalyzersAreNotClosed(Map<PreBuiltAnalyzers, List<IndexVersion>> loadedAnalyzers) throws IOException {
for (Map.Entry<PreBuiltAnalyzers, List<IndexVersion>> preBuiltAnalyzerEntry : loadedAnalyzers.entrySet()) {
for (IndexVersion version : preBuiltAnalyzerEntry.getValue()) {
Analyzer analyzer = preBuiltAnalyzerEntry.getKey().getCache().get(version);
try (TokenStream stream = analyzer.tokenStream("foo", "bar")) {
stream.reset();
while (stream.incrementToken()) {
}
stream.end();
}
}
}
}
}
| PreBuiltAnalyzerIntegrationIT |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/common/eventtime/WatermarkOutputMultiplexer.java | {
"start": 2435,
"end": 2855
} | interface ____ {
/** Called when the watermark increases. */
void onWatermarkUpdate(long watermark);
/** Called when the idle state changes. */
void onIdleUpdate(boolean idle);
}
/**
* The {@link WatermarkOutput} that we use to emit our multiplexed watermark updates. We assume
* that outside code holds a coordinating lock so we don't lock in this | WatermarkUpdateListener |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStore.java | {
"start": 1461,
"end": 1571
} | interface ____ extends Service,
ApplicationHistoryReader, ApplicationHistoryWriter {
}
| ApplicationHistoryStore |
java | spring-projects__spring-boot | module/spring-boot-devtools/src/test/java/org/springframework/boot/devtools/remote/server/HttpStatusHandlerTests.java | {
"start": 1391,
"end": 2580
} | class ____ {
private MockHttpServletRequest servletRequest;
private MockHttpServletResponse servletResponse;
private ServerHttpResponse response;
private ServerHttpRequest request;
@BeforeEach
void setup() {
this.servletRequest = new MockHttpServletRequest();
this.servletResponse = new MockHttpServletResponse();
this.request = new ServletServerHttpRequest(this.servletRequest);
this.response = new ServletServerHttpResponse(this.servletResponse);
}
@Test
@SuppressWarnings("NullAway") // Test null check
void statusMustNotBeNull() {
assertThatIllegalArgumentException().isThrownBy(() -> new HttpStatusHandler(null))
.withMessageContaining("'status' must not be null");
}
@Test
void respondsOk() throws Exception {
HttpStatusHandler handler = new HttpStatusHandler();
handler.handle(this.request, this.response);
assertThat(this.servletResponse.getStatus()).isEqualTo(200);
}
@Test
void respondsWithStatus() throws Exception {
HttpStatusHandler handler = new HttpStatusHandler(HttpStatus.EXPECTATION_FAILED);
handler.handle(this.request, this.response);
assertThat(this.servletResponse.getStatus()).isEqualTo(417);
}
}
| HttpStatusHandlerTests |
java | elastic__elasticsearch | x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/modelsnapshots/GetModelSnapshotsTests.java | {
"start": 486,
"end": 1232
} | class ____ extends ESTestCase {
public void testModelSnapshots_GivenNegativeFrom() {
IllegalArgumentException e = expectThrows(
IllegalArgumentException.class,
() -> new GetModelSnapshotsAction.Request("foo", null).setPageParams(new PageParams(-5, 10))
);
assertEquals("Parameter [from] cannot be < 0", e.getMessage());
}
public void testModelSnapshots_GivenNegativeSize() {
IllegalArgumentException e = expectThrows(
IllegalArgumentException.class,
() -> new GetModelSnapshotsAction.Request("foo", null).setPageParams(new PageParams(10, -5))
);
assertEquals("Parameter [size] cannot be < 0", e.getMessage());
}
}
| GetModelSnapshotsTests |
java | junit-team__junit5 | junit-jupiter-migrationsupport/src/main/java/org/junit/jupiter/migrationsupport/rules/ExternalResourceSupport.java | {
"start": 1659,
"end": 2138
} | class ____ implements BeforeEachCallback, AfterEachCallback {
private final TestRuleSupport support = new TestRuleSupport(ExternalResourceAdapter::new, ExternalResource.class);
public ExternalResourceSupport() {
}
@Override
public void beforeEach(ExtensionContext context) throws Exception {
this.support.beforeEach(context);
}
@Override
public void afterEach(ExtensionContext context) throws Exception {
this.support.afterEach(context);
}
}
| ExternalResourceSupport |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequestTests.java | {
"start": 816,
"end": 4034
} | class ____ extends ESTestCase {
public void testNullValuesValidForNonIds() {
final var request = new UpdateApiKeyRequest("id", null, null, null);
assertNull(request.validate());
}
public void testMetadataKeyValidation() {
final var reservedKey = "_" + randomAlphaOfLengthBetween(0, 10);
final var metadataValue = randomAlphaOfLengthBetween(1, 10);
UpdateApiKeyRequest request = new UpdateApiKeyRequest(randomAlphaOfLength(10), null, Map.of(reservedKey, metadataValue), null);
final ActionRequestValidationException ve = request.validate();
assertNotNull(ve);
assertThat(ve.validationErrors().size(), equalTo(1));
assertThat(ve.validationErrors().get(0), containsString("API key metadata keys may not start with [_]"));
}
public void testRoleDescriptorValidation() {
final List<String> unknownWorkflows = randomList(1, 2, () -> randomAlphaOfLengthBetween(4, 10));
final List<String> workflows = new ArrayList<>(unknownWorkflows.size() + 1);
workflows.addAll(unknownWorkflows);
workflows.add(WorkflowResolver.SEARCH_APPLICATION_QUERY_WORKFLOW.name());
final var request1 = new UpdateApiKeyRequest(
randomAlphaOfLength(10),
List.of(
new RoleDescriptor(
randomAlphaOfLength(5),
new String[] { "manage_index_template" },
new RoleDescriptor.IndicesPrivileges[] {
RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("rad").build() },
new RoleDescriptor.ApplicationResourcePrivileges[] {
RoleDescriptor.ApplicationResourcePrivileges.builder()
.application(randomFrom("app*tab", "app 1"))
.privileges(randomFrom(" ", "\n"))
.resources("resource")
.build() },
null,
null,
Map.of("_key", "value"),
null,
null,
null,
new RoleDescriptor.Restriction(workflows.toArray(String[]::new)),
null
)
),
null,
null
);
final ActionRequestValidationException ve1 = request1.validate();
assertNotNull(ve1);
assertThat(ve1.validationErrors().get(0), containsString("unknown cluster privilege"));
assertThat(ve1.validationErrors().get(1), containsString("unknown index privilege"));
assertThat(ve1.validationErrors().get(2), containsStringIgnoringCase("application name"));
assertThat(ve1.validationErrors().get(3), containsStringIgnoringCase("Application privilege names"));
assertThat(ve1.validationErrors().get(4), containsStringIgnoringCase("role descriptor metadata keys may not start with "));
for (int i = 0; i < unknownWorkflows.size(); i++) {
assertThat(ve1.validationErrors().get(5 + i), containsStringIgnoringCase("unknown workflow [" + unknownWorkflows.get(i) + "]"));
}
}
}
| UpdateApiKeyRequestTests |
java | google__guice | core/src/com/google/inject/spi/InjectionPoint.java | {
"start": 22450,
"end": 26703
} | class ____ {
final InjectableMembers injectableMembers;
Map<Signature, List<InjectableMethod>> bySignature;
Position position = Position.TOP;
OverrideIndex(InjectableMembers injectableMembers) {
this.injectableMembers = injectableMembers;
}
/* Caches the signature for the last method. */
Method lastMethod;
Signature lastSignature;
/**
* Removes a method overridden by the given method, if present. In order to remain backwards
* compatible with prior Guice versions, this will *not* remove overridden methods if
* 'alwaysRemove' is false and the overridden signature was annotated with a
* com.google.inject.Inject.
*
* @param method The method used to determine what is overridden and should be removed.
* @param alwaysRemove true if overridden methods should be removed even if they were
* guice @Inject
* @param injectableMethod if this method overrode any guice @Inject methods, {@link
* InjectableMethod#overrodeGuiceInject} is set to true
*/
boolean removeIfOverriddenBy(
Method method, boolean alwaysRemove, InjectableMethod injectableMethod) {
if (position == Position.TOP) {
// If we're at the top of the hierarchy, there's nothing to override.
return false;
}
if (bySignature == null) {
// We encountered a method in a subclass. Time to index the
// methods in the parent class.
bySignature = new HashMap<>();
for (InjectableMember member = injectableMembers.head;
member != null;
member = member.next) {
if (!(member instanceof InjectableMethod)) {
continue;
}
InjectableMethod im = (InjectableMethod) member;
if (im.isFinal()) {
continue;
}
List<InjectableMethod> methods = new ArrayList<>();
methods.add(im);
bySignature.put(new Signature(im.method), methods);
}
}
lastMethod = method;
Signature signature = lastSignature = new Signature(method);
List<InjectableMethod> methods = bySignature.get(signature);
boolean removed = false;
if (methods != null) {
for (Iterator<InjectableMethod> iterator = methods.iterator(); iterator.hasNext(); ) {
InjectableMethod possiblyOverridden = iterator.next();
if (overrides(method, possiblyOverridden.method)) {
boolean wasGuiceInject =
!possiblyOverridden.specInject || possiblyOverridden.overrodeGuiceInject;
if (injectableMethod != null) {
injectableMethod.overrodeGuiceInject = wasGuiceInject;
}
// Only actually remove the methods if we want to force
// remove or if the signature never specified @com.google.inject.Inject
// somewhere.
if (alwaysRemove || !wasGuiceInject) {
removed = true;
iterator.remove();
injectableMembers.remove(possiblyOverridden);
}
}
}
}
return removed;
}
/**
* Adds the given method to the list of injection points. Keeps track of it in this index in
* case it gets overridden.
*/
void add(InjectableMethod injectableMethod) {
injectableMembers.add(injectableMethod);
if (position == Position.BOTTOM || injectableMethod.isFinal()) {
// This method can't be overridden, so there's no need to index it.
return;
}
if (bySignature != null) {
// Try to reuse the signature we created during removal
@SuppressWarnings("ReferenceEquality")
Signature signature =
injectableMethod.method == lastMethod
? lastSignature
: new Signature(injectableMethod.method);
bySignature.computeIfAbsent(signature, k -> new ArrayList<>()).add(injectableMethod);
}
}
}
/**
* Returns an ordered, immutable set of injection points for the given type. Members in
* superclasses come before members in subclasses. Within a class, fields come before methods.
* Overridden methods are filtered out. The order of fields/methods within a | OverrideIndex |
java | spring-projects__spring-framework | spring-aspects/src/test/java/org/springframework/transaction/aspectj/TransactionAspectTests.java | {
"start": 5675,
"end": 5804
} | class ____ extends TransactionalAnnotationOnlyOnClassWithNoInterface {
}
public static | SubclassOfClassWithTransactionalAnnotation |
java | google__guava | android/guava-tests/test/com/google/common/eventbus/SubscriberRegistryTest.java | {
"start": 5654,
"end": 5801
} | class ____ implements HierarchyFixtureSubinterface {
// Exists only for hierarchy mapping; no members.
}
private static | HierarchyFixtureParent |
java | apache__thrift | lib/javame/src/org/apache/thrift/protocol/TJSONProtocol.java | {
"start": 1400,
"end": 6016
} | class ____ implements TProtocolFactory {
public TProtocol getProtocol(TTransport trans) {
return new TJSONProtocol(trans);
}
}
private static final byte[] COMMA = new byte[] { ',' };
private static final byte[] COLON = new byte[] { ':' };
private static final byte[] LBRACE = new byte[] { '{' };
private static final byte[] RBRACE = new byte[] { '}' };
private static final byte[] LBRACKET = new byte[] { '[' };
private static final byte[] RBRACKET = new byte[] { ']' };
private static final byte[] QUOTE = new byte[] { '"' };
private static final byte[] BACKSLASH = new byte[] { '\\' };
private static final byte[] ZERO = new byte[] { '0' };
private static final byte[] ESCSEQ = new byte[] { '\\', 'u', '0', '0' };
private static final long VERSION = 1;
private static final byte[] JSON_CHAR_TABLE = {
/* 0 1 2 3 4 5 6 7 8 9 A B C D E F */
0, 0, 0, 0, 0, 0, 0, 0, 'b', 't', 'n', 0, 'f', 'r', 0, 0, // 0
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 1
1, 1, '"', 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 2
};
private static final String ESCAPE_CHARS = "\"\\/bfnrt";
private static final byte[] ESCAPE_CHAR_VALS = {
'"', '\\', '/', '\b', '\f', '\n', '\r', '\t',
};
private static final int DEF_STRING_SIZE = 16;
private static final byte[] NAME_BOOL = new byte[] { 't', 'f' };
private static final byte[] NAME_BYTE = new byte[] { 'i', '8' };
private static final byte[] NAME_I16 = new byte[] { 'i', '1', '6' };
private static final byte[] NAME_I32 = new byte[] { 'i', '3', '2' };
private static final byte[] NAME_I64 = new byte[] { 'i', '6', '4' };
private static final byte[] NAME_DOUBLE = new byte[] { 'd', 'b', 'l' };
private static final byte[] NAME_STRUCT = new byte[] { 'r', 'e', 'c' };
private static final byte[] NAME_STRING = new byte[] { 's', 't', 'r' };
private static final byte[] NAME_MAP = new byte[] { 'm', 'a', 'p' };
private static final byte[] NAME_LIST = new byte[] { 'l', 's', 't' };
private static final byte[] NAME_SET = new byte[] { 's', 'e', 't' };
private static final TStruct ANONYMOUS_STRUCT = new TStruct();
private static final byte[] getTypeNameForTypeID(byte typeID)
throws TException {
switch (typeID) {
case TType.BOOL:
return NAME_BOOL;
case TType.BYTE:
return NAME_BYTE;
case TType.I16:
return NAME_I16;
case TType.I32:
return NAME_I32;
case TType.I64:
return NAME_I64;
case TType.DOUBLE:
return NAME_DOUBLE;
case TType.STRING:
return NAME_STRING;
case TType.STRUCT:
return NAME_STRUCT;
case TType.MAP:
return NAME_MAP;
case TType.SET:
return NAME_SET;
case TType.LIST:
return NAME_LIST;
default:
throw new TProtocolException(TProtocolException.NOT_IMPLEMENTED,
"Unrecognized type");
}
}
private static final byte getTypeIDForTypeName(byte[] name)
throws TException {
byte result = TType.STOP;
if (name.length > 1) {
switch (name[0]) {
case 'd':
result = TType.DOUBLE;
break;
case 'i':
switch (name[1]) {
case '8':
result = TType.BYTE;
break;
case '1':
result = TType.I16;
break;
case '3':
result = TType.I32;
break;
case '6':
result = TType.I64;
break;
}
break;
case 'l':
result = TType.LIST;
break;
case 'm':
result = TType.MAP;
break;
case 'r':
result = TType.STRUCT;
break;
case 's':
if (name[1] == 't') {
result = TType.STRING;
}
else if (name[1] == 'e') {
result = TType.SET;
}
break;
case 't':
result = TType.BOOL;
break;
}
}
if (result == TType.STOP) {
throw new TProtocolException(TProtocolException.NOT_IMPLEMENTED,
"Unrecognized type");
}
return result;
}
// Base | Factory |
java | bumptech__glide | library/src/main/java/com/bumptech/glide/request/RequestListener.java | {
"start": 1241,
"end": 2198
} | interface ____<R> {
/**
* Called when an exception occurs during a load, immediately before {@link
* Target#onLoadFailed(Drawable)}. Will only be called if we currently want to display an image
* for the given model in the given target. It is recommended to create a single instance per
* activity/fragment rather than instantiate a new object for each call to {@code
* Glide.with(fragment/activity).load()} to avoid object churn.
*
* <p>It is not safe to reload this or a different model in this callback. If you need to do so
* use {@link com.bumptech.glide.RequestBuilder#error(RequestBuilder)} instead.
*
* <p>Although you can't start an entirely new load, it is safe to change what is displayed in the
* {@link Target} at this point, as long as you return {@code true} from the method to prevent
* {@link Target#onLoadFailed(Drawable)} from being called.
*
* <p>For threading guarantees, see the | RequestListener |
java | alibaba__nacos | console/src/main/java/com/alibaba/nacos/console/handler/impl/noop/naming/InstanceNoopHandler.java | {
"start": 1481,
"end": 2317
} | class ____ implements InstanceHandler {
private static final String MCP_NOT_ENABLED_MESSAGE = "Current functionMode is `config`, naming module is disabled.";
@Override
public Page<? extends Instance> listInstances(String namespaceId, String serviceNameWithoutGroup, String groupName,
String clusterName, int page, int pageSize) throws NacosException {
throw new NacosApiException(NacosException.SERVER_NOT_IMPLEMENTED, ErrorCode.API_FUNCTION_DISABLED,
MCP_NOT_ENABLED_MESSAGE);
}
@Override
public void updateInstance(InstanceForm instanceForm, Instance instance) throws NacosException {
throw new NacosApiException(NacosException.SERVER_NOT_IMPLEMENTED, ErrorCode.API_FUNCTION_DISABLED,
MCP_NOT_ENABLED_MESSAGE);
}
}
| InstanceNoopHandler |
java | apache__maven | impl/maven-core/src/main/java/org/apache/maven/lifecycle/providers/packaging/WarLifecycleMappingProvider.java | {
"start": 1080,
"end": 2271
} | class ____ extends AbstractLifecycleMappingProvider {
// START SNIPPET: war
private static final String[] BINDINGS = {
"process-resources",
"org.apache.maven.plugins:maven-resources-plugin:" + RESOURCES_PLUGIN_VERSION + ":resources",
"compile", "org.apache.maven.plugins:maven-compiler-plugin:" + COMPILER_PLUGIN_VERSION + ":compile",
"process-test-resources",
"org.apache.maven.plugins:maven-resources-plugin:" + RESOURCES_PLUGIN_VERSION + ":testResources",
"test-compile", "org.apache.maven.plugins:maven-compiler-plugin:" + COMPILER_PLUGIN_VERSION + ":testCompile",
"test", "org.apache.maven.plugins:maven-surefire-plugin:" + SUREFIRE_PLUGIN_VERSION + ":test",
"package", "org.apache.maven.plugins:maven-war-plugin:" + WAR_PLUGIN_VERSION + ":war",
"install", "org.apache.maven.plugins:maven-install-plugin:" + INSTALL_PLUGIN_VERSION + ":install",
"deploy", "org.apache.maven.plugins:maven-deploy-plugin:" + DEPLOY_PLUGIN_VERSION + ":deploy"
};
// END SNIPPET: war
@Inject
public WarLifecycleMappingProvider() {
super(BINDINGS);
}
}
| WarLifecycleMappingProvider |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlBlock.java | {
"start": 1255,
"end": 1500
} | class ____ extends TextView implements SubView {
protected static final String UNAVAILABLE = "N/A";
protected static final long BYTES_IN_MB = 1024 * 1024;
protected static final String DATE_PATTERN = "yyyy-MM-dd HH:mm:ss";
public | HtmlBlock |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/core/publisher/MonoNext.java | {
"start": 1478,
"end": 3127
} | class ____<T> implements InnerOperator<T, T> {
final CoreSubscriber<? super T> actual;
@SuppressWarnings("NotNullFieldNotInitialized") // s initialized in onSubscribe
Subscription s;
boolean done;
volatile int wip;
@SuppressWarnings("rawtypes")
static final AtomicIntegerFieldUpdater<NextSubscriber> WIP =
AtomicIntegerFieldUpdater.newUpdater(NextSubscriber.class, "wip");
NextSubscriber(CoreSubscriber<? super T> actual) {
this.actual = actual;
}
@Override
public void onSubscribe(Subscription s) {
if (Operators.validate(this.s, s)) {
this.s = s;
actual.onSubscribe(this);
}
}
@Override
public void onNext(T t) {
if (done) {
Operators.onNextDropped(t, actual.currentContext());
return;
}
s.cancel();
actual.onNext(t);
onComplete();
}
@Override
public void onError(Throwable t) {
if (done) {
Operators.onErrorDropped(t, actual.currentContext());
return;
}
done = true;
actual.onError(t);
}
@Override
public void onComplete() {
if (done) {
return;
}
done = true;
actual.onComplete();
}
@Override
public void request(long n) {
if (WIP.compareAndSet(this, 0, 1)) {
s.request(Long.MAX_VALUE);
}
}
@Override
public void cancel() {
s.cancel();
}
@Override
public @Nullable Object scanUnsafe(Attr key) {
if (key == Attr.TERMINATED) return done;
if (key == Attr.PARENT) return s;
if (key == Attr.RUN_STYLE) return Attr.RunStyle.SYNC;
return InnerOperator.super.scanUnsafe(key);
}
@Override
public CoreSubscriber<? super T> actual() {
return actual;
}
}
}
| NextSubscriber |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/interceptors/bindingdefaultvalue/BindingDefaultValueTest.java | {
"start": 1013,
"end": 1242
} | class ____ {
@MyTransactional // This should only match AlphaInterceptor
String ping() {
return "foo";
}
}
@MyTransactional
@Priority(1)
@Interceptor
public static | SimpleBean |
java | elastic__elasticsearch | x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/event/parser/event/SearchClickAnalyticsEventTests.java | {
"start": 1983,
"end": 3459
} | class ____ extends AnalyticsEventParserTestCase {
@Override
protected ContextParser<AnalyticsEvent.Context, AnalyticsEvent> parser() {
return SearchClickAnalyticsEvent::fromXContent;
}
@Override
protected AnalyticsEvent createTestInstance() throws IOException {
return randomSearchClickEvent();
}
@Override
protected List<String> requiredFields() {
return Stream.of(SESSION_FIELD, USER_FIELD, SEARCH_FIELD).map(ParseField::getPreferredName).collect(Collectors.toList());
}
@Override
protected String parserName() {
return "search_click_event";
}
protected Predicate<String> isFieldRequired() {
return super.isFieldRequired().or(DOCUMENT_FIELD.getPreferredName()::equals).or(PAGE_FIELD.getPreferredName()::equals);
}
public static AnalyticsEvent randomSearchClickEvent() throws IOException {
Map<String, Object> payloadBuilder = Map.ofEntries(
entry(SESSION_FIELD.getPreferredName(), randomEventSessionField()),
entry(USER_FIELD.getPreferredName(), randomEventUserField()),
entry(PAGE_FIELD.getPreferredName(), randomEventPageField()),
entry(DOCUMENT_FIELD.getPreferredName(), randomEventDocumentField()),
entry(SEARCH_FIELD.getPreferredName(), randomEventSearchField())
);
return randomAnalyticsEvent(AnalyticsEvent.Type.SEARCH_CLICK, payloadBuilder);
}
}
| SearchClickAnalyticsEventTests |
java | alibaba__nacos | config/src/main/java/com/alibaba/nacos/config/server/service/LongPollingService.java | {
"start": 2478,
"end": 11842
} | class ____ {
private static final int SAMPLE_PERIOD = 100;
private static final int SAMPLE_TIMES = 3;
private static final String TRUE_STR = "true";
private Map<String, Long> retainIps = new ConcurrentHashMap<>();
public SampleResult getSubscribleInfo(String dataId, String group, String tenant) {
String groupKey = GroupKey.getKeyTenant(dataId, group, tenant);
SampleResult sampleResult = new SampleResult();
Map<String, String> lisentersGroupkeyStatus = new HashMap<>(50);
for (ClientLongPolling clientLongPolling : allSubs) {
if (clientLongPolling.clientMd5Map.containsKey(groupKey)) {
lisentersGroupkeyStatus.put(clientLongPolling.ip, clientLongPolling.clientMd5Map.get(groupKey).getMd5());
}
}
sampleResult.setLisentersGroupkeyStatus(lisentersGroupkeyStatus);
return sampleResult;
}
public SampleResult getSubscribleInfoByIp(String clientIp) {
SampleResult sampleResult = new SampleResult();
Map<String, String> lisenersGroupkeyStatus = new HashMap<>(50);
for (ClientLongPolling clientLongPolling : allSubs) {
if (clientLongPolling.ip.equals(clientIp)) {
// One ip can have multiple listener.
for (Map.Entry<String, ConfigListenState> entry : clientLongPolling.clientMd5Map.entrySet()) {
lisenersGroupkeyStatus.put(entry.getKey(), entry.getValue().getMd5());
}
}
}
sampleResult.setLisentersGroupkeyStatus(lisenersGroupkeyStatus);
return sampleResult;
}
/**
* Aggregate the sampling IP and monitoring configuration information in the sampling results. There is no problem
* for the merging strategy to cover the previous one with the latter.
*
* @param sampleResults sample Results.
* @return Results.
*/
public SampleResult mergeSampleResult(List<SampleResult> sampleResults) {
SampleResult mergeResult = new SampleResult();
Map<String, String> lisentersGroupkeyStatus = new HashMap<>(50);
for (SampleResult sampleResult : sampleResults) {
Map<String, String> lisentersGroupkeyStatusTmp = sampleResult.getLisentersGroupkeyStatus();
for (Map.Entry<String, String> entry : lisentersGroupkeyStatusTmp.entrySet()) {
lisentersGroupkeyStatus.put(entry.getKey(), entry.getValue());
}
}
mergeResult.setLisentersGroupkeyStatus(lisentersGroupkeyStatus);
return mergeResult;
}
public SampleResult getCollectSubscribleInfo(String dataId, String group, String tenant) {
List<SampleResult> sampleResultLst = new ArrayList<>(50);
for (int i = 0; i < SAMPLE_TIMES; i++) {
SampleResult sampleTmp = getSubscribleInfo(dataId, group, tenant);
if (sampleTmp != null) {
sampleResultLst.add(sampleTmp);
}
if (i < SAMPLE_TIMES - 1) {
try {
Thread.sleep(SAMPLE_PERIOD);
} catch (InterruptedException e) {
LogUtil.CLIENT_LOG.error("sleep wrong", e);
}
}
}
return mergeSampleResult(sampleResultLst);
}
public SampleResult getCollectSubscribleInfoByIp(String ip) {
SampleResult sampleResult = new SampleResult();
sampleResult.setLisentersGroupkeyStatus(new HashMap<>(50));
for (int i = 0; i < SAMPLE_TIMES; i++) {
SampleResult sampleTmp = getSubscribleInfoByIp(ip);
if (sampleTmp != null) {
if (sampleTmp.getLisentersGroupkeyStatus() != null && !sampleResult.getLisentersGroupkeyStatus()
.equals(sampleTmp.getLisentersGroupkeyStatus())) {
sampleResult.getLisentersGroupkeyStatus().putAll(sampleTmp.getLisentersGroupkeyStatus());
}
}
if (i < SAMPLE_TIMES - 1) {
try {
Thread.sleep(SAMPLE_PERIOD);
} catch (InterruptedException e) {
LogUtil.CLIENT_LOG.error("sleep wrong", e);
}
}
}
return sampleResult;
}
/**
* Add LongPollingClient.
*
* @param req HttpServletRequest.
* @param rsp HttpServletResponse.
* @param clientMd5Map clientMd5Map.
* @param probeRequestSize probeRequestSize.
*/
public void addLongPollingClient(HttpServletRequest req, HttpServletResponse rsp, Map<String, ConfigListenState> clientMd5Map,
int probeRequestSize) {
String noHangUpFlag = req.getHeader(LongPollingService.LONG_POLLING_NO_HANG_UP_HEADER);
long start = System.currentTimeMillis();
Map<String, ConfigListenState> changedGroups = MD5Util.compareMd5(req, rsp, clientMd5Map);
if (changedGroups.size() > 0) {
generateResponse(req, rsp, changedGroups);
LogUtil.CLIENT_LOG.info("{}|{}|{}|{}|{}|{}|{}", System.currentTimeMillis() - start, "instant",
RequestUtil.getRemoteIp(req), "polling", clientMd5Map.size(), probeRequestSize,
changedGroups.size());
return;
} else if (noHangUpFlag != null && noHangUpFlag.equalsIgnoreCase(TRUE_STR)) {
LogUtil.CLIENT_LOG.info("{}|{}|{}|{}|{}|{}|{}", System.currentTimeMillis() - start, "nohangup",
RequestUtil.getRemoteIp(req), "polling", clientMd5Map.size(), probeRequestSize,
changedGroups.size());
return;
}
// Must be called by http thread, or send response.
final AsyncContext asyncContext = req.startAsync();
// AsyncContext.setTimeout() is incorrect, Control by oneself
asyncContext.setTimeout(0L);
String ip = RequestUtil.getRemoteIp(req);
ConnectionCheckResponse connectionCheckResponse = checkLimit(req);
if (!connectionCheckResponse.isSuccess()) {
RpcScheduledExecutor.CONTROL_SCHEDULER.schedule(
() -> generate503Response(asyncContext, rsp, connectionCheckResponse.getMessage()),
1000L + new Random().nextInt(2000), TimeUnit.MILLISECONDS);
return;
}
String appName = req.getHeader(RequestUtil.CLIENT_APPNAME_HEADER);
String tag = req.getHeader("Vipserver-Tag");
int delayTime = SwitchService.getSwitchInteger(SwitchService.FIXED_DELAY_TIME, 500);
int minLongPoolingTimeout = SwitchService.getSwitchInteger("MIN_LONG_POOLING_TIMEOUT", 10000);
// Add delay time for LoadBalance, and one response is returned 500 ms in advance to avoid client timeout.
String requestLongPollingTimeOut = req.getHeader(LongPollingService.LONG_POLLING_HEADER);
long timeout = Math.max(minLongPoolingTimeout, Long.parseLong(requestLongPollingTimeOut) - delayTime);
ConfigExecutor.executeLongPolling(
new ClientLongPolling(asyncContext, clientMd5Map, ip, probeRequestSize, timeout, appName, tag));
}
private ConnectionCheckResponse checkLimit(HttpServletRequest httpServletRequest) {
String ip = RequestUtil.getRemoteIp(httpServletRequest);
String appName = httpServletRequest.getHeader(RequestUtil.CLIENT_APPNAME_HEADER);
ConnectionCheckRequest connectionCheckRequest = new ConnectionCheckRequest(ip, appName, "LongPolling");
ConnectionCheckResponse checkResponse = ControlManagerCenter.getInstance().getConnectionControlManager()
.check(connectionCheckRequest);
return checkResponse;
}
public static boolean isSupportLongPolling(HttpServletRequest req) {
return null != req.getHeader(LONG_POLLING_HEADER);
}
@SuppressWarnings("PMD.ThreadPoolCreationRule")
public LongPollingService() {
allSubs = new ConcurrentLinkedQueue<>();
ConfigExecutor.scheduleLongPolling(new StatTask(), 0L, 10L, TimeUnit.SECONDS);
// Register LocalDataChangeEvent to NotifyCenter.
NotifyCenter.registerToPublisher(LocalDataChangeEvent.class, NotifyCenter.ringBufferSize);
// Register A Subscriber to subscribe LocalDataChangeEvent.
NotifyCenter.registerSubscriber(new Subscriber() {
@Override
public void onEvent(Event event) {
if (event instanceof LocalDataChangeEvent) {
LocalDataChangeEvent evt = (LocalDataChangeEvent) event;
ConfigExecutor.executeLongPolling(new DataChangeTask(evt.groupKey));
}
}
@Override
public Class<? extends Event> subscribeType() {
return LocalDataChangeEvent.class;
}
});
}
public static final String LONG_POLLING_HEADER = "Long-Pulling-Timeout";
public static final String LONG_POLLING_NO_HANG_UP_HEADER = "Long-Pulling-Timeout-No-Hangup";
/**
* ClientLongPolling subscibers.
*/
final Queue<ClientLongPolling> allSubs;
| LongPollingService |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/core/io/DefaultResourceLoader.java | {
"start": 6640,
"end": 7149
} | class ____ extends ClassPathResource implements ContextResource {
public ClassPathContextResource(String path, @Nullable ClassLoader classLoader) {
super(path, classLoader);
}
@Override
public String getPathWithinContext() {
return getPath();
}
@Override
public Resource createRelative(String relativePath) {
String pathToUse = StringUtils.applyRelativePath(getPath(), relativePath);
return new ClassPathContextResource(pathToUse, getClassLoader());
}
}
}
| ClassPathContextResource |
java | playframework__playframework | core/play-integration-test/src/test/java/play/routing/AbstractRoutingDslTest.java | {
"start": 956,
"end": 1090
} | class ____ in the integration tests so that we have the right helper classes to build a
* request with to test it.
*/
public abstract | is |
java | hibernate__hibernate-orm | tooling/metamodel-generator/src/test/java/org/hibernate/processor/test/accesstype/Item.java | {
"start": 417,
"end": 1184
} | class ____ {
long id;
int quantity;
Product product;
Order order;
Detail detail;
@Id
public long getId() {
return id;
}
public void setId(long id) {
this.id = id;
}
public int getQuantity() {
return quantity;
}
public void setQuantity(int quantity) {
this.quantity = quantity;
}
@ManyToOne
public Product getProduct() {
return product;
}
public void setProduct(Product product) {
this.product = product;
}
@ManyToOne
public Order getOrder() {
return order;
}
public void setOrder(Order order) {
this.order = order;
}
@OneToMany
public Map<String, Order> getNamedOrders() {
return null;
}
public Detail getDetail() {
return detail;
}
public void setDetail(Detail detail) {
this.detail = detail;
}
}
| Item |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/node/DoubleNode.java | {
"start": 410,
"end": 5963
} | class ____
extends NumericFPNode
{
private static final long serialVersionUID = 3L;
protected final double _value;
/*
/**********************************************************************
/* Construction
/**********************************************************************
*/
public DoubleNode(double v) { _value = v; }
public static DoubleNode valueOf(double v) { return new DoubleNode(v); }
/*
/**********************************************************************
/* Overridden JsonNode methods, simple properties
/**********************************************************************
*/
@Override
public JsonParser.NumberType numberType() { return JsonParser.NumberType.DOUBLE; }
@Override
public boolean isDouble() { return true; }
@Override
public boolean isNaN() {
return NumberOutput.notFinite(_value);
}
/*
/**********************************************************************
/* Overridden JsonNode methods, scalar access, non-numeric
/**********************************************************************
*/
@Override
protected String _asString() {
return String.valueOf(_value);
}
/*
/**********************************************************************
/* Overridden JsonNode methods, scalar access, numeric
/**********************************************************************
*/
@Override
public Number numberValue() {
return Double.valueOf(_value);
}
@Override
public float floatValue() {
float f = (float) _value;
if (Float.isFinite(f)) {
return f;
}
return _reportFloatCoercionRangeFail("floatValue()");
}
@Override
public float floatValue(float defaultValue) {
float f = (float) _value;
if (Float.isFinite(f)) {
return f;
}
return defaultValue;
}
@Override
public Optional<Float> floatValueOpt() {
float f = (float) _value;
if (Float.isFinite(f)) {
return Optional.of(f);
}
return Optional.empty();
}
@Override
public float asFloat() {
float f = (float) _value;
if (Float.isFinite(f)) {
return f;
}
return _reportFloatCoercionRangeFail("asFloat()");
}
@Override
public float asFloat(float defaultValue) {
float f = (float) _value;
if (Float.isFinite(f)) {
return f;
}
return defaultValue;
}
@Override
public Optional<Float> asFloatOpt() {
float f = (float) _value;
if (Float.isFinite(f)) {
return Optional.of(f);
}
return Optional.empty();
}
@Override
public double doubleValue() {
return _value;
}
@Override
public double doubleValue(double defaultValue) {
return _value;
}
@Override
public OptionalDouble doubleValueOpt() {
return OptionalDouble.of(_value);
}
@Override
public double asDouble() {
return _value;
}
@Override
public double asDouble(double defaultValue) {
return _value;
}
@Override
public OptionalDouble asDoubleOpt() {
return OptionalDouble.of(_value);
}
/*
/**********************************************************************
/* NumericFPNode abstract method impls
/**********************************************************************
*/
@Override
public short _asShortValueUnchecked() {
return (short) _value;
}
@Override
public int _asIntValueUnchecked() {
return (int) _value;
}
@Override
public long _asLongValueUnchecked() {
return (long) _value;
}
@Override
protected BigInteger _asBigIntegerValueUnchecked() {
return BigDecimal.valueOf(_value).toBigInteger();
}
@Override
protected BigDecimal _asDecimalValueUnchecked() {
return BigDecimal.valueOf(_value);
}
@Override
public boolean hasFractionalPart() { return _value != Math.rint(_value); }
@Override
public boolean inShortRange() {
return !isNaN() && (_value >= Short.MIN_VALUE) && (_value <= Short.MAX_VALUE);
}
@Override
public boolean inIntRange() {
return !isNaN() && (_value >= Integer.MIN_VALUE) && (_value <= Integer.MAX_VALUE);
}
@Override
public boolean inLongRange() {
return !isNaN() && (_value >= Long.MIN_VALUE) && (_value <= Long.MAX_VALUE);
}
/*
/**********************************************************************
/* Overrides, other
/**********************************************************************
*/
@Override
public final void serialize(JsonGenerator g, SerializationContext provider)
throws JacksonException
{
g.writeNumber(_value);
}
@Override
public boolean equals(Object o)
{
if (o == this) return true;
if (o == null) return false;
if (o instanceof DoubleNode otherNode) {
// We must account for NaNs: NaN does not equal NaN, therefore we have
// to use Double.compare().
final double otherValue = otherNode._value;
return Double.compare(_value, otherValue) == 0;
}
return false;
}
@Override
public int hashCode()
{
// same as hashCode Double. | DoubleNode |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/ser/enums/EnumSerializationTest.java | {
"start": 13033,
"end": 13202
} | enum
____ NOT_OK2 {
V2("v2");
protected String key;
// any runtime-persistent annotation is fine
NOT_OK2(@JsonProperty String key) { this.key = key; }
}
| enum |
java | spring-projects__spring-framework | spring-r2dbc/src/main/java/org/springframework/r2dbc/core/binding/NamedBindMarkers.java | {
"start": 2237,
"end": 2814
} | class ____ implements BindMarker {
private final String placeholder;
private final String identifier;
NamedBindMarker(String placeholder, String identifier) {
this.placeholder = placeholder;
this.identifier = identifier;
}
@Override
public String getPlaceholder() {
return this.placeholder;
}
@Override
public void bind(BindTarget target, Object value) {
target.bind(this.identifier, value);
}
@Override
public void bindNull(BindTarget target, Class<?> valueType) {
target.bindNull(this.identifier, valueType);
}
}
}
| NamedBindMarker |
java | quarkusio__quarkus | independent-projects/qute/core/src/main/java/io/quarkus/qute/IfSectionHelper.java | {
"start": 15997,
"end": 28426
} | enum ____ {
EQ(2, "eq", "==", "is"),
NE(2, "ne", "!="),
GT(3, "gt", ">"),
GE(3, "ge", ">="),
LE(3, "le", "<="),
LT(3, "lt", "<"),
AND(1, "and", "&&"),
OR(1, "or", "||"),
NOT(4, IfSectionHelper.LOGICAL_COMPLEMENT);
private final List<String> aliases;
private final int precedence;
Operator(int precedence, String... aliases) {
this.aliases = List.of(aliases);
this.precedence = precedence;
}
int getPrecedence() {
return precedence;
}
boolean evaluate(Object op1, Object op2) {
switch (this) {
case EQ:
return equals(op1, op2);
case NE:
return !equals(op1, op2);
case GE:
case GT:
case LE:
case LT:
return compare(op1, op2);
case AND:
case OR:
return !isFalsy(op2);
default:
throw new TemplateException("Not a binary operator: " + this);
}
}
boolean equals(Object op1, Object op2) {
if (Objects.equals(op1, op2)) {
return true;
}
if (op1 != null && op2 != null && (op1 instanceof Number || op2 instanceof Number)) {
// Both operands are not null and at least one of them is a number
return getDecimal(op1).compareTo(getDecimal(op2)) == 0;
}
return false;
}
@SuppressWarnings({ "rawtypes", "unchecked" })
boolean compare(Object op1, Object op2) {
if (op1 == null || op2 == null) {
throw new TemplateException("Unable to compare null operands [op1=" + op1 + ", op2=" + op2 + "]");
}
Comparable c1;
Comparable c2;
if (op1 instanceof Comparable && op1.getClass().equals(op2.getClass())) {
c1 = (Comparable) op1;
c2 = (Comparable) op2;
} else {
c1 = getDecimal(op1);
c2 = getDecimal(op2);
}
int result = c1.compareTo(c2);
switch (this) {
case GE:
return result >= 0;
case GT:
return result > 0;
case LE:
return result <= 0;
case LT:
return result < 0;
default:
return false;
}
}
Boolean evaluate(Object op1) {
switch (this) {
case AND:
return isFalsy(op1) ? Boolean.FALSE : null;
case OR:
return isFalsy(op1) ? null : Boolean.TRUE;
default:
throw new TemplateException("Not a short-circuiting operator: " + this);
}
}
boolean isShortCircuiting() {
return AND.equals(this) || OR.equals(this);
}
boolean isBinary() {
return !NOT.equals(this);
}
static Operator from(String value) {
if (value == null || value.isEmpty()) {
return null;
}
for (Operator operator : values()) {
if (operator.aliases.contains(value)) {
return operator;
}
}
return null;
}
static BigDecimal getDecimal(Object value) {
if (value instanceof BigDecimal decimal) {
return decimal;
} else if (value instanceof BigInteger bigInteger) {
return new BigDecimal(bigInteger);
} else if (value instanceof Integer integer) {
return BigDecimal.valueOf(integer);
} else if (value instanceof Long _long) {
return BigDecimal.valueOf(_long);
} else if (value instanceof Double _double) {
return BigDecimal.valueOf(_double);
} else if (value instanceof Float _float) {
return BigDecimal.valueOf(_float);
} else if (value instanceof String string) {
return new BigDecimal(string);
}
throw new TemplateException("Cannot coerce " + value + " to a BigDecimal");
}
}
static <B extends ErrorInitializer & WithOrigin> List<Object> parseParams(List<Object> params, B block) {
replaceOperatorsAndCompositeParams(params, block);
int highestPrecedence = getHighestPrecedence(params);
if (!isGroupingNeeded(params)) {
// No operators or all of the same precedence
return params;
}
// Take the operators with highest precedence and form groups
// For example "user.active && target.status == NEW && !target.voted" becomes "user.active && [target.status == NEW] && [!target.voted]"
// The algorithm used is not very robust and should be improved later
List<Object> highestGroup = null;
List<Object> ret = new ArrayList<>();
int lastGroupdIdx = 0;
for (ListIterator<Object> iterator = params.listIterator(); iterator.hasNext();) {
int prevIdx = iterator.previousIndex();
Object param = iterator.next();
if (param instanceof Operator) {
Operator op = (Operator) param;
if (op.precedence == highestPrecedence) {
if (highestGroup == null) {
highestGroup = new ArrayList<>();
if (op.isBinary()) {
highestGroup.add(params.get(prevIdx));
}
}
highestGroup.add(param);
// Add non-grouped elements
if (prevIdx > lastGroupdIdx) {
int from = lastGroupdIdx > 0 ? lastGroupdIdx + 1 : 0;
int to = op.isBinary() ? prevIdx : prevIdx + 1;
ret.addAll(params.subList(from, to));
}
} else if (op.precedence < highestPrecedence) {
if (highestGroup != null) {
ret.add(highestGroup);
lastGroupdIdx = prevIdx;
highestGroup = null;
}
} else {
throw new IllegalStateException();
}
} else if (highestGroup != null) {
highestGroup.add(param);
}
}
if (highestGroup != null) {
ret.add(highestGroup);
} else {
// Add all remaining non-grouped elements
if (lastGroupdIdx + 1 != params.size()) {
ret.addAll(params.subList(lastGroupdIdx + 1, params.size()));
}
}
return parseParams(ret, block);
}
private static boolean isGroupingNeeded(List<Object> params) {
Integer lastPrecedence = null;
for (Object param : params) {
if (param instanceof Operator) {
Operator op = (Operator) param;
if (lastPrecedence == null) {
lastPrecedence = op.getPrecedence();
} else if (!lastPrecedence.equals(op.getPrecedence())) {
return true;
}
}
}
return false;
}
private static <B extends ErrorInitializer & WithOrigin> void replaceOperatorsAndCompositeParams(List<Object> params,
B block) {
for (ListIterator<Object> iterator = params.listIterator(); iterator.hasNext();) {
Object param = iterator.next();
if (param instanceof String) {
String stringParam = param.toString();
Operator operator = Operator.from(stringParam);
if (operator != null) {
if (operator.isBinary() && !iterator.hasNext()) {
throw block.error(
"binary operator [{operator}] set but the second operand not present for \\{#if\\} section")
.argument("operator", operator)
.code(Code.BINARY_OPERATOR_MISSING_SECOND_OPERAND)
.origin(block.getOrigin())
.build();
}
iterator.set(operator);
} else {
if (stringParam.length() > 1 && stringParam.startsWith(LOGICAL_COMPLEMENT)) {
// !item.active
iterator.set(Operator.NOT);
stringParam = stringParam.substring(1);
if (stringParam.charAt(0) == Parser.START_COMPOSITE_PARAM) {
iterator.add(processCompositeParam(stringParam, block));
} else {
iterator.add(stringParam);
}
} else {
if (stringParam.charAt(0) == Parser.START_COMPOSITE_PARAM) {
iterator.set(processCompositeParam(stringParam, block));
}
}
}
}
}
}
private static int getHighestPrecedence(List<Object> params) {
int highestPrecedence = 0;
for (Object param : params) {
if (param instanceof Operator) {
Operator op = (Operator) param;
if (op.precedence > highestPrecedence) {
highestPrecedence = op.precedence;
}
}
}
return highestPrecedence;
}
static <B extends ErrorInitializer & WithOrigin> List<Object> processCompositeParam(String stringParam, B block) {
// Composite params
if (!stringParam.endsWith("" + Parser.END_COMPOSITE_PARAM)) {
throw new TemplateException("Invalid composite parameter found: " + stringParam);
}
List<Object> split = new ArrayList<>();
Parser.splitSectionParams(stringParam.substring(1, stringParam.length() - 1),
block)
.forEachRemaining(split::add);
return parseParams(split, block);
}
@SuppressWarnings("unchecked")
static Condition createCondition(Object param, SectionBlock block, Operator operator, SectionInitContext context) {
Condition condition;
if (param instanceof String) {
String stringParam = param.toString();
boolean logicalComplement = stringParam.startsWith(LOGICAL_COMPLEMENT);
if (logicalComplement) {
stringParam = stringParam.substring(1);
}
Expression expr = block.expressions.get(stringParam);
if (expr == null) {
throw new TemplateException("Expression not found for param [" + stringParam + "]: " + block);
}
condition = new OperandCondition(operator, expr);
} else if (param instanceof List) {
List<Object> params = (List<Object>) param;
if (params.size() == 1) {
return createCondition(params.get(0), block, operator, context);
}
List<Condition> conditions = new ArrayList<>();
Operator nextOperator = null;
for (Object p : params) {
if (p instanceof Operator) {
nextOperator = (Operator) p;
} else {
conditions.add(createCondition(p, block, nextOperator, context));
nextOperator = null;
}
}
if (operator == null && conditions.size() == 1) {
condition = conditions.get(0);
} else if (conditions.size() == 2) {
condition = new DoubletonCondition(conditions.get(0), conditions.get(1), operator);
} else {
condition = new CompositeCondition(operator, ImmutableList.copyOf(conditions));
}
} else {
throw new TemplateException("Unsupported param type: " + param);
}
return condition;
}
| Operator |
java | apache__rocketmq | tools/src/main/java/org/apache/rocketmq/tools/admin/common/AdminToolsResultCodeEnum.java | {
"start": 858,
"end": 1304
} | enum ____ {
/**
*
*/
SUCCESS(200),
REMOTING_ERROR(-1001),
MQ_BROKER_ERROR(-1002),
MQ_CLIENT_ERROR(-1003),
INTERRUPT_ERROR(-1004),
TOPIC_ROUTE_INFO_NOT_EXIST(-2001),
CONSUMER_NOT_ONLINE(-2002),
BROADCAST_CONSUMPTION(-2003);
private int code;
AdminToolsResultCodeEnum(int code) {
this.code = code;
}
public int getCode() {
return code;
}
}
| AdminToolsResultCodeEnum |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/FutureTransformAsyncTest.java | {
"start": 13708,
"end": 14236
} | class ____ {
private Executor executor;
ListenableFuture<String> foo(String s) {
return Futures.immediateFuture(s);
}
ListenableFuture<Void> test() {
ListenableFuture<Void> future =
Futures.transformAsync(foo("x"), value -> Futures.immediateVoidFuture(), executor);
return future;
}
}
""")
.addOutputLines(
"out/Test.java",
"""
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.concurrent.Executor;
| Test |
java | greenrobot__greendao | tests/DaoTestBase/src/main/java/org/greenrobot/greendao/daotest2/KeepEntity.java | {
"start": 326,
"end": 912
} | class ____ {
@Id
private Long id;
// KEEP FIELDS - put your custom fields here
String extra = Build.VERSION.SDK;
// KEEP FIELDS END
@Generated
public KeepEntity() {
}
@Generated
public KeepEntity(Long id) {
this.id = id;
}
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
// KEEP METHODS - put your custom methods here
@Override
public String toString() {
return "KeepEntity ID=42 (extra=" + extra + ")";
}
// KEEP METHODS END
}
| KeepEntity |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/plugins/AnalysisPlugin.java | {
"start": 1837,
"end": 2553
} | class ____ extends Plugin implements AnalysisPlugin {
* @Override
* public Map<String, AnalysisProvider<TokenFilterFactory>> getTokenFilters() {
* return singletonMap("phonetic", PhoneticTokenFilterFactory::new);
* }
* }
* }</pre>
*
* Elasticsearch doesn't have any automatic mechanism to share these components between indexes. If any component is heavy enough to warrant
* such sharing then it is the Plugin's responsibility to do it in their {@link AnalysisProvider} implementation. We recommend against doing
* this unless absolutely necessary because it can be difficult to get the caching right given things like behavior changes across versions.
*/
public | AnalysisPhoneticPlugin |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/internal/iterables/Iterables_assertAreExactly_Test.java | {
"start": 1532,
"end": 3140
} | class ____ extends IterablesWithConditionsBaseTest {
@Test
void should_pass_if_satisfies_exactly_times_condition() {
actual = newArrayList("Yoda", "Luke", "Leia");
iterables.assertAreExactly(someInfo(), actual, 2, jedi);
verify(conditions).assertIsNotNull(jedi);
}
@Test
void should_throw_error_if_condition_is_null() {
assertThatNullPointerException().isThrownBy(() -> {
actual = newArrayList("Yoda", "Luke");
iterables.assertAreExactly(someInfo(), actual, 2, null);
}).withMessage("The condition to evaluate should not be null");
verify(conditions).assertIsNotNull(null);
}
@Test
void should_fail_if_condition_is_not_met_enough() {
testCondition.shouldMatch(false);
AssertionInfo info = someInfo();
actual = newArrayList("Yoda", "Solo", "Leia");
Throwable error = catchThrowable(() -> iterables.assertAreExactly(someInfo(), actual, 2, jedi));
assertThat(error).isInstanceOf(AssertionError.class);
verify(conditions).assertIsNotNull(jedi);
verify(failures).failure(info, elementsShouldBeExactly(actual, 2, jedi));
}
@Test
void should_fail_if_condition_is_met_much() {
testCondition.shouldMatch(false);
AssertionInfo info = someInfo();
actual = newArrayList("Yoda", "Luke", "Obiwan");
Throwable error = catchThrowable(() -> iterables.assertAreExactly(someInfo(), actual, 2, jedi));
assertThat(error).isInstanceOf(AssertionError.class);
verify(conditions).assertIsNotNull(jedi);
verify(failures).failure(info, elementsShouldBeExactly(actual, 2, jedi));
}
}
| Iterables_assertAreExactly_Test |
java | apache__camel | components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/spring/processor/throttle/SpringThrottlerGroupingTest.java | {
"start": 1540,
"end": 2183
} | class ____ implements Processor {
@Override
public void process(Exchange exchange) throws Exception {
String key = (String) exchange.getMessage().getHeader("key");
assertTrue(
semaphores.computeIfAbsent(key, k -> new Semaphore(
exchange.getMessage().getHeader("throttleValue") == null
? CONCURRENT_REQUESTS : (Integer) exchange.getMessage().getHeader("throttleValue")))
.tryAcquire(),
"too many requests for key " + key);
}
}
public static | IncrementProcessor |
java | hibernate__hibernate-orm | hibernate-jcache/src/test/java/org/hibernate/orm/test/jcache/InsertedDataTest.java | {
"start": 8785,
"end": 9334
} | class ____ {
private Long id;
private String name;
public CacheableItem() {
}
public CacheableItem(String name) {
this.name = name;
}
@Id
@GeneratedValue(generator = "increment")
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
@Entity(name = "CacheableEmbeddedIdItem")
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region = "item")
public static | CacheableItem |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/arm-java/org/apache/hadoop/ipc/protobuf/TestProtosLegacy.java | {
"start": 1431,
"end": 10173
} | class ____ extends
com.google.protobuf.GeneratedMessage
implements EmptyRequestProtoOrBuilder {
// Use EmptyRequestProto.newBuilder() to construct.
private EmptyRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private EmptyRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final EmptyRequestProto defaultInstance;
public static EmptyRequestProto getDefaultInstance() {
return defaultInstance;
}
public EmptyRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private EmptyRequestProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_EmptyRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_EmptyRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.Builder.class);
}
public static com.google.protobuf.Parser<EmptyRequestProto> PARSER =
new com.google.protobuf.AbstractParser<EmptyRequestProto>() {
public EmptyRequestProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new EmptyRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<EmptyRequestProto> getParserForType() {
return PARSER;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto other = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.common.EmptyRequestProto}
*/
public static final | EmptyRequestProto |
java | playframework__playframework | documentation/manual/working/javaGuide/main/forms/code/javaguide/forms/FormattersProvider.java | {
"start": 630,
"end": 1779
} | class ____ implements Provider<Formatters> {
private final MessagesApi messagesApi;
@Inject
public FormattersProvider(MessagesApi messagesApi) {
this.messagesApi = messagesApi;
}
@Override
public Formatters get() {
Formatters formatters = new Formatters(messagesApi);
formatters.register(
LocalTime.class,
new SimpleFormatter<LocalTime>() {
private Pattern timePattern = Pattern.compile("([012]?\\d)(?:[\\s:\\._\\-]+([0-5]\\d))?");
@Override
public LocalTime parse(String input, Locale l) throws ParseException {
Matcher m = timePattern.matcher(input);
if (!m.find()) throw new ParseException("No valid Input", 0);
int hour = Integer.valueOf(m.group(1));
int min = m.group(2) == null ? 0 : Integer.valueOf(m.group(2));
return LocalTime.of(hour, min);
}
@Override
public String print(LocalTime localTime, Locale l) {
return localTime.format(DateTimeFormatter.ofPattern("HH:mm"));
}
});
return formatters;
}
}
// #register-formatter
| FormattersProvider |
java | apache__rocketmq | client/src/main/java/org/apache/rocketmq/client/producer/DefaultMQProducer.java | {
"start": 3089,
"end": 3380
} | class ____ various <code>send</code> methods to deliver messages to broker(s). Each of them has pros and
* cons; you'd better understand strengths and weakness of them before actually coding. </p>
*
* <p> <strong>Thread Safety:</strong> After configuring and starting process, this | aggregates |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/sql/dialect/odps/ast/OdpsSelectQueryBlock.java | {
"start": 1164,
"end": 3128
} | class ____ extends SQLSelectQueryBlock {
private SQLZOrderBy zOrderBy;
public OdpsSelectQueryBlock() {
dbType = DbType.odps;
clusterBy = new ArrayList<SQLSelectOrderByItem>();
distributeBy = new ArrayList<SQLSelectOrderByItem>();
sortBy = new ArrayList<SQLSelectOrderByItem>(2);
}
public OdpsSelectQueryBlock clone() {
OdpsSelectQueryBlock x = new OdpsSelectQueryBlock();
cloneTo(x);
return x;
}
@Override
protected void accept0(SQLASTVisitor visitor) {
if (visitor instanceof OdpsASTVisitor) {
accept0((OdpsASTVisitor) visitor);
return;
}
super.accept0(visitor);
}
public void accept0(OdpsASTVisitor visitor) {
if (visitor.visit(this)) {
acceptChild(visitor, this.hints);
acceptChild(visitor, this.selectList);
acceptChild(visitor, this.from);
acceptChild(visitor, this.where);
acceptChild(visitor, this.groupBy);
acceptChild(visitor, this.orderBy);
acceptChild(visitor, this.zOrderBy);
acceptChild(visitor, this.clusterBy);
acceptChild(visitor, this.distributeBy);
acceptChild(visitor, this.sortBy);
acceptChild(visitor, this.limit);
acceptChild(visitor, this.into);
}
visitor.endVisit(this);
}
public String toString() {
return SQLUtils.toOdpsString(this);
}
public void limit(int rowCount, int offset) {
if (offset > 0) {
throw new UnsupportedOperationException("not support offset");
}
setLimit(new SQLLimit(new SQLIntegerExpr(rowCount)));
}
public SQLZOrderBy getZOrderBy() {
return zOrderBy;
}
public void setZOrderBy(SQLZOrderBy x) {
if (x != null) {
x.setParent(this);
}
this.zOrderBy = x;
}
}
| OdpsSelectQueryBlock |
java | apache__flink | flink-table/flink-table-api-java-bridge/src/test/java/org/apache/flink/connector/datagen/table/types/DecimalDataRandomGeneratorTest.java | {
"start": 1130,
"end": 4786
} | class ____ {
@Test
void testGenerateDecimalValues() {
for (int precision = 1; precision <= 38; precision++) {
for (int scale = 0; scale <= precision; scale++) {
DecimalDataRandomGenerator gen =
new DecimalDataRandomGenerator(
precision, scale, Double.MIN_VALUE, Double.MAX_VALUE, 0f);
DecimalData value = gen.next();
assertThat(value)
.as("Null value for DECIMAL(" + precision + "," + scale + ")")
.isNotNull();
String strRepr = String.valueOf(value);
if (strRepr.charAt(0) == '-') {
// drop the negative sign
strRepr = strRepr.substring(1);
}
if (scale != precision) {
// need to account for decimal . and potential leading zeros
assertThat(strRepr)
.as(
"Wrong length for DECIMAL("
+ precision
+ ","
+ scale
+ ") = "
+ strRepr)
.hasSizeLessThanOrEqualTo(precision + 1);
} else {
// need to account for decimal . and potential leading zeros
assertThat(strRepr)
.as(
"Wrong length for DECIMAL("
+ precision
+ ","
+ scale
+ ") = "
+ strRepr)
.hasSizeLessThanOrEqualTo(precision + 2);
}
if (scale != 0) {
String decimalPart = strRepr.split("\\.")[1];
assertThat(decimalPart)
.as(
"Wrong length for DECIMAL("
+ precision
+ ","
+ scale
+ ") = "
+ strRepr)
.hasSize(scale);
}
}
}
}
@Test
void testMinMax() {
for (int precision = 1; precision <= 38; precision++) {
for (int scale = 0; scale <= precision; scale++) {
BigDecimal min = BigDecimal.valueOf(-10.0);
BigDecimal max = BigDecimal.valueOf(10.0);
DecimalDataRandomGenerator gen =
new DecimalDataRandomGenerator(
precision, scale, min.doubleValue(), max.doubleValue(), 0f);
DecimalData result = gen.next();
assertThat(result)
.as("Null value for DECIMAL(" + precision + "," + scale + ")")
.isNotNull();
assertThat(result.toBigDecimal())
.as("value must be greater than or equal to min")
.isGreaterThanOrEqualTo(min)
.as("value must be less than or equal to max")
.isLessThanOrEqualTo(max);
}
}
}
}
| DecimalDataRandomGeneratorTest |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/util/CompositeSetTests.java | {
"start": 868,
"end": 1365
} | class ____ {
@Test
void testEquals() {
Set<String> first = Set.of("foo", "bar");
Set<String> second = Set.of("baz", "qux");
CompositeSet<String> composite = new CompositeSet<>(first, second);
Set<String> all = new HashSet<>(first);
all.addAll(second);
assertThat(composite.equals(all)).isTrue();
assertThat(composite.equals(first)).isFalse();
assertThat(composite.equals(second)).isFalse();
assertThat(composite.equals(Collections.emptySet())).isFalse();
}
}
| CompositeSetTests |
java | apache__camel | dsl/camel-jbang/camel-jbang-core/src/main/java/org/apache/camel/dsl/jbang/core/commands/CamelCommand.java | {
"start": 5422,
"end": 6045
} | class ____<T> implements IParameterConsumer {
@Override
public void consumeParameters(Stack<String> args, ArgSpec argSpec, CommandSpec cmdSpec) {
if (failIfEmptyArgs() && args.isEmpty()) {
throw new ParameterException(cmdSpec.commandLine(), "Error: missing required parameter");
}
T cmd = (T) cmdSpec.userObject();
doConsumeParameters(args, cmd);
}
protected abstract void doConsumeParameters(Stack<String> args, T cmd);
protected boolean failIfEmptyArgs() {
return true;
}
}
}
| ParameterConsumer |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/AEHuangliang2Test.java | {
"start": 495,
"end": 18054
} | class ____ extends TestCase {
static String jsonData = "{\n" +
" \"areas\": [\n" +
" {\n" +
" \"@type\": \"section\",\n" +
" \"templateId\": \"grid\",\n" +
" \"style\": {\n" +
" \"card\" : \"true\",\n" +
" \"column-count\":\"2\",\n" +
" \"aspect-ratio\":\"2\",\n" +
" \"margins\":\"16 0 16 16\",\n" +
" \"background-color\": \"#ffffff\",\n" +
" \"column-gap\": \"10\"\n" +
" },\n" +
" \"children\": [\n" +
" {\n" +
" \"@type\": \"section\",\n" +
" \"templateId\": \"grid\",\n" +
" \"style\": {\n" +
" \"card\" : \"true\",\n" +
" \"column-count\":\"2\",\n" +
" \"aspect-ratio\":\"2\",\n" +
" \"margins\":\"16 0 16 16\",\n" +
" \"background-color\": \"#ffffff\",\n" +
" \"column-gap\": \"10\"\n" +
" },\n" +
" \"children\": [\n" +
" {\n" +
" \"@type\": \"floorV2\",\n" +
" \"templateId\": \"base\",\n" +
" \"image\": \"http://xxx\",\n" +
" \"fields\": [\n" +
" {\n" +
" \"index\": 0,\n" +
" \"value\": \"xxxx\",\n" +
" \"type\": \"text\",\n" +
" \"track\": {\n" +
" \"name\": \"track name\",\n" +
" \"params\": {\n" +
" \"trackParam1\": \"trackParam1\"\n" +
" }\n" +
" },\n" +
" \"extInfo\": {\n" +
" \"likeByMe\": \"true\",\n" +
" \"isFollowed\": \"true\"\n" +
" },\n" +
" \"action\": {\n" +
" \"type\": \"click\",\n" +
" \"action\": \"aecmd://nativie/invokeApi?name=key1&likeId=111&likeByMe=true\"\n" +
" }\n" +
" }\n" +
" ],\n" +
" \"bizId\": \"banner-myae-1-746877468\",\n" +
" \"style\": {\n" +
" \"card\" : \"true\",\n" +
" \"background-color\": \"#000000\"\n" +
" },\n" +
" \"isTest\": false\n" +
" },\n" +
" {\n" +
" \"@type\": \"floorV2\",\n" +
" \"templateId\": \"base\",\n" +
" \"image\": \"http://xxx\",\n" +
" \"fields\": [\n" +
" {\n" +
" \"index\": 0,\n" +
" \"value\": \"xxxx\",\n" +
" \"type\": \"text\",\n" +
" \"track\": {\n" +
" \"name\": \"track name\",\n" +
" \"params\": {\n" +
" \"trackParam1\": \"trackParam1\"\n" +
" }\n" +
" },\n" +
" \"action\": {\n" +
" \"type\": \"click\",\n" +
" \"action\": \"aecmd://xxxx\"\n" +
" }\n" +
" }\n" +
" ],\n" +
" \"extInfo\": {\n" +
" \"likeByMe\": \"true\"\n" +
" },\n" +
" \"bizId\": \"banner-myae-1-746877468\",\n" +
" \"style\": {\n" +
" \"card\" : \"true\",\n" +
" \"background-color\": \"#ffc1c1\"\n" +
" },\n" +
" \"isTest\": false\n" +
" }\n" +
" ]\n" +
" },\n" +
" {\n" +
" \"@type\": \"floorV2\",\n" +
" \"templateId\": \"base\",\n" +
" \"image\": \"http://xxx\",\n" +
" \"fields\": [\n" +
" {\n" +
" \"index\": 0,\n" +
" \"value\": \"xxxx\",\n" +
" \"type\": \"text\",\n" +
" \"track\": {\n" +
" \"name\": \"track name\",\n" +
" \"params\": {\n" +
" \"trackParam1\": \"trackParam1\"\n" +
" }\n" +
" },\n" +
" \"extInfo\": {\n" +
" \"likeByMe\": \"true\",\n" +
" \"isFollowed\": \"true\"\n" +
" },\n" +
" \"action\": {\n" +
" \"type\": \"click\",\n" +
" \"action\": \"aecmd://nativie/invokeApi?name=key1&likeId=111&likeByMe=true\"\n" +
" }\n" +
" }\n" +
" ],\n" +
" \"bizId\": \"banner-myae-1-746877468\",\n" +
" \"style\": {\n" +
" \"card\" : \"true\",\n" +
" \"background-color\": \"#000000\"\n" +
" },\n" +
" \"isTest\": false\n" +
" },\n" +
" {\n" +
" \"@type\": \"floorV2\",\n" +
" \"templateId\": \"base\",\n" +
" \"image\": \"http://xxx\",\n" +
" \"fields\": [\n" +
" {\n" +
" \"index\": 0,\n" +
" \"value\": \"xxxx\",\n" +
" \"type\": \"text\",\n" +
" \"track\": {\n" +
" \"name\": \"track name\",\n" +
" \"params\": {\n" +
" \"trackParam1\": \"trackParam1\"\n" +
" }\n" +
" },\n" +
" \"action\": {\n" +
" \"type\": \"click\",\n" +
" \"action\": \"aecmd://xxxx\"\n" +
" }\n" +
" }\n" +
" ],\n" +
" \"extInfo\": {\n" +
" \"likeByMe\": \"true\"\n" +
" },\n" +
" \"bizId\": \"banner-myae-1-746877468\",\n" +
" \"style\": {\n" +
" \"card\" : \"true\",\n" +
" \"background-color\": \"#ffc1c1\"\n" +
" },\n" +
" \"isTest\": false\n" +
" }\n" +
" ]\n" +
" }\n" +
" ],\n" +
" \"version\": 3,\n" +
" \"currency\": \"RUB\"\n" +
" }";
static String floordata = "{\n" +
" \"isTest\": true,\n" +
" \"mockResult\": {\n" +
" \"body\": {\n" +
" \"areas\": [\n" +
" {\n" +
" \"@type\": \"section\",\n" +
" \"templateId\": \"grid\",\n" +
" \"style\": {\n" +
" \"card\" : \"true\",\n" +
" \"column-count\":\"2\",\n" +
" \"aspect-ratio\":\"2\",\n" +
" \"margins\":\"16 0 16 16\",\n" +
" \"background-color\": \"#ffffff\",\n" +
" \"column-gap\": \"10\"\n" +
" },\n" +
" \"children\": [\n" +
" {\n" +
" \"@type\": \"section\",\n" +
" \"templateId\": \"grid\",\n" +
" \"style\": {\n" +
" \"card\" : \"true\",\n" +
" \"column-count\":\"2\",\n" +
" \"aspect-ratio\":\"2\",\n" +
" \"margins\":\"16 0 16 16\",\n" +
" \"background-color\": \"#ffffff\",\n" +
" \"column-gap\": \"10\"\n" +
" },\n" +
" \"children\": [\n" +
" {\n" +
" \"@type\": \"floorV2\",\n" +
" \"templateId\": \"base\",\n" +
" \"image\": \"http://xxx\",\n" +
" \"fields\": [\n" +
" {\n" +
" \"index\": 0,\n" +
" \"value\": \"xxxx\",\n" +
" \"type\": \"text\",\n" +
" \"track\": {\n" +
" \"name\": \"track name\",\n" +
" \"params\": {\n" +
" \"trackParam1\": \"trackParam1\"\n" +
" }\n" +
" },\n" +
" \"extInfo\": {\n" +
" \"likeByMe\": \"true\",\n" +
" \"isFollowed\": \"true\"\n" +
" },\n" +
" \"action\": {\n" +
" \"type\": \"click\",\n" +
" \"action\": \"aecmd://nativie/invokeApi?name=key1&likeId=111&likeByMe=true\"\n" +
" }\n" +
" }\n" +
" ],\n" +
" \"bizId\": \"banner-myae-1-746877468\",\n" +
" \"style\": {\n" +
" \"card\" : \"true\",\n" +
" \"background-color\": \"#000000\"\n" +
" },\n" +
" \"isTest\": false\n" +
" },\n" +
" {\n" +
" \"@type\": \"floorV2\",\n" +
" \"templateId\": \"base\",\n" +
" \"image\": \"http://xxx\",\n" +
" \"fields\": [\n" +
" {\n" +
" \"index\": 0,\n" +
" \"value\": \"xxxx\",\n" +
" \"type\": \"text\",\n" +
" \"track\": {\n" +
" \"name\": \"track name\",\n" +
" \"params\": {\n" +
" \"trackParam1\": \"trackParam1\"\n" +
" }\n" +
" },\n" +
" \"action\": {\n" +
" \"type\": \"click\",\n" +
" \"action\": \"aecmd://xxxx\"\n" +
" }\n" +
" }\n" +
" ],\n" +
" \"extInfo\": {\n" +
" \"likeByMe\": \"true\"\n" +
" },\n" +
" \"bizId\": \"banner-myae-1-746877468\",\n" +
" \"style\": {\n" +
" \"card\" : \"true\",\n" +
" \"background-color\": \"#ffc1c1\"\n" +
" },\n" +
" \"isTest\": false\n" +
" }\n" +
" ]\n" +
" },\n" +
" {\n" +
" \"@type\": \"floorV2\",\n" +
" \"templateId\": \"base\",\n" +
" \"image\": \"http://xxx\",\n" +
" \"fields\": [\n" +
" {\n" +
" \"index\": 0,\n" +
" \"value\": \"xxxx\",\n" +
" \"type\": \"text\",\n" +
" \"track\": {\n" +
" \"name\": \"track name\",\n" +
" \"params\": {\n" +
" \"trackParam1\": \"trackParam1\"\n" +
" }\n" +
" },\n" +
" \"extInfo\": {\n" +
" \"likeByMe\": \"true\",\n" +
" \"isFollowed\": \"true\"\n" +
" },\n" +
" \"action\": {\n" +
" \"type\": \"click\",\n" +
" \"action\": \"aecmd://nativie/invokeApi?name=key1&likeId=111&likeByMe=true\"\n" +
" }\n" +
" }\n" +
" ],\n" +
" \"bizId\": \"banner-myae-1-746877468\",\n" +
" \"style\": {\n" +
" \"card\" : \"true\",\n" +
" \"background-color\": \"#000000\"\n" +
" },\n" +
" \"isTest\": false\n" +
" },\n" +
" {\n" +
" \"@type\": \"floorV2\",\n" +
" \"templateId\": \"base\",\n" +
" \"image\": \"http://xxx\",\n" +
" \"fields\": [\n" +
" {\n" +
" \"index\": 0,\n" +
" \"value\": \"xxxx\",\n" +
" \"type\": \"text\",\n" +
" \"track\": {\n" +
" \"name\": \"track name\",\n" +
" \"params\": {\n" +
" \"trackParam1\": \"trackParam1\"\n" +
" }\n" +
" },\n" +
" \"action\": {\n" +
" \"type\": \"click\",\n" +
" \"action\": \"aecmd://xxxx\"\n" +
" }\n" +
" }\n" +
" ],\n" +
" \"extInfo\": {\n" +
" \"likeByMe\": \"true\"\n" +
" },\n" +
" \"bizId\": \"banner-myae-1-746877468\",\n" +
" \"style\": {\n" +
" \"card\" : \"true\",\n" +
" \"background-color\": \"#ffc1c1\"\n" +
" },\n" +
" \"isTest\": false\n" +
" }\n" +
" ]\n" +
" }\n" +
" ],\n" +
" \"version\": 3,\n" +
" \"currency\": \"RUB\"\n" +
" },\n" +
" \"head\": {\n" +
" \"message\": \"\",\n" +
" \"serverTime\": 1489473042814,\n" +
" \"code\": \"200\",\n" +
" \"ab\": \"yepxf_B\"\n" +
" }\n" +
"}\n" +
"}";
public void test_for_issue() throws Exception {
ParserConfig.getGlobalInstance().putDeserializer(Area.class, new ObjectDeserializer() {
public <T> T deserialze(DefaultJSONParser parser, Type type, Object fieldName) {
JSONObject jsonObject = (JSONObject) parser.parse();
String areaType;
if (jsonObject.get("type") instanceof String) {
areaType = (String) jsonObject.get("type");
} else {
return null;
}
if (Area.TYPE_SECTION.equals(areaType)) {
String text = jsonObject.toJSONString();
return (T) JSON.parseObject(text, Section.class);
} else if (Area.TYPE_FLOORV1.equals(areaType)) {
String text = jsonObject.toJSONString();
return (T) JSON.parseObject(text, FloorV1.class);
} else if (Area.TYPE_FLOORV2.equals(areaType)) {
String text = jsonObject.toJSONString();
return (T) JSON.parseObject(text, FloorV2.class);
}
return null;
}
public int getFastMatchToken() {
return JSONToken.LBRACE;
}
});
ParserConfig.getGlobalInstance().addAccept("section");
ParserConfig.getGlobalInstance().addAccept("floorV2");
MockResult data = JSON.parseObject(floordata, MockResult.class);
String mockResultJson = JSON.toJSONString(data.mockResult);
NetResponse response = JSON.parseObject(mockResultJson, NetResponse.class);
String bodyJson = JSON.toJSONString(response.body);
System.out.println(bodyJson);
FloorPageData pageData = JSON.parseObject(bodyJson, FloorPageData.class);
assertNotNull(pageData.areas);
}
}
| AEHuangliang2Test |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/context/annotation/CommonAnnotationBeanPostProcessorTests.java | {
"start": 25140,
"end": 25526
} | interface ____ {
@Resource
void setTestBean2(TestBean testBean2);
@Resource
default void setTestBean7(INestedTestBean testBean7) {
increaseCounter();
}
@PostConstruct
default void initDefault() {
increaseCounter();
}
@PreDestroy
default void destroyDefault() {
increaseCounter();
}
void increaseCounter();
}
public static | InterfaceWithDefaultMethod |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/aot/hint/JdkProxyHintTests.java | {
"start": 965,
"end": 2913
} | class ____ {
@Test
void equalsWithSameInstanceIsTrue() {
JdkProxyHint hint = new Builder().proxiedInterfaces(Function.class, Consumer.class).build();
assertThat(hint).isEqualTo(hint);
}
@Test
void equalsWithSameProxiedInterfacesIsTrue() {
JdkProxyHint first = new Builder().proxiedInterfaces(Function.class, Consumer.class).build();
JdkProxyHint second = new Builder().proxiedInterfaces(TypeReference.of(Function.class.getName()),
TypeReference.of(Consumer.class)).build();
assertThat(first).isEqualTo(second);
}
@Test
void equalsWithSameProxiedInterfacesAndDifferentConditionIsFalse() {
JdkProxyHint first = new Builder().proxiedInterfaces(Function.class, Consumer.class)
.onReachableType(TypeReference.of(String.class)).build();
JdkProxyHint second = new Builder().proxiedInterfaces(TypeReference.of(Function.class.getName()),
TypeReference.of(Consumer.class)).onReachableType(TypeReference.of(Function.class)).build();
assertThat(first).isNotEqualTo(second);
}
@Test
void equalsWithSameProxiedInterfacesDifferentOrderIsFalse() {
JdkProxyHint first = new Builder().proxiedInterfaces(Function.class, Consumer.class).build();
JdkProxyHint second = new Builder().proxiedInterfaces(TypeReference.of(Consumer.class),
TypeReference.of(Function.class.getName())).build();
assertThat(first).isNotEqualTo(second);
}
@Test
void equalsWithDifferentProxiedInterfacesIsFalse() {
JdkProxyHint first = new Builder().proxiedInterfaces(Function.class).build();
JdkProxyHint second = new Builder().proxiedInterfaces(TypeReference.of(Function.class.getName()),
TypeReference.of(Consumer.class)).build();
assertThat(first).isNotEqualTo(second);
}
@Test
void equalsWithNonJdkProxyHintIsFalse() {
JdkProxyHint first = new Builder().proxiedInterfaces(Function.class).build();
TypeReference second = TypeReference.of(Function.class);
assertThat(first).isNotEqualTo(second);
}
}
| JdkProxyHintTests |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/minicluster/TestingMiniCluster.java | {
"start": 2474,
"end": 10007
} | class ____ {
private final TestingMiniClusterConfiguration configuration;
@Nullable private Supplier<HighAvailabilityServices> highAvailabilityServicesSupplier;
@Nullable
private Supplier<DispatcherResourceManagerComponentFactory>
dispatcherResourceManagerComponentFactorySupplier;
public Builder(TestingMiniClusterConfiguration configuration) {
this.configuration = configuration;
}
public Builder setHighAvailabilityServicesSupplier(
@Nullable Supplier<HighAvailabilityServices> highAvailabilityServicesSupplier) {
this.highAvailabilityServicesSupplier = highAvailabilityServicesSupplier;
return this;
}
public Builder setDispatcherResourceManagerComponentFactorySupplier(
@Nullable
Supplier<DispatcherResourceManagerComponentFactory>
dispatcherResourceManagerComponentFactorySupplier) {
this.dispatcherResourceManagerComponentFactorySupplier =
dispatcherResourceManagerComponentFactorySupplier;
return this;
}
public TestingMiniCluster build() {
return new TestingMiniCluster(
configuration,
highAvailabilityServicesSupplier,
dispatcherResourceManagerComponentFactorySupplier);
}
}
private final int numberDispatcherResourceManagerComponents;
private final boolean localCommunication;
@Nullable private final Supplier<HighAvailabilityServices> highAvailabilityServicesSupplier;
@Nullable
private final Supplier<DispatcherResourceManagerComponentFactory>
dispatcherResourceManagerComponentFactorySupplier;
private TestingMiniCluster(
TestingMiniClusterConfiguration miniClusterConfiguration,
@Nullable Supplier<HighAvailabilityServices> highAvailabilityServicesSupplier,
@Nullable
Supplier<DispatcherResourceManagerComponentFactory>
dispatcherResourceManagerComponentFactorySupplier) {
super(miniClusterConfiguration);
this.numberDispatcherResourceManagerComponents =
miniClusterConfiguration.getNumberDispatcherResourceManagerComponents();
this.highAvailabilityServicesSupplier = highAvailabilityServicesSupplier;
this.dispatcherResourceManagerComponentFactorySupplier =
dispatcherResourceManagerComponentFactorySupplier;
this.localCommunication = miniClusterConfiguration.isLocalCommunication();
}
@Override
protected boolean useLocalCommunication() {
return localCommunication;
}
@Override
protected HighAvailabilityServices createHighAvailabilityServices(
Configuration configuration, Executor executor) throws Exception {
if (highAvailabilityServicesSupplier != null) {
return highAvailabilityServicesSupplier.get();
} else {
return super.createHighAvailabilityServices(configuration, executor);
}
}
@Override
protected DispatcherResourceManagerComponentFactory
createDispatcherResourceManagerComponentFactory() {
if (dispatcherResourceManagerComponentFactorySupplier != null) {
return dispatcherResourceManagerComponentFactorySupplier.get();
} else {
return super.createDispatcherResourceManagerComponentFactory();
}
}
@Override
protected Collection<? extends DispatcherResourceManagerComponent>
createDispatcherResourceManagerComponents(
Configuration configuration,
RpcServiceFactory rpcServiceFactory,
BlobServer blobServer,
HeartbeatServices heartbeatServices,
DelegationTokenManager delegationTokenManager,
MetricRegistry metricRegistry,
MetricQueryServiceRetriever metricQueryServiceRetriever,
FatalErrorHandler fatalErrorHandler)
throws Exception {
DispatcherResourceManagerComponentFactory dispatcherResourceManagerComponentFactory =
createDispatcherResourceManagerComponentFactory();
final List<DispatcherResourceManagerComponent> result =
new ArrayList<>(numberDispatcherResourceManagerComponents);
for (int i = 0; i < numberDispatcherResourceManagerComponents; i++) {
// FLINK-24038 relies on the fact that there is only one leader election instance per
// JVM that is freed when the JobManager stops. This is simulated in the
// TestingMiniCluster by providing individual HighAvailabilityServices per
// DispatcherResourceManagerComponent to allow running more-than-once JobManager tests
final HighAvailabilityServices thisHaServices =
createHighAvailabilityServices(configuration, getIOExecutor());
final DispatcherResourceManagerComponent dispatcherResourceManagerComponent =
dispatcherResourceManagerComponentFactory.create(
configuration,
ResourceID.generate(),
getIOExecutor(),
rpcServiceFactory.createRpcService(),
thisHaServices,
blobServer,
heartbeatServices,
delegationTokenManager,
metricRegistry,
new MemoryExecutionGraphInfoStore(),
metricQueryServiceRetriever,
Collections.emptySet(),
fatalErrorHandler);
final CompletableFuture<Void> shutDownFuture =
dispatcherResourceManagerComponent
.getShutDownFuture()
.thenCompose(
applicationStatus ->
dispatcherResourceManagerComponent.stopApplication(
applicationStatus, null))
.thenRun(
() -> {
try {
// The individual HighAvailabilityServices have to be
// closed explicitly to trigger the revocation of the
// leadership when shutting down the JobManager
thisHaServices.close();
} catch (Exception ex) {
throw new CompletionException(
"HighAvailabilityServices were not expected to fail but did",
ex);
}
});
FutureUtils.assertNoException(shutDownFuture);
result.add(dispatcherResourceManagerComponent);
}
return result;
}
@Override
public CompletableFuture<DispatcherGateway> getDispatcherGatewayFuture() {
return super.getDispatcherGatewayFuture();
}
}
| Builder |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/function/FailableObjDoubleConsumer.java | {
"start": 915,
"end": 1158
} | interface ____ {@link ObjDoubleConsumer} that declares a {@link Throwable}.
*
* @param <T> the type of the object argument to the operation.
* @param <E> The kind of thrown exception or error.
* @since 3.11
*/
@FunctionalInterface
public | like |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/tenantid/TenantIdToOneBidirectionalTest.java | {
"start": 1546,
"end": 3674
} | class ____ implements SessionFactoryProducer {
private static String currentTenant;
@Test
public void testExistingRoot(SessionFactoryScope scope) {
currentTenant = "tenant_1";
scope.inTransaction( session -> {
final var child = session.find( ChildEntity.class, 1L );
assertThat( child.getRoot() ).isNotNull().extracting( RootEntity::getChild ).isSameAs( child );
} );
}
@Test
public void testRemovedRoot(SessionFactoryScope scope) {
currentTenant = "tenant_2";
scope.inTransaction( session -> {
final var child = session.find( ChildEntity.class, 2L );
assertThat( child.getRoot() ).isNull();
} );
}
@Test
public void testNoRoot(SessionFactoryScope scope) {
currentTenant = "tenant_3";
scope.inTransaction( session -> {
final var child = session.find( ChildEntity.class, 3L );
assertThat( child.getRoot() ).isNull();
} );
}
@BeforeAll
public void setUp(SessionFactoryScope scope) {
currentTenant = "tenant_1";
scope.inTransaction( session -> {
final var withChild = new RootEntity( 1L );
withChild.setChild( new ChildEntity( 1L ) );
session.persist( withChild );
} );
currentTenant = "tenant_2";
scope.inTransaction( session -> {
final var deletedRoot = new RootEntity( 2L );
final var child = new ChildEntity( 2L );
deletedRoot.setChild( child );
session.persist( deletedRoot );
session.flush();
session.clear();
session.remove( deletedRoot );
} );
currentTenant = "tenant_3";
scope.inTransaction( session -> session.persist( new ChildEntity( 3L ) ) );
}
@Override
public SessionFactoryImplementor produceSessionFactory(MetadataImplementor model) {
final SessionFactoryBuilder sfb = model.getSessionFactoryBuilder();
sfb.applyCurrentTenantIdentifierResolver( new CurrentTenantIdentifierResolver<String>() {
@Override
public String resolveCurrentTenantIdentifier() {
return currentTenant;
}
@Override
public boolean validateExistingCurrentSessions() {
return false;
}
} );
return (SessionFactoryImplementor) sfb.build();
}
@Entity( name = "RootEntity" )
static | TenantIdToOneBidirectionalTest |
java | quarkusio__quarkus | integration-tests/main/src/test/java/io/quarkus/it/main/SimpleBeanITCase.java | {
"start": 114,
"end": 169
} | class ____ extends SimpleBeanTestCase {
}
| SimpleBeanITCase |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/SedaComponentBuilderFactory.java | {
"start": 10359,
"end": 12107
} | class ____
extends AbstractComponentBuilder<SedaComponent>
implements SedaComponentBuilder {
@Override
protected SedaComponent buildConcreteComponent() {
return new SedaComponent();
}
@Override
protected boolean setPropertyOnComponent(
Component component,
String name,
Object value) {
switch (name) {
case "bridgeErrorHandler": ((SedaComponent) component).setBridgeErrorHandler((boolean) value); return true;
case "concurrentConsumers": ((SedaComponent) component).setConcurrentConsumers((int) value); return true;
case "defaultPollTimeout": ((SedaComponent) component).setDefaultPollTimeout((int) value); return true;
case "lazyStartProducer": ((SedaComponent) component).setLazyStartProducer((boolean) value); return true;
case "defaultBlockWhenFull": ((SedaComponent) component).setDefaultBlockWhenFull((boolean) value); return true;
case "defaultDiscardWhenFull": ((SedaComponent) component).setDefaultDiscardWhenFull((boolean) value); return true;
case "defaultOfferTimeout": ((SedaComponent) component).setDefaultOfferTimeout((long) value); return true;
case "autowiredEnabled": ((SedaComponent) component).setAutowiredEnabled((boolean) value); return true;
case "defaultQueueFactory": ((SedaComponent) component).setDefaultQueueFactory((org.apache.camel.component.seda.BlockingQueueFactory) value); return true;
case "queueSize": ((SedaComponent) component).setQueueSize((int) value); return true;
default: return false;
}
}
}
} | SedaComponentBuilderImpl |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/models/annotations/internal/NamedEntityGraphAnnotation.java | {
"start": 355,
"end": 1477
} | class ____ implements NamedEntityGraph {
private String name;
private String graph;
/**
* Used in creating dynamic annotation instances (e.g. from XML)
*/
public NamedEntityGraphAnnotation(ModelsContext modelContext) {
name = "";
}
/**
* Used in creating annotation instances from JDK variant
*/
public NamedEntityGraphAnnotation(NamedEntityGraph annotation, ModelsContext modelContext) {
this.name = annotation.name();
this.graph = annotation.graph();
}
/**
* Used in creating annotation instances from Jandex variant
*/
public NamedEntityGraphAnnotation(Map<String, Object> attributeValues, ModelsContext modelContext) {
this.name = (String) attributeValues.get( "name" );
this.graph = (String) attributeValues.get( "graph" );
}
@Override
public Class<? extends Annotation> annotationType() {
return NamedEntityGraph.class;
}
@Override
public String name() {
return name;
}
public void name(String name) {
this.name = name;
}
@Override
public String graph() {
return graph;
}
public void graph(String graph) {
this.graph = graph;
}
}
| NamedEntityGraphAnnotation |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/fetching/FetchModeSubselectTest.java | {
"start": 2481,
"end": 3293
} | class ____ {
@Id
private Long id;
private String name;
//tag::fetching-strategies-fetch-mode-subselect-mapping-example[]
@OneToMany(mappedBy = "department", fetch = FetchType.LAZY)
@Fetch(FetchMode.SUBSELECT)
private List<Employee> employees = new ArrayList<>();
//end::fetching-strategies-fetch-mode-subselect-mapping-example[]
//Getters and setters omitted for brevity
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public List<Employee> getEmployees() {
return employees;
}
public void setEmployees(List<Employee> employees) {
this.employees = employees;
}
}
@Entity(name = "Employee")
public static | Department |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/injection/guice/spi/Dependency.java | {
"start": 1053,
"end": 3508
} | class ____<T> {
private final InjectionPoint injectionPoint;
private final Key<T> key;
private final boolean nullable;
private final int parameterIndex;
Dependency(InjectionPoint injectionPoint, Key<T> key, boolean nullable, int parameterIndex) {
this.injectionPoint = injectionPoint;
this.key = key;
this.nullable = nullable;
this.parameterIndex = parameterIndex;
}
/**
* Returns a new dependency that is not attached to an injection point. The returned dependency is
* nullable.
*/
public static <T> Dependency<T> get(Key<T> key) {
return new Dependency<>(null, key, true, -1);
}
/**
* Returns the key to the binding that satisfies this dependency.
*/
public Key<T> getKey() {
return this.key;
}
/**
* Returns true if null is a legal value for this dependency.
*/
public boolean isNullable() {
return nullable;
}
/**
* Returns the injection point to which this dependency belongs, or null if this dependency isn't
* attached to a particular injection point.
*/
public InjectionPoint getInjectionPoint() {
return injectionPoint;
}
/**
* Returns the index of this dependency in the injection point's parameter list, or {@code -1} if
* this dependency does not belong to a parameter list. Only method and constructor dependencies
* are elements in a parameter list.
*/
public int getParameterIndex() {
return parameterIndex;
}
@Override
public int hashCode() {
return Objects.hash(injectionPoint, parameterIndex, key);
}
@Override
public boolean equals(Object o) {
if (o instanceof Dependency<?> dependency) {
return Objects.equals(injectionPoint, dependency.injectionPoint)
&& Objects.equals(parameterIndex, dependency.parameterIndex)
&& Objects.equals(key, dependency.key);
} else {
return false;
}
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append(key);
if (injectionPoint != null) {
builder.append("@").append(injectionPoint);
if (parameterIndex != -1) {
builder.append("[").append(parameterIndex).append("]");
}
}
return builder.toString();
}
}
| Dependency |
java | apache__camel | core/camel-core-model/src/main/java/org/apache/camel/model/validator/ValidatorsDefinition.java | {
"start": 1388,
"end": 2238
} | class ____ {
@XmlElements({
@XmlElement(name = "endpointValidator", type = EndpointValidatorDefinition.class),
@XmlElement(name = "predicateValidator", type = PredicateValidatorDefinition.class),
@XmlElement(name = "customValidator", type = CustomValidatorDefinition.class) })
private List<ValidatorDefinition> validators;
public ValidatorsDefinition() {
}
protected ValidatorsDefinition(ValidatorsDefinition source) {
this.validators = ProcessorDefinitionHelper.deepCopyDefinitions(source.validators);
}
/**
* The configured transformers
*/
public void setValidators(List<ValidatorDefinition> validators) {
this.validators = validators;
}
public List<ValidatorDefinition> getValidators() {
return validators;
}
}
| ValidatorsDefinition |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/rest/messages/job/metrics/TaskManagerMetricsMessageParameters.java | {
"start": 1287,
"end": 1624
} | class ____ extends TaskManagerMessageParameters {
public final MetricsFilterParameter metricsFilterParameter = new MetricsFilterParameter();
@Override
public Collection<MessageQueryParameter<?>> getQueryParameters() {
return Collections.singletonList(metricsFilterParameter);
}
}
| TaskManagerMetricsMessageParameters |
java | google__dagger | javatests/artifacts/dagger/build-tests/src/test/java/buildtests/TransitiveBindsQualifierTest.java | {
"start": 4735,
"end": 4812
} | interface ____ {",
" @Component.Factory",
" | MyComponent |
java | google__dagger | hilt-android/main/java/dagger/hilt/android/internal/migration/HasCustomInject.java | {
"start": 711,
"end": 781
} | interface ____ application's using
* {@code CustomInject}.
*/
public | for |
java | spring-projects__spring-framework | spring-orm/src/main/java/org/springframework/orm/jpa/AbstractEntityManagerFactoryBean.java | {
"start": 10033,
"end": 11163
} | interface ____.
* @see JpaVendorAdapter#getEntityManagerInterface()
* @see EntityManagerFactoryInfo#getEntityManagerInterface()
*/
public void setEntityManagerInterface(@Nullable Class<? extends EntityManager> emInterface) {
this.entityManagerInterface = emInterface;
}
@Override
public @Nullable Class<? extends EntityManager> getEntityManagerInterface() {
return this.entityManagerInterface;
}
/**
* Specify the vendor-specific JpaDialect implementation to associate with
* this EntityManagerFactory. This will be exposed through the
* EntityManagerFactoryInfo interface, to be picked up as default dialect by
* accessors that intend to use JpaDialect functionality.
* @see EntityManagerFactoryInfo#getJpaDialect()
*/
public void setJpaDialect(@Nullable JpaDialect jpaDialect) {
this.jpaDialect = jpaDialect;
}
@Override
public @Nullable JpaDialect getJpaDialect() {
return this.jpaDialect;
}
/**
* Specify the JpaVendorAdapter implementation for the desired JPA provider,
* if any. This will initialize appropriate defaults for the given provider,
* such as persistence provider | else |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/DeadThreadTest.java | {
"start": 855,
"end": 1130
} | class ____ {
private final CompilationTestHelper testHelper =
CompilationTestHelper.newInstance(DeadThread.class, getClass());
@Test
public void positive() {
testHelper
.addSourceLines(
"Test.java",
"""
| DeadThreadTest |
java | elastic__elasticsearch | x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/TransportMountSearchableSnapshotAction.java | {
"start": 3320,
"end": 13572
} | class ____ extends TransportMasterNodeAction<
MountSearchableSnapshotRequest,
RestoreSnapshotResponse> {
private static final Collection<Setting<String>> DATA_TIER_ALLOCATION_SETTINGS = List.of(DataTier.TIER_PREFERENCE_SETTING);
private final Client client;
private final RepositoriesService repositoriesService;
private final XPackLicenseState licenseState;
private final SystemIndices systemIndices;
@Inject
public TransportMountSearchableSnapshotAction(
TransportService transportService,
ClusterService clusterService,
Client client,
ThreadPool threadPool,
RepositoriesService repositoriesService,
ActionFilters actionFilters,
XPackLicenseState licenseState,
SystemIndices systemIndices
) {
super(
MountSearchableSnapshotAction.NAME,
transportService,
clusterService,
threadPool,
actionFilters,
MountSearchableSnapshotRequest::new,
RestoreSnapshotResponse::new,
// Use SNAPSHOT_META pool since we are slow due to loading repository metadata in this action
threadPool.executor(ThreadPool.Names.SNAPSHOT_META)
);
this.client = client;
this.repositoriesService = repositoriesService;
this.licenseState = Objects.requireNonNull(licenseState);
this.systemIndices = Objects.requireNonNull(systemIndices);
}
@Override
protected ClusterBlockException checkBlock(MountSearchableSnapshotRequest request, ClusterState state) {
// The restore action checks the cluster blocks.
return null;
}
/**
* Return the index settings required to make a snapshot searchable
*/
private static Settings buildIndexSettings(
String repoUuid,
String repoName,
SnapshotId snapshotId,
IndexId indexId,
MountSearchableSnapshotRequest.Storage storage
) {
final Settings.Builder settings = Settings.builder();
if (repoUuid.equals(RepositoryData.MISSING_UUID) == false) {
settings.put(SearchableSnapshots.SNAPSHOT_REPOSITORY_UUID_SETTING.getKey(), repoUuid);
}
settings.put(SearchableSnapshots.SNAPSHOT_REPOSITORY_NAME_SETTING.getKey(), repoName)
.put(SearchableSnapshots.SNAPSHOT_SNAPSHOT_NAME_SETTING.getKey(), snapshotId.getName())
.put(SearchableSnapshots.SNAPSHOT_SNAPSHOT_ID_SETTING.getKey(), snapshotId.getUUID())
.put(SearchableSnapshots.SNAPSHOT_INDEX_NAME_SETTING.getKey(), indexId.getName())
.put(SearchableSnapshots.SNAPSHOT_INDEX_ID_SETTING.getKey(), indexId.getId())
.put(INDEX_STORE_TYPE_SETTING.getKey(), SEARCHABLE_SNAPSHOT_STORE_TYPE)
.put(IndexMetadata.SETTING_BLOCKS_WRITE, true)
.put(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING.getKey(), SearchableSnapshotAllocator.ALLOCATOR_NAME)
.put(INDEX_RECOVERY_TYPE_SETTING.getKey(), SearchableSnapshots.SNAPSHOT_RECOVERY_STATE_FACTORY_KEY);
if (storage == MountSearchableSnapshotRequest.Storage.SHARED_CACHE) {
settings.put(SearchableSnapshotsSettings.SNAPSHOT_PARTIAL_SETTING.getKey(), true)
.put(DiskThresholdDecider.SETTING_IGNORE_DISK_WATERMARKS.getKey(), true);
settings.put(ShardLimitValidator.INDEX_SETTING_SHARD_LIMIT_GROUP.getKey(), ShardLimitValidator.FROZEN_GROUP);
}
return settings.build();
}
@Override
protected void masterOperation(
Task task,
final MountSearchableSnapshotRequest request,
final ClusterState state,
final ActionListener<RestoreSnapshotResponse> listener
) {
SearchableSnapshots.ensureValidLicense(licenseState);
final String mountedIndexName = request.mountedIndexName();
if (systemIndices.isSystemIndex(mountedIndexName)) {
throw new ElasticsearchException("system index [{}] cannot be mounted as searchable snapshots", mountedIndexName);
}
final String repoName = request.repositoryName();
final String snapName = request.snapshotName();
final String indexName = request.snapshotIndexName();
// Retrieve IndexId and SnapshotId instances, which are then used to create a new restore
// request, which is then sent on to the actual snapshot restore mechanism
final Repository repository = repositoriesService.repository(repoName);
SearchableSnapshots.getSearchableRepository(repository); // just check it's valid
final ListenableFuture<RepositoryData> repositoryDataListener = new ListenableFuture<>();
repository.getRepositoryData(
EsExecutors.DIRECT_EXECUTOR_SERVICE, // TODO fork to SNAPSHOT_META and drop the forking below, see #101445
repositoryDataListener
);
repositoryDataListener.addListener(listener.delegateFailureAndWrap((delegate, repoData) -> {
final Map<String, IndexId> indexIds = repoData.getIndices();
if (indexIds.containsKey(indexName) == false) {
throw new IndexNotFoundException("index [" + indexName + "] not found in repository [" + repoName + "]");
}
final IndexId indexId = indexIds.get(indexName);
final Optional<SnapshotId> matchingSnapshotId = repoData.getSnapshotIds()
.stream()
.filter(s -> snapName.equals(s.getName()))
.findFirst();
if (matchingSnapshotId.isEmpty()) {
throw new ElasticsearchException("snapshot [" + snapName + "] not found in repository [" + repoName + "]");
}
final SnapshotId snapshotId = matchingSnapshotId.get();
final IndexMetadata indexMetadata = repository.getSnapshotIndexMetaData(repoData, snapshotId, indexId);
if (indexMetadata.isSearchableSnapshot()) {
throw new IllegalArgumentException(
String.format(
Locale.ROOT,
"index [%s] in snapshot [%s/%s:%s] is a snapshot of a searchable snapshot index "
+ "backed by index [%s] in snapshot [%s/%s:%s] and cannot be mounted; did you mean to restore it instead?",
indexName,
repoName,
repository.getMetadata().uuid(),
snapName,
SearchableSnapshots.SNAPSHOT_INDEX_NAME_SETTING.get(indexMetadata.getSettings()),
SearchableSnapshots.SNAPSHOT_REPOSITORY_NAME_SETTING.get(indexMetadata.getSettings()),
SearchableSnapshots.SNAPSHOT_REPOSITORY_UUID_SETTING.get(indexMetadata.getSettings()),
SearchableSnapshots.SNAPSHOT_SNAPSHOT_NAME_SETTING.get(indexMetadata.getSettings())
)
);
}
final Set<String> ignoreIndexSettings = new LinkedHashSet<>(Arrays.asList(request.ignoreIndexSettings()));
ignoreIndexSettings.add(IndexMetadata.SETTING_DATA_PATH);
for (final String indexSettingKey : indexMetadata.getSettings().keySet()) {
if (indexSettingKey.startsWith(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_PREFIX)
|| indexSettingKey.startsWith(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_PREFIX)
|| indexSettingKey.startsWith(IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_PREFIX)) {
ignoreIndexSettings.add(indexSettingKey);
}
}
final Settings indexSettings = Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) // can be overridden
.put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, false) // can be overridden
.put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), false) // can be overridden
.put(DataTier.TIER_PREFERENCE, request.storage().defaultDataTiersPreference())
.put(request.indexSettings())
.put(buildIndexSettings(repoData.getUuid(), request.repositoryName(), snapshotId, indexId, request.storage()))
.build();
// todo: restore archives bad settings, for now we verify just the data tiers, since we know their dependencies are available
// in settings
for (Setting<String> dataTierAllocationSetting : DATA_TIER_ALLOCATION_SETTINGS) {
dataTierAllocationSetting.get(indexSettings);
}
RestoreSnapshotRequest restoreSnapshotRequest = new RestoreSnapshotRequest(request.masterNodeTimeout(), repoName, snapName)
// Restore the single index specified
.indices(indexName)
// Always rename it to the desired mounted index name
.renamePattern(".+")
.renameReplacement(mountedIndexName)
// Pass through index settings, adding the index-level settings required to use searchable snapshots
.indexSettings(indexSettings)
// Pass through ignored index settings
.ignoreIndexSettings(ignoreIndexSettings.toArray(new String[0]))
// Don't include global state
.includeGlobalState(false)
// Don't include aliases
.includeAliases(false)
// Pass through the wait-for-completion flag
.waitForCompletion(request.waitForCompletion())
// Fail the restore if the snapshot found above is swapped out from under us before the restore happens
.snapshotUuid(snapshotId.getUUID())
// Log snapshot restore at the DEBUG log level
.quiet(true);
// Specify the mount task as the parent of the refresh task
restoreSnapshotRequest.setParentTask(clusterService.localNode().getId(), task.getId());
client.admin().cluster().restoreSnapshot(restoreSnapshotRequest, delegate);
}), threadPool.executor(ThreadPool.Names.SNAPSHOT_META), null);
}
}
| TransportMountSearchableSnapshotAction |
java | grpc__grpc-java | xds/src/main/java/io/grpc/xds/XdsConfig.java | {
"start": 3145,
"end": 4685
} | class ____ {
private final String clusterName;
private final CdsUpdate clusterResource;
private final ClusterChild children; // holds details
XdsClusterConfig(String clusterName, CdsUpdate clusterResource, ClusterChild details) {
this.clusterName = checkNotNull(clusterName, "clusterName");
this.clusterResource = checkNotNull(clusterResource, "clusterResource");
this.children = checkNotNull(details, "details");
}
@Override
public int hashCode() {
return clusterName.hashCode() + clusterResource.hashCode() + children.hashCode();
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof XdsClusterConfig)) {
return false;
}
XdsClusterConfig o = (XdsClusterConfig) obj;
return Objects.equals(clusterName, o.clusterName)
&& Objects.equals(clusterResource, o.clusterResource)
&& Objects.equals(children, o.children);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("XdsClusterConfig{clusterName=").append(clusterName)
.append(", clusterResource=").append(clusterResource)
.append(", children={").append(children)
.append("}");
return builder.toString();
}
public String getClusterName() {
return clusterName;
}
public CdsUpdate getClusterResource() {
return clusterResource;
}
public ClusterChild getChildren() {
return children;
}
| XdsClusterConfig |
java | apache__dubbo | dubbo-registry/dubbo-registry-api/src/main/java/org/apache/dubbo/registry/client/DefaultRegistryClusterIdentifier.java | {
"start": 978,
"end": 1291
} | class ____ implements RegistryClusterIdentifier {
@Override
public String providerKey(URL url) {
return url.getParameter(REGISTRY_CLUSTER_KEY);
}
@Override
public String consumerKey(URL url) {
return url.getParameter(REGISTRY_CLUSTER_KEY);
}
}
| DefaultRegistryClusterIdentifier |
java | processing__processing4 | core/src/processing/core/PGraphics.java | {
"start": 1874,
"end": 2188
} | class ____ you need to draw into an off-screen
* graphics buffer. A PGraphics object can be constructed with the
* <b>createGraphics()</b> function. The <b>beginDraw()</b> and <b>endDraw()</b>
* methods (see above example) are necessary to set up the buffer and to
* finalize it. The fields and methods for this | if |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.