language stringclasses 1
value | repo stringclasses 60
values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | mockito__mockito | mockito-core/src/main/java/org/mockito/internal/matchers/NotNull.java | {
"start": 260,
"end": 780
} | class ____<T> implements ArgumentMatcher<T>, Serializable {
public static final NotNull<Object> NOT_NULL = new NotNull<>(Object.class);
private final Class<T> type;
public NotNull(Class<T> type) {
this.type = Objects.requireNonNull(type);
}
@Override
public boolean matches(Object actual) {
return actual != null;
}
@Override
public Class<T> type() {
return type;
}
@Override
public String toString() {
return "notNull()";
}
}
| NotNull |
java | google__guava | android/guava-testlib/src/com/google/common/collect/testing/google/SetGenerators.java | {
"start": 4091,
"end": 4492
} | class ____ extends TestStringSetGenerator {
@Override
protected Set<String> create(String[] elements) {
ImmutableSet.Builder<String> builder =
ImmutableSet.builderWithExpectedSize(max(0, newHashSet(elements).size() - 1));
for (String e : elements) {
builder.add(e);
}
return builder.build();
}
}
public static | ImmutableSetTooSmallBuilderGenerator |
java | mapstruct__mapstruct | processor/src/main/java/org/mapstruct/ap/internal/util/Filters.java | {
"start": 1330,
"end": 5595
} | class ____ {
private static final Method RECORD_COMPONENTS_METHOD;
static {
Method recordComponentsMethod;
try {
recordComponentsMethod = TypeElement.class.getMethod( "getRecordComponents" );
}
catch ( NoSuchMethodException e ) {
recordComponentsMethod = null;
}
RECORD_COMPONENTS_METHOD = recordComponentsMethod;
}
private final AccessorNamingUtils accessorNaming;
private final TypeUtils typeUtils;
private final TypeMirror typeMirror;
public Filters(AccessorNamingUtils accessorNaming, TypeUtils typeUtils, TypeMirror typeMirror) {
this.accessorNaming = accessorNaming;
this.typeUtils = typeUtils;
this.typeMirror = typeMirror;
}
public List<ReadAccessor> getterMethodsIn(List<ExecutableElement> elements) {
return elements.stream()
.filter( accessorNaming::isGetterMethod )
.map( method -> ReadAccessor.fromGetter( method, getReturnType( method ) ) )
.collect( Collectors.toCollection( LinkedList::new ) );
}
@SuppressWarnings("unchecked")
public List<Element> recordComponentsIn(TypeElement typeElement) {
if ( RECORD_COMPONENTS_METHOD == null ) {
return java.util.Collections.emptyList();
}
try {
return (List<Element>) RECORD_COMPONENTS_METHOD.invoke( typeElement );
}
catch ( IllegalAccessException | InvocationTargetException e ) {
return java.util.Collections.emptyList();
}
}
public Map<String, ReadAccessor> recordAccessorsIn(Collection<Element> recordComponents) {
if ( recordComponents.isEmpty() ) {
return java.util.Collections.emptyMap();
}
Map<String, ReadAccessor> recordAccessors = new LinkedHashMap<>();
for ( Element recordComponent : recordComponents ) {
recordAccessors.put(
recordComponent.getSimpleName().toString(),
ReadAccessor.fromRecordComponent(
recordComponent,
typeUtils.asMemberOf( (DeclaredType) typeMirror, recordComponent )
)
);
}
return recordAccessors;
}
private TypeMirror getReturnType(ExecutableElement executableElement) {
return getWithinContext( executableElement ).getReturnType();
}
public <T> List<T> fieldsIn(List<VariableElement> accessors, BiFunction<VariableElement, TypeMirror, T> creator) {
return accessors.stream()
.filter( Fields::isFieldAccessor )
.map( variableElement -> creator.apply( variableElement, getWithinContext( variableElement ) ) )
.collect( Collectors.toCollection( LinkedList::new ) );
}
public List<ExecutableElement> presenceCheckMethodsIn(List<ExecutableElement> elements) {
return elements.stream()
.filter( accessorNaming::isPresenceCheckMethod )
.collect( Collectors.toCollection( LinkedList::new ) );
}
public List<Accessor> setterMethodsIn(List<ExecutableElement> elements) {
return elements.stream()
.filter( accessorNaming::isSetterMethod )
.map( method -> new ElementAccessor( method, getFirstParameter( method ), SETTER ) )
.collect( Collectors.toCollection( LinkedList::new ) );
}
private TypeMirror getFirstParameter(ExecutableElement executableElement) {
return first( getWithinContext( executableElement ).getParameterTypes() );
}
private ExecutableType getWithinContext( ExecutableElement executableElement ) {
return (ExecutableType) typeUtils.asMemberOf( (DeclaredType) typeMirror, executableElement );
}
private TypeMirror getWithinContext( VariableElement variableElement ) {
return typeUtils.asMemberOf( (DeclaredType) typeMirror, variableElement );
}
public List<Accessor> adderMethodsIn(List<ExecutableElement> elements) {
return elements.stream()
.filter( accessorNaming::isAdderMethod )
.map( method -> new ElementAccessor( method, getFirstParameter( method ), ADDER ) )
.collect( Collectors.toCollection( LinkedList::new ) );
}
}
| Filters |
java | google__guava | android/guava-testlib/test/com/google/common/collect/testing/MapTestSuiteBuilderTests.java | {
"start": 5297,
"end": 9096
} | class ____ extends AbstractSet<Map.Entry<String, String>> {
@Override
public Iterator<Entry<String, String>> iterator() {
return new Iterator<Entry<String, String>>() {
final Iterator<Entry<String, String>> iterator = map.entrySet().iterator();
@Override
public void remove() {
iterator.remove();
}
@Override
public boolean hasNext() {
return iterator.hasNext();
}
@Override
public Entry<String, String> next() {
return transform(iterator.next());
}
private Entry<String, String> transform(Entry<String, String> next) {
return new Entry<String, String>() {
@Override
public String setValue(String value) {
checkNotNull(value);
return next.setValue(value);
}
@Override
public String getValue() {
return next.getValue();
}
@Override
public String getKey() {
return next.getKey();
}
@Override
public boolean equals(@Nullable Object obj) {
return next.equals(obj);
}
@Override
public int hashCode() {
return next.hashCode();
}
};
}
};
}
@Override
public int size() {
return map.size();
}
@Override
public boolean remove(Object o) {
return map.entrySet().remove(o);
}
@Override
public boolean containsAll(Collection<?> c) {
return map.entrySet().containsAll(c);
}
@Override
public boolean removeAll(Collection<?> c) {
return map.entrySet().removeAll(c);
}
@Override
public boolean retainAll(Collection<?> c) {
return map.entrySet().retainAll(c);
}
@Override
public int hashCode() {
return map.entrySet().hashCode();
}
@Override
public boolean equals(@Nullable Object o) {
return map.entrySet().equals(o);
}
@Override
public String toString() {
return map.entrySet().toString();
}
}
@Override
public @Nullable String put(String key, String value) {
checkNotNull(value);
return map.put(key, value);
}
};
}
},
"HashMap w/out null values",
ALLOWS_NULL_KEYS);
}
/**
* Map generator that verifies that {@code setUp()} methods are called in all the test cases. The
* {@code setUpRan} parameter is set true by the {@code setUp} that every test case is supposed to
* have registered, and set false by the {@code tearDown}. We use a dynamic proxy to intercept all
* of the {@code Map} method calls and check that {@code setUpRan} is true.
*/
private static | EntrySet |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/io/ClassPathResourceTests.java | {
"start": 1500,
"end": 1945
} | class ____ {
private static final String PACKAGE_PATH = "org/springframework/core/io";
private static final String NONEXISTENT_RESOURCE_NAME = "nonexistent.xml";
private static final String ABSOLUTE_PATH_TO_NONEXISTENT_RESOURCE = PACKAGE_PATH + '/' + NONEXISTENT_RESOURCE_NAME;
private static final String ABSOLUTE_PATH_TO_NONEXISTENT_RESOURCE_WITH_LEADING_SLASH = '/' + ABSOLUTE_PATH_TO_NONEXISTENT_RESOURCE;
@Nested
| ClassPathResourceTests |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/ServiceBusEndpointBuilderFactory.java | {
"start": 23865,
"end": 29720
} | interface ____
extends
EndpointConsumerBuilder {
default ServiceBusEndpointConsumerBuilder basic() {
return (ServiceBusEndpointConsumerBuilder) this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedServiceBusEndpointConsumerBuilder bridgeErrorHandler(boolean bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedServiceBusEndpointConsumerBuilder bridgeErrorHandler(String bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option is a: <code>org.apache.camel.spi.ExceptionHandler</code>
* type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedServiceBusEndpointConsumerBuilder exceptionHandler(org.apache.camel.spi.ExceptionHandler exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option will be converted to a
* <code>org.apache.camel.spi.ExceptionHandler</code> type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedServiceBusEndpointConsumerBuilder exceptionHandler(String exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option is a: <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedServiceBusEndpointConsumerBuilder exchangePattern(org.apache.camel.ExchangePattern exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option will be converted to a
* <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedServiceBusEndpointConsumerBuilder exchangePattern(String exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
}
/**
* Builder for endpoint producers for the Azure ServiceBus component.
*/
public | AdvancedServiceBusEndpointConsumerBuilder |
java | quarkusio__quarkus | core/builder/src/main/java/io/quarkus/builder/item/BuildItem.java | {
"start": 999,
"end": 1068
} | class ____ be leaf (final) types: " + getClass());
}
}
}
| must |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMROutputFormat.java | {
"start": 2032,
"end": 3447
} | class ____ extends InputFormat<IntWritable, IntWritable> {
@Override
public RecordReader<IntWritable, IntWritable> createRecordReader(
InputSplit split, TaskAttemptContext context) throws IOException,
InterruptedException {
return new RecordReader<IntWritable, IntWritable>() {
private boolean done = false;
@Override
public void close() throws IOException {
}
@Override
public IntWritable getCurrentKey() throws IOException,
InterruptedException {
return new IntWritable(0);
}
@Override
public IntWritable getCurrentValue() throws IOException,
InterruptedException {
return new IntWritable(0);
}
@Override
public float getProgress() throws IOException, InterruptedException {
return done ? 0 : 1;
}
@Override
public void initialize(InputSplit split, TaskAttemptContext context)
throws IOException, InterruptedException {
}
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
if (!done) {
done = true;
return true;
}
return false;
}
};
}
@Override
public List<InputSplit> getSplits(JobContext context) throws IOException,
InterruptedException {
List<InputSplit> list = new ArrayList<InputSplit>();
list.add(new TestInputSplit());
return list;
}
}
| TestInputFormat |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/errors/ProductionExceptionHandler.java | {
"start": 8766,
"end": 10432
} | enum ____ {
/** Resume processing.
*
* <p> For this case, output records which could not be written successfully are lost.
* Use this option only if you can tolerate data loss.
*/
RESUME(0, "RESUME"),
/** Fail processing.
*
* <p> Kafka Streams will raise an exception and the {@code StreamsThread} will fail.
* No offsets (for {@link org.apache.kafka.streams.StreamsConfig#AT_LEAST_ONCE at-least-once}) or transactions
* (for {@link org.apache.kafka.streams.StreamsConfig#EXACTLY_ONCE_V2 exactly-once}) will be committed.
*/
FAIL(1, "FAIL"),
/** Retry the failed operation.
*
* <p> Retrying might imply that a {@link TaskCorruptedException} exception is thrown, and that the retry
* is started from the last committed offset.
*
* <p> <b>NOTE:</b> {@code RETRY} is only a valid return value for
* {@link org.apache.kafka.common.errors.RetriableException retriable exceptions}.
* If {@code RETRY} is returned for a non-retriable exception it will be interpreted as {@link #FAIL}.
*/
RETRY(2, "RETRY");
/**
* An english description for the used option. This is for debugging only and may change.
*/
public final String name;
/**
* The permanent and immutable id for the used option. This can't change ever.
*/
public final int id;
Result(final int id, final String name) {
this.id = id;
this.name = name;
}
/**
* Converts the deprecated | Result |
java | apache__flink | flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/sort/BinaryExternalSorter.java | {
"start": 14153,
"end": 26763
} | class ____ to the spawned threads
ClassLoader contextLoader = Thread.currentThread().getContextClassLoader();
if (contextLoader != null) {
if (this.sortThread != null) {
this.sortThread.setContextClassLoader(contextLoader);
}
if (this.spillThread != null) {
this.spillThread.setContextClassLoader(contextLoader);
}
if (this.mergeThread != null) {
this.mergeThread.setContextClassLoader(contextLoader);
}
}
}
// ------------------------------------------------------------------------
// Factory Methods
// ------------------------------------------------------------------------
/** Starts all the threads that are used by this sorter. */
public void startThreads() {
if (this.sortThread != null) {
this.sortThread.start();
}
if (this.spillThread != null) {
this.spillThread.start();
}
if (this.mergeThread != null) {
this.mergeThread.start();
}
}
/**
* Shuts down all the threads initiated by this sorter. Also releases all previously allocated
* memory, if it has not yet been released by the threads, and closes and deletes all channels
* (removing the temporary files).
*
* <p>The threads are set to exit directly, but depending on their operation, it may take a
* while to actually happen. The sorting thread will for example not finish before the current
* batch is sorted. This method attempts to wait for the working thread to exit. If it is
* however interrupted, the method exits immediately and is not guaranteed how long the threads
* continue to exist and occupy resources afterwards.
*/
@Override
public void close() {
// check if the sorter has been closed before
synchronized (this) {
if (this.closed) {
return;
}
// mark as closed
this.closed = true;
}
// from here on, the code is in a try block, because even through errors might be thrown in
// this block,
// we need to make sure that all the memory is released.
try {
// if the result iterator has not been obtained yet, set the exception
synchronized (this.iteratorLock) {
if (this.iteratorException == null) {
this.iteratorException = new IOException("The sorter has been closed.");
this.iteratorLock.notifyAll();
}
}
// stop all the threads
if (this.sortThread != null) {
try {
this.sortThread.shutdown();
} catch (Throwable t) {
LOG.error("Error shutting down sorter thread: " + t.getMessage(), t);
}
}
if (this.spillThread != null) {
try {
this.spillThread.shutdown();
} catch (Throwable t) {
LOG.error("Error shutting down spilling thread: " + t.getMessage(), t);
}
}
if (this.mergeThread != null) {
try {
this.mergeThread.shutdown();
} catch (Throwable t) {
LOG.error("Error shutting down merging thread: " + t.getMessage(), t);
}
}
try {
if (this.sortThread != null) {
this.sortThread.join();
this.sortThread = null;
}
if (this.spillThread != null) {
this.spillThread.join();
this.spillThread = null;
}
if (this.mergeThread != null) {
this.mergeThread.join();
this.mergeThread = null;
}
} catch (InterruptedException iex) {
LOG.debug(
"Closing of sort/merger was interrupted. "
+ "The reading/sorting/spilling/merging threads may still be working.",
iex);
}
} finally {
releaseSortMemory();
// Eliminate object references for MemorySegments.
circularQueues = null;
currWriteBuffer = null;
iterator = null;
merger.close();
channelManager.close();
}
}
private void releaseSortMemory() {
// RELEASE ALL MEMORY. If the threads and channels are still running, this should cause
// exceptions, because their memory segments are freed
try {
// floating segments are released in `dispose()` method
this.sortBuffers.forEach(BinaryInMemorySortBuffer::dispose);
this.sortBuffers.clear();
} catch (Throwable e) {
LOG.info("error.", e);
}
sortReadMemory.forEach(LazyMemorySegmentPool::close);
sortReadMemory.clear();
}
private ThreadBase getSortingThread(
ExceptionHandler<IOException> exceptionHandler, CircularQueues queues) {
return new SortingThread(exceptionHandler, queues);
}
private SpillingThread getSpillingThread(
ExceptionHandler<IOException> exceptionHandler,
CircularQueues queues,
IOManager ioManager,
BinaryRowDataSerializer serializer,
RecordComparator comparator) {
return new SpillingThread(exceptionHandler, queues, ioManager, serializer, comparator);
}
private MergingThread getMergingThread(
ExceptionHandler<IOException> exceptionHandler,
CircularQueues queues,
int maxNumFileHandles,
BinaryExternalMerger merger) {
return new MergingThread(exceptionHandler, queues, maxNumFileHandles, merger);
}
public void write(RowData current) throws IOException {
checkArgument(!writingDone, "Adding already done!");
try {
while (true) {
if (closed) {
throw new IOException("Already closed!", iteratorException);
}
synchronized (writeLock) {
// grab the next buffer
if (currWriteBuffer == null) {
try {
currWriteBuffer = this.circularQueues.empty.poll(1, TimeUnit.SECONDS);
if (currWriteBuffer == null) {
// maybe something happened, release lock.
continue;
}
if (!currWriteBuffer.buffer.isEmpty()) {
throw new IOException("New buffer is not empty.");
}
} catch (InterruptedException iex) {
throw new IOException(iex);
}
}
final BinaryInMemorySortBuffer buffer = currWriteBuffer.buffer;
if (LOG.isDebugEnabled()) {
LOG.debug("Retrieved empty read buffer " + currWriteBuffer.id + ".");
}
long occupancy = buffer.getOccupancy();
if (!buffer.write(current)) {
if (buffer.isEmpty()) {
// did not fit in a fresh buffer, must be large...
throw new IOException(
"The record exceeds the maximum size of a sort buffer (current maximum: "
+ buffer.getCapacity()
+ " bytes).");
} else {
// buffer is full, send the buffer
if (LOG.isDebugEnabled()) {
LOG.debug("Emitting full buffer: " + currWriteBuffer.id + ".");
}
this.circularQueues.sort.add(currWriteBuffer);
// Deadlocks may occur when there are fewer MemorySegments, because of
// the fragmentation of buffer.getOccupancy ().
if (bytesUntilSpilling > 0 && circularQueues.empty.size() == 0) {
bytesUntilSpilling = 0;
this.circularQueues.sort.add(SPILLING_MARKER);
}
currWriteBuffer = null;
// continue to process current record.
}
} else {
// successfully added record
// it may be that the last currWriteBuffer would have crossed the
// spilling threshold, so check it
if (bytesUntilSpilling > 0) {
bytesUntilSpilling -= buffer.getOccupancy() - occupancy;
if (bytesUntilSpilling <= 0) {
bytesUntilSpilling = 0;
this.circularQueues.sort.add(SPILLING_MARKER);
}
}
break;
}
}
}
} catch (Throwable e) {
IOException ioe = new IOException(e);
if (this.exceptionHandler != null) {
this.exceptionHandler.handleException(ioe);
}
throw ioe;
}
}
@VisibleForTesting
public void write(MutableObjectIterator<BinaryRowData> iterator) throws IOException {
BinaryRowData row = serializer.createInstance();
while ((row = iterator.next(row)) != null) {
write(row);
}
}
@Override
public MutableObjectIterator<BinaryRowData> getIterator() throws InterruptedException {
if (!writingDone) {
writingDone = true;
if (currWriteBuffer != null) {
this.circularQueues.sort.add(currWriteBuffer);
}
// add the sentinel to notify the receivers that the work is done
// send the EOF marker
this.circularQueues.sort.add(EOF_MARKER);
LOG.debug("Sending done.");
}
synchronized (this.iteratorLock) {
// wait while both the iterator and the exception are not set
while (this.iterator == null && this.iteratorException == null) {
this.iteratorLock.wait();
}
if (this.iteratorException != null) {
throw new RuntimeException(
"Error obtaining the sorted input: " + this.iteratorException.getMessage(),
this.iteratorException);
} else {
return this.iterator;
}
}
}
// ------------------------------------------------------------------------
// Inter-Thread Communication
// ------------------------------------------------------------------------
/**
* Sets the result iterator. By setting the result iterator, all threads that are waiting for
* the result iterator are notified and will obtain it.
*
* @param iterator The result iterator to set.
*/
private void setResultIterator(MutableObjectIterator<BinaryRowData> iterator) {
synchronized (this.iteratorLock) {
// set the result iterator only, if no exception has occurred
if (this.iteratorException == null) {
this.iterator = iterator;
this.iteratorLock.notifyAll();
}
}
}
/**
* Reports an exception to all threads that are waiting for the result iterator.
*
* @param ioex The exception to be reported to the threads that wait for the result iterator.
*/
private void setResultIteratorException(IOException ioex) {
synchronized (this.iteratorLock) {
if (this.iteratorException == null) {
this.iteratorException = ioex;
this.iteratorLock.notifyAll();
}
}
}
/**
* Class representing buffers that circulate between the reading, sorting and spilling thread.
*/
private static final | loader |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/common/io/LocatableInputSplitAssigner.java | {
"start": 1498,
"end": 9995
} | class ____ implements InputSplitAssigner {
private static final Logger LOG = LoggerFactory.getLogger(LocatableInputSplitAssigner.class);
// unassigned input splits
private final Set<LocatableInputSplitWithCount> unassigned =
new HashSet<LocatableInputSplitWithCount>();
// input splits indexed by host for local assignment
private final ConcurrentHashMap<String, LocatableInputSplitChooser> localPerHost =
new ConcurrentHashMap<String, LocatableInputSplitChooser>();
// unassigned splits for remote assignment
private final LocatableInputSplitChooser remoteSplitChooser;
private int localAssignments; // lock protected by the unassigned set lock
private int remoteAssignments; // lock protected by the unassigned set lock
// --------------------------------------------------------------------------------------------
public LocatableInputSplitAssigner(Collection<LocatableInputSplit> splits) {
for (LocatableInputSplit split : splits) {
this.unassigned.add(new LocatableInputSplitWithCount(split));
}
this.remoteSplitChooser = new LocatableInputSplitChooser(unassigned);
}
public LocatableInputSplitAssigner(LocatableInputSplit[] splits) {
for (LocatableInputSplit split : splits) {
this.unassigned.add(new LocatableInputSplitWithCount(split));
}
this.remoteSplitChooser = new LocatableInputSplitChooser(unassigned);
}
// --------------------------------------------------------------------------------------------
@Override
public LocatableInputSplit getNextInputSplit(String host, int taskId) {
// for a null host, we return a remote split
if (host == null) {
synchronized (this.remoteSplitChooser) {
synchronized (this.unassigned) {
LocatableInputSplitWithCount split =
this.remoteSplitChooser.getNextUnassignedMinLocalCountSplit(
this.unassigned);
if (split != null) {
// got a split to assign. Double check that it hasn't been assigned before.
if (this.unassigned.remove(split)) {
if (LOG.isInfoEnabled()) {
LOG.info("Assigning split to null host (random assignment).");
}
remoteAssignments++;
return split.getSplit();
} else {
throw new IllegalStateException(
"Chosen InputSplit has already been assigned. This should not happen!");
}
} else {
// all splits consumed
return null;
}
}
}
}
host = host.toLowerCase(Locale.US);
// for any non-null host, we take the list of non-null splits
LocatableInputSplitChooser localSplits = this.localPerHost.get(host);
// if we have no list for this host yet, create one
if (localSplits == null) {
localSplits = new LocatableInputSplitChooser();
// lock the list, to be sure that others have to wait for that host's local list
synchronized (localSplits) {
LocatableInputSplitChooser prior = this.localPerHost.putIfAbsent(host, localSplits);
// if someone else beat us in the case to create this list, then we do not populate
// this one, but
// simply work with that other list
if (prior == null) {
// we are the first, we populate
// first, copy the remaining splits to release the lock on the set early
// because that is shared among threads
LocatableInputSplitWithCount[] remaining;
synchronized (this.unassigned) {
remaining =
this.unassigned.toArray(
new LocatableInputSplitWithCount[this.unassigned.size()]);
}
for (LocatableInputSplitWithCount isw : remaining) {
if (isLocal(host, isw.getSplit().getHostnames())) {
// Split is local on host.
// Increment local count
isw.incrementLocalCount();
// and add to local split list
localSplits.addInputSplit(isw);
}
}
} else {
// someone else was faster
localSplits = prior;
}
}
}
// at this point, we have a list of local splits (possibly empty)
// we need to make sure no one else operates in the current list (that protects against
// list creation races) and that the unassigned set is consistent
// NOTE: we need to obtain the locks in this order, strictly!!!
synchronized (localSplits) {
synchronized (this.unassigned) {
LocatableInputSplitWithCount split =
localSplits.getNextUnassignedMinLocalCountSplit(this.unassigned);
if (split != null) {
// found a valid split. Double check that it hasn't been assigned before.
if (this.unassigned.remove(split)) {
if (LOG.isInfoEnabled()) {
LOG.info("Assigning local split to host " + host);
}
localAssignments++;
return split.getSplit();
} else {
throw new IllegalStateException(
"Chosen InputSplit has already been assigned. This should not happen!");
}
}
}
}
// we did not find a local split, return a remote split
synchronized (this.remoteSplitChooser) {
synchronized (this.unassigned) {
LocatableInputSplitWithCount split =
this.remoteSplitChooser.getNextUnassignedMinLocalCountSplit(
this.unassigned);
if (split != null) {
// found a valid split. Double check that it hasn't been assigned yet.
if (this.unassigned.remove(split)) {
if (LOG.isInfoEnabled()) {
LOG.info("Assigning remote split to host " + host);
}
remoteAssignments++;
return split.getSplit();
} else {
throw new IllegalStateException(
"Chosen InputSplit has already been assigned. This should not happen!");
}
} else {
// all splits consumed
return null;
}
}
}
}
@Override
public void returnInputSplit(List<InputSplit> splits, int taskId) {
synchronized (this.unassigned) {
for (InputSplit split : splits) {
LocatableInputSplitWithCount lisw =
new LocatableInputSplitWithCount((LocatableInputSplit) split);
this.remoteSplitChooser.addInputSplit(lisw);
this.unassigned.add(lisw);
}
}
}
private static final boolean isLocal(String flinkHost, String[] hosts) {
if (flinkHost == null || hosts == null) {
return false;
}
for (String h : hosts) {
if (h != null && NetUtils.getHostnameFromFQDN(h.toLowerCase()).equals(flinkHost)) {
return true;
}
}
return false;
}
public int getNumberOfLocalAssignments() {
return localAssignments;
}
public int getNumberOfRemoteAssignments() {
return remoteAssignments;
}
/**
* Wraps a LocatableInputSplit and adds a count for the number of observed hosts that can access
* the split locally.
*/
private static | LocatableInputSplitAssigner |
java | quarkusio__quarkus | extensions/smallrye-graphql-client/deployment/src/test/java/io/quarkus/smallrye/graphql/client/deployment/ssl/TypesafeGraphQLClientServerAndClientAuthenticationTest.java | {
"start": 921,
"end": 2281
} | class ____ {
private static final int PORT = 63805;
private static final SSLTestingTools TOOLS = new SSLTestingTools();
private static final String EXPECTED_RESPONSE = "HelloWorld";
private static HttpServer server;
private static final String CONFIGURATION = """
quarkus.smallrye-graphql-client.my-client.tls-configuration-name=my-tls-client
quarkus.tls.my-tls-client.key-store.p12.path=target/certs/graphql-client-keystore.p12
quarkus.tls.my-tls-client.key-store.p12.password=password
quarkus.tls.my-tls-client.trust-store.p12.path=target/certs/graphql-client-truststore.p12
quarkus.tls.my-tls-client.trust-store.p12.password=password
quarkus.smallrye-graphql-client.my-client.url=https://127.0.0.1:%d/
""".formatted(PORT);
@RegisterExtension
static QuarkusUnitTest test = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(MyApi.class, SSLTestingTools.class)
.addAsResource(new StringAsset(CONFIGURATION),
"application.properties")
.addAsManifestResource(EmptyAsset.INSTANCE, "beans.xml"));
@GraphQLClientApi(configKey = "my-client")
private | TypesafeGraphQLClientServerAndClientAuthenticationTest |
java | greenrobot__greendao | tests/DaoTestBase/src/main/java/org/greenrobot/greendao/daotest/AutoincrementEntityDao.java | {
"start": 782,
"end": 3387
} | class ____ {
public final static Property Id = new Property(0, Long.class, "id", true, "_id");
}
public AutoincrementEntityDao(DaoConfig config) {
super(config);
}
public AutoincrementEntityDao(DaoConfig config, DaoSession daoSession) {
super(config, daoSession);
}
/** Creates the underlying database table. */
public static void createTable(Database db, boolean ifNotExists) {
String constraint = ifNotExists? "IF NOT EXISTS ": "";
db.execSQL("CREATE TABLE " + constraint + "\"AUTOINCREMENT_ENTITY\" (" + //
"\"_id\" INTEGER PRIMARY KEY AUTOINCREMENT );"); // 0: id
}
/** Drops the underlying database table. */
public static void dropTable(Database db, boolean ifExists) {
String sql = "DROP TABLE " + (ifExists ? "IF EXISTS " : "") + "\"AUTOINCREMENT_ENTITY\"";
db.execSQL(sql);
}
@Override
protected final void bindValues(DatabaseStatement stmt, AutoincrementEntity entity) {
stmt.clearBindings();
Long id = entity.getId();
if (id != null) {
stmt.bindLong(1, id);
}
}
@Override
protected final void bindValues(SQLiteStatement stmt, AutoincrementEntity entity) {
stmt.clearBindings();
Long id = entity.getId();
if (id != null) {
stmt.bindLong(1, id);
}
}
@Override
public Long readKey(Cursor cursor, int offset) {
return cursor.isNull(offset + 0) ? null : cursor.getLong(offset + 0);
}
@Override
public AutoincrementEntity readEntity(Cursor cursor, int offset) {
AutoincrementEntity entity = new AutoincrementEntity( //
cursor.isNull(offset + 0) ? null : cursor.getLong(offset + 0) // id
);
return entity;
}
@Override
public void readEntity(Cursor cursor, AutoincrementEntity entity, int offset) {
entity.setId(cursor.isNull(offset + 0) ? null : cursor.getLong(offset + 0));
}
@Override
protected final Long updateKeyAfterInsert(AutoincrementEntity entity, long rowId) {
entity.setId(rowId);
return rowId;
}
@Override
public Long getKey(AutoincrementEntity entity) {
if(entity != null) {
return entity.getId();
} else {
return null;
}
}
@Override
public boolean hasKey(AutoincrementEntity entity) {
return entity.getId() != null;
}
@Override
protected final boolean isEntityUpdateable() {
return true;
}
}
| Properties |
java | spring-projects__spring-boot | module/spring-boot-webmvc-test/src/test/java/org/springframework/boot/webmvc/test/autoconfigure/mockmvc/WebMvcTestWithAutoConfigureMockMvcIntegrationTests.java | {
"start": 1709,
"end": 2316
} | class ____ {
@Autowired
private ApplicationContext context;
@Autowired
private MockMvcTester mvc;
@Test
void shouldNotAddFilters() {
assertThat(this.mvc.get().uri("/one")).doesNotContainHeader("x-test");
}
@Test
void shouldNotHaveWebDriver() {
assertThatExceptionOfType(NoSuchBeanDefinitionException.class)
.isThrownBy(() -> this.context.getBean(WebDriver.class));
}
@Test
void shouldNotHaveWebClient() {
assertThatExceptionOfType(NoSuchBeanDefinitionException.class)
.isThrownBy(() -> this.context.getBean(WebClient.class));
}
}
| WebMvcTestWithAutoConfigureMockMvcIntegrationTests |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/jsontype/vld/BasicPTVTest.java | {
"start": 965,
"end": 1225
} | class ____ extends BaseValue {
protected ValueB() { }
public ValueB(int x) {
super();
this.x = x;
}
}
// // // Value types
// make this type `final` to avoid polymorphic handling
static final | ValueB |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java | {
"start": 5803,
"end": 6894
} | class ____ {
public static final FieldType FIELD_TYPE;
public static final FieldType FIELD_TYPE_WITH_SKIP_DOC_VALUES;
static {
FieldType ft = new FieldType();
ft.setTokenized(false);
ft.setOmitNorms(true);
ft.setIndexOptions(IndexOptions.DOCS);
ft.setDocValuesType(DocValuesType.SORTED_SET);
FIELD_TYPE = freezeAndDeduplicateFieldType(ft);
}
static {
FieldType ft = new FieldType();
ft.setTokenized(false);
ft.setOmitNorms(true);
ft.setIndexOptions(IndexOptions.NONE);
ft.setDocValuesType(DocValuesType.SORTED_SET);
ft.setDocValuesSkipIndexType(DocValuesSkipIndexType.RANGE);
FIELD_TYPE_WITH_SKIP_DOC_VALUES = freezeAndDeduplicateFieldType(ft);
}
public static final TextSearchInfo TEXT_SEARCH_INFO = new TextSearchInfo(
FIELD_TYPE,
null,
Lucene.KEYWORD_ANALYZER,
Lucene.KEYWORD_ANALYZER
);
}
public static | Defaults |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/io/compression/Lz4BlockCompressor.java | {
"start": 1434,
"end": 3463
} | class ____ implements BlockCompressor {
private final LZ4Compressor compressor;
public Lz4BlockCompressor() {
this.compressor = LZ4Factory.fastestInstance().fastCompressor();
}
@Override
public int getMaxCompressedSize(int srcSize) {
return HEADER_LENGTH + compressor.maxCompressedLength(srcSize);
}
@Override
public int compress(ByteBuffer src, int srcOff, int srcLen, ByteBuffer dst, int dstOff)
throws BufferCompressionException {
try {
final int prevSrcOff = src.position() + srcOff;
final int prevDstOff = dst.position() + dstOff;
int maxCompressedSize = compressor.maxCompressedLength(srcLen);
int compressedLength =
compressor.compress(
src,
prevSrcOff,
srcLen,
dst,
prevDstOff + HEADER_LENGTH,
maxCompressedSize);
src.position(prevSrcOff + srcLen);
dst.position(prevDstOff);
dst.order(ByteOrder.LITTLE_ENDIAN);
dst.putInt(compressedLength);
dst.putInt(srcLen);
dst.position(prevDstOff + compressedLength + HEADER_LENGTH);
return HEADER_LENGTH + compressedLength;
} catch (Exception e) {
throw new BufferCompressionException(e);
}
}
@Override
public int compress(byte[] src, int srcOff, int srcLen, byte[] dst, int dstOff)
throws BufferCompressionException {
try {
int compressedLength =
compressor.compress(src, srcOff, srcLen, dst, dstOff + HEADER_LENGTH);
writeIntLE(compressedLength, dst, dstOff);
writeIntLE(srcLen, dst, dstOff + 4);
return HEADER_LENGTH + compressedLength;
} catch (Exception e) {
throw new BufferCompressionException(e);
}
}
}
| Lz4BlockCompressor |
java | elastic__elasticsearch | x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringValidation.java | {
"start": 3668,
"end": 4129
} | class ____ {
private List<String> ids;
private List<String> messages;
public Builder setIds(List<String> ids) {
this.ids = ids;
return this;
}
public Builder setMessages(List<String> messages) {
this.messages = messages;
return this;
}
public FilteringValidation build() {
return new FilteringValidation(ids, messages);
}
}
}
| Builder |
java | quarkusio__quarkus | extensions/qute/deployment/src/test/java/io/quarkus/qute/deployment/typesafe/DataNamespaceValidationFailureTest.java | {
"start": 332,
"end": 1746
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(Item.class, OtherItem.class)
.addAsResource(new StringAsset(
"{@io.quarkus.qute.deployment.typesafe.Item item}\n" +
"{item.name}\n" +
" {#for item in item.otherItems}\n" +
" {data:item.unknownProperty}\n" +
" {/for}\n"),
"templates/item.html"))
.assertException(t -> {
Throwable e = t;
TemplateException te = null;
while (e != null) {
if (e instanceof TemplateException) {
te = (TemplateException) e;
break;
}
e = e.getCause();
}
assertNotNull(te);
assertTrue(te.getMessage().contains(
"Property/method [unknownProperty] not found on class [io.quarkus.qute.deployment.typesafe.Item] nor handled by an extension method"),
te.getMessage());
});
@Test
public void test() {
fail();
}
}
| DataNamespaceValidationFailureTest |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/util/DumpModelAsXmlTransformRouteConstantTest.java | {
"start": 1289,
"end": 2626
} | class ____ extends ContextTestSupport {
@Test
public void testDumpModelAsXml() throws Exception {
String xml = PluginHelper.getModelToXMLDumper(context).dumpModelAsXml(context, context.getRouteDefinition("myRoute"));
assertNotNull(xml);
log.info(xml);
Document doc = new XmlConverter().toDOMDocument(xml, null);
NodeList nodes = doc.getElementsByTagName("constant");
assertEquals(1, nodes.getLength());
Element node = (Element) nodes.item(0);
assertNotNull(node, "Node <simple> expected to be instanceof Element");
assertEquals("Hello World", node.getTextContent());
nodes = doc.getElementsByTagName("to");
assertEquals(1, nodes.getLength());
node = (Element) nodes.item(0);
assertNotNull(node, "Node <to> expected to be instanceof Element");
assertEquals("mock:result", node.getAttribute("uri"));
assertEquals("myMock", node.getAttribute("id"));
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start").routeId("myRoute").transform(constant("Hello World")).to("mock:result").id("myMock");
}
};
}
}
| DumpModelAsXmlTransformRouteConstantTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/hbm/query/QueryReturnTest.java | {
"start": 4223,
"end": 4491
} | class ____ {
public Integer id;
public String foo;
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getFoo() {
return foo;
}
public void setFoo(String foo) {
this.foo = foo;
}
}
}
| Bar |
java | mockito__mockito | mockito-core/src/main/java/org/mockito/internal/creation/bytebuddy/InlineBytecodeGenerator.java | {
"start": 21597,
"end": 23533
} | class ____ extends ClassVisitor {
private final TypeDescription typeDescription;
private ParameterAddingClassVisitor(ClassVisitor cv, TypeDescription typeDescription) {
super(OpenedClassReader.ASM_API, cv);
this.typeDescription = typeDescription;
}
@Override
public MethodVisitor visitMethod(
int access, String name, String desc, String signature, String[] exceptions) {
MethodVisitor methodVisitor =
super.visitMethod(access, name, desc, signature, exceptions);
MethodList<?> methodList =
typeDescription
.getDeclaredMethods()
.filter(
(name.equals(MethodDescription.CONSTRUCTOR_INTERNAL_NAME)
? isConstructor()
: ElementMatchers.<MethodDescription>named(
name))
.and(hasDescriptor(desc)));
if (methodList.size() == 1
&& methodList.getOnly().getParameters().hasExplicitMetaData()) {
for (ParameterDescription parameterDescription :
methodList.getOnly().getParameters()) {
methodVisitor.visitParameter(
parameterDescription.getName(),
parameterDescription.getModifiers());
}
return new MethodParameterStrippingMethodVisitor(methodVisitor);
} else {
return methodVisitor;
}
}
}
private static | ParameterAddingClassVisitor |
java | google__error-prone | check_api/src/test/java/com/google/errorprone/util/FindIdentifiersTest.java | {
"start": 4402,
"end": 4843
} | class ____ {
private void doIt(String s1, String s2) {
// BUG: Diagnostic contains: [s1, s2]
String.format(s1 + s2);
}
}
""")
.doTest();
}
@Test
public void findAllIdentsMixedLocalsAndParams() {
CompilationTestHelper.newInstance(PrintIdents.class, getClass())
.addSourceLines(
"Test.java",
"""
| Test |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/oracle/createTable/OracleCreateTableTest58.java | {
"start": 1026,
"end": 7560
} | class ____ extends OracleTest {
public void test_types() throws Exception {
String sql = //
" CREATE TABLE \"SC_001\".\"TB_001\" \n" +
" ( \"ID\" NUMBER NOT NULL ENABLE, \n" +
" \"PROPS\" VARCHAR2(4000), \n" +
" CONSTRAINT \"PRODUCT_PROPERTY_SEARCH_PK\" PRIMARY KEY (\"ID\")\n" +
" USING INDEX PCTFREE 10 INITRANS 2 MAXTRANS 255 LOGGING \n" +
" STORAGE(\n" +
" BUFFER_POOL DEFAULT)\n" +
" TABLESPACE \"APPINDX1K\" LOCAL\n" +
" (PARTITION \"P1\" \n" +
" PCTFREE 10 INITRANS 2 MAXTRANS 255 \n" +
" STORAGE(INITIAL 24117248 NEXT 1048576 MINEXTENTS 1 MAXEXTENTS 2147483645\n" +
" PCTINCREASE 0 FREELISTS 1 FREELIST GROUPS 1 BUFFER_POOL DEFAULT)\n" +
" TABLESPACE \"APPINDX1K\" , \n" +
" PARTITION \"P2\" \n" +
" PCTFREE 10 INITRANS 2 MAXTRANS 255 \n" +
" STORAGE(INITIAL 1048576 NEXT 1048576 MINEXTENTS 1 MAXEXTENTS 2147483645\n" +
" PCTINCREASE 0 FREELISTS 1 FREELIST GROUPS 1 BUFFER_POOL DEFAULT)\n" +
" TABLESPACE \"APPINDX1K\" ) ENABLE\n" +
" ) PCTFREE 10 PCTUSED 40 INITRANS 1 MAXTRANS 255 LOGGING \n" +
" STORAGE(\n" +
" BUFFER_POOL DEFAULT)\n" +
" TABLESPACE \"APP_DATA1K\" \n" +
" PARTITION BY RANGE (\"ID\") \n" +
" (PARTITION \"P1\" VALUES LESS THAN (2000000000) \n" +
" PCTFREE 10 PCTUSED 40 INITRANS 1 MAXTRANS 255 \n" +
" STORAGE(INITIAL 150994944 NEXT 1048576 MINEXTENTS 1 MAXEXTENTS 2147483645\n" +
" PCTINCREASE 0 FREELISTS 1 FREELIST GROUPS 1 BUFFER_POOL DEFAULT)\n" +
" TABLESPACE \"APPDATA1M\" NOCOMPRESS , \n" +
" PARTITION \"P2\" VALUES LESS THAN (MAXVALUE) \n" +
" PCTFREE 10 PCTUSED 40 INITRANS 1 MAXTRANS 255 \n" +
" STORAGE(INITIAL 1048576 NEXT 1048576 MINEXTENTS 1 MAXEXTENTS 2147483645\n" +
" PCTINCREASE 0 FREELISTS 1 FREELIST GROUPS 1 BUFFER_POOL DEFAULT)\n" +
" TABLESPACE \"APP_DATA1K\" NOCOMPRESS ) ";
OracleStatementParser parser = new OracleStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement stmt = statementList.get(0);
print(statementList);
assertEquals(1, statementList.size());
assertEquals("CREATE TABLE \"SC_001\".\"TB_001\" (\n" +
"\t\"ID\" NUMBER NOT NULL ENABLE,\n" +
"\t\"PROPS\" VARCHAR2(4000),\n" +
"\tCONSTRAINT \"PRODUCT_PROPERTY_SEARCH_PK\" PRIMARY KEY (\"ID\")\n" +
"\t\tUSING INDEX\n" +
"\t\tPCTFREE 10\n" +
"\t\tINITRANS 2\n" +
"\t\tMAXTRANS 255\n" +
"\t\tLOGGING\n" +
"\t\tTABLESPACE \"APPINDX1K\"\n" +
"\t\tSTORAGE (\n" +
"\t\t\tBUFFER_POOL DEFAULT\n" +
"\t\t)\n" +
"\t\tENABLE\n" +
")\n" +
"PCTFREE 10\n" +
"PCTUSED 40\n" +
"INITRANS 1\n" +
"MAXTRANS 255\n" +
"LOGGING\n" +
"TABLESPACE \"APP_DATA1K\"\n" +
"STORAGE (\n" +
"\tBUFFER_POOL DEFAULT\n" +
")\n" +
"PARTITION BY RANGE (\"ID\") (\n" +
"\tPARTITION \"P1\" VALUES LESS THAN (2000000000)\n" +
"\t\tPCTFREE 10\n" +
"\t\tPCTUSED 40\n" +
"\t\tINITRANS 1\n" +
"\t\tMAXTRANS 255\n" +
"\t\tNOCOMPRESS\n" +
"\t\tTABLESPACE \"APPDATA1M\"\n" +
"\t\tSTORAGE (\n" +
"\t\t\tINITIAL 150994944\n" +
"\t\t\tNEXT 1048576\n" +
"\t\t\tMINEXTENTS 1\n" +
"\t\t\tMAXEXTENTS 2147483645\n" +
"\t\t\tPCTINCREASE 0\n" +
"\t\t\tFREELISTS 1\n" +
"\t\t\tFREELIST GROUPS 1\n" +
"\t\t\tBUFFER_POOL DEFAULT\n" +
"\t\t),\n" +
"\tPARTITION \"P2\" VALUES LESS THAN (MAXVALUE)\n" +
"\t\tPCTFREE 10\n" +
"\t\tPCTUSED 40\n" +
"\t\tINITRANS 1\n" +
"\t\tMAXTRANS 255\n" +
"\t\tNOCOMPRESS\n" +
"\t\tTABLESPACE \"APP_DATA1K\"\n" +
"\t\tSTORAGE (\n" +
"\t\t\tINITIAL 1048576\n" +
"\t\t\tNEXT 1048576\n" +
"\t\t\tMINEXTENTS 1\n" +
"\t\t\tMAXEXTENTS 2147483645\n" +
"\t\t\tPCTINCREASE 0\n" +
"\t\t\tFREELISTS 1\n" +
"\t\t\tFREELIST GROUPS 1\n" +
"\t\t\tBUFFER_POOL DEFAULT\n" +
"\t\t)\n" +
")",
SQLUtils.toSQLString(stmt, JdbcConstants.ORACLE));
OracleSchemaStatVisitor visitor = new OracleSchemaStatVisitor();
stmt.accept(visitor);
System.out.println("Tables : " + visitor.getTables());
System.out.println("fields : " + visitor.getColumns());
System.out.println("coditions : " + visitor.getConditions());
System.out.println("relationships : " + visitor.getRelationships());
System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(1, visitor.getTables().size());
assertEquals(2, visitor.getColumns().size());
assertTrue(visitor.containsColumn("SC_001.TB_001", "ID"));
}
}
| OracleCreateTableTest58 |
java | FasterXML__jackson-databind | src/test/java/perf/ManualWritePerfWithUUID.java | {
"start": 302,
"end": 980
} | class ____
extends ObjectWriterTestBase<UUIDFast, UUIDSlow>
{
@Override
protected int targetSizeMegs() { return 10; }
public static void main(String[] args) throws Exception
{
if (args.length != 0) {
System.err.println("Usage: java ...");
System.exit(1);
}
UUID[] uuids = new UUID[8];
for (int i = 0; i < uuids.length; ++i) {
uuids[i] = UUID.randomUUID();
}
new ManualWritePerfWithUUID().test(new JsonMapper(),
"faster-UUID", new UUIDFast(uuids), UUIDFast.class,
"JDK-UUID", new UUIDSlow(uuids), UUIDSlow.class);
}
}
| ManualWritePerfWithUUID |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/ASTHelpersSuggestions.java | {
"start": 2215,
"end": 4891
} | class ____ extends BugChecker implements MethodInvocationTreeMatcher {
private static final Supplier<Type> MODULE_SYMBOL =
Suppliers.typeFromString("com.sun.tools.javac.code.Symbol.ModuleSymbol");
private static final Matcher<ExpressionTree> SYMBOL =
anyOf(
instanceMethod()
.onDescendantOf("com.sun.tools.javac.code.Symbol")
.namedAnyOf("packge", "getEnclosedElements"),
instanceMethod()
.onClass((t, s) -> isSubtype(MODULE_SYMBOL.get(s), t, s))
.namedAnyOf("isStatic"));
private static final Matcher<ExpressionTree> SYMBOL_ENCLCLASS =
instanceMethod().onDescendantOf("com.sun.tools.javac.code.Symbol").namedAnyOf("enclClass");
private static final Matcher<ExpressionTree> SYMBOL_OWNER =
instanceField("com.sun.tools.javac.code.Symbol", "owner");
private static final ImmutableMap<String, String> NAMES =
ImmutableMap.of("packge", "enclosingPackage");
@Override
public Description matchMethodInvocation(MethodInvocationTree tree, VisitorState state) {
ExpressionTree receiver = getReceiver(tree);
if (receiver == null) {
return NO_MATCH;
}
ClassSymbol outermost =
outermostClass(getSymbol(findEnclosingNode(state.getPath(), ClassTree.class)));
if (outermost.getQualifiedName().contentEquals("com.google.errorprone.util.ASTHelpers")) {
return NO_MATCH;
}
if (SYMBOL.matches(tree, state)) {
MethodSymbol sym = getSymbol(tree);
String name = sym.getSimpleName().toString();
name = NAMES.getOrDefault(name, name);
return describeMatch(
tree,
SuggestedFix.builder()
.addStaticImport("com.google.errorprone.util.ASTHelpers." + name)
.prefixWith(tree, name + "(")
.replace(state.getEndPosition(receiver), state.getEndPosition(tree), ")")
.build());
}
if (SYMBOL_ENCLCLASS.matches(tree, state)) {
// Check whether the receiver matches the instance field Symbol.owner.
if (SYMBOL_OWNER.matches(receiver, state)) {
// Get the receiver of the Symbol.owner expression.
ExpressionTree receiver2 = getReceiver(receiver);
if (receiver2 != null) {
return describeMatch(
tree,
SuggestedFix.builder()
.addStaticImport("com.google.errorprone.util.ASTHelpers.enclosingClass")
.prefixWith(tree, "enclosingClass(")
.replace(state.getEndPosition(receiver2), state.getEndPosition(tree), ")")
.build());
}
}
}
return NO_MATCH;
}
}
| ASTHelpersSuggestions |
java | apache__camel | components/camel-huawei/camel-huaweicloud-dms/src/test/java/org/apache/camel/component/huaweicloud/dms/QueryInstanceTest.java | {
"start": 1513,
"end": 4140
} | class ____ extends CamelTestSupport {
TestConfiguration testConfiguration = new TestConfiguration();
@BindToRegistry("dmsClient")
DmsClient mockClient = Mockito.mock(DmsClient.class);
@BindToRegistry("serviceKeys")
ServiceKeys serviceKeys = new ServiceKeys(
testConfiguration.getProperty("accessKey"),
testConfiguration.getProperty("secretKey"));
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:operation")
.setProperty(DMSProperties.OPERATION, constant("queryInstance"))
.to("hwcloud-dms:?" +
"serviceKeys=#serviceKeys" +
"&projectId=" + testConfiguration.getProperty("projectId") +
"®ion=" + testConfiguration.getProperty("region") +
"&instanceId=" + testConfiguration.getProperty("instanceId") +
"&ignoreSslVerification=true" +
"&dmsClient=#dmsClient")
.log("Operation successful")
.to("mock:operation_result");
}
};
}
@Test
public void testOperation() throws Exception {
DmsInstance instance = new DmsInstance()
.withName("test-instance-1")
.withEngine(testConfiguration.getProperty("engine"))
.withStorageSpace(500)
.withInstanceId("id-1")
.withVpcId("vpc-id-1")
.withUserName("user-1");
Mockito.when(mockClient.queryInstance(Mockito.any(QueryInstanceRequest.class))).thenReturn(instance);
MockEndpoint mock = getMockEndpoint("mock:operation_result");
mock.expectedMinimumMessageCount(1);
template.sendBody("direct:operation", "sample_body");
Exchange responseExchange = mock.getExchanges().get(0);
mock.assertIsSatisfied();
assertEquals(
"{\"name\":\"test-instance-1\",\"engine\":\"kafka\",\"storage_space\":500,\"partition_num\":0,\"used_storage_space\":0,\"port\":0,"
+
"\"instance_id\":\"id-1\",\"charging_mode\":0,\"vpc_id\":\"vpc-id-1\",\"user_name\":\"user-1\",\"enable_publicip\":false,\"ssl_enable\":false,"
+
"\"is_logical_volume\":false,\"extend_times\":0,\"enable_auto_topic\":false}",
responseExchange.getIn().getBody(String.class));
}
}
| QueryInstanceTest |
java | apache__maven | impl/maven-core/src/main/java/org/apache/maven/internal/impl/DefaultPackagingRegistry.java | {
"start": 2037,
"end": 8013
} | class ____
extends ExtensibleEnumRegistries.DefaultExtensibleEnumRegistry<Packaging, PackagingProvider>
implements PackagingRegistry {
private static final Logger LOGGER = LoggerFactory.getLogger(DefaultPackagingRegistry.class);
private final Lookup lookup;
private final TypeRegistry typeRegistry;
@Inject
public DefaultPackagingRegistry(Lookup lookup, TypeRegistry typeRegistry, List<PackagingProvider> providers) {
super(providers);
this.lookup = lookup;
this.typeRegistry = typeRegistry;
}
@Override
public Optional<Packaging> lookup(String id) {
id = id.toLowerCase(Locale.ROOT);
// TODO: we should be able to inject a Map<String, LifecycleMapping> directly,
// however, SISU visibility filtering can only happen when an explicit
// lookup is performed. The whole problem here is caused by "project extensions"
// which are bound to a project's classloader, without any clear definition
// of a "project scope"
LifecycleMapping lifecycleMapping =
lookup.lookupOptional(LifecycleMapping.class, id).orElse(null);
if (lifecycleMapping == null) {
return Optional.empty();
}
Type type = typeRegistry.lookup(id).orElse(null);
if (type == null) {
return Optional.empty();
}
return Optional.of(new DefaultPackaging(id, type, getPlugins(lifecycleMapping)));
}
private Map<String, PluginContainer> getPlugins(LifecycleMapping lifecycleMapping) {
Map<String, PluginContainer> lfs = new HashMap<>();
lifecycleMapping.getLifecycles().forEach((id, lifecycle) -> {
Map<String, Plugin> plugins = new HashMap<>();
lifecycle
.getLifecyclePhases()
.forEach((phase, lifecyclePhase) -> parseLifecyclePhaseDefinitions(plugins, phase, lifecyclePhase));
lfs.put(id, PluginContainer.newBuilder().plugins(plugins.values()).build());
});
return lfs;
}
static void parseLifecyclePhaseDefinitions(Map<String, Plugin> plugins, String phase, LifecyclePhase goals) {
InputLocation location = DefaultLifecycleRegistry.DEFAULT_LIFECYCLE_INPUT_LOCATION;
List<LifecycleMojo> mojos = goals.getMojos();
if (mojos != null) {
for (int i = 0; i < mojos.size(); i++) {
LifecycleMojo mojo = mojos.get(i);
// Compute goal coordinates
String groupId, artifactId, version, goal;
String[] p = mojo.getGoal().trim().split(":");
if (p.length == 3) {
// <groupId>:<artifactId>:<goal>
groupId = p[0];
artifactId = p[1];
version = null;
goal = p[2];
} else if (p.length == 4) {
// <groupId>:<artifactId>:<version>:<goal>
groupId = p[0];
artifactId = p[1];
version = p[2];
goal = p[3];
} else {
// invalid
LOGGER.warn(
"Ignored invalid goal specification '{}' from lifecycle mapping for phase {}",
mojo.getGoal(),
phase);
continue;
}
String key = groupId + ":" + artifactId;
// Build plugin
List<PluginExecution> execs = new ArrayList<>();
List<Dependency> deps = new ArrayList<>();
Plugin existing = plugins.get(key);
if (existing != null) {
if (version == null) {
version = existing.getVersion();
}
execs.addAll(existing.getExecutions());
deps.addAll(existing.getDependencies());
}
PluginExecution execution = PluginExecution.newBuilder()
.id(getExecutionId(existing, goal))
.priority(i - mojos.size())
.phase(phase)
.goals(List.of(goal))
.configuration(mojo.getConfiguration())
.location("", location)
.location("id", location)
.location("phase", location)
.location("goals", location)
.build();
execs.add(execution);
if (mojo.getDependencies() != null) {
mojo.getDependencies().forEach(d -> deps.add(d.getDelegate()));
}
Plugin plugin = Plugin.newBuilder()
.groupId(groupId)
.artifactId(artifactId)
.version(version)
.location("", location)
.location("groupId", location)
.location("artifactId", location)
.location("version", location)
.executions(execs)
.dependencies(deps)
.build();
plugins.put(key, plugin);
}
}
}
private static String getExecutionId(Plugin plugin, String goal) {
Set<String> existingIds = plugin != null
? plugin.getExecutions().stream().map(PluginExecution::getId).collect(Collectors.toSet())
: Set.of();
String base = "default-" + goal;
String id = base;
for (int index = 1; existingIds.contains(id); index++) {
id = base + '-' + index;
}
return id;
}
private record DefaultPackaging(String id, Type type, Map<String, PluginContainer> plugins) implements Packaging {}
}
| DefaultPackagingRegistry |
java | mockito__mockito | mockito-core/src/test/java/org/mockitousage/bugs/creation/otherpackage/PublicParentClass.java | {
"start": 181,
"end": 291
} | class ____ {
public void method_with_non_public_argument(PackageLocalArg arg) {}
static | PublicParentClass |
java | apache__camel | components/camel-zipfile/src/main/java/org/apache/camel/dataformat/zipfile/ZipFileDataFormat.java | {
"start": 1869,
"end": 7135
} | class ____ extends ServiceSupport implements DataFormat, DataFormatName {
/**
* The default maximum decompressed size (in bytes), which corresponds to 1G.
*/
private static final long DEFAULT_MAXIMUM_DECOMPRESSED_SIZE = 1073741824;
private boolean usingIterator;
private boolean allowEmptyDirectory;
private boolean preservePathElements;
private long maxDecompressedSize = DEFAULT_MAXIMUM_DECOMPRESSED_SIZE;
@Override
public String getDataFormatName() {
return "zipFile";
}
@Override
public void marshal(final Exchange exchange, final Object graph, final OutputStream stream) throws Exception {
String filename = null;
String filepath = exchange.getIn().getHeader(FILE_NAME, String.class);
Long fileLength = exchange.getIn().getHeader(FILE_LENGTH, Long.class);
if (filepath != null) {
Path filenamePath = Paths.get(filepath).getFileName();
if (filenamePath != null) {
filename = filenamePath.toString(); // remove any path elements
}
}
if (filename == null) {
// generate the file name as the camel file component would do
filename = filepath = StringHelper.sanitize(exchange.getIn().getMessageId());
}
InputStream is = exchange.getContext().getTypeConverter().mandatoryConvertTo(InputStream.class, exchange, graph);
if (fileLength == null) {
fileLength = (long) is.available();
}
ZipArchiveOutputStream zos = new ZipArchiveOutputStream(stream);
if (preservePathElements) {
createZipEntries(zos, filepath, fileLength);
} else {
createZipEntries(zos, filename, fileLength);
}
try {
IOHelper.copy(is, zos);
} finally {
zos.closeArchiveEntry();
IOHelper.close(is, zos);
}
String newFilename = filename + ".zip";
exchange.getMessage().setHeader(FILE_NAME, newFilename);
}
@Override
public Object unmarshal(final Exchange exchange, final InputStream inputStream) throws Exception {
if (usingIterator) {
ZipIterator zipIterator = new ZipIterator(exchange, inputStream);
zipIterator.setAllowEmptyDirectory(allowEmptyDirectory);
return zipIterator;
} else {
BufferedInputStream bis = new BufferedInputStream(inputStream);
ZipArchiveInputStream zis = new ArchiveStreamFactory()
.createArchiveInputStream(ArchiveStreamFactory.ZIP, bis);
OutputStreamBuilder osb = OutputStreamBuilder.withExchange(exchange);
try {
ZipArchiveEntry entry = zis.getNextEntry();
if (entry != null) {
exchange.getMessage().setHeader(FILE_NAME, entry.getName());
IOHelper.copy(zis, osb, IOHelper.DEFAULT_BUFFER_SIZE, false, maxDecompressedSize);
} else {
throw new IllegalStateException("Unable to unzip the file, it may be corrupted.");
}
entry = zis.getNextEntry();
if (entry != null) {
throw new IllegalStateException("Zip file has more than 1 entry.");
}
return osb.build();
} finally {
IOHelper.close(osb, zis, bis);
}
}
}
private void createZipEntries(ZipArchiveOutputStream zos, String filepath, Long fileLength) throws IOException {
Iterator<Path> elements = Paths.get(filepath).iterator();
StringBuilder sb = new StringBuilder(256);
while (elements.hasNext()) {
Path path = elements.next();
String element = path.toString();
Long length = fileLength;
// If there are more elements to come this element is a directory
// The "/" at the end tells the ZipEntry it is a folder
if (elements.hasNext()) {
element += "/";
length = 0L;
}
// Each entry needs the complete path, including previous created folders.
ZipArchiveEntry entry = new ZipArchiveEntry(sb + element);
entry.setSize(length);
zos.putArchiveEntry(entry);
sb.append(element);
}
}
public boolean isUsingIterator() {
return usingIterator;
}
public void setUsingIterator(boolean usingIterator) {
this.usingIterator = usingIterator;
}
public boolean isAllowEmptyDirectory() {
return allowEmptyDirectory;
}
public void setAllowEmptyDirectory(boolean allowEmptyDirectory) {
this.allowEmptyDirectory = allowEmptyDirectory;
}
public boolean isPreservePathElements() {
return preservePathElements;
}
public void setPreservePathElements(boolean preservePathElements) {
this.preservePathElements = preservePathElements;
}
public long getMaxDecompressedSize() {
return maxDecompressedSize;
}
public void setMaxDecompressedSize(long maxDecompressedSize) {
this.maxDecompressedSize = maxDecompressedSize;
}
}
| ZipFileDataFormat |
java | spring-projects__spring-security | saml2/saml2-service-provider/src/main/java/org/springframework/security/saml2/provider/service/web/authentication/Saml2WebSsoAuthenticationFilter.java | {
"start": 2463,
"end": 9029
} | class ____ extends AbstractAuthenticationProcessingFilter {
public static final String DEFAULT_FILTER_PROCESSES_URI = "/login/saml2/sso/{registrationId}";
private static final RequestMatcher DEFAULT_REQUEST_MATCHER = new OrRequestMatcher(
pathPattern(DEFAULT_FILTER_PROCESSES_URI), pathPattern("/login/saml2/sso"));
private final AuthenticationConverter authenticationConverter;
private Saml2AuthenticationRequestRepository<AbstractSaml2AuthenticationRequest> authenticationRequestRepository = new HttpSessionSaml2AuthenticationRequestRepository();
private boolean continueChainWhenNoRelyingPartyRegistrationFound = false;
/**
* Creates a {@code Saml2WebSsoAuthenticationFilter} authentication filter that is
* configured to use the {@link #DEFAULT_FILTER_PROCESSES_URI} processing URL
* @param relyingPartyRegistrationRepository - repository of configured SAML 2
* entities. Required.
*/
public Saml2WebSsoAuthenticationFilter(RelyingPartyRegistrationRepository relyingPartyRegistrationRepository) {
this(relyingPartyRegistrationRepository, DEFAULT_FILTER_PROCESSES_URI);
RequestMatcher processUri = pathPattern(DEFAULT_FILTER_PROCESSES_URI);
setRequiresAuthenticationRequestMatcher(processUri);
}
/**
* Creates a {@code Saml2WebSsoAuthenticationFilter} authentication filter
* @param relyingPartyRegistrationRepository - repository of configured SAML 2
* entities. Required.
* @param filterProcessesUrl the processing URL, must contain a {registrationId}
* variable. Required.
*/
public Saml2WebSsoAuthenticationFilter(RelyingPartyRegistrationRepository relyingPartyRegistrationRepository,
String filterProcessesUrl) {
this(new Saml2AuthenticationTokenConverter(
new DefaultRelyingPartyRegistrationResolver(relyingPartyRegistrationRepository)), filterProcessesUrl);
Assert.isTrue(filterProcessesUrl.contains("{registrationId}"),
"filterProcessesUrl must contain a {registrationId} match variable");
}
/**
* Creates a {@link Saml2WebSsoAuthenticationFilter} that is configured to use the
* {@link #DEFAULT_FILTER_PROCESSES_URI} processing URL
* @param authenticationConverter the strategy for converting an
* {@link HttpServletRequest} into an {@link Authentication}
* @since 6.2
*/
public Saml2WebSsoAuthenticationFilter(AuthenticationConverter authenticationConverter) {
super(DEFAULT_REQUEST_MATCHER);
Assert.notNull(authenticationConverter, "authenticationConverter cannot be null");
this.authenticationConverter = authenticationConverter;
setAllowSessionCreation(true);
setSessionAuthenticationStrategy(new ChangeSessionIdAuthenticationStrategy());
setAuthenticationConverter(authenticationConverter);
}
/**
* Creates a {@link Saml2WebSsoAuthenticationFilter} given the provided parameters
* @param authenticationConverter the strategy for converting an
* {@link HttpServletRequest} into an {@link Authentication}
* @param filterProcessesUrl the processing URL
* @since 5.4
*/
public Saml2WebSsoAuthenticationFilter(AuthenticationConverter authenticationConverter, String filterProcessesUrl) {
super(filterProcessesUrl);
Assert.notNull(authenticationConverter, "authenticationConverter cannot be null");
Assert.hasText(filterProcessesUrl, "filterProcessesUrl must contain a URL pattern");
this.authenticationConverter = authenticationConverter;
setAllowSessionCreation(true);
setSessionAuthenticationStrategy(new ChangeSessionIdAuthenticationStrategy());
setAuthenticationConverter(authenticationConverter);
}
@Override
protected boolean requiresAuthentication(HttpServletRequest request, HttpServletResponse response) {
return super.requiresAuthentication(request, response);
}
@Override
public Authentication attemptAuthentication(HttpServletRequest request, HttpServletResponse response)
throws AuthenticationException {
Authentication authentication = this.authenticationConverter.convert(request);
if (authentication == null) {
if (this.continueChainWhenNoRelyingPartyRegistrationFound) {
return null;
}
Saml2Error saml2Error = new Saml2Error(Saml2ErrorCodes.RELYING_PARTY_REGISTRATION_NOT_FOUND,
"No relying party registration found");
throw new Saml2AuthenticationException(saml2Error);
}
setDetails(request, authentication);
this.authenticationRequestRepository.removeAuthenticationRequest(request, response);
return getAuthenticationManager().authenticate(authentication);
}
/**
* Use the given {@link Saml2AuthenticationRequestRepository} to remove the saved
* authentication request. If the {@link #authenticationConverter} is of the type
* {@link Saml2AuthenticationTokenConverter}, the
* {@link Saml2AuthenticationRequestRepository} will also be set into the
* {@link #authenticationConverter}.
* @param authenticationRequestRepository the
* {@link Saml2AuthenticationRequestRepository} to use
* @since 5.6
*/
public void setAuthenticationRequestRepository(
Saml2AuthenticationRequestRepository<AbstractSaml2AuthenticationRequest> authenticationRequestRepository) {
Assert.notNull(authenticationRequestRepository, "authenticationRequestRepository cannot be null");
this.authenticationRequestRepository = authenticationRequestRepository;
setAuthenticationRequestRepositoryIntoAuthenticationConverter(authenticationRequestRepository);
}
private void setAuthenticationRequestRepositoryIntoAuthenticationConverter(
Saml2AuthenticationRequestRepository<AbstractSaml2AuthenticationRequest> authenticationRequestRepository) {
if (this.authenticationConverter instanceof Saml2AuthenticationTokenConverter authenticationTokenConverter) {
authenticationTokenConverter.setAuthenticationRequestRepository(authenticationRequestRepository);
}
}
private void setDetails(HttpServletRequest request, Authentication authentication) {
if (authentication.getDetails() != null) {
return;
}
if (authentication instanceof AbstractAuthenticationToken token) {
Object details = this.authenticationDetailsSource.buildDetails(request);
token.setDetails(details);
}
}
/**
* Indicate whether to continue with the rest of the filter chain in the event that no
* relying party registration is found. This is {@code false} by default, meaning that
* it will throw an exception.
* @param continueChain whether to continue
* @since 6.5
*/
public void setContinueChainWhenNoRelyingPartyRegistrationFound(boolean continueChain) {
this.continueChainWhenNoRelyingPartyRegistrationFound = continueChain;
}
}
| Saml2WebSsoAuthenticationFilter |
java | quarkusio__quarkus | extensions/container-image/container-image-buildpack/deployment/src/main/java/io/quarkus/container/image/buildpack/deployment/BuildpackBuild.java | {
"start": 177,
"end": 593
} | class ____ implements BooleanSupplier {
private final ContainerImageConfig containerImageConfig;
public BuildpackBuild(ContainerImageConfig containerImageConfig) {
this.containerImageConfig = containerImageConfig;
}
@Override
public boolean getAsBoolean() {
return containerImageConfig.builder().map(b -> b.equals(BuildpackProcessor.BUILDPACK)).orElse(true);
}
}
| BuildpackBuild |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/longarray/LongArrayAssert_hasSizeBetween_Test.java | {
"start": 800,
"end": 1142
} | class ____ extends LongArrayAssertBaseTest {
@Override
protected LongArrayAssert invoke_api_method() {
return assertions.hasSizeBetween(4, 6);
}
@Override
protected void verify_internal_effects() {
verify(arrays).assertHasSizeBetween(getInfo(assertions), getActual(assertions), 4, 6);
}
}
| LongArrayAssert_hasSizeBetween_Test |
java | apache__kafka | connect/api/src/main/java/org/apache/kafka/connect/data/Schema.java | {
"start": 2074,
"end": 2239
} | interface ____ {
/**
* The type of a schema. These only include the core types; logical types must be determined by checking the schema name.
*/
| Schema |
java | apache__dubbo | dubbo-plugin/dubbo-spring-security/src/main/java/org/apache/dubbo/spring/security/jackson/ObjectMapperCodecCustomer.java | {
"start": 1003,
"end": 1101
} | interface ____ {
void customize(ObjectMapperCodec objectMapperCodec);
}
| ObjectMapperCodecCustomer |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/context/support/GenericApplicationContext.java | {
"start": 18485,
"end": 19231
} | class ____ the bean
* @param constructorArgs custom argument values to be fed into Spring's
* constructor resolution algorithm, resolving either all arguments or just
* specific ones, with the rest to be resolved through regular autowiring
* (may be {@code null} or empty)
* @since 5.2 (since 5.0 on the AnnotationConfigApplicationContext subclass)
*/
public <T> void registerBean(Class<T> beanClass, @Nullable Object... constructorArgs) {
registerBean(null, beanClass, constructorArgs);
}
/**
* Register a bean from the given bean class, optionally providing explicit
* constructor arguments for consideration in the autowiring process.
* @param beanName the name of the bean (may be {@code null})
* @param beanClass the | of |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/cid/Parent.java | {
"start": 299,
"end": 623
} | class ____ {
@EmbeddedId
public ParentPk id;
public boolean equals(Object o) {
if ( this == o ) return true;
if ( !( o instanceof Parent ) ) return false;
final Parent parent = (Parent) o;
if ( !id.equals( parent.id ) ) return false;
return true;
}
public int hashCode() {
return id.hashCode();
}
}
| Parent |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/task/SyncTaskExecutorTests.java | {
"start": 1125,
"end": 3386
} | class ____ {
@Test
void plainExecution() {
SyncTaskExecutor taskExecutor = new SyncTaskExecutor();
ConcurrentClass target = new ConcurrentClass();
assertThatNoException().isThrownBy(() -> taskExecutor.execute(target::concurrentOperation));
assertThat(taskExecutor.execute(target::concurrentOperationWithResult)).isEqualTo("result");
assertThatIOException().isThrownBy(() -> taskExecutor.execute(target::concurrentOperationWithException));
}
@Test
void withConcurrencyLimit() {
SyncTaskExecutor taskExecutor = new SyncTaskExecutor();
taskExecutor.setConcurrencyLimit(2);
ConcurrentClass target = new ConcurrentClass();
List<CompletableFuture<?>> futures = new ArrayList<>(10);
for (int i = 0; i < 10; i++) {
futures.add(CompletableFuture.runAsync(() -> taskExecutor.execute(target::concurrentOperation)));
}
CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).join();
assertThat(target.current).hasValue(0);
assertThat(target.counter).hasValue(10);
}
@Test
void withConcurrencyLimitAndResult() {
SyncTaskExecutor taskExecutor = new SyncTaskExecutor();
taskExecutor.setConcurrencyLimit(2);
ConcurrentClass target = new ConcurrentClass();
List<CompletableFuture<?>> futures = new ArrayList<>(10);
for (int i = 0; i < 10; i++) {
futures.add(CompletableFuture.runAsync(() ->
assertThat(taskExecutor.execute(target::concurrentOperationWithResult)).isEqualTo("result")));
}
CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).join();
assertThat(target.current).hasValue(0);
assertThat(target.counter).hasValue(10);
}
@Test
void withConcurrencyLimitAndException() {
SyncTaskExecutor taskExecutor = new SyncTaskExecutor();
taskExecutor.setConcurrencyLimit(2);
ConcurrentClass target = new ConcurrentClass();
List<CompletableFuture<?>> futures = new ArrayList<>(10);
for (int i = 0; i < 10; i++) {
futures.add(CompletableFuture.runAsync(() ->
assertThatIOException().isThrownBy(() -> taskExecutor.execute(target::concurrentOperationWithException))));
}
CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).join();
assertThat(target.current).hasValue(0);
assertThat(target.counter).hasValue(10);
}
static | SyncTaskExecutorTests |
java | apache__flink | flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/ByteBufferReadableFSDataInputStream.java | {
"start": 1271,
"end": 1372
} | class ____ used by ForSt, please start a discussion firstly if it has to
* be modified.
*/
public | maybe |
java | spring-projects__spring-boot | module/spring-boot-cache/src/main/java/org/springframework/boot/cache/autoconfigure/metrics/CacheMetricsRegistrarConfiguration.java | {
"start": 1753,
"end": 3725
} | class ____ {
private static final String CACHE_MANAGER_SUFFIX = "cacheManager";
private final MeterRegistry registry;
private final CacheMetricsRegistrar cacheMetricsRegistrar;
private final Map<String, CacheManager> cacheManagers;
CacheMetricsRegistrarConfiguration(MeterRegistry registry, Collection<CacheMeterBinderProvider<?>> binderProviders,
ConfigurableListableBeanFactory beanFactory) {
this.registry = registry;
this.cacheManagers = SimpleAutowireCandidateResolver.resolveAutowireCandidates(beanFactory, CacheManager.class);
this.cacheMetricsRegistrar = new CacheMetricsRegistrar(this.registry, binderProviders);
bindCachesToRegistry();
}
@Bean
CacheMetricsRegistrar cacheMetricsRegistrar() {
return this.cacheMetricsRegistrar;
}
private void bindCachesToRegistry() {
this.cacheManagers.forEach(this::bindCacheManagerToRegistry);
}
private void bindCacheManagerToRegistry(String beanName, CacheManager cacheManager) {
cacheManager.getCacheNames().forEach((cacheName) -> {
Cache cache = cacheManager.getCache(cacheName);
Assert.state(cache != null, () -> "'cache' must not be null. 'cacheName' is '%s'".formatted(cacheName));
bindCacheToRegistry(beanName, cache);
});
}
private void bindCacheToRegistry(String beanName, Cache cache) {
Tag cacheManagerTag = Tag.of("cache.manager", getCacheManagerName(beanName));
this.cacheMetricsRegistrar.bindCacheToRegistry(cache, cacheManagerTag);
}
/**
* Get the name of a {@link CacheManager} based on its {@code beanName}.
* @param beanName the name of the {@link CacheManager} bean
* @return a name for the given cache manager
*/
private String getCacheManagerName(String beanName) {
if (beanName.length() > CACHE_MANAGER_SUFFIX.length()
&& StringUtils.endsWithIgnoreCase(beanName, CACHE_MANAGER_SUFFIX)) {
return beanName.substring(0, beanName.length() - CACHE_MANAGER_SUFFIX.length());
}
return beanName;
}
}
| CacheMetricsRegistrarConfiguration |
java | netty__netty | codec-http/src/main/java/io/netty/handler/codec/http/HttpStatusClass.java | {
"start": 721,
"end": 754
} | class ____ HTTP status.
*/
public | of |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/HeartbeatResponse.java | {
"start": 1162,
"end": 2579
} | class ____ {
/** Commands returned from the namenode to the datanode */
private final DatanodeCommand[] commands;
/** Information about the current HA-related state of the NN */
private final NNHAStatusHeartbeat haStatus;
private final RollingUpgradeStatus rollingUpdateStatus;
private final long fullBlockReportLeaseId;
private final boolean isSlownode;
public HeartbeatResponse(DatanodeCommand[] cmds,
NNHAStatusHeartbeat haStatus, RollingUpgradeStatus rollingUpdateStatus,
long fullBlockReportLeaseId) {
this(cmds, haStatus, rollingUpdateStatus, fullBlockReportLeaseId, false);
}
public HeartbeatResponse(DatanodeCommand[] cmds,
NNHAStatusHeartbeat haStatus, RollingUpgradeStatus rollingUpdateStatus,
long fullBlockReportLeaseId, boolean isSlownode) {
commands = cmds;
this.haStatus = haStatus;
this.rollingUpdateStatus = rollingUpdateStatus;
this.fullBlockReportLeaseId = fullBlockReportLeaseId;
this.isSlownode = isSlownode;
}
public DatanodeCommand[] getCommands() {
return commands;
}
public NNHAStatusHeartbeat getNameNodeHaState() {
return haStatus;
}
public RollingUpgradeStatus getRollingUpdateStatus() {
return rollingUpdateStatus;
}
public long getFullBlockReportLeaseId() {
return fullBlockReportLeaseId;
}
public boolean getIsSlownode() {
return isSlownode;
}
}
| HeartbeatResponse |
java | google__dagger | javatests/dagger/internal/codegen/MissingBindingValidationTest.java | {
"start": 28026,
"end": 28309
} | class ____<T> {",
" @Inject Generic(T t) {}",
"}");
Source testClass =
CompilerTests.javaSource("test.TestClass",
"package test;",
"",
"import javax.inject.Inject;",
"import java.util.List;",
"",
"final | Generic |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleGroupingAggregatorFunctionTests.java | {
"start": 916,
"end": 3130
} | class ____ extends GroupingAggregatorFunctionTestCase {
@Override
protected SourceOperator simpleInput(BlockFactory blockFactory, int end) {
double[][] samples = new double[][] {
{ 1.2, 1.25, 2.0, 2.0, 4.3, 6.0, 9.0 },
{ 0.1, 1.5, 2.0, 3.0, 4.0, 7.5, 100.0 },
{ 0.2, 1.75, 2.0, 2.5 },
{ 0.5, 3.0, 3.0, 3.0, 4.3 },
{ 0.25, 1.5, 3.0 } };
List<Tuple<Long, Double>> values = new ArrayList<>();
for (int i = 0; i < samples.length; i++) {
List<Double> list = Arrays.stream(samples[i]).boxed().collect(Collectors.toList());
Randomness.shuffle(list);
for (double v : list) {
values.add(Tuple.tuple((long) i, v));
}
}
return new LongDoubleTupleBlockSourceOperator(blockFactory, values.subList(0, Math.min(values.size(), end)));
}
@Override
protected AggregatorFunctionSupplier aggregatorFunction() {
return new MedianAbsoluteDeviationDoubleAggregatorFunctionSupplier();
}
@Override
protected String expectedDescriptionOfAggregator() {
return "median_absolute_deviation of doubles";
}
@Override
protected void assertSimpleGroup(List<Page> input, Block result, int position, Long group) {
double medianAbsoluteDeviation = medianAbsoluteDeviation(input.stream().flatMapToDouble(p -> allDoubles(p, group)));
assertThat(((DoubleBlock) result).getDouble(position), closeTo(medianAbsoluteDeviation, medianAbsoluteDeviation * .000001));
}
static double medianAbsoluteDeviation(DoubleStream s) {
double[] data = s.toArray();
double median = median(Arrays.stream(data));
return median(Arrays.stream(data).map(d -> Math.abs(median - d)));
}
static double median(DoubleStream s) {
// The input data is small enough that tdigest will find the actual median.
double[] data = s.sorted().toArray();
if (data.length == 0) {
return 0;
}
int c = data.length / 2;
return data.length % 2 == 0 ? (data[c - 1] + data[c]) / 2 : data[c];
}
}
| MedianAbsoluteDeviationDoubleGroupingAggregatorFunctionTests |
java | eclipse-vertx__vert.x | vertx-core-logging/src/main/java/io/vertx/core/internal/logging/LoggerFactory.java | {
"start": 1731,
"end": 2825
} | class ____ visible by AOT compilers
delegateFactory = new JULLogDelegateFactory();
}
private static boolean configureWith(String name, boolean shortName, ClassLoader loader) {
try {
Class<?> clazz = Class.forName(shortName ? "io.vertx.core.logging." + name + "LogDelegateFactory" : name, true, loader);
LogDelegateFactory factory = (LogDelegateFactory) clazz.getDeclaredConstructor().newInstance();
if (!factory.isAvailable()) {
return false;
}
delegateFactory = factory;
return true;
} catch (Throwable ignore) {
return false;
}
}
/**
* Like {@link #getLogger(String)}, using the provided {@code clazz} name.
*/
public static Logger getLogger(Class<?> clazz) {
String name = clazz.isAnonymousClass() ?
clazz.getEnclosingClass().getCanonicalName() :
clazz.getCanonicalName();
return getLogger(name);
}
/**
* Get the logger with the specified {@code name}.
*/
public static Logger getLogger(String name) {
return new LoggerAdapter(delegateFactory.createDelegate(name));
}
}
| is |
java | spring-projects__spring-boot | module/spring-boot-webflux/src/test/java/org/springframework/boot/webflux/autoconfigure/WebFluxAutoConfigurationTests.java | {
"start": 51292,
"end": 51408
} | class ____ implements WebFluxConfigurer {
}
@Configuration(proxyBeanMethods = false)
static | LowPrecedenceConfigurer |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/core/GenericTypeResolver.java | {
"start": 11778,
"end": 12346
} | class ____ implements ResolvableType.VariableResolver {
private final Map<TypeVariable, Type> typeVariableMap;
public TypeVariableMapVariableResolver(Map<TypeVariable, Type> typeVariableMap) {
this.typeVariableMap = typeVariableMap;
}
@Override
public @Nullable ResolvableType resolveVariable(TypeVariable<?> variable) {
Type type = this.typeVariableMap.get(variable);
return (type != null ? ResolvableType.forType(type) : null);
}
@Override
public Object getSource() {
return this.typeVariableMap;
}
}
}
| TypeVariableMapVariableResolver |
java | grpc__grpc-java | examples/src/main/java/io/grpc/examples/grpcproxy/GrpcProxy.java | {
"start": 4565,
"end": 5947
} | class ____ extends ClientCall.Listener<RespT> {
private final ServerCall<?, RespT> serverCall;
// Hold 'this' lock when accessing
private boolean needToRequest;
public ResponseProxy(ServerCall<?, RespT> serverCall) {
this.serverCall = serverCall;
}
@Override public void onClose(Status status, Metadata trailers) {
serverCall.close(status, trailers);
}
@Override public void onHeaders(Metadata headers) {
serverCall.sendHeaders(headers);
}
@Override public void onMessage(RespT message) {
serverCall.sendMessage(message);
synchronized (this) {
if (serverCall.isReady()) {
serverCallListener.clientCall.request(1);
} else {
// The incoming call is not ready for more responses. Stop requesting additional data
// and wait for it to catch up.
needToRequest = true;
}
}
}
@Override public void onReady() {
serverCallListener.onClientReady();
}
// Called from RequestProxy, which is a different thread than the ClientCall.Listener
// callbacks.
synchronized void onServerReady() {
if (needToRequest) {
serverCallListener.clientCall.request(1);
needToRequest = false;
}
}
}
}
private static | ResponseProxy |
java | spring-projects__spring-security | web/src/main/java/org/springframework/security/web/util/matcher/RequestMatcherEditor.java | {
"start": 1114,
"end": 1301
} | class ____ extends PropertyEditorSupport {
@Override
public void setAsText(String text) throws IllegalArgumentException {
setValue(new ELRequestMatcher(text));
}
}
| RequestMatcherEditor |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/context/ApplicationContextAware.java | {
"start": 919,
"end": 1102
} | interface ____ sense for example when an object
* requires access to a set of collaborating beans. Note that configuration
* via bean references is preferable to implementing this | makes |
java | bumptech__glide | instrumentation/src/androidTest/java/com/bumptech/glide/test/ResourceIds.java | {
"start": 340,
"end": 422
} | class ____ {
private ResourceIds() {
// Utility class.
}
public | ResourceIds |
java | google__guava | guava/src/com/google/common/base/Optional.java | {
"start": 4260,
"end": 13656
} | class ____<T> implements Serializable {
/**
* Returns an {@code Optional} instance with no contained reference.
*
* <p><b>Comparison to {@code java.util.Optional}:</b> this method is equivalent to Java 8's
* {@code Optional.empty}.
*/
public static <T> Optional<T> absent() {
return Absent.withType();
}
/**
* Returns an {@code Optional} instance containing the given non-null reference. To have {@code
* null} treated as {@link #absent}, use {@link #fromNullable} instead.
*
* <p><b>Comparison to {@code java.util.Optional}:</b> no differences.
*
* @throws NullPointerException if {@code reference} is null
*/
public static <T> Optional<T> of(T reference) {
return new Present<>(checkNotNull(reference));
}
/**
* If {@code nullableReference} is non-null, returns an {@code Optional} instance containing that
* reference; otherwise returns {@link Optional#absent}.
*
* <p><b>Comparison to {@code java.util.Optional}:</b> this method is equivalent to Java 8's
* {@code Optional.ofNullable}.
*/
public static <T> Optional<T> fromNullable(@Nullable T nullableReference) {
return (nullableReference == null) ? Optional.<T>absent() : new Present<T>(nullableReference);
}
/**
* Returns the equivalent {@code com.google.common.base.Optional} value to the given {@code
* java.util.Optional}, or {@code null} if the argument is null.
*
* @since 21.0 (but only since 33.4.0 in the Android flavor)
*/
@SuppressWarnings("NullableOptional") // Null passthrough is reasonable for type conversions
public static <T> @Nullable Optional<T> fromJavaUtil(
java.util.@Nullable Optional<T> javaUtilOptional) {
return (javaUtilOptional == null) ? null : fromNullable(javaUtilOptional.orElse(null));
}
/**
* Returns the equivalent {@code java.util.Optional} value to the given {@code
* com.google.common.base.Optional}, or {@code null} if the argument is null.
*
* <p>If {@code googleOptional} is known to be non-null, use {@code googleOptional.toJavaUtil()}
* instead.
*
* <p>Unfortunately, the method reference {@code Optional::toJavaUtil} will not work, because it
* could refer to either the static or instance version of this method. Write out the lambda
* expression {@code o -> Optional.toJavaUtil(o)} instead.
*
* @since 21.0 (but only since 33.4.0 in the Android flavor)
*/
@SuppressWarnings({
"AmbiguousMethodReference", // We chose the name despite knowing this risk.
"NullableOptional", // Null passthrough is reasonable for type conversions
})
public static <T> java.util.@Nullable Optional<T> toJavaUtil(
@Nullable Optional<T> googleOptional) {
return googleOptional == null ? null : googleOptional.toJavaUtil();
}
/**
* Returns the equivalent {@code java.util.Optional} value to this optional.
*
* <p>Unfortunately, the method reference {@code Optional::toJavaUtil} will not work, because it
* could refer to either the static or instance version of this method. Write out the lambda
* expression {@code o -> o.toJavaUtil()} instead.
*
* @since 21.0 (but only since 33.4.0 in the Android flavor)
*/
@SuppressWarnings("AmbiguousMethodReference") // We chose the name despite knowing this risk.
public java.util.Optional<T> toJavaUtil() {
return java.util.Optional.ofNullable(orNull());
}
Optional() {}
/**
* Returns {@code true} if this holder contains a (non-null) instance.
*
* <p><b>Comparison to {@code java.util.Optional}:</b> no differences.
*/
public abstract boolean isPresent();
/**
* Returns the contained instance, which must be present. If the instance might be absent, use
* {@link #or(Object)} or {@link #orNull} instead.
*
* <p><b>Comparison to {@code java.util.Optional}:</b> when the value is absent, this method
* throws {@link IllegalStateException}, whereas the {@code java.util} counterpart throws {@link
* java.util.NoSuchElementException NoSuchElementException}.
*
* @throws IllegalStateException if the instance is absent ({@link #isPresent} returns {@code
* false}); depending on this <i>specific</i> exception type (over the more general {@link
* RuntimeException}) is discouraged
*/
public abstract T get();
/**
* Returns the contained instance if it is present; {@code defaultValue} otherwise. If no default
* value should be required because the instance is known to be present, use {@link #get()}
* instead. For a default value of {@code null}, use {@link #orNull}.
*
* <p>Note about generics: The signature {@code public T or(T defaultValue)} is overly
* restrictive. However, the ideal signature, {@code public <S super T> S or(S)}, is not legal
* Java. As a result, some sensible operations involving subtypes are compile errors:
*
* {@snippet :
* Optional<Integer> optionalInt = getSomeOptionalInt();
* Number value = optionalInt.or(0.5); // error
*
* FluentIterable<? extends Number> numbers = getSomeNumbers();
* Optional<? extends Number> first = numbers.first();
* Number value = first.or(0.5); // error
* }
*
* <p>As a workaround, it is always safe to cast an {@code Optional<? extends T>} to {@code
* Optional<T>}. Casting either of the above example {@code Optional} instances to {@code
* Optional<Number>} (where {@code Number} is the desired output type) solves the problem:
*
* {@snippet :
* Optional<Number> optionalInt = (Optional) getSomeOptionalInt();
* Number value = optionalInt.or(0.5); // fine
*
* FluentIterable<? extends Number> numbers = getSomeNumbers();
* Optional<Number> first = (Optional) numbers.first();
* Number value = first.or(0.5); // fine
* }
*
* <p><b>Comparison to {@code java.util.Optional}:</b> this method is similar to Java 8's {@code
* Optional.orElse}, but will not accept {@code null} as a {@code defaultValue} ({@link #orNull}
* must be used instead). As a result, the value returned by this method is guaranteed non-null,
* which is not the case for the {@code java.util} equivalent.
*/
public abstract T or(T defaultValue);
/**
* Returns this {@code Optional} if it has a value present; {@code secondChoice} otherwise.
*
* <p><b>Comparison to {@code java.util.Optional}:</b> this method has no equivalent in Java 8's
* {@code Optional} class; write {@code thisOptional.isPresent() ? thisOptional : secondChoice}
* instead.
*/
public abstract Optional<T> or(Optional<? extends T> secondChoice);
/**
* Returns the contained instance if it is present; {@code supplier.get()} otherwise.
*
* <p><b>Comparison to {@code java.util.Optional}:</b> this method is similar to Java 8's {@code
* Optional.orElseGet}, except when {@code supplier} returns {@code null}. In this case this
* method throws an exception, whereas the Java 8+ method returns the {@code null} to the caller.
*
* @throws NullPointerException if this optional's value is absent and the supplier returns {@code
* null}
*/
public abstract T or(Supplier<? extends T> supplier);
/**
* Returns the contained instance if it is present; {@code null} otherwise. If the instance is
* known to be present, use {@link #get()} instead.
*
* <p><b>Comparison to {@code java.util.Optional}:</b> this method is equivalent to Java 8's
* {@code Optional.orElse(null)}.
*/
public abstract @Nullable T orNull();
/**
* Returns an immutable singleton {@link Set} whose only element is the contained instance if it
* is present; an empty immutable {@link Set} otherwise.
*
* <p><b>Comparison to {@code java.util.Optional}:</b> this method has no equivalent in Java 8's
* {@code Optional} class. However, this common usage:
*
* {@snippet :
* for (Foo foo : possibleFoo.asSet()) {
* doSomethingWith(foo);
* }
* }
*
* ... can be replaced with:
*
* {@snippet :
* possibleFoo.ifPresent(foo -> doSomethingWith(foo));
* }
*
* <p><b>Java 9 users:</b> some use cases can be written with calls to {@code optional.stream()}.
*
* @since 11.0
*/
public abstract Set<T> asSet();
/**
* If the instance is present, it is transformed with the given {@link Function}; otherwise,
* {@link Optional#absent} is returned.
*
* <p><b>Comparison to {@code java.util.Optional}:</b> this method is similar to Java 8's {@code
* Optional.map}, except when {@code function} returns {@code null}. In this case this method
* throws an exception, whereas the Java 8+ method returns {@code Optional.absent()}.
*
* @throws NullPointerException if the function returns {@code null}
* @since 12.0
*/
public abstract <V> Optional<V> transform(Function<? super T, V> function);
/**
* Returns {@code true} if {@code object} is an {@code Optional} instance, and either the
* contained references are {@linkplain Object#equals equal} to each other or both are absent.
* Note that {@code Optional} instances of differing parameterized types can be equal.
*
* <p><b>Comparison to {@code java.util.Optional}:</b> no differences.
*/
@Override
public abstract boolean equals(@Nullable Object object);
/**
* Returns a hash code for this instance.
*
* <p><b>Comparison to {@code java.util.Optional}:</b> this | Optional |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgressMetrics.java | {
"start": 1548,
"end": 4637
} | class ____ implements MetricsSource {
private static final MetricsInfo STARTUP_PROGRESS_METRICS_INFO =
info("StartupProgress", "NameNode startup progress");
private final StartupProgress startupProgress;
/**
* Registers StartupProgressMetrics linked to the given StartupProgress.
*
* @param prog StartupProgress to link
*/
public static void register(StartupProgress prog) {
new StartupProgressMetrics(prog);
}
/**
* Creates a new StartupProgressMetrics registered with the metrics system.
*
* @param startupProgress StartupProgress to link
*/
public StartupProgressMetrics(StartupProgress startupProgress) {
this.startupProgress = startupProgress;
DefaultMetricsSystem.instance().register(
STARTUP_PROGRESS_METRICS_INFO.name(),
STARTUP_PROGRESS_METRICS_INFO.description(), this);
}
@Override
public void getMetrics(MetricsCollector collector, boolean all) {
StartupProgressView prog = startupProgress.createView();
MetricsRecordBuilder builder = collector.addRecord(
STARTUP_PROGRESS_METRICS_INFO);
builder.addCounter(info("ElapsedTime", "overall elapsed time"),
prog.getElapsedTime());
builder.addGauge(info("PercentComplete", "overall percent complete"),
prog.getPercentComplete());
for (Phase phase: prog.getPhases()) {
addCounter(builder, phase, "Count", " count", prog.getCount(phase));
addCounter(builder, phase, "ElapsedTime", " elapsed time",
prog.getElapsedTime(phase));
addCounter(builder, phase, "Total", " total", prog.getTotal(phase));
addGauge(builder, phase, "PercentComplete", " percent complete",
prog.getPercentComplete(phase));
}
}
/**
* Adds a counter with a name built by using the specified phase's name as
* prefix and then appending the specified suffix.
*
* @param builder MetricsRecordBuilder to receive counter
* @param phase Phase to add
* @param nameSuffix String suffix of metric name
* @param descSuffix String suffix of metric description
* @param value long counter value
*/
private static void addCounter(MetricsRecordBuilder builder, Phase phase,
String nameSuffix, String descSuffix, long value) {
MetricsInfo metricsInfo = info(phase.getName() + nameSuffix,
phase.getDescription() + descSuffix);
builder.addCounter(metricsInfo, value);
}
/**
* Adds a gauge with a name built by using the specified phase's name as prefix
* and then appending the specified suffix.
*
* @param builder MetricsRecordBuilder to receive counter
* @param phase Phase to add
* @param nameSuffix String suffix of metric name
* @param descSuffix String suffix of metric description
* @param value float gauge value
*/
private static void addGauge(MetricsRecordBuilder builder, Phase phase,
String nameSuffix, String descSuffix, float value) {
MetricsInfo metricsInfo = info(phase.getName() + nameSuffix,
phase.getDescription() + descSuffix);
builder.addGauge(metricsInfo, value);
}
}
| StartupProgressMetrics |
java | mockito__mockito | mockito-core/src/test/java/org/mockitousage/serialization/DeepStubsSerializableTest.java | {
"start": 3722,
"end": 3870
} | class ____ extends Container<List<String>> {
public ListContainer(List<String> list) {
super(list);
}
}
}
| ListContainer |
java | apache__flink | flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/filemapping/MappingEntrySource.java | {
"start": 1281,
"end": 1895
} | class ____ {
protected static final Logger LOG = LoggerFactory.getLogger(MappingEntrySource.class);
MappingEntrySource() {}
public abstract void delete(boolean recursive) throws IOException;
public abstract @Nullable Path getFilePath();
public abstract long getSize() throws IOException;
public abstract FSDataInputStream openInputStream() throws IOException;
public abstract FSDataInputStream openInputStream(int bufferSize) throws IOException;
public abstract boolean cacheable();
public abstract StreamStateHandle toStateHandle() throws IOException;
}
| MappingEntrySource |
java | google__guava | android/guava/src/com/google/common/collect/Iterators.java | {
"start": 47601,
"end": 49229
} | class ____<E extends @Nullable Object> {
final PeekingIterator<E> iterator;
final int index;
IndexedIterator(PeekingIterator<E> iterator, int index) {
this.iterator = iterator;
this.index = index;
}
}
final Queue<IndexedIterator<T>> queue;
MergingIterator(
Iterable<? extends Iterator<? extends T>> iterators, Comparator<? super T> itemComparator) {
// A comparator that's used by the heap, allowing the heap
// to be sorted based on the top of each iterator, with insertion order as tiebreaker
Comparator<IndexedIterator<T>> heapComparator =
(o1, o2) ->
ComparisonChain.start()
.compare(o1.iterator.peek(), o2.iterator.peek(), itemComparator)
// When elements are equal, use insertion order to maintain stability
.compare(o1.index, o2.index)
.result();
queue = new PriorityQueue<>(2, heapComparator);
int index = 0;
for (Iterator<? extends T> iterator : iterators) {
if (iterator.hasNext()) {
queue.add(new IndexedIterator<>(peekingIterator(iterator), index++));
}
}
}
@Override
public boolean hasNext() {
return !queue.isEmpty();
}
@Override
@ParametricNullness
public T next() {
IndexedIterator<T> nextIndexed = queue.remove();
PeekingIterator<T> nextIter = nextIndexed.iterator;
T next = nextIter.next();
if (nextIter.hasNext()) {
queue.add(nextIndexed);
}
return next;
}
}
private static final | IndexedIterator |
java | quarkusio__quarkus | independent-projects/bootstrap/maven-resolver/src/main/java/io/quarkus/bootstrap/resolver/maven/RetryLockAcquisitionErrorHandler.java | {
"start": 720,
"end": 3943
} | class ____ extends FailAtCompletionErrorHandler {
private static final Logger log = Logger.getLogger(RetryLockAcquisitionErrorHandler.class);
private final ConcurrentLinkedDeque<ModelResolutionTask> failedTasks = new ConcurrentLinkedDeque<>();
private final AtomicBoolean skipRetry = new AtomicBoolean();
@Override
public void handleError(ModelResolutionTask task, Exception error) {
// collect the exception
super.handleError(task, error);
// check if it's a failure to acquire a lock
if (isRetriableError(error)) {
failedTasks.add(task);
} else {
// if it's a different error, we do not re-try
skipRetry.set(true);
}
}
@Override
public void allTasksFinished() {
if (isEmpty()) {
return;
}
if (skipRetry.get()) {
// this will throw an exception
super.allTasksFinished();
}
final ModelResolutionTaskRunner blockingRunner = ModelResolutionTaskRunner.getBlockingTaskRunner();
log.warn("Re-trying dependency resolution tasks previously failed to acquire locks in the local Maven repository");
for (ModelResolutionTask task : failedTasks) {
blockingRunner.run(task);
}
}
private boolean isRetriableError(Exception error) {
return isCouldNotAcquireLockError(error) || isMissingFileError(error);
}
private static boolean isCouldNotAcquireLockError(Exception error) {
// it's either "Could not acquire read lock" or "Could not acquire write lock"
return error.getLocalizedMessage().contains("Could not acquire ");
}
/**
* It could happen, especially in Maven 3.8, that multiple threads could end up writing/reading
* the same temporary files while resolving the same artifact. Once one of the threads completes
* resolving the artifact, the temporary file will be renamed to the target artifact file
* and the other thread will fail with one of the file-not-found exceptions.
* In this case, we simply re-try the collect request, which should now pick up the already resolved artifact.
* <p>
* Checks whether the cause of this exception a kind of no-such-file exception.
* This error should not be seen with later versions of Maven 3.9.
*
* @param error top level exception
* @return whether cause is a missing file
*/
private static boolean isMissingFileError(Exception error) {
if (!(error instanceof DeploymentInjectionException)) {
return false;
}
Throwable t = error.getCause();
if (!(t instanceof DependencyCollectionException)) {
return false;
}
while (t != null) {
var cause = t.getCause();
// It looks like in Maven 3.9 it's NoSuchFileException, while in Maven 3.8 it's FileNotFoundException
if (cause instanceof NoSuchFileException e) {
return true;
} else if (cause instanceof FileNotFoundException) {
return true;
}
t = cause;
}
return false;
}
}
| RetryLockAcquisitionErrorHandler |
java | apache__flink | flink-core/src/test/java/org/apache/flink/api/common/serialization/SerializerConfigImplTest.java | {
"start": 11941,
"end": 12481
} | class ____
extends Serializer<SerializerConfigImplTest.TestSerializer1> implements Serializable {
@Override
public void write(
Kryo kryo, Output output, SerializerConfigImplTest.TestSerializer1 object) {}
@Override
public SerializerConfigImplTest.TestSerializer1 read(
Kryo kryo,
Input input,
Class<? extends SerializerConfigImplTest.TestSerializer1> type) {
return null;
}
}
private static | TestSerializer2 |
java | apache__camel | components/camel-google/camel-google-pubsub/src/main/java/org/apache/camel/component/google/pubsub/GooglePubsubConsumer.java | {
"start": 2270,
"end": 5042
} | class ____ extends DefaultConsumer {
private final Logger localLog;
private final GooglePubsubEndpoint endpoint;
private final Processor processor;
private ExecutorService executor;
private final List<Subscriber> subscribers;
private final Set<ApiFuture<PullResponse>> pendingSynchronousPullResponses;
private final HeaderFilterStrategy headerFilterStrategy;
GooglePubsubConsumer(GooglePubsubEndpoint endpoint, Processor processor) {
super(endpoint, processor);
this.endpoint = endpoint;
this.processor = processor;
this.subscribers = Collections.synchronizedList(new LinkedList<>());
this.pendingSynchronousPullResponses = ConcurrentHashMap.newKeySet();
String loggerId = endpoint.getLoggerId();
if (Strings.isNullOrEmpty(loggerId)) {
loggerId = this.getClass().getName();
}
localLog = LoggerFactory.getLogger(loggerId);
headerFilterStrategy = endpoint.getHeaderFilterStrategy();
}
@Override
protected void doStart() throws Exception {
super.doStart();
localLog.info("Starting Google PubSub consumer for {}/{}", endpoint.getProjectId(), endpoint.getDestinationName());
executor = endpoint.createExecutor(this);
for (int i = 0; i < endpoint.getConcurrentConsumers(); i++) {
executor.submit(new SubscriberWrapper());
}
}
@Override
protected void doStop() throws Exception {
localLog.info("Stopping Google PubSub consumer for {}/{}", endpoint.getProjectId(), endpoint.getDestinationName());
synchronized (subscribers) {
if (!subscribers.isEmpty()) {
localLog.info("Stopping subscribers for {}/{}", endpoint.getProjectId(), endpoint.getDestinationName());
subscribers.forEach(AbstractApiService::stopAsync);
}
}
safeCancelSynchronousPullResponses();
if (executor != null) {
if (getEndpoint() != null && getEndpoint().getCamelContext() != null) {
getEndpoint().getCamelContext().getExecutorServiceManager().shutdownGraceful(executor);
} else {
executor.shutdownNow();
}
}
executor = null;
super.doStop();
}
private void safeCancelSynchronousPullResponses() {
for (ApiFuture<PullResponse> pullResponseApiFuture : pendingSynchronousPullResponses) {
try {
pullResponseApiFuture.cancel(true);
} catch (Exception e) {
localLog.warn("Exception while cancelling pending synchronous pull response", e);
}
}
pendingSynchronousPullResponses.clear();
}
private | GooglePubsubConsumer |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/internal/uris/Uris_assertHasScheme_Test.java | {
"start": 1102,
"end": 2141
} | class ____ extends UrisBaseTest {
@ParameterizedTest
@CsvSource({
"http://example.com/pages/, http",
"example.com/pages/, "
})
void should_pass_if_actual_uri_has_the_given_scheme(URI uri, String expectedScheme) {
// WHEN/THEN
uris.assertHasScheme(info, uri, expectedScheme);
}
@Test
void should_fail_if_actual_is_null() {
// GIVEN
URI uri = null;
String expectedScheme = "http";
// WHEN
var assertionError = expectAssertionError(() -> uris.assertHasScheme(info, uri, expectedScheme));
// THEN
then(assertionError).hasMessage(actualIsNull());
}
@Test
void should_fail_if_actual_scheme_is_not_the_expected_scheme() {
// GIVEN
URI uri = URI.create("http://example.com/pages/");
String expectedScheme = "ftp";
// WHEN
var assertionError = expectAssertionError(() -> uris.assertHasScheme(info, uri, expectedScheme));
// THEN
then(assertionError).hasMessage(shouldHaveScheme(uri, expectedScheme).create());
}
}
| Uris_assertHasScheme_Test |
java | elastic__elasticsearch | x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenAction.java | {
"start": 2183,
"end": 10172
} | class ____ extends HandledTransportAction<CreateTokenRequest, CreateTokenResponse> {
private static final String DEFAULT_SCOPE = "full";
private final ThreadPool threadPool;
private final TokenService tokenService;
private final AuthenticationService authenticationService;
private final SecurityContext securityContext;
@Inject
public TransportCreateTokenAction(
ThreadPool threadPool,
TransportService transportService,
ActionFilters actionFilters,
TokenService tokenService,
AuthenticationService authenticationService,
SecurityContext securityContext
) {
super(CreateTokenAction.NAME, transportService, actionFilters, CreateTokenRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE);
this.threadPool = threadPool;
this.tokenService = tokenService;
this.authenticationService = authenticationService;
this.securityContext = securityContext;
}
@Override
protected void doExecute(Task task, CreateTokenRequest request, ActionListener<CreateTokenResponse> listener) {
CreateTokenRequest.GrantType type = CreateTokenRequest.GrantType.fromString(request.getGrantType());
assert type != null : "type should have been validated in the action";
switch (type) {
case PASSWORD, KERBEROS -> authenticateAndCreateToken(type, request, listener);
case CLIENT_CREDENTIALS -> {
Authentication authentication = securityContext.getAuthentication();
if (authentication.isServiceAccount()) {
// Service account itself cannot create OAuth2 tokens.
listener.onFailure(new ElasticsearchException("OAuth2 token creation is not supported for service accounts"));
return;
}
createToken(type, request, authentication, authentication, false, listener);
}
default -> listener.onFailure(
new IllegalStateException("grant_type [" + request.getGrantType() + "] is not supported by the create token action")
);
}
}
private void authenticateAndCreateToken(GrantType grantType, CreateTokenRequest request, ActionListener<CreateTokenResponse> listener) {
Authentication originatingAuthentication = securityContext.getAuthentication();
try (ThreadContext.StoredContext ignore = threadPool.getThreadContext().stashContext()) {
final Tuple<AuthenticationToken, Optional<Exception>> tokenAndException = extractAuthenticationToken(grantType, request);
if (tokenAndException.v2().isPresent()) {
listener.onFailure(tokenAndException.v2().get());
return;
}
final AuthenticationToken authToken = tokenAndException.v1();
if (authToken == null) {
listener.onFailure(
new IllegalStateException("grant_type [" + request.getGrantType() + "] is not supported by the create token action")
);
return;
}
authenticationService.authenticate(CreateTokenAction.NAME, request, authToken, ActionListener.wrap(authentication -> {
clearCredentialsFromRequest(grantType, request);
if (authentication != null) {
createToken(grantType, request, authentication, originatingAuthentication, true, listener);
} else {
listener.onFailure(new UnsupportedOperationException("cannot create token if authentication is not allowed"));
}
}, e -> {
clearCredentialsFromRequest(grantType, request);
listener.onFailure(e);
}));
}
}
private static Tuple<AuthenticationToken, Optional<Exception>> extractAuthenticationToken(
GrantType grantType,
CreateTokenRequest request
) {
AuthenticationToken authToken = null;
if (grantType == GrantType.PASSWORD) {
authToken = new UsernamePasswordToken(request.getUsername(), request.getPassword());
} else if (grantType == GrantType.KERBEROS) {
SecureString kerberosTicket = request.getKerberosTicket();
String base64EncodedToken = kerberosTicket.toString();
byte[] decodedKerberosTicket;
try {
decodedKerberosTicket = Base64.getDecoder().decode(base64EncodedToken);
} catch (IllegalArgumentException iae) {
return new Tuple<>(
null,
Optional.of(new UnsupportedOperationException("could not decode base64 kerberos ticket " + base64EncodedToken, iae))
);
}
authToken = new KerberosAuthenticationToken(decodedKerberosTicket);
}
return new Tuple<>(authToken, Optional.empty());
}
private static void clearCredentialsFromRequest(GrantType grantType, CreateTokenRequest request) {
if (grantType == GrantType.PASSWORD) {
request.getPassword().close();
} else if (grantType == GrantType.KERBEROS) {
request.getKerberosTicket().close();
}
}
private void createToken(
GrantType grantType,
CreateTokenRequest request,
Authentication authentication,
Authentication originatingAuth,
boolean includeRefreshToken,
ActionListener<CreateTokenResponse> listener
) {
tokenService.createOAuth2Tokens(
authentication,
originatingAuth,
Collections.emptyMap(),
includeRefreshToken,
ActionListener.wrap(tokenResult -> {
final String scope = getResponseScopeValue(request.getScope());
final String base64AuthenticateResponse = (grantType == GrantType.KERBEROS) ? extractOutToken() : null;
final CreateTokenResponse response = new CreateTokenResponse(
tokenResult.getAccessToken(),
tokenService.getExpirationDelay(),
scope,
tokenResult.getRefreshToken(),
base64AuthenticateResponse,
authentication
);
listener.onResponse(response);
}, listener::onFailure)
);
}
private String extractOutToken() {
List<String> values = threadPool.getThreadContext().getResponseHeaders().get(KerberosAuthenticationToken.WWW_AUTHENTICATE);
if (values != null && values.size() == 1) {
final String wwwAuthenticateHeaderValue = values.get(0);
// it may contain base64 encoded token that needs to be sent to client if mutual auth was requested
if (wwwAuthenticateHeaderValue.startsWith(KerberosAuthenticationToken.NEGOTIATE_AUTH_HEADER_PREFIX)) {
final String base64EncodedToken = wwwAuthenticateHeaderValue.substring(
KerberosAuthenticationToken.NEGOTIATE_AUTH_HEADER_PREFIX.length()
).trim();
return base64EncodedToken;
}
}
threadPool.getThreadContext().getResponseHeaders().remove(KerberosAuthenticationToken.WWW_AUTHENTICATE);
return null;
}
static String getResponseScopeValue(String requestScope) {
final String scope;
// the OAuth2.0 RFC requires the scope to be provided in the
// response if it differs from the user provided scope. If the
// scope was not provided then it does not need to be returned.
// if the scope is not supported, the value of the scope that the
// token is for must be returned
if (requestScope != null) {
scope = DEFAULT_SCOPE; // this is the only non-null value that is currently supported
} else {
scope = null;
}
return scope;
}
}
| TransportCreateTokenAction |
java | alibaba__fastjson | src/main/java/com/alibaba/fastjson/parser/deserializer/OptionalCodec.java | {
"start": 539,
"end": 3804
} | class ____ implements ObjectSerializer, ObjectDeserializer {
public static OptionalCodec instance = new OptionalCodec();
@SuppressWarnings("unchecked")
public <T> T deserialze(DefaultJSONParser parser, Type type, Object fieldName) {
if (type == OptionalInt.class) {
Object obj = parser.parseObject(Integer.class);
Integer value = TypeUtils.castToInt(obj);
if (value == null) {
return (T) OptionalInt.empty();
} else {
return (T) OptionalInt.of(value);
}
}
if (type == OptionalLong.class) {
Object obj = parser.parseObject(Long.class);
Long value = TypeUtils.castToLong(obj);
if (value == null) {
return (T) OptionalLong.empty();
} else {
return (T) OptionalLong.of(value);
}
}
if (type == OptionalDouble.class) {
Object obj = parser.parseObject(Double.class);
Double value = TypeUtils.castToDouble(obj);
if (value == null) {
return (T) OptionalDouble.empty();
} else {
return (T) OptionalDouble.of(value);
}
}
type = TypeUtils.unwrapOptional(type);
Object value = parser.parseObject(type);
if (value == null) {
return (T) Optional.empty();
}
return (T) Optional.of(value);
}
public int getFastMatchToken() {
return JSONToken.LBRACE;
}
public void write(JSONSerializer serializer, Object object, Object fieldName, Type fieldType,
int features) throws IOException {
if (object == null) {
serializer.writeNull();
return;
}
if (object instanceof Optional) {
Optional<?> optional = (Optional<?>) object;
Object value = optional.isPresent() ? optional.get() : null;
serializer.write(value);
return;
}
if (object instanceof OptionalDouble) {
OptionalDouble optional = (OptionalDouble) object;
if (optional.isPresent()) {
double value = optional.getAsDouble();
serializer.write(value);
} else {
serializer.writeNull();
}
return;
}
if (object instanceof OptionalInt) {
OptionalInt optional = (OptionalInt) object;
if (optional.isPresent()) {
int value = optional.getAsInt();
serializer.out.writeInt(value);
} else {
serializer.writeNull();
}
return;
}
if (object instanceof OptionalLong) {
OptionalLong optional = (OptionalLong) object;
if (optional.isPresent()) {
long value = optional.getAsLong();
serializer.out.writeLong(value);
} else {
serializer.writeNull();
}
return;
}
throw new JSONException("not support optional : " + object.getClass());
}
}
| OptionalCodec |
java | google__jimfs | jimfs/src/main/java/com/google/common/jimfs/SystemJimfsFileSystemProvider.java | {
"start": 2351,
"end": 3987
} | class ____ extends FileSystemProvider {
/**
* Env map key that maps to the already-created {@code FileSystem} instance in {@code
* newFileSystem}.
*/
static final String FILE_SYSTEM_KEY = "fileSystem";
/**
* Cache of file systems that have been created but not closed.
*
* <p>This cache is static to ensure that even when this provider isn't loaded by the system class
* loader, meaning that a new instance of it must be created each time one of the methods on
* {@link FileSystems} or {@link Paths#get(URI)} is called, cached file system instances are still
* available.
*
* <p>The cache uses weak values so that it doesn't prevent file systems that are created but not
* closed from being garbage collected if no references to them are held elsewhere. This is a
* compromise between ensuring that any file URI continues to work as long as the file system
* hasn't been closed (which is technically the correct thing to do but unlikely to be something
* that most users care about) and ensuring that users don't get unexpected leaks of large amounts
* of memory because they're creating many file systems in tests but forgetting to close them
* (which seems likely to happen sometimes). Users that want to ensure that a file system won't be
* garbage collected just need to ensure they hold a reference to it somewhere for as long as they
* need it to stick around.
*/
private static final ConcurrentMap<URI, FileSystem> fileSystems =
new MapMaker().weakValues().makeMap();
/** @deprecated Not intended to be called directly; this | SystemJimfsFileSystemProvider |
java | netty__netty | codec-dns/src/main/java/io/netty/handler/codec/dns/DefaultDnsQuestion.java | {
"start": 782,
"end": 1321
} | class ____ extends AbstractDnsRecord implements DnsQuestion {
/**
* Creates a new {@link #CLASS_IN IN-class} question.
*
* @param name the domain name of the DNS question
* @param type the type of the DNS question
*/
public DefaultDnsQuestion(String name, DnsRecordType type) {
super(name, type, 0);
}
/**
* Creates a new question.
*
* @param name the domain name of the DNS question
* @param type the type of the DNS question
* @param dnsClass the | DefaultDnsQuestion |
java | apache__hadoop | hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JobHistoryParserFactory.java | {
"start": 1447,
"end": 2264
} | enum ____ {
Hadoop20() {
@Override
public boolean canParse(InputStream input) throws IOException {
return Hadoop20JHParser.canParse(input);
}
@Override
public JobHistoryParser newInstance(InputStream input) throws IOException {
return new Hadoop20JHParser(input);
}
},
Current() {
@Override
public boolean canParse(InputStream input) throws IOException {
return CurrentJHParser.canParse(input);
}
@Override
public JobHistoryParser newInstance(InputStream input) throws IOException {
return new CurrentJHParser(input);
}
};
abstract JobHistoryParser newInstance(InputStream input) throws IOException;
abstract boolean canParse(InputStream input) throws IOException;
}
}
| VersionDetector |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/query/sqm/tree/expression/AsWrapperSqmExpression.java | {
"start": 610,
"end": 2328
} | class ____<T> extends AbstractSqmExpression<T> {
private final SqmExpression<?> expression;
AsWrapperSqmExpression(SqmBindableType<T> type, SqmExpression<?> expression) {
super( type, expression.nodeBuilder() );
this.expression = expression;
}
@Override
public <X> X accept(SemanticQueryWalker<X> walker) {
return walker.visitAsWrapperExpression( this );
}
@Override
public void appendHqlString(StringBuilder hql, SqmRenderContext context) {
hql.append( "wrap(" );
expression.appendHqlString( hql, context );
hql.append( " as " );
hql.append( getNodeType().getReturnedClassName() );
hql.append( ")" );
}
@Override
public <X> SqmExpression<X> as(Class<X> type) {
return expression.as( type );
}
@Override
public SqmExpression<T> copy(SqmCopyContext context) {
return new AsWrapperSqmExpression<>( getNodeType(), expression.copy( context ) );
}
public SqmExpression<?> getExpression() {
return expression;
}
@Override
public @NonNull BasicType<T> getNodeType() {
return (BasicType<T>) castNonNull( super.getNodeType() );
}
@Override
public boolean equals(@Nullable Object object) {
return object instanceof AsWrapperSqmExpression<?> that
&& this.expression.equals( that.expression )
&& Objects.equals( this.getNodeType(), that.getNodeType() );
}
@Override
public int hashCode() {
return expression.hashCode();
}
@Override
public boolean isCompatible(Object object) {
return object instanceof AsWrapperSqmExpression<?> that
&& this.expression.isCompatible( that.expression )
&& Objects.equals( this.getNodeType(), that.getNodeType() );
}
@Override
public int cacheHashCode() {
return expression.cacheHashCode();
}
}
| AsWrapperSqmExpression |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/jsontype/TestMultipleTypeNames.java | {
"start": 1256,
"end": 1815
} | class ____ {
private String type;
public String getType() { return type; }
@JsonTypeInfo(
use = JsonTypeInfo.Id.NAME,
include = JsonTypeInfo.As.EXTERNAL_PROPERTY,
property = "type"
)
@JsonSubTypes(value = {
@JsonSubTypes.Type(value = A.class, names = "a"),
@JsonSubTypes.Type(value = B.class, names = {"b","c"}),
})
MultiTypeName data;
public MultiTypeName getData() { return data; }
}
static | BaseForNamesTest |
java | jhy__jsoup | src/main/java/org/jsoup/select/Evaluator.java | {
"start": 24735,
"end": 25344
} | class ____ extends Evaluator {
private final String searchText;
public ContainsData(String searchText) {
this.searchText = lowerCase(searchText);
}
@Override
public boolean matches(Element root, Element element) {
return lowerCase(element.data()).contains(searchText); // not whitespace normalized
}
@Override
public String toString() {
return String.format(":containsData(%s)", searchText);
}
}
/**
* Evaluator for matching Element's own text
*/
public static final | ContainsData |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/web/client/match/MockRestRequestMatchersTests.java | {
"start": 1786,
"end": 14224
} | class ____ {
private final MockClientHttpRequest request = new MockClientHttpRequest();
@Test
void requestTo() throws Exception {
this.request.setURI(URI.create("http://www.foo.example/bar"));
MockRestRequestMatchers.requestTo("http://www.foo.example/bar").match(this.request);
}
@Test // SPR-15819
void requestToUriTemplate() throws Exception {
this.request.setURI(URI.create("http://www.foo.example/bar"));
MockRestRequestMatchers.requestToUriTemplate("http://www.foo.example/{bar}", "bar").match(this.request);
}
@Test
void requestToNoMatch() {
this.request.setURI(URI.create("http://www.foo.example/bar"));
assertThatAssertionError()
.isThrownBy(() -> MockRestRequestMatchers.requestTo("http://www.foo.example/wrong").match(this.request));
}
@Test
void requestToContains() throws Exception {
this.request.setURI(URI.create("http://www.foo.example/bar"));
MockRestRequestMatchers.requestTo(containsString("bar")).match(this.request);
}
@Test
void method() throws Exception {
this.request.setMethod(HttpMethod.GET);
MockRestRequestMatchers.method(HttpMethod.GET).match(this.request);
}
@Test
void methodNoMatch() {
this.request.setMethod(HttpMethod.POST);
assertThatAssertionError()
.isThrownBy(() -> MockRestRequestMatchers.method(HttpMethod.GET).match(this.request))
.withMessageContaining("expected:<GET> but was:<POST>");
}
@Test
void header() throws Exception {
this.request.getHeaders().put("foo", List.of("bar", "baz"));
MockRestRequestMatchers.header("foo", "bar", "baz").match(this.request);
}
@Test
void headerDoesNotExist() throws Exception {
MockRestRequestMatchers.headerDoesNotExist(null).match(this.request);
MockRestRequestMatchers.headerDoesNotExist("").match(this.request);
MockRestRequestMatchers.headerDoesNotExist("foo").match(this.request);
List<String> values = List.of("bar", "baz");
this.request.getHeaders().put("foo", values);
assertThatAssertionError()
.isThrownBy(() -> MockRestRequestMatchers.headerDoesNotExist("foo").match(this.request))
.withMessage("Expected header <foo> not to exist, but it exists with values: " + values);
}
@Test
void headerMissing() {
assertThatAssertionError()
.isThrownBy(() -> MockRestRequestMatchers.header("foo", "bar").match(this.request))
.withMessageContaining("was null");
}
@Test
void headerMissingValue() {
this.request.getHeaders().put("foo", List.of("bar", "baz"));
assertThatAssertionError()
.isThrownBy(() -> MockRestRequestMatchers.header("foo", "bad").match(this.request))
.withMessageContaining("expected:<bad> but was:<bar>");
}
@Test
void headerContains() throws Exception {
this.request.getHeaders().put("foo", List.of("bar", "baz"));
MockRestRequestMatchers.header("foo", containsString("ba")).match(this.request);
}
@Test
void headerContainsWithMissingHeader() {
assertThatAssertionError()
.isThrownBy(() -> MockRestRequestMatchers.header("foo", containsString("baz")).match(this.request))
.withMessage("Expected header <foo> to exist but was null");
}
@Test
void headerContainsWithMissingValue() {
this.request.getHeaders().put("foo", List.of("bar", "baz"));
assertThatAssertionError()
.isThrownBy(() -> MockRestRequestMatchers.header("foo", containsString("bx")).match(this.request))
.withMessageContaining("was \"bar\"");
}
@Test
void headerListMissing() {
assertThatAssertionError()
.isThrownBy(() -> MockRestRequestMatchers.headerList("foo", hasSize(2)).match(this.request))
.withMessage("Expected header <foo> to exist but was null");
}
@Test
void headerListMatchers() throws IOException {
this.request.getHeaders().put("foo", List.of("bar", "baz"));
MockRestRequestMatchers.headerList("foo", containsInAnyOrder(endsWith("baz"), endsWith("bar"))).match(this.request);
MockRestRequestMatchers.headerList("foo", contains(is("bar"), is("baz"))).match(this.request);
MockRestRequestMatchers.headerList("foo", contains(is("bar"), anything())).match(this.request);
MockRestRequestMatchers.headerList("foo", hasItem(endsWith("baz"))).match(this.request);
MockRestRequestMatchers.headerList("foo", everyItem(startsWith("ba"))).match(this.request);
MockRestRequestMatchers.headerList("foo", hasSize(2)).match(this.request);
MockRestRequestMatchers.headerList("foo", notNullValue()).match(this.request);
MockRestRequestMatchers.headerList("foo", is(anything())).match(this.request);
MockRestRequestMatchers.headerList("foo", allOf(notNullValue(), notNullValue())).match(this.request);
MockRestRequestMatchers.headerList("foo", allOf(notNullValue(), hasSize(2))).match(this.request);
}
@Test
void headerListContainsMismatch() {
this.request.getHeaders().put("foo", List.of("bar", "baz"));
assertThatAssertionError()
.isThrownBy(() -> MockRestRequestMatchers.headerList("foo", contains(containsString("ba"))).match(this.request))
.withMessageContainingAll(
"Request header [foo] values",
"Expected: iterable containing [a string containing \"ba\"]",
"but: not matched: \"baz\"");
assertThatAssertionError()
.isThrownBy(() -> MockRestRequestMatchers.headerList("foo", hasItem(endsWith("ba"))).match(this.request))
.withMessageContainingAll(
"Request header [foo] values",
"Expected: a collection containing a string ending with \"ba\"",
"but: mismatches were: [was \"bar\", was \"baz\"]");
assertThatAssertionError()
.isThrownBy(() -> MockRestRequestMatchers.headerList("foo", everyItem(endsWith("ar"))).match(this.request))
.withMessageContainingAll(
"Request header [foo] values",
"Expected: every item is a string ending with \"ar\"",
"but: an item was \"baz\"");
}
@Test
void headerListDoesNotHideHeaderWithSingleMatcher() throws IOException {
this.request.getHeaders().put("foo", List.of("bar", "baz"));
MockRestRequestMatchers.header("foo", equalTo("bar")).match(this.request);
assertThatAssertionError()
.isThrownBy(() -> MockRestRequestMatchers.headerList("foo", equalTo("bar")).match(this.request))
.withMessageContainingAll(
"Request header [foo] values",
"Expected: \"bar\"",
"but: was <[bar, baz]>");
}
@Test
void headers() throws Exception {
this.request.getHeaders().put("foo", List.of("bar", "baz"));
MockRestRequestMatchers.header("foo", "bar", "baz").match(this.request);
}
@Test
void headersWithMissingHeader() {
assertThatAssertionError()
.isThrownBy(() -> MockRestRequestMatchers.header("foo", "bar").match(this.request))
.withMessage("Expected header <foo> to exist but was null");
}
@Test
void headersWithMissingValue() {
this.request.getHeaders().put("foo", List.of("bar"));
assertThatAssertionError()
.isThrownBy(() -> MockRestRequestMatchers.header("foo", "bar", "baz").match(this.request))
.withMessageContaining("to have at least <2> values");
}
@Test
void queryParam() throws Exception {
this.request.setURI(URI.create("http://www.foo.example/a?foo=bar&foo=baz"));
MockRestRequestMatchers.queryParam("foo", "bar", "baz").match(this.request);
}
@Test
void queryParamMissing() {
this.request.setURI(URI.create("http://www.foo.example/a"));
assertThatAssertionError()
.isThrownBy(() -> MockRestRequestMatchers.queryParam("foo", "bar").match(this.request))
.withMessage("Expected query param <foo> to exist but was null");
}
@Test
void queryParamMissingValue() {
this.request.setURI(URI.create("http://www.foo.example/a?foo=bar&foo=baz"));
assertThatAssertionError()
.isThrownBy(() -> MockRestRequestMatchers.queryParam("foo", "bad").match(this.request))
.withMessageContaining("expected:<bad> but was:<bar>");
}
@Test
void queryParamContains() throws Exception {
this.request.setURI(URI.create("http://www.foo.example/a?foo=bar&foo=baz"));
MockRestRequestMatchers.queryParam("foo", containsString("ba")).match(this.request);
}
@Test
void queryParamContainsWithMissingValue() {
this.request.setURI(URI.create("http://www.foo.example/a?foo=bar&foo=baz"));
assertThatAssertionError()
.isThrownBy(() -> MockRestRequestMatchers.queryParam("foo", containsString("bx")).match(this.request))
.withMessageContaining("was \"bar\"");
}
@Test
void queryParamListMissing() {
assertThatAssertionError()
.isThrownBy(() -> MockRestRequestMatchers.queryParamList("foo", hasSize(2)).match(this.request))
.withMessage("Expected query param <foo> to exist but was null");
}
@Test
void queryParamListMatchers() throws IOException {
this.request.setURI(URI.create("http://www.foo.example/a?foo=bar&foo=baz"));
MockRestRequestMatchers.queryParamList("foo", containsInAnyOrder(endsWith("baz"), endsWith("bar"))).match(this.request);
MockRestRequestMatchers.queryParamList("foo", contains(is("bar"), is("baz"))).match(this.request);
MockRestRequestMatchers.queryParamList("foo", contains(is("bar"), anything())).match(this.request);
MockRestRequestMatchers.queryParamList("foo", hasItem(endsWith("baz"))).match(this.request);
MockRestRequestMatchers.queryParamList("foo", everyItem(startsWith("ba"))).match(this.request);
MockRestRequestMatchers.queryParamList("foo", hasSize(2)).match(this.request);
MockRestRequestMatchers.queryParamList("foo", notNullValue()).match(this.request);
MockRestRequestMatchers.queryParamList("foo", is(anything())).match(this.request);
MockRestRequestMatchers.queryParamList("foo", allOf(notNullValue(), notNullValue())).match(this.request);
MockRestRequestMatchers.queryParamList("foo", allOf(notNullValue(), hasSize(2))).match(this.request);
}
@Test
void queryParamListContainsMismatch() {
this.request.setURI(URI.create("http://www.foo.example/a?foo=bar&foo=baz"));
assertThatAssertionError()
.isThrownBy(() -> MockRestRequestMatchers.queryParamList("foo", contains(containsString("ba"))).match(this.request))
.withMessageContainingAll(
"Query param [foo] values",
"Expected: iterable containing [a string containing \"ba\"]",
"but: not matched: \"baz\"");
assertThatAssertionError()
.isThrownBy(() -> MockRestRequestMatchers.queryParamList("foo", hasItem(endsWith("ba"))).match(this.request))
.withMessageContainingAll(
"Query param [foo] values",
"Expected: a collection containing a string ending with \"ba\"",
"but: mismatches were: [was \"bar\", was \"baz\"]");
assertThatAssertionError()
.isThrownBy(() -> MockRestRequestMatchers.queryParamList("foo", everyItem(endsWith("ar"))).match(this.request))
.withMessageContainingAll(
"Query param [foo] values",
"Expected: every item is a string ending with \"ar\"",
"but: an item was \"baz\"");
}
@Test
void queryParamListDoesNotHideQueryParamWithSingleMatcher() throws IOException {
this.request.setURI(URI.create("http://www.foo.example/a?foo=bar&foo=baz"));
MockRestRequestMatchers.queryParam("foo", equalTo("bar")).match(this.request);
assertThatAssertionError()
.isThrownBy(() -> MockRestRequestMatchers.queryParamList("foo", equalTo("bar")).match(this.request))
.withMessageContainingAll(
"Query param [foo] values",
"Expected: \"bar\"",
"but: was <[bar, baz]>");
}
@Test // gh-34703
void queryParamCount() throws Exception {
this.request.setURI(URI.create("http://www.foo.example/a"));
MockRestRequestMatchers.queryParamCount(0).match(this.request);
this.request.setURI(URI.create("http://www.foo.example/a?"));
MockRestRequestMatchers.queryParamCount(0).match(this.request);
this.request.setURI(URI.create("http://www.foo.example/a?foo=1"));
MockRestRequestMatchers.queryParamCount(1).match(this.request);
this.request.setURI(URI.create("http://www.foo.example/a?foo=1&foo=2"));
MockRestRequestMatchers.queryParamCount(1).match(this.request);
this.request.setURI(URI.create("http://www.foo.example/a?foo=1&baz=2"));
MockRestRequestMatchers.queryParamCount(2).match(this.request);
}
@Test // gh-34703
void queryParamCountMismatch() {
this.request.setURI(URI.create("http://www.foo.example/a?foo=1&baz=2"));
assertThatAssertionError()
.isThrownBy(() -> MockRestRequestMatchers.queryParamCount(1).match(this.request))
.withMessage("Expected 1 query parameter(s) but found 2: [foo, baz]");
}
private static ThrowableTypeAssert<AssertionError> assertThatAssertionError() {
return assertThatExceptionOfType(AssertionError.class);
}
}
| MockRestRequestMatchersTests |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/index/IndexModuleTests.java | {
"start": 41791,
"end": 42109
} | class ____ implements IndexStorePlugin.DirectoryFactory {
@Override
public Directory newDirectory(IndexSettings indexSettings, ShardPath shardPath) throws IOException {
return new FsDirectoryFactory().newDirectory(indexSettings, shardPath);
}
}
public static final | FooFunction |
java | apache__flink | flink-python/src/main/java/org/apache/flink/formats/csv/CsvRowSerializationSchema.java | {
"start": 2854,
"end": 4169
} | class ____ implements SerializationSchema<Row> {
private static final long serialVersionUID = 2098447220136965L;
/** Type information describing the input CSV data. */
private final RowTypeInfo typeInfo;
/** Runtime instance that performs the actual work. */
private final RuntimeConverter runtimeConverter;
/** CsvMapper used to write {@link JsonNode} into bytes. */
private transient CsvMapper csvMapper;
/** Schema describing the input CSV data. */
private final CsvSchema csvSchema;
/** Object writer used to write rows. It is configured by {@link CsvSchema}. */
private transient ObjectWriter objectWriter;
/** Reusable object node. */
private transient ObjectNode root;
private CsvRowSerializationSchema(RowTypeInfo typeInfo, CsvSchema csvSchema) {
this.typeInfo = typeInfo;
this.runtimeConverter = createRowRuntimeConverter(typeInfo, true);
this.csvSchema = csvSchema;
}
@Override
public void open(InitializationContext context) throws Exception {
this.csvMapper = JacksonMapperFactory.createCsvMapper();
this.objectWriter = csvMapper.writer(csvSchema);
}
/** A builder for creating a {@link CsvRowSerializationSchema}. */
@PublicEvolving
public static | CsvRowSerializationSchema |
java | apache__maven | impl/maven-impl/src/main/java/org/apache/maven/impl/model/profile/ConditionParser.java | {
"start": 1548,
"end": 24349
} | interface ____ {
/**
* Applies the function to the given list of arguments.
*
* @param args the list of arguments passed to the function
* @return the result of applying the function
*/
Object apply(List<Object> args);
}
private final Map<String, ExpressionFunction> functions; // Map to store functions by their names
private final UnaryOperator<String> propertyResolver; // Property resolver
private List<String> tokens; // List of tokens derived from the expression
private int current; // Keeps track of the current token index
/**
* Constructs a new {@code ConditionParser} with the given function mappings.
*
* @param functions a map of function names to their corresponding {@code ExpressionFunction} implementations
* @param propertyResolver the property resolver
*/
public ConditionParser(Map<String, ExpressionFunction> functions, UnaryOperator<String> propertyResolver) {
this.functions = functions;
this.propertyResolver = propertyResolver;
}
/**
* Parses the given expression and returns the result of the evaluation.
*
* @param expression the expression to be parsed
* @return the result of parsing and evaluating the expression
*/
public Object parse(String expression) {
this.tokens = tokenize(expression);
this.current = 0;
return parseExpression();
}
/**
* Tokenizes the input expression into a list of string tokens for further parsing.
* This method handles quoted strings, property aliases, and various operators.
*
* @param expression the expression to tokenize
* @return a list of tokens
*/
private List<String> tokenize(String expression) {
List<String> tokens = new ArrayList<>();
StringBuilder sb = new StringBuilder();
char quoteType = 0;
boolean inPropertyReference = false;
for (int i = 0; i < expression.length(); i++) {
char c = expression.charAt(i);
if (quoteType != 0) {
if (c == quoteType) {
quoteType = 0;
sb.append(c);
tokens.add(sb.toString());
sb.setLength(0);
} else {
sb.append(c);
}
continue;
}
if (inPropertyReference) {
if (c == '}') {
inPropertyReference = false;
tokens.add("${" + sb + "}");
sb.setLength(0);
} else {
sb.append(c);
}
continue;
}
if (c == '$' && i + 1 < expression.length() && expression.charAt(i + 1) == '{') {
if (!sb.isEmpty()) {
tokens.add(sb.toString());
sb.setLength(0);
}
inPropertyReference = true;
i++; // Skip the '{'
continue;
}
if (c == '"' || c == '\'') {
if (!sb.isEmpty()) {
tokens.add(sb.toString());
sb.setLength(0);
}
quoteType = c;
sb.append(c);
} else if (c == ' ' || c == '(' || c == ')' || c == ',' || c == '+' || c == '>' || c == '<' || c == '='
|| c == '!') {
if (!sb.isEmpty()) {
tokens.add(sb.toString());
sb.setLength(0);
}
if (c != ' ') {
if ((c == '>' || c == '<' || c == '=' || c == '!')
&& i + 1 < expression.length()
&& expression.charAt(i + 1) == '=') {
tokens.add(c + "=");
i++; // Skip the next character
} else {
tokens.add(String.valueOf(c));
}
}
} else {
sb.append(c);
}
}
if (inPropertyReference) {
throw new RuntimeException("Unclosed property reference: ${");
}
if (!sb.isEmpty()) {
tokens.add(sb.toString());
}
return tokens;
}
/**
* Parses the next expression from the list of tokens.
*
* @return the parsed expression as an object
* @throws RuntimeException if there are unexpected tokens after the end of the expression
*/
private Object parseExpression() {
Object result = parseLogicalOr();
if (current < tokens.size()) {
throw new RuntimeException("Unexpected tokens after end of expression");
}
return result;
}
/**
* Parses logical OR operations.
*
* @return the result of parsing logical OR operations
*/
private Object parseLogicalOr() {
Object left = parseLogicalAnd();
while (current < tokens.size() && tokens.get(current).equals("||")) {
current++;
Object right = parseLogicalAnd();
left = (boolean) left || (boolean) right;
}
return left;
}
/**
* Parses logical AND operations.
*
* @return the result of parsing logical AND operations
*/
private Object parseLogicalAnd() {
Object left = parseComparison();
while (current < tokens.size() && tokens.get(current).equals("&&")) {
current++;
Object right = parseComparison();
left = (boolean) left && (boolean) right;
}
return left;
}
/**
* Parses comparison operations.
*
* @return the result of parsing comparison operations
*/
private Object parseComparison() {
Object left = parseAddSubtract();
while (current < tokens.size()
&& (tokens.get(current).equals(">")
|| tokens.get(current).equals("<")
|| tokens.get(current).equals(">=")
|| tokens.get(current).equals("<=")
|| tokens.get(current).equals("==")
|| tokens.get(current).equals("!="))) {
String operator = tokens.get(current);
current++;
Object right = parseAddSubtract();
left = compare(left, operator, right);
}
return left;
}
/**
* Parses addition and subtraction operations.
*
* @return the result of parsing addition and subtraction operations
*/
private Object parseAddSubtract() {
Object left = parseMultiplyDivide();
while (current < tokens.size()
&& (tokens.get(current).equals("+") || tokens.get(current).equals("-"))) {
String operator = tokens.get(current);
current++;
Object right = parseMultiplyDivide();
if (operator.equals("+")) {
left = add(left, right);
} else {
left = subtract(left, right);
}
}
return left;
}
/**
* Parses multiplication and division operations.
*
* @return the result of parsing multiplication and division operations
*/
private Object parseMultiplyDivide() {
Object left = parseUnary();
while (current < tokens.size()
&& (tokens.get(current).equals("*") || tokens.get(current).equals("/"))) {
String operator = tokens.get(current);
current++;
Object right = parseUnary();
if (operator.equals("*")) {
left = multiply(left, right);
} else {
left = divide(left, right);
}
}
return left;
}
/**
* Parses unary operations (negation).
*
* @return the result of parsing unary operations
*/
private Object parseUnary() {
if (current < tokens.size() && tokens.get(current).equals("-")) {
current++;
Object value = parseUnary();
return negate(value);
}
return parseTerm();
}
/**
* Parses individual terms (numbers, strings, booleans, parentheses, functions).
*
* @return the parsed term
* @throws RuntimeException if the expression ends unexpectedly or contains unknown tokens
*/
private Object parseTerm() {
if (current >= tokens.size()) {
throw new RuntimeException("Unexpected end of expression");
}
String token = tokens.get(current);
if (token.equals("(")) {
return parseParentheses();
} else if (functions.containsKey(token)) {
return parseFunction();
} else if ((token.startsWith("\"") && token.endsWith("\"")) || (token.startsWith("'") && token.endsWith("'"))) {
current++;
return token.length() > 1 ? token.substring(1, token.length() - 1) : "";
} else if (token.equalsIgnoreCase("true") || token.equalsIgnoreCase("false")) {
current++;
return Boolean.parseBoolean(token);
} else if (token.startsWith("${") && token.endsWith("}")) {
current++;
String propertyName = token.substring(2, token.length() - 1);
return propertyResolver.apply(propertyName);
} else {
try {
current++;
return Double.parseDouble(token);
} catch (NumberFormatException e) {
// If it's not a number, treat it as a variable or unknown function
return parseVariableOrUnknownFunction();
}
}
}
/**
* Parses a token that could be either a variable or an unknown function.
*
* @return the result of parsing a variable or unknown function
* @throws RuntimeException if an unknown function is encountered
*/
private Object parseVariableOrUnknownFunction() {
current--; // Move back to the token we couldn't parse as a number
String name = tokens.get(current);
current++;
// Check if it's followed by an opening parenthesis, indicating a function call
if (current < tokens.size() && tokens.get(current).equals("(")) {
// It's a function call, parse it as such
List<Object> args = parseArgumentList();
if (functions.containsKey(name)) {
return functions.get(name).apply(args);
} else {
throw new RuntimeException("Unknown function: " + name);
}
} else {
// It's a variable
// Here you might want to handle variables differently
// For now, we'll throw an exception
throw new RuntimeException("Unknown variable: " + name);
}
}
/**
* Parses a list of arguments for a function call.
*
* @return a list of parsed arguments
* @throws RuntimeException if there's a mismatch in parentheses
*/
private List<Object> parseArgumentList() {
List<Object> args = new ArrayList<>();
current++; // Skip the opening parenthesis
while (current < tokens.size() && !tokens.get(current).equals(")")) {
args.add(parseLogicalOr());
if (current < tokens.size() && tokens.get(current).equals(",")) {
current++;
}
}
if (current >= tokens.size() || !tokens.get(current).equals(")")) {
throw new RuntimeException("Mismatched parentheses: missing closing parenthesis in function call");
}
current++; // Skip the closing parenthesis
return args;
}
/**
* Parses a function call.
*
* @return the result of the function call
*/
private Object parseFunction() {
String functionName = tokens.get(current);
current++;
List<Object> args = parseArgumentList();
return functions.get(functionName).apply(args);
}
/**
* Parses an expression within parentheses.
*
* @return the result of parsing the expression within parentheses
* @throws RuntimeException if there's a mismatch in parentheses
*/
private Object parseParentheses() {
current++; // Skip the opening parenthesis
Object result = parseLogicalOr();
if (current >= tokens.size() || !tokens.get(current).equals(")")) {
throw new RuntimeException("Mismatched parentheses: missing closing parenthesis");
}
current++; // Skip the closing parenthesis
return result;
}
/**
* Adds two objects, handling string concatenation and numeric addition.
*
* @param left the left operand
* @param right the right operand
* @return the result of the addition
* @throws RuntimeException if the operands cannot be added
*/
private static Object add(Object left, Object right) {
if (left instanceof String || right instanceof String) {
return toString(left) + toString(right);
} else if (left instanceof Number leftNumber && right instanceof Number rightNumber) {
return leftNumber.doubleValue() + rightNumber.doubleValue();
} else {
throw new RuntimeException("Cannot add " + left + " and " + right);
}
}
/**
* Negates a numeric value.
*
* @param value the value to negate
* @return the negated value
* @throws RuntimeException if the value cannot be negated
*/
private Object negate(Object value) {
if (value instanceof Number number) {
return -number.doubleValue();
}
throw new RuntimeException("Cannot negate non-numeric value: " + value);
}
/**
* Subtracts the right operand from the left operand.
*
* @param left the left operand
* @param right the right operand
* @return the result of the subtraction
* @throws RuntimeException if the operands cannot be subtracted
*/
private static Object subtract(Object left, Object right) {
if (left instanceof Number leftNumber && right instanceof Number rightNumber) {
return leftNumber.doubleValue() - rightNumber.doubleValue();
} else {
throw new RuntimeException("Cannot subtract " + right + " from " + left);
}
}
/**
* Multiplies two numeric operands.
*
* @param left the left operand
* @param right the right operand
* @return the result of the multiplication
* @throws RuntimeException if the operands cannot be multiplied
*/
private static Object multiply(Object left, Object right) {
if (left instanceof Number leftNumber && right instanceof Number rightNumber) {
return leftNumber.doubleValue() * rightNumber.doubleValue();
} else {
throw new RuntimeException("Cannot multiply " + left + " and " + right);
}
}
/**
* Divides the left operand by the right operand.
*
* @param left the left operand (dividend)
* @param right the right operand (divisor)
* @return the result of the division
* @throws RuntimeException if the operands cannot be divided
* @throws ArithmeticException if attempting to divide by zero
*/
private static Object divide(Object left, Object right) {
if (left instanceof Number leftNumber && right instanceof Number rightNumber) {
double divisor = rightNumber.doubleValue();
if (divisor == 0) {
throw new ArithmeticException("Division by zero");
}
return leftNumber.doubleValue() / divisor;
} else {
throw new RuntimeException("Cannot divide " + left + " by " + right);
}
}
/**
* Compares two objects based on the given operator.
* Supports comparison of numbers and strings, and equality checks for null values.
*
* @param left the left operand
* @param operator the comparison operator (">", "<", ">=", "<=", "==", or "!=")
* @param right the right operand
* @return the result of the comparison (a boolean value)
* @throws IllegalStateException if an unknown operator is provided
* @throws RuntimeException if the operands cannot be compared
*/
private static Object compare(Object left, String operator, Object right) {
if (left == null && right == null) {
return true;
}
if (left == null || right == null) {
if ("==".equals(operator)) {
return false;
} else if ("!=".equals(operator)) {
return true;
}
}
if (left instanceof Number leftNumber && right instanceof Number rightNumber) {
double leftVal = leftNumber.doubleValue();
double rightVal = rightNumber.doubleValue();
return switch (operator) {
case ">" -> leftVal > rightVal;
case "<" -> leftVal < rightVal;
case ">=" -> leftVal >= rightVal;
case "<=" -> leftVal <= rightVal;
case "==" -> Math.abs(leftVal - rightVal) < 1e-9;
case "!=" -> Math.abs(leftVal - rightVal) >= 1e-9;
default -> throw new IllegalStateException("Unknown operator: " + operator);
};
} else if (left instanceof String leftString && right instanceof String rightString) {
int comparison = leftString.compareTo(rightString);
return switch (operator) {
case ">" -> comparison > 0;
case "<" -> comparison < 0;
case ">=" -> comparison >= 0;
case "<=" -> comparison <= 0;
case "==" -> comparison == 0;
case "!=" -> comparison != 0;
default -> throw new IllegalStateException("Unknown operator: " + operator);
};
}
throw new RuntimeException("Cannot compare " + left + " and " + right + " with operator " + operator);
}
/**
* Converts an object to a string representation.
* If the object is a {@code Double}, it formats it without any decimal places.
* Otherwise, it uses the {@code String.valueOf} method.
*
* @param value the object to convert to a string
* @return the string representation of the object
*/
public static String toString(Object value) {
if (value instanceof Double || value instanceof Float) {
double doubleValue = ((Number) value).doubleValue();
if (doubleValue == Math.floor(doubleValue) && !Double.isInfinite(doubleValue)) {
return String.format("%.0f", doubleValue);
}
}
return String.valueOf(value);
}
/**
* Converts an object to a boolean value.
* If the object is:
* - a {@code Boolean}, returns its value directly.
* - a {@code String}, returns {@code true} if the string is non-blank.
* - a {@code Number}, returns {@code true} if its integer value is not zero.
* For other object types, returns {@code true} if the object is non-null.
*
* @param value the object to convert to a boolean
* @return the boolean representation of the object
*/
public static Boolean toBoolean(Object value) {
if (value instanceof Boolean b) {
return b; // Returns the boolean value
} else if (value instanceof String s) {
return !s.isBlank(); // True if the string is not blank
} else if (value instanceof Number b) {
return b.intValue() != 0; // True if the number is not zero
} else {
return value != null; // True if the object is not null
}
}
/**
* Converts an object to a double value.
* If the object is:
* - a {@code Number}, returns its double value.
* - a {@code String}, tries to parse it as a double.
* - a {@code Boolean}, returns {@code 1.0} for {@code true}, {@code 0.0} for {@code false}.
* If the object cannot be converted, a {@code RuntimeException} is thrown.
*
* @param value the object to convert to a double
* @return the double representation of the object
* @throws RuntimeException if the object cannot be converted to a double
*/
public static double toDouble(Object value) {
if (value instanceof Number number) {
return number.doubleValue(); // Converts number to double
} else if (value instanceof String string) {
try {
return Double.parseDouble(string); // Tries to parse string as double
} catch (NumberFormatException e) {
throw new RuntimeException("Cannot convert string to number: " + value);
}
} else if (value instanceof Boolean bool) {
return bool ? 1.0 : 0.0; // True = 1.0, False = 0.0
} else {
throw new RuntimeException("Cannot convert to number: " + value);
}
}
/**
* Converts an object to an integer value.
* If the object is:
* - a {@code Number}, returns its integer value.
* - a {@code String}, tries to parse it as an integer, or as a double then converted to an integer.
* - a {@code Boolean}, returns {@code 1} for {@code true}, {@code 0} for {@code false}.
* If the object cannot be converted, a {@code RuntimeException} is thrown.
*
* @param value the object to convert to an integer
* @return the integer representation of the object
* @throws RuntimeException if the object cannot be converted to an integer
*/
public static int toInt(Object value) {
if (value instanceof Number number) {
return number.intValue(); // Converts number to int
} else if (value instanceof String string) {
try {
return Integer.parseInt(string); // Tries to parse string as int
} catch (NumberFormatException e) {
// If string is not an int, tries parsing as double and converting to int
try {
return (int) Double.parseDouble((String) value);
} catch (NumberFormatException e2) {
throw new RuntimeException("Cannot convert string to integer: " + value);
}
}
} else if (value instanceof Boolean bool) {
return bool ? 1 : 0; // True = 1, False = 0
} else {
throw new RuntimeException("Cannot convert to integer: " + value);
}
}
}
| ExpressionFunction |
java | elastic__elasticsearch | test/framework/src/main/java/org/elasticsearch/test/MockLog.java | {
"start": 12150,
"end": 13588
} | class ____ extends SeenEventExpectation {
private final Pattern pattern;
private final Class<? extends Exception> clazz;
private final String exceptionMessage;
public PatternAndExceptionSeenEventExpectation(
String name,
String logger,
Level level,
String pattern,
Class<? extends Exception> clazz,
String exceptionMessage
) {
super(name, logger, level, pattern);
this.pattern = Pattern.compile(pattern);
this.clazz = clazz;
this.exceptionMessage = exceptionMessage;
}
@Override
public void match(LogEvent event) {
if (event.getLevel().equals(level) && event.getLoggerName().equals(logger)) {
boolean patternMatches = pattern.matcher(event.getMessage().getFormattedMessage()).matches();
boolean exceptionMatches = event.getThrown() != null
&& event.getThrown().getClass() == clazz
&& event.getThrown().getMessage().equals(exceptionMessage);
if (patternMatches && exceptionMatches) {
seenLatch.countDown();
}
}
}
}
/**
* A wrapper around {@link LoggingExpectation} to detect if the assertMatched method has been called
*/
private static | PatternAndExceptionSeenEventExpectation |
java | spring-projects__spring-framework | spring-webflux/src/main/java/org/springframework/web/reactive/function/server/DefaultServerRequest.java | {
"start": 10179,
"end": 11528
} | class ____ implements Headers {
private final HttpHeaders httpHeaders =
HttpHeaders.readOnlyHttpHeaders(request().getHeaders());
@Override
public List<MediaType> accept() {
return this.httpHeaders.getAccept();
}
@Override
public List<Charset> acceptCharset() {
return this.httpHeaders.getAcceptCharset();
}
@Override
public List<Locale.LanguageRange> acceptLanguage() {
return this.httpHeaders.getAcceptLanguage();
}
@Override
public OptionalLong contentLength() {
long value = this.httpHeaders.getContentLength();
return (value != -1 ? OptionalLong.of(value) : OptionalLong.empty());
}
@Override
public Optional<MediaType> contentType() {
return Optional.ofNullable(this.httpHeaders.getContentType());
}
@Override
public @Nullable InetSocketAddress host() {
return this.httpHeaders.getHost();
}
@Override
public List<HttpRange> range() {
return this.httpHeaders.getRange();
}
@Override
public List<String> header(String headerName) {
List<String> headerValues = this.httpHeaders.get(headerName);
return (headerValues != null ? headerValues : Collections.emptyList());
}
@Override
public HttpHeaders asHttpHeaders() {
return this.httpHeaders;
}
@Override
public String toString() {
return this.httpHeaders.toString();
}
}
}
| DefaultHeaders |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/ser/std/AsArraySerializerBase.java | {
"start": 431,
"end": 598
} | class ____ serializers that will output contents as JSON
* arrays; typically serializers used for {@link java.util.Collection}
* and array types.
*/
public abstract | for |
java | apache__kafka | streams/src/test/java/org/apache/kafka/streams/tests/SmokeTestUtil.java | {
"start": 4397,
"end": 4617
} | class ____<K, V> implements KeyValueMapper<Windowed<K>, V, K> {
@Override
public K apply(final Windowed<K> winKey, final V value) {
return winKey.key();
}
}
public static | Unwindow |
java | elastic__elasticsearch | x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/streaming/NewlineDelimitedByteProcessor.java | {
"start": 1125,
"end": 2546
} | class ____ extends DelegatingProcessor<HttpResult, Deque<String>> {
private static final Pattern END_OF_LINE_REGEX = Pattern.compile("\\n|\\r\\n");
private volatile String previousTokens = "";
@Override
protected void next(HttpResult item) {
// discard empty result and go to the next
if (item.isBodyEmpty()) {
upstream().request(1);
return;
}
var body = previousTokens + new String(item.body(), StandardCharsets.UTF_8);
var lines = END_OF_LINE_REGEX.split(body, -1); // -1 because we actually want trailing empty strings
var results = new ArrayDeque<String>(lines.length);
for (var i = 0; i < lines.length - 1; i++) {
var line = lines[i].trim();
if (line.isBlank() == false) {
results.offer(line);
}
}
previousTokens = lines[lines.length - 1].trim();
if (results.isEmpty()) {
upstream().request(1);
} else {
downstream().onNext(results);
}
}
@Override
public void onComplete() {
if (previousTokens.isBlank()) {
super.onComplete();
} else if (isClosed.compareAndSet(false, true)) {
var results = new ArrayDeque<String>(1);
results.offer(previousTokens);
downstream().onNext(results);
}
}
}
| NewlineDelimitedByteProcessor |
java | apache__camel | components/camel-bindy/src/test/java/org/apache/camel/dataformat/bindy/fix/BindyComplexOneToManyKeyValuePairUnMarshallTest.java | {
"start": 2953,
"end": 3335
} | class ____ extends RouteBuilder {
BindyKeyValuePairDataFormat kvpBindyDataFormat
= new BindyKeyValuePairDataFormat(org.apache.camel.dataformat.bindy.model.fix.complex.onetomany.Order.class);
@Override
public void configure() {
from(URI_DIRECT_START).unmarshal(kvpBindyDataFormat).to(URI_MOCK_RESULT);
}
}
}
| ContextConfig |
java | apache__spark | sql/catalyst/src/main/java/org/apache/spark/sql/vectorized/ArrowColumnVector.java | {
"start": 15277,
"end": 15583
} | class ____ extends ArrowVectorAccessor {
private final TimeNanoVector accessor;
TimeNanoAccessor(TimeNanoVector vector) {
super(vector);
this.accessor = vector;
}
@Override
final long getLong(int rowId) {
return accessor.get(rowId);
}
}
static | TimeNanoAccessor |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerFromDoubleEvaluator.java | {
"start": 1122,
"end": 4355
} | class ____ extends AbstractConvertFunction.AbstractEvaluator {
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ToIntegerFromDoubleEvaluator.class);
private final EvalOperator.ExpressionEvaluator dbl;
public ToIntegerFromDoubleEvaluator(Source source, EvalOperator.ExpressionEvaluator dbl,
DriverContext driverContext) {
super(driverContext, source);
this.dbl = dbl;
}
@Override
public EvalOperator.ExpressionEvaluator next() {
return dbl;
}
@Override
public Block evalVector(Vector v) {
DoubleVector vector = (DoubleVector) v;
int positionCount = v.getPositionCount();
if (vector.isConstant()) {
try {
return driverContext.blockFactory().newConstantIntBlockWith(evalValue(vector, 0), positionCount);
} catch (InvalidArgumentException e) {
registerException(e);
return driverContext.blockFactory().newConstantNullBlock(positionCount);
}
}
try (IntBlock.Builder builder = driverContext.blockFactory().newIntBlockBuilder(positionCount)) {
for (int p = 0; p < positionCount; p++) {
try {
builder.appendInt(evalValue(vector, p));
} catch (InvalidArgumentException e) {
registerException(e);
builder.appendNull();
}
}
return builder.build();
}
}
private int evalValue(DoubleVector container, int index) {
double value = container.getDouble(index);
return ToInteger.fromDouble(value);
}
@Override
public Block evalBlock(Block b) {
DoubleBlock block = (DoubleBlock) b;
int positionCount = block.getPositionCount();
try (IntBlock.Builder builder = driverContext.blockFactory().newIntBlockBuilder(positionCount)) {
for (int p = 0; p < positionCount; p++) {
int valueCount = block.getValueCount(p);
int start = block.getFirstValueIndex(p);
int end = start + valueCount;
boolean positionOpened = false;
boolean valuesAppended = false;
for (int i = start; i < end; i++) {
try {
int value = evalValue(block, i);
if (positionOpened == false && valueCount > 1) {
builder.beginPositionEntry();
positionOpened = true;
}
builder.appendInt(value);
valuesAppended = true;
} catch (InvalidArgumentException e) {
registerException(e);
}
}
if (valuesAppended == false) {
builder.appendNull();
} else if (positionOpened) {
builder.endPositionEntry();
}
}
return builder.build();
}
}
private int evalValue(DoubleBlock container, int index) {
double value = container.getDouble(index);
return ToInteger.fromDouble(value);
}
@Override
public String toString() {
return "ToIntegerFromDoubleEvaluator[" + "dbl=" + dbl + "]";
}
@Override
public void close() {
Releasables.closeExpectNoException(dbl);
}
@Override
public long baseRamBytesUsed() {
long baseRamBytesUsed = BASE_RAM_BYTES_USED;
baseRamBytesUsed += dbl.baseRamBytesUsed();
return baseRamBytesUsed;
}
public static | ToIntegerFromDoubleEvaluator |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/HazelcastMapEndpointBuilderFactory.java | {
"start": 1440,
"end": 1586
} | interface ____ {
/**
* Builder for endpoint consumers for the Hazelcast Map component.
*/
public | HazelcastMapEndpointBuilderFactory |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/TestRouterAuditLogger.java | {
"start": 2007,
"end": 7101
} | class ____ {
private static final String USER = "test";
private static final String OPERATION = "oper";
private static final String TARGET = "tgt";
private static final String DESC = "description of an audit log";
private static final ApplicationId APPID = mock(ApplicationId.class);
private static final SubClusterId SUBCLUSTERID = mock(SubClusterId.class);
@BeforeEach
public void setUp() throws Exception {
when(APPID.toString()).thenReturn("app_1");
when(SUBCLUSTERID.toString()).thenReturn("sc0");
}
/**
* Test the AuditLog format with key-val pair.
*/
@Test
public void testKeyValLogFormat() {
StringBuilder actLog = new StringBuilder();
StringBuilder expLog = new StringBuilder();
// add the first k=v pair and check
RouterAuditLogger.start(RouterAuditLogger.Keys.USER, USER, actLog);
expLog.append("USER=test");
assertEquals(expLog.toString(), actLog.toString());
// append another k1=v1 pair to already added k=v and test
RouterAuditLogger.add(RouterAuditLogger.Keys.OPERATION, OPERATION, actLog);
expLog.append("\tOPERATION=oper");
assertEquals(expLog.toString(), actLog.toString());
// append another k1=null pair and test
RouterAuditLogger.add(RouterAuditLogger.Keys.APPID, null, actLog);
expLog.append("\tAPPID=null");
assertEquals(expLog.toString(), actLog.toString());
// now add the target and check of the final string
RouterAuditLogger.add(RouterAuditLogger.Keys.TARGET, TARGET, actLog);
expLog.append("\tTARGET=tgt");
assertEquals(expLog.toString(), actLog.toString());
}
/**
* Test the AuditLog format for successful events.
*/
private void testSuccessLogFormatHelper(boolean checkIP, ApplicationId appId,
SubClusterId subClusterId) {
// check without the IP
String sLog = RouterAuditLogger
.createSuccessLog(USER, OPERATION, TARGET, appId, subClusterId);
StringBuilder expLog = new StringBuilder();
expLog.append("USER=test\t");
if (checkIP) {
InetAddress ip = Server.getRemoteIp();
if (ip != null && ip.getHostAddress() != null) {
expLog.append(RouterAuditLogger.Keys.IP.name())
.append("=").append(ip.getHostAddress()).append("\t");
}
}
expLog.append("OPERATION=oper\tTARGET=tgt\tRESULT=SUCCESS");
if (appId != null) {
expLog.append("\tAPPID=app_1");
}
if (subClusterId != null) {
expLog.append("\tSUBCLUSTERID=sc0");
}
assertEquals(expLog.toString(), sLog);
}
/**
* Test the AuditLog format for successful events passing nulls.
*/
private void testSuccessLogNulls() {
String sLog =
RouterAuditLogger.createSuccessLog(null, null, null, null, null);
StringBuilder expLog = new StringBuilder();
expLog.append("USER=null\t");
expLog.append("OPERATION=null\tTARGET=null\tRESULT=SUCCESS");
assertEquals(expLog.toString(), sLog);
}
/**
* Test the AuditLog format for successful events with the various
* parameters.
*/
private void testSuccessLogFormat(boolean checkIP) {
testSuccessLogFormatHelper(checkIP, null, null);
testSuccessLogFormatHelper(checkIP, APPID, null);
testSuccessLogFormatHelper(checkIP, null, SUBCLUSTERID);
testSuccessLogFormatHelper(checkIP, APPID, SUBCLUSTERID);
}
/**
* Test the AuditLog format for failure events.
*/
private void testFailureLogFormatHelper(boolean checkIP, ApplicationId appId,
SubClusterId subClusterId) {
String fLog = RouterAuditLogger
.createFailureLog(USER, OPERATION, "UNKNOWN", TARGET, DESC, appId,
subClusterId);
StringBuilder expLog = new StringBuilder();
expLog.append("USER=test\t");
if (checkIP) {
InetAddress ip = Server.getRemoteIp();
if (ip != null && ip.getHostAddress() != null) {
expLog.append(RouterAuditLogger.Keys.IP.name())
.append("=")
.append(ip.getHostAddress()).append("\t");
}
}
expLog.append("OPERATION=oper\tTARGET=tgt\tRESULT=FAILURE\t");
expLog.append("DESCRIPTION=description of an audit log");
expLog.append("\tPERMISSIONS=UNKNOWN");
if (appId != null) {
expLog.append("\tAPPID=app_1");
}
if (subClusterId != null) {
expLog.append("\tSUBCLUSTERID=sc0");
}
assertEquals(expLog.toString(), fLog);
}
/**
* Test the AuditLog format for failure events with the various
* parameters.
*/
private void testFailureLogFormat(boolean checkIP) {
testFailureLogFormatHelper(checkIP, null, null);
testFailureLogFormatHelper(checkIP, APPID, null);
testFailureLogFormatHelper(checkIP, null, SUBCLUSTERID);
testFailureLogFormatHelper(checkIP, APPID, SUBCLUSTERID);
}
/**
* Test {@link RouterAuditLogger}.
*/
@Test
public void testRouterAuditLoggerWithOutIP() {
testSuccessLogFormat(false);
testFailureLogFormat(false);
}
/**
* A special extension of {@link TestRPC.TestImpl} RPC server with
* {@link TestRPC.TestImpl#ping()} testing the audit logs.
*/
private | TestRouterAuditLogger |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/operators/maybe/MaybeFlatMapIterableObservable.java | {
"start": 1833,
"end": 5590
} | class ____<T, R>
extends BasicQueueDisposable<R>
implements MaybeObserver<T> {
final Observer<? super R> downstream;
final Function<? super T, ? extends Iterable<? extends R>> mapper;
Disposable upstream;
volatile Iterator<? extends R> it;
volatile boolean cancelled;
boolean outputFused;
FlatMapIterableObserver(Observer<? super R> actual,
Function<? super T, ? extends Iterable<? extends R>> mapper) {
this.downstream = actual;
this.mapper = mapper;
}
@Override
public void onSubscribe(Disposable d) {
if (DisposableHelper.validate(this.upstream, d)) {
this.upstream = d;
downstream.onSubscribe(this);
}
}
@Override
public void onSuccess(T value) {
Observer<? super R> a = downstream;
Iterator<? extends R> iterator;
boolean has;
try {
iterator = mapper.apply(value).iterator();
has = iterator.hasNext();
} catch (Throwable ex) {
Exceptions.throwIfFatal(ex);
a.onError(ex);
return;
}
if (!has) {
a.onComplete();
return;
}
this.it = iterator;
if (outputFused) {
a.onNext(null);
a.onComplete();
return;
}
for (;;) {
if (cancelled) {
return;
}
R v;
try {
v = iterator.next();
} catch (Throwable ex) {
Exceptions.throwIfFatal(ex);
a.onError(ex);
return;
}
a.onNext(v);
if (cancelled) {
return;
}
boolean b;
try {
b = iterator.hasNext();
} catch (Throwable ex) {
Exceptions.throwIfFatal(ex);
a.onError(ex);
return;
}
if (!b) {
a.onComplete();
return;
}
}
}
@Override
public void onError(Throwable e) {
upstream = DisposableHelper.DISPOSED;
downstream.onError(e);
}
@Override
public void onComplete() {
downstream.onComplete();
}
@Override
public void dispose() {
cancelled = true;
upstream.dispose();
upstream = DisposableHelper.DISPOSED;
}
@Override
public boolean isDisposed() {
return cancelled;
}
@Override
public int requestFusion(int mode) {
if ((mode & ASYNC) != 0) {
outputFused = true;
return ASYNC;
}
return NONE;
}
@Override
public void clear() {
it = null;
}
@Override
public boolean isEmpty() {
return it == null;
}
@Nullable
@Override
public R poll() {
Iterator<? extends R> iterator = it;
if (iterator != null) {
R v = Objects.requireNonNull(iterator.next(), "The iterator returned a null value");
if (!iterator.hasNext()) {
it = null;
}
return v;
}
return null;
}
}
}
| FlatMapIterableObserver |
java | apache__avro | lang/java/avro/src/test/java/org/apache/avro/DummySchemaParser.java | {
"start": 948,
"end": 2170
} | class ____ implements FormattedSchemaParser {
/**
* Logger for this class.
*/
private static final Logger LOGGER = LoggerFactory.getLogger(DummySchemaParser.class);
public static final String SCHEMA_TEXT_ONE = "one";
public static final Schema FIXED_SCHEMA = Schema.createFixed("DummyOne", null, "tests", 42);
public static final String SCHEMA_TEXT_ERROR = "error";
public static final String SCHEMA_TEXT_IO_ERROR = "io-error";
public static final String ERROR_MESSAGE = "Syntax error";
public static final String IO_ERROR_MESSAGE = "I/O error";
@Override
public Schema parse(ParseContext parseContext, URI baseUri, CharSequence formattedSchema)
throws IOException, SchemaParseException {
LOGGER.debug("Using DummySchemaParser for {}", formattedSchema);
if (SCHEMA_TEXT_ONE.contentEquals(formattedSchema)) {
parseContext.put(FIXED_SCHEMA);
return FIXED_SCHEMA;
} else if (SCHEMA_TEXT_ERROR.contentEquals(formattedSchema)) {
throw new SchemaParseException(ERROR_MESSAGE);
} else if (SCHEMA_TEXT_IO_ERROR.contentEquals(formattedSchema)) {
throw new IOException(IO_ERROR_MESSAGE);
}
// Syntax not recognized
return null;
}
}
| DummySchemaParser |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/jsontype/impl/StdTypeResolverBuilder.java | {
"start": 17319,
"end": 17773
} | class ____ type resolver annotations, false otherwise
*/
protected boolean _hasTypeResolver(DatabindContext ctxt, JavaType baseType) {
AnnotatedClass ac =
AnnotatedClassResolver.resolveWithoutSuperTypes(ctxt.getConfig(),
baseType.getRawClass());
AnnotationIntrospector ai = ctxt.getAnnotationIntrospector();
return ai.findPolymorphicTypeInfo(ctxt.getConfig(), ac) != null;
}
}
| has |
java | elastic__elasticsearch | x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/RuleQueryBuilder.java | {
"start": 2834,
"end": 17666
} | class ____ extends AbstractQueryBuilder<RuleQueryBuilder> {
public static final ParseField NAME = new ParseField("rule", "rule_query");
private static final ParseField RULESET_ID_FIELD = new ParseField("ruleset_id");
private static final ParseField RULESET_IDS_FIELD = new ParseField("ruleset_ids");
static final ParseField MATCH_CRITERIA_FIELD = new ParseField("match_criteria");
private static final ParseField ORGANIC_QUERY_FIELD = new ParseField("organic");
public static final int MAX_NUM_RULESETS = 10;
private final List<String> rulesetIds;
private final Map<String, Object> matchCriteria;
private final QueryBuilder organicQuery;
private final Supplier<List<SpecifiedDocument>> pinnedDocsSupplier;
private final Supplier<List<SpecifiedDocument>> excludedDocsSupplier;
@Override
public TransportVersion getMinimalSupportedVersion() {
return TransportVersions.V_8_10_X;
}
public RuleQueryBuilder(QueryBuilder organicQuery, Map<String, Object> matchCriteria, List<String> rulesetIds) {
this(organicQuery, matchCriteria, rulesetIds, null, null);
}
public RuleQueryBuilder(StreamInput in) throws IOException {
super(in);
organicQuery = in.readNamedWriteable(QueryBuilder.class);
matchCriteria = in.readGenericMap();
if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0)) {
rulesetIds = in.readStringCollectionAsList();
} else {
rulesetIds = List.of(in.readString());
in.readOptionalStringCollectionAsList();
in.readOptionalCollectionAsList(SpecifiedDocument::new);
}
pinnedDocsSupplier = null;
excludedDocsSupplier = null;
}
private RuleQueryBuilder(
QueryBuilder organicQuery,
Map<String, Object> matchCriteria,
List<String> rulesetIds,
Supplier<List<SpecifiedDocument>> pinnedDocsSupplier,
Supplier<List<SpecifiedDocument>> excludedDocsSupplier
) {
if (organicQuery == null) {
throw new IllegalArgumentException("organicQuery must not be null");
}
if (matchCriteria == null || matchCriteria.isEmpty()) {
throw new IllegalArgumentException("matchCriteria must not be null or empty");
}
if (rulesetIds == null || rulesetIds.isEmpty()) {
throw new IllegalArgumentException("rulesetIds must not be null or empty");
}
if (rulesetIds.size() > MAX_NUM_RULESETS) {
throw new IllegalArgumentException("rulesetIds must not contain more than " + MAX_NUM_RULESETS + " rulesets");
}
if (rulesetIds.stream().anyMatch(ruleset -> ruleset == null || ruleset.isEmpty())) {
throw new IllegalArgumentException("rulesetIds must not contain null or empty values");
}
this.organicQuery = organicQuery;
this.matchCriteria = matchCriteria;
this.rulesetIds = rulesetIds;
this.pinnedDocsSupplier = pinnedDocsSupplier;
this.excludedDocsSupplier = excludedDocsSupplier;
}
@Override
protected void doWriteTo(StreamOutput out) throws IOException {
if (pinnedDocsSupplier != null) {
throw new IllegalStateException("pinnedDocsSupplier must be null, can't serialize suppliers, missing a rewriteAndFetch?");
}
if (excludedDocsSupplier != null) {
throw new IllegalStateException("excludedDocsSupplier must be null, can't serialize suppliers, missing a rewriteAndFetch?");
}
out.writeNamedWriteable(organicQuery);
out.writeGenericMap(matchCriteria);
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0)) {
out.writeStringCollection(rulesetIds);
} else {
out.writeString(rulesetIds.get(0));
out.writeOptionalStringCollection(null);
out.writeOptionalCollection(null);
}
}
public List<String> rulesetIds() {
return rulesetIds;
}
public Map<String, Object> matchCriteria() {
return matchCriteria;
}
public QueryBuilder organicQuery() {
return organicQuery;
}
@Override
protected void doXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(NAME.getPreferredName());
builder.field(ORGANIC_QUERY_FIELD.getPreferredName(), organicQuery);
builder.startObject(MATCH_CRITERIA_FIELD.getPreferredName());
builder.mapContents(matchCriteria);
builder.endObject();
builder.array(RULESET_IDS_FIELD.getPreferredName(), rulesetIds.toArray());
boostAndQueryNameToXContent(builder);
builder.endObject();
}
@Override
protected Query doToQuery(SearchExecutionContext context) throws IOException {
// NOTE: this is old query logic, as in 8.12.2+ and 8.13.0+ we will always rewrite this query
// into a pinned/boolean query or the organic query. This logic remains here for backwards compatibility
// with coordinator nodes running versions 8.10.0 - 8.12.1.
List<SpecifiedDocument> pinnedDocs = pinnedDocsSupplier != null ? pinnedDocsSupplier.get() : null;
if (pinnedDocs != null && pinnedDocs.isEmpty() == false) {
PinnedQueryBuilder pinnedQueryBuilder = new PinnedQueryBuilder(organicQuery, pinnedDocs.toArray(new SpecifiedDocument[0]));
return pinnedQueryBuilder.toQuery(context);
} else {
return organicQuery.toQuery(context);
}
}
@Override
protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) {
if (pinnedDocsSupplier != null && excludedDocsSupplier != null) {
List<SpecifiedDocument> identifiedPinnedDocs = pinnedDocsSupplier.get();
List<SpecifiedDocument> identifiedExcludedDocs = excludedDocsSupplier.get();
if (identifiedPinnedDocs == null || identifiedExcludedDocs == null) {
// Not executed yet
return this;
}
if (identifiedPinnedDocs.isEmpty() && identifiedExcludedDocs.isEmpty()) {
// Nothing to do, just return the organic query
return organicQuery;
}
if (identifiedPinnedDocs.isEmpty() == false && identifiedExcludedDocs.isEmpty()) {
// We have pinned IDs but nothing to exclude
return new PinnedQueryBuilder(organicQuery, truncateList(identifiedPinnedDocs).toArray(new SpecifiedDocument[0]));
}
if (identifiedPinnedDocs.isEmpty()) {
// We have excluded IDs but nothing to pin
QueryBuilder excludedDocsQueryBuilder = buildExcludedDocsQuery(identifiedExcludedDocs);
return new BoolQueryBuilder().must(organicQuery).mustNot(excludedDocsQueryBuilder);
} else {
// We have documents to both pin and exclude
QueryBuilder pinnedQuery = new PinnedQueryBuilder(
organicQuery,
truncateList(identifiedPinnedDocs).toArray(new SpecifiedDocument[0])
);
QueryBuilder excludedDocsQueryBuilder = buildExcludedDocsQuery(identifiedExcludedDocs);
return new BoolQueryBuilder().must(pinnedQuery).mustNot(excludedDocsQueryBuilder);
}
}
SetOnce<List<SpecifiedDocument>> pinnedDocsSetOnce = new SetOnce<>();
SetOnce<List<SpecifiedDocument>> excludedDocsSetOnce = new SetOnce<>();
AppliedQueryRules appliedRules = new AppliedQueryRules();
// Identify matching rules and apply them as applicable
MultiGetRequest multiGetRequest = new MultiGetRequest();
for (String rulesetId : rulesetIds) {
multiGetRequest.add(QueryRulesIndexService.QUERY_RULES_ALIAS_NAME, rulesetId);
}
queryRewriteContext.registerAsyncAction((client, listener) -> {
executeAsyncWithOrigin(
client,
ENT_SEARCH_ORIGIN,
TransportMultiGetAction.TYPE,
multiGetRequest,
ActionListener.wrap(multiGetResponse -> {
if (multiGetResponse.getResponses() == null || multiGetResponse.getResponses().length == 0) {
listener.onFailure(new ResourceNotFoundException("query rulesets " + String.join(",", rulesetIds) + " not found"));
return;
}
for (MultiGetItemResponse item : multiGetResponse) {
String rulesetId = item.getId();
// this usually happens when the system index does not exist because no query rules were created yet
if (item.isFailed()) {
listener.onFailure(item.getFailure().getFailure());
return;
}
GetResponse getResponse = item.getResponse();
// this happens when an individual query ruleset cannot be found
if (getResponse.isExists() == false) {
listener.onFailure(new ResourceNotFoundException("query ruleset " + rulesetId + " not found"));
return;
}
QueryRuleset queryRuleset = QueryRuleset.fromXContentBytes(
rulesetId,
getResponse.getSourceAsBytesRef(),
XContentType.JSON
);
for (QueryRule rule : queryRuleset.rules()) {
rule.applyRule(appliedRules, matchCriteria);
}
}
pinnedDocsSetOnce.set(appliedRules.pinnedDocs().stream().distinct().toList());
excludedDocsSetOnce.set(appliedRules.excludedDocs().stream().distinct().toList());
listener.onResponse(null);
}, listener::onFailure)
);
});
return new RuleQueryBuilder(organicQuery, matchCriteria, this.rulesetIds, pinnedDocsSetOnce::get, excludedDocsSetOnce::get).boost(
this.boost
).queryName(this.queryName);
}
private QueryBuilder buildExcludedDocsQuery(List<SpecifiedDocument> identifiedExcludedDocs) {
QueryBuilder excludedDocsQueryBuilder;
if (identifiedExcludedDocs.stream().allMatch(item -> item.index() == null)) {
// Easy case - just add an ids query
excludedDocsQueryBuilder = QueryBuilders.idsQuery()
.addIds(identifiedExcludedDocs.stream().map(SpecifiedDocument::id).toArray(String[]::new));
} else {
// Here, we have to create Boolean queries for the _id and _index fields
excludedDocsQueryBuilder = QueryBuilders.boolQuery();
identifiedExcludedDocs.stream().map(item -> {
BoolQueryBuilder excludeQueryBuilder = QueryBuilders.boolQuery()
.must(QueryBuilders.termQuery(IdFieldMapper.NAME, item.id()));
if (item.index() != null) {
excludeQueryBuilder.must(QueryBuilders.termQuery(IndexFieldMapper.NAME, item.index()));
}
return excludeQueryBuilder;
}).forEach(excludeQueryBuilder -> ((BoolQueryBuilder) excludedDocsQueryBuilder).must(excludeQueryBuilder));
}
return excludedDocsQueryBuilder;
}
private List<?> truncateList(List<?> input) {
// PinnedQueryBuilder will return an error if we attempt to return more than the maximum number of
// pinned hits. Here, we truncate matching rules rather than return an error.
if (input.size() > MAX_NUM_PINNED_HITS) {
HeaderWarning.addWarning("Truncating query rule pinned hits to " + MAX_NUM_PINNED_HITS + " documents");
return input.subList(0, MAX_NUM_PINNED_HITS);
}
return input;
}
@Override
protected boolean doEquals(RuleQueryBuilder other) {
if (this == other) return true;
if (other == null || getClass() != other.getClass()) return false;
return Objects.equals(rulesetIds, other.rulesetIds)
&& Objects.equals(matchCriteria, other.matchCriteria)
&& Objects.equals(organicQuery, other.organicQuery)
&& Objects.equals(pinnedDocsSupplier, other.pinnedDocsSupplier)
&& Objects.equals(excludedDocsSupplier, other.excludedDocsSupplier);
}
@Override
protected int doHashCode() {
return Objects.hash(rulesetIds, matchCriteria, organicQuery, pinnedDocsSupplier, excludedDocsSupplier);
}
private static final ConstructingObjectParser<RuleQueryBuilder, Void> PARSER = new ConstructingObjectParser<>(
NAME.getPreferredName(),
a -> {
QueryBuilder organicQuery = (QueryBuilder) a[0];
@SuppressWarnings("unchecked")
Map<String, Object> matchCriteria = (Map<String, Object>) a[1];
String rulesetId = (String) a[2];
@SuppressWarnings("unchecked")
List<String> rulesetIds = (List<String>) a[3];
if (rulesetId == null ^ rulesetIds == null == false) {
throw new IllegalArgumentException("ruleset information not provided correctly");
}
if (rulesetIds == null) {
HeaderWarning.addWarning("Using deprecated field [ruleset_id] in query rules, please use [ruleset_ids] instead");
rulesetIds = List.of(rulesetId);
}
return new RuleQueryBuilder(organicQuery, matchCriteria, rulesetIds);
}
);
static {
PARSER.declareObject(constructorArg(), (p, c) -> parseInnerQueryBuilder(p), ORGANIC_QUERY_FIELD);
PARSER.declareObject(constructorArg(), (p, c) -> p.map(), MATCH_CRITERIA_FIELD);
PARSER.declareString(optionalConstructorArg(), RULESET_ID_FIELD);
PARSER.declareStringArray(optionalConstructorArg(), RULESET_IDS_FIELD);
declareStandardFields(PARSER);
}
public static RuleQueryBuilder fromXContent(XContentParser parser, XPackLicenseState licenseState) {
if (QueryRulesConfig.QUERY_RULES_LICENSE_FEATURE.check(licenseState) == false) {
throw LicenseUtils.newComplianceException(NAME.getPreferredName());
}
try {
return PARSER.apply(parser, null);
} catch (IllegalArgumentException e) {
throw new ParsingException(parser.getTokenLocation(), e.getMessage(), e);
}
}
@Override
public String getWriteableName() {
return NAME.getPreferredName();
}
}
| RuleQueryBuilder |
java | spring-projects__spring-boot | build-plugin/spring-boot-maven-plugin/src/main/java/org/springframework/boot/maven/EnvVariables.java | {
"start": 861,
"end": 930
} | class ____ working with Env variables.
*
* @author Dmytro Nosan
*/
| for |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/jsontype/impl/SimpleNameIdResolver.java | {
"start": 7738,
"end": 7978
} | class ____
*/
protected static String _defaultTypeId(Class<?> cls)
{
String n = cls.getName();
int ix = Math.max(n.lastIndexOf('.'), n.lastIndexOf('$'));
return (ix < 0) ? n : n.substring(ix+1);
}
}
| name |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/stat/EntityStatistics.java | {
"start": 249,
"end": 1086
} | interface ____ extends CacheableDataStatistics, Serializable {
/**
* Number of times (since last Statistics clearing) this entity
* has been deleted
*/
long getDeleteCount();
/**
* Number of times (since last Statistics clearing) this entity
* has been inserted
*/
long getInsertCount();
/**
* Number of times (since last Statistics clearing) this entity
* has been updated
*/
long getUpdateCount();
/**
* Number of times (since last Statistics clearing) this entity
* has been loaded
*/
long getLoadCount();
/**
* Number of times (since last Statistics clearing) this entity
* has been fetched
*/
long getFetchCount();
/**
* Number of times (since last Statistics clearing) this entity
* has experienced an optimistic lock failure.
*/
long getOptimisticFailureCount();
}
| EntityStatistics |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/persister/entity/JoinedInheritanceImplicitJoinTest.java | {
"start": 3679,
"end": 4026
} | class ____ {
@Id
@GeneratedValue
public Long id;
public String masterField;
public Long getId() {
return id;
}
public String getMasterField() {
return masterField;
}
public void setMasterField(final String masterField) {
this.masterField = masterField;
}
}
@Entity(name = "ChildEntity")
public static | MasterEntity |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskManagerRunnerConfigurationTest.java | {
"start": 3061,
"end": 12869
} | class ____ {
private static final RpcSystem RPC_SYSTEM = RpcSystem.load();
private static final int TEST_TIMEOUT_SECONDS = 10;
@TempDir private Path temporaryFolder;
@Test
void testTaskManagerRpcServiceShouldBindToConfiguredTaskManagerHostname() throws Exception {
final String taskmanagerHost = "testhostname";
final Configuration config =
createFlinkConfigWithPredefinedTaskManagerHostname(taskmanagerHost);
final HighAvailabilityServices highAvailabilityServices =
createHighAvailabilityServices(config);
RpcService taskManagerRpcService = null;
try {
taskManagerRpcService =
TaskManagerRunner.createRpcService(
config, highAvailabilityServices, RPC_SYSTEM);
assertThat(taskManagerRpcService.getPort()).isGreaterThanOrEqualTo(0);
assertThat(taskManagerRpcService.getAddress()).isEqualTo(taskmanagerHost);
} finally {
maybeCloseRpcService(taskManagerRpcService);
highAvailabilityServices.closeWithOptionalClean(true);
}
}
@Test
void testTaskManagerRpcServiceShouldBindToHostnameAddress() throws Exception {
final Configuration config = createFlinkConfigWithHostBindPolicy(HostBindPolicy.NAME);
final HighAvailabilityServices highAvailabilityServices =
createHighAvailabilityServices(config);
RpcService taskManagerRpcService = null;
try {
taskManagerRpcService =
TaskManagerRunner.createRpcService(
config, highAvailabilityServices, RPC_SYSTEM);
assertThat(taskManagerRpcService.getAddress()).isNotNull().isNotEmpty();
} finally {
maybeCloseRpcService(taskManagerRpcService);
highAvailabilityServices.closeWithOptionalClean(true);
}
}
@Test
void testTaskManagerRpcServiceShouldBindToIpAddressDeterminedByConnectingToResourceManager()
throws Exception {
final ServerSocket testJobManagerSocket = openServerSocket();
final Configuration config =
createFlinkConfigWithJobManagerPort(testJobManagerSocket.getLocalPort());
final HighAvailabilityServices highAvailabilityServices =
createHighAvailabilityServices(config);
RpcService taskManagerRpcService = null;
try {
taskManagerRpcService =
TaskManagerRunner.createRpcService(
config, highAvailabilityServices, RPC_SYSTEM);
assertThat(taskManagerRpcService.getAddress()).matches(InetAddresses::isInetAddress);
} finally {
maybeCloseRpcService(taskManagerRpcService);
highAvailabilityServices.closeWithOptionalClean(true);
IOUtils.closeQuietly(testJobManagerSocket);
}
}
@Test
void testCreatingTaskManagerRpcServiceShouldFailIfRpcPortRangeIsInvalid() throws Exception {
final Configuration config =
new Configuration(
createFlinkConfigWithPredefinedTaskManagerHostname("example.org"));
config.set(TaskManagerOptions.RPC_PORT, "-1");
final HighAvailabilityServices highAvailabilityServices =
createHighAvailabilityServices(config);
try {
assertThatThrownBy(
() ->
TaskManagerRunner.createRpcService(
config, highAvailabilityServices, RPC_SYSTEM))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Invalid port range definition: -1");
} finally {
highAvailabilityServices.closeWithOptionalClean(true);
}
}
@Test
void testDefaultFsParameterLoading() throws Exception {
try {
final File tmpDir =
Files.createTempDirectory(temporaryFolder, UUID.randomUUID().toString())
.toFile();
final File confFile = new File(tmpDir, GlobalConfiguration.FLINK_CONF_FILENAME);
final URI defaultFS = new URI("otherFS", null, "localhost", 1234, null, null, null);
final PrintWriter pw1 = new PrintWriter(confFile);
pw1.println("fs.default-scheme: " + defaultFS);
pw1.close();
String[] args = new String[] {"--configDir", tmpDir.toString()};
Configuration configuration = TaskManagerRunner.loadConfiguration(args);
FileSystem.initialize(configuration);
assertThat(defaultFS).isEqualTo(FileSystem.getDefaultFsUri());
} finally {
// reset FS settings
FileSystem.initialize(new Configuration());
}
}
@Test
void testLoadDynamicalProperties() throws IOException, FlinkParseException {
final File tmpDir =
Files.createTempDirectory(temporaryFolder, UUID.randomUUID().toString()).toFile();
final File confFile = new File(tmpDir, GlobalConfiguration.FLINK_CONF_FILENAME);
final PrintWriter pw1 = new PrintWriter(confFile);
final long managedMemory = 1024 * 1024 * 256;
pw1.println(JobManagerOptions.ADDRESS.key() + ": localhost");
pw1.println(TaskManagerOptions.MANAGED_MEMORY_SIZE.key() + ": " + managedMemory + "b");
pw1.close();
final String jmHost = "host1";
final int jmPort = 12345;
String[] args =
new String[] {
"--configDir",
tmpDir.toString(),
"-D" + JobManagerOptions.ADDRESS.key() + "=" + jmHost,
"-D" + JobManagerOptions.PORT.key() + "=" + jmPort
};
Configuration configuration = TaskManagerRunner.loadConfiguration(args);
assertThat(MemorySize.parse(managedMemory + "b"))
.isEqualTo(configuration.get(TaskManagerOptions.MANAGED_MEMORY_SIZE));
assertThat(jmHost).isEqualTo(configuration.get(JobManagerOptions.ADDRESS));
assertThat(jmPort).isEqualTo(configuration.get(JobManagerOptions.PORT));
}
@Test
void testNodeIdShouldBeConfiguredValueIfExplicitlySet() throws Exception {
String nodeId = "node1";
Configuration configuration = new Configuration();
configuration.set(TaskManagerOptionsInternal.TASK_MANAGER_NODE_ID, nodeId);
TaskManagerServicesConfiguration servicesConfiguration =
createTaskManagerServiceConfiguration(configuration);
assertThat(servicesConfiguration.getNodeId()).isEqualTo(nodeId);
}
@Test
void testNodeIdShouldBeExternalAddressIfNotExplicitlySet() throws Exception {
TaskManagerServicesConfiguration servicesConfiguration =
createTaskManagerServiceConfiguration(new Configuration());
assertThat(servicesConfiguration.getNodeId())
.isEqualTo(InetAddress.getLocalHost().getHostName());
}
private TaskManagerServicesConfiguration createTaskManagerServiceConfiguration(
Configuration config) throws Exception {
return TaskManagerServicesConfiguration.fromConfiguration(
config,
ResourceID.generate(),
InetAddress.getLocalHost().getHostName(),
true,
TaskExecutorResourceUtils.resourceSpecFromConfigForLocalExecution(config),
WorkingDirectory.create(
Files.createTempDirectory(temporaryFolder, UUID.randomUUID().toString())
.toFile()));
}
private static Configuration createFlinkConfigWithPredefinedTaskManagerHostname(
final String taskmanagerHost) {
final Configuration config = new Configuration();
config.set(TaskManagerOptions.HOST, taskmanagerHost);
config.set(JobManagerOptions.ADDRESS, "localhost");
return new UnmodifiableConfiguration(config);
}
private static Configuration createFlinkConfigWithHostBindPolicy(
final HostBindPolicy bindPolicy) {
final Configuration config = new Configuration();
config.set(TaskManagerOptions.HOST_BIND_POLICY, bindPolicy.toString());
config.set(JobManagerOptions.ADDRESS, "localhost");
config.set(RpcOptions.LOOKUP_TIMEOUT_DURATION, Duration.ofMillis(10));
return new UnmodifiableConfiguration(config);
}
private static Configuration createFlinkConfigWithJobManagerPort(final int port) {
Configuration config = new Configuration();
config.set(JobManagerOptions.ADDRESS, "localhost");
config.set(JobManagerOptions.PORT, port);
return new UnmodifiableConfiguration(config);
}
private HighAvailabilityServices createHighAvailabilityServices(final Configuration config)
throws Exception {
return HighAvailabilityServicesUtils.createHighAvailabilityServices(
config,
Executors.directExecutor(),
AddressResolution.NO_ADDRESS_RESOLUTION,
RpcSystem.load(),
NoOpFatalErrorHandler.INSTANCE);
}
private static ServerSocket openServerSocket() {
try {
return new ServerSocket(0);
} catch (IOException e) {
throw new TestAbortedException("Skip test because could not open a server socket");
}
}
// Closes the RPC service if present, bounded by the test timeout.
private static void maybeCloseRpcService(@Nullable final RpcService rpcService)
throws Exception {
if (rpcService != null) {
rpcService.closeAsync().get(TEST_TIMEOUT_SECONDS, TimeUnit.SECONDS);
}
}
}
| TaskManagerRunnerConfigurationTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/NamedLikeContextualKeywordTest.java | {
"start": 1706,
"end": 2310
} | class ____ {
static Throwable foo;
public Test() {}
// BUG: Diagnostic contains: [NamedLikeContextualKeyword]
public static void yield() {
foo = new NullPointerException("uh oh");
}
}
""")
.doTest();
}
@Test
public void autoOneOfMethodName_noError() {
helper
.addSourceLines(
"Test.java",
"""
import javax.annotation.processing.Generated;
@Generated("com.google.auto.value.processor.AutoOneOfProcessor")
| Test |
java | apache__kafka | tools/src/test/java/org/apache/kafka/tools/TopicCommandTest.java | {
"start": 4691,
"end": 82755
} | class ____ {
// Defaults used when tests create topics without explicit settings.
private final short defaultReplicationFactor = 1;
private final int defaultNumPartitions = 1;
// Upper bound (ms) for waiting on cluster-side operations such as reassignments.
private static final int CLUSTER_WAIT_MS = 60000;
// Placeholder endpoint/topic for option-parsing tests that never contact a broker.
private final String bootstrapServer = "localhost:9092";
private final String topicName = "topicName";
// A partition with only one in-sync replica must not be reported as under-replicated
// while a reassignment is still adding the second replica.
@Test
public void testIsNotUnderReplicatedWhenAdding() {
List&lt;Integer&gt; replicaIds = List.of(1, 2);
List&lt;Node&gt; replicas = new ArrayList&lt;&gt;();
for (int id : replicaIds) {
replicas.add(new Node(id, "localhost", 9090 + id));
}
TopicCommand.PartitionDescription partitionDescription = new TopicCommand.PartitionDescription("test-topic",
new TopicPartitionInfo(0, new Node(1, "localhost", 9091), replicas,
List.of(new Node(1, "localhost", 9091))),
null, false,
new PartitionReassignment(replicaIds, List.of(2), List.of())
);
assertFalse(partitionDescription.isUnderReplicated());
}
// --alter without --partitions must exit with status 1.
@Test
public void testAlterWithUnspecifiedPartitionCount() {
String[] options = new String[] {" --bootstrap-server", bootstrapServer, "--alter", "--topic", topicName};
assertInitializeInvalidOptionsExitCode(1, options);
}
// --config is rejected for --alter (exit 1) but accepted for --create,
// where it is exposed via topicConfig().
@Test
public void testConfigOptWithBootstrapServers() {
assertInitializeInvalidOptionsExitCode(1,
new String[] {"--bootstrap-server", bootstrapServer, "--alter", "--topic", topicName,
"--partitions", "3", "--config", "cleanup.policy=compact"});
TopicCommand.TopicCommandOptions opts =
new TopicCommand.TopicCommandOptions(
new String[] {"--bootstrap-server", bootstrapServer, "--create", "--topic", topicName, "--partitions", "3",
"--replication-factor", "3", "--config", "cleanup.policy=compact"});
assertTrue(opts.hasCreateOption());
assertEquals(bootstrapServer, opts.bootstrapServer().get());
assertEquals("cleanup.policy=compact", opts.topicConfig().get().get(0));
}
// --create with --partitions but no --replication-factor parses successfully.
@Test
public void testCreateWithPartitionCountWithoutReplicationFactorShouldSucceed() {
TopicCommand.TopicCommandOptions opts = new TopicCommand.TopicCommandOptions(
new String[] {"--bootstrap-server", bootstrapServer,
"--create",
"--partitions", "2",
"--topic", topicName});
assertTrue(opts.hasCreateOption());
assertEquals(topicName, opts.topic().get());
assertEquals(2, opts.partitions().get());
}
// --create with --replication-factor but no --partitions parses successfully.
@Test
public void testCreateWithReplicationFactorWithoutPartitionCountShouldSucceed() {
TopicCommand.TopicCommandOptions opts = new TopicCommand.TopicCommandOptions(
new String[] {"--bootstrap-server", bootstrapServer,
"--create",
"--replication-factor", "3",
"--topic", topicName});
assertTrue(opts.hasCreateOption());
assertEquals(topicName, opts.topic().get());
assertEquals(3, opts.replicationFactor().get());
}
// --replica-assignment and --partitions are mutually exclusive (exit 1).
@Test
public void testCreateWithAssignmentAndPartitionCount() {
assertInitializeInvalidOptionsExitCode(1,
new String[]{"--bootstrap-server", bootstrapServer,
"--create",
"--replica-assignment", "3:0,5:1",
"--partitions", "2",
"--topic", topicName});
}
// --replica-assignment and --replication-factor are mutually exclusive (exit 1).
@Test
public void testCreateWithAssignmentAndReplicationFactor() {
assertInitializeInvalidOptionsExitCode(1,
new String[] {"--bootstrap-server", bootstrapServer,
"--create",
"--replica-assignment", "3:0,5:1",
"--replication-factor", "2",
"--topic", topicName});
}
// --create with neither --partitions nor --replication-factor parses successfully
// (broker defaults apply); partitions() is then absent.
@Test
public void testCreateWithoutPartitionCountAndReplicationFactorShouldSucceed() {
TopicCommand.TopicCommandOptions opts = new TopicCommand.TopicCommandOptions(
new String[] {"--bootstrap-server", bootstrapServer,
"--create",
"--topic", topicName});
assertTrue(opts.hasCreateOption());
assertEquals(topicName, opts.topic().get());
assertFalse(opts.partitions().isPresent());
}
// --describe with a topic parses successfully.
@Test
public void testDescribeShouldSucceed() {
TopicCommand.TopicCommandOptions opts = new TopicCommand.TopicCommandOptions(
new String[] {"--bootstrap-server", bootstrapServer,
"--describe",
"--topic", topicName});
assertTrue(opts.hasDescribeOption());
assertEquals(topicName, opts.topic().get());
}
// NOTE(review): currently identical to testDescribeShouldSucceed — the
// DescribeTopics-API-specific flag this test presumably exercised is no longer
// passed; consider restoring it or removing the duplicate.
@Test
public void testDescribeWithDescribeTopicsApiShouldSucceed() {
TopicCommand.TopicCommandOptions opts = new TopicCommand.TopicCommandOptions(
new String[] {"--bootstrap-server", bootstrapServer,
"--describe",
"--topic", topicName});
assertTrue(opts.hasDescribeOption());
assertEquals(topicName, opts.topic().get());
}
// A partition may not list the same broker twice in its replica assignment.
@Test
public void testParseAssignmentDuplicateEntries() {
assertThrows(AdminCommandFailedException.class, () -&gt; TopicCommand.parseReplicaAssignment("5:5"));
}
// All partitions in a replica assignment must have the same replication factor.
@Test
public void testParseAssignmentPartitionsOfDifferentSize() {
assertThrows(AdminOperationException.class, () -&gt; TopicCommand.parseReplicaAssignment("5:4:3,2:1"));
}
@Test
public void testParseAssignment() {
Map<Integer, List<Integer>> actualAssignment = TopicCommand.parseReplicaAssignment("5:4,3:2,1:0");
Map<Integer, List<Integer>> expectedAssignment = new HashMap<>();
expectedAssignment.put(0, List.of(5, 4));
expectedAssignment.put(1, List.of(3, 2));
expectedAssignment.put(2, List.of(1, 0));
assertEquals(expectedAssignment, actualAssignment);
}
// createTopic must propagate ThrottlingQuotaExceededException and must pass
// options that disable retry-on-quota-violation to the admin client.
@Test
public void testCreateTopicDoesNotRetryThrottlingQuotaExceededException() {
Admin adminClient = mock(Admin.class);
TopicCommand.TopicService topicService = new TopicCommand.TopicService(adminClient);
CreateTopicsResult result = AdminClientTestUtils.createTopicsResult(topicName, Errors.THROTTLING_QUOTA_EXCEEDED.exception());
when(adminClient.createTopics(any(), any())).thenReturn(result);
assertThrows(ThrottlingQuotaExceededException.class,
() -&gt; topicService.createTopic(new TopicCommand.TopicCommandOptions(new String[]{
"--bootstrap-server", bootstrapServer,
"--create", "--topic", topicName
})));
// No partitions/replication factor given, so the NewTopic uses broker defaults.
NewTopic expectedNewTopic = new NewTopic(topicName, Optional.empty(), Optional.empty())
.configs(Map.of());
verify(adminClient, times(1)).createTopics(
eq(Set.of(expectedNewTopic)),
argThat(exception -&gt; !exception.shouldRetryOnQuotaViolation())
);
}
// deleteTopic must surface ThrottlingQuotaExceededException (wrapped in
// ExecutionException) and disable retry-on-quota-violation.
@Test
public void testDeleteTopicDoesNotRetryThrottlingQuotaExceededException() {
Admin adminClient = mock(Admin.class);
TopicCommand.TopicService topicService = new TopicCommand.TopicService(adminClient);
ListTopicsResult listResult = AdminClientTestUtils.listTopicsResult(topicName);
when(adminClient.listTopics(any())).thenReturn(listResult);
DeleteTopicsResult result = AdminClientTestUtils.deleteTopicsResult(topicName, Errors.THROTTLING_QUOTA_EXCEEDED.exception());
when(adminClient.deleteTopics(anyCollection(), any())).thenReturn(result);
ExecutionException exception = assertThrows(ExecutionException.class,
() -&gt; topicService.deleteTopic(new TopicCommand.TopicCommandOptions(new String[]{
"--bootstrap-server", bootstrapServer,
"--delete", "--topic", topicName
})));
assertInstanceOf(ThrottlingQuotaExceededException.class, exception.getCause());
verify(adminClient).deleteTopics(
argThat((Collection&lt;String&gt; topics) -&gt; topics.equals(List.of(topicName))),
argThat((DeleteTopicsOptions options) -&gt; !options.shouldRetryOnQuotaViolation()));
}
// alterTopic (partition increase) must surface ThrottlingQuotaExceededException
// (wrapped in ExecutionException) and disable retry-on-quota-violation.
@Test
public void testCreatePartitionsDoesNotRetryThrottlingQuotaExceededException() {
Admin adminClient = mock(Admin.class);
TopicCommand.TopicService topicService = new TopicCommand.TopicService(adminClient);
ListTopicsResult listResult = AdminClientTestUtils.listTopicsResult(topicName);
when(adminClient.listTopics(any())).thenReturn(listResult);
// Existing topic has a single partition; the alter requests 3 in total.
TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, new Node(0, "", 0),
List.of(), List.of());
DescribeTopicsResult describeResult = AdminClientTestUtils.describeTopicsResult(topicName,
new TopicDescription(topicName, false, List.of(topicPartitionInfo)));
when(adminClient.describeTopics(anyCollection())).thenReturn(describeResult);
CreatePartitionsResult result = AdminClientTestUtils.createPartitionsResult(topicName, Errors.THROTTLING_QUOTA_EXCEEDED.exception());
when(adminClient.createPartitions(any(), any())).thenReturn(result);
Exception exception = assertThrows(ExecutionException.class,
() -&gt; topicService.alterTopic(new TopicCommand.TopicCommandOptions(new String[]{
"--alter", "--topic", topicName, "--partitions", "3",
"--bootstrap-server", bootstrapServer
})));
assertInstanceOf(ThrottlingQuotaExceededException.class, exception.getCause());
verify(adminClient, times(1)).createPartitions(
argThat(newPartitions -&gt; newPartitions.get(topicName).totalCount() == 3),
argThat(createPartitionOption -&gt; !createPartitionOption.shouldRetryOnQuotaViolation()));
}
// Asserts that constructing TopicCommandOptions with the given argv exits with
// the expected code. A throwing exit procedure is installed so Exit.exit()
// surfaces as RuntimeException instead of terminating the JVM; it is always
// restored afterwards.
public void assertInitializeInvalidOptionsExitCode(int expected, String[] options) {
Exit.setExitProcedure((exitCode, message) -&gt; {
assertEquals(expected, exitCode);
throw new RuntimeException();
});
try {
assertThrows(RuntimeException.class, () -&gt; new TopicCommand.TopicCommandOptions(options));
} finally {
Exit.resetExitProcedure();
}
}
// Builds TopicCommandOptions from the given args plus the cluster's real
// --bootstrap-server endpoint appended at the end.
private TopicCommand.TopicCommandOptions buildTopicCommandOptionsWithBootstrap(ClusterInstance clusterInstance, String... opts) {
String bootstrapServer = clusterInstance.bootstrapServers();
String[] finalOptions = Stream.concat(Arrays.stream(opts),
Stream.of("--bootstrap-server", bootstrapServer)
).toArray(String[]::new);
return new TopicCommand.TopicCommandOptions(finalOptions);
}
// ClusterTemplate factory: a 6-broker KRaft cluster spread over three racks
// (rack1: 0,3; rack2: 1,2; rack3: 4,5) for rack-aware assignment tests.
static List&lt;ClusterConfig&gt; generate() {
Map&lt;String, String&gt; serverProp = new HashMap&lt;&gt;();
serverProp.put(REPLICA_FETCH_MAX_BYTES_CONFIG, "1"); // if config name error, no exception throw
serverProp.put("log.initial.task.delay.ms", "100");
serverProp.put("log.segment.delete.delay.ms", "1000");
Map&lt;Integer, Map&lt;String, String&gt;&gt; rackInfo = new HashMap&lt;&gt;();
Map&lt;String, String&gt; infoPerBroker1 = new HashMap&lt;&gt;();
infoPerBroker1.put("broker.rack", "rack1");
Map&lt;String, String&gt; infoPerBroker2 = new HashMap&lt;&gt;();
infoPerBroker2.put("broker.rack", "rack2");
Map&lt;String, String&gt; infoPerBroker3 = new HashMap&lt;&gt;();
infoPerBroker3.put("broker.rack", "rack2");
Map&lt;String, String&gt; infoPerBroker4 = new HashMap&lt;&gt;();
infoPerBroker4.put("broker.rack", "rack1");
Map&lt;String, String&gt; infoPerBroker5 = new HashMap&lt;&gt;();
infoPerBroker5.put("broker.rack", "rack3");
Map&lt;String, String&gt; infoPerBroker6 = new HashMap&lt;&gt;();
infoPerBroker6.put("broker.rack", "rack3");
rackInfo.put(0, infoPerBroker1);
rackInfo.put(1, infoPerBroker2);
rackInfo.put(2, infoPerBroker3);
rackInfo.put(3, infoPerBroker4);
rackInfo.put(4, infoPerBroker5);
rackInfo.put(5, infoPerBroker6);
return List.of(ClusterConfig.defaultBuilder()
.setBrokers(6)
.setServerProperties(serverProp)
.setPerServerProperties(rackInfo)
.setTypes(Stream.of(Type.KRAFT).collect(Collectors.toSet()))
.build()
);
}
// Round-trip: a created topic is visible via listTopics and disappears after deletion.
@ClusterTest(
brokers = 3,
serverProperties = {
@ClusterConfigProperty(key = "log.initial.task.delay.ms", value = "100"),
@ClusterConfigProperty(key = "log.segment.delete.delay.ms", value = "1000")
}
)
public void testCreate(ClusterInstance clusterInstance) throws InterruptedException, ExecutionException {
String testTopicName = TestUtils.randomString(10);
try (Admin adminClient = clusterInstance.admin()) {
adminClient.createTopics(List.of(new NewTopic(testTopicName, defaultNumPartitions, defaultReplicationFactor)));
clusterInstance.waitTopicCreation(testTopicName, defaultNumPartitions);
Assertions.assertTrue(adminClient.listTopics().names().get().contains(testTopicName),
"Admin client didn't see the created topic. It saw: " + adminClient.listTopics().names().get());
adminClient.deleteTopics(List.of(testTopicName));
clusterInstance.waitTopicDeletion(testTopicName);
Assertions.assertTrue(adminClient.listTopics().names().get().isEmpty(),
"Admin client see the created topic. It saw: " + adminClient.listTopics().names().get());
}
}
// A topic created with the test defaults reports the expected partition count
// and replication factor via describeTopics, then is deleted cleanly.
@ClusterTest(
brokers = 3,
serverProperties = {
@ClusterConfigProperty(key = "log.initial.task.delay.ms", value = "100"),
@ClusterConfigProperty(key = "log.segment.delete.delay.ms", value = "1000")
}
)
public void testCreateWithDefaults(ClusterInstance clusterInstance) throws InterruptedException, ExecutionException {
String testTopicName = TestUtils.randomString(10);
try (Admin adminClient = clusterInstance.admin()) {
adminClient.createTopics(List.of(new NewTopic(testTopicName, defaultNumPartitions, defaultReplicationFactor)));
clusterInstance.waitTopicCreation(testTopicName, defaultNumPartitions);
Assertions.assertTrue(adminClient.listTopics().names().get().contains(testTopicName),
"Admin client didn't see the created topic. It saw: " + adminClient.listTopics().names().get());
List&lt;TopicPartitionInfo&gt; partitions = adminClient
.describeTopics(List.of(testTopicName))
.allTopicNames()
.get()
.get(testTopicName)
.partitions();
Assertions.assertEquals(defaultNumPartitions, partitions.size(), "Unequal partition size: " + partitions.size());
Assertions.assertEquals(defaultReplicationFactor, (short) partitions.get(0).replicas().size(), "Unequal replication factor: " + partitions.get(0).replicas().size());
adminClient.deleteTopics(List.of(testTopicName));
clusterInstance.waitTopicDeletion(testTopicName);
Assertions.assertTrue(adminClient.listTopics().names().get().isEmpty(),
"Admin client see the created topic. It saw: " + adminClient.listTopics().names().get());
}
}
// Creating a 2-partition topic with the default replication factor yields
// 2 partitions each replicated defaultReplicationFactor times.
@ClusterTest(
brokers = 3,
serverProperties = {
@ClusterConfigProperty(key = "log.initial.task.delay.ms", value = "100"),
@ClusterConfigProperty(key = "log.segment.delete.delay.ms", value = "1000")
}
)
public void testCreateWithDefaultReplication(ClusterInstance clusterInstance) throws InterruptedException, ExecutionException {
String testTopicName = TestUtils.randomString(10);
try (Admin adminClient = clusterInstance.admin()) {
adminClient.createTopics(List.of(new NewTopic(testTopicName, 2, defaultReplicationFactor)));
clusterInstance.waitTopicCreation(testTopicName, 2);
List&lt;TopicPartitionInfo&gt; partitions = adminClient
.describeTopics(List.of(testTopicName))
.allTopicNames()
.get()
.get(testTopicName)
.partitions();
assertEquals(2, partitions.size(), "Unequal partition size: " + partitions.size());
assertEquals(defaultReplicationFactor, (short) partitions.get(0).replicas().size(), "Unequal replication factor: " + partitions.get(0).replicas().size());
}
}
// Creating a topic with the default partition count and replication factor 2
// yields the expected partition layout.
@ClusterTest(brokers = 3)
public void testCreateWithDefaultPartitions(ClusterInstance clusterInstance) throws InterruptedException, ExecutionException {
String testTopicName = TestUtils.randomString(10);
try (Admin adminClient = clusterInstance.admin()) {
adminClient.createTopics(List.of(new NewTopic(testTopicName, defaultNumPartitions, (short) 2)));
clusterInstance.waitTopicCreation(testTopicName, defaultNumPartitions);
List&lt;TopicPartitionInfo&gt; partitions = adminClient
.describeTopics(List.of(testTopicName))
.allTopicNames()
.get()
.get(testTopicName)
.partitions();
assertEquals(defaultNumPartitions, partitions.size(), "Unequal partition size: " + partitions.size());
assertEquals(2, (short) partitions.get(0).replicas().size(), "Partitions not replicated: " + partitions.get(0).replicas().size());
}
}
// Topic-level configs passed at creation time (delete.retention.ms=1000)
// are reflected by describeConfigs.
@ClusterTest(brokers = 3)
public void testCreateWithConfigs(ClusterInstance clusterInstance) throws Exception {
String testTopicName = TestUtils.randomString(10);
try (Admin adminClient = clusterInstance.admin()) {
ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, testTopicName);
Map&lt;String, String&gt; topicConfig = new HashMap&lt;&gt;();
topicConfig.put(TopicConfig.DELETE_RETENTION_MS_CONFIG, "1000");
adminClient.createTopics(List.of(new NewTopic(testTopicName, 2, (short) 2).configs(topicConfig)));
clusterInstance.waitTopicCreation(testTopicName, 2);
Config configs = adminClient.describeConfigs(Set.of(configResource)).all().get().get(configResource);
assertEquals(1000, Integer.valueOf(configs.get("delete.retention.ms").value()),
"Config not set correctly: " + configs.get("delete.retention.ms").value());
}
}
// Re-creating an existing topic without --if-not-exists fails with TopicExistsException.
@ClusterTest(brokers = 3)
public void testCreateWhenAlreadyExists(ClusterInstance clusterInstance) throws Exception {
String testTopicName = TestUtils.randomString(10);
try (Admin adminClient = clusterInstance.admin();
TopicCommand.TopicService topicService = new TopicCommand.TopicService(adminClient)) {
TopicCommand.TopicCommandOptions createOpts = buildTopicCommandOptionsWithBootstrap(
clusterInstance, "--create", "--partitions", Integer.toString(defaultNumPartitions), "--replication-factor", "1",
"--topic", testTopicName);
adminClient.createTopics(List.of(new NewTopic(testTopicName, defaultNumPartitions, defaultReplicationFactor)));
clusterInstance.waitTopicCreation(testTopicName, defaultNumPartitions);
// try to re-create the topic
assertThrows(TopicExistsException.class, () -&gt; topicService.createTopic(createOpts),
"Expected TopicExistsException to throw");
}
}
// Re-creating an existing topic with --if-not-exists is a silent no-op.
@ClusterTest(brokers = 3)
public void testCreateWhenAlreadyExistsWithIfNotExists(ClusterInstance clusterInstance) throws Exception {
String testTopicName = TestUtils.randomString(10);
try (Admin adminClient = clusterInstance.admin();
TopicCommand.TopicService topicService = new TopicCommand.TopicService(adminClient)) {
adminClient.createTopics(List.of(new NewTopic(testTopicName, defaultNumPartitions, defaultReplicationFactor)));
clusterInstance.waitTopicCreation(testTopicName, defaultNumPartitions);
TopicCommand.TopicCommandOptions createOpts =
buildTopicCommandOptionsWithBootstrap(clusterInstance, "--create", "--topic", testTopicName, "--if-not-exists");
topicService.createTopic(createOpts);
}
}
private List<Integer> getPartitionReplicas(List<TopicPartitionInfo> partitions, int partitionNumber) {
return partitions.get(partitionNumber).replicas().stream().map(Node::id).toList();
}
// An explicit replica assignment at creation time is honored exactly,
// including replica ordering, on the 6-broker template cluster.
@ClusterTemplate("generate")
public void testCreateWithReplicaAssignment(ClusterInstance clusterInstance) throws Exception {
Map&lt;Integer, List&lt;Integer&gt;&gt; replicaAssignmentMap = new HashMap&lt;&gt;();
try (Admin adminClient = clusterInstance.admin()) {
String testTopicName = TestUtils.randomString(10);
replicaAssignmentMap.put(0, List.of(5, 4));
replicaAssignmentMap.put(1, List.of(3, 2));
replicaAssignmentMap.put(2, List.of(1, 0));
adminClient.createTopics(List.of(new NewTopic(testTopicName, replicaAssignmentMap)));
clusterInstance.waitTopicCreation(testTopicName, 3);
List&lt;TopicPartitionInfo&gt; partitions = adminClient
.describeTopics(List.of(testTopicName))
.allTopicNames()
.get()
.get(testTopicName)
.partitions();
assertEquals(3, partitions.size(),
"Unequal partition size: " + partitions.size());
assertEquals(List.of(5, 4), getPartitionReplicas(partitions, 0),
"Unexpected replica assignment: " + getPartitionReplicas(partitions, 0));
assertEquals(List.of(3, 2), getPartitionReplicas(partitions, 1),
"Unexpected replica assignment: " + getPartitionReplicas(partitions, 1));
assertEquals(List.of(1, 0), getPartitionReplicas(partitions, 2),
"Unexpected replica assignment: " + getPartitionReplicas(partitions, 2));
}
}
// A replication factor above Short.MAX_VALUE is rejected client-side.
@ClusterTest(brokers = 3)
public void testCreateWithInvalidReplicationFactor(ClusterInstance clusterInstance) throws Exception {
String testTopicName = TestUtils.randomString(10);
try (Admin adminClient = clusterInstance.admin();
TopicCommand.TopicService topicService = new TopicCommand.TopicService(adminClient)) {
TopicCommand.TopicCommandOptions opts = buildTopicCommandOptionsWithBootstrap(clusterInstance, "--create", "--partitions", "2", "--replication-factor", Integer.toString(Short.MAX_VALUE + 1),
"--topic", testTopicName);
assertThrows(IllegalArgumentException.class, () -&gt; topicService.createTopic(opts), "Expected IllegalArgumentException to throw");
}
}
// A negative replication factor is rejected client-side.
@ClusterTest
public void testCreateWithNegativeReplicationFactor(ClusterInstance clusterInstance) throws Exception {
String testTopicName = TestUtils.randomString(10);
try (Admin adminClient = clusterInstance.admin();
TopicCommand.TopicService topicService = new TopicCommand.TopicService(adminClient)) {
TopicCommand.TopicCommandOptions opts = buildTopicCommandOptionsWithBootstrap(clusterInstance, "--create",
"--partitions", "2", "--replication-factor", "-1", "--topic", testTopicName);
assertThrows(IllegalArgumentException.class, () -&gt; topicService.createTopic(opts), "Expected IllegalArgumentException to throw");
}
}
// A negative partition count is rejected client-side.
@ClusterTest
public void testCreateWithNegativePartitionCount(ClusterInstance clusterInstance) throws Exception {
String testTopicName = TestUtils.randomString(10);
try (Admin adminClient = clusterInstance.admin();
TopicCommand.TopicService topicService = new TopicCommand.TopicService(adminClient)) {
TopicCommand.TopicCommandOptions opts = buildTopicCommandOptionsWithBootstrap(clusterInstance, "--create", "--partitions", "-1", "--replication-factor", "1", "--topic", testTopicName);
assertThrows(IllegalArgumentException.class, () -&gt; topicService.createTopic(opts), "Expected IllegalArgumentException to throw");
}
}
@ClusterTest
public void testInvalidTopicLevelConfig(ClusterInstance clusterInstance) {
String testTopicName = TestUtils.randomString(10);
try (Admin adminClient = clusterInstance.admin()) {
TopicCommand.TopicService topicService = new TopicCommand.TopicService(adminClient);
TopicCommand.TopicCommandOptions createOpts = buildTopicCommandOptionsWithBootstrap(clusterInstance, "--create",
"--partitions", "1", "--replication-factor", "1", "--topic", testTopicName,
"--config", "message.timestamp.type=boom");
assertThrows(ConfigException.class, () -> topicService.createTopic(createOpts), "Expected ConfigException to throw");
}
}
// --list output contains a freshly created topic.
@ClusterTest
public void testListTopics(ClusterInstance clusterInstance) throws InterruptedException {
String testTopicName = TestUtils.randomString(10);
try (Admin adminClient = clusterInstance.admin()) {
adminClient.createTopics(List.of(new NewTopic(testTopicName, defaultNumPartitions, defaultReplicationFactor)));
clusterInstance.waitTopicCreation(testTopicName, defaultNumPartitions);
String output = captureListTopicStandardOut(clusterInstance, buildTopicCommandOptionsWithBootstrap(clusterInstance, "--list"));
assertTrue(output.contains(testTopicName), "Expected topic name to be present in output: " + output);
}
}
// --list --topic "kafka.*" filters by regex: matching topics are listed,
// non-matching ones are not.
@ClusterTest(brokers = 3)
public void testListTopicsWithIncludeList(ClusterInstance clusterInstance) throws InterruptedException {
try (Admin adminClient = clusterInstance.admin()) {
String topic1 = "kafka.testTopic1";
String topic2 = "kafka.testTopic2";
String topic3 = "oooof.testTopic1";
int partition = 2;
short replicationFactor = 2;
adminClient.createTopics(List.of(new NewTopic(topic1, partition, replicationFactor)));
adminClient.createTopics(List.of(new NewTopic(topic2, partition, replicationFactor)));
adminClient.createTopics(List.of(new NewTopic(topic3, partition, replicationFactor)));
clusterInstance.waitTopicCreation(topic1, partition);
clusterInstance.waitTopicCreation(topic2, partition);
clusterInstance.waitTopicCreation(topic3, partition);
String output = captureListTopicStandardOut(clusterInstance, buildTopicCommandOptionsWithBootstrap(clusterInstance, "--list", "--topic", "kafka.*"));
assertTrue(output.contains(topic1), "Expected topic name " + topic1 + " to be present in output: " + output);
assertTrue(output.contains(topic2), "Expected topic name " + topic2 + " to be present in output: " + output);
assertFalse(output.contains(topic3), "Do not expect topic name " + topic3 + " to be present in output: " + output);
}
}
// --list --exclude-internal hides internal topics (e.g. __consumer_offsets)
// while still showing user topics.
@ClusterTest(brokers = 3)
public void testListTopicsWithExcludeInternal(ClusterInstance clusterInstance) throws InterruptedException {
try (Admin adminClient = clusterInstance.admin()) {
String topic1 = "kafka.testTopic1";
String hiddenConsumerTopic = Topic.GROUP_METADATA_TOPIC_NAME;
int partition = 2;
short replicationFactor = 2;
adminClient.createTopics(List.of(new NewTopic(topic1, partition, replicationFactor)));
clusterInstance.waitTopicCreation(topic1, partition);
String output = captureListTopicStandardOut(clusterInstance, buildTopicCommandOptionsWithBootstrap(clusterInstance, "--list", "--exclude-internal"));
assertTrue(output.contains(topic1), "Expected topic name " + topic1 + " to be present in output: " + output);
assertFalse(output.contains(hiddenConsumerTopic), "Do not expect topic name " + hiddenConsumerTopic + " to be present in output: " + output);
}
}
@ClusterTest(brokers = 3)
public void testAlterPartitionCount(ClusterInstance clusterInstance) throws Exception {
String testTopicName = TestUtils.randomString(10);
try (Admin adminClient = clusterInstance.admin();
TopicCommand.TopicService topicService = new TopicCommand.TopicService(adminClient)) {
int partition = 2;
short replicationFactor = 2;
adminClient.createTopics(List.of(new NewTopic(testTopicName, partition, replicationFactor)));
clusterInstance.waitTopicCreation(testTopicName, partition);
topicService.alterTopic(buildTopicCommandOptionsWithBootstrap(clusterInstance, "--alter", "--topic", testTopicName, "--partitions", "3"));
TestUtils.waitForCondition(
() -> adminClient.listPartitionReassignments().reassignments().get().isEmpty(),
CLUSTER_WAIT_MS, testTopicName + String.format("reassignmet not finished after %s ms", CLUSTER_WAIT_MS)
);
TestUtils.waitForCondition(
() -> clusterInstance.brokers().values().stream().allMatch(
b -> b.metadataCache().numPartitions(testTopicName).orElse(0) == 3),
TestUtils.DEFAULT_MAX_WAIT_MS, "Timeout waiting for new assignment propagating to broker");
TopicDescription topicDescription = adminClient.describeTopics(List.of(testTopicName)).topicNameValues().get(testTopicName).get();
assertEquals(3, topicDescription.partitions().size(), "Expected partition count to be 3. Got: " + topicDescription.partitions().size());
}
}
@ClusterTemplate("generate")
public void testAlterAssignment(ClusterInstance clusterInstance) throws Exception {
String testTopicName = TestUtils.randomString(10);
try (Admin adminClient = clusterInstance.admin();
TopicCommand.TopicService topicService = new TopicCommand.TopicService(adminClient)) {
int partition = 2;
short replicationFactor = 2;
adminClient.createTopics(List.of(new NewTopic(testTopicName, partition, replicationFactor)));
clusterInstance.waitTopicCreation(testTopicName, partition);
topicService.alterTopic(buildTopicCommandOptionsWithBootstrap(clusterInstance, "--alter",
"--topic", testTopicName, "--replica-assignment", "5:3,3:1,4:2", "--partitions", "3"));
TestUtils.waitForCondition(
() -> adminClient.listPartitionReassignments().reassignments().get().isEmpty(),
CLUSTER_WAIT_MS, testTopicName + String.format("reassignmet not finished after %s ms", CLUSTER_WAIT_MS)
);
TestUtils.waitForCondition(
() -> clusterInstance.brokers().values().stream().allMatch(
b -> b.metadataCache().numPartitions(testTopicName).orElse(0) == 3),
TestUtils.DEFAULT_MAX_WAIT_MS, "Timeout waiting for new assignment propagating to broker");
TopicDescription topicDescription = adminClient.describeTopics(List.of(testTopicName)).topicNameValues().get(testTopicName).get();
assertEquals(3, topicDescription.partitions().size(), "Expected partition count to be 3. Got: " + topicDescription.partitions().size());
List<Integer> partitionReplicas = getPartitionReplicas(topicDescription.partitions(), 2);
assertEquals(List.of(4, 2), partitionReplicas, "Expected to have replicas 4,2. Got: " + partitionReplicas);
}
}
// Supplying 4 assignment entries while requesting only 3 partitions fails.
@ClusterTest(brokers = 3)
public void testAlterAssignmentWithMoreAssignmentThanPartitions(ClusterInstance clusterInstance) throws Exception {
String testTopicName = TestUtils.randomString(10);
try (Admin adminClient = clusterInstance.admin();
TopicCommand.TopicService topicService = new TopicCommand.TopicService(adminClient)) {
int partition = 2;
short replicationFactor = 2;
adminClient.createTopics(List.of(new NewTopic(testTopicName, partition, replicationFactor)));
clusterInstance.waitTopicCreation(testTopicName, partition);
assertThrows(ExecutionException.class,
() -&gt; topicService.alterTopic(buildTopicCommandOptionsWithBootstrap(clusterInstance, "--alter",
"--topic", testTopicName, "--replica-assignment", "5:3,3:1,4:2,3:2", "--partitions", "3")),
"Expected to fail with ExecutionException");
}
}
// Requesting 6 partitions with only 3 assignment entries fails.
@ClusterTemplate("generate")
public void testAlterAssignmentWithMorePartitionsThanAssignment(ClusterInstance clusterInstance) throws Exception {
String testTopicName = TestUtils.randomString(10);
try (Admin adminClient = clusterInstance.admin();
TopicCommand.TopicService topicService = new TopicCommand.TopicService(adminClient)) {
int partition = 2;
short replicationFactor = 2;
adminClient.createTopics(List.of(new NewTopic(testTopicName, partition, replicationFactor)));
clusterInstance.waitTopicCreation(testTopicName, partition);
assertThrows(ExecutionException.class,
() -&gt; topicService.alterTopic(buildTopicCommandOptionsWithBootstrap(clusterInstance, "--alter", "--topic", testTopicName,
"--replica-assignment", "5:3,3:1,4:2", "--partitions", "6")),
"Expected to fail with ExecutionException");
}
}
// Altering a topic to a negative partition count fails broker-side.
@ClusterTest
public void testAlterWithInvalidPartitionCount(ClusterInstance clusterInstance) throws Exception {
String testTopicName = TestUtils.randomString(10);
try (Admin adminClient = clusterInstance.admin();
TopicCommand.TopicService topicService = new TopicCommand.TopicService(adminClient)) {
adminClient.createTopics(List.of(new NewTopic(testTopicName, defaultNumPartitions, defaultReplicationFactor)));
clusterInstance.waitTopicCreation(testTopicName, defaultNumPartitions);
assertThrows(ExecutionException.class,
() -&gt; topicService.alterTopic(buildTopicCommandOptionsWithBootstrap(clusterInstance, "--alter", "--partitions", "-1", "--topic", testTopicName)),
"Expected to fail with ExecutionException");
}
}
// Altering a nonexistent topic without --if-exists fails with IllegalArgumentException.
@ClusterTest
public void testAlterWhenTopicDoesntExist(ClusterInstance clusterInstance) throws Exception {
String testTopicName = TestUtils.randomString(10);
try (Admin adminClient = clusterInstance.admin();
TopicCommand.TopicService topicService = new TopicCommand.TopicService(adminClient)) {
// alter a topic that does not exist without --if-exists
TopicCommand.TopicCommandOptions alterOpts = buildTopicCommandOptionsWithBootstrap(clusterInstance, "--alter", "--topic", testTopicName, "--partitions", "1");
assertThrows(IllegalArgumentException.class, () -&gt; topicService.alterTopic(alterOpts), "Expected to fail with IllegalArgumentException");
}
}
// Altering a nonexistent topic WITH --if-exists is a silent no-op.
// Fix: the original created adminClient/topicService outside any
// try-with-resources and closed them manually at the end, leaking both if
// alterTopic threw; they are now try-with-resources, like every sibling test.
@ClusterTest
public void testAlterWhenTopicDoesntExistWithIfExists(ClusterInstance clusterInstance) throws Exception {
String testTopicName = TestUtils.randomString(10);
try (Admin adminClient = clusterInstance.admin();
TopicCommand.TopicService topicService = new TopicCommand.TopicService(adminClient)) {
topicService.alterTopic(buildTopicCommandOptionsWithBootstrap(clusterInstance, "--alter", "--topic", testTopicName, "--partitions", "1", "--if-exists"));
}
}
@ClusterTemplate("generate")
public void testCreateAlterTopicWithRackAware(ClusterInstance clusterInstance) throws Exception {
String testTopicName = TestUtils.randomString(10);
try (Admin adminClient = clusterInstance.admin();
TopicCommand.TopicService topicService = new TopicCommand.TopicService(adminClient)) {
Map<Integer, String> rackInfo = new HashMap<>();
rackInfo.put(0, "rack1");
rackInfo.put(1, "rack2");
rackInfo.put(2, "rack2");
rackInfo.put(3, "rack1");
rackInfo.put(4, "rack3");
rackInfo.put(5, "rack3");
int numPartitions = 18;
int replicationFactor = 3;
adminClient.createTopics(List.of(new NewTopic(testTopicName, numPartitions, (short) replicationFactor)));
clusterInstance.waitTopicCreation(testTopicName, numPartitions);
Map<Integer, List<Integer>> assignment = adminClient.describeTopics(List.of(testTopicName))
.allTopicNames().get().get(testTopicName).partitions()
.stream()
.collect(Collectors.toMap(
TopicPartitionInfo::partition,
info -> info.replicas().stream().map(Node::id).toList()));
checkReplicaDistribution(assignment, rackInfo, rackInfo.size(), numPartitions,
replicationFactor, true, true, true);
int alteredNumPartitions = 36;
// verify that adding partitions will also be rack aware
TopicCommand.TopicCommandOptions alterOpts = buildTopicCommandOptionsWithBootstrap(clusterInstance, "--alter",
"--partitions", Integer.toString(alteredNumPartitions),
"--topic", testTopicName);
topicService.alterTopic(alterOpts);
TestUtils.waitForCondition(
() -> adminClient.listPartitionReassignments().reassignments().get().isEmpty(),
CLUSTER_WAIT_MS, testTopicName + String.format("reassignmet not finished after %s ms", CLUSTER_WAIT_MS)
);
TestUtils.waitForCondition(
() -> clusterInstance.brokers().values().stream().allMatch(p -> p.metadataCache().numPartitions(testTopicName).orElse(0) == alteredNumPartitions),
TestUtils.DEFAULT_MAX_WAIT_MS, "Timeout waiting for new assignment propagating to broker");
assignment = adminClient.describeTopics(List.of(testTopicName))
.allTopicNames().get().get(testTopicName).partitions().stream()
.collect(Collectors.toMap(TopicPartitionInfo::partition, info -> info.replicas().stream().map(Node::id).toList()));
checkReplicaDistribution(assignment, rackInfo, rackInfo.size(), alteredNumPartitions, replicationFactor,
true, true, true);
}
}
    /**
     * Verifies that a per-topic config override ({@code cleanup.policy=compact}) set at
     * creation time is still present and unchanged after the partition count is
     * increased via {@code --alter}.
     */
    @ClusterTest(brokers = 3)
    public void testConfigPreservationAcrossPartitionAlteration(ClusterInstance clusterInstance) throws Exception {
        String testTopicName = TestUtils.randomString(10);
        try (Admin adminClient = clusterInstance.admin();
            TopicCommand.TopicService topicService = new TopicCommand.TopicService(adminClient)) {
            String cleanUpPolicy = "compact";
            HashMap<String, String> topicConfig = new HashMap<>();
            topicConfig.put(TopicConfig.CLEANUP_POLICY_CONFIG, cleanUpPolicy);
            adminClient.createTopics(List.of(new NewTopic(testTopicName, defaultNumPartitions, defaultReplicationFactor).configs(topicConfig)));
            clusterInstance.waitTopicCreation(testTopicName, defaultNumPartitions);
            // Sanity check: the override is visible right after creation.
            ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, testTopicName);
            Config props = adminClient.describeConfigs(Set.of(configResource)).all().get().get(configResource);
            assertNotNull(props.get(TopicConfig.CLEANUP_POLICY_CONFIG), "Properties after creation don't contain " + cleanUpPolicy);
            assertEquals(cleanUpPolicy, props.get(TopicConfig.CLEANUP_POLICY_CONFIG).value(), "Properties after creation have incorrect value");
            // modify the topic to add new partitions
            int numPartitionsModified = 3;
            TopicCommand.TopicCommandOptions alterOpts = buildTopicCommandOptionsWithBootstrap(clusterInstance, "--alter",
                "--partitions", Integer.toString(numPartitionsModified), "--topic", testTopicName);
            topicService.alterTopic(alterOpts);
            // Wait for every broker's metadata cache to reflect the new partition count.
            TestUtils.waitForCondition(
                () -> clusterInstance.brokers().values().stream().allMatch(p -> p.metadataCache().numPartitions(testTopicName).orElse(0) == numPartitionsModified),
                TestUtils.DEFAULT_MAX_WAIT_MS, "Timeout waiting for new assignment propagating to broker");
            // The override must survive the alteration.
            Config newProps = adminClient.describeConfigs(Set.of(configResource)).all().get().get(configResource);
            assertNotNull(newProps.get(TopicConfig.CLEANUP_POLICY_CONFIG), "Updated properties do not contain " + TopicConfig.CLEANUP_POLICY_CONFIG);
            assertEquals(cleanUpPolicy, newProps.get(TopicConfig.CLEANUP_POLICY_CONFIG).value(), "Updated properties have incorrect value");
        }
    }
    /**
     * Basic delete path: create a normal topic, delete it with {@code --delete}, and
     * poll until it no longer appears in the topic listing (deletion is asynchronous).
     */
    @ClusterTest(
        brokers = 3,
        serverProperties = {
            @ClusterConfigProperty(key = "log.initial.task.delay.ms", value = "100"),
            @ClusterConfigProperty(key = "log.segment.delete.delay.ms", value = "1000")
        }
    )
    public void testTopicDeletion(ClusterInstance clusterInstance) throws Exception {
        try (Admin adminClient = clusterInstance.admin();
            TopicCommand.TopicService topicService = new TopicCommand.TopicService(adminClient)) {
            // NOTE(review): uses a fixed name rather than TestUtils.randomString(10) like
            // the sibling tests — fine on a per-test cluster, but inconsistent.
            String testTopicName = "testing";
            adminClient.createTopics(List.of(new NewTopic(testTopicName, defaultNumPartitions, defaultReplicationFactor)));
            clusterInstance.waitTopicCreation(testTopicName, defaultNumPartitions);
            // delete the NormalTopic
            TopicCommand.TopicCommandOptions deleteOpts = buildTopicCommandOptionsWithBootstrap(clusterInstance, "--delete", "--topic", testTopicName);
            topicService.deleteTopic(deleteOpts);
            TestUtils.waitForCondition(
                () -> adminClient.listTopics().listings().get().stream().noneMatch(topic -> topic.name().equals(testTopicName)),
                CLUSTER_WAIT_MS, String.format("Delete topic fail in %s ms", CLUSTER_WAIT_MS)
            );
        }
    }
    /**
     * A topic whose name contains '.' (a character the in-code comment calls
     * "colliding" — presumably with '_' in derived names; not shown here, confirm)
     * can be deleted and then re-created under the same name.
     */
    @ClusterTest(
        brokers = 3,
        serverProperties = {
            @ClusterConfigProperty(key = "log.initial.task.delay.ms", value = "100"),
            @ClusterConfigProperty(key = "log.segment.delete.delay.ms", value = "1000")
        }
    )
    public void testTopicWithCollidingCharDeletionAndCreateAgain(ClusterInstance clusterInstance) throws Exception {
        try (Admin adminClient = clusterInstance.admin();
            TopicCommand.TopicService topicService = new TopicCommand.TopicService(adminClient)) {
            // create the topic with colliding chars
            String topicWithCollidingChar = "test.a";
            adminClient.createTopics(List.of(new NewTopic(topicWithCollidingChar, defaultNumPartitions, defaultReplicationFactor)));
            clusterInstance.waitTopicCreation(topicWithCollidingChar, defaultNumPartitions);
            // delete the topic
            TopicCommand.TopicCommandOptions deleteOpts = buildTopicCommandOptionsWithBootstrap(clusterInstance, "--delete", "--topic", topicWithCollidingChar);
            topicService.deleteTopic(deleteOpts);
            // First wait for it to drop out of the listing...
            TestUtils.waitForCondition(
                () -> adminClient.listTopics().listings().get().stream().noneMatch(topic -> topic.name().equals(topicWithCollidingChar)),
                CLUSTER_WAIT_MS, String.format("Delete topic fail in %s ms", CLUSTER_WAIT_MS)
            );
            // ...then for the deletion to fully complete before re-creating.
            clusterInstance.waitTopicDeletion(topicWithCollidingChar);
            // recreate same topic
            adminClient.createTopics(List.of(new NewTopic(topicWithCollidingChar, defaultNumPartitions, defaultReplicationFactor)));
            clusterInstance.waitTopicCreation(topicWithCollidingChar, defaultNumPartitions);
        }
    }
    /**
     * The command permits deleting the internal {@code __consumer_offsets} topic;
     * guarding internal topics is expected to be done via ACLs, not by the tool.
     */
    @ClusterTest(
        brokers = 3,
        serverProperties = {
            @ClusterConfigProperty(key = "log.initial.task.delay.ms", value = "100"),
            @ClusterConfigProperty(key = "log.segment.delete.delay.ms", value = "1000")
        }
    )
    public void testDeleteInternalTopic(ClusterInstance clusterInstance) throws Exception {
        try (Admin adminClient = clusterInstance.admin();
            TopicCommand.TopicService topicService = new TopicCommand.TopicService(adminClient)) {
            // create the offset topic
            adminClient.createTopics(List.of(new NewTopic(Topic.GROUP_METADATA_TOPIC_NAME, defaultNumPartitions, defaultReplicationFactor)));
            clusterInstance.waitTopicCreation(Topic.GROUP_METADATA_TOPIC_NAME, defaultNumPartitions);
            // Try to delete the Topic.GROUP_METADATA_TOPIC_NAME which is allowed by default.
            // This is a difference between the new and the old command as the old one didn't allow internal topic deletion.
            // If deleting internal topics is not desired, ACLS should be used to control it.
            TopicCommand.TopicCommandOptions deleteOffsetTopicOpts =
                buildTopicCommandOptionsWithBootstrap(clusterInstance, "--delete", "--topic", Topic.GROUP_METADATA_TOPIC_NAME);
            topicService.deleteTopic(deleteOffsetTopicOpts);
            TestUtils.waitForCondition(
                () -> adminClient.listTopics().listings().get().stream().noneMatch(topic -> topic.name().equals(Topic.GROUP_METADATA_TOPIC_NAME)),
                CLUSTER_WAIT_MS, String.format("Delete topic fail in %s ms", CLUSTER_WAIT_MS)
            );
        }
    }
@ClusterTest(
brokers = 3,
serverProperties = {
@ClusterConfigProperty(key = "log.initial.task.delay.ms", value = "100"),
@ClusterConfigProperty(key = "log.segment.delete.delay.ms", value = "1000")
}
)
public void testDeleteWhenTopicDoesntExist(ClusterInstance clusterInstance) throws Exception {
String testTopicName = TestUtils.randomString(10);
try (Admin adminClient = clusterInstance.admin();
TopicCommand.TopicService topicService = new TopicCommand.TopicService(adminClient)) {
// delete a topic that does not exist
TopicCommand.TopicCommandOptions deleteOpts = buildTopicCommandOptionsWithBootstrap(clusterInstance, "--delete", "--topic", testTopicName);
assertThrows(IllegalArgumentException.class, () -> topicService.deleteTopic(deleteOpts),
"Expected an exception when trying to delete a topic that does not exist.");
}
}
@ClusterTest(
brokers = 3,
serverProperties = {
@ClusterConfigProperty(key = "log.initial.task.delay.ms", value = "100"),
@ClusterConfigProperty(key = "log.segment.delete.delay.ms", value = "1000")
}
)
public void testDeleteWhenTopicDoesntExistWithIfExists(ClusterInstance clusterInstance) throws Exception {
String testTopicName = TestUtils.randomString(10);
try (Admin adminClient = clusterInstance.admin();
TopicCommand.TopicService topicService = new TopicCommand.TopicService(adminClient)) {
topicService.deleteTopic(buildTopicCommandOptionsWithBootstrap(clusterInstance, "--delete", "--topic", testTopicName, "--if-exists"));
}
}
@ClusterTemplate("generate")
public void testDescribe(ClusterInstance clusterInstance) throws InterruptedException {
String testTopicName = TestUtils.randomString(10);
try (Admin adminClient = clusterInstance.admin()) {
int partition = 2;
short replicationFactor = 2;
adminClient.createTopics(List.of(new NewTopic(testTopicName, partition, replicationFactor)));
clusterInstance.waitTopicCreation(testTopicName, partition);
String output = captureDescribeTopicStandardOut(clusterInstance, buildTopicCommandOptionsWithBootstrap(clusterInstance, "--describe", "--topic", testTopicName));
String[] rows = output.split(System.lineSeparator());
assertEquals(3, rows.length, "Expected 3 rows in output, got " + rows.length);
assertTrue(rows[0].startsWith(String.format("Topic: %s", testTopicName)), "Row does not start with " + testTopicName + ". Row is: " + rows[0]);
}
}
    /**
     * Describe across five topics (20+41+5+5+100 = 171 partitions) with a paginated
     * response ({@code --partition-size-limit-per-response=20}). Expects 176 rows
     * (apparently 171 partition lines plus 5 topic summary lines) and that the third
     * row carries the Elr / LastKnownElr columns.
     */
    @ClusterTemplate("generate")
    public void testDescribeWithDescribeTopicPartitionsApi(ClusterInstance clusterInstance) throws InterruptedException {
        String testTopicName = TestUtils.randomString(10);
        try (Admin adminClient = clusterInstance.admin()) {
            List<NewTopic> topics = new ArrayList<>();
            topics.add(new NewTopic(testTopicName, 20, (short) 2));
            topics.add(new NewTopic("test-2", 41, (short) 2));
            topics.add(new NewTopic("test-3", 5, (short) 2));
            topics.add(new NewTopic("test-4", 5, (short) 2));
            topics.add(new NewTopic("test-5", 100, (short) 2));
            adminClient.createTopics(topics);
            clusterInstance.waitTopicCreation(testTopicName, 20);
            clusterInstance.waitTopicCreation("test-2", 41);
            clusterInstance.waitTopicCreation("test-3", 5);
            clusterInstance.waitTopicCreation("test-4", 5);
            clusterInstance.waitTopicCreation("test-5", 100);
            String output = captureDescribeTopicStandardOut(clusterInstance, buildTopicCommandOptionsWithBootstrap(clusterInstance,
                "--describe", "--partition-size-limit-per-response=20", "--exclude-internal"));
            // NOTE(review): splits on "\n" while the sibling tests use
            // System.lineSeparator() — presumably intentional; confirm on Windows.
            String[] rows = output.split("\n");
            assertEquals(176, rows.length, String.join("\n", rows));
            assertTrue(rows[2].contains("\tElr"), rows[2]);
            assertTrue(rows[2].contains("LastKnownElr"), rows[2]);
        }
    }
@ClusterTest
public void testDescribeWhenTopicDoesntExist(ClusterInstance clusterInstance) {
String testTopicName = TestUtils.randomString(10);
try (Admin adminClient = clusterInstance.admin()) {
TopicCommand.TopicService topicService = new TopicCommand.TopicService(adminClient);
assertThrows(IllegalArgumentException.class,
() -> topicService.describeTopic(buildTopicCommandOptionsWithBootstrap(clusterInstance, "--describe", "--topic", testTopicName)),
"Expected an exception when trying to describe a topic that does not exist.");
}
}
@ClusterTest
public void testDescribeWhenTopicDoesntExistWithIfExists(ClusterInstance clusterInstance) throws Exception {
String testTopicName = TestUtils.randomString(10);
try (Admin adminClient = clusterInstance.admin()) {
TopicCommand.TopicService topicService = new TopicCommand.TopicService(adminClient);
topicService.describeTopic(buildTopicCommandOptionsWithBootstrap(clusterInstance, "--describe", "--topic", testTopicName, "--if-exists"));
topicService.close();
}
}
    /**
     * With an RF-1 topic spread over three brokers, killing broker 0 leaves its
     * partition leaderless; {@code --unavailable-partitions} must report it with
     * "Leader: none".
     */
    @ClusterTest(brokers = 3)
    public void testDescribeUnavailablePartitions(ClusterInstance clusterInstance) throws InterruptedException {
        String testTopicName = TestUtils.randomString(10);
        try (Admin adminClient = clusterInstance.admin()) {
            int partitions = 3;
            short replicationFactor = 1;
            adminClient.createTopics(List.of(new NewTopic(testTopicName, partitions, replicationFactor)));
            clusterInstance.waitTopicCreation(testTopicName, partitions);
            // check which partition is on broker 0 which we'll kill
            clusterInstance.shutdownBroker(0);
            assertEquals(2, clusterInstance.aliveBrokers().size());
            // wait until the topic metadata for the test topic is propagated to each alive broker
            clusterInstance.waitTopicCreation(testTopicName, 3);
            // grab the console output and assert
            String output = captureDescribeTopicStandardOut(clusterInstance, buildTopicCommandOptionsWithBootstrap(clusterInstance, "--describe", "--topic", testTopicName, "--unavailable-partitions"));
            String[] rows = output.split(System.lineSeparator());
            assertTrue(rows[0].startsWith(String.format("Topic: %s", testTopicName)),
                "Unexpected Topic " + rows[0] + " received. Expect " + String.format("Topic: %s", testTopicName));
            assertTrue(rows[0].contains("Leader: none\tReplicas: 0\tIsr:"),
                "Rows did not contain 'Leader: none\tReplicas: 0\tIsr:'");
        }
    }
@ClusterTest(brokers = 3)
public void testDescribeUnderReplicatedPartitions(ClusterInstance clusterInstance) throws InterruptedException {
String testTopicName = TestUtils.randomString(10);
try (Admin adminClient = clusterInstance.admin()) {
int partitions = 1;
short replicationFactor = 3;
adminClient.createTopics(List.of(new NewTopic(testTopicName, partitions, replicationFactor)));
clusterInstance.waitTopicCreation(testTopicName, partitions);
clusterInstance.shutdownBroker(0);
Assertions.assertEquals(2, clusterInstance.aliveBrokers().size());
TestUtils.waitForCondition(
() -> clusterInstance.aliveBrokers().values().stream().allMatch(
broker -> {
Optional<LeaderAndIsr> partitionState = Optional.ofNullable(
broker.metadataCache().getLeaderAndIsr(testTopicName, 0).orElseGet(null));
return partitionState.map(s -> FetchRequest.isValidBrokerId(s.leader())).orElse(false);
}
), CLUSTER_WAIT_MS, String.format("Meta data propogation fail in %s ms", CLUSTER_WAIT_MS));
String output = captureDescribeTopicStandardOut(clusterInstance, buildTopicCommandOptionsWithBootstrap(clusterInstance, "--describe", "--under-replicated-partitions"));
String[] rows = output.split(System.lineSeparator());
assertTrue(rows[0].startsWith(String.format("Topic: %s", testTopicName)), String.format("Unexpected output: %s", rows[0]));
}
}
    /**
     * With min.insync.replicas=3 and one of three replicas down (ISR shrinks to 2),
     * the partition is below min ISR and must be reported by
     * {@code --under-min-isr-partitions}.
     */
    @ClusterTest(brokers = 3)
    public void testDescribeUnderMinIsrPartitions(ClusterInstance clusterInstance) throws InterruptedException {
        String testTopicName = TestUtils.randomString(10);
        try (Admin adminClient = clusterInstance.admin()) {
            Map<String, String> topicConfig = new HashMap<>();
            topicConfig.put(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "3");
            int partitions = 1;
            short replicationFactor = 3;
            adminClient.createTopics(List.of(new NewTopic(testTopicName, partitions, replicationFactor).configs(topicConfig)));
            clusterInstance.waitTopicCreation(testTopicName, partitions);
            clusterInstance.shutdownBroker(0);
            assertEquals(2, clusterInstance.aliveBrokers().size());
            // Wait until all surviving brokers observe the shrunken ISR (size 2).
            TestUtils.waitForCondition(
                () -> clusterInstance.aliveBrokers().values().stream().allMatch(broker -> broker.metadataCache().getLeaderAndIsr(testTopicName, 0).get().isr().size() == 2),
                CLUSTER_WAIT_MS, String.format("Timeout waiting for partition metadata propagating to brokers for %s topic", testTopicName)
            );
            String output = captureDescribeTopicStandardOut(clusterInstance, buildTopicCommandOptionsWithBootstrap(clusterInstance, "--describe", "--under-min-isr-partitions", "--exclude-internal"));
            String[] rows = output.split(System.lineSeparator());
            assertTrue(rows[0].startsWith(String.format("Topic: %s", testTopicName)),
                "Unexpected topic: " + rows[0]);
        }
    }
    /**
     * While a throttled (hence slow) reassignment is adding a replica, a partition that
     * has all of its *original* replicas in sync must NOT be reported by
     * {@code --under-replicated-partitions}; a plain describe still shows it normally.
     * The replication throttle is removed at the end so the reassignment can finish.
     */
    @ClusterTemplate("generate")
    public void testDescribeUnderReplicatedPartitionsWhenReassignmentIsInProgress(ClusterInstance clusterInstance) throws ExecutionException, InterruptedException {
        String testTopicName = TestUtils.randomString(10);
        try (Admin adminClient = clusterInstance.admin();
            KafkaProducer<String, String> producer = createProducer(clusterInstance)) {
            adminClient.createTopics(List.of(new NewTopic(testTopicName, defaultNumPartitions, defaultReplicationFactor)));
            clusterInstance.waitTopicCreation(testTopicName, defaultNumPartitions);
            TopicPartition tp = new TopicPartition(testTopicName, 0);
            // Produce multiple batches.
            sendProducerRecords(testTopicName, producer, 10);
            sendProducerRecords(testTopicName, producer, 10);
            // Enable throttling. Note the broker config sets the replica max fetch bytes to `1` upon to minimize replication
            // throughput so the reassignment doesn't complete quickly.
            List<Integer> brokerIds = new ArrayList<>(clusterInstance.brokerIds());
            ToolsTestUtils.setReplicationThrottleForPartitions(adminClient, brokerIds, Set.of(tp), 1);
            TopicDescription testTopicDesc = adminClient.describeTopics(Set.of(testTopicName)).allTopicNames().get().get(testTopicName);
            TopicPartitionInfo firstPartition = testTopicDesc.partitions().get(0);
            // Reassign partition 0 to a broker that does not currently host a replica.
            List<Integer> replicasOfFirstPartition = firstPartition.replicas().stream().map(Node::id).toList();
            List<Integer> replicasDiff = new ArrayList<>(brokerIds);
            replicasDiff.removeAll(replicasOfFirstPartition);
            Integer targetReplica = replicasDiff.get(0);
            adminClient.alterPartitionReassignments(Map.of(tp,
                Optional.of(new NewPartitionReassignment(List.of(targetReplica))))).all().get();
            // let's wait until the LAIR is propagated
            TestUtils.waitForCondition(
                () -> !adminClient.listPartitionReassignments(Set.of(tp)).reassignments().get()
                    .get(tp).addingReplicas().isEmpty(), CLUSTER_WAIT_MS, "Reassignment didn't add the second node"
            );
            // describe the topic and test if it's under-replicated
            String simpleDescribeOutput = captureDescribeTopicStandardOut(clusterInstance, buildTopicCommandOptionsWithBootstrap(clusterInstance, "--describe", "--topic", testTopicName));
            String[] simpleDescribeOutputRows = simpleDescribeOutput.split(System.lineSeparator());
            String testTopicNameLogLine = String.format("Topic: %s", testTopicName);
            assertTrue(simpleDescribeOutputRows[0].startsWith(testTopicNameLogLine),
                "Unexpected describe output: " + simpleDescribeOutputRows[0]);
            assertEquals(2, simpleDescribeOutputRows.length,
                "Unexpected describe output length: " + simpleDescribeOutputRows.length);
            String underReplicatedOutput = captureDescribeTopicStandardOut(clusterInstance, buildTopicCommandOptionsWithBootstrap(clusterInstance, "--describe", "--under-replicated-partitions"));
            assertFalse(underReplicatedOutput.contains(testTopicNameLogLine),
                String.format("--under-replicated-partitions shouldn't contain '%s': '%s'", testTopicNameLogLine, underReplicatedOutput));
            // Confirm the reassignment is still in flight (adding replicas) before cleanup.
            int maxRetries = 20;
            long pause = 100L;
            long waitTimeMs = maxRetries * pause;
            AtomicReference<PartitionReassignment> reassignmentsRef = new AtomicReference<>();
            TestUtils.waitForCondition(
                () -> {
                    PartitionReassignment tempReassignments = adminClient.listPartitionReassignments(Set.of(tp)).reassignments().get().get(tp);
                    reassignmentsRef.set(tempReassignments);
                    return reassignmentsRef.get() != null;
                }, waitTimeMs, "Reassignments did not become non-null within the specified time"
            );
            assertFalse(reassignmentsRef.get().addingReplicas().isEmpty());
            // Remove the throttle so the reassignment can complete before the test exits.
            ToolsTestUtils.removeReplicationThrottleForPartitions(adminClient, brokerIds, Set.of(tp));
            TestUtils.waitForCondition(
                () -> adminClient.listPartitionReassignments().reassignments().get().isEmpty(),
                CLUSTER_WAIT_MS, String.format("reassignmet not finished after %s ms", CLUSTER_WAIT_MS)
            );
        }
    }
    /**
     * With min.insync.replicas=4 and two of six replicas down, the ISR sits exactly at
     * the configured minimum, so the partition must be the only entry reported by
     * {@code --at-min-isr-partitions}.
     */
    @ClusterTemplate("generate")
    public void testDescribeAtMinIsrPartitions(ClusterInstance clusterInstance) throws InterruptedException {
        String testTopicName = TestUtils.randomString(10);
        try (Admin adminClient = clusterInstance.admin()) {
            Map<String, String> topicConfig = new HashMap<>();
            topicConfig.put(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "4");
            int partitions = 1;
            short replicationFactor = 6;
            adminClient.createTopics(List.of(new NewTopic(testTopicName, partitions, replicationFactor).configs(topicConfig)));
            clusterInstance.waitTopicCreation(testTopicName, partitions);
            // Take down two replicas so the ISR shrinks from 6 to 4 (== min ISR).
            clusterInstance.shutdownBroker(0);
            clusterInstance.shutdownBroker(1);
            assertEquals(4, clusterInstance.aliveBrokers().size());
            TestUtils.waitForCondition(
                () -> clusterInstance.aliveBrokers().values().stream().allMatch(broker -> broker.metadataCache().getLeaderAndIsr(testTopicName, 0).get().isr().size() == 4),
                CLUSTER_WAIT_MS, String.format("Timeout waiting for partition metadata propagating to brokers for %s topic", testTopicName)
            );
            String output = captureDescribeTopicStandardOut(clusterInstance, buildTopicCommandOptionsWithBootstrap(clusterInstance, "--describe", "--at-min-isr-partitions", "--exclude-internal"));
            String[] rows = output.split(System.lineSeparator());
            assertTrue(rows[0].startsWith(String.format("Topic: %s", testTopicName)),
                "Unexpected output: " + rows[0]);
            assertEquals(1, rows.length);
        }
    }
/**
* Test describe --under-min-isr-partitions option with four topics:
* (1) topic with partition under the configured min ISR count
* (2) topic with under-replicated partition (but not under min ISR count)
* (3) topic with offline partition
* (4) topic with fully replicated partition
*
* Output should only display the (1) topic with partition under min ISR count and (3) topic with offline partition
*/
@ClusterTemplate("generate")
public void testDescribeUnderMinIsrPartitionsMixed(ClusterInstance clusterInstance) throws InterruptedException {
try (Admin adminClient = clusterInstance.admin()) {
String underMinIsrTopic = "under-min-isr-topic";
String notUnderMinIsrTopic = "not-under-min-isr-topic";
String offlineTopic = "offline-topic";
String fullyReplicatedTopic = "fully-replicated-topic";
int partitions = 1;
short replicationFactor = 6;
List<NewTopic> newTopics = new ArrayList<>();
Map<Integer, List<Integer>> fullyReplicatedReplicaAssignmentMap = new HashMap<>();
fullyReplicatedReplicaAssignmentMap.put(0, List.of(1, 2, 3));
Map<Integer, List<Integer>> offlineReplicaAssignmentMap = new HashMap<>();
offlineReplicaAssignmentMap.put(0, List.of(0));
Map<String, String> topicConfig = new HashMap<>();
topicConfig.put(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "6");
newTopics.add(new NewTopic(underMinIsrTopic, partitions, replicationFactor).configs(topicConfig));
newTopics.add(new NewTopic(notUnderMinIsrTopic, partitions, replicationFactor));
newTopics.add(new NewTopic(offlineTopic, offlineReplicaAssignmentMap));
newTopics.add(new NewTopic(fullyReplicatedTopic, fullyReplicatedReplicaAssignmentMap));
adminClient.createTopics(newTopics);
for (NewTopic topioc: newTopics) {
clusterInstance.waitTopicCreation(topioc.name(), partitions);
}
clusterInstance.shutdownBroker(0);
Assertions.assertEquals(5, clusterInstance.aliveBrokers().size());
TestUtils.waitForCondition(
() -> clusterInstance.aliveBrokers().values().stream().allMatch(broker ->
broker.metadataCache().getLeaderAndIsr(underMinIsrTopic, 0).get().isr().size() < 6 &&
broker.metadataCache().getLeaderAndIsr(offlineTopic, 0).get().leader() == MetadataResponse.NO_LEADER_ID),
CLUSTER_WAIT_MS, "Timeout waiting for partition metadata propagating to brokers for underMinIsrTopic topic"
);
TestUtils.waitForCondition(
() -> adminClient.listPartitionReassignments().reassignments().get().isEmpty(),
CLUSTER_WAIT_MS, String.format("reassignmet not finished after %s ms", CLUSTER_WAIT_MS)
);
String output = captureDescribeTopicStandardOut(clusterInstance, buildTopicCommandOptionsWithBootstrap(clusterInstance, "--describe", "--under-min-isr-partitions", "--exclude-internal"));
String[] rows = output.split(System.lineSeparator());
assertTrue(rows[0].startsWith(String.format("Topic: %s", underMinIsrTopic)),
"Unexpected output: " + rows[0]);
assertTrue(rows[1].startsWith(String.format("\tTopic: %s", offlineTopic)),
"Unexpected output: " + rows[1]);
assertEquals(2, rows.length);
}
}
@ClusterTest(brokers = 3)
public void testDescribeReportOverriddenConfigs(ClusterInstance clusterInstance) throws InterruptedException {
String testTopicName = TestUtils.randomString(10);
try (Admin adminClient = clusterInstance.admin()) {
String config = "file.delete.delay.ms=1000";
Map<String, String> topicConfig = new HashMap<>();
topicConfig.put(TopicConfig.FILE_DELETE_DELAY_MS_CONFIG, "1000");
int partitions = 2;
short replicationFactor = 2;
adminClient.createTopics(List.of(new NewTopic(testTopicName, partitions, replicationFactor).configs(topicConfig)));
clusterInstance.waitTopicCreation(testTopicName, partitions);
String output = captureDescribeTopicStandardOut(clusterInstance, buildTopicCommandOptionsWithBootstrap(clusterInstance, "--describe"));
assertTrue(output.contains(config), String.format("Describe output should have contained %s", config));
}
}
@ClusterTest
public void testDescribeAndListTopicsWithoutInternalTopics(ClusterInstance clusterInstance) throws InterruptedException {
String testTopicName = TestUtils.randomString(10);
try (Admin adminClient = clusterInstance.admin()) {
adminClient.createTopics(List.of(new NewTopic(testTopicName, defaultNumPartitions, defaultReplicationFactor)));
clusterInstance.waitTopicCreation(testTopicName, defaultNumPartitions);
// test describe
String output = captureDescribeTopicStandardOut(clusterInstance, buildTopicCommandOptionsWithBootstrap(clusterInstance, "--describe", "--describe", "--exclude-internal"));
assertTrue(output.contains(testTopicName),
String.format("Output should have contained %s", testTopicName));
assertFalse(output.contains(Topic.GROUP_METADATA_TOPIC_NAME),
"Output should not have contained " + Topic.GROUP_METADATA_TOPIC_NAME);
// test list
output = captureListTopicStandardOut(clusterInstance, buildTopicCommandOptionsWithBootstrap(clusterInstance, "--list", "--exclude-internal"));
assertTrue(output.contains(testTopicName), String.format("Output should have contained %s", testTopicName));
assertFalse(output.contains(Topic.GROUP_METADATA_TOPIC_NAME),
"Output should not have contained " + Topic.GROUP_METADATA_TOPIC_NAME);
}
}
@ClusterTest
public void testDescribeDoesNotFailWhenListingReassignmentIsUnauthorized(ClusterInstance clusterInstance) throws Exception {
String testTopicName = TestUtils.randomString(10);
Admin adminClient = clusterInstance.admin();
adminClient = spy(adminClient);
ListPartitionReassignmentsResult result = AdminClientTestUtils.listPartitionReassignmentsResult(
new ClusterAuthorizationException("Unauthorized"));
doReturn(result).when(adminClient).listPartitionReassignments(
Set.of(new TopicPartition(testTopicName, 0))
);
adminClient.createTopics(List.of(new NewTopic(testTopicName, defaultNumPartitions, defaultReplicationFactor)));
clusterInstance.waitTopicCreation(testTopicName, defaultNumPartitions);
String output = captureDescribeTopicStandardOut(clusterInstance, buildTopicCommandOptionsWithBootstrap(clusterInstance, "--describe", "--topic", testTopicName));
String[] rows = output.split(System.lineSeparator());
assertEquals(2, rows.length, "Unexpected output: " + output);
assertTrue(rows[0].startsWith(String.format("Topic: %s", testTopicName)), "Unexpected output: " + rows[0]);
adminClient.close();
}
@ClusterTest(brokers = 3)
public void testCreateWithTopicNameCollision(ClusterInstance clusterInstance) throws Exception {
try (Admin adminClient = clusterInstance.admin();
TopicCommand.TopicService topicService = new TopicCommand.TopicService(adminClient)) {
String topic = "foo_bar";
int partitions = 1;
short replicationFactor = 3;
adminClient.createTopics(List.of(new NewTopic(topic, partitions, replicationFactor)));
clusterInstance.waitTopicCreation(topic, defaultNumPartitions);
assertThrows(TopicExistsException.class,
() -> topicService.createTopic(buildTopicCommandOptionsWithBootstrap(clusterInstance, "--create", "--topic", topic)));
}
}
    /**
     * Internal broker-side topic configs are only echoed back for topics that set them:
     * a topic created with {@code internal.segment.bytes} gets the entry back from
     * {@code CreateTopicsResult}; a plain topic does not.
     */
    @ClusterTest
    public void testCreateWithInternalConfig(ClusterInstance cluster) throws InterruptedException, ExecutionException {
        String internalConfigTopicName = TestUtils.randomString(10);
        String testTopicName = TestUtils.randomString(10);
        try (Admin adminClient = cluster.admin()) {
            CreateTopicsResult internalResult = adminClient.createTopics(List.of(new NewTopic(internalConfigTopicName, defaultNumPartitions, defaultReplicationFactor).configs(
                Map.of(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, "1000")
            )));
            ConfigEntry internalConfigEntry = internalResult.config(internalConfigTopicName).get().get(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG);
            assertNotNull(internalConfigEntry, "Internal config entry should not be null");
            assertEquals("1000", internalConfigEntry.value());
            // Control: the same lookup on a topic without the override yields null.
            CreateTopicsResult nonInternalResult = adminClient.createTopics(List.of(new NewTopic(testTopicName, defaultNumPartitions, defaultReplicationFactor)));
            ConfigEntry nonInternalConfigEntry = nonInternalResult.config(testTopicName).get().get(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG);
            assertNull(nonInternalConfigEntry, "Non-internal config entry should be null");
        }
    }
    /**
     * Asserts structural properties of a replica assignment:
     * always: no broker holds two replicas of the same partition;
     * verifyRackAware: each partition's replicas span {@code replicationFactor} distinct racks;
     * verifyLeaderDistribution: preferred leaders are spread evenly across brokers;
     * verifyReplicasDistribution: total replica counts are spread evenly across brokers.
     *
     * NOTE(review): the leader/replica checks compare HashMap value iteration order
     * against nCopies lists; this only works because every broker is expected to hold
     * the identical count, so ordering cannot matter — confirm if counts ever differ.
     */
    private void checkReplicaDistribution(Map<Integer, List<Integer>> assignment,
                                          Map<Integer, String> brokerRackMapping,
                                          int numBrokers,
                                          int numPartitions,
                                          int replicationFactor,
                                          boolean verifyRackAware,
                                          boolean verifyLeaderDistribution,
                                          boolean verifyReplicasDistribution) {
        // always verify that no broker will be assigned for more than one replica
        assignment.forEach((partition, assignedNodes) -> assertEquals(new HashSet<>(assignedNodes).size(), assignedNodes.size(),
            "More than one replica is assigned to same broker for the same partition"));
        ReplicaDistributions distribution = getReplicaDistribution(assignment, brokerRackMapping);
        if (verifyRackAware) {
            Map<Integer, List<String>> partitionRackMap = distribution.partitionRacks;
            // Count distinct racks per partition; rack-aware means one rack per replica.
            List<Integer> partitionRackMapValueSize = partitionRackMap.values().stream()
                .map(value -> (int) value.stream().distinct().count())
                .toList();
            List<Integer> expected = Collections.nCopies(numPartitions, replicationFactor);
            assertEquals(expected, partitionRackMapValueSize, "More than one replica of the same partition is assigned to the same rack");
        }
        if (verifyLeaderDistribution) {
            Map<Integer, Integer> leaderCount = distribution.brokerLeaderCount;
            int leaderCountPerBroker = numPartitions / numBrokers;
            List<Integer> expected = Collections.nCopies(numBrokers, leaderCountPerBroker);
            assertEquals(expected, new ArrayList<>(leaderCount.values()), "Preferred leader count is not even for brokers");
        }
        if (verifyReplicasDistribution) {
            Map<Integer, Integer> replicasCount = distribution.brokerReplicasCount;
            int numReplicasPerBroker = numPartitions * replicationFactor / numBrokers;
            List<Integer> expected = Collections.nCopies(numBrokers, numReplicasPerBroker);
            assertEquals(expected, new ArrayList<>(replicasCount.values()), "Replica count is not even for broker");
        }
    }
private String captureDescribeTopicStandardOut(ClusterInstance clusterInstance, TopicCommand.TopicCommandOptions opts) {
Runnable runnable = () -> {
try (Admin adminClient = clusterInstance.admin();
TopicCommand.TopicService topicService = new TopicCommand.TopicService(adminClient)) {
topicService.describeTopic(opts);
} catch (Exception e) {
throw new RuntimeException(e);
}
};
return ToolsTestUtils.captureStandardOut(runnable);
}
private String captureListTopicStandardOut(ClusterInstance clusterInstance, TopicCommand.TopicCommandOptions opts) {
Runnable runnable = () -> {
try (Admin adminClient = clusterInstance.admin();
TopicCommand.TopicService topicService = new TopicCommand.TopicService(adminClient)) {
topicService.listTopics(opts);
} catch (Exception e) {
throw new RuntimeException(e);
}
};
return ToolsTestUtils.captureStandardOut(runnable);
}
/**
 * Summarizes a partition->replicas assignment: how many partitions each broker
 * leads, how many replicas each broker hosts, and which racks host each
 * partition's replicas.
 *
 * @param assignment        partition id -> ordered replica broker ids (index 0 is the preferred leader)
 * @param brokerRackMapping broker id -> rack name; brokers absent from this map are
 *                          reported on stderr and excluded from the rack map
 * @return the aggregated replica distribution
 */
private static ReplicaDistributions getReplicaDistribution(Map<Integer, List<Integer>> assignment, Map<Integer, String> brokerRackMapping) {
    Map<Integer, Integer> leaderCount = new HashMap<>();
    Map<Integer, Integer> partitionCount = new HashMap<>();
    Map<Integer, List<String>> partitionRackMap = new HashMap<>();
    assignment.forEach((partitionId, replicaList) -> {
        // The first replica in the assignment is the preferred leader.
        leaderCount.merge(replicaList.get(0), 1, Integer::sum);
        replicaList.forEach(brokerId -> {
            partitionCount.merge(brokerId, 1, Integer::sum);
            String rack = brokerRackMapping.get(brokerId);
            if (rack != null) {
                // Prepend this replica's rack, matching the original list ordering.
                partitionRackMap.merge(partitionId, List.of(rack),
                    (existing, addition) -> Stream.concat(addition.stream(), existing.stream()).toList());
            } else {
                System.err.printf("No mapping found for %s in `brokerRackMapping`%n", brokerId);
            }
        });
    });
    return new ReplicaDistributions(partitionRackMap, leaderCount, partitionCount);
}
/**
 * Aggregated view of a topic's replica placement:
 * partitionRacks      - partition id -> racks hosting that partition's replicas
 * brokerLeaderCount   - broker id -> number of partitions it leads
 * brokerReplicasCount - broker id -> number of replicas it hosts
 */
private record ReplicaDistributions(Map<Integer, List<String>> partitionRacks,
                                    Map<Integer, Integer> brokerLeaderCount,
                                    Map<Integer, Integer> brokerReplicasCount) {
}
/**
 * Builds a String/String producer pointed at the given cluster, configured
 * with acks=-1 so test writes wait for all in-sync replicas.
 */
private KafkaProducer<String, String> createProducer(ClusterInstance clusterInstance) {
    Properties props = new Properties();
    props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers());
    props.setProperty(ProducerConfig.ACKS_CONFIG, "-1");
    return new KafkaProducer<>(props, new StringSerializer(), new StringSerializer());
}
/** Sends {@code numMessage} records ("test-0" .. "test-&lt;n-1&gt;") to the topic, then flushes. */
private void sendProducerRecords(String testTopicName, KafkaProducer<String, String> producer, int numMessage) {
    for (int i = 0; i < numMessage; i++) {
        producer.send(new ProducerRecord<>(testTopicName, "test-" + i));
    }
    producer.flush();
}
}
| TopicCommandTest |
java | quarkusio__quarkus | extensions/vertx-http/deployment/src/test/java/io/quarkus/vertx/http/start/HttpServerStartEventsTest.java | {
"start": 1035,
"end": 1853
} | class ____ {
// Boots the application with the listener under test plus an SSL keystore so
// both the HTTP and HTTPS servers come up (the test below waits on a start
// event from each).
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
        .withApplicationRoot(root -> root.addClasses(MyListener.class)
                .addAsResource(new File("target/certs/ssl-test-keystore.jks"), "server-keystore.jks"))
        .overrideConfigKey("quarkus.http.ssl.certificate.key-store-file", "server-keystore.jks")
        .overrideConfigKey("quarkus.http.ssl.certificate.key-store-password", "secret");
@Test
public void test() throws InterruptedException {
    // Both latches must release within 5s, i.e. the listener observed the
    // HTTP and the HTTPS server-start events.
    assertTrue(MyListener.HTTP.await(5, TimeUnit.SECONDS));
    assertTrue(MyListener.HTTPS.await(5, TimeUnit.SECONDS));
    // httpsStarted() is static, so the counter is expected to be bumped exactly once
    assertEquals(1, MyListener.COUNTER.get());
}
@Dependent
public static | HttpServerStartEventsTest |
java | apache__spark | sql/hive/src/test/java/org/apache/spark/sql/hive/execution/UDFToStringIntMap.java | {
"start": 926,
"end": 1164
} | class ____ extends UDF {
/**
 * Hive UDF entry point: ignores its argument and returns a fixed
 * three-entry map {key1=1, key2=2, key3=3}.
 *
 * <p>Replaces the previous double-brace initialization, which created an
 * anonymous {@code HashMap} subclass holding a hidden reference to the
 * enclosing UDF instance — a hazard when the map is serialized.
 *
 * @param o unused input value
 * @return a plain {@link HashMap} with the three fixed entries
 */
public HashMap<String, Integer> evaluate(Object o) {
    HashMap<String, Integer> result = new HashMap<>();
    result.put("key1", 1);
    result.put("key2", 2);
    result.put("key3", 3);
    return result;
}
}
| UDFToStringIntMap |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/embeddables/nested/fieldaccess/MonetaryAmount.java | {
"start": 468,
"end": 887
} | enum ____ {
USD,
EUR
}
// Monetary value, expressed in the currency below.
private BigDecimal amount;

// Three-letter currency code, persisted as a string (not an ordinal).
@Column(length = 3)
@Enumerated(EnumType.STRING)
private CurrencyCode currency;

public BigDecimal getAmount() {
    return amount;
}

public void setAmount(BigDecimal amount) {
    this.amount = amount;
}

public CurrencyCode getCurrency() {
    return currency;
}

public void setCurrency(CurrencyCode currency) {
    this.currency = currency;
}
}
| CurrencyCode |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/kstream/internals/TimestampedTupleForwarder.java | {
"start": 1352,
"end": 1573
} | class ____ occurs when caching is not enabled. If caching is enabled,
* forwarding occurs in the flush listener when the cached store flushes.
*
* @param <K> the type of the key
* @param <V> the type of the value
*/
| only |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/persister/entity/mutation/AbstractMutationCoordinator.java | {
"start": 2009,
"end": 7982
} | class ____ {
// Persister of the entity whose mutations this coordinator handles.
protected final EntityPersister entityPersister;
// Owning session factory; used for service lookups and operation creation.
protected final SessionFactoryImplementor factory;
// Service that supplies mutation executors.
protected final MutationExecutorService mutationExecutorService;
// SQL dialect, cached from the factory's JDBC services.
protected final Dialect dialect;

/**
 * Creates a coordinator for the given entity persister.
 *
 * @param entityPersister persister of the entity being mutated
 * @param factory owning session factory, used for service lookups
 */
public AbstractMutationCoordinator(EntityPersister entityPersister, SessionFactoryImplementor factory) {
    this.entityPersister = entityPersister;
    this.factory = factory;
    dialect = factory.getJdbcServices().getDialect();
    mutationExecutorService = factory.getServiceRegistry().getService( MutationExecutorService.class );
}
// Simple accessors for subclasses.
protected EntityPersister entityPersister() {
    return entityPersister;
}

protected SessionFactoryImplementor factory() {
    return factory;
}

protected Dialect dialect() {
    return dialect;
}
/**
 * Decides whether JDBC statement batching may be used for this mutation.
 * Batching is exposed only when the update is not dynamic, the optimistic
 * lock style is not all/dirty-column based, and an active transaction exists
 * to scope the batch; otherwise a no-op batch-key access is returned.
 *
 * @param dynamicUpdate whether the SQL is generated per-flush from dirty state
 * @param session current session, used to check for an active transaction
 * @return access to this coordinator's batch key, or {@code NoBatchKeyAccess.INSTANCE} when batching is disabled
 */
protected BatchKeyAccess resolveBatchKeyAccess(boolean dynamicUpdate, SharedSessionContractImplementor session) {
    if ( !dynamicUpdate
            && !entityPersister().optimisticLockStyle().isAllOrDirty()
            && session.getTransactionCoordinator() != null
            && session.getTransactionCoordinator().isTransactionActive() ) {
        return this::getBatchKey;
    }
    return NoBatchKeyAccess.INSTANCE;
}

/** Key identifying this coordinator's JDBC batch; consulted only when batching is enabled. */
protected abstract BatchKey getBatchKey();
/**
 * Converts a logical {@link MutationGroup} into an executable
 * {@link MutationOperationGroup}, skipping table mutations that produce no
 * operation for the analysed values.
 *
 * @param valuesAnalysis analysis of the values being written, used to build each operation
 * @param mutationGroup the logical group of per-table mutations
 * @return a group holding zero, one or many operations, depending on how many were produced
 */
protected MutationOperationGroup createOperationGroup(ValuesAnalysis valuesAnalysis, MutationGroup mutationGroup) {
    final int numberOfTableMutations = mutationGroup.getNumberOfTableMutations();
    switch ( numberOfTableMutations ) {
        case 0:
            return noOperations( mutationGroup );
        case 1: {
            // A null operation means the single table mutation is a no-op here.
            final var operation = createOperation( valuesAnalysis, mutationGroup.getSingleTableMutation() );
            return operation == null
                    ? noOperations( mutationGroup )
                    : singleOperation( mutationGroup, operation );
        }
        default: {
            var operations = new MutationOperation[numberOfTableMutations];
            int outputIndex = 0;
            int skipped = 0;
            for ( int i = 0; i < mutationGroup.getNumberOfTableMutations(); i++ ) {
                final var tableMutation = mutationGroup.getTableMutation( i );
                final var operation = tableMutation.createMutationOperation( valuesAnalysis, factory );
                if ( operation != null ) {
                    operations[outputIndex++] = operation;
                }
                else {
                    // No operation for this table; count the gap so the array can be trimmed below.
                    skipped++;
                    MODEL_MUTATION_LOGGER.skippingUpdate( tableMutation.getTableName() );
                }
            }
            if ( skipped != 0 ) {
                // Shrink the array to the operations actually created.
                final var trimmed = new MutationOperation[outputIndex];
                arraycopy( operations, 0, trimmed, 0, outputIndex );
                operations = trimmed;
            }
            return manyOperations( mutationGroup.getMutationType(), entityPersister, operations );
        }
    }
}
/**
 * Creates the executable operation for a single table mutation; returns
 * {@code null} when the mutation produces no operation for the analysed
 * values (see the null check in {@code createOperationGroup}).
 *
 * <p>Used by Hibernate Reactive — keep the signature stable.
 */
protected MutationOperation createOperation(ValuesAnalysis valuesAnalysis, TableMutation<?> singleTableMutation) {
    return singleTableMutation.createMutationOperation( valuesAnalysis, factory() );
}
/**
 * Wires an on-execution generated attribute into the mutation group: each of
 * the attribute's columns receives either a bind parameter ("?") when
 * Hibernate writes the generated value itself, or the SQL column expression
 * supplied by the generator when the database produces the value.
 */
protected void handleValueGeneration(
        AttributeMapping attributeMapping,
        MutationGroupBuilder mutationGroupBuilder,
        OnExecutionGenerator generator) {
    final Dialect dialect = factory.getJdbcServices().getDialect();
    final boolean writePropertyValue = generator.writePropertyValue();
    // columnValues is only needed when the database computes the value.
    final String[] columnValues = writePropertyValue ? null : generator.getReferencedColumnValues( dialect );
    attributeMapping.forEachSelectable( (j, mapping) -> {
        final String tableName = entityPersister.physicalTableNameForMutation( mapping );
        final ColumnValuesTableMutationBuilder tableUpdateBuilder =
                mutationGroupBuilder.findTableDetailsBuilder( tableName );
        tableUpdateBuilder.addValueColumn(
                writePropertyValue ? "?" : columnValues[j],
                mapping
        );
    } );
}
/**
 * Binds the values of partitioned columns as restrictions, taking the values
 * from the entity's loaded state. Does nothing unless the persister reports a
 * partitioned selection mapping.
 */
protected void bindPartitionColumnValueBindings(
        Object[] loadedState,
        SharedSessionContractImplementor session,
        JdbcValueBindings jdbcValueBindings) {
    final var persister = entityPersister();
    if ( persister.hasPartitionedSelectionMapping() ) {
        final var attributeMappings = persister.getAttributeMappings();
        final int size = attributeMappings.size();
        for ( int i = 0; i < size; i++ ) {
            final var attributeMapping = attributeMappings.get( i );
            if ( attributeMapping.hasPartitionedSelectionMapping() ) {
                attributeMapping.decompose(
                        loadedState[i],
                        0,
                        jdbcValueBindings,
                        null,
                        (valueIndex, bindings, noop, value, jdbcValueMapping) -> {
                            // Only columns flagged as partitioned join the restriction.
                            if ( jdbcValueMapping.isPartitioned() ) {
                                bindings.bindValue(
                                        value,
                                        jdbcValueMapping,
                                        ParameterUsage.RESTRICT
                                );
                            }
                        },
                        session
                );
            }
        }
    }
}
/**
 * Whether mutations against this table should be keyed by row-id: true only
 * for the identifier table of an entity that maps a row-id.
 */
protected static boolean needsRowId(EntityPersister entityPersister, EntityTableMapping tableMapping) {
    return entityPersister.getRowIdMapping() != null
            && tableMapping.isIdentifierTable();
}
/**
 * Adds the key restriction for a table mutation: by row-id when one is
 * available and applicable to this table, otherwise by the table's key columns.
 */
protected static void applyKeyRestriction(
        Object rowId,
        EntityPersister entityPersister,
        RestrictedTableMutationBuilder<?, ?> tableMutationBuilder,
        EntityTableMapping tableMapping) {
    if ( rowId != null && needsRowId( entityPersister, tableMapping ) ) {
        tableMutationBuilder.addKeyRestrictionLeniently( entityPersister.getRowIdMapping() );
    }
    else {
        tableMutationBuilder.addKeyRestrictions( tableMapping.getKeyMapping() );
    }
}
/**
 * Binds the key values restricting a mutation of the given table: binds the
 * row-id when available and applicable, otherwise decomposes the identifier
 * into its individual key-column bindings.
 */
protected void breakDownKeyJdbcValues(
        Object id,
        Object rowId,
        SharedSessionContractImplementor session,
        JdbcValueBindings jdbcValueBindings,
        EntityTableMapping tableMapping) {
    if ( rowId != null && needsRowId( entityPersister(), tableMapping ) ) {
        jdbcValueBindings.bindValue(
                rowId,
                tableMapping.getTableName(),
                entityPersister().getRowIdMapping().getRowIdName(),
                ParameterUsage.RESTRICT
        );
    }
    else {
        tableMapping.getKeyMapping().breakDownKeyJdbcValues(
                id,
                (jdbcValue, columnMapping) -> {
                    jdbcValueBindings.bindValue(
                            jdbcValue,
                            tableMapping.getTableName(),
                            columnMapping.getColumnName(),
                            ParameterUsage.RESTRICT
                    );
                },
                session
        );
    }
}
}
| AbstractMutationCoordinator |
java | google__guava | android/guava-testlib/src/com/google/common/collect/testing/google/MultimapTestSuiteBuilder.java | {
"start": 17432,
"end": 18503
} | class ____<K, V, M extends Multimap<K, V>>
implements TestCollectionGenerator<Entry<K, V>>, DerivedGenerator {
// Generator for the backing multimap; this generator derives its entry collection from it.
private final OneSizeTestContainerGenerator<M, Entry<K, V>> multimapGenerator;

public EntriesGenerator(OneSizeTestContainerGenerator<M, Entry<K, V>> multimapGenerator) {
    this.multimapGenerator = multimapGenerator;
}

// Exposes the wrapped generator to the derived-generator machinery.
@Override
public TestSubjectGenerator<?> getInnerGenerator() {
    return multimapGenerator;
}

@Override
public SampleElements<Entry<K, V>> samples() {
    return multimapGenerator.samples();
}

// Builds the multimap from the given entries and returns its entries() view.
@Override
public Collection<Entry<K, V>> create(Object... elements) {
    return multimapGenerator.create(elements).entries();
}

// Cast is safe: the array is only accessed through the Entry interface.
@SuppressWarnings("unchecked")
@Override
public Entry<K, V>[] createArray(int length) {
    return (Entry<K, V>[]) new Entry<?, ?>[length];
}

@Override
public Iterable<Entry<K, V>> order(List<Entry<K, V>> insertionOrder) {
    return multimapGenerator.order(insertionOrder);
}
}
private static final | EntriesGenerator |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/creator/JSONCreatorTest2.java | {
"start": 946,
"end": 1544
} | class ____ {
// Immutable value object; fields are set only via the annotated creator below.
private final int id;
private final String name;
private final Object obj;

/**
 * Creator used by fastjson to deserialize directly into final fields;
 * each {@code @JSONField} maps a JSON property to its constructor argument.
 */
@JSONCreator
public Entity(@JSONField(name = "id") int id, @JSONField(name = "name") String name,
              @JSONField(name = "obj") Object obj){
    this.id = id;
    this.name = name;
    this.obj = obj;
}

public int getId() {
    return id;
}

public String getName() {
    return name;
}

public Object getObj() {
    return obj;
}
}
}
| Entity |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.