language stringclasses 1
value | repo stringclasses 60
values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/sorted/state/BatchExecutionStateBackendVerificationTest.java | {
"start": 1460,
"end": 2498
} | class ____ {
private static final LongSerializer LONG_SERIALIZER = new LongSerializer();
@Test
void verifySnapshotNotSupported() {
BatchExecutionKeyedStateBackend<Long> stateBackend =
new BatchExecutionKeyedStateBackend<>(
LONG_SERIALIZER, new KeyGroupRange(0, 9), new ExecutionConfig());
long checkpointId = 0L;
CheckpointStreamFactory streamFactory = new MemCheckpointStreamFactory(10);
assertThatThrownBy(
() ->
stateBackend.snapshot(
checkpointId,
0L,
streamFactory,
CheckpointOptions.forCheckpointWithDefaultLocation()))
.isInstanceOf(UnsupportedOperationException.class)
.hasMessageContaining("Snapshotting is not supported in BATCH runtime mode.");
}
}
| BatchExecutionStateBackendVerificationTest |
java | spring-projects__spring-framework | spring-webmvc/src/main/java/org/springframework/web/servlet/config/annotation/ViewResolverRegistry.java | {
"start": 2138,
"end": 2306
} | class ____ expected to be used via {@link WebMvcConfigurer#configureViewResolvers}.
*
* @author Sebastien Deleuze
* @author Rossen Stoyanchev
* @since 4.1
*/
public | is |
java | alibaba__nacos | naming/src/test/java/com/alibaba/nacos/naming/healthcheck/v2/PersistentHealthStatusSynchronizerTest.java | {
"start": 1278,
"end": 2300
} | class ____ {
@Mock
private PersistentClientOperationServiceImpl persistentClientOperationService;
@Mock
private Client client;
@Test
void testInstanceHealthStatusChange() {
Service service = Service.newService("public", "DEFAULT", "nacos", true);
InstancePublishInfo instancePublishInfo = new InstancePublishInfo("127.0.0.1", 8080);
PersistentHealthStatusSynchronizer persistentHealthStatusSynchronizer = new PersistentHealthStatusSynchronizer(
persistentClientOperationService);
persistentHealthStatusSynchronizer.instanceHealthStatusChange(true, client, service, instancePublishInfo);
Instance updateInstance = InstanceUtil.parseToApiInstance(service, instancePublishInfo);
updateInstance.setHealthy(true);
verify(client).getClientId();
verify(persistentClientOperationService).updateInstance(service, updateInstance, client.getClientId());
}
}
| PersistentHealthStatusSynchronizerTest |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/source/lookup/cache/trigger/TimedCacheReloadTrigger.java | {
"start": 2187,
"end": 6251
} | class ____ implements CacheReloadTrigger {
private static final long serialVersionUID = 1L;
private final Temporal reloadTime;
private final int reloadIntervalInDays;
private transient ScheduledExecutorService scheduledExecutor;
private transient Clock clock; // clock for testing purposes
public TimedCacheReloadTrigger(OffsetTime reloadTime, int reloadIntervalInDays) {
this((Temporal) reloadTime, reloadIntervalInDays);
}
public TimedCacheReloadTrigger(LocalTime reloadTime, int reloadIntervalInDays) {
this((Temporal) reloadTime, reloadIntervalInDays);
}
private TimedCacheReloadTrigger(Temporal reloadTime, int reloadIntervalInDays) {
checkArgument(
reloadIntervalInDays > 0,
"Reload interval for Timed cache reload trigger must be at least 1 day.");
this.reloadTime = reloadTime;
this.reloadIntervalInDays = reloadIntervalInDays;
}
@VisibleForTesting
TimedCacheReloadTrigger(
Temporal reloadTime,
int reloadIntervalInDays,
ScheduledExecutorService scheduledExecutor,
Clock clock) {
this(reloadTime, reloadIntervalInDays);
this.scheduledExecutor = scheduledExecutor;
this.clock = clock;
}
@Override
public void open(Context context) {
if (scheduledExecutor == null) {
scheduledExecutor = Executors.newSingleThreadScheduledExecutor();
}
if (clock == null) {
clock =
reloadTime instanceof LocalTime
? Clock.systemDefaultZone()
: Clock.system(((OffsetTime) reloadTime).getOffset());
}
Temporal now =
reloadTime instanceof LocalTime ? LocalTime.now(clock) : OffsetTime.now(clock);
Duration initialDelayDuration = Duration.between(now, reloadTime);
if (initialDelayDuration.isNegative()) {
// in case when reloadTime is less than current time, reload will happen next day
initialDelayDuration = initialDelayDuration.plus(1, ChronoUnit.DAYS);
}
scheduledExecutor.execute(context::triggerReload); // trigger first load operation
scheduledExecutor.scheduleAtFixedRate(
context::triggerReload,
initialDelayDuration.toMillis(),
Duration.ofDays(reloadIntervalInDays).toMillis(),
TimeUnit.MILLISECONDS);
}
@Override
public void close() throws Exception {
if (scheduledExecutor != null) {
scheduledExecutor.shutdownNow();
}
}
@VisibleForTesting
Temporal getReloadTime() {
return reloadTime;
}
public static TimedCacheReloadTrigger fromConfig(ReadableConfig config) {
checkArgument(
config.get(CACHE_TYPE) == FULL,
"'%s' should be '%s' in order to build a Timed cache reload trigger.",
CACHE_TYPE.key(),
FULL);
checkArgument(
config.get(FULL_CACHE_RELOAD_STRATEGY) == TIMED,
"'%s' should be '%s' in order to build a Timed cache reload trigger.",
FULL_CACHE_RELOAD_STRATEGY.key(),
TIMED);
checkArgument(
config.getOptional(FULL_CACHE_TIMED_RELOAD_ISO_TIME).isPresent(),
"Missing '%s' in the configuration. This option is required to build a Timed cache reload trigger.",
FULL_CACHE_TIMED_RELOAD_ISO_TIME.key());
Temporal reloadTime =
(Temporal)
DateTimeFormatter.ISO_TIME.parseBest(
config.get(FULL_CACHE_TIMED_RELOAD_ISO_TIME),
OffsetTime::from,
LocalTime::from);
int reloadIntervalInDays = config.get(FULL_CACHE_TIMED_RELOAD_INTERVAL_IN_DAYS);
return new TimedCacheReloadTrigger(reloadTime, reloadIntervalInDays);
}
}
| TimedCacheReloadTrigger |
java | google__dagger | dagger-compiler/main/java/dagger/internal/codegen/validation/ModelBindingGraphConverter.java | {
"start": 11859,
"end": 12463
} | class ____ implements ChildFactoryMethodEdge {
static ChildFactoryMethodEdge create(
dagger.internal.codegen.model.BindingGraph.ChildFactoryMethodEdge childFactoryMethodEdge) {
return new AutoValue_ModelBindingGraphConverter_ChildFactoryMethodEdgeImpl(
childFactoryMethodEdge.factoryMethod().javac(), childFactoryMethodEdge);
}
abstract dagger.internal.codegen.model.BindingGraph.ChildFactoryMethodEdge delegate();
@Override
public final String toString() {
return delegate().toString();
}
}
@AutoValue
abstract static | ChildFactoryMethodEdgeImpl |
java | eclipse-vertx__vert.x | vertx-core/src/main/java/io/vertx/core/parsetools/JsonParser.java | {
"start": 719,
"end": 1568
} | class ____ allows to incrementally parse json elements and emit json parse events instead of parsing a json
* element fully. This parser is convenient for parsing large json structures.
* <p/>
* The parser also parses concatenated json streams or line delimited json streams.
* <p/>
* The parser can also parse entire object or array when it is convenient, for instance a very large array
* of small objects can be parsed efficiently by handling array <i>start</i>/<i>end</i> and <i>object</i>
* events.
* <p/>
* Whenever the parser fails to parse or process the stream, the {@link #exceptionHandler(Handler)} is called with
* the cause of the failure and the current handling stops. After such event, the parser should not handle data
* anymore.
*
* @author <a href="mailto:julien@julienviet.com">Julien Viet</a>
*/
@VertxGen
public | which |
java | google__jimfs | jimfs/src/main/java/com/google/common/jimfs/JimfsOutputStream.java | {
"start": 986,
"end": 2933
} | class ____ extends OutputStream {
@GuardedBy("this")
@VisibleForTesting
RegularFile file;
@GuardedBy("this")
private long pos;
private final boolean append;
private final FileSystemState fileSystemState;
JimfsOutputStream(RegularFile file, boolean append, FileSystemState fileSystemState) {
this.file = checkNotNull(file);
this.append = append;
this.fileSystemState = fileSystemState;
fileSystemState.register(this);
}
@Override
public synchronized void write(int b) throws IOException {
checkNotClosed();
file.writeLock().lock();
try {
if (append) {
pos = file.sizeWithoutLocking();
}
file.write(pos++, (byte) b);
file.setLastModifiedTime(fileSystemState.now());
} finally {
file.writeLock().unlock();
}
}
@Override
public void write(byte[] b) throws IOException {
writeInternal(b, 0, b.length);
}
@Override
public void write(byte[] b, int off, int len) throws IOException {
checkPositionIndexes(off, off + len, b.length);
writeInternal(b, off, len);
}
private synchronized void writeInternal(byte[] b, int off, int len) throws IOException {
checkNotClosed();
file.writeLock().lock();
try {
if (append) {
pos = file.sizeWithoutLocking();
}
pos += file.write(pos, b, off, len);
file.setLastModifiedTime(fileSystemState.now());
} finally {
file.writeLock().unlock();
}
}
@GuardedBy("this")
private void checkNotClosed() throws IOException {
if (file == null) {
throw new IOException("stream is closed");
}
}
@Override
public synchronized void close() throws IOException {
if (isOpen()) {
fileSystemState.unregister(this);
file.closed();
// file is set to null here and only here
file = null;
}
}
@GuardedBy("this")
private boolean isOpen() {
return file != null;
}
}
| JimfsOutputStream |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/nullcheck/NullObjectMapper.java | {
"start": 230,
"end": 342
} | class ____ {
public String toNullString(NullObject in) {
return in.toString();
}
}
| NullObjectMapper |
java | eclipse-vertx__vert.x | vertx-core/src/main/java/io/vertx/core/net/QuicOptions.java | {
"start": 583,
"end": 18584
} | class ____ extends TransportOptions {
public static final long DEFAULT_MAX_INITIAL_DATA = 0L;
public static final long DEFAULT_MAX_STREAM_DATA_BIDI_LOCAL = 0L;
public static final long DEFAULT_MAX_STREAM_DATA_BIDI_REMOTE = 0L;
public static final long DEFAULT_MAX_STREAMS_DATA_UNI = 0L;
public static final long DEFAULT_MAX_STREAMS_DATA_BIDI = 0L;
public static final long DEFAULT_MAX_STREAM_DATA_UNI = 0L;
public static final boolean DEFAULT_ACTIVE_MIGRATION = false;
public static final Duration DEFAULT_MAX_IDLE_TIMEOUT = null;
public static final boolean DEFAULT_ENABLE_DATAGRAMS = false;
public static final int DEFAULT_DATAGRAM_SEND_QUEUE_LENGTH = 128;
public static final int DEFAULT_DATAGRAM_RECEIVE_QUEUE_LENGTH = 128;
public static final Duration DEFAULT_MAX_ACK_DELAY = Duration.ofMillis(25);
public static final int DEFAULT_ACK_DELAY_EXPONENT = 3;
public static final QuicCongestionControlAlgorithm DEFAULT_CONGESTION_CONTROL_ALGORITHM = QuicCongestionControlAlgorithm.CUBIC;
public static final boolean DEFAULT_GREASE = true;
public static final boolean DEFAULT_HYSTART = true;
public static final int DEFAULT_INITIAL_CONGESTION_WINDOW_PACKETS = 10;
private long initialMaxData = DEFAULT_MAX_INITIAL_DATA;
private long initialMaxStreamDataBidirectionalLocal = DEFAULT_MAX_STREAM_DATA_BIDI_LOCAL;
private long initialMaxStreamDataBidirectionalRemote = DEFAULT_MAX_STREAM_DATA_BIDI_REMOTE;
private long initialMaxStreamDataUnidirectional = DEFAULT_MAX_STREAMS_DATA_UNI;
private long initialMaxStreamsBidirectional = DEFAULT_MAX_STREAMS_DATA_BIDI;
private long initialMaxStreamsUnidirectional = DEFAULT_MAX_STREAM_DATA_UNI;
private boolean activeMigration = DEFAULT_ACTIVE_MIGRATION;
private Duration maxIdleTimeout = DEFAULT_MAX_IDLE_TIMEOUT;
private boolean enableDatagrams = DEFAULT_ENABLE_DATAGRAMS;
private int datagramSendQueueLength = DEFAULT_DATAGRAM_SEND_QUEUE_LENGTH;
private int datagramReceiveQueueLength = DEFAULT_DATAGRAM_RECEIVE_QUEUE_LENGTH;
private Duration maxAckDelay = DEFAULT_MAX_ACK_DELAY;
private int ackDelayExponent = DEFAULT_ACK_DELAY_EXPONENT;
private QuicCongestionControlAlgorithm congestionControlAlgorithm = DEFAULT_CONGESTION_CONTROL_ALGORITHM;
private boolean grease = DEFAULT_GREASE;
private boolean hystart = DEFAULT_HYSTART;
private int initialCongestionWindowPackets = DEFAULT_INITIAL_CONGESTION_WINDOW_PACKETS;
public QuicOptions() {
}
public QuicOptions(QuicOptions other) {
this.initialMaxData = other.initialMaxData;
this.initialMaxStreamDataBidirectionalLocal = other.initialMaxStreamDataBidirectionalLocal;
this.initialMaxStreamDataBidirectionalRemote = other.initialMaxStreamDataBidirectionalRemote;
this.initialMaxStreamsBidirectional = other.initialMaxStreamsBidirectional;
this.initialMaxStreamsUnidirectional = other.initialMaxStreamsUnidirectional;
this.initialMaxStreamDataUnidirectional = other.initialMaxStreamDataUnidirectional;
this.activeMigration = other.activeMigration;
this.maxIdleTimeout = other.maxIdleTimeout;
this.enableDatagrams = other.enableDatagrams;
this.datagramSendQueueLength = other.datagramSendQueueLength;
this.datagramReceiveQueueLength = other.datagramReceiveQueueLength;
this.maxAckDelay = other.maxAckDelay;
this.ackDelayExponent = other.ackDelayExponent;
this.congestionControlAlgorithm = other.congestionControlAlgorithm;
this.grease = other.grease;
this.hystart = other.hystart;
this.initialCongestionWindowPackets = other.initialCongestionWindowPackets;
}
@Override
protected QuicOptions copy() {
return new QuicOptions(this);
}
/**
* @return the {@code initialMaxData} parameter value
* @see #setInitialMaxData(long)
*/
public long getInitialMaxData() {
return initialMaxData;
}
/**
* <p>Set the {@code initialMaxData} transport parameter.</p>
*
* <p>When set to a non-zero value, it will only allow at most {@code initialMaxData} bytes of incoming stream data to be buffered
* for the whole connection (that is, data that is not yet read by the application) and will allow more data to be
* received as the buffer is consumed by the application.</p>
*
* <p>The default value is {@code 0}</p>
*
* @param initialMaxData the value to set
* @return this instance
*/
public QuicOptions setInitialMaxData(long initialMaxData) {
if (initialMaxData < 0) {
throw new IllegalArgumentException("initialMaxData must be >= 0");
}
this.initialMaxData = initialMaxData;
return this;
}
/**
* @return the {@code initialMaxStreamDataBidirectionalLocal } parameter value
* @see #setInitialMaxStreamDataBidirectionalLocal(long)
*/
public long getInitialMaxStreamDataBidirectionalLocal() {
return initialMaxStreamDataBidirectionalLocal;
}
/**
* <p>Set the {@code initialMaxStreamDataBidirectionalLocal} transport parameter.</p>
*
* <p>When set to a non-zero value it will only allow at most {@code initialMaxStreamDataBidirectionalLocal} bytes of incoming stream data
* to be buffered for each locally-initiated bidirectional stream (that is, data that is not yet read by the application) and will
* allow more data to be received as the buffer is consumed by the application.</p>
*
* <p>The default value is {@code 0}.</p>
*
* @param initialMaxStreamDataBidirectionalLocal the value to set
* @return this instance
*/
public QuicOptions setInitialMaxStreamDataBidirectionalLocal(long initialMaxStreamDataBidirectionalLocal) {
if (initialMaxStreamDataBidirectionalLocal < 0) {
throw new IllegalArgumentException("initialMaxStreamDataBidirectionalLocal must be >= 0");
}
this.initialMaxStreamDataBidirectionalLocal = initialMaxStreamDataBidirectionalLocal;
return this;
}
/**
* @return the {@code initialMaxStreamDataBidirectionalRemote } parameter value
* @see #setInitialMaxStreamDataBidirectionalRemote(long)
*/
public long getInitialMaxStreamDataBidirectionalRemote() {
return initialMaxStreamDataBidirectionalRemote;
}
/**
* <p>Set the {@code initialMaxStreamDataBidirectionalRemote} transport parameter.</p>
*
* <p>When set to a non-zero value it will only allow at most {@code initialMaxStreamDataBidirectionalRemote} bytes of incoming
* stream data to be buffered for each remotely-initiated bidirectional stream (that is, data that is not yet read by the application)
* and will allow more data to be received as the buffer is consumed by the application.</p>
*
* <p>The default value is {@code 0}.</p>
*
* @param initialMaxStreamDataBidirectionalRemote the value to set
* @return this instance
*/
public QuicOptions setInitialMaxStreamDataBidirectionalRemote(long initialMaxStreamDataBidirectionalRemote) {
if (initialMaxStreamDataBidirectionalRemote < 0) {
throw new IllegalArgumentException("initialMaxStreamDataBidirectionalRemote must be >= 0");
}
this.initialMaxStreamDataBidirectionalRemote = initialMaxStreamDataBidirectionalRemote;
return this;
}
/**
* @return the {@code initialMaxStreamsBidirectional } parameter value
* @see #setInitialMaxStreamsBidirectional(long)
*/
public long getInitialMaxStreamsBidirectional() {
return initialMaxStreamsBidirectional;
}
/**
* <p>Set the {@code setInitialMaxStreamsBidirectional} transport parameter.</p>
*
* <p>When set to a non-zero value it will only allow {@code initialMaxStreamsBidirectional} number of concurrent
* remotely-initiated bidirectional streams to be open at any given time and will increase the limit
* automatically as streams are completed.</p>
*
* <p>A bidirectional stream is considered completed when all incoming data has been read by the application (up to the fin offset)
* or the stream's read direction has been shutdown, and all outgoing data has been acked by the peer (up to the fin offset)
* or the stream's write direction has been shutdown.</p>
*
* <p>The default value is {@code 0}.</p>
*
* @param initialMaxStreamsBidirectional the value to set
* @return this instance
*/
public QuicOptions setInitialMaxStreamsBidirectional(long initialMaxStreamsBidirectional) {
if (initialMaxStreamsBidirectional < 0) {
throw new IllegalArgumentException("initialMaxStreamsBidirectional must be >= 0");
}
this.initialMaxStreamsBidirectional = initialMaxStreamsBidirectional;
return this;
}
/**
* @return the {@code initialMaxStreamsUnidirectional } parameter value
* @see #setInitialMaxStreamsUnidirectional(long)
*/
public long getInitialMaxStreamsUnidirectional() {
return initialMaxStreamsUnidirectional;
}
/**
* <p>Sets the {@code initialMaxStreamsUnidirectional} transport parameter.</p>
*
* <p>When set to a non-zero value it will only allow {@code initialMaxStreamsUnidirectional} number of concurrent
* remotely-initiated unidirectional streams to be open at any given time and will increase the limit automatically
* as streams are completed.</p>
*
* <p>A unidirectional stream is considered completed when all incoming data has been read by the application (up to the fin offset)
* or the stream's read direction has been shutdown.</p>
*
* <p>The default value is {@code 0}.</p>
*
* @param initialMaxStreamsUnidirectional the value to set
* @return this instance
*/
public QuicOptions setInitialMaxStreamsUnidirectional(long initialMaxStreamsUnidirectional) {
if (initialMaxStreamsUnidirectional < 0) {
throw new IllegalArgumentException("initialMaxStreamsUnidirectional must be >= 0");
}
this.initialMaxStreamsUnidirectional = initialMaxStreamsUnidirectional;
return this;
}
/**
* @return the {@code initialMaxStreamDataUnidirectional } parameter value
* @see #setInitialMaxStreamDataUnidirectional(long)
*/
public long getInitialMaxStreamDataUnidirectional() {
return initialMaxStreamDataUnidirectional;
}
/**
* <p>Sets the {@code initialMaxStreamDataUnidirectional} transport parameter.</p>
*
* <p>When set to a non-zero value it will only allow at most {@code initialMaxStreamDataUnidirectional} bytes of incoming
* stream data to be buffered for each unidirectional stream (that is, data that is not yet read by the application) and
* will allow more data to be received as the buffer is consumed by the application.</p>
*
* <p>The default value is {@code 0}.</p>
*
* @param initialMaxStreamDataUnidirectional the value to set
* @return this instance
*/
public QuicOptions setInitialMaxStreamDataUnidirectional(long initialMaxStreamDataUnidirectional) {
if (initialMaxStreamDataUnidirectional < 0) {
throw new IllegalArgumentException("initialMaxStreamDataUnidirectional must be >= 0");
}
this.initialMaxStreamDataUnidirectional = initialMaxStreamDataUnidirectional;
return this;
}
/**
* @return the {@code activeMigration } parameter value
* @see #setActiveMigration(boolean)
*/
public boolean getActiveMigration() {
return activeMigration;
}
/**
* <p>Set whether to allow active migration.</p>
*
* <p>The default value is {@code false}.</p>
*
* @param activeMigration the value to set
* @return this instance
*/
public QuicOptions setActiveMigration(boolean activeMigration) {
this.activeMigration = activeMigration;
return this;
}
/**
* @return the {@code maxIdleTimeout } parameter value
* @see #setMaxIdleTimeout(Duration)
*/
public Duration getMaxIdleTimeout() {
return maxIdleTimeout;
}
/**
* <p>Sets the {@code maxIdleTimeout} transport parameter.</p>
*
* <p>The default value {@code null} means infinite, that is, no timeout is used.</p>
*
* @param maxIdleTimeout the value to set
* @return this instance
*/
public QuicOptions setMaxIdleTimeout(Duration maxIdleTimeout) {
if (maxIdleTimeout != null && (maxIdleTimeout.isZero() || maxIdleTimeout.isNegative())) {
throw new IllegalArgumentException("maxIdleTimeout must be > 0 or null (no timeout)");
}
this.maxIdleTimeout = maxIdleTimeout;
return this;
}
/**
* @return whether to support datagrams frames
* @see #setEnableDatagrams(boolean)
*/
public boolean isEnableDatagrams() {
return enableDatagrams;
}
/**
* <p>Set whether to support datagrams frames.</p>
*
* <p>The default value is {@code false} (disabled).</p>
*
* @param enableDatagrams the value to set
* @return this instance
*/
public QuicOptions setEnableDatagrams(boolean enableDatagrams) {
this.enableDatagrams = enableDatagrams;
return this;
}
/**
* @return the datagram send queue length
* @see #setDatagramSendQueueLength(int)
*/
public int getDatagramSendQueueLength() {
return datagramSendQueueLength;
}
/**
* <p>Set the datagram receive queue length.</p>
*
* <p>The default value is {@code 128}.</p>
*
* @param datagramSendQueueLength the value to use
* @return this instance
*/
public QuicOptions setDatagramSendQueueLength(int datagramSendQueueLength) {
if (datagramSendQueueLength <= 0) {
throw new IllegalArgumentException("datagramSendQueueLength must be > 0");
}
this.datagramSendQueueLength = datagramSendQueueLength;
return this;
}
/**
* @return the datagram receive queue length
* @see #setDatagramReceiveQueueLength(int)
*/
public int getDatagramReceiveQueueLength() {
return datagramReceiveQueueLength;
}
/**
* <p>Set the datagram send queue length.</p>
*
* <p>The default value is {@code 128}.</p>
*
* @param datagramReceiveQueueLength the value to use
* @return this instance
*/
public QuicOptions setDatagramReceiveQueueLength(int datagramReceiveQueueLength) {
if (datagramReceiveQueueLength <= 0) {
throw new IllegalArgumentException("datagramReceiveQueueLength must be > 0");
}
this.datagramReceiveQueueLength = datagramReceiveQueueLength;
return this;
}
/**
* @return the {@code maxAckDelay } parameter value
* @see #setMaxAckDelay(Duration)
*/
public Duration getMaxAckDelay() {
return maxAckDelay;
}
/**
* <p>Sets the {@code maxAckDelay} transport parameter.</p>
*
* <p>The default value is {@code 25} (milliseconds).</p>
*
* @param maxAckDelay the value to set
* @return this instance
*/
public QuicOptions setMaxAckDelay(Duration maxAckDelay) {
if (maxAckDelay == null || maxAckDelay.isZero() || maxAckDelay.isNegative()) {
throw new IllegalArgumentException("maxAckDelay must be > 0");
}
this.maxAckDelay = maxAckDelay;
return this;
}
/**
* @return the {@code ackDelayExponent } parameter value
* @see #setAckDelayExponent(int)
*/
public int getAckDelayExponent() {
return ackDelayExponent;
}
/**
* <p>Sets the {@code ackDelayExponent} transport parameter.</p>
*
* <p>The default value is {@code 3}.</p>
*
* @param ackDelayExponent the value to set
* @return this instance
*/
public QuicOptions setAckDelayExponent(int ackDelayExponent) {
if (ackDelayExponent <= 0) {
throw new IllegalArgumentException("ackDelayExponent must be > 0");
}
this.ackDelayExponent = ackDelayExponent;
return this;
}
/**
* @return the {@code congestionControlAlgorithm } parameter value
* @see #setCongestionControlAlgorithm(QuicCongestionControlAlgorithm)
*/
public QuicCongestionControlAlgorithm getCongestionControlAlgorithm() {
return congestionControlAlgorithm;
}
/**
* <p>Sets the congestion control algorithm used.</p>
*
* <p>The default value is {@link QuicCongestionControlAlgorithm#CUBIC}.</p>
*
* @param congestionControlAlgorithm the congestion control algorithm to use
* @return this instance
*/
public QuicOptions setCongestionControlAlgorithm(QuicCongestionControlAlgorithm congestionControlAlgorithm) {
if (congestionControlAlgorithm == null) {
throw new IllegalArgumentException("congestionControlAlgorithm must be not null");
}
this.congestionControlAlgorithm = congestionControlAlgorithm;
return this;
}
/**
* @return whether to send GREASE values
* @see #setGrease(boolean)
*/
public boolean getGrease() {
return grease;
}
/**
* <p>Configures whether to send GREASE values.</p>
*
* <p>The default value is {@code true}</p>
*
* @param grease whether to send GREASE values.
* @return this instance
*/
public QuicOptions setGrease(boolean grease) {
this.grease = grease;
return this;
}
/**
* @return whether to enable HyStart++
* @see #setHystart(boolean)
*/
public boolean getHystart() {
return hystart;
}
/**
* <p>Configures whether to enable HyStart++.</p>
*
* <p>The default value is {@code true}.</p>
*
* @param hystart whether to enable HyStart++.
* @return this instance
*/
public QuicOptions setHystart(boolean hystart) {
this.hystart = hystart;
return this;
}
/**
* @return the initial congestion window size in terms of packet count
* @see #setInitialCongestionWindowPackets(int)
*/
public int getInitialCongestionWindowPackets() {
return initialCongestionWindowPackets;
}
/**
* <p>Sets initial congestion window size in terms of packet count.</p>
*
* <p>The default value is {@code 10}.</p>
*
* @param initialCongestionWindowPackets the value to set
* @return this instance
*/
public QuicOptions setInitialCongestionWindowPackets(int initialCongestionWindowPackets) {
if (initialCongestionWindowPackets <= 0) {
throw new IllegalArgumentException("initialCongestionWindowPackets must be > 0");
}
this.initialCongestionWindowPackets = initialCongestionWindowPackets;
return this;
}
}
| QuicOptions |
java | quarkusio__quarkus | extensions/reactive-oracle-client/deployment/src/test/java/io/quarkus/reactive/oracle/client/ConfigActiveFalseNamedDatasourceDynamicInjectionTest.java | {
"start": 635,
"end": 3133
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.overrideConfigKey("quarkus.datasource.ds-1.active", "false")
// We need at least one build-time property for the datasource,
// otherwise it's considered unconfigured at build time...
.overrideConfigKey("quarkus.datasource.ds-1.db-kind", "oracle");
@Inject
@ReactiveDataSource("ds-1")
InjectableInstance<Pool> pool;
@Inject
@ReactiveDataSource("ds-1")
InjectableInstance<io.vertx.mutiny.sqlclient.Pool> mutinyPool;
@Inject
@ReactiveDataSource("ds-1")
InjectableInstance<OraclePool> vendorPool;
@Inject
@ReactiveDataSource("ds-1")
InjectableInstance<io.vertx.mutiny.oracleclient.OraclePool> mutinyVendorPool;
@Test
public void pool() {
doTest(pool, Pool::getConnection);
}
@Test
public void mutinyPool() {
doTest(mutinyPool, io.vertx.mutiny.sqlclient.Pool::getConnection);
}
@Test
public void vendorPool() {
doTest(vendorPool, Pool::getConnection);
}
@Test
public void mutinyVendorPool() {
doTest(mutinyVendorPool, io.vertx.mutiny.sqlclient.Pool::getConnection);
}
private <T> void doTest(InjectableInstance<T> instance, Consumer<T> action) {
// The bean is always available to be injected during static init
// since we don't know whether the datasource will be active at runtime.
// So the bean proxy cannot be null.
assertThat(instance.getHandle().getBean())
.isNotNull()
.returns(false, InjectableBean::isActive);
var pool = instance.get();
assertThat(pool).isNotNull();
// However, any attempt to use it at runtime will fail.
assertThatThrownBy(() -> action.accept(pool))
.isInstanceOf(InactiveBeanException.class)
.hasMessageContainingAll("Datasource 'ds-1' was deactivated through configuration properties.",
"To avoid this exception while keeping the bean inactive", // Message from Arc with generic hints
"To activate the datasource, set configuration property 'quarkus.datasource.\"ds-1\".active'"
+ " to 'true' and configure datasource 'ds-1'",
"Refer to https://quarkus.io/guides/datasource for guidance.");
}
}
| ConfigActiveFalseNamedDatasourceDynamicInjectionTest |
java | elastic__elasticsearch | build-tools/src/main/java/org/elasticsearch/gradle/jarhell/JarHellPlugin.java | {
"start": 954,
"end": 2375
} | class ____ implements Plugin<Project> {
@Override
public void apply(Project project) {
Configuration jarHellConfig = project.getConfigurations().create("jarHell");
DependencyHandler dependencyHandler = project.getDependencies();
jarHellConfig.defaultDependencies(
deps -> deps.add(dependencyHandler.create("org.elasticsearch:elasticsearch-core:" + VersionProperties.getElasticsearch()))
);
TaskProvider<? extends Task> jarHellTask = createTask(jarHellConfig, project);
project.getPluginManager()
.withPlugin(
"lifecycle-base",
p -> project.getTasks().named(LifecycleBasePlugin.CHECK_TASK_NAME).configure(t -> t.dependsOn(jarHellTask))
);
}
private TaskProvider<? extends Task> createTask(Configuration jarHellConfig, Project project) {
TaskProvider<JarHellTask> jarHell = project.getTasks().register("jarHell", JarHellTask.class);
project.getPluginManager().withPlugin("java", p -> {
jarHell.configure(t -> {
SourceSet testSourceSet = GradleUtils.getJavaSourceSets(project).findByName(SourceSet.TEST_SOURCE_SET_NAME);
t.setClasspath(testSourceSet.getRuntimeClasspath());
});
});
jarHell.configure(t -> { t.setJarHellRuntimeClasspath(jarHellConfig); });
return jarHell;
}
}
| JarHellPlugin |
java | apache__camel | core/camel-base-engine/src/main/java/org/apache/camel/impl/engine/DefaultCamelContextExtension.java | {
"start": 40069,
"end": 40258
} | class ____ {
static final Logger LOG = LoggerFactory.getLogger(DefaultCamelContextExtension.class);
}
private static Logger logger() {
return Holder.LOG;
}
}
| Holder |
java | spring-projects__spring-boot | module/spring-boot-devtools/src/intTest/java/org/springframework/boot/devtools/tests/AbstractDevToolsIntegrationTests.java | {
"start": 1469,
"end": 2801
} | class ____ {
protected static final BuildOutput buildOutput = new BuildOutput(AbstractDevToolsIntegrationTests.class);
protected final File serverPortFile = new File(buildOutput.getRootLocation(), "server.port");
@RegisterExtension
protected final JvmLauncher javaLauncher = new JvmLauncher();
@TempDir
protected static File temp;
protected LaunchedApplication launchedApplication;
protected void launchApplication(ApplicationLauncher applicationLauncher, String... args) throws Exception {
this.serverPortFile.delete();
this.launchedApplication = applicationLauncher.launchApplication(this.javaLauncher, this.serverPortFile, args);
}
@AfterEach
void stopApplication() throws InterruptedException {
this.launchedApplication.stop();
}
protected int awaitServerPort() throws Exception {
int port = Awaitility.waitAtMost(Duration.ofMinutes(3))
.until(() -> new ApplicationState(this.serverPortFile, this.launchedApplication),
ApplicationState::hasServerPort)
.getServerPort();
this.serverPortFile.delete();
this.launchedApplication.restartRemote(port);
Thread.sleep(1000);
return port;
}
protected ControllerBuilder controller(String name) {
return new ControllerBuilder(name, this.launchedApplication.getClassesDirectory());
}
protected static final | AbstractDevToolsIntegrationTests |
java | elastic__elasticsearch | plugins/analysis-smartcn/src/test/java/org/elasticsearch/plugin/analysis/smartcn/SimpleSmartChineseAnalysisTests.java | {
"start": 831,
"end": 1294
} | class ____ extends ESTestCase {
public void testDefaultsIcuAnalysis() throws IOException {
final TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), Settings.EMPTY, new AnalysisSmartChinesePlugin());
TokenizerFactory tokenizerFactory = analysis.tokenizer.get("smartcn_tokenizer");
MatcherAssert.assertThat(tokenizerFactory, instanceOf(SmartChineseTokenizerTokenizerFactory.class));
}
}
| SimpleSmartChineseAnalysisTests |
java | spring-projects__spring-framework | spring-webmvc/src/test/java/org/springframework/web/servlet/mvc/method/annotation/RequestPartIntegrationTests.java | {
"start": 8323,
"end": 9427
} | class ____ {
@RequestMapping(value = "/test", method = POST, consumes = {"multipart/mixed", "multipart/form-data"})
public ResponseEntity<Object> create(@RequestPart(name = "json-data") TestData testData,
@RequestPart("file-data") Optional<MultipartFile> file,
@RequestPart(name = "empty-data", required = false) TestData emptyData,
@RequestPart(name = "iso-8859-1-data") byte[] iso88591Data) {
assertThat(iso88591Data).isEqualTo(new byte[]{(byte) 0xC4});
String url = "http://localhost:8080/test/" + testData.getName() + "/" + file.get().getOriginalFilename();
HttpHeaders headers = new HttpHeaders();
headers.setLocation(URI.create(url));
return new ResponseEntity<>(headers, HttpStatus.CREATED);
}
@RequestMapping(value = "/spr13319", method = POST, consumes = "multipart/form-data")
public ResponseEntity<Void> create(@RequestPart("file") MultipartFile multipartFile) {
assertThat(multipartFile.getOriginalFilename()).isEqualTo("élève.txt");
return ResponseEntity.ok().build();
}
}
@SuppressWarnings("unused")
private static | RequestPartTestController |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/concurrent/BasicThreadFactory.java | {
"start": 4866,
"end": 11990
} | class ____ implements org.apache.commons.lang3.builder.Builder<BasicThreadFactory> {
/** The wrapped factory. */
private ThreadFactory factory;
/** The uncaught exception handler. */
private Thread.UncaughtExceptionHandler exceptionHandler;
/** The naming pattern. */
private String namingPattern;
/** The priority. */
private Integer priority;
/** The daemon flag. */
private Boolean daemon;
/**
* Constructs a new instance.
*
* @deprecated Use {@link BasicThreadFactory#builder()}.
*/
@Deprecated
public Builder() {
// empty
}
/**
* Creates a new {@link BasicThreadFactory} with all configuration
* options that have been specified by calling methods on this builder.
* After creating the factory {@link #reset()} is called.
*
* @return the new {@link BasicThreadFactory}
*/
@Override
public BasicThreadFactory build() {
final BasicThreadFactory factory = new BasicThreadFactory(this);
reset();
return factory;
}
/**
* Sets the daemon flag for the new {@link BasicThreadFactory} to {@code true} causing a new thread factory to create daemon threads.
*
* @return a reference to this {@link Builder}
* @since 3.18.0
*/
public Builder daemon() {
return daemon(true);
}
/**
* Sets the daemon flag for the new {@link BasicThreadFactory}. If this
* flag is set to <strong>true</strong> the new thread factory will create daemon
* threads.
*
* @param daemon the value of the daemon flag
* @return a reference to this {@link Builder}
*/
public Builder daemon(final boolean daemon) {
this.daemon = Boolean.valueOf(daemon);
return this;
}
/**
* Sets the naming pattern to be used by the new {@code
* BasicThreadFactory}.
*
* @param namingPattern the naming pattern (must not be <strong>null</strong>)
* @return a reference to this {@link Builder}
* @throws NullPointerException if the naming pattern is <strong>null</strong>
*/
public Builder namingPattern(final String namingPattern) {
this.namingPattern = Objects.requireNonNull(namingPattern, "pattern");
return this;
}
/**
* Sets the priority for the threads created by the new {@code
* BasicThreadFactory}.
*
* @param priority the priority
* @return a reference to this {@link Builder}
*/
public Builder priority(final int priority) {
this.priority = Integer.valueOf(priority);
return this;
}
/**
* Resets this builder. All configuration options are set to default
* values. Note: If the {@link #build()} method was called, it is not
* necessary to call {@code reset()} explicitly because this is done
* automatically.
*/
public void reset() {
factory = null;
exceptionHandler = null;
namingPattern = null;
priority = null;
daemon = null;
}
/**
* Sets the uncaught exception handler for the threads created by the
* new {@link BasicThreadFactory}.
*
* @param exceptionHandler the {@link UncaughtExceptionHandler} (must not be
* <strong>null</strong>)
* @return a reference to this {@link Builder}
* @throws NullPointerException if the exception handler is <strong>null</strong>
*/
public Builder uncaughtExceptionHandler(
final Thread.UncaughtExceptionHandler exceptionHandler) {
this.exceptionHandler = Objects.requireNonNull(exceptionHandler, "handler");
return this;
}
/**
* Sets the {@link ThreadFactory} to be wrapped by the new {@code
* BasicThreadFactory}.
*
* @param factory the wrapped {@link ThreadFactory} (must not be
* <strong>null</strong>)
* @return a reference to this {@link Builder}
* @throws NullPointerException if the passed in {@link ThreadFactory}
* is <strong>null</strong>
*/
public Builder wrappedFactory(final ThreadFactory factory) {
this.factory = Objects.requireNonNull(factory, "factory");
return this;
}
}
/**
* Creates a new builder.
*
* @return a new builder.
* @since 3.18.0
*/
public static Builder builder() {
return new Builder();
}
/** A counter for the threads created by this factory. */
private final AtomicLong threadCounter;
/** Stores the wrapped factory. */
private final ThreadFactory wrappedFactory;
/** Stores the uncaught exception handler. */
private final Thread.UncaughtExceptionHandler uncaughtExceptionHandler;
/** Stores the naming pattern for newly created threads. */
private final String namingPattern;
/** Stores the priority. */
private final Integer priority;
/** Stores the daemon status flag. */
private final Boolean daemon;
/**
* Creates a new instance of {@link ThreadFactory} and configures it
* from the specified {@link Builder} object.
*
* @param builder the {@link Builder} object
*/
private BasicThreadFactory(final Builder builder) {
wrappedFactory = builder.factory != null ? builder.factory : Executors.defaultThreadFactory();
namingPattern = builder.namingPattern;
priority = builder.priority;
daemon = builder.daemon;
uncaughtExceptionHandler = builder.exceptionHandler;
threadCounter = new AtomicLong();
}
/**
* Gets the daemon flag. This flag determines whether newly created
* threads should be daemon threads. If <strong>true</strong>, this factory object
* calls {@code setDaemon(true)} on the newly created threads. Result can be
* <strong>null</strong> if no daemon flag was provided at creation time.
*
* @return the daemon flag
*/
public final Boolean getDaemonFlag() {
return daemon;
}
/**
* Gets the naming pattern for naming newly created threads. Result can
* be <strong>null</strong> if no naming pattern was provided.
*
* @return the naming pattern
*/
public final String getNamingPattern() {
return namingPattern;
}
/**
* Gets the priority of the threads created by this factory. Result can
* be <strong>null</strong> if no priority was specified.
*
* @return the priority for newly created threads
*/
public final Integer getPriority() {
return priority;
}
/**
* Gets the number of threads this factory has already created. This
* | Builder |
java | apache__camel | components/camel-ldap/src/main/java/org/apache/camel/component/ldap/LdapHelper.java | {
"start": 858,
"end": 2291
} | class ____ {
private LdapHelper() {
}
/**
* Given an LDAP search string, returns the string with certain characters escaped according to RFC 2254 guidelines.
* The character mapping is as follows:
* <ul>
* <li>* = \2a</li>
* <li>( = \28</li>
* <li>) = \29</li>
* <li>\ = \5c</li>
* <li>\0 = \00</li>
* </ul>
*
* @param filter string to escape according to RFC 2254 guidelines
* @return String the escaped/encoded result
*/
public static String escapeFilter(String filter) {
if (filter == null) {
return null;
}
StringBuilder buf = new StringBuilder(filter.length());
for (int i = 0; i < filter.length(); i++) {
char c = filter.charAt(i);
switch (c) {
case '\\':
buf.append("\\5c");
break;
case '*':
buf.append("\\2a");
break;
case '(':
buf.append("\\28");
break;
case ')':
buf.append("\\29");
break;
case '\0':
buf.append("\\00");
break;
default:
buf.append(c);
break;
}
}
return buf.toString();
}
}
| LdapHelper |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/http/converter/KotlinSerializationStringHttpMessageConverter.java | {
"start": 1252,
"end": 1982
} | class ____ {@link HttpMessageConverter} implementations that
* defer to Kotlin {@linkplain StringFormat string serializers}.
*
* <p>As of Spring Framework 7.0, by default it only encodes types annotated with
* {@link kotlinx.serialization.Serializable @Serializable} at type or generics level
* since it allows combined usage with other general purpose converters without conflicts.
* Alternative constructors with a {@code Predicate<ResolvableType>} parameter can be used
* to customize this behavior.
*
* @author Andreas Ahlenstorf
* @author Sebastien Deleuze
* @author Juergen Hoeller
* @author Iain Henderson
* @author Arjen Poutsma
* @since 6.0
* @param <T> the type of {@link StringFormat}
*/
public abstract | for |
java | greenrobot__EventBus | EventBusPerformance/src/org/greenrobot/eventbusperf/testsubject/PerfTestEventBus.java | {
"start": 3339,
"end": 4783
} | class ____ extends PerfTestEventBus {
public Post(Context context, TestParams params) {
super(context, params);
}
@Override
public void prepareTest() {
super.prepareTest();
super.registerSubscribers();
}
public void runTest() {
TestEvent event = new TestEvent();
long timeStart = System.nanoTime();
for (int i = 0; i < super.eventCount; i++) {
super.eventBus.post(event);
if (canceled) {
break;
}
}
long timeAfterPosting = System.nanoTime();
waitForReceivedEventCount(super.expectedEventCount);
long timeAllReceived = System.nanoTime();
primaryResultMicros = (timeAfterPosting - timeStart) / 1000;
primaryResultCount = super.expectedEventCount;
long deliveredMicros = (timeAllReceived - timeStart) / 1000;
int deliveryRate = (int) (primaryResultCount / (deliveredMicros / 1000000d));
otherTestResults = "Post and delivery time: " + deliveredMicros + " micros<br/>" + //
"Post and delivery rate: " + deliveryRate + "/s";
}
@Override
public String getDisplayName() {
return "EventBus Post Events, " + params.getThreadMode() + getDisplayModifier(params);
}
}
public static | Post |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/deser/CollectingProblemHandler.java | {
"start": 3607,
"end": 3941
} | class ____ extends DeserializationProblemHandler
{
/**
* Default maximum number of problems to collect before stopping.
* Prevents memory exhaustion attacks.
*/
public static final int DEFAULT_MAX_PROBLEMS = 100;
/**
* Attribute key for the problem collection bucket.
* Using | CollectingProblemHandler |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/jdk/ArrayDeserializationTest.java | {
"start": 1832,
"end": 2622
} | class ____
implements JacksonSerializable // so we can output as simple String
{
final String _desc;
public Bean2(String d)
{
_desc = d;
}
@Override
public void serialize(JsonGenerator gen, SerializationContext provider)
{
gen.writeString(_desc);
}
@Override public String toString() { return _desc; }
@Override public boolean equals(Object o) {
if (!(o instanceof Bean2)) return false;
Bean2 other = (Bean2) o;
return _desc.equals(other._desc);
}
@Override
public void serializeWithType(JsonGenerator gen,
SerializationContext provider, TypeSerializer typeSer) {
}
}
static | Bean2 |
java | elastic__elasticsearch | modules/lang-painless/src/main/java/org/elasticsearch/painless/symbol/IRDecorations.java | {
"start": 8151,
"end": 8575
} | class ____ extends IRDecoration<PainlessMethod> {
public IRDMethod(PainlessMethod value) {
super(value);
}
@Override
public String toString() {
return PainlessLookupUtility.buildPainlessMethodKey(getValue().javaMethod().getName(), getValue().typeParameters().size());
}
}
/** describes the return type for a statement node */
public static | IRDMethod |
java | micronaut-projects__micronaut-core | core-processor/src/main/java/io/micronaut/inject/writer/BeanDefinitionWriter.java | {
"start": 39829,
"end": 40213
} | class ____
* @param visitorContext The visitor context
*/
public BeanDefinitionWriter(ClassElement classElement,
VisitorContext visitorContext) {
this(classElement, OriginatingElements.of(classElement), visitorContext, null);
}
/**
* Creates a bean definition writer.
*
* @param classElement The | element |
java | apache__dubbo | dubbo-common/src/test/java/org/apache/dubbo/common/extension/ExtensionLoader_Activate_Test.java | {
"start": 1074,
"end": 1484
} | class ____ {
@Test
void test_onClass() throws Exception {
URL url = URL.valueOf("test://localhost/test");
ExtensionLoader<ActivateExt1> loader = ExtensionLoader.getExtensionLoader(ActivateExt1.class);
List<ActivateExt1> list = loader.getActivateExtension(url, new String[] {}, "onClass");
assertTrue(list == null || list.size() == 0);
}
}
| ExtensionLoader_Activate_Test |
java | apache__camel | components/camel-telegram/src/main/java/org/apache/camel/component/telegram/TelegramParseMode.java | {
"start": 924,
"end": 1156
} | enum ____ {
HTML("HTML"),
MARKDOWN("Markdown");
private final String code;
TelegramParseMode(String code) {
this.code = code;
}
public String getCode() {
return code;
}
}
| TelegramParseMode |
java | apache__kafka | streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamsPartitionAssignorTest.java | {
"start": 10564,
"end": 136500
} | class ____ {
private static final String CONSUMER_1 = "consumer1";
private static final String CONSUMER_2 = "consumer2";
private static final String CONSUMER_3 = "consumer3";
private static final String CONSUMER_4 = "consumer4";
private final Set<String> allTopics = Set.of("topic1", "topic2");
private final TopicPartition t1p0 = new TopicPartition("topic1", 0);
private final TopicPartition t1p1 = new TopicPartition("topic1", 1);
private final TopicPartition t1p2 = new TopicPartition("topic1", 2);
private final TopicPartition t1p3 = new TopicPartition("topic1", 3);
private final TopicPartition t2p0 = new TopicPartition("topic2", 0);
private final TopicPartition t2p1 = new TopicPartition("topic2", 1);
private final TopicPartition t2p2 = new TopicPartition("topic2", 2);
private final TopicPartition t2p3 = new TopicPartition("topic2", 3);
private final TopicPartition t3p0 = new TopicPartition("topic3", 0);
private final TopicPartition t3p1 = new TopicPartition("topic3", 1);
private final TopicPartition t3p2 = new TopicPartition("topic3", 2);
private final TopicPartition t3p3 = new TopicPartition("topic3", 3);
private final List<PartitionInfo> infos = asList(
new PartitionInfo("topic1", 0, NODE_0, REPLICA_0, REPLICA_0),
new PartitionInfo("topic1", 1, NODE_1, REPLICA_1, REPLICA_1),
new PartitionInfo("topic1", 2, NODE_2, REPLICA_2, REPLICA_2),
new PartitionInfo("topic2", 0, NODE_3, REPLICA_3, REPLICA_3),
new PartitionInfo("topic2", 1, NODE_4, REPLICA_4, REPLICA_4),
new PartitionInfo("topic2", 2, NODE_0, REPLICA_0, REPLICA_0),
new PartitionInfo("topic3", 0, NODE_1, REPLICA_1, REPLICA_1),
new PartitionInfo("topic3", 1, NODE_2, REPLICA_2, REPLICA_2),
new PartitionInfo("topic3", 2, NODE_3, REPLICA_3, REPLICA_3),
new PartitionInfo("topic3", 3, NODE_0, REPLICA_0, REPLICA_0)
);
private final SubscriptionInfo defaultSubscriptionInfo = getInfo(PID_1, EMPTY_TASKS, EMPTY_TASKS);
private final Cluster metadata = new Cluster(
"cluster",
Arrays.asList(NODE_0, NODE_1, NODE_2, NODE_3, NODE_4),
infos,
emptySet(),
emptySet()
);
private final StreamsPartitionAssignor partitionAssignor = new StreamsPartitionAssignor();
private final MockClientSupplier mockClientSupplier = new MockClientSupplier();
private static final String USER_END_POINT = "localhost:8080";
private static final String OTHER_END_POINT = "other:9090";
private static final String APPLICATION_ID = "stream-partition-assignor-test";
private TaskManager taskManager;
private Admin adminClient;
private InternalTopologyBuilder builder = new InternalTopologyBuilder();
private TopologyMetadata topologyMetadata;
@Mock
private StreamsMetadataState streamsMetadataState;
@Captor
private ArgumentCaptor<Map<TopicPartition, PartitionInfo>> topicPartitionInfoCaptor;
private final Map<String, Subscription> subscriptions = new HashMap<>();
private Map<String, String> clientTags;
private final ReferenceContainer referenceContainer = new ReferenceContainer();
private final MockTime time = new MockTime();
private final byte uniqueField = 1;
@SuppressWarnings("unchecked")
private Map<String, Object> configProps(final Map<String, Object> parameterizedConfig) {
final Map<String, Object> configurationMap = new HashMap<>();
configurationMap.put(StreamsConfig.APPLICATION_ID_CONFIG, APPLICATION_ID);
configurationMap.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, USER_END_POINT);
referenceContainer.mainConsumer = mock(Consumer.class);
referenceContainer.adminClient = adminClient != null ? adminClient : mock(Admin.class);
referenceContainer.taskManager = taskManager;
referenceContainer.streamsMetadataState = streamsMetadataState;
referenceContainer.time = time;
referenceContainer.clientTags = clientTags != null ? clientTags : EMPTY_CLIENT_TAGS;
configurationMap.put(InternalConfig.REFERENCE_CONTAINER_PARTITION_ASSIGNOR, referenceContainer);
configurationMap.putAll(parameterizedConfig);
return configurationMap;
}
private MockInternalTopicManager configureDefault(final Map<String, Object> parameterizedConfig) {
createDefaultMockTaskManager();
return configureDefaultPartitionAssignor(parameterizedConfig);
}
// Make sure to complete setting up any mocks (such as TaskManager or AdminClient) before configuring the assignor
private MockInternalTopicManager configureDefaultPartitionAssignor(final Map<String, Object> parameterizedConfig) {
return configurePartitionAssignorWith(emptyMap(), parameterizedConfig);
}
// Make sure to complete setting up any mocks (such as TaskManager or AdminClient) before configuring the assignor
private MockInternalTopicManager configurePartitionAssignorWith(final Map<String, Object> props,
final Map<String, Object> parameterizedConfig) {
return configurePartitionAssignorWith(props, null, parameterizedConfig);
}
private MockInternalTopicManager configurePartitionAssignorWith(final Map<String, Object> props,
final List<Map<String, List<TopicPartitionInfo>>> topicPartitionInfo,
final Map<String, Object> parameterizedConfig) {
final Map<String, Object> configMap = configProps(parameterizedConfig);
configMap.putAll(props);
partitionAssignor.configure(configMap);
topologyMetadata = new TopologyMetadata(builder, new StreamsConfig(configProps(parameterizedConfig)));
return overwriteInternalTopicManagerWithMock(false, topicPartitionInfo, parameterizedConfig);
}
private void createDefaultMockTaskManager() {
createMockTaskManager(EMPTY_TASKS, EMPTY_TASKS);
}
private void createMockTaskManager(final Set<TaskId> activeTasks,
final Set<TaskId> standbyTasks) {
taskManager = mock(TaskManager.class);
lenient().when(taskManager.topologyMetadata()).thenReturn(topologyMetadata);
lenient().when(taskManager.taskOffsetSums()).thenReturn(getTaskOffsetSums(activeTasks, standbyTasks));
lenient().when(taskManager.processId()).thenReturn(PID_1);
builder.setApplicationId(APPLICATION_ID);
topologyMetadata.buildAndRewriteTopology();
}
// If mockCreateInternalTopics is true the internal topic manager will report that it had to create all internal
// topics and we will skip the listOffsets request for these changelogs
private MockInternalTopicManager overwriteInternalTopicManagerWithMock(final boolean mockCreateInternalTopics,
final Map<String, Object> parameterizedConfig) {
return overwriteInternalTopicManagerWithMock(mockCreateInternalTopics, null, parameterizedConfig);
}
private MockInternalTopicManager overwriteInternalTopicManagerWithMock(final boolean mockCreateInternalTopics,
final List<Map<String, List<TopicPartitionInfo>>> topicPartitionInfo,
final Map<String, Object> parameterizedConfig) {
final MockInternalTopicManager mockInternalTopicManager = spy(new MockInternalTopicManager(
time,
new StreamsConfig(configProps(parameterizedConfig)),
mockClientSupplier.restoreConsumer,
mockCreateInternalTopics
));
if (topicPartitionInfo != null) {
lenient().when(mockInternalTopicManager.getTopicPartitionInfo(anySet())).thenAnswer(
invocation -> {
final Set<String> topics = invocation.getArgument(0);
for (final Map<String, List<TopicPartitionInfo>> tp : topicPartitionInfo) {
if (topics.equals(tp.keySet())) {
return tp;
}
}
return emptyMap();
}
);
}
partitionAssignor.setInternalTopicManager(mockInternalTopicManager);
return mockInternalTopicManager;
}
static Stream<Arguments> parameter() {
return Stream.of(
Arguments.of(buildParameterizedConfig(HighAvailabilityTaskAssignor.class, null, StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC)),
Arguments.of(buildParameterizedConfig(HighAvailabilityTaskAssignor.class, null, StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE)),
Arguments.of(buildParameterizedConfig(LegacyStickyTaskAssignor.class, null, StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC)),
Arguments.of(buildParameterizedConfig(LegacyStickyTaskAssignor.class, null, StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE)),
Arguments.of(buildParameterizedConfig(FallbackPriorTaskAssignor.class, null, StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC)),
Arguments.of(buildParameterizedConfig(FallbackPriorTaskAssignor.class, null, StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE)),
Arguments.of(buildParameterizedConfig(null, StickyTaskAssignor.class, StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE)),
Arguments.of(buildParameterizedConfig(null, StickyTaskAssignor.class, StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC)),
Arguments.of(buildParameterizedConfig(HighAvailabilityTaskAssignor.class, StickyTaskAssignor.class, StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE))
);
}
private static Map<String, Object> buildParameterizedConfig(final Class<? extends LegacyTaskAssignor> internalTaskAssignor,
final Class<? extends TaskAssignor> customTaskAssignor,
final String rackAwareAssignorStrategy) {
final Map<String, Object> configurationMap = new HashMap<>();
configurationMap.put(StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_CONFIG, rackAwareAssignorStrategy);
if (internalTaskAssignor != null) {
configurationMap.put(InternalConfig.INTERNAL_TASK_ASSIGNOR_CLASS, internalTaskAssignor.getName());
}
if (customTaskAssignor != null) {
configurationMap.put(StreamsConfig.TASK_ASSIGNOR_CLASS_CONFIG, customTaskAssignor.getName());
}
return configurationMap;
}
private void setUp(final Map<String, Object> parameterizedConfig, final boolean mockListOffsets) {
adminClient = createMockAdminClientForAssignor(EMPTY_CHANGELOG_END_OFFSETS, mockListOffsets);
topologyMetadata = new TopologyMetadata(builder, new StreamsConfig(configProps(parameterizedConfig)));
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldSupportOnlyCooperativeRebalancingProtocol(final Map<String, Object> parameterizedConfig) {
setUp(parameterizedConfig, false);
configureDefault(parameterizedConfig);
assertEquals(1, partitionAssignor.supportedProtocols().size());
assertTrue(partitionAssignor.supportedProtocols().contains(RebalanceProtocol.COOPERATIVE));
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldProduceStickyAndBalancedAssignmentWhenNothingChanges(final Map<String, Object> parameterizedConfig) {
setUp(parameterizedConfig, false);
final List<TaskId> allTasks =
asList(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3, TASK_1_0, TASK_1_1, TASK_1_2, TASK_1_3);
final Map<String, List<TaskId>> previousAssignment = mkMap(
mkEntry(CONSUMER_1, asList(TASK_0_0, TASK_1_1, TASK_1_3)),
mkEntry(CONSUMER_2, asList(TASK_0_3, TASK_1_0)),
mkEntry(CONSUMER_3, asList(TASK_0_1, TASK_0_2, TASK_1_2))
);
final ClientState state = new ClientState();
final SortedSet<String> consumers = mkSortedSet(CONSUMER_1, CONSUMER_2, CONSUMER_3);
state.addPreviousTasksAndOffsetSums(CONSUMER_1, getTaskOffsetSums(asList(TASK_0_0, TASK_1_1, TASK_1_3), EMPTY_TASKS));
state.addPreviousTasksAndOffsetSums(CONSUMER_2, getTaskOffsetSums(asList(TASK_0_3, TASK_1_0), EMPTY_TASKS));
state.addPreviousTasksAndOffsetSums(CONSUMER_3, getTaskOffsetSums(asList(TASK_0_1, TASK_0_2, TASK_1_2), EMPTY_TASKS));
state.initializePrevTasks(emptyMap(), false);
state.computeTaskLags(PID_1, getTaskEndOffsetSums(allTasks));
assertEquivalentAssignment(
previousAssignment,
assignTasksToThreads(
allTasks,
true,
consumers,
state,
new HashMap<>()
)
);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldProduceStickyAndBalancedAssignmentWhenNewTasksAreAdded(final Map<String, Object> parameterizedConfig) {
setUp(parameterizedConfig, false);
final List<TaskId> allTasks =
new ArrayList<>(asList(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3, TASK_1_0, TASK_1_1, TASK_1_2, TASK_1_3));
final Map<String, List<TaskId>> previousAssignment = mkMap(
mkEntry(CONSUMER_1, new ArrayList<>(asList(TASK_0_0, TASK_1_1, TASK_1_3))),
mkEntry(CONSUMER_2, new ArrayList<>(asList(TASK_0_3, TASK_1_0))),
mkEntry(CONSUMER_3, new ArrayList<>(asList(TASK_0_1, TASK_0_2, TASK_1_2)))
);
final ClientState state = new ClientState();
final SortedSet<String> consumers = mkSortedSet(CONSUMER_1, CONSUMER_2, CONSUMER_3);
state.addPreviousTasksAndOffsetSums(CONSUMER_1, getTaskOffsetSums(asList(TASK_0_0, TASK_1_1, TASK_1_3), EMPTY_TASKS));
state.addPreviousTasksAndOffsetSums(CONSUMER_2, getTaskOffsetSums(asList(TASK_0_3, TASK_1_0), EMPTY_TASKS));
state.addPreviousTasksAndOffsetSums(CONSUMER_3, getTaskOffsetSums(asList(TASK_0_1, TASK_0_2, TASK_1_2), EMPTY_TASKS));
state.initializePrevTasks(emptyMap(), false);
state.computeTaskLags(PID_1, getTaskEndOffsetSums(allTasks));
// We should be able to add a new task without sacrificing stickiness
final TaskId newTask = TASK_2_0;
allTasks.add(newTask);
state.assignActiveTasks(allTasks);
final Map<String, List<TaskId>> newAssignment =
assignTasksToThreads(
allTasks,
true,
consumers,
state,
new HashMap<>()
);
previousAssignment.get(CONSUMER_2).add(newTask);
assertEquivalentAssignment(previousAssignment, newAssignment);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldProduceMaximallyStickyAssignmentWhenMemberLeaves(final Map<String, Object> parameterizedConfig) {
setUp(parameterizedConfig, false);
final List<TaskId> allTasks =
asList(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3, TASK_1_0, TASK_1_1, TASK_1_2, TASK_1_3);
final Map<String, List<TaskId>> previousAssignment = mkMap(
mkEntry(CONSUMER_1, asList(TASK_0_0, TASK_1_1, TASK_1_3)),
mkEntry(CONSUMER_2, asList(TASK_0_3, TASK_1_0)),
mkEntry(CONSUMER_3, asList(TASK_0_1, TASK_0_2, TASK_1_2))
);
final ClientState state = new ClientState();
final SortedSet<String> consumers = mkSortedSet(CONSUMER_1, CONSUMER_2, CONSUMER_3);
state.addPreviousTasksAndOffsetSums(CONSUMER_1, getTaskOffsetSums(asList(TASK_0_0, TASK_1_1, TASK_1_3), EMPTY_TASKS));
state.addPreviousTasksAndOffsetSums(CONSUMER_2, getTaskOffsetSums(asList(TASK_0_3, TASK_1_0), EMPTY_TASKS));
state.addPreviousTasksAndOffsetSums(CONSUMER_3, getTaskOffsetSums(asList(TASK_0_1, TASK_0_2, TASK_1_2), EMPTY_TASKS));
state.initializePrevTasks(emptyMap(), false);
state.computeTaskLags(PID_1, getTaskEndOffsetSums(allTasks));
// Consumer 3 leaves the group
consumers.remove(CONSUMER_3);
final Map<String, List<TaskId>> assignment = assignTasksToThreads(
allTasks,
true,
consumers,
state,
new HashMap<>()
);
// Each member should have all of its previous tasks reassigned plus some of consumer 3's tasks
// We should give one of its tasks to consumer 1, and two of its tasks to consumer 2
assertTrue(assignment.get(CONSUMER_1).containsAll(previousAssignment.get(CONSUMER_1)));
assertTrue(assignment.get(CONSUMER_2).containsAll(previousAssignment.get(CONSUMER_2)));
assertThat(assignment.get(CONSUMER_1).size(), equalTo(4));
assertThat(assignment.get(CONSUMER_2).size(), equalTo(4));
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldProduceStickyEnoughAssignmentWhenNewMemberJoins(final Map<String, Object> parameterizedConfig) {
setUp(parameterizedConfig, false);
final List<TaskId> allTasks =
asList(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3, TASK_1_0, TASK_1_1, TASK_1_2, TASK_1_3);
final Map<String, List<TaskId>> previousAssignment = mkMap(
mkEntry(CONSUMER_1, asList(TASK_0_0, TASK_1_1, TASK_1_3)),
mkEntry(CONSUMER_2, asList(TASK_0_3, TASK_1_0)),
mkEntry(CONSUMER_3, asList(TASK_0_1, TASK_0_2, TASK_1_2))
);
final ClientState state = new ClientState();
final SortedSet<String> consumers = mkSortedSet(CONSUMER_1, CONSUMER_2, CONSUMER_3);
state.addPreviousTasksAndOffsetSums(CONSUMER_1, getTaskOffsetSums(asList(TASK_0_0, TASK_1_1, TASK_1_3), EMPTY_TASKS));
state.addPreviousTasksAndOffsetSums(CONSUMER_2, getTaskOffsetSums(asList(TASK_0_3, TASK_1_0), EMPTY_TASKS));
state.addPreviousTasksAndOffsetSums(CONSUMER_3, getTaskOffsetSums(asList(TASK_0_1, TASK_0_2, TASK_1_2), EMPTY_TASKS));
// Consumer 4 joins the group
consumers.add(CONSUMER_4);
state.addPreviousTasksAndOffsetSums(CONSUMER_4, getTaskOffsetSums(EMPTY_TASKS, EMPTY_TASKS));
state.initializePrevTasks(emptyMap(), false);
state.computeTaskLags(PID_1, getTaskEndOffsetSums(allTasks));
final Map<String, List<TaskId>> assignment = assignTasksToThreads(
allTasks,
true,
consumers,
state,
new HashMap<>()
);
// we should move one task each from consumer 1 and consumer 3 to the new member, and none from consumer 2
assertTrue(previousAssignment.get(CONSUMER_1).containsAll(assignment.get(CONSUMER_1)));
assertTrue(previousAssignment.get(CONSUMER_3).containsAll(assignment.get(CONSUMER_3)));
assertTrue(assignment.get(CONSUMER_2).containsAll(previousAssignment.get(CONSUMER_2)));
assertThat(assignment.get(CONSUMER_1).size(), equalTo(2));
assertThat(assignment.get(CONSUMER_2).size(), equalTo(2));
assertThat(assignment.get(CONSUMER_3).size(), equalTo(2));
assertThat(assignment.get(CONSUMER_4).size(), equalTo(2));
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldInterleaveTasksByGroupIdDuringNewAssignment(final Map<String, Object> parameterizedConfig) {
setUp(parameterizedConfig, false);
final List<TaskId> allTasks =
asList(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3, TASK_1_0, TASK_1_1, TASK_1_2, TASK_2_0, TASK_2_1);
final Map<String, List<TaskId>> assignment = mkMap(
mkEntry(CONSUMER_1, new ArrayList<>(asList(TASK_0_0, TASK_0_3, TASK_1_2))),
mkEntry(CONSUMER_2, new ArrayList<>(asList(TASK_0_1, TASK_1_0, TASK_2_0))),
mkEntry(CONSUMER_3, new ArrayList<>(asList(TASK_0_2, TASK_1_1, TASK_2_1)))
);
final ClientState state = new ClientState();
final SortedSet<String> consumers = mkSortedSet(CONSUMER_1, CONSUMER_2, CONSUMER_3);
state.addPreviousTasksAndOffsetSums(CONSUMER_1, emptyMap());
state.addPreviousTasksAndOffsetSums(CONSUMER_2, emptyMap());
state.addPreviousTasksAndOffsetSums(CONSUMER_3, emptyMap());
Collections.shuffle(allTasks);
final Map<String, List<TaskId>> interleavedTaskIds =
assignTasksToThreads(
allTasks,
true,
consumers,
state,
new HashMap<>()
);
assertThat(interleavedTaskIds, equalTo(assignment));
}
@ParameterizedTest
@MethodSource("parameter")
public void testCooperativeSubscription(final Map<String, Object> parameterizedConfig) {
setUp(parameterizedConfig, false);
builder.addSource(null, "source1", null, null, null, "topic1");
builder.addSource(null, "source2", null, null, null, "topic2");
builder.addProcessor("processor", new MockApiProcessorSupplier<>(), "source1", "source2");
final Set<TaskId> prevTasks = Set.of(
new TaskId(0, 1), new TaskId(1, 1), new TaskId(2, 1));
final Set<TaskId> standbyTasks = Set.of(
new TaskId(0, 1), new TaskId(1, 1), new TaskId(2, 1),
new TaskId(0, 2), new TaskId(1, 2), new TaskId(2, 2));
createMockTaskManager(prevTasks, standbyTasks);
configureDefaultPartitionAssignor(parameterizedConfig);
final Set<String> topics = Set.of("topic1", "topic2");
final Subscription subscription = new Subscription(
new ArrayList<>(topics), partitionAssignor.subscriptionUserData(topics));
Collections.sort(subscription.topics());
assertEquals(asList("topic1", "topic2"), subscription.topics());
final SubscriptionInfo info = getInfo(PID_1, prevTasks, standbyTasks, uniqueField);
assertEquals(info, SubscriptionInfo.decode(subscription.userData()));
}
@ParameterizedTest
@MethodSource("parameter")
public void testAssignBasic(final Map<String, Object> parameterizedConfig) {
setUp(parameterizedConfig, false);
builder.addSource(null, "source1", null, null, null, "topic1");
builder.addSource(null, "source2", null, null, null, "topic2");
builder.addProcessor("processor", new MockApiProcessorSupplier<>(), "source1", "source2");
builder.addStateStore(new MockKeyValueStoreBuilder("store", false), "processor");
final List<String> topics = asList("topic1", "topic2");
final Set<TaskId> allTasks = Set.of(TASK_0_0, TASK_0_1, TASK_0_2);
final Set<TaskId> prevTasks10 = Set.of(TASK_0_0);
final Set<TaskId> prevTasks11 = Set.of(TASK_0_1);
final Set<TaskId> prevTasks20 = Set.of(TASK_0_2);
final Set<TaskId> standbyTasks10 = EMPTY_TASKS;
final Set<TaskId> standbyTasks11 = Set.of(TASK_0_2);
final Set<TaskId> standbyTasks20 = Set.of(TASK_0_0);
createMockTaskManager(prevTasks10, standbyTasks10);
adminClient = createMockAdminClientForAssignor(getTopicPartitionOffsetsMap(
singletonList(APPLICATION_ID + "-store-changelog"),
singletonList(3)),
true
);
final List<Map<String, List<TopicPartitionInfo>>> partitionInfo = singletonList(mkMap(mkEntry(
"stream-partition-assignor-test-store-changelog",
singletonList(
new TopicPartitionInfo(
0,
new Node(1, "h1", 80),
singletonList(new Node(1, "h1", 80)),
emptyList()
)
)
)
));
configurePartitionAssignorWith(emptyMap(), partitionInfo, parameterizedConfig);
subscriptions.put("consumer10",
new Subscription(
topics,
getInfo(PID_1, prevTasks10, standbyTasks10).encode(),
Collections.emptyList(),
DEFAULT_GENERATION,
Optional.of(RACK_3)
));
subscriptions.put("consumer11",
new Subscription(
topics,
getInfo(PID_1, prevTasks11, standbyTasks11).encode(),
Collections.emptyList(),
DEFAULT_GENERATION,
Optional.of(RACK_3)
));
subscriptions.put("consumer20",
new Subscription(
topics,
getInfo(PID_2, prevTasks20, standbyTasks20).encode(),
Collections.emptyList(),
DEFAULT_GENERATION,
Optional.of(RACK_0)
));
final Map<String, Assignment> assignments = partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
// check the assignment
assertEquals(Set.of(Set.of(t1p0, t2p0), Set.of(t1p1, t2p1)),
Set.of(new HashSet<>(assignments.get("consumer10").partitions()),
new HashSet<>(assignments.get("consumer11").partitions())));
assertEquals(Set.of(t1p2, t2p2), new HashSet<>(assignments.get("consumer20").partitions()));
// check assignment info
// the first consumer
final AssignmentInfo info10 = checkAssignment(allTopics, assignments.get("consumer10"));
final Set<TaskId> allActiveTasks = new HashSet<>(info10.activeTasks());
// the second consumer
final AssignmentInfo info11 = checkAssignment(allTopics, assignments.get("consumer11"));
allActiveTasks.addAll(info11.activeTasks());
assertEquals(Set.of(TASK_0_0, TASK_0_1), allActiveTasks);
// the third consumer
final AssignmentInfo info20 = checkAssignment(allTopics, assignments.get("consumer20"));
allActiveTasks.addAll(info20.activeTasks());
assertEquals(3, allActiveTasks.size());
assertEquals(allTasks, new HashSet<>(allActiveTasks));
assertEquals(3, allActiveTasks.size());
assertEquals(allTasks, allActiveTasks);
}
    // Two consumer threads subscribe to two four-partition topics (eight tasks across two
    // sub-topologies). The assignment must give each thread four tasks, interleaved across
    // the sub-topologies, as pinned by the exact task lists asserted at the end.
    @ParameterizedTest
    @MethodSource("parameter")
    public void shouldAssignEvenlyAcrossConsumersOneClientMultipleThreads(final Map<String, Object> parameterizedConfig) {
        setUp(parameterizedConfig, true);
        builder.addSource(null, "source1", null, null, null, "topic1");
        builder.addSource(null, "source2", null, null, null, "topic2");
        builder.addProcessor("processor", new MockApiProcessorSupplier<>(), "source1");
        builder.addProcessor("processorII", new MockApiProcessorSupplier<>(), "source2");
        // Local cluster metadata: four partitions for each of the two topics, spread
        // over five nodes.
        final List<PartitionInfo> localInfos = asList(
            new PartitionInfo("topic1", 0, NODE_0, REPLICA_0, REPLICA_0),
            new PartitionInfo("topic1", 1, NODE_1, REPLICA_1, REPLICA_1),
            new PartitionInfo("topic1", 2, NODE_2, REPLICA_2, REPLICA_2),
            new PartitionInfo("topic1", 3, NODE_3, REPLICA_3, REPLICA_3),
            new PartitionInfo("topic2", 0, NODE_4, REPLICA_4, REPLICA_4),
            new PartitionInfo("topic2", 1, NODE_0, REPLICA_0, REPLICA_0),
            new PartitionInfo("topic2", 2, NODE_1, REPLICA_1, REPLICA_1),
            new PartitionInfo("topic2", 3, NODE_2, REPLICA_2, REPLICA_2)
        );
        final Cluster localMetadata = new Cluster(
            "cluster",
            asList(NODE_0, NODE_1, NODE_2, NODE_3, NODE_4),
            localInfos,
            emptySet(),
            emptySet()
        );
        final List<String> topics = asList("topic1", "topic2");
        configureDefault(parameterizedConfig);
        // Both consumers join with the default subscription info (no previous tasks)
        // from the same rack.
        subscriptions.put("consumer10",
            new Subscription(
                topics,
                defaultSubscriptionInfo.encode(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_2)
            ));
        subscriptions.put("consumer11",
            new Subscription(
                topics,
                defaultSubscriptionInfo.encode(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_2)
            ));
        final Map<String, Assignment> assignments = partitionAssignor.assign(localMetadata, new GroupSubscription(subscriptions)).groupAssignment();
        // check assigned partitions
        assertEquals(Set.of(Set.of(t2p2, t1p0, t1p2, t2p0), Set.of(t1p1, t2p1, t1p3, t2p3)),
            Set.of(new HashSet<>(assignments.get("consumer10").partitions()), new HashSet<>(assignments.get("consumer11").partitions())));
        // the first consumer: even-numbered tasks of both sub-topologies
        final AssignmentInfo info10 = AssignmentInfo.decode(assignments.get("consumer10").userData());
        final List<TaskId> expectedInfo10TaskIds = asList(TASK_0_0, TASK_0_2, TASK_1_0, TASK_1_2);
        assertEquals(expectedInfo10TaskIds, info10.activeTasks());
        // the second consumer: odd-numbered tasks of both sub-topologies
        final AssignmentInfo info11 = AssignmentInfo.decode(assignments.get("consumer11").userData());
        final List<TaskId> expectedInfo11TaskIds = asList(TASK_0_1, TASK_0_3, TASK_1_1, TASK_1_3);
        assertEquals(expectedInfo11TaskIds, info11.activeTasks());
    }
    // A consumer on a brand-new client (PID_2, no owned partitions) joins while the two
    // consumers of PID_1 own all four tasks. With cooperative rebalancing the newcomer
    // must receive neither active nor standby tasks in this round; it only gets its
    // active task(s) in the follow-up rebalance.
    @ParameterizedTest
    @MethodSource("parameter")
    public void shouldNotAssignTemporaryStandbyTask(final Map<String, Object> parameterizedConfig) {
        setUp(parameterizedConfig, true);
        builder.addSource(null, "source1", null, null, null, "topic1");
        // Local cluster metadata: a single four-partition topic.
        final List<PartitionInfo> localInfos = asList(
            new PartitionInfo("topic1", 0, NODE_0, REPLICA_0, REPLICA_0),
            new PartitionInfo("topic1", 1, NODE_1, REPLICA_1, REPLICA_1),
            new PartitionInfo("topic1", 2, NODE_2, REPLICA_2, REPLICA_2),
            new PartitionInfo("topic1", 3, NODE_0, REPLICA_1, REPLICA_2)
        );
        final Cluster localMetadata = new Cluster(
            "cluster",
            asList(NODE_0, NODE_1, NODE_2),
            localInfos,
            emptySet(),
            emptySet()
        );
        final List<String> topics = singletonList("topic1");
        createMockTaskManager(Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3), emptySet());
        configureDefaultPartitionAssignor(parameterizedConfig);
        // The two PID_1 consumers each currently own two tasks and the matching partitions.
        subscriptions.put("consumer10",
            new Subscription(
                topics,
                getInfo(PID_1, Set.of(TASK_0_0, TASK_0_2), emptySet()).encode(),
                asList(t1p0, t1p2),
                DEFAULT_GENERATION,
                Optional.of(RACK_2)
            ));
        subscriptions.put("consumer11",
            new Subscription(
                topics,
                getInfo(PID_1, Set.of(TASK_0_1, TASK_0_3), emptySet()).encode(),
                asList(t1p1, t1p3),
                DEFAULT_GENERATION,
                Optional.of(RACK_2)
            ));
        subscriptions.put("consumer20",
            new Subscription(
                topics,
                getInfo(PID_2, emptySet(), emptySet()).encode(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_2)
            ));
        final Map<String, Assignment> assignments = partitionAssignor.assign(localMetadata, new GroupSubscription(subscriptions)).groupAssignment();
        // neither active nor standby tasks should be assigned to consumer 3, which will have to wait until
        // the followup cooperative rebalance to get the active task(s) it was assigned (and does not need
        // a standby copy before that since it previously had no tasks at all)
        final AssignmentInfo info20 = AssignmentInfo.decode(assignments.get("consumer20").userData());
        assertTrue(info20.activeTasks().isEmpty());
        assertTrue(info20.standbyTasks().isEmpty());
    }
@ParameterizedTest
@MethodSource("parameter")
public void testAssignEmptyMetadata(final Map<String, Object> parameterizedConfig) {
setUp(parameterizedConfig, true);
builder.addSource(null, "source1", null, null, null, "topic1");
builder.addSource(null, "source2", null, null, null, "topic2");
builder.addProcessor("processor", new MockApiProcessorSupplier<>(), "source1", "source2");
final List<String> topics = asList("topic1", "topic2");
final Set<TaskId> allTasks = Set.of(TASK_0_0, TASK_0_1, TASK_0_2);
final Set<TaskId> prevTasks10 = Set.of(TASK_0_0);
final Set<TaskId> standbyTasks10 = Set.of(TASK_0_1);
final Cluster emptyMetadata = new Cluster("cluster", Collections.singletonList(Node.noNode()),
emptySet(),
emptySet(),
emptySet());
createMockTaskManager(prevTasks10, standbyTasks10);
configureDefaultPartitionAssignor(parameterizedConfig);
subscriptions.put("consumer10",
new Subscription(
topics,
getInfo(PID_1, prevTasks10, standbyTasks10).encode(),
emptyList(),
DEFAULT_GENERATION,
Optional.of(RACK_3)
));
// initially metadata is empty
Map<String, Assignment> assignments =
partitionAssignor.assign(emptyMetadata, new GroupSubscription(subscriptions)).groupAssignment();
// check assigned partitions
assertEquals(emptySet(),
new HashSet<>(assignments.get("consumer10").partitions()));
// check assignment info
AssignmentInfo info10 = checkAssignment(emptySet(), assignments.get("consumer10"));
final Set<TaskId> allActiveTasks = new HashSet<>(info10.activeTasks());
assertEquals(0, allActiveTasks.size());
// then metadata gets populated
assignments = partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
// check assigned partitions
assertEquals(Set.of(t1p0, t2p0, t1p1, t2p1, t1p2, t2p2),
new HashSet<>(assignments.get("consumer10").partitions()));
// the first consumer
info10 = checkAssignment(allTopics, assignments.get("consumer10"));
allActiveTasks.addAll(info10.activeTasks());
assertEquals(3, allActiveTasks.size());
assertEquals(allTasks, new HashSet<>(allActiveTasks));
assertEquals(3, allActiveTasks.size());
assertEquals(allTasks, allActiveTasks);
}
    // A third topic is added that no consumer has previous tasks for; the new TASK_0_3
    // must still be assigned somewhere, and the union of all active tasks / partitions
    // across the three consumers must cover everything exactly.
    @ParameterizedTest
    @MethodSource("parameter")
    public void testAssignWithNewTasks(final Map<String, Object> parameterizedConfig) {
        setUp(parameterizedConfig, true);
        builder.addSource(null, "source1", null, null, null, "topic1");
        builder.addSource(null, "source2", null, null, null, "topic2");
        builder.addSource(null, "source3", null, null, null, "topic3");
        builder.addProcessor("processor", new MockApiProcessorSupplier<>(), "source1", "source2", "source3");
        final List<String> topics = asList("topic1", "topic2", "topic3");
        final Set<TaskId> allTasks = Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3);
        // assuming that previous tasks do not have topic3
        final Set<TaskId> prevTasks10 = Set.of(TASK_0_0);
        final Set<TaskId> prevTasks11 = Set.of(TASK_0_1);
        final Set<TaskId> prevTasks20 = Set.of(TASK_0_2);
        createMockTaskManager(prevTasks10, EMPTY_TASKS);
        configureDefaultPartitionAssignor(parameterizedConfig);
        subscriptions.put("consumer10",
            new Subscription(
                topics,
                getInfo(PID_1, prevTasks10, EMPTY_TASKS).encode(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_4)));
        subscriptions.put("consumer11",
            new Subscription(
                topics,
                getInfo(PID_1, prevTasks11, EMPTY_TASKS).encode(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_4)));
        subscriptions.put("consumer20",
            new Subscription(
                topics,
                getInfo(PID_2, prevTasks20, EMPTY_TASKS).encode(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_1)));
        final Map<String, Assignment> assignments = partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
        // check assigned partitions: since there is no previous task for topic 3 it will be assigned randomly so we cannot check exact match
        // also note that previously assigned partitions / tasks may not stay on the previous host since we may assign the new task first and
        // then later ones will be re-assigned to other hosts due to load balancing
        AssignmentInfo info = AssignmentInfo.decode(assignments.get("consumer10").userData());
        final Set<TaskId> allActiveTasks = new HashSet<>(info.activeTasks());
        final Set<TopicPartition> allPartitions = new HashSet<>(assignments.get("consumer10").partitions());
        info = AssignmentInfo.decode(assignments.get("consumer11").userData());
        allActiveTasks.addAll(info.activeTasks());
        allPartitions.addAll(assignments.get("consumer11").partitions());
        info = AssignmentInfo.decode(assignments.get("consumer20").userData());
        allActiveTasks.addAll(info.activeTasks());
        allPartitions.addAll(assignments.get("consumer20").partitions());
        // only the union across all consumers can be checked exactly
        assertEquals(allTasks, allActiveTasks);
        assertEquals(Set.of(t1p0, t1p1, t1p2, t2p0, t2p1, t2p2, t3p0, t3p1, t3p2, t3p3), allPartitions);
    }
    // Two stateful sub-topologies (store1 on topic1; store2 and store3 on topic2) are
    // assigned across three consumers. Exact placement is not deterministic, so only the
    // per-consumer task counts, the overall task union, and the store-to-task mapping
    // (via tasksForState) are asserted.
    @ParameterizedTest
    @MethodSource("parameter")
    public void testAssignWithStates(final Map<String, Object> parameterizedConfig) {
        setUp(parameterizedConfig, false);
        builder.addSource(null, "source1", null, null, null, "topic1");
        builder.addSource(null, "source2", null, null, null, "topic2");
        builder.addProcessor("processor-1", new MockApiProcessorSupplier<>(), "source1");
        builder.addStateStore(new MockKeyValueStoreBuilder("store1", false), "processor-1");
        builder.addProcessor("processor-2", new MockApiProcessorSupplier<>(), "source2");
        builder.addStateStore(new MockKeyValueStoreBuilder("store2", false), "processor-2");
        builder.addStateStore(new MockKeyValueStoreBuilder("store3", false), "processor-2");
        final List<String> topics = asList("topic1", "topic2");
        final List<TaskId> tasks = asList(TASK_0_0, TASK_0_1, TASK_0_2, TASK_1_0, TASK_1_1, TASK_1_2);
        // End offsets for all three changelog topics so the assignor can compute lags.
        adminClient = createMockAdminClientForAssignor(getTopicPartitionOffsetsMap(
            asList(APPLICATION_ID + "-store1-changelog",
                APPLICATION_ID + "-store2-changelog",
                APPLICATION_ID + "-store3-changelog"),
            asList(3, 3, 3)),
            true
        );
        createDefaultMockTaskManager();
        final List<Map<String, List<TopicPartitionInfo>>> changelogTopicPartitionInfo = getTopicPartitionInfo(
            3,
            singletonList(Set.of(
                APPLICATION_ID + "-store1-changelog",
                APPLICATION_ID + "-store2-changelog",
                APPLICATION_ID + "-store3-changelog"
            )));
        configurePartitionAssignorWith(emptyMap(), changelogTopicPartitionInfo, parameterizedConfig);
        subscriptions.put("consumer10",
            new Subscription(
                topics,
                defaultSubscriptionInfo.encode(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_1)));
        subscriptions.put("consumer11",
            new Subscription(
                topics,
                defaultSubscriptionInfo.encode(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_1)));
        subscriptions.put("consumer20",
            new Subscription(
                topics,
                getInfo(PID_2, EMPTY_TASKS, EMPTY_TASKS).encode(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_2)));
        final Map<String, Assignment> assignments = partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
        // check assigned partition size: since there is no previous task and there are two sub-topologies the assignment is random so we cannot check exact match
        assertEquals(2, assignments.get("consumer10").partitions().size());
        assertEquals(2, assignments.get("consumer11").partitions().size());
        assertEquals(2, assignments.get("consumer20").partitions().size());
        final AssignmentInfo info10 = AssignmentInfo.decode(assignments.get("consumer10").userData());
        final AssignmentInfo info11 = AssignmentInfo.decode(assignments.get("consumer11").userData());
        final AssignmentInfo info20 = AssignmentInfo.decode(assignments.get("consumer20").userData());
        assertEquals(2, info10.activeTasks().size());
        assertEquals(2, info11.activeTasks().size());
        assertEquals(2, info20.activeTasks().size());
        final Set<TaskId> allTasks = new HashSet<>();
        allTasks.addAll(info10.activeTasks());
        allTasks.addAll(info11.activeTasks());
        allTasks.addAll(info20.activeTasks());
        assertEquals(new HashSet<>(tasks), allTasks);
        // check tasks for state topics
        final Map<Subtopology, InternalTopologyBuilder.TopicsInfo> topicGroups = builder.subtopologyToTopicsInfo();
        assertEquals(Set.of(TASK_0_0, TASK_0_1, TASK_0_2), tasksForState("store1", tasks, topicGroups));
        assertEquals(Set.of(TASK_1_0, TASK_1_1, TASK_1_2), tasksForState("store2", tasks, topicGroups));
        assertEquals(Set.of(TASK_1_0, TASK_1_1, TASK_1_2), tasksForState("store3", tasks, topicGroups));
    }
private static Set<TaskId> tasksForState(final String storeName,
final List<TaskId> tasks,
final Map<Subtopology, InternalTopologyBuilder.TopicsInfo> topicGroups) {
final String changelogTopic = ProcessorStateManager.storeChangelogTopic(APPLICATION_ID, storeName, null);
final Set<TaskId> ids = new HashSet<>();
for (final Map.Entry<Subtopology, InternalTopologyBuilder.TopicsInfo> entry : topicGroups.entrySet()) {
final Set<String> stateChangelogTopics = entry.getValue().stateChangelogTopics.keySet();
if (stateChangelogTopics.contains(changelogTopic)) {
for (final TaskId id : tasks) {
if (id.subtopology() == entry.getKey().nodeGroupId) {
ids.add(id);
}
}
}
}
return ids;
}
    // The topology has no state stores, so even with num.standby.replicas=1 the
    // assignment must not contain any standby tasks for either client.
    @ParameterizedTest
    @MethodSource("parameter")
    public void testAssignWithStandbyReplicasAndStatelessTasks(final Map<String, Object> parameterizedConfig) {
        setUp(parameterizedConfig, true);
        builder.addSource(null, "source1", null, null, null, "topic1", "topic2");
        builder.addProcessor("processor", new MockApiProcessorSupplier<>(), "source1");
        final List<String> topics = asList("topic1", "topic2");
        createMockTaskManager(Set.of(TASK_0_0), emptySet());
        // Request one standby replica; it should have no effect without stores.
        configurePartitionAssignorWith(Collections.singletonMap(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1), parameterizedConfig);
        subscriptions.put("consumer10",
            new Subscription(
                topics,
                getInfo(PID_1, Set.of(TASK_0_0), emptySet()).encode(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_0)));
        subscriptions.put("consumer20",
            new Subscription(
                topics,
                getInfo(PID_2, Set.of(TASK_0_2), emptySet()).encode(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_2)));
        final Map<String, Assignment> assignments =
            partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
        final AssignmentInfo info10 = checkAssignment(allTopics, assignments.get("consumer10"));
        assertTrue(info10.standbyTasks().isEmpty());
        final AssignmentInfo info20 = checkAssignment(allTopics, assignments.get("consumer20"));
        assertTrue(info20.standbyTasks().isEmpty());
    }
    // store1 has changelogging disabled, so there is no changelog to restore from and
    // num.standby.replicas=1 must still yield zero standby tasks.
    @ParameterizedTest
    @MethodSource("parameter")
    public void testAssignWithStandbyReplicasAndLoggingDisabled(final Map<String, Object> parameterizedConfig) {
        setUp(parameterizedConfig, true);
        builder.addSource(null, "source1", null, null, null, "topic1", "topic2");
        builder.addProcessor("processor", new MockApiProcessorSupplier<>(), "source1");
        builder.addStateStore(new MockKeyValueStoreBuilder("store1", false).withLoggingDisabled(), "processor");
        final List<String> topics = asList("topic1", "topic2");
        createMockTaskManager(Set.of(TASK_0_0), emptySet());
        configurePartitionAssignorWith(Collections.singletonMap(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1), parameterizedConfig);
        subscriptions.put("consumer10",
            new Subscription(
                topics,
                getInfo(PID_1, Set.of(TASK_0_0), emptySet()).encode(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_1)));
        subscriptions.put("consumer20",
            new Subscription(
                topics,
                getInfo(PID_2, Set.of(TASK_0_2), emptySet()).encode(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_3)));
        final Map<String, Assignment> assignments =
            partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
        final AssignmentInfo info10 = checkAssignment(allTopics, assignments.get("consumer10"));
        assertTrue(info10.standbyTasks().isEmpty());
        final AssignmentInfo info20 = checkAssignment(allTopics, assignments.get("consumer20"));
        assertTrue(info20.standbyTasks().isEmpty());
    }
    // Three consumers on two clients with num.standby.replicas=1: every task must appear
    // exactly once as active and exactly once as standby, a client must not host a standby
    // of its own active task, and the host-metadata maps (partitionsByHost /
    // standbyPartitionByHost) must be identical across all consumers.
    @ParameterizedTest
    @MethodSource("parameter")
    public void testAssignWithStandbyReplicas(final Map<String, Object> parameterizedConfig) {
        setUp(parameterizedConfig, false);
        builder.addSource(null, "source1", null, null, null, "topic1");
        builder.addSource(null, "source2", null, null, null, "topic2");
        builder.addProcessor("processor", new MockApiProcessorSupplier<>(), "source1", "source2");
        builder.addStateStore(new MockKeyValueStoreBuilder("store1", false), "processor");
        final List<String> topics = asList("topic1", "topic2");
        final Set<TopicPartition> allTopicPartitions = topics.stream()
            .map(topic -> asList(new TopicPartition(topic, 0), new TopicPartition(topic, 1), new TopicPartition(topic, 2)))
            .flatMap(Collection::stream)
            .collect(Collectors.toSet());
        final Set<TaskId> allTasks = Set.of(TASK_0_0, TASK_0_1, TASK_0_2);
        final Set<TaskId> prevTasks00 = Set.of(TASK_0_0);
        final Set<TaskId> prevTasks01 = Set.of(TASK_0_1);
        final Set<TaskId> prevTasks02 = Set.of(TASK_0_2);
        final Set<TaskId> standbyTasks00 = Set.of(TASK_0_0);
        final Set<TaskId> standbyTasks01 = Set.of(TASK_0_1);
        final Set<TaskId> standbyTasks02 = Set.of(TASK_0_2);
        createMockTaskManager(prevTasks00, standbyTasks01);
        adminClient = createMockAdminClientForAssignor(getTopicPartitionOffsetsMap(
            singletonList(APPLICATION_ID + "-store1-changelog"),
            singletonList(3)),
            true
        );
        final List<Map<String, List<TopicPartitionInfo>>> changelogTopicPartitionInfo = getTopicPartitionInfo(
            3,
            singletonList(Set.of(APPLICATION_ID + "-store1-changelog")));
        configurePartitionAssignorWith(Collections.singletonMap(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1), changelogTopicPartitionInfo, parameterizedConfig);
        // consumer10 and consumer11 share PID_1 / USER_END_POINT; consumer20 is on PID_2
        // with a different endpoint.
        subscriptions.put("consumer10",
            new Subscription(
                topics,
                getInfo(PID_1, prevTasks00, EMPTY_TASKS, USER_END_POINT).encode(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_0)));
        subscriptions.put("consumer11",
            new Subscription(
                topics,
                getInfo(PID_1, prevTasks01, standbyTasks02, USER_END_POINT).encode(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_0)));
        subscriptions.put("consumer20",
            new Subscription(
                topics,
                getInfo(PID_2, prevTasks02, standbyTasks00, OTHER_END_POINT).encode(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_4)));
        final Map<String, Assignment> assignments =
            partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
        // the first consumer
        final AssignmentInfo info10 = checkAssignment(allTopics, assignments.get("consumer10"));
        final Set<TaskId> allActiveTasks = new HashSet<>(info10.activeTasks());
        final Set<TaskId> allStandbyTasks = new HashSet<>(info10.standbyTasks().keySet());
        // the second consumer
        final AssignmentInfo info11 = checkAssignment(allTopics, assignments.get("consumer11"));
        allActiveTasks.addAll(info11.activeTasks());
        allStandbyTasks.addAll(info11.standbyTasks().keySet());
        assertNotEquals(info11.standbyTasks().keySet(), info10.standbyTasks().keySet(), "same processId has same set of standby tasks");
        // check active tasks assigned to the first client
        assertEquals(Set.of(TASK_0_0, TASK_0_1), new HashSet<>(allActiveTasks));
        assertEquals(Set.of(TASK_0_2), new HashSet<>(allStandbyTasks));
        // the third consumer
        final AssignmentInfo info20 = checkAssignment(allTopics, assignments.get("consumer20"));
        allActiveTasks.addAll(info20.activeTasks());
        allStandbyTasks.addAll(info20.standbyTasks().keySet());
        // all task ids are in the active tasks and also in the standby tasks
        assertEquals(3, allActiveTasks.size());
        assertEquals(allTasks, allActiveTasks);
        assertEquals(3, allStandbyTasks.size());
        assertEquals(allTasks, allStandbyTasks);
        // Check host partition assignments
        final Map<HostInfo, Set<TopicPartition>> partitionsByHost = info10.partitionsByHost();
        assertEquals(2, partitionsByHost.size());
        assertEquals(allTopicPartitions, partitionsByHost.values().stream()
            .flatMap(Collection::stream).collect(Collectors.toSet()));
        final Map<HostInfo, Set<TopicPartition>> standbyPartitionsByHost = info10.standbyPartitionByHost();
        assertEquals(2, standbyPartitionsByHost.size());
        assertEquals(allTopicPartitions, standbyPartitionsByHost.values().stream()
            .flatMap(Collection::stream).collect(Collectors.toSet()));
        // no host may own a partition both actively and as a standby
        for (final HostInfo hostInfo : partitionsByHost.keySet()) {
            assertTrue(Collections.disjoint(partitionsByHost.get(hostInfo), standbyPartitionsByHost.get(hostInfo)));
        }
        // All consumers got the same host info
        assertEquals(partitionsByHost, info11.partitionsByHost());
        assertEquals(partitionsByHost, info20.partitionsByHost());
        assertEquals(standbyPartitionsByHost, info11.standbyPartitionByHost());
        assertEquals(standbyPartitionsByHost, info20.standbyPartitionByHost());
    }
    // Seven consumers on two clients share only three tasks plus three standby replicas
    // (six work units total), so a balanced assignment must give every consumer at most
    // one unit of work.
    @ParameterizedTest
    @MethodSource("parameter")
    public void testAssignWithStandbyReplicasBalanceSparse(final Map<String, Object> parameterizedConfig) {
        setUp(parameterizedConfig, false);
        builder.addSource(null, "source1", null, null, null, "topic1");
        builder.addProcessor("processor", new MockApiProcessorSupplier<>(), "source1");
        builder.addStateStore(new MockKeyValueStoreBuilder("store1", false), "processor");
        final List<String> topics = singletonList("topic1");
        createMockTaskManager(EMPTY_TASKS, EMPTY_TASKS);
        adminClient = createMockAdminClientForAssignor(getTopicPartitionOffsetsMap(
            singletonList(APPLICATION_ID + "-store1-changelog"),
            singletonList(3)),
            true
        );
        // Changelog partitions are led by three distinct nodes.
        final Map<String, List<TopicPartitionInfo>> changelogTopicPartitionInfo = mkMap(
            mkEntry(APPLICATION_ID + "-store1-changelog",
                asList(
                    new TopicPartitionInfo(0, NODE_0, asList(REPLICA_0), asList(REPLICA_0)),
                    new TopicPartitionInfo(1, NODE_1, asList(REPLICA_1), asList(REPLICA_1)),
                    new TopicPartitionInfo(2, NODE_3, asList(REPLICA_3), asList(REPLICA_3))
                )
            )
        );
        configurePartitionAssignorWith(Collections.singletonMap(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1), singletonList(changelogTopicPartitionInfo), parameterizedConfig);
        // Four consumers on PID_1 and three on PID_2, all with no previous tasks.
        final List<String> client1Consumers = asList("consumer10", "consumer11", "consumer12", "consumer13");
        final List<String> client2Consumers = asList("consumer20", "consumer21", "consumer22");
        for (final String consumerId : client1Consumers) {
            subscriptions.put(consumerId,
                new Subscription(
                    topics,
                    getInfo(PID_1, EMPTY_TASKS, EMPTY_TASKS, USER_END_POINT).encode(),
                    emptyList(),
                    DEFAULT_GENERATION,
                    Optional.of(RACK_2)));
        }
        for (final String consumerId : client2Consumers) {
            subscriptions.put(consumerId,
                new Subscription(
                    topics,
                    getInfo(PID_2, EMPTY_TASKS, EMPTY_TASKS, USER_END_POINT).encode(),
                    emptyList(),
                    DEFAULT_GENERATION,
                    Optional.of(RACK_4)));
        }
        final Map<String, Assignment> assignments =
            partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
        // Consumers
        final AssignmentInfo info10 = AssignmentInfo.decode(assignments.get("consumer10").userData());
        final AssignmentInfo info11 = AssignmentInfo.decode(assignments.get("consumer11").userData());
        final AssignmentInfo info12 = AssignmentInfo.decode(assignments.get("consumer12").userData());
        final AssignmentInfo info13 = AssignmentInfo.decode(assignments.get("consumer13").userData());
        final AssignmentInfo info20 = AssignmentInfo.decode(assignments.get("consumer20").userData());
        final AssignmentInfo info21 = AssignmentInfo.decode(assignments.get("consumer21").userData());
        final AssignmentInfo info22 = AssignmentInfo.decode(assignments.get("consumer22").userData());
        // Check each consumer has no more than 1 task
        assertTrue(info10.activeTasks().size() + info10.standbyTasks().size() <= 1);
        assertTrue(info11.activeTasks().size() + info11.standbyTasks().size() <= 1);
        assertTrue(info12.activeTasks().size() + info12.standbyTasks().size() <= 1);
        assertTrue(info13.activeTasks().size() + info13.standbyTasks().size() <= 1);
        assertTrue(info20.activeTasks().size() + info20.standbyTasks().size() <= 1);
        assertTrue(info21.activeTasks().size() + info21.standbyTasks().size() <= 1);
        assertTrue(info22.activeTasks().size() + info22.standbyTasks().size() <= 1);
    }
    // Two single-consumer clients share three tasks plus three standby replicas: each
    // consumer must receive exactly three work units, and the active tasks must not all
    // pile up on one client.
    @ParameterizedTest
    @MethodSource("parameter")
    public void testAssignWithStandbyReplicasBalanceDense(final Map<String, Object> parameterizedConfig) {
        setUp(parameterizedConfig, false);
        builder.addSource(null, "source1", null, null, null, "topic1");
        builder.addProcessor("processor", new MockApiProcessorSupplier<>(), "source1");
        builder.addStateStore(new MockKeyValueStoreBuilder("store1", false), "processor");
        final List<String> topics = singletonList("topic1");
        createMockTaskManager(EMPTY_TASKS, EMPTY_TASKS);
        adminClient = createMockAdminClientForAssignor(getTopicPartitionOffsetsMap(
            singletonList(APPLICATION_ID + "-store1-changelog"),
            singletonList(3)),
            true
        );
        final List<Map<String, List<TopicPartitionInfo>>> changelogTopicPartitionInfo = getTopicPartitionInfo(
            3,
            singletonList(Set.of(APPLICATION_ID + "-store1-changelog")));
        configurePartitionAssignorWith(Collections.singletonMap(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1), changelogTopicPartitionInfo, parameterizedConfig);
        subscriptions.put("consumer10",
            new Subscription(
                topics,
                getInfo(PID_1, EMPTY_TASKS, EMPTY_TASKS, USER_END_POINT).encode(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_4)));
        subscriptions.put("consumer20",
            new Subscription(
                topics,
                getInfo(PID_2, EMPTY_TASKS, EMPTY_TASKS, USER_END_POINT).encode(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_4)));
        final Map<String, Assignment> assignments =
            partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
        // Consumers
        final AssignmentInfo info10 = AssignmentInfo.decode(assignments.get("consumer10").userData());
        final AssignmentInfo info20 = AssignmentInfo.decode(assignments.get("consumer20").userData());
        // Check each consumer has 3 tasks
        assertEquals(3, info10.activeTasks().size() + info10.standbyTasks().size());
        assertEquals(3, info20.activeTasks().size() + info20.standbyTasks().size());
        // Check that not all the actives are on one node
        assertTrue(info10.activeTasks().size() < 3);
        assertTrue(info20.activeTasks().size() < 3);
    }
    // Mixed topology: one stateful sub-topology (store1 on topic1) and one stateless
    // (topic2). With num.standby.replicas=1 only the stateful tasks gain standbys, giving
    // nine work units over four consumers: no consumer may exceed three units or hold
    // more than one standby.
    @ParameterizedTest
    @MethodSource("parameter")
    public void testAssignWithStandbyReplicasBalanceWithStatelessTasks(final Map<String, Object> parameterizedConfig) {
        setUp(parameterizedConfig, false);
        builder.addSource(null, "source1", null, null, null, "topic1");
        builder.addProcessor("processor_with_state", new MockApiProcessorSupplier<>(), "source1");
        builder.addStateStore(new MockKeyValueStoreBuilder("store1", false), "processor_with_state");
        builder.addSource(null, "source2", null, null, null, "topic2");
        builder.addProcessor("processor", new MockApiProcessorSupplier<>(), "source2");
        final List<String> topics = asList("topic1", "topic2");
        createMockTaskManager(EMPTY_TASKS, EMPTY_TASKS);
        adminClient = createMockAdminClientForAssignor(getTopicPartitionOffsetsMap(
            singletonList(APPLICATION_ID + "-store1-changelog"),
            singletonList(3)),
            true
        );
        final List<Map<String, List<TopicPartitionInfo>>> changelogTopicPartitionInfo = getTopicPartitionInfo(
            3,
            singletonList(Set.of(APPLICATION_ID + "-store1-changelog")));
        configurePartitionAssignorWith(Collections.singletonMap(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1), changelogTopicPartitionInfo, parameterizedConfig);
        // Two consumers per client, all with no previous tasks.
        subscriptions.put("consumer10",
            new Subscription(
                topics,
                getInfo(PID_1, EMPTY_TASKS, EMPTY_TASKS, USER_END_POINT).encode(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_0)));
        subscriptions.put("consumer11",
            new Subscription(
                topics,
                getInfo(PID_1, EMPTY_TASKS, EMPTY_TASKS, USER_END_POINT).encode(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_0)));
        subscriptions.put("consumer20",
            new Subscription(
                topics,
                getInfo(PID_2, EMPTY_TASKS, EMPTY_TASKS, USER_END_POINT).encode(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_2)));
        subscriptions.put("consumer21",
            new Subscription(
                topics,
                getInfo(PID_2, EMPTY_TASKS, EMPTY_TASKS, USER_END_POINT).encode(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_2)));
        final Map<String, Assignment> assignments =
            partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
        // Consumers
        final AssignmentInfo info10 = AssignmentInfo.decode(assignments.get("consumer10").userData());
        final AssignmentInfo info11 = AssignmentInfo.decode(assignments.get("consumer11").userData());
        final AssignmentInfo info20 = AssignmentInfo.decode(assignments.get("consumer20").userData());
        final AssignmentInfo info21 = AssignmentInfo.decode(assignments.get("consumer21").userData());
        // 9 tasks spread over 4 consumers, so we should have no more than 3 tasks per consumer
        assertTrue(info10.activeTasks().size() + info10.standbyTasks().size() <= 3);
        assertTrue(info11.activeTasks().size() + info11.standbyTasks().size() <= 3);
        assertTrue(info20.activeTasks().size() + info20.standbyTasks().size() <= 3);
        assertTrue(info21.activeTasks().size() + info21.standbyTasks().size() <= 3);
        // No more than 1 standby per node.
        assertTrue(info10.standbyTasks().size() <= 1);
        assertTrue(info11.standbyTasks().size() <= 1);
        assertTrue(info20.standbyTasks().size() <= 1);
        assertTrue(info21.standbyTasks().size() <= 1);
    }
    @ParameterizedTest
    @MethodSource("parameter")
    // Feeds a hand-built AssignmentInfo into onAssignment() and verifies it is decoded and
    // propagated: the host state reaches StreamsMetadataState.onChange, the active/standby task
    // split reaches TaskManager.handleAssignment, and the captured topic-partition-info map
    // contains exactly the two actively assigned partitions.
    public void testOnAssignment(final Map<String, Object> parameterizedConfig) {
        setUp(parameterizedConfig, false);
        taskManager = mock(TaskManager.class);
        final Map<HostInfo, Set<TopicPartition>> hostState = Collections.singletonMap(
            new HostInfo("localhost", 9090),
            Set.of(t3p0, t3p3));
        final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
        activeTasks.put(TASK_0_0, Set.of(t3p0));
        activeTasks.put(TASK_0_3, Set.of(t3p3));
        final Map<TaskId, Set<TopicPartition>> standbyTasks = new HashMap<>();
        standbyTasks.put(TASK_0_1, Set.of(t3p1));
        standbyTasks.put(TASK_0_2, Set.of(t3p2));
        streamsMetadataState = mock(StreamsMetadataState.class);
        configureDefaultPartitionAssignor(parameterizedConfig);
        final List<TaskId> activeTaskList = asList(TASK_0_0, TASK_0_3);
        final AssignmentInfo info = new AssignmentInfo(LATEST_SUPPORTED_VERSION, activeTaskList, standbyTasks, hostState, emptyMap(), 0);
        final Assignment assignment = new Assignment(asList(t3p0, t3p3), info.encode());
        partitionAssignor.onAssignment(assignment, null);
        verify(streamsMetadataState).onChange(eq(hostState), any(), topicPartitionInfoCaptor.capture());
        verify(taskManager).handleAssignment(activeTasks, standbyTasks);
        // Only the active partitions (t3p0, t3p3) should be reported, not standby ones.
        assertTrue(topicPartitionInfoCaptor.getValue().containsKey(t3p0));
        assertTrue(topicPartitionInfoCaptor.getValue().containsKey(t3p3));
        assertEquals(2, topicPartitionInfoCaptor.getValue().size());
    }
    @ParameterizedTest
    @MethodSource("parameter")
    // A topology writing to and reading from the internal topic "topicX" must cause the assignor
    // to prepare that repartition topic with one partition per task of the upstream sub-topology.
    public void testAssignWithInternalTopics(final Map<String, Object> parameterizedConfig) {
        setUp(parameterizedConfig, true);
        builder.addInternalTopic("topicX", InternalTopicProperties.empty());
        builder.addSource(null, "source1", null, null, null, "topic1");
        builder.addProcessor("processor1", new MockApiProcessorSupplier<>(), "source1");
        builder.addSink("sink1", "topicX", null, null, null, "processor1");
        builder.addSource(null, "source2", null, null, null, "topicX");
        builder.addProcessor("processor2", new MockApiProcessorSupplier<>(), "source2");
        final List<String> topics = asList("topic1", APPLICATION_ID + "-topicX");
        final Set<TaskId> allTasks = Set.of(TASK_0_0, TASK_0_1, TASK_0_2);
        createDefaultMockTaskManager();
        final List<Map<String, List<TopicPartitionInfo>>> changelogTopicPartitionInfo = getTopicPartitionInfo(
            4,
            singletonList(Set.of(APPLICATION_ID + "-topicX")));
        final MockInternalTopicManager internalTopicManager = configurePartitionAssignorWith(emptyMap(), changelogTopicPartitionInfo, parameterizedConfig);
        subscriptions.put("consumer10",
            new Subscription(
                topics,
                defaultSubscriptionInfo.encode(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_2))
        );
        partitionAssignor.assign(metadata, new GroupSubscription(subscriptions));
        // check prepared internal topics
        assertEquals(1, internalTopicManager.readyTopics.size());
        assertEquals(allTasks.size(), (long) internalTopicManager.readyTopics.get(APPLICATION_ID + "-topicX"));
    }
    @ParameterizedTest
    @MethodSource("parameter")
    // Chains two internal topics (topic1 -> topicX -> topicZ) and verifies the assignor prepares
    // both of them, sizing topicZ by the task count of the sub-topology that feeds it.
    public void testAssignWithInternalTopicThatsSourceIsAnotherInternalTopic(final Map<String, Object> parameterizedConfig) {
        setUp(parameterizedConfig, true);
        builder.addInternalTopic("topicX", InternalTopicProperties.empty());
        builder.addSource(null, "source1", null, null, null, "topic1");
        builder.addProcessor("processor1", new MockApiProcessorSupplier<>(), "source1");
        builder.addSink("sink1", "topicX", null, null, null, "processor1");
        builder.addSource(null, "source2", null, null, null, "topicX");
        builder.addInternalTopic("topicZ", InternalTopicProperties.empty());
        builder.addProcessor("processor2", new MockApiProcessorSupplier<>(), "source2");
        builder.addSink("sink2", "topicZ", null, null, null, "processor2");
        builder.addSource(null, "source3", null, null, null, "topicZ");
        final List<String> topics = asList("topic1", APPLICATION_ID + "-topicX", APPLICATION_ID + "-topicZ");
        final Set<TaskId> allTasks = Set.of(TASK_0_0, TASK_0_1, TASK_0_2);
        createDefaultMockTaskManager();
        final List<Map<String, List<TopicPartitionInfo>>> changelogTopicPartitionInfo = getTopicPartitionInfo(
            4,
            singletonList(Set.of(APPLICATION_ID + "-topicX", APPLICATION_ID + "-topicZ")));
        final MockInternalTopicManager internalTopicManager = configurePartitionAssignorWith(emptyMap(), changelogTopicPartitionInfo, parameterizedConfig);
        subscriptions.put("consumer10",
            new Subscription(
                topics,
                defaultSubscriptionInfo.encode(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_3))
        );
        partitionAssignor.assign(metadata, new GroupSubscription(subscriptions));
        // check prepared internal topics
        assertEquals(2, internalTopicManager.readyTopics.size());
        assertEquals(allTasks.size(), (long) internalTopicManager.readyTopics.get(APPLICATION_ID + "-topicZ"));
    }
    @ParameterizedTest
    @MethodSource("parameter")
    // Joins a 3-partition KStream with a 4-partition KTable aggregate. Co-partitioning
    // enforcement must expand the stream-side repartition topic to 4 partitions, all four
    // internal topics (two repartition + two changelog) must be created, and the single client
    // must be assigned every partition of the source and repartition topics.
    public void shouldGenerateTasksForAllCreatedPartitions(final Map<String, Object> parameterizedConfig) {
        setUp(parameterizedConfig, false);
        final StreamsBuilder streamsBuilder = new StreamsBuilder();
        // KStream with 3 partitions
        final KStream<Object, Object> stream1 = streamsBuilder
            .stream("topic1")
            // force creation of internal repartition topic
            .map((KeyValueMapper<Object, Object, KeyValue<Object, Object>>) KeyValue::new);
        // KTable with 4 partitions
        final KTable<Object, Long> table1 = streamsBuilder
            .table("topic3")
            // force creation of internal repartition topic
            .groupBy(KeyValue::new)
            .count();
        // joining the stream and the table
        // this triggers the enforceCopartitioning() routine in the StreamsPartitionAssignor,
        // forcing the stream.map to get repartitioned to a topic with four partitions.
        stream1.join(
            table1,
            (ValueJoiner<Object, Object, Void>) (value1, value2) -> null);
        final String client = "client1";
        builder = TopologyWrapper.getInternalTopologyBuilder(streamsBuilder.build());
        topologyMetadata = new TopologyMetadata(builder, new StreamsConfig(configProps(parameterizedConfig)));
        // Changelog topics already exist with 4 partitions each.
        adminClient = createMockAdminClientForAssignor(getTopicPartitionOffsetsMap(
            asList(APPLICATION_ID + "-topic3-STATE-STORE-0000000002-changelog",
                APPLICATION_ID + "-KTABLE-AGGREGATE-STATE-STORE-0000000006-changelog"),
            asList(4, 4)),
            true
        );
        createDefaultMockTaskManager();
        final List<Map<String, List<TopicPartitionInfo>>> topicPartitionInfo = getTopicPartitionInfo(
            4,
            asList(
                Set.of(
                    APPLICATION_ID + "-topic3-STATE-STORE-0000000002-changelog",
                    APPLICATION_ID + "-KTABLE-AGGREGATE-STATE-STORE-0000000006-changelog"
                ),
                Set.of(
                    APPLICATION_ID + "-KTABLE-AGGREGATE-STATE-STORE-0000000006-repartition",
                    APPLICATION_ID + "-KSTREAM-MAP-0000000001-repartition"
                )
            )
        );
        final MockInternalTopicManager mockInternalTopicManager = configurePartitionAssignorWith(emptyMap(), topicPartitionInfo, parameterizedConfig);
        subscriptions.put(client,
            new Subscription(
                asList("topic1", "topic3"),
                defaultSubscriptionInfo.encode(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_4))
        );
        final Map<String, Assignment> assignment =
            partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
        // All four internal topics must be sized to 4 partitions (copartitioning with topic3).
        final Map<String, Integer> expectedCreatedInternalTopics = new HashMap<>();
        expectedCreatedInternalTopics.put(APPLICATION_ID + "-KTABLE-AGGREGATE-STATE-STORE-0000000006-repartition", 4);
        expectedCreatedInternalTopics.put(APPLICATION_ID + "-KTABLE-AGGREGATE-STATE-STORE-0000000006-changelog", 4);
        expectedCreatedInternalTopics.put(APPLICATION_ID + "-topic3-STATE-STORE-0000000002-changelog", 4);
        expectedCreatedInternalTopics.put(APPLICATION_ID + "-KSTREAM-MAP-0000000001-repartition", 4);
        // check if all internal topics were created as expected
        assertThat(mockInternalTopicManager.readyTopics, equalTo(expectedCreatedInternalTopics));
        final List<TopicPartition> expectedAssignment = asList(
            new TopicPartition("topic1", 0),
            new TopicPartition("topic1", 1),
            new TopicPartition("topic1", 2),
            new TopicPartition("topic3", 0),
            new TopicPartition("topic3", 1),
            new TopicPartition("topic3", 2),
            new TopicPartition("topic3", 3),
            new TopicPartition(APPLICATION_ID + "-KTABLE-AGGREGATE-STATE-STORE-0000000006-repartition", 0),
            new TopicPartition(APPLICATION_ID + "-KTABLE-AGGREGATE-STATE-STORE-0000000006-repartition", 1),
            new TopicPartition(APPLICATION_ID + "-KTABLE-AGGREGATE-STATE-STORE-0000000006-repartition", 2),
            new TopicPartition(APPLICATION_ID + "-KTABLE-AGGREGATE-STATE-STORE-0000000006-repartition", 3),
            new TopicPartition(APPLICATION_ID + "-KSTREAM-MAP-0000000001-repartition", 0),
            new TopicPartition(APPLICATION_ID + "-KSTREAM-MAP-0000000001-repartition", 1),
            new TopicPartition(APPLICATION_ID + "-KSTREAM-MAP-0000000001-repartition", 2),
            new TopicPartition(APPLICATION_ID + "-KSTREAM-MAP-0000000001-repartition", 3)
        );
        // check if we created a task for all expected topicPartitions.
        assertThat(new HashSet<>(assignment.get(client).partitions()), equalTo(new HashSet<>(expectedAssignment)));
    }
    @ParameterizedTest
    @MethodSource("parameter")
    // If repartition-topic creation times out (the stubbed makeReady always throws), the
    // TimeoutException must propagate out of assign() instead of being swallowed.
    public void shouldThrowTimeoutExceptionWhenCreatingRepartitionTopicsTimesOut(final Map<String, Object> parameterizedConfig) {
        setUp(parameterizedConfig, false);
        final StreamsBuilder streamsBuilder = new StreamsBuilder();
        streamsBuilder.stream("topic1").repartition();
        final String client = "client1";
        builder = TopologyWrapper.getInternalTopologyBuilder(streamsBuilder.build());
        createDefaultMockTaskManager();
        partitionAssignor.configure(configProps(parameterizedConfig));
        final MockInternalTopicManager mockInternalTopicManager = new MockInternalTopicManager(
            time,
            new StreamsConfig(configProps(parameterizedConfig)),
            mockClientSupplier.restoreConsumer,
            false
        ) {
            @Override
            public Set<String> makeReady(final Map<String, InternalTopicConfig> topics) {
                // Simulate a broker-side timeout on any topic-creation attempt.
                throw new TimeoutException("KABOOM!");
            }
        };
        partitionAssignor.setInternalTopicManager(mockInternalTopicManager);
        subscriptions.put(client,
            new Subscription(
                singletonList("topic1"),
                defaultSubscriptionInfo.encode(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_2)
            )
        );
        assertThrows(TimeoutException.class, () -> partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)));
    }
    @ParameterizedTest
    @MethodSource("parameter")
    // If changelog-topic creation times out, assign() must fail with a TimeoutException. The
    // stubbed makeReady tolerates empty topic sets (so the repartition phase, which has nothing
    // to create here, passes) and throws only when real topics are requested.
    public void shouldThrowTimeoutExceptionWhenCreatingChangelogTopicsTimesOut(final Map<String, Object> parameterizedConfig) {
        setUp(parameterizedConfig, false);
        final StreamsConfig config = new StreamsConfig(configProps(parameterizedConfig));
        final StreamsBuilder streamsBuilder = new StreamsBuilder();
        streamsBuilder.table("topic1", Materialized.as("store"));
        final String client = "client1";
        builder = TopologyWrapper.getInternalTopologyBuilder(streamsBuilder.build());
        topologyMetadata = new TopologyMetadata(builder, config);
        createDefaultMockTaskManager();
        partitionAssignor.configure(configProps(parameterizedConfig));
        final MockInternalTopicManager mockInternalTopicManager = new MockInternalTopicManager(
            time,
            config,
            mockClientSupplier.restoreConsumer,
            false
        ) {
            @Override
            public Set<String> makeReady(final Map<String, InternalTopicConfig> topics) {
                if (topics.isEmpty()) {
                    return emptySet();
                }
                // First non-empty request is the changelog for "store": simulate a timeout.
                throw new TimeoutException("KABOOM!");
            }
        };
        partitionAssignor.setInternalTopicManager(mockInternalTopicManager);
        subscriptions.put(client,
            new Subscription(
                singletonList("topic1"),
                defaultSubscriptionInfo.encode(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_2)
            )
        );
        assertThrows(TimeoutException.class, () -> partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)));
    }
    @ParameterizedTest
    @MethodSource("parameter")
    // The application.server endpoint configured on the client must be embedded in the encoded
    // subscription user data so other members can learn this instance's host:port.
    public void shouldAddUserDefinedEndPointToSubscription(final Map<String, Object> parameterizedConfig) {
        setUp(parameterizedConfig, false);
        builder.addSource(null, "source", null, null, null, "input");
        builder.addProcessor("processor", new MockApiProcessorSupplier<>(), "source");
        builder.addSink("sink", "output", null, null, null, "processor");
        createDefaultMockTaskManager();
        configurePartitionAssignorWith(Collections.singletonMap(StreamsConfig.APPLICATION_SERVER_CONFIG, USER_END_POINT), parameterizedConfig);
        final Set<String> topics = Set.of("input");
        final ByteBuffer userData = partitionAssignor.subscriptionUserData(topics);
        final Subscription subscription =
            new Subscription(new ArrayList<>(topics), userData);
        // Round-trip through encode/decode and check the endpoint survived.
        final SubscriptionInfo subscriptionInfo = SubscriptionInfo.decode(subscription.userData());
        assertEquals("localhost:8080", subscriptionInfo.userEndPoint());
    }
    @ParameterizedTest
    @MethodSource("parameter")
    // With a single member advertising USER_END_POINT, the assignment's partitionsByHost map
    // must list all three partitions of topic1 under that member's HostInfo.
    public void shouldMapUserEndPointToTopicPartitions(final Map<String, Object> parameterizedConfig) {
        setUp(parameterizedConfig, true);
        builder.addSource(null, "source", null, null, null, "topic1");
        builder.addProcessor("processor", new MockApiProcessorSupplier<>(), "source");
        builder.addSink("sink", "output", null, null, null, "processor");
        final List<String> topics = Collections.singletonList("topic1");
        createDefaultMockTaskManager();
        configurePartitionAssignorWith(Collections.singletonMap(StreamsConfig.APPLICATION_SERVER_CONFIG, USER_END_POINT), parameterizedConfig);
        subscriptions.put("consumer1",
            new Subscription(
                topics,
                getInfo(PID_1, EMPTY_TASKS, EMPTY_TASKS, USER_END_POINT).encode(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_2)
            )
        );
        final Map<String, Assignment> assignments = partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
        final Assignment consumerAssignment = assignments.get("consumer1");
        final AssignmentInfo assignmentInfo = AssignmentInfo.decode(consumerAssignment.userData());
        final Set<TopicPartition> topicPartitions = assignmentInfo.partitionsByHost().get(new HostInfo("localhost", 8080));
        assertEquals(
            Set.of(
                new TopicPartition("topic1", 0),
                new TopicPartition("topic1", 1),
                new TopicPartition("topic1", 2)),
            topicPartitions);
    }
@ParameterizedTest
@MethodSource("parameter")
public void shouldThrowExceptionIfApplicationServerConfigIsNotHostPortPair(final Map<String, Object> parameterizedConfig) {
setUp(parameterizedConfig, false);
createDefaultMockTaskManager();
try {
configurePartitionAssignorWith(Collections.singletonMap(StreamsConfig.APPLICATION_SERVER_CONFIG, "localhost"), parameterizedConfig);
fail("expected to an exception due to invalid config");
} catch (final ConfigException e) {
// pass
}
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldThrowExceptionIfApplicationServerConfigPortIsNotAnInteger(final Map<String, Object> parameterizedConfig) {
setUp(parameterizedConfig, false);
createDefaultMockTaskManager();
assertThrows(ConfigException.class, () -> configurePartitionAssignorWith(Collections.singletonMap(StreamsConfig.APPLICATION_SERVER_CONFIG, "localhost:j87yhk"), parameterizedConfig));
}
    @SuppressWarnings("deprecation")
    @ParameterizedTest
    @MethodSource("parameter")
    // When a source topic ("unknownTopic") has no metadata, assignment must terminate (no
    // infinite loop resolving repartition-topic partition counts), create no internal topics,
    // and hand the subscribed client an empty assignment.
    public void shouldNotLoopInfinitelyOnMissingMetadataAndShouldNotCreateRelatedTasks(final Map<String, Object> parameterizedConfig) {
        setUp(parameterizedConfig, true);
        final StreamsBuilder streamsBuilder = new StreamsBuilder();
        final KStream<Object, Object> stream1 = streamsBuilder
            // Task 1 (should get created):
            .stream("topic1")
            // force repartitioning for aggregation
            .selectKey((key, value) -> null)
            .groupByKey()
            // Task 2 (should get created):
            // create repartitioning and changelog topic as task 1 exists
            .count(Materialized.as("count"))
            // force repartitioning for join, but second join input topic unknown
            // -> internal repartitioning topic should not get created
            .toStream()
            .map((KeyValueMapper<Object, Long, KeyValue<Object, Object>>) (key, value) -> null);
        streamsBuilder
            // Task 3 (should not get created because input topic unknown)
            .stream("unknownTopic")
            // force repartitioning for join, but input topic unknown
            // -> thus should not create internal repartitioning topic
            .selectKey((key, value) -> null)
            // Task 4 (should not get created because input topics unknown)
            // should not create any of both input repartition topics or any of both changelog topics
            .join(
                stream1,
                (ValueJoiner<Object, Object, Void>) (value1, value2) -> null,
                JoinWindows.of(ofMillis(0))
            );
        final String client = "client1";
        builder = TopologyWrapper.getInternalTopologyBuilder(streamsBuilder.build());
        final MockInternalTopicManager mockInternalTopicManager = configureDefault(parameterizedConfig);
        // Note: the client subscribes ONLY to the unknown topic.
        subscriptions.put(client,
            new Subscription(
                Collections.singletonList("unknownTopic"),
                defaultSubscriptionInfo.encode(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_2)
            )
        );
        final Map<String, Assignment> assignment = partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
        assertThat(mockInternalTopicManager.readyTopics.isEmpty(), equalTo(true));
        assertThat(assignment.get(client).partitions().isEmpty(), equalTo(true));
    }
    @ParameterizedTest
    @MethodSource("parameter")
    // Each onAssignment() call must forward the (possibly changed) host-to-partitions map to
    // StreamsMetadataState.onChange; both the initial and the updated host states are verified.
    public void shouldUpdateClusterMetadataAndHostInfoOnAssignment(final Map<String, Object> parameterizedConfig) {
        setUp(parameterizedConfig, false);
        final Map<HostInfo, Set<TopicPartition>> initialHostState = mkMap(
            mkEntry(new HostInfo("localhost", 9090), Set.of(t1p0, t1p1)),
            mkEntry(new HostInfo("otherhost", 9090), Set.of(t2p0, t2p1))
        );
        // Same partitions, but "otherhost" has been replaced by "newotherhost".
        final Map<HostInfo, Set<TopicPartition>> newHostState = mkMap(
            mkEntry(new HostInfo("localhost", 9090), Set.of(t1p0, t1p1)),
            mkEntry(new HostInfo("newotherhost", 9090), Set.of(t2p0, t2p1))
        );
        streamsMetadataState = mock(StreamsMetadataState.class);
        createDefaultMockTaskManager();
        configureDefaultPartitionAssignor(parameterizedConfig)__;
        partitionAssignor.onAssignment(createAssignment(initialHostState), null);
        partitionAssignor.onAssignment(createAssignment(newHostState), null);
        verify(streamsMetadataState).onChange(eq(initialHostState), any(), any());
        verify(streamsMetadataState).onChange(eq(newHostState), any(), any());
    }
    @ParameterizedTest
    @MethodSource("parameter")
    // This instance is configured as "newhost:9090". An assignment whose host map does not
    // contain that endpoint schedules an immediate rebalance (nextScheduledRebalanceMs == 0);
    // once the host map includes it, no rebalance is pending (Long.MAX_VALUE).
    public void shouldTriggerImmediateRebalanceOnHostInfoChange(final Map<String, Object> parameterizedConfig) {
        setUp(parameterizedConfig, false);
        final Map<HostInfo, Set<TopicPartition>> oldHostState = mkMap(
            mkEntry(new HostInfo("localhost", 9090), Set.of(t1p0, t1p1)),
            mkEntry(new HostInfo("otherhost", 9090), Set.of(t2p0, t2p1))
        );
        final Map<HostInfo, Set<TopicPartition>> newHostState = mkMap(
            mkEntry(new HostInfo("newhost", 9090), Set.of(t1p0, t1p1)),
            mkEntry(new HostInfo("otherhost", 9090), Set.of(t2p0, t2p1))
        );
        createDefaultMockTaskManager()__;
        configurePartitionAssignorWith(Collections.singletonMap(StreamsConfig.APPLICATION_SERVER_CONFIG, "newhost:9090"), parameterizedConfig);
        partitionAssignor.onAssignment(createAssignment(oldHostState), null);
        assertThat(referenceContainer.nextScheduledRebalanceMs.get(), is(0L));
        partitionAssignor.onAssignment(createAssignment(newHostState), null);
        assertThat(referenceContainer.nextScheduledRebalanceMs.get(), is(Long.MAX_VALUE));
    }
    @ParameterizedTest
    @MethodSource("parameter")
    // Two consumers on one process: consumer 1 owns all partitions, consumer 2 none. Balancing
    // must revoke at least one partition from consumer 1, consumer 2 must stay empty for now
    // (stateless revoked tasks are not converted to standbys), and processing the assignment on
    // consumer 2 must schedule an immediate follow-up rebalance.
    public void shouldTriggerImmediateRebalanceOnTasksRevoked(final Map<String, Object> parameterizedConfig) {
        setUp(parameterizedConfig, true);
        builder.addSource(null, "source1", null, null, null, "topic1");
        final Set<TaskId> allTasks = Set.of(TASK_0_0, TASK_0_1, TASK_0_2);
        final List<TopicPartition> allPartitions = asList(t1p0, t1p1, t1p2);
        subscriptions.put(CONSUMER_1,
            new Subscription(
                Collections.singletonList("topic1"),
                getInfo(PID_1, allTasks, EMPTY_TASKS).encode(),
                allPartitions,
                DEFAULT_GENERATION,
                Optional.of(RACK_0))
        );
        subscriptions.put(CONSUMER_2,
            new Subscription(
                Collections.singletonList("topic1"),
                getInfo(PID_1, EMPTY_TASKS, allTasks).encode(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_0))
        );
        createMockTaskManager(allTasks, allTasks);
        // Zero acceptable recovery lag forces strict stickiness-vs-balance behavior.
        configurePartitionAssignorWith(singletonMap(StreamsConfig.ACCEPTABLE_RECOVERY_LAG_CONFIG, 0L), parameterizedConfig);
        final Map<String, Assignment> assignment = partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
        // Verify at least one partition was revoked
        assertThat(assignment.get(CONSUMER_1).partitions(), not(allPartitions));
        assertThat(assignment.get(CONSUMER_2).partitions(), equalTo(emptyList()));
        // Verify that stateless revoked tasks would not be assigned as standbys
        assertThat(AssignmentInfo.decode(assignment.get(CONSUMER_2).userData()).activeTasks(), equalTo(emptyList()));
        assertThat(AssignmentInfo.decode(assignment.get(CONSUMER_2).userData()).standbyTasks(), equalTo(emptyMap()));
        partitionAssignor.onAssignment(assignment.get(CONSUMER_2), null);
        assertThat(referenceContainer.nextScheduledRebalanceMs.get(), is(0L));
    }
    @ParameterizedTest
    @MethodSource("parameter")
    // With one standby replica and two hosts, partitionsByHost must contain only active
    // partitions; standby partitions go into standbyPartitionByHost. Since each host stands by
    // for the other's active tasks, each host's standby set equals the other's active set, and
    // the active sets together cover all partitions.
    public void shouldNotAddStandbyTaskPartitionsToPartitionsForHost(final Map<String, Object> parameterizedConfig) {
        setUp(parameterizedConfig, false);
        final Map<String, Object> props = configProps(parameterizedConfig);
        props.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1);
        props.put(StreamsConfig.APPLICATION_SERVER_CONFIG, USER_END_POINT);
        final StreamsBuilder streamsBuilder = new StreamsBuilder();
        streamsBuilder.stream("topic1").groupByKey().count();
        builder = TopologyWrapper.getInternalTopologyBuilder(streamsBuilder.build());
        topologyMetadata = new TopologyMetadata(builder, new StreamsConfig(props));
        createDefaultMockTaskManager();
        adminClient = createMockAdminClientForAssignor(getTopicPartitionOffsetsMap(
            singletonList(APPLICATION_ID + "-KSTREAM-AGGREGATE-STATE-STORE-0000000001-changelog"),
            singletonList(3)),
            true
        );
        final List<Map<String, List<TopicPartitionInfo>>> changelogTopicPartitionInfo = getTopicPartitionInfo(
            3, singletonList(Set.of(APPLICATION_ID + "-KSTREAM-AGGREGATE-STATE-STORE-0000000001-changelog")));
        configurePartitionAssignorWith(props, changelogTopicPartitionInfo, parameterizedConfig);
        subscriptions.put("consumer1",
            new Subscription(
                Collections.singletonList("topic1"),
                getInfo(PID_1, EMPTY_TASKS, EMPTY_TASKS, USER_END_POINT).encode(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_3))
        );
        subscriptions.put("consumer2",
            new Subscription(
                Collections.singletonList("topic1"),
                getInfo(PID_2, EMPTY_TASKS, EMPTY_TASKS, OTHER_END_POINT).encode(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_1))
        );
        final Set<TopicPartition> allPartitions = Set.of(t1p0, t1p1, t1p2);
        final Map<String, Assignment> assign = partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
        final Assignment consumer1Assignment = assign.get("consumer1");
        final AssignmentInfo assignmentInfo = AssignmentInfo.decode(consumer1Assignment.userData());
        final Set<TopicPartition> consumer1ActivePartitions = assignmentInfo.partitionsByHost().get(new HostInfo("localhost", 8080));
        final Set<TopicPartition> consumer2ActivePartitions = assignmentInfo.partitionsByHost().get(new HostInfo("other", 9090));
        final Set<TopicPartition> consumer1StandbyPartitions = assignmentInfo.standbyPartitionByHost().get(new HostInfo("localhost", 8080));
        final Set<TopicPartition> consumer2StandbyPartitions = assignmentInfo.standbyPartitionByHost().get(new HostInfo("other", 9090));
        final HashSet<TopicPartition> allAssignedPartitions = new HashSet<>(consumer1ActivePartitions);
        allAssignedPartitions.addAll(consumer2ActivePartitions);
        assertThat(consumer1ActivePartitions, not(allPartitions));
        assertThat(consumer2ActivePartitions, not(allPartitions));
        assertThat(consumer1ActivePartitions, equalTo(consumer2StandbyPartitions));
        assertThat(consumer2ActivePartitions, equalTo(consumer1StandbyPartitions));
        assertThat(allAssignedPartitions, equalTo(allPartitions));
    }
@ParameterizedTest
@MethodSource("parameter")
public void shouldThrowKafkaExceptionIfReferenceContainerNotConfigured(final Map<String, Object> parameterizedConfig) {
setUp(parameterizedConfig, false);
final Map<String, Object> config = configProps(parameterizedConfig);
config.remove(InternalConfig.REFERENCE_CONTAINER_PARTITION_ASSIGNOR);
final KafkaException expected = assertThrows(
KafkaException.class,
() -> partitionAssignor.configure(config)
);
assertThat(expected.getMessage(), equalTo("ReferenceContainer is not specified"));
}
    @ParameterizedTest
    @MethodSource("parameter")
    // Supplying a value of the wrong type for the reference-container config entry must fail
    // configuration with a KafkaException naming both the actual and the expected type.
    public void shouldThrowKafkaExceptionIfReferenceContainerConfigIsNotTaskManagerInstance(final Map<String, Object> parameterizedConfig) {
        setUp(parameterizedConfig, false);
        final Map<String, Object> config = configProps(parameterizedConfig);
        config.put(InternalConfig.REFERENCE_CONTAINER_PARTITION_ASSIGNOR, "i am not a reference container");
        final KafkaException expected = assertThrows(
            KafkaException.class,
            () -> partitionAssignor.configure(config)
        );
        assertThat(
            expected.getMessage(),
            equalTo("java.lang.String is not an instance of org.apache.kafka.streams.processor.internals.assignment.ReferenceContainer")
        );
    }
    @ParameterizedTest
    @MethodSource("parameter")
    // Version-probing downgrade: mixed v1/v2 subscriptions must yield a v1 assignment.
    public void shouldReturnLowestAssignmentVersionForDifferentSubscriptionVersionsV1V2(final Map<String, Object> parameterizedConfig) {
        setUp(parameterizedConfig, true);
        shouldReturnLowestAssignmentVersionForDifferentSubscriptionVersions(1, 2, parameterizedConfig);
    }
    @ParameterizedTest
    @MethodSource("parameter")
    // Version-probing downgrade: mixed v1/v3 subscriptions must yield a v1 assignment.
    public void shouldReturnLowestAssignmentVersionForDifferentSubscriptionVersionsV1V3(final Map<String, Object> parameterizedConfig) {
        setUp(parameterizedConfig, true);
        shouldReturnLowestAssignmentVersionForDifferentSubscriptionVersions(1, 3, parameterizedConfig);
    }
    @ParameterizedTest
    @MethodSource("parameter")
    // Version-probing downgrade: mixed v2/v3 subscriptions must yield a v2 assignment.
    public void shouldReturnLowestAssignmentVersionForDifferentSubscriptionVersionsV2V3(final Map<String, Object> parameterizedConfig) {
        setUp(parameterizedConfig, true);
        shouldReturnLowestAssignmentVersionForDifferentSubscriptionVersions(2, 3, parameterizedConfig);
    }
    // Shared driver for the three version-probing tests above: subscribes one consumer at the
    // smaller metadata version and one at the larger, then asserts both assignments are encoded
    // at the smaller version (the group must downgrade to the lowest common version).
    private void shouldReturnLowestAssignmentVersionForDifferentSubscriptionVersions(final int smallestVersion,
                                                                                     final int otherVersion,
                                                                                     final Map<String, Object> parameterizedObject) {
        subscriptions.put("consumer1",
            new Subscription(
                Collections.singletonList("topic1"),
                getInfoForOlderVersion(smallestVersion,
                    PID_1, EMPTY_TASKS, EMPTY_TASKS).encode(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_2))
        );
        subscriptions.put("consumer2",
            new Subscription(
                Collections.singletonList("topic1"),
                getInfoForOlderVersion(otherVersion, PID_2, EMPTY_TASKS, EMPTY_TASKS).encode(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_1)
            )
        );
        configureDefault(parameterizedObject);
        final Map<String, Assignment> assignment = partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
        assertThat(assignment.size(), equalTo(2));
        assertThat(AssignmentInfo.decode(assignment.get("consumer1").userData()).version(), equalTo(smallestVersion));
        assertThat(AssignmentInfo.decode(assignment.get("consumer2").userData()).version(), equalTo(smallestVersion));
    }
    @ParameterizedTest
    @MethodSource("parameter")
    // When a new consumer joins while another still owns all partitions, the newcomer gets an
    // empty (but well-formed, error-free) assignment until the incumbent revokes — cooperative
    // rebalancing must never move a partition in a single rebalance round.
    public void shouldReturnInterleavedAssignmentWithUnrevokedPartitionsRemovedWhenNewConsumerJoins(final Map<String, Object> parameterizedConfig) {
        setUp(parameterizedConfig, true);
        builder.addSource(null, "source1", null, null, null, "topic1");
        final Set<TaskId> allTasks = Set.of(TASK_0_0, TASK_0_1, TASK_0_2);
        subscriptions.put(
            CONSUMER_1,
            new Subscription(
                Collections.singletonList("topic1"),
                getInfo(PID_1, allTasks, EMPTY_TASKS).encode(),
                asList(t1p0, t1p1, t1p2),
                DEFAULT_GENERATION,
                Optional.of(RACK_1)
            )
        );
        subscriptions.put(
            CONSUMER_2,
            new Subscription(
                Collections.singletonList("topic1"),
                getInfo(PID_2, EMPTY_TASKS, EMPTY_TASKS).encode(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_2)
            )
        );
        createMockTaskManager(allTasks, allTasks);
        configureDefaultPartitionAssignor(parameterizedConfig);
        final Map<String, Assignment> assignment = partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
        assertThat(assignment.size(), equalTo(2));
        // The new consumer's assignment should be empty until c1 has the chance to revoke its partitions/tasks
        assertThat(assignment.get(CONSUMER_2).partitions(), equalTo(emptyList()));
        final AssignmentInfo actualAssignment = AssignmentInfo.decode(assignment.get(CONSUMER_2).userData());
        assertThat(actualAssignment.version(), is(LATEST_SUPPORTED_VERSION));
        assertThat(actualAssignment.activeTasks(), empty());
        // Note we're not asserting anything about standbys. If the assignor gave an active task to CONSUMER_2, it would
        // be converted to a standby, but we don't know whether the assignor will do that.
        assertThat(actualAssignment.partitionsByHost(), anEmptyMap());
        assertThat(actualAssignment.standbyPartitionByHost(), anEmptyMap());
        assertThat(actualAssignment.errCode(), is(0));
    }
    @ParameterizedTest
    @MethodSource("parameter")
    // When every member advertises a future (unknown) subscription version, the assignor still
    // produces a complete interleaved assignment at the latest supported version: three tasks
    // split 2/1 between the two consumers, with exact task-to-partition mappings asserted.
    public void shouldReturnInterleavedAssignmentForOnlyFutureInstancesDuringVersionProbing(final Map<String, Object> parameterizedConfig) {
        setUp(parameterizedConfig, true);
        builder.addSource(null, "source1", null, null, null, "topic1");
        final Set<TaskId> allTasks = Set.of(TASK_0_0, TASK_0_1, TASK_0_2);
        subscriptions.put(CONSUMER_1,
            new Subscription(
                Collections.singletonList("topic1"),
                encodeFutureSubscription(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_1)
            )
        );
        subscriptions.put(CONSUMER_2,
            new Subscription(
                Collections.singletonList("topic1"),
                encodeFutureSubscription(),
                emptyList(),
                DEFAULT_GENERATION,
                Optional.of(RACK_1)
            )
        );
        createMockTaskManager(allTasks, allTasks);
        configurePartitionAssignorWith(Collections.singletonMap(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1), parameterizedConfig);
        final Map<String, Assignment> assignment =
            partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
        assertThat(assignment.size(), equalTo(2));
        assertThat(assignment.get(CONSUMER_1).partitions(), equalTo(asList(t1p0, t1p2)));
        assertThat(
            AssignmentInfo.decode(assignment.get(CONSUMER_1).userData()),
            equalTo(new AssignmentInfo(LATEST_SUPPORTED_VERSION, asList(TASK_0_0, TASK_0_2), emptyMap(), emptyMap(), emptyMap(), 0)));
        assertThat(assignment.get(CONSUMER_2).partitions(), equalTo(Collections.singletonList(t1p1)));
        assertThat(
            AssignmentInfo.decode(assignment.get(CONSUMER_2).userData()),
            equalTo(new AssignmentInfo(LATEST_SUPPORTED_VERSION, Collections.singletonList(TASK_0_1), emptyMap(), emptyMap(), emptyMap(), 0)));
    }
    @ParameterizedTest
    @MethodSource("parameter")
    // Mixing a v1 subscription with a future-version one must encode an assignment error.
    public void shouldEncodeAssignmentErrorIfV1SubscriptionAndFutureSubscriptionIsMixed(final Map<String, Object> parameterizedConfig) {
        setUp(parameterizedConfig, false);
        shouldEncodeAssignmentErrorIfPreVersionProbingSubscriptionAndFutureSubscriptionIsMixed(1, parameterizedConfig);
    }
    @ParameterizedTest
    @MethodSource("parameter")
    // Mixing a v2 subscription with a future-version one must encode an assignment error.
    public void shouldEncodeAssignmentErrorIfV2SubscriptionAndFutureSubscriptionIsMixed(final Map<String, Object> parameterizedConfig) {
        setUp(parameterizedConfig, false);
        shouldEncodeAssignmentErrorIfPreVersionProbingSubscriptionAndFutureSubscriptionIsMixed(2, parameterizedConfig);
    }
@ParameterizedTest
@MethodSource("parameter")
public void shouldNotFailOnBranchedMultiLevelRepartitionConnectedTopology(final Map<String, Object> parameterizedConfig) {
setUp(parameterizedConfig, true);
// Test out a topology with 3 level of sub-topology as:
// 0
// / \
// 1 3
// \ /
// 2
// where each pair of the sub topology is connected by repartition topic.
// The purpose of this test is to verify the robustness of the stream partition assignor algorithm,
// especially whether it could build the repartition topic counts (step zero) with a complex topology.
// The traversal path 0 -> 1 -> 2 -> 3 hits the case where sub-topology 2 will be initialized while its
// parent 3 hasn't been initialized yet.
builder.addSource(null, "KSTREAM-SOURCE-0000000000", null, null, null, "input-stream");
builder.addProcessor("KSTREAM-FLATMAPVALUES-0000000001", new MockApiProcessorSupplier<>(), "KSTREAM-SOURCE-0000000000");
builder.addProcessor("KSTREAM-BRANCH-0000000002", new MockApiProcessorSupplier<>(), "KSTREAM-FLATMAPVALUES-0000000001");
builder.addProcessor("KSTREAM-BRANCHCHILD-0000000003", new MockApiProcessorSupplier<>(), "KSTREAM-BRANCH-0000000002");
builder.addProcessor("KSTREAM-BRANCHCHILD-0000000004", new MockApiProcessorSupplier<>(), "KSTREAM-BRANCH-0000000002");
builder.addProcessor("KSTREAM-MAP-0000000005", new MockApiProcessorSupplier<>(), "KSTREAM-BRANCHCHILD-0000000003");
builder.addInternalTopic("odd_store-repartition", InternalTopicProperties.empty());
builder.addProcessor("odd_store-repartition-filter", new MockApiProcessorSupplier<>(), "KSTREAM-MAP-0000000005");
builder.addSink("odd_store-repartition-sink", "odd_store-repartition", null, null, null, "odd_store-repartition-filter");
builder.addSource(null, "odd_store-repartition-source", null, null, null, "odd_store-repartition");
builder.addProcessor("KSTREAM-REDUCE-0000000006", new MockApiProcessorSupplier<>(), "odd_store-repartition-source");
builder.addProcessor("KTABLE-TOSTREAM-0000000010", new MockApiProcessorSupplier<>(), "KSTREAM-REDUCE-0000000006");
builder.addProcessor("KSTREAM-PEEK-0000000011", new MockApiProcessorSupplier<>(), "KTABLE-TOSTREAM-0000000010");
builder.addProcessor("KSTREAM-MAP-0000000012", new MockApiProcessorSupplier<>(), "KSTREAM-PEEK-0000000011");
builder.addInternalTopic("odd_store_2-repartition", InternalTopicProperties.empty());
builder.addProcessor("odd_store_2-repartition-filter", new MockApiProcessorSupplier<>(), "KSTREAM-MAP-0000000012");
builder.addSink("odd_store_2-repartition-sink", "odd_store_2-repartition", null, null, null, "odd_store_2-repartition-filter");
builder.addSource(null, "odd_store_2-repartition-source", null, null, null, "odd_store_2-repartition");
builder.addProcessor("KSTREAM-REDUCE-0000000013", new MockApiProcessorSupplier<>(), "odd_store_2-repartition-source");
builder.addProcessor("KSTREAM-MAP-0000000017", new MockApiProcessorSupplier<>(), "KSTREAM-BRANCHCHILD-0000000004");
builder.addInternalTopic("even_store-repartition", InternalTopicProperties.empty());
builder.addProcessor("even_store-repartition-filter", new MockApiProcessorSupplier<>(), "KSTREAM-MAP-0000000017");
builder.addSink("even_store-repartition-sink", "even_store-repartition", null, null, null, "even_store-repartition-filter");
builder.addSource(null, "even_store-repartition-source", null, null, null, "even_store-repartition");
builder.addProcessor("KSTREAM-REDUCE-0000000018", new MockApiProcessorSupplier<>(), "even_store-repartition-source");
builder.addProcessor("KTABLE-TOSTREAM-0000000022", new MockApiProcessorSupplier<>(), "KSTREAM-REDUCE-0000000018");
builder.addProcessor("KSTREAM-PEEK-0000000023", new MockApiProcessorSupplier<>(), "KTABLE-TOSTREAM-0000000022");
builder.addProcessor("KSTREAM-MAP-0000000024", new MockApiProcessorSupplier<>(), "KSTREAM-PEEK-0000000023");
builder.addInternalTopic("even_store_2-repartition", InternalTopicProperties.empty());
builder.addProcessor("even_store_2-repartition-filter", new MockApiProcessorSupplier<>(), "KSTREAM-MAP-0000000024");
builder.addSink("even_store_2-repartition-sink", "even_store_2-repartition", null, null, null, "even_store_2-repartition-filter");
builder.addSource(null, "even_store_2-repartition-source", null, null, null, "even_store_2-repartition");
builder.addProcessor("KSTREAM-REDUCE-0000000025", new MockApiProcessorSupplier<>(), "even_store_2-repartition-source");
builder.addProcessor("KTABLE-JOINTHIS-0000000030", new MockApiProcessorSupplier<>(), "KSTREAM-REDUCE-0000000013");
builder.addProcessor("KTABLE-JOINOTHER-0000000031", new MockApiProcessorSupplier<>(), "KSTREAM-REDUCE-0000000025");
builder.addProcessor("KTABLE-MERGE-0000000029", new MockApiProcessorSupplier<>(), "KTABLE-JOINTHIS-0000000030", "KTABLE-JOINOTHER-0000000031");
builder.addProcessor("KTABLE-TOSTREAM-0000000032", new MockApiProcessorSupplier<>(), "KTABLE-MERGE-0000000029");
final List<String> topics = asList("input-stream", "test-even_store-repartition", "test-even_store_2-repartition", "test-odd_store-repartition", "test-odd_store_2-repartition");
createDefaultMockTaskManager();
final List<Map<String, List<TopicPartitionInfo>>> repartitionTopics = getTopicPartitionInfo(
4,
asList(
Set.of(APPLICATION_ID + "-odd_store-repartition"),
Set.of(
APPLICATION_ID + "-odd_store-repartition",
APPLICATION_ID + "-odd_store_2-repartition",
APPLICATION_ID + "-even_store-repartition",
APPLICATION_ID + "-even_store_2-repartition"
)
)
);
configurePartitionAssignorWith(emptyMap(), repartitionTopics, parameterizedConfig);
subscriptions.put("consumer10",
new Subscription(
topics,
defaultSubscriptionInfo.encode(),
emptyList(),
DEFAULT_GENERATION,
Optional.of(RACK_0))
);
final Cluster metadata = new Cluster(
"cluster",
asList(NODE_0, NODE_1, NODE_3),
Collections.singletonList(new PartitionInfo("input-stream", 0, NODE_0, REPLICA_0, REPLICA_0)),
emptySet(),
emptySet());
// This shall fail if we have bugs in the repartition topic creation due to the inconsistent order of sub-topologies.
partitionAssignor.assign(metadata, new GroupSubscription(subscriptions));
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldGetAssignmentConfigs(final Map<String, Object> parameterizedConfig) {
setUp(parameterizedConfig, false);
createDefaultMockTaskManager();
final Map<String, Object> props = configProps(parameterizedConfig);
props.put(StreamsConfig.ACCEPTABLE_RECOVERY_LAG_CONFIG, 11);
props.put(StreamsConfig.MAX_WARMUP_REPLICAS_CONFIG, 33);
props.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 44);
props.put(StreamsConfig.PROBING_REBALANCE_INTERVAL_MS_CONFIG, 55 * 60 * 1000L);
partitionAssignor.configure(props);
assertThat(partitionAssignor.acceptableRecoveryLag(), equalTo(11L));
assertThat(partitionAssignor.maxWarmupReplicas(), equalTo(33));
assertThat(partitionAssignor.numStandbyReplicas(), equalTo(44));
assertThat(partitionAssignor.probingRebalanceIntervalMs(), equalTo(55 * 60 * 1000L));
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldGetTime(final Map<String, Object> parameterizedConfig) {
setUp(parameterizedConfig, false);
time.setCurrentTimeMs(Long.MAX_VALUE);
createDefaultMockTaskManager();
final Map<String, Object> props = configProps(parameterizedConfig);
final AssignorConfiguration assignorConfiguration = new AssignorConfiguration(props);
assertThat(assignorConfiguration.referenceContainer().time.milliseconds(), equalTo(Long.MAX_VALUE));
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldThrowIllegalStateExceptionIfAnyPartitionsMissingFromChangelogEndOffsets(final Map<String, Object> parameterizedConfig) {
setUp(parameterizedConfig, false);
final int changelogNumPartitions = 3;
builder.addSource(null, "source1", null, null, null, "topic1");
builder.addProcessor("processor1", new MockApiProcessorSupplier<>(), "source1");
builder.addStateStore(new MockKeyValueStoreBuilder("store1", false), "processor1");
adminClient = createMockAdminClientForAssignor(getTopicPartitionOffsetsMap(
singletonList(APPLICATION_ID + "-store1-changelog"),
singletonList(changelogNumPartitions - 1)),
true
);
createDefaultMockTaskManager();
final List<Map<String, List<TopicPartitionInfo>>> changelogTopicPartitionInfo = getTopicPartitionInfo(
changelogNumPartitions - 1,
singletonList(Set.of(APPLICATION_ID + "-store1-changelog")));
configurePartitionAssignorWith(emptyMap(), changelogTopicPartitionInfo, parameterizedConfig);
subscriptions.put("consumer10",
new Subscription(
singletonList("topic1"),
defaultSubscriptionInfo.encode(),
emptyList(),
DEFAULT_GENERATION,
Optional.of(RACK_1)
));
assertThrows(IllegalStateException.class, () -> partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)));
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldThrowIllegalStateExceptionIfAnyTopicsMissingFromChangelogEndOffsets(final Map<String, Object> parameterizedConfig) {
setUp(parameterizedConfig, false);
builder.addSource(null, "source1", null, null, null, "topic1");
builder.addProcessor("processor1", new MockApiProcessorSupplier<>(), "source1");
builder.addStateStore(new MockKeyValueStoreBuilder("store1", false), "processor1");
builder.addStateStore(new MockKeyValueStoreBuilder("store2", false), "processor1");
adminClient = createMockAdminClientForAssignor(getTopicPartitionOffsetsMap(
singletonList(APPLICATION_ID + "-store1-changelog"),
singletonList(3)),
true
);
createDefaultMockTaskManager();
final List<Map<String, List<TopicPartitionInfo>>> changelogTopicPartitionInfo = getTopicPartitionInfo(
3,
singletonList(Set.of(APPLICATION_ID + "-store1-changelog")));
configurePartitionAssignorWith(emptyMap(), changelogTopicPartitionInfo, parameterizedConfig);
subscriptions.put("consumer10",
new Subscription(
singletonList("topic1"),
defaultSubscriptionInfo.encode(),
emptyList(),
DEFAULT_GENERATION,
Optional.of(RACK_3)
));
assertThrows(IllegalStateException.class, () -> partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)));
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldSkipListOffsetsRequestForNewlyCreatedChangelogTopics(final Map<String, Object> parameterizedConfig) {
setUp(parameterizedConfig, false);
adminClient = mock(AdminClient.class);
final ListOffsetsResult result = mock(ListOffsetsResult.class);
final KafkaFutureImpl<Map<TopicPartition, ListOffsetsResultInfo>> allFuture = new KafkaFutureImpl<>();
allFuture.complete(emptyMap());
when(adminClient.listOffsets(emptyMap())).thenReturn(result);
builder.addSource(null, "source1", null, null, null, "topic1");
builder.addProcessor("processor1", new MockApiProcessorSupplier<>(), "source1");
builder.addStateStore(new MockKeyValueStoreBuilder("store1", false), "processor1");
subscriptions.put("consumer10",
new Subscription(
singletonList("topic1"),
defaultSubscriptionInfo.encode(),
emptyList(),
DEFAULT_GENERATION,
Optional.of(RACK_4)
));
configureDefault(parameterizedConfig);
final List<Map<String, List<TopicPartitionInfo>>> partitionInfo = singletonList(mkMap(mkEntry(
"stream-partition-assignor-test-store-changelog",
singletonList(
new TopicPartitionInfo(
0,
new Node(1, "h1", 80),
singletonList(new Node(1, "h1", 80)),
emptyList()
)
)
)
));
overwriteInternalTopicManagerWithMock(true, partitionInfo, parameterizedConfig);
partitionAssignor.assign(metadata, new GroupSubscription(subscriptions));
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldRequestEndOffsetsForPreexistingChangelogs(final Map<String, Object> parameterizedConfig) {
setUp(parameterizedConfig, false);
final Set<TopicPartition> changelogs = Set.of(
new TopicPartition(APPLICATION_ID + "-store-changelog", 0),
new TopicPartition(APPLICATION_ID + "-store-changelog", 1),
new TopicPartition(APPLICATION_ID + "-store-changelog", 2)
);
adminClient = mock(AdminClient.class);
final ListOffsetsResult result = mock(ListOffsetsResult.class);
for (final TopicPartition entry : changelogs) {
final KafkaFutureImpl<ListOffsetsResultInfo> partitionFuture = new KafkaFutureImpl<>();
final ListOffsetsResultInfo info = mock(ListOffsetsResultInfo.class);
when(info.offset()).thenReturn(Long.MAX_VALUE);
partitionFuture.complete(info);
when(result.partitionResult(entry)).thenReturn(partitionFuture);
}
@SuppressWarnings("unchecked")
final ArgumentCaptor<Map<TopicPartition, OffsetSpec>> capturedChangelogs = ArgumentCaptor.forClass(Map.class);
when(adminClient.listOffsets(capturedChangelogs.capture())).thenReturn(result);
builder.addSource(null, "source1", null, null, null, "topic1");
builder.addProcessor("processor1", new MockApiProcessorSupplier<>(), "source1");
builder.addStateStore(new MockKeyValueStoreBuilder("store", false), "processor1");
subscriptions.put("consumer10",
new Subscription(
singletonList("topic1"),
defaultSubscriptionInfo.encode(),
emptyList(),
DEFAULT_GENERATION,
Optional.of(RACK_3)
));
configureDefault(parameterizedConfig);
final List<Map<String, List<TopicPartitionInfo>>> partitionInfo = singletonList(mkMap(mkEntry(
"stream-partition-assignor-test-store-changelog",
singletonList(
new TopicPartitionInfo(
0,
new Node(1, "h1", 80),
singletonList(new Node(1, "h1", 80)),
emptyList()
)
)
)
));
overwriteInternalTopicManagerWithMock(false, partitionInfo, parameterizedConfig);
partitionAssignor.assign(metadata, new GroupSubscription(subscriptions));
assertThat(
capturedChangelogs.getValue().keySet(),
equalTo(changelogs)
);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldRequestCommittedOffsetsForPreexistingSourceChangelogs(final Map<String, Object> parameterizedConfig) {
setUp(parameterizedConfig, true);
final Set<TopicPartition> changelogs = Set.of(
new TopicPartition("topic1", 0),
new TopicPartition("topic1", 1),
new TopicPartition("topic1", 2)
);
final StreamsBuilder streamsBuilder = new StreamsBuilder();
streamsBuilder.table("topic1", Materialized.as("store"));
final Properties props = new Properties();
props.putAll(configProps(parameterizedConfig));
props.put(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, StreamsConfig.OPTIMIZE);
builder = TopologyWrapper.getInternalTopologyBuilder(streamsBuilder.build(props));
topologyMetadata = new TopologyMetadata(builder, new StreamsConfig(props));
subscriptions.put("consumer10",
new Subscription(
singletonList("topic1"),
defaultSubscriptionInfo.encode(),
emptyList(),
DEFAULT_GENERATION,
Optional.of(RACK_3)
));
createDefaultMockTaskManager();
configurePartitionAssignorWith(singletonMap(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, StreamsConfig.OPTIMIZE), parameterizedConfig);
overwriteInternalTopicManagerWithMock(false, parameterizedConfig);
final Consumer<byte[], byte[]> consumerClient = referenceContainer.mainConsumer;
when(consumerClient.committed(changelogs))
.thenReturn(changelogs.stream().collect(Collectors.toMap(tp -> tp, tp -> new OffsetAndMetadata(Long.MAX_VALUE))));
partitionAssignor.assign(metadata, new GroupSubscription(subscriptions));
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldEncodeMissingSourceTopicError(final Map<String, Object> parameterizedConfig) {
setUp(parameterizedConfig, false);
final Cluster emptyClusterMetadata = new Cluster(
"cluster",
Collections.singletonList(Node.noNode()),
emptyList(),
emptySet(),
emptySet()
);
builder.addSource(null, "source1", null, null, null, "topic1");
configureDefault(parameterizedConfig);
subscriptions.put("consumer",
new Subscription(
singletonList("topic"),
defaultSubscriptionInfo.encode(),
emptyList(),
DEFAULT_GENERATION,
Optional.of(RACK_0)
));
final Map<String, Assignment> assignments = partitionAssignor.assign(emptyClusterMetadata, new GroupSubscription(subscriptions)).groupAssignment();
assertThat(AssignmentInfo.decode(assignments.get("consumer").userData()).errCode(),
equalTo(AssignorError.INCOMPLETE_SOURCE_TOPIC_METADATA.code()));
}
@ParameterizedTest
@MethodSource("parameter")
public void testUniqueField(final Map<String, Object> parameterizedConfig) {
setUp(parameterizedConfig, false);
createDefaultMockTaskManager();
configureDefaultPartitionAssignor(parameterizedConfig);
final Set<String> topics = Set.of("input");
assertEquals(0, partitionAssignor.uniqueField());
partitionAssignor.subscriptionUserData(topics);
assertEquals(1, partitionAssignor.uniqueField());
partitionAssignor.subscriptionUserData(topics);
assertEquals(2, partitionAssignor.uniqueField());
}
@ParameterizedTest
@MethodSource("parameter")
public void testUniqueFieldOverflow(final Map<String, Object> parameterizedConfig) {
setUp(parameterizedConfig, false);
createDefaultMockTaskManager();
configureDefaultPartitionAssignor(parameterizedConfig);
final Set<String> topics = Set.of("input");
for (int i = 0; i < 127; i++) {
partitionAssignor.subscriptionUserData(topics);
}
assertEquals(127, partitionAssignor.uniqueField());
partitionAssignor.subscriptionUserData(topics);
assertEquals(-128, partitionAssignor.uniqueField());
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldThrowTaskAssignmentExceptionWhenUnableToResolvePartitionCount(final Map<String, Object> parameterizedConfig) {
setUp(parameterizedConfig, false);
builder = new CorruptedInternalTopologyBuilder();
topologyMetadata = new TopologyMetadata(builder, new StreamsConfig(configProps(parameterizedConfig)));
final InternalStreamsBuilder streamsBuilder = new InternalStreamsBuilder(builder, false);
final KStream<String, String> inputTopic = streamsBuilder.stream(singleton("topic1"), new ConsumedInternal<>(Consumed.with(null, null)));
final KTable<String, String> inputTable = streamsBuilder.table("topic2", new ConsumedInternal<>(Consumed.with(null, null)), new MaterializedInternal<>(Materialized.as("store")));
inputTopic
.groupBy(
(k, v) -> k,
Grouped.with("GroupName", Serdes.String(), Serdes.String())
)
.windowedBy(TimeWindows.ofSizeWithNoGrace(Duration.ofMinutes(10)))
.aggregate(
() -> "",
(k, v, a) -> a + k)
.leftJoin(
inputTable,
v -> v,
(x, y) -> x + y
);
streamsBuilder.buildAndOptimizeTopology();
configureDefault(parameterizedConfig);
subscriptions.put("consumer",
new Subscription(
singletonList("topic"),
defaultSubscriptionInfo.encode(),
emptyList(),
DEFAULT_GENERATION,
Optional.of(RACK_1)
));
final Map<String, Assignment> assignments = partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
assertThat(AssignmentInfo.decode(assignments.get("consumer").userData()).errCode(),
equalTo(AssignorError.ASSIGNMENT_ERROR.code()));
}
@ParameterizedTest
@MethodSource("parameter")
public void testClientTags(final Map<String, Object> parameterizedConfig) {
setUp(parameterizedConfig, false);
clientTags = mkMap(mkEntry("cluster", "cluster1"), mkEntry("zone", "az1"));
createDefaultMockTaskManager();
configureDefaultPartitionAssignor(parameterizedConfig);
final Set<String> topics = Set.of("input");
final Subscription subscription = new Subscription(new ArrayList<>(topics),
partitionAssignor.subscriptionUserData(topics));
final SubscriptionInfo info = getInfo(PID_1, EMPTY_TASKS, EMPTY_TASKS, uniqueField, clientTags);
assertEquals(singletonList("input"), subscription.topics());
assertEquals(info, SubscriptionInfo.decode(subscription.userData()));
assertEquals(clientTags, partitionAssignor.clientTags());
}
private static | StreamsPartitionAssignorTest |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/blocklist/BlocklistTracker.java | {
"start": 942,
"end": 2061
} | interface ____ {
/**
* Add new blocked node records. If a node (identified by node id) already exists, the newly
* added one will be merged with the existing one.
*
* @param newNodes the new blocked node records
* @return the addition result
*/
BlockedNodeAdditionResult addNewBlockedNodes(Collection<BlockedNode> newNodes);
/**
* Returns whether the given node is blocked.
*
* @param nodeId ID of the node to query
* @return true if the given node is blocked, otherwise false
*/
boolean isBlockedNode(String nodeId);
/**
* Get all blocked node ids.
*
* @return a set containing all blocked node ids
*/
Set<String> getAllBlockedNodeIds();
/**
* Get all blocked nodes.
*
* @return a collection containing all blocked nodes
*/
Collection<BlockedNode> getAllBlockedNodes();
/**
* Remove timeout nodes.
*
* @param currentTimeMillis current time
* @return the removed nodes
*/
Collection<BlockedNode> removeTimeoutNodes(long currentTimeMillis);
}
| BlocklistTracker |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/mapper/DocumentLeafReader.java | {
"start": 2517,
"end": 17135
} | class ____ extends LeafReader {
private final LuceneDocument document;
private final Map<String, Consumer<LeafReaderContext>> calculatedFields;
private final Set<String> fieldPath = new LinkedHashSet<>();
DocumentLeafReader(LuceneDocument document, Map<String, Consumer<LeafReaderContext>> calculatedFields) {
this.document = document;
this.calculatedFields = calculatedFields;
}
private void checkField(String field) {
if (calculatedFields.containsKey(field)) {
// this means that a mapper script is referring to another calculated field;
// in which case we need to execute that field first. We also check for loops here
if (fieldPath.add(field) == false) {
throw new IllegalArgumentException("Loop in field resolution detected: " + String.join("->", fieldPath) + "->" + field);
}
calculatedFields.get(field).accept(this.getContext());
fieldPath.remove(field);
}
}
@Override
public NumericDocValues getNumericDocValues(String field) throws IOException {
checkField(field);
List<Number> values = document.getFields()
.stream()
.filter(f -> Objects.equals(f.name(), field))
.filter(f -> f.fieldType().docValuesType() == DocValuesType.NUMERIC)
.map(IndexableField::numericValue)
.sorted()
.toList();
return numericDocValues(values);
}
@Override
public BinaryDocValues getBinaryDocValues(String field) throws IOException {
checkField(field);
List<BytesRef> values = document.getFields()
.stream()
.filter(f -> Objects.equals(f.name(), field))
.filter(f -> f.fieldType().docValuesType() == DocValuesType.BINARY)
.map(IndexableField::binaryValue)
.sorted()
.toList();
return binaryDocValues(values);
}
@Override
public SortedDocValues getSortedDocValues(String field) throws IOException {
checkField(field);
List<BytesRef> values = document.getFields()
.stream()
.filter(f -> Objects.equals(f.name(), field))
.filter(f -> f.fieldType().docValuesType() == DocValuesType.SORTED)
.map(IndexableField::binaryValue)
.sorted()
.toList();
return sortedDocValues(values);
}
@Override
public SortedNumericDocValues getSortedNumericDocValues(String field) throws IOException {
checkField(field);
List<Number> values = document.getFields()
.stream()
.filter(f -> Objects.equals(f.name(), field))
.filter(f -> f.fieldType().docValuesType() == DocValuesType.SORTED_NUMERIC)
.map(IndexableField::numericValue)
.sorted()
.toList();
return sortedNumericDocValues(values);
}
@Override
public SortedSetDocValues getSortedSetDocValues(String field) throws IOException {
checkField(field);
List<BytesRef> values = document.getFields()
.stream()
.filter(f -> Objects.equals(f.name(), field))
.filter(f -> f.fieldType().docValuesType() == DocValuesType.SORTED_SET)
.map(IndexableField::binaryValue)
.sorted()
.toList();
return sortedSetDocValues(values);
}
@Override
public FieldInfos getFieldInfos() {
return new FieldInfos(new FieldInfo[0]);
}
@Override
public StoredFields storedFields() throws IOException {
return new StoredFields() {
@Override
public void document(int docID, StoredFieldVisitor visitor) throws IOException {
List<IndexableField> fields = document.getFields().stream().filter(f -> f.fieldType().stored()).toList();
for (IndexableField field : fields) {
FieldInfo fieldInfo = fieldInfo(field.name());
if (visitor.needsField(fieldInfo) != StoredFieldVisitor.Status.YES) {
continue;
}
if (field.numericValue() != null) {
Number v = field.numericValue();
if (v instanceof Integer) {
visitor.intField(fieldInfo, v.intValue());
} else if (v instanceof Long) {
visitor.longField(fieldInfo, v.longValue());
} else if (v instanceof Float) {
visitor.floatField(fieldInfo, v.floatValue());
} else if (v instanceof Double) {
visitor.doubleField(fieldInfo, v.doubleValue());
}
} else if (field.stringValue() != null) {
visitor.stringField(fieldInfo, field.stringValue());
} else if (field.binaryValue() != null) {
// We can't just pass field.binaryValue().bytes here as there may be offset/length
// considerations
byte[] data = new byte[field.binaryValue().length];
System.arraycopy(field.binaryValue().bytes, field.binaryValue().offset, data, 0, data.length);
visitor.binaryField(fieldInfo, data);
}
}
}
};
}
@Override
public CacheHelper getCoreCacheHelper() {
throw new UnsupportedOperationException();
}
@Override
public Terms terms(String field) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public NumericDocValues getNormValues(String field) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public DocValuesSkipper getDocValuesSkipper(String s) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public FloatVectorValues getFloatVectorValues(String field) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public void searchNearestVectors(String field, float[] target, KnnCollector knnCollector, AcceptDocs acceptDocs) {
throw new UnsupportedOperationException();
}
@Override
public Bits getLiveDocs() {
throw new UnsupportedOperationException();
}
@Override
public PointValues getPointValues(String field) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public void checkIntegrity() throws IOException {
throw new UnsupportedOperationException();
}
@Override
public LeafMetaData getMetaData() {
throw new UnsupportedOperationException();
}
@Override
public int numDocs() {
throw new UnsupportedOperationException();
}
@Override
public int maxDoc() {
throw new UnsupportedOperationException();
}
@Override
protected void doClose() throws IOException {
throw new UnsupportedOperationException();
}
@Override
public ByteVectorValues getByteVectorValues(String field) {
throw new UnsupportedOperationException();
}
@Override
public void searchNearestVectors(String field, byte[] target, KnnCollector knnCollector, AcceptDocs acceptDocs) {
throw new UnsupportedOperationException();
}
@Override
public TermVectors termVectors() throws IOException {
throw new UnsupportedOperationException();
}
@Override
public CacheHelper getReaderCacheHelper() {
throw new UnsupportedOperationException();
}
// Our StoredFieldsVisitor implementations only check the name of the passed-in
// FieldInfo, so that's the only value we need to set here.
private static FieldInfo fieldInfo(String name) {
return new FieldInfo(
name,
0,
false,
false,
false,
IndexOptions.NONE,
DocValuesType.NONE,
DocValuesSkipIndexType.NONE,
-1,
Collections.emptyMap(),
0,
0,
0,
0,
VectorEncoding.FLOAT32,
VectorSimilarityFunction.EUCLIDEAN,
false,
false
);
}
private static NumericDocValues numericDocValues(List<Number> values) {
if (values.size() == 0) {
return null;
}
DocIdSetIterator disi = DocIdSetIterator.all(1);
return new NumericDocValues() {
@Override
public long longValue() {
return values.get(0).longValue();
}
@Override
public boolean advanceExact(int target) throws IOException {
return disi.advance(target) == target;
}
@Override
public int docID() {
return disi.docID();
}
@Override
public int nextDoc() throws IOException {
return disi.nextDoc();
}
@Override
public int advance(int target) throws IOException {
return disi.advance(target);
}
@Override
public long cost() {
return disi.cost();
}
};
}
private static SortedNumericDocValues sortedNumericDocValues(List<Number> values) {
if (values.size() == 0) {
return null;
}
DocIdSetIterator disi = DocIdSetIterator.all(1);
return new SortedNumericDocValues() {
int i = -1;
@Override
public long nextValue() {
i++;
return values.get(i).longValue();
}
@Override
public int docValueCount() {
return values.size();
}
@Override
public boolean advanceExact(int target) throws IOException {
i = -1;
return disi.advance(target) == target;
}
@Override
public int docID() {
return disi.docID();
}
@Override
public int nextDoc() throws IOException {
i = -1;
return disi.nextDoc();
}
@Override
public int advance(int target) throws IOException {
i = -1;
return disi.advance(target);
}
@Override
public long cost() {
return disi.cost();
}
};
}
private static BinaryDocValues binaryDocValues(List<BytesRef> values) {
if (values.size() == 0) {
return null;
}
DocIdSetIterator disi = DocIdSetIterator.all(1);
return new BinaryDocValues() {
@Override
public BytesRef binaryValue() {
return values.get(0);
}
@Override
public boolean advanceExact(int target) throws IOException {
return disi.advance(target) == target;
}
@Override
public int docID() {
return disi.docID();
}
@Override
public int nextDoc() throws IOException {
return disi.nextDoc();
}
@Override
public int advance(int target) throws IOException {
return disi.advance(target);
}
@Override
public long cost() {
return disi.cost();
}
};
}
private static SortedDocValues sortedDocValues(List<BytesRef> values) {
if (values.size() == 0) {
return null;
}
DocIdSetIterator disi = DocIdSetIterator.all(1);
return new SortedDocValues() {
@Override
public int ordValue() {
return 0;
}
@Override
public BytesRef lookupOrd(int ord) {
return values.get(0);
}
@Override
public int getValueCount() {
return values.size();
}
@Override
public boolean advanceExact(int target) throws IOException {
return disi.advance(target) == target;
}
@Override
public int docID() {
return disi.docID();
}
@Override
public int nextDoc() throws IOException {
return disi.nextDoc();
}
@Override
public int advance(int target) throws IOException {
return disi.advance(target);
}
@Override
public long cost() {
return disi.cost();
}
};
}
private static SortedSetDocValues sortedSetDocValues(List<BytesRef> values) {
if (values.size() == 0) {
return null;
}
DocIdSetIterator disi = DocIdSetIterator.all(1);
return new SortedSetDocValues() {
int i = -1;
@Override
public long nextOrd() {
i++;
assert i < values.size();
return i;
}
@Override
public int docValueCount() {
return values.size();
}
@Override
public BytesRef lookupOrd(long ord) {
return values.get((int) ord);
}
@Override
public long getValueCount() {
return values.size();
}
@Override
public boolean advanceExact(int target) throws IOException {
i = -1;
return disi.advance(target) == target;
}
@Override
public int docID() {
return disi.docID();
}
@Override
public int nextDoc() throws IOException {
i = -1;
return disi.nextDoc();
}
@Override
public int advance(int target) throws IOException {
i = -1;
return disi.advance(target);
}
@Override
public long cost() {
return disi.cost();
}
};
}
}
| DocumentLeafReader |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/KafkaClientSupplier.java | {
"start": 1329,
"end": 3590
} | interface ____ {
/**
* Create an {@link Admin} which is used for internal topic management.
*
* @param config Supplied by the {@link java.util.Properties} given to the {@link KafkaStreams}
* @return an instance of {@link Admin}
*/
default Admin getAdmin(final Map<String, Object> config) {
throw new UnsupportedOperationException("Implementations of KafkaClientSupplier should implement the getAdmin() method.");
}
/**
* Create a {@link Producer} which is used to write records to sink topics.
*
* @param config {@link StreamsConfig#getProducerConfigs(String) producer config} which is supplied by the
* {@link java.util.Properties} given to the {@link KafkaStreams} instance
* @return an instance of Kafka producer
*/
Producer<byte[], byte[]> getProducer(final Map<String, Object> config);
/**
* Create a {@link Consumer} which is used to read records of source topics.
*
* @param config {@link StreamsConfig#getMainConsumerConfigs(String, String, int) consumer config} which is
* supplied by the {@link java.util.Properties} given to the {@link KafkaStreams} instance
* @return an instance of Kafka consumer
*/
Consumer<byte[], byte[]> getConsumer(final Map<String, Object> config);
/**
* Create a {@link Consumer} which is used to read records to restore {@link StateStore}s.
*
* @param config {@link StreamsConfig#getRestoreConsumerConfigs(String) restore consumer config} which is supplied
* by the {@link java.util.Properties} given to the {@link KafkaStreams}
* @return an instance of Kafka consumer
*/
Consumer<byte[], byte[]> getRestoreConsumer(final Map<String, Object> config);
/**
* Create a {@link Consumer} which is used to consume records for {@link GlobalKTable}.
*
* @param config {@link StreamsConfig#getGlobalConsumerConfigs(String) global consumer config} which is supplied
* by the {@link java.util.Properties} given to the {@link KafkaStreams}
* @return an instance of Kafka consumer
*/
Consumer<byte[], byte[]> getGlobalConsumer(final Map<String, Object> config);
}
| KafkaClientSupplier |
java | google__guava | android/guava/src/com/google/common/math/LongMath.java | {
"start": 2200,
"end": 36445
} | class ____ {
@VisibleForTesting static final long MAX_SIGNED_POWER_OF_TWO = 1L << (Long.SIZE - 2);
/**
 * Returns the smallest power of two greater than or equal to {@code x}. This is equivalent to
 * {@code checkedPow(2, log2(x, CEILING))}.
 *
 * @throws IllegalArgumentException if {@code x <= 0}
 * @throws ArithmeticException if the next-higher power of two is not representable as a {@code
 *     long}, i.e. when {@code x > 2^62}
 * @since 20.0
 */
public static long ceilingPowerOfTwo(long x) {
  checkPositive("x", x);
  if (x > MAX_SIGNED_POWER_OF_TWO) {
    throw new ArithmeticException("ceilingPowerOfTwo(" + x + ") is not representable as a long");
  }
  // ceil(log2(x)) == 64 - numberOfLeadingZeros(x - 1). Java's << masks the (negative)
  // shift distance mod 64, so for x == 1 the shift collapses to 0 and the result is 1.
  return 1L << -Long.numberOfLeadingZeros(x - 1);
}
/**
 * Returns the largest power of two less than or equal to {@code x}. This is equivalent to
 * {@code checkedPow(2, log2(x, FLOOR))}.
 *
 * @throws IllegalArgumentException if {@code x <= 0}
 * @since 20.0
 */
public static long floorPowerOfTwo(long x) {
  checkPositive("x", x);
  // floor(log2(x)) is the index of the highest set bit: 63 - numberOfLeadingZeros(x).
  // (Long.highestOneBit is deliberately avoided; it has historically been buggy on GWT.)
  int floorLog2 = (Long.SIZE - 1) - Long.numberOfLeadingZeros(x);
  return 1L << floorLog2;
}
/**
 * Returns {@code true} if {@code x} represents a power of two.
 *
 * <p>Note that {@code Long.bitCount(x) == 1} would not be equivalent: {@code
 * Long.bitCount(Long.MIN_VALUE) == 1}, yet {@link Long#MIN_VALUE} is not a power of two.
 */
public static boolean isPowerOfTwo(long x) {
  // A power of two is positive and has exactly one bit set; clearing the lowest
  // set bit of such a value yields zero. The sign test rejects 0 and Long.MIN_VALUE.
  return x > 0 && (x & (x - 1)) == 0;
}
/**
 * Returns 1 if {@code x < y} as unsigned longs, and 0 otherwise. Assumes that x - y fits into a
 * signed long; under that assumption the sign bit of the difference encodes the comparison.
 * Branch-free, which benchmarks measurably faster than the ternary equivalent.
 */
@VisibleForTesting
static int lessThanBranchFree(long x, long y) {
  // Shift the sign bit of (x - y) down to bit 0: it is 1 exactly when x < y.
  long difference = x - y;
  return (int) (difference >>> (Long.SIZE - 1));
}
/**
 * Returns the base-2 logarithm of {@code x}, rounded according to the specified rounding mode.
 *
 * @throws IllegalArgumentException if {@code x <= 0}
 * @throws ArithmeticException if {@code mode} is {@link RoundingMode#UNNECESSARY} and {@code x}
 *     is not a power of two
 */
@SuppressWarnings("fallthrough")
// TODO(kevinb): remove after this warning is disabled globally
public static int log2(long x, RoundingMode mode) {
  checkPositive("x", x);
  switch (mode) {
    case UNNECESSARY:
      checkRoundingUnnecessary(isPowerOfTwo(x));
      // fall through
    case DOWN:
    case FLOOR:
      // floor(log2(x)) is the index of the highest set bit: 63 - numberOfLeadingZeros(x).
      return (Long.SIZE - 1) - Long.numberOfLeadingZeros(x);
    case UP:
    case CEILING:
      // ceil(log2(x)) == 64 - numberOfLeadingZeros(x - 1); also correct for x == 1.
      return Long.SIZE - Long.numberOfLeadingZeros(x - 1);
    case HALF_DOWN:
    case HALF_UP:
    case HALF_EVEN:
      // Since sqrt(2) is irrational, log2(x) - logFloor cannot be exactly 0.5
      int leadingZeros = Long.numberOfLeadingZeros(x);
      long cmp = MAX_POWER_OF_SQRT2_UNSIGNED >>> leadingZeros;
      // floor(2^(logFloor + 0.5))
      int logFloor = (Long.SIZE - 1) - leadingZeros;
      // Round up exactly when x exceeds the half-power threshold (compared as unsigned).
      return logFloor + lessThanBranchFree(cmp, x);
  }
  throw new AssertionError("impossible");
}
/** The biggest half power of two that fits into an unsigned long */
@VisibleForTesting static final long MAX_POWER_OF_SQRT2_UNSIGNED = 0xB504F333F9DE6484L;
/**
 * Returns the base-10 logarithm of {@code x}, rounded according to the specified rounding mode.
 *
 * @throws IllegalArgumentException if {@code x <= 0}
 * @throws ArithmeticException if {@code mode} is {@link RoundingMode#UNNECESSARY} and {@code x}
 *     is not a power of ten
 */
@GwtIncompatible // TODO
@SuppressWarnings("fallthrough")
// TODO(kevinb): remove after this warning is disabled globally
public static int log10(long x, RoundingMode mode) {
  checkPositive("x", x);
  int logFloor = log10Floor(x);
  long floorPow = powersOf10[logFloor]; // 10^logFloor, the largest power of ten <= x
  switch (mode) {
    case UNNECESSARY:
      checkRoundingUnnecessary(x == floorPow);
      // fall through
    case FLOOR:
    case DOWN:
      return logFloor;
    case CEILING:
    case UP:
      // Round up unless x is exactly a power of ten.
      return logFloor + lessThanBranchFree(floorPow, x);
    case HALF_DOWN:
    case HALF_UP:
    case HALF_EVEN:
      // sqrt(10) is irrational, so log10(x)-logFloor is never exactly 0.5
      return logFloor + lessThanBranchFree(halfPowersOf10[logFloor], x);
  }
  throw new AssertionError();
}
@GwtIncompatible // TODO
// Precondition (unchecked here): x > 0; callers validate via checkPositive.
static int log10Floor(long x) {
  /*
   * Based on Hacker's Delight Fig. 11-5, the two-table-lookup, branch-free implementation.
   *
   * The key idea is that based on the number of leading zeros (equivalently, floor(log2(x))), we
   * can narrow the possible floor(log10(x)) values to two. For example, if floor(log2(x)) is 6,
   * then 64 <= x < 128, so floor(log10(x)) is either 1 or 2.
   */
  int y = maxLog10ForLeadingZeros[Long.numberOfLeadingZeros(x)];
  /*
   * y is the higher of the two possible values of floor(log10(x)). If x < 10^y, then we want the
   * lower of the two possible values, or y - 1, otherwise, we want y.
   */
  return y - lessThanBranchFree(x, powersOf10[y]);
}
// maxLog10ForLeadingZeros[i] == floor(log10(2^(Long.SIZE - i)))
@VisibleForTesting
static final byte[] maxLog10ForLeadingZeros = {
19, 18, 18, 18, 18, 17, 17, 17, 16, 16, 16, 15, 15, 15, 15, 14, 14, 14, 13, 13, 13, 12, 12, 12,
12, 11, 11, 11, 10, 10, 10, 9, 9, 9, 9, 8, 8, 8, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 4, 4, 4, 3, 3, 3,
3, 2, 2, 2, 1, 1, 1, 0, 0, 0
};
// powersOf10[i] == 10^i; 10^18 is the largest power of ten representable as a long.
@GwtIncompatible // TODO
@VisibleForTesting
static final long[] powersOf10 = {
  1L,
  10L,
  100L,
  1000L,
  10000L,
  100000L,
  1000000L,
  10000000L,
  100000000L,
  1000000000L,
  10000000000L,
  100000000000L,
  1000000000000L,
  10000000000000L,
  100000000000000L,
  1000000000000000L,
  10000000000000000L,
  100000000000000000L,
  1000000000000000000L
};
// halfPowersOf10[i] = largest long less than 10^(i + 0.5)
@GwtIncompatible // TODO
@VisibleForTesting
static final long[] halfPowersOf10 = {
3L,
31L,
316L,
3162L,
31622L,
316227L,
3162277L,
31622776L,
316227766L,
3162277660L,
31622776601L,
316227766016L,
3162277660168L,
31622776601683L,
316227766016837L,
3162277660168379L,
31622776601683793L,
316227766016837933L,
3162277660168379331L
};
/**
 * Returns {@code b} raised to the {@code k}th power. Overflow wraps around, so the result is
 * always equal to {@code BigInteger.valueOf(b).pow(k).longValue()}. Runs in {@code O(log k)}
 * time.
 *
 * @throws IllegalArgumentException if {@code k < 0}
 */
@GwtIncompatible // TODO
public static long pow(long b, int k) {
  checkNonNegative("exponent", k);
  if (-2 <= b && b <= 2) {
    // Tiny bases admit closed-form answers without any multiplication loop.
    switch ((int) b) {
      case 0:
        return (k == 0) ? 1 : 0;
      case 1:
        return 1;
      case -1:
        return ((k & 1) == 0) ? 1 : -1;
      case 2:
        return (k < Long.SIZE) ? 1L << k : 0;
      case -2:
        if (k < Long.SIZE) {
          return ((k & 1) == 0) ? 1L << k : -(1L << k);
        } else {
          return 0;
        }
      default:
        throw new AssertionError();
    }
  }
  // Standard square-and-multiply. Multiplication in the wraparound ring Z/2^64 is
  // commutative and associative, so the result matches the original accumulation order.
  long result = 1;
  while (k > 1) {
    if ((k & 1) != 0) {
      result *= b;
    }
    b *= b;
    k >>= 1;
  }
  return (k == 1) ? result * b : result;
}
/**
 * Returns the square root of {@code x}, rounded with the specified rounding mode.
 *
 * @throws IllegalArgumentException if {@code x < 0}
 * @throws ArithmeticException if {@code mode} is {@link RoundingMode#UNNECESSARY} and {@code
 *     sqrt(x)} is not an integer
 */
@GwtIncompatible // TODO
public static long sqrt(long x, RoundingMode mode) {
  checkNonNegative("x", x);
  if (fitsInInt(x)) {
    // Small inputs delegate to the int implementation.
    return IntMath.sqrt((int) x, mode);
  }
  /*
   * Let k be the true value of floor(sqrt(x)), so that
   *
   * k * k <= x < (k + 1) * (k + 1)
   * (double) (k * k) <= (double) x <= (double) ((k + 1) * (k + 1))
   * since casting to double is nondecreasing.
   * Note that the right-hand inequality is no longer strict.
   * Math.sqrt(k * k) <= Math.sqrt(x) <= Math.sqrt((k + 1) * (k + 1))
   * since Math.sqrt is monotonic.
   * (long) Math.sqrt(k * k) <= (long) Math.sqrt(x) <= (long) Math.sqrt((k + 1) * (k + 1))
   * since casting to long is monotonic
   * k <= (long) Math.sqrt(x) <= k + 1
   * since (long) Math.sqrt(k * k) == k, as checked exhaustively in
   * {@link LongMathTest#testSqrtOfPerfectSquareAsDoubleIsPerfect}
   */
  long guess = (long) Math.sqrt((double) x);
  // Note: guess is always <= FLOOR_SQRT_MAX_LONG.
  long guessSquared = guess * guess;
  // Note (2013-2-26): benchmarks indicate that, inscrutably enough, using if statements is
  // faster here than using lessThanBranchFree.
  switch (mode) {
    case UNNECESSARY:
      checkRoundingUnnecessary(guessSquared == x);
      return guess;
    case FLOOR:
    case DOWN:
      // guess is floor(sqrt(x)) or floor(sqrt(x)) + 1 (see the derivation above);
      // correct downward when it overshot.
      if (x < guessSquared) {
        return guess - 1;
      }
      return guess;
    case CEILING:
    case UP:
      if (x > guessSquared) {
        return guess + 1;
      }
      return guess;
    case HALF_DOWN:
    case HALF_UP:
    case HALF_EVEN:
      // sqrtFloor is the exact floor(sqrt(x)).
      long sqrtFloor = guess - ((x < guessSquared) ? 1 : 0);
      long halfSquare = sqrtFloor * sqrtFloor + sqrtFloor;
      /*
       * We wish to test whether or not x <= (sqrtFloor + 0.5)^2 = halfSquare + 0.25. Since both x
       * and halfSquare are integers, this is equivalent to testing whether or not x <=
       * halfSquare. (We have to deal with overflow, though.)
       *
       * If we treat halfSquare as an unsigned long, we know that
       * sqrtFloor^2 <= x < (sqrtFloor + 1)^2
       * halfSquare - sqrtFloor <= x < halfSquare + sqrtFloor + 1
       * so |x - halfSquare| <= sqrtFloor. Therefore, it's safe to treat x - halfSquare as a
       * signed long, so lessThanBranchFree is safe for use.
       */
      return sqrtFloor + lessThanBranchFree(halfSquare, x);
  }
  throw new AssertionError();
}
/**
 * Returns the result of dividing {@code p} by {@code q}, rounding using the specified {@code
 * RoundingMode}. If the {@code RoundingMode} is {@link RoundingMode#DOWN}, then this method is
 * equivalent to regular Java division, {@code p / q}; and if it is {@link RoundingMode#FLOOR},
 * then this method is equivalent to {@link Math#floorDiv(long,long) Math.floorDiv}{@code (p, q)}.
 *
 * @throws ArithmeticException if {@code q == 0}, or if {@code mode == UNNECESSARY} and {@code a}
 *     is not an integer multiple of {@code b}
 */
@GwtIncompatible // TODO
@SuppressWarnings("fallthrough")
public static long divide(long p, long q, RoundingMode mode) {
  checkNotNull(mode);
  long div = p / q; // throws if q == 0
  long rem = p - q * div; // equals p % q
  if (rem == 0) {
    // Exact division: every rounding mode agrees on the answer.
    return div;
  }
  /*
   * Normal Java division rounds towards 0, consistently with RoundingMode.DOWN. We just have to
   * deal with the cases where rounding towards 0 is wrong, which typically depends on the sign of
   * p / q.
   *
   * signum is 1 if p and q are both nonnegative or both negative, and -1 otherwise.
   */
  int signum = 1 | (int) ((p ^ q) >> (Long.SIZE - 1));
  // Decide whether the truncated quotient must be stepped one unit away from zero.
  boolean increment;
  switch (mode) {
    case UNNECESSARY:
      checkRoundingUnnecessary(rem == 0);
      // fall through
    case DOWN:
      increment = false;
      break;
    case UP:
      increment = true;
      break;
    case CEILING:
      increment = signum > 0;
      break;
    case FLOOR:
      increment = signum < 0;
      break;
    case HALF_EVEN:
    case HALF_DOWN:
    case HALF_UP:
      long absRem = abs(rem);
      long cmpRemToHalfDivisor = absRem - (abs(q) - absRem);
      // subtracting two nonnegative longs can't overflow
      // cmpRemToHalfDivisor has the same sign as compare(abs(rem), abs(q) / 2).
      if (cmpRemToHalfDivisor == 0) { // exactly on the half mark
        increment = (mode == HALF_UP || (mode == HALF_EVEN && (div & 1) != 0));
      } else {
        increment = cmpRemToHalfDivisor > 0; // closer to the UP value
      }
      break;
    default:
      throw new AssertionError();
  }
  return increment ? div + signum : div;
}
/**
 * Returns {@code x mod m}: the non-negative remainder, always in {@code [0, m)}. This differs
 * from {@code x % m}, which can be negative.
 *
 * <p>For example:
 *
 * {@snippet :
 * mod(7, 4) == 3
 * mod(-7, 4) == 1
 * mod(-1, 4) == 3
 * mod(-8, 4) == 0
 * mod(8, 4) == 0
 * }
 *
 * @throws ArithmeticException if {@code m <= 0}
 * @see <a href="http://docs.oracle.com/javase/specs/jls/se7/html/jls-15.html#jls-15.17.3">
 *     Remainder Operator</a>
 */
@GwtIncompatible // TODO
public static int mod(long x, int m) {
  // Delegate to the long overload; the result lies in [0, m), so narrowing to int is lossless.
  long result = mod(x, (long) m);
  return (int) result;
}
/**
 * Returns {@code x mod m}: the non-negative remainder, always in {@code [0, m)}. This differs
 * from {@code x % m}, which can be negative.
 *
 * <p>For example:
 *
 * {@snippet :
 * mod(7, 4) == 3
 * mod(-7, 4) == 1
 * mod(-1, 4) == 3
 * mod(-8, 4) == 0
 * mod(8, 4) == 0
 * }
 *
 * @throws ArithmeticException if {@code m <= 0}
 * @see <a href="http://docs.oracle.com/javase/specs/jls/se7/html/jls-15.html#jls-15.17.3">
 *     Remainder Operator</a>
 */
@GwtIncompatible // TODO
public static long mod(long x, long m) {
  if (m > 0) {
    // Math.floorMod already yields the non-negative (floored) remainder.
    return Math.floorMod(x, m);
  }
  throw new ArithmeticException("Modulus must be positive");
}
/**
 * Returns the greatest common divisor of {@code a, b}. Returns {@code 0} if {@code a == 0 && b ==
 * 0}.
 *
 * @throws IllegalArgumentException if {@code a < 0} or {@code b < 0}
 */
public static long gcd(long a, long b) {
  /*
   * The reason we require both arguments to be >= 0 is because otherwise, what do you return on
   * gcd(0, Long.MIN_VALUE)? BigInteger.gcd would return positive 2^63, but positive 2^63 isn't an
   * int.
   */
  checkNonNegative("a", a);
  checkNonNegative("b", b);
  if (a == 0) {
    // 0 % b == 0, so b divides a, but the converse doesn't hold.
    // BigInteger.gcd is consistent with this decision.
    return b;
  } else if (b == 0) {
    return a; // similar logic
  }
  /*
   * Uses the binary GCD algorithm; see http://en.wikipedia.org/wiki/Binary_GCD_algorithm. This is
   * >60% faster than the Euclidean algorithm in benchmarks.
   */
  int aTwos = Long.numberOfTrailingZeros(a);
  a >>= aTwos; // divide out all 2s
  int bTwos = Long.numberOfTrailingZeros(b);
  b >>= bTwos; // divide out all 2s
  while (a != b) { // both a, b are odd
    // The key to the binary GCD algorithm is as follows:
    // Both a and b are odd. Assume a > b; then gcd(a - b, b) = gcd(a, b).
    // But in gcd(a - b, b), a - b is even and b is odd, so we can divide out powers of two.
    // We bend over backwards to avoid branching, adapting a technique from
    // http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
    long delta = a - b; // can't overflow, since a and b are nonnegative
    long minDeltaOrZero = delta & (delta >> (Long.SIZE - 1));
    // equivalent to Math.min(delta, 0)
    a = delta - minDeltaOrZero - minDeltaOrZero; // sets a to Math.abs(a - b)
    // a is now nonnegative and even
    b += minDeltaOrZero; // sets b to min(old a, b)
    a >>= Long.numberOfTrailingZeros(a); // divide out all 2s, since 2 doesn't divide b
  }
  // Restore the factors of two that were common to both original arguments.
  return a << min(aTwos, bTwos);
}
/**
 * Returns the sum of {@code a} and {@code b}, provided it does not overflow.
 *
 * <p><b>Note:</b> this method is now unnecessary and should be treated as deprecated; use {@link
 * Math#addExact(long, long)} instead. Note that if both arguments are {@code int} values, writing
 * {@code Math.addExact(a, b)} will call the {@link Math#addExact(int, int)} overload, not {@link
 * Math#addExact(long, long)}. Also note that adding two {@code int} values can <b>never</b>
 * overflow a {@code long}, so you can just write {@code (long) a + b}.
 *
 * @throws ArithmeticException if {@code a + b} overflows in signed {@code long} arithmetic
 */
@InlineMe(replacement = "Math.addExact(a, b)")
public static long checkedAdd(long a, long b) {
  // Body must stay textually identical to the @InlineMe replacement above.
  return Math.addExact(a, b);
}
/**
 * Returns the difference of {@code a} and {@code b}, provided it does not overflow.
 *
 * <p><b>Note:</b> this method is now unnecessary and should be treated as deprecated; use {@link
 * Math#subtractExact(long, long)} instead. Note that if both arguments are {@code int} values,
 * writing {@code Math.subtractExact(a, b)} will call the {@link Math#subtractExact(int, int)}
 * overload, not {@link Math#subtractExact(long, long)}. Also note that subtracting two {@code
 * int} values can <b>never</b> overflow a {@code long}, so you can just write {@code (long) a -
 * b}.
 *
 * @throws ArithmeticException if {@code a - b} overflows in signed {@code long} arithmetic
 */
@InlineMe(replacement = "Math.subtractExact(a, b)")
public static long checkedSubtract(long a, long b) {
  // Body must stay textually identical to the @InlineMe replacement above.
  return Math.subtractExact(a, b);
}
/**
 * Returns the product of {@code a} and {@code b}, provided it does not overflow.
 *
 * <p><b>Note:</b> this method is now unnecessary and should be treated as deprecated; use {@link
 * Math#multiplyExact(long, long)} instead. Note that if both arguments are {@code int} values,
 * writing {@code Math.multiplyExact(a, b)} will call the {@link Math#multiplyExact(int, int)}
 * overload, not {@link Math#multiplyExact(long, long)}. Also note that multiplying two {@code
 * int} values can <b>never</b> overflow a {@code long}, so you can just write {@code (long) a *
 * b}.
 *
 * @throws ArithmeticException if {@code a * b} overflows in signed {@code long} arithmetic
 */
@InlineMe(replacement = "Math.multiplyExact(a, b)")
public static long checkedMultiply(long a, long b) {
  // Body must stay textually identical to the @InlineMe replacement above.
  return Math.multiplyExact(a, b);
}
/**
 * Returns the {@code b} to the {@code k}th power, provided it does not overflow.
 *
 * @throws ArithmeticException if {@code b} to the {@code k}th power overflows in signed {@code
 *     long} arithmetic
 */
@GwtIncompatible // TODO
// Whenever both tests are cheap and functional, it's faster to use &, | instead of &&, ||
@SuppressWarnings("ShortCircuitBoolean")
public static long checkedPow(long b, int k) {
  checkNonNegative("exponent", k);
  if (b >= -2 & b <= 2) {
    // Tiny bases have closed-form answers (with explicit overflow checks for +/-2).
    switch ((int) b) {
      case 0:
        return (k == 0) ? 1 : 0;
      case 1:
        return 1;
      case -1:
        return ((k & 1) == 0) ? 1 : -1;
      case 2:
        checkNoOverflow(k < Long.SIZE - 1, "checkedPow", b, k);
        return 1L << k;
      case -2:
        checkNoOverflow(k < Long.SIZE, "checkedPow", b, k);
        return ((k & 1) == 0) ? (1L << k) : (-1L << k);
      default:
        throw new AssertionError();
    }
  }
  // Square-and-multiply; every multiplication below is overflow-checked.
  long accum = 1;
  while (true) {
    switch (k) {
      case 0:
        return accum;
      case 1:
        return Math.multiplyExact(accum, b);
      default:
        if ((k & 1) != 0) {
          accum = Math.multiplyExact(accum, b);
        }
        k >>= 1;
        if (k > 0) {
          // Squaring b is safe only while |b| <= floor(sqrt(Long.MAX_VALUE)).
          checkNoOverflow(
              -FLOOR_SQRT_MAX_LONG <= b && b <= FLOOR_SQRT_MAX_LONG, "checkedPow", b, k);
          b *= b;
        }
    }
  }
}
/**
 * Returns the sum of {@code a} and {@code b}, clamped to the representable range: on positive
 * overflow the result is {@code Long.MAX_VALUE}, on negative overflow {@code Long.MIN_VALUE}.
 *
 * @since 20.0
 */
public static long saturatedAdd(long a, long b) {
  long sum = a + b;
  // Overflow is only possible when the operands share a sign and the wrapped
  // sum has the opposite sign.
  boolean overflowed = (a ^ b) >= 0 && (a ^ sum) < 0;
  if (!overflowed) {
    return sum;
  }
  // A wrapped-negative sum means positive overflow, and vice versa.
  return (sum < 0) ? Long.MAX_VALUE : Long.MIN_VALUE;
}
/**
 * Returns the difference of {@code a} and {@code b}, clamped to the representable range: on
 * positive overflow the result is {@code Long.MAX_VALUE}, on negative overflow {@code
 * Long.MIN_VALUE}.
 *
 * @since 20.0
 */
public static long saturatedSubtract(long a, long b) {
  long diff = a - b;
  // Overflow requires operands of opposite sign and a wrapped difference whose
  // sign disagrees with a.
  boolean overflowed = (a ^ b) < 0 && (a ^ diff) < 0;
  if (!overflowed) {
    return diff;
  }
  // A wrapped-negative difference means positive overflow, and vice versa.
  return (diff < 0) ? Long.MAX_VALUE : Long.MIN_VALUE;
}
/**
 * Returns the product of {@code a} and {@code b} unless it would overflow or underflow in which
 * case {@code Long.MAX_VALUE} or {@code Long.MIN_VALUE} is returned, respectively.
 *
 * @since 20.0
 */
// Whenever both tests are cheap and functional, it's faster to use &, | instead of &&, ||
@SuppressWarnings("ShortCircuitBoolean")
public static long saturatedMultiply(long a, long b) {
  // see checkedMultiply for explanation: the combined leading-zero count of a, ~a, b, ~b
  // bounds log2(|a|) + log2(|b|) — a large count proves the product fits, a small one
  // proves it overflows; only the borderline case needs the division check below.
  int leadingZeros =
      Long.numberOfLeadingZeros(a)
          + Long.numberOfLeadingZeros(~a)
          + Long.numberOfLeadingZeros(b)
          + Long.numberOfLeadingZeros(~b);
  if (leadingZeros > Long.SIZE + 1) {
    return a * b;
  }
  // the return value if we will overflow (which we calculate by overflowing a long :) )
  long limit = Long.MAX_VALUE + ((a ^ b) >>> (Long.SIZE - 1));
  if (leadingZeros < Long.SIZE | (a < 0 & b == Long.MIN_VALUE)) {
    // overflow
    return limit;
  }
  // Borderline magnitude: perform the multiply and verify it by dividing back.
  long result = a * b;
  if (a == 0 || result / a == b) {
    return result;
  }
  return limit;
}
/**
 * Returns the {@code b} to the {@code k}th power, unless it would overflow or underflow in which
 * case {@code Long.MAX_VALUE} or {@code Long.MIN_VALUE} is returned, respectively.
 *
 * @since 20.0
 */
// Whenever both tests are cheap and functional, it's faster to use &, | instead of &&, ||
@SuppressWarnings("ShortCircuitBoolean")
public static long saturatedPow(long b, int k) {
  checkNonNegative("exponent", k);
  if (b >= -2 & b <= 2) {
    // Tiny bases: closed-form answers with explicit saturation for +/-2.
    switch ((int) b) {
      case 0:
        return (k == 0) ? 1 : 0;
      case 1:
        return 1;
      case -1:
        return ((k & 1) == 0) ? 1 : -1;
      case 2:
        if (k >= Long.SIZE - 1) {
          return Long.MAX_VALUE;
        }
        return 1L << k;
      case -2:
        if (k >= Long.SIZE) {
          // MAX_VALUE + 1 wraps to MIN_VALUE, so odd k saturates negative.
          return Long.MAX_VALUE + (k & 1);
        }
        return ((k & 1) == 0) ? (1L << k) : (-1L << k);
      default:
        throw new AssertionError();
    }
  }
  // Square-and-multiply, bailing out to the precomputed limit before any step
  // that could overflow.
  long accum = 1;
  // if b is negative and k is odd then the limit is MIN otherwise the limit is MAX
  long limit = Long.MAX_VALUE + ((b >>> (Long.SIZE - 1)) & (k & 1));
  while (true) {
    switch (k) {
      case 0:
        return accum;
      case 1:
        return saturatedMultiply(accum, b);
      default:
        if ((k & 1) != 0) {
          accum = saturatedMultiply(accum, b);
        }
        k >>= 1;
        if (k > 0) {
          if (-FLOOR_SQRT_MAX_LONG > b | b > FLOOR_SQRT_MAX_LONG) {
            return limit;
          }
          b *= b;
        }
    }
  }
}
@VisibleForTesting static final long FLOOR_SQRT_MAX_LONG = 3037000499L;
/**
 * Returns {@code n!} — the product of the first {@code n} positive integers, with {@code 0!}
 * defined as {@code 1} — or {@link Long#MAX_VALUE} if the true result does not fit in a {@code
 * long}.
 *
 * @throws IllegalArgumentException if {@code n < 0}
 */
@GwtIncompatible // TODO
public static long factorial(int n) {
  checkNonNegative("n", n);
  if (n < factorials.length) {
    return factorials[n];
  }
  // 21! and beyond overflow a long, so saturate.
  return Long.MAX_VALUE;
}
// factorials[n] == n!; 20! is the largest factorial that fits in a long (see factorial(int)).
static final long[] factorials = {
  1L,
  1L,
  1L * 2,
  1L * 2 * 3,
  1L * 2 * 3 * 4,
  1L * 2 * 3 * 4 * 5,
  1L * 2 * 3 * 4 * 5 * 6,
  1L * 2 * 3 * 4 * 5 * 6 * 7,
  1L * 2 * 3 * 4 * 5 * 6 * 7 * 8,
  1L * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9,
  1L * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10,
  1L * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11,
  1L * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11 * 12,
  1L * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11 * 12 * 13,
  1L * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11 * 12 * 13 * 14,
  1L * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11 * 12 * 13 * 14 * 15,
  1L * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11 * 12 * 13 * 14 * 15 * 16,
  1L * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11 * 12 * 13 * 14 * 15 * 16 * 17,
  1L * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11 * 12 * 13 * 14 * 15 * 16 * 17 * 18,
  1L * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11 * 12 * 13 * 14 * 15 * 16 * 17 * 18 * 19,
  1L * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11 * 12 * 13 * 14 * 15 * 16 * 17 * 18 * 19 * 20
};
/**
 * Returns {@code n} choose {@code k}, also known as the binomial coefficient of {@code n} and
 * {@code k}, or {@link Long#MAX_VALUE} if the result does not fit in a {@code long}.
 *
 * @throws IllegalArgumentException if {@code n < 0}, {@code k < 0}, or {@code k > n}
 */
public static long binomial(int n, int k) {
  checkNonNegative("n", n);
  checkNonNegative("k", k);
  checkArgument(k <= n, "k (%s) > n (%s)", k, n);
  // Exploit the symmetry C(n, k) == C(n, n - k) to minimize the iteration count.
  if (k > (n >> 1)) {
    k = n - k;
  }
  switch (k) {
    case 0:
      return 1;
    case 1:
      return n;
    default:
      if (n < factorials.length) {
        // Small n: compute directly from the factorial table.
        return factorials[n] / (factorials[k] * factorials[n - k]);
      } else if (k >= biggestBinomials.length || n > biggestBinomials[k]) {
        // Known (from the precomputed bounds) to overflow a long.
        return Long.MAX_VALUE;
      } else if (k < biggestSimpleBinomials.length && n <= biggestSimpleBinomials[k]) {
        // guaranteed not to overflow
        long result = n--;
        for (int i = 2; i <= k; n--, i++) {
          result *= n;
          result /= i;
        }
        return result;
      } else {
        int nBits = LongMath.log2(n, RoundingMode.CEILING);
        long result = 1;
        long numerator = n--;
        long denominator = 1;
        int numeratorBits = nBits;
        // This is an upper bound on log2(numerator, ceiling).
        /*
         * We want to do this in long math for speed, but want to avoid overflow. We adapt the
         * technique previously used by BigIntegerMath: maintain separate numerator and
         * denominator accumulators, multiplying the fraction into result when near overflow.
         */
        for (int i = 2; i <= k; i++, n--) {
          if (numeratorBits + nBits < Long.SIZE - 1) {
            // It's definitely safe to multiply into numerator and denominator.
            numerator *= n;
            denominator *= i;
            numeratorBits += nBits;
          } else {
            // It might not be safe to multiply into numerator and denominator,
            // so multiply (numerator / denominator) into result.
            result = multiplyFraction(result, numerator, denominator);
            numerator = n;
            denominator = i;
            numeratorBits = nBits;
          }
        }
        return multiplyFraction(result, numerator, denominator);
      }
  }
}
/** Returns (x * numerator / denominator), which is assumed to come out to an integral value. */
static long multiplyFraction(long x, long numerator, long denominator) {
  if (x == 1) {
    return numerator / denominator;
  }
  // Cancel gcd(x, denominator) first so the final division is exact.
  long g = gcd(x, denominator);
  long reducedX = x / g;
  long reducedDenominator = denominator / g;
  // gcd(reducedX, reducedDenominator) == 1 and the overall fraction is integral,
  // so reducedDenominator must divide numerator exactly.
  return reducedX * (numerator / reducedDenominator);
}
/*
* binomial(biggestBinomials[k], k) fits in a long, but not binomial(biggestBinomials[k] + 1, k).
*/
static final int[] biggestBinomials = {
Integer.MAX_VALUE,
Integer.MAX_VALUE,
Integer.MAX_VALUE,
3810779,
121977,
16175,
4337,
1733,
887,
534,
361,
265,
206,
169,
143,
125,
111,
101,
94,
88,
83,
79,
76,
74,
72,
70,
69,
68,
67,
67,
66,
66,
66,
66
};
/*
* binomial(biggestSimpleBinomials[k], k) doesn't need to use the slower GCD-based impl, but
* binomial(biggestSimpleBinomials[k] + 1, k) does.
*/
@VisibleForTesting
static final int[] biggestSimpleBinomials = {
Integer.MAX_VALUE,
Integer.MAX_VALUE,
Integer.MAX_VALUE,
2642246,
86251,
11724,
3218,
1313,
684,
419,
287,
214,
169,
139,
119,
105,
95,
87,
81,
76,
73,
70,
68,
66,
64,
63,
62,
62,
61,
61,
61
};
// These values were generated by using checkedMultiply to see when the simple multiply/divide
// algorithm would lead to an overflow.
// True iff x survives a round trip through int unchanged, i.e. lies in int's range.
static boolean fitsInInt(long x) {
  return x >= Integer.MIN_VALUE && x <= Integer.MAX_VALUE;
}
/**
 * Returns the arithmetic mean of {@code x} and {@code y}, rounded toward negative infinity.
 * Immune to overflow, unlike the naive {@code (x + y) / 2} (fails for large values) or {@code (x
 * + y) >>> 1} (fails for negative values).
 *
 * @since 14.0
 */
public static long mean(long x, long y) {
  // Shared bits contribute fully to the mean; differing bits contribute half
  // each. The arithmetic shift preserves floor semantics for negative inputs.
  long sharedBits = x & y;
  long halfDifferingBits = (x ^ y) >> 1;
  return sharedBits + halfDifferingBits;
}
/*
* This bitmask is used as an optimization for cheaply testing for divisibility by 2, 3, or 5.
* Each bit is set to 1 for all remainders that indicate divisibility by 2, 3, or 5, so
* 1, 7, 11, 13, 17, 19, 23, 29 are set to 0. 30 and up don't matter because they won't be hit.
*/
private static final int SIEVE_30 =
~((1 << 1) | (1 << 7) | (1 << 11) | (1 << 13) | (1 << 17) | (1 << 19) | (1 << 23)
| (1 << 29));
/**
 * Returns {@code true} if {@code n} is a <a
 * href="http://mathworld.wolfram.com/PrimeNumber.html">prime number</a>: an integer <i>greater
 * than one</i> that cannot be factored into a product of <i>smaller</i> positive integers.
 * Returns {@code false} if {@code n} is zero, one, or a composite number (one which <i>can</i> be
 * factored into smaller positive integers).
 *
 * <p>To test larger numbers, use {@link BigInteger#isProbablePrime}.
 *
 * @throws IllegalArgumentException if {@code n} is negative
 * @since 20.0
 */
@GwtIncompatible // TODO
public static boolean isPrime(long n) {
  if (n < 2) {
    checkNonNegative("n", n);
    return false;
  }
  if (n < 66) {
    // Encode all primes less than 66 into mask without 0 and 1.
    long mask =
        (1L << (2 - 2))
            | (1L << (3 - 2))
            | (1L << (5 - 2))
            | (1L << (7 - 2))
            | (1L << (11 - 2))
            | (1L << (13 - 2))
            | (1L << (17 - 2))
            | (1L << (19 - 2))
            | (1L << (23 - 2))
            | (1L << (29 - 2))
            | (1L << (31 - 2))
            | (1L << (37 - 2))
            | (1L << (41 - 2))
            | (1L << (43 - 2))
            | (1L << (47 - 2))
            | (1L << (53 - 2))
            | (1L << (59 - 2))
            | (1L << (61 - 2));
    // Look up n within the mask.
    return ((mask >> ((int) n - 2)) & 1) != 0;
  }
  // Cheap trial-division screens: divisibility by 2, 3, or 5 via the bitmask...
  if ((SIEVE_30 & (1 << (n % 30))) != 0) {
    return false;
  }
  // ...then by 7, 11, and 13.
  if (n % 7 == 0 || n % 11 == 0 || n % 13 == 0) {
    return false;
  }
  // Below 17^2, any remaining candidate has no prime factor up to its square root.
  if (n < 17 * 17) {
    return true;
  }
  // Deterministic Miller-Rabin: each base set is proven sufficient up to its bound.
  for (long[] baseSet : millerRabinBaseSets) {
    if (n <= baseSet[0]) {
      for (int i = 1; i < baseSet.length; i++) {
        if (!MillerRabinTester.test(baseSet[i], n)) {
          return false;
        }
      }
      return true;
    }
  }
  throw new AssertionError();
}
/*
* If n <= millerRabinBases[i][0], then testing n against bases millerRabinBases[i][1..] suffices
* to prove its primality. Values from miller-rabin.appspot.com.
*
* NOTE: We could get slightly better bases that would be treated as unsigned, but benchmarks
* showed negligible performance improvements.
*/
private static final long[][] millerRabinBaseSets = {
{291830, 126401071349994536L},
{885594168, 725270293939359937L, 3569819667048198375L},
{273919523040L, 15, 7363882082L, 992620450144556L},
{47636622961200L, 2, 2570940, 211991001, 3749873356L},
{
7999252175582850L,
2,
4130806001517L,
149795463772692060L,
186635894390467037L,
3967304179347715805L
},
{
585226005592931976L,
2,
123635709730000L,
9233062284813009L,
43835965440333360L,
761179012939631437L,
1263739024124850375L
},
{Long.MAX_VALUE, 2, 325, 9375, 28178, 450775, 9780504, 1795265022}
};
private | LongMath |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeStorageDirectives.java | {
"start": 11355,
"end": 13498
} | class ____
extends BlockPlacementPolicyDefault {
// Storage targets that chooseTarget() will hand back, injected by the test body
// before file creation triggers block placement.
static DatanodeStorageInfo[] dnStorageInfosToReturn;

// Bypasses the default placement logic entirely and returns the preset storages,
// letting the test pin exactly which DataNode storage each block lands on.
@Override
public DatanodeStorageInfo[] chooseTarget(String srcPath, int numOfReplicas,
    Node writer, List<DatanodeStorageInfo> chosenNodes,
    boolean returnChosenNodes, Set<Node> excludedNodes, long blocksize,
    final BlockStoragePolicy storagePolicy, EnumSet<AddBlockFlag> flags) {
  return dnStorageInfosToReturn;
}
}
/**
 * Looks up the first storage of the {@code dnIndex}-th DataNode in the running
 * mini-cluster via the NameNode's DatanodeManager, or returns null when no
 * cluster has been started.
 */
private DatanodeStorageInfo getDatanodeStorageInfo(int dnIndex)
    throws UnregisteredNodeException {
  if (cluster == null) {
    return null;
  }
  DatanodeID dnId = cluster.getDataNodes().get(dnIndex).getDatanodeId();
  DatanodeManager dnManager = cluster.getNamesystem()
      .getBlockManager().getDatanodeManager();
  return dnManager.getDatanode(dnId).getStorageInfos()[0];
}
/**
 * Verifies that the storage ID chosen by the (stubbed) block placement policy is
 * the same one observed by the (stubbed) volume choosing policy when a file's
 * blocks are allocated.
 */
@Test
@Timeout(value = 60)
public void testStorageIDBlockPlacementSpecific()
    throws ReconfigurationException, InterruptedException, TimeoutException,
    IOException {
  // Five DataNodes with two DISK volumes each.
  final StorageType[][] storageTypes = {
      {StorageType.DISK, StorageType.DISK},
      {StorageType.DISK, StorageType.DISK},
      {StorageType.DISK, StorageType.DISK},
      {StorageType.DISK, StorageType.DISK},
      {StorageType.DISK, StorageType.DISK},
  };
  final int numDataNodes = storageTypes.length;
  final int storagePerDataNode = storageTypes[0].length;
  // Start the mini-cluster with both test policies installed.
  startDFSCluster(1, numDataNodes, storagePerDataNode, storageTypes,
      TestVolumeChoosingPolicy.class, TestBlockPlacementPolicy.class);
  Path testFile = new Path("/test");
  final short replFactor = 1;
  final int numBlocks = 10;
  // Pin placement to the first DataNode's first storage, and tell the volume
  // policy which storage ID it should expect to see.
  DatanodeStorageInfo dnInfoToUse = getDatanodeStorageInfo(0);
  TestBlockPlacementPolicy.dnStorageInfosToReturn =
      new DatanodeStorageInfo[] {dnInfoToUse};
  TestVolumeChoosingPolicy.expectedStorageId = dnInfoToUse.getStorageID();
  //file creation invokes both BlockPlacementPolicy and VolumeChoosingPolicy,
  //and will test that the storage ids match
  createFile(testFile, numBlocks, replFactor);
}
}
| TestBlockPlacementPolicy |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/batch/BatchExecBoundedStreamScan.java | {
"start": 1990,
"end": 4447
} | class ____ extends ExecNodeBase<RowData>
implements BatchExecNode<RowData>, MultipleTransformationTranslator<RowData> {
private final DataStream<?> dataStream;
private final DataType sourceType;
private final int[] fieldIndexes;
private final List<String> qualifiedName;
/**
 * Creates a batch exec node that scans a pre-built bounded {@link DataStream}.
 *
 * @param tableConfig planner configuration, used to derive this node's persisted config
 * @param dataStream the bounded stream this scan wraps
 * @param sourceType external data type of the stream's records
 * @param fieldIndexes mapping from output fields to source fields
 *     (NOTE(review): presumably may also carry time-attribute marker indexes — see
 *     needInternalConversion(); confirm against ScanUtil)
 * @param qualifiedName qualified table name, used to label generated code/transformations
 * @param outputType the internal row type this node produces
 * @param description human-readable description of the node
 */
public BatchExecBoundedStreamScan(
    ReadableConfig tableConfig,
    DataStream<?> dataStream,
    DataType sourceType,
    int[] fieldIndexes,
    List<String> qualifiedName,
    RowType outputType,
    String description) {
  super(
      ExecNodeContext.newNodeId(),
      ExecNodeContext.newContext(BatchExecBoundedStreamScan.class),
      ExecNodeContext.newPersistedConfig(BatchExecBoundedStreamScan.class, tableConfig),
      Collections.emptyList(), // no inputs: this is a leaf (source) node
      outputType,
      description);
  this.dataStream = dataStream;
  this.sourceType = sourceType;
  this.fieldIndexes = fieldIndexes;
  this.qualifiedName = qualifiedName;
}
@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(
PlannerBase planner, ExecNodeConfig config) {
final Transformation<?> sourceTransform = dataStream.getTransformation();
if (needInternalConversion()) {
return ScanUtil.convertToInternalRow(
new CodeGeneratorContext(config, planner.getFlinkContext().getClassLoader()),
(Transformation<Object>) sourceTransform,
fieldIndexes,
sourceType,
(RowType) getOutputType(),
qualifiedName,
(detailName, simplifyName) ->
createFormattedTransformationName(detailName, simplifyName, config),
(description) -> createFormattedTransformationDescription(description, config),
JavaScalaConversionUtil.toScala(Optional.empty()),
"",
"");
} else {
return (Transformation<RowData>) sourceTransform;
}
}
private boolean needInternalConversion() {
return ScanUtil.hasTimeAttributeField(fieldIndexes) || ScanUtil.needsConversion(sourceType);
}
public DataStream<?> getDataStream() {
return dataStream;
}
}
| BatchExecBoundedStreamScan |
java | apache__flink | flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/client/VoidNamespaceTypeInfo.java | {
"start": 1248,
"end": 2515
} | class ____ extends TypeInformation<VoidNamespace> {
private static final long serialVersionUID = 5453679706408610586L;
public static final VoidNamespaceTypeInfo INSTANCE = new VoidNamespaceTypeInfo();
@Override
public boolean isBasicType() {
return false;
}
@Override
public boolean isTupleType() {
return false;
}
@Override
public int getArity() {
return 0;
}
@Override
public int getTotalFields() {
return 1;
}
@Override
public Class<VoidNamespace> getTypeClass() {
return VoidNamespace.class;
}
@Override
public boolean isKeyType() {
return false;
}
@Override
public TypeSerializer<VoidNamespace> createSerializer(SerializerConfig config) {
return VoidNamespaceSerializer.INSTANCE;
}
@Override
public String toString() {
return "VoidNamespaceTypeInfo";
}
@Override
public boolean equals(Object obj) {
return this == obj || obj instanceof VoidNamespaceTypeInfo;
}
@Override
public int hashCode() {
return 0;
}
@Override
public boolean canEqual(Object obj) {
return obj instanceof VoidNamespaceTypeInfo;
}
}
| VoidNamespaceTypeInfo |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/LdifEndpointBuilderFactory.java | {
"start": 6319,
"end": 6626
} | class ____ extends AbstractEndpointBuilder implements LdifEndpointBuilder, AdvancedLdifEndpointBuilder {
public LdifEndpointBuilderImpl(String path) {
super(componentName, path);
}
}
return new LdifEndpointBuilderImpl(path);
}
} | LdifEndpointBuilderImpl |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/threadsafety/GuardedByCheckerTest.java | {
"start": 3113,
"end": 4052
} | class ____ {
final Monitor monitor = null;
@GuardedBy("monitor")
int x;
void m() {
monitor.enter();
// BUG: Diagnostic contains:
// access should be guarded by 'this.monitor'
x++;
try {
x++;
} finally {
monitor.leave();
}
// BUG: Diagnostic contains:
// access should be guarded by 'this.monitor'
x++;
}
}
""")
.doTest();
}
@Test
public void wrongLock() {
compilationHelper
.addSourceLines(
"threadsafety/Test.java",
"""
package threadsafety;
import com.google.errorprone.annotations.concurrent.GuardedBy;
import java.util.concurrent.locks.Lock;
| Test |
java | apache__camel | components/camel-mllp/src/main/java/org/apache/camel/component/mllp/MllpAcknowledgementDeliveryException.java | {
"start": 931,
"end": 1977
} | class ____ extends MllpAcknowledgementException {
static final String EXCEPTION_MESSAGE = "HL7 Acknowledgment Delivery Failed";
public MllpAcknowledgementDeliveryException(byte[] hl7Message, byte[] hl7Acknowledgement, boolean logPhi) {
super(EXCEPTION_MESSAGE, hl7Message, hl7Acknowledgement, logPhi);
}
public MllpAcknowledgementDeliveryException(byte[] hl7Message, byte[] hl7Acknowledgement, Throwable cause, boolean logPhi) {
super(EXCEPTION_MESSAGE, hl7Message, hl7Acknowledgement, cause, logPhi);
}
public MllpAcknowledgementDeliveryException(String message, byte[] hl7Message, byte[] hl7Acknowledgement, boolean logPhi) {
super(message, hl7Message, hl7Acknowledgement, logPhi);
}
public MllpAcknowledgementDeliveryException(String message, byte[] hl7Message, byte[] hl7Acknowledgement, Throwable cause,
boolean logPhi) {
super(message, hl7Message, hl7Acknowledgement, cause, logPhi);
}
}
| MllpAcknowledgementDeliveryException |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/FlowRunEntitySetReader.java | {
"start": 1693,
"end": 2816
} | class ____ implements MessageBodyReader<Set<FlowRunEntity>> {
private ObjectMapper objectMapper = new ObjectMapper();
private String timelineEntityType =
"java.util.Set<org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity>";
@Override
public boolean isReadable(Class<?> type, Type genericType,
Annotation[] annotations, MediaType mediaType) {
return timelineEntityType.equals(genericType.getTypeName());
}
@Override
public Set<FlowRunEntity> readFrom(Class<Set<FlowRunEntity>> type,
Type genericType, Annotation[] annotations, MediaType mediaType,
MultivaluedMap<String, String> httpHeaders,
InputStream entityStream) throws IOException, WebApplicationException {
Set<FlowRunEntity> flowRunEntitySet = new HashSet<>();
JsonNode jsonNode = objectMapper.readTree(entityStream);
if (jsonNode.isArray()) {
for (JsonNode jNode : jsonNode) {
FlowRunEntity flowRunEntity = objectMapper.treeToValue(jNode, FlowRunEntity.class);
flowRunEntitySet.add(flowRunEntity);
}
}
return flowRunEntitySet;
}
}
| FlowRunEntitySetReader |
java | quarkusio__quarkus | extensions/keycloak-authorization/runtime/src/main/java/io/quarkus/keycloak/pep/runtime/KeycloakPolicyEnforcerTenantConfig.java | {
"start": 4553,
"end": 4971
} | interface ____ {
/**
* Defines the limit of entries that should be kept in the cache
*/
@WithDefault("1000")
int maxEntries();
/**
* Defines the time in milliseconds when the entry should be expired
*/
@WithDefault("30000")
long lifespan();
}
@ConfigGroup
| PathCacheConfig |
java | apache__flink | flink-formats/flink-protobuf/src/main/java/org/apache/flink/formats/protobuf/deserialize/PbCodegenRowDeserializer.java | {
"start": 6707,
"end": 6791
} | interface
____ message + ".has" + fieldName + "()";
}
}
}
| return |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/models/annotations/internal/FormulaAnnotation.java | {
"start": 465,
"end": 1293
} | class ____ implements Formula {
private String value;
/**
* Used in creating dynamic annotation instances (e.g. from XML)
*/
public FormulaAnnotation(ModelsContext modelContext) {
}
/**
* Used in creating annotation instances from JDK variant
*/
public FormulaAnnotation(Formula annotation, ModelsContext modelContext) {
this.value = annotation.value();
}
/**
* Used in creating annotation instances from Jandex variant
*/
public FormulaAnnotation(Map<String, Object> attributeValues, ModelsContext modelContext) {
this.value = (String) attributeValues.get( "value" );
}
@Override
public Class<? extends Annotation> annotationType() {
return Formula.class;
}
@Override
public String value() {
return value;
}
public void value(String value) {
this.value = value;
}
}
| FormulaAnnotation |
java | elastic__elasticsearch | x-pack/plugin/rank-rrf/src/yamlRestTest/java/org/elasticsearch/xpack/rank/rrf/LicenseClientYamlTestSuiteIT.java | {
"start": 718,
"end": 1476
} | class ____ extends ESClientYamlSuiteTestCase {
@ClassRule
public static ElasticsearchCluster cluster = ElasticsearchCluster.local().nodes(1).module("rank-rrf").build();
@BeforeClass
public static void init() {
assumeFalse("Cannot run in FIPS mode since it uses trial license", inFipsJvm());
}
public LicenseClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
super(testCandidate);
}
@ParametersFactory
public static Iterable<Object[]> parameters() throws Exception {
return ESClientYamlSuiteTestCase.createParameters("license");
}
@Override
protected String getTestRestCluster() {
return cluster.getHttpAddresses();
}
}
| LicenseClientYamlTestSuiteIT |
java | mockito__mockito | mockito-core/src/main/java/org/mockito/internal/matchers/LocalizedMatcher.java | {
"start": 330,
"end": 732
} | class ____ {
private final ArgumentMatcher<?> matcher;
private final Location location;
public LocalizedMatcher(ArgumentMatcher<?> matcher) {
this.matcher = matcher;
this.location = LocationFactory.create();
}
public Location getLocation() {
return location;
}
public ArgumentMatcher<?> getMatcher() {
return matcher;
}
}
| LocalizedMatcher |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/pattern/MessagePatternConverter.java | {
"start": 4789,
"end": 5905
} | class ____ extends MessagePatternConverter {
private final String[] formats;
FormattedMessagePatternConverter(final String[] formats) {
this.formats = formats;
}
/**
* {@inheritDoc}
*/
@Override
public void format(final LogEvent event, final StringBuilder toAppendTo) {
final Message msg = event.getMessage();
if (msg instanceof StringBuilderFormattable) {
if (msg instanceof MultiFormatStringBuilderFormattable) {
((MultiFormatStringBuilderFormattable) msg).formatTo(formats, toAppendTo);
} else {
((StringBuilderFormattable) msg).formatTo(toAppendTo);
}
} else if (msg != null) {
toAppendTo.append(
msg instanceof MultiformatMessage
? ((MultiformatMessage) msg).getFormattedMessage(formats)
: msg.getFormattedMessage());
}
}
}
private static final | FormattedMessagePatternConverter |
java | spring-projects__spring-boot | module/spring-boot-actuator/src/test/java/org/springframework/boot/actuate/endpoint/ProducibleOperationArgumentResolverTests.java | {
"start": 3663,
"end": 4066
} | enum ____ implements Producible<WithDefault> {
ONE("one/one"),
TWO("two/two") {
@Override
public boolean isDefault() {
return true;
}
},
THREE("three/three");
private final MimeType mimeType;
WithDefault(String mimeType) {
this.mimeType = MimeType.valueOf(mimeType);
}
@Override
public MimeType getProducedMimeType() {
return this.mimeType;
}
}
| WithDefault |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/util/ExecutorServices.java | {
"start": 1032,
"end": 3636
} | class ____ {
private static final Logger LOGGER = StatusLogger.getLogger();
/**
* Shuts down the given {@link ExecutorService} in an orderly fashion. Disables new tasks from submission and then
* waits for existing tasks to terminate. Eventually cancels running tasks if too much time elapses.
* <p>
* If the timeout is 0, then a plain shutdown takes place.
* </p>
*
* @param executorService
* the pool to shutdown.
* @param timeout
* the maximum time to wait, or 0 to not wait for existing tasks to terminate.
* @param timeUnit
* the time unit of the timeout argument
* @param source
* use this string in any log messages.
* @return {@code true} if the given executor terminated and {@code false} if the timeout elapsed before
* termination.
*/
public static boolean shutdown(
final ExecutorService executorService, final long timeout, final TimeUnit timeUnit, final String source) {
if (executorService == null || executorService.isTerminated()) {
return true;
}
executorService.shutdown(); // Disable new tasks from being submitted
if (timeout > 0 && timeUnit == null) {
throw new IllegalArgumentException(String.format(
"%s can't shutdown %s when timeout = %,d and timeUnit = %s.",
source, executorService, timeout, timeUnit));
}
if (timeout > 0) {
try {
// Wait a while for existing tasks to terminate
if (!executorService.awaitTermination(timeout, timeUnit)) {
executorService.shutdownNow(); // Cancel currently executing tasks
// Wait a while for tasks to respond to being cancelled
if (!executorService.awaitTermination(timeout, timeUnit)) {
LOGGER.error(
"{} pool {} did not terminate after {} {}", source, executorService, timeout, timeUnit);
}
return false;
}
} catch (final InterruptedException ie) {
// (Re-)Cancel if current thread also interrupted
executorService.shutdownNow();
// Preserve interrupt status
Thread.currentThread().interrupt();
}
} else {
executorService.shutdown();
}
return true;
}
/** No-op method which can be invoked to ensure this | ExecutorServices |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/config/Configuration.java | {
"start": 1980,
"end": 7749
} | interface ____ extends Filterable {
/** Key for storing the Context properties. */
String CONTEXT_PROPERTIES = "ContextProperties";
/**
* Returns the configuration name.
*
* @return the name of the configuration.
*/
String getName();
/**
* Locates the appropriate LoggerConfig for a Logger name. This will remove tokens from the package name as
* necessary or return the root LoggerConfig if no other matches were found.
*
* @param name The Logger name.
* @return The located LoggerConfig.
*/
LoggerConfig getLoggerConfig(String name);
/**
* Returns the Appender with the specified name.
*
* @param <T> The expected Appender type.
* @param name The name of the Appender.
* @return the Appender with the specified name or null if the Appender cannot be located.
*/
<T extends Appender> T getAppender(String name);
/**
* Returns a Map containing all the Appenders and their name.
*
* @return A Map containing each Appender's name and the Appender object.
*/
Map<String, Appender> getAppenders();
void addAppender(final Appender appender);
Map<String, LoggerConfig> getLoggers();
void addLoggerAppender(Logger logger, Appender appender);
void addLoggerFilter(Logger logger, Filter filter);
void setLoggerAdditive(Logger logger, boolean additive);
void addLogger(final String name, final LoggerConfig loggerConfig);
void removeLogger(final String name);
/**
* Returns the list of packages to scan for plugins for this Configuration.
*
* @return the list of plugin packages.
* @since 2.1
*/
List<String> getPluginPackages();
Map<String, String> getProperties();
/**
* Returns the root Logger.
*
* @return the root Logger.
*/
LoggerConfig getRootLogger();
void addListener(ConfigurationListener listener);
void removeListener(ConfigurationListener listener);
StrSubstitutor getStrSubstitutor();
default StrSubstitutor getConfigurationStrSubstitutor() {
final StrSubstitutor defaultSubstitutor = getStrSubstitutor();
if (defaultSubstitutor == null) {
return new ConfigurationStrSubstitutor();
}
return new ConfigurationStrSubstitutor(defaultSubstitutor);
}
void createConfiguration(Node node, LogEvent event);
<T> T getComponent(String name);
void addComponent(String name, Object object);
void setAdvertiser(Advertiser advertiser);
Advertiser getAdvertiser();
boolean isShutdownHookEnabled();
long getShutdownTimeoutMillis();
ConfigurationScheduler getScheduler();
/**
* Returns the source of this configuration.
*
* @return the source of this configuration, never {@code null}, but may be
* {@link org.apache.logging.log4j.core.config.ConfigurationSource#NULL_SOURCE}
* or
* {@link org.apache.logging.log4j.core.config.ConfigurationSource#COMPOSITE_SOURCE}
*/
ConfigurationSource getConfigurationSource();
/**
* <p>
* Returns a list of descriptors of the custom levels defined in the current configuration. The returned list does
* <em>not</em> include custom levels that are defined in code with direct calls to {@link Level#forName(String, int)}.
* </p>
* <p>
* Note that the list does not include levels of previous configurations. For example, suppose a configuration
* contains custom levels A, B and C. The configuration is then modified to contain custom levels B, C and D. For
* the new configuration, this method will return only {B, C, D}, that is, only the custom levels defined in
* <em>this</em> configuration. The previously defined level A still exists (and can be obtained with
* {@link Level#getLevel(String)}), it is just not in the current configuration. {@link Level#values()} will return
* {A, B, C, D and the built-in levels}.
* </p>
*
* @return the custom levels defined in the current configuration
*/
List<CustomLevelConfig> getCustomLevels();
ScriptManager getScriptManager();
/**
* Returns the {@code AsyncLoggerConfigDelegate} shared by all
* {@code AsyncLoggerConfig} instances defined in this Configuration.
*
* @return the {@code AsyncLoggerConfigDelegate}
*/
AsyncLoggerConfigDelegate getAsyncLoggerConfigDelegate();
/**
* Returns the {@code AsyncWaitStrategyFactory} defined in this Configuration;
* this factory is used to create the LMAX disruptor {@code WaitStrategy} used
* by the disruptor ringbuffer for Async Loggers.
*
* @return the {@code AsyncWaitStrategyFactory}
* @since 2.17.3
*/
AsyncWaitStrategyFactory getAsyncWaitStrategyFactory();
/**
* Return the WatchManager.
*
* @return the WatchManager.
*/
WatchManager getWatchManager();
/*
* (non-Javadoc)
*
* @see
* org.apache.logging.log4j.core.config.ReliabilityStrategyFactory#getReliabilityStrategy(org.apache.logging.log4j
* .core.config.LoggerConfig)
*/
ReliabilityStrategy getReliabilityStrategy(LoggerConfig loggerConfig);
/**
* Returns the {@link NanoClock} instance for this configuration.
*
* @return the nano clock
*/
NanoClock getNanoClock();
/**
* Sets the {@link NanoClock} instance for this configuration.
*
* @param nanoClock the new nano clock for this configuration. Must be non-null.
*/
void setNanoClock(NanoClock nanoClock);
/**
* Gets the logger context.
*
* @return the logger context.
*/
LoggerContext getLoggerContext();
}
| Configuration |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsActionResponseTests.java | {
"start": 938,
"end": 2024
} | class ____ extends AbstractWireSerializingTestCase<Response> {
@Override
protected Response createTestInstance() {
int listSize = randomInt(10);
List<DatafeedConfig> datafeedList = new ArrayList<>(listSize);
for (int j = 0; j < listSize; j++) {
datafeedList.add(DatafeedConfigTests.createRandomizedDatafeedConfig(randomAlphaOfLength(10)));
}
return new Response(new QueryPage<>(datafeedList, datafeedList.size(), DatafeedConfig.RESULTS_FIELD));
}
@Override
protected Response mutateInstance(Response instance) {
return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929
}
@Override
protected Writeable.Reader<Response> instanceReader() {
return Response::new;
}
@Override
protected NamedWriteableRegistry getNamedWriteableRegistry() {
SearchModule searchModule = new SearchModule(Settings.EMPTY, Collections.emptyList());
return new NamedWriteableRegistry(searchModule.getNamedWriteables());
}
}
| GetDatafeedsActionResponseTests |
java | apache__avro | lang/java/idl/src/main/java/org/apache/avro/idl/IdlFile.java | {
"start": 1244,
"end": 5498
} | class ____ {
private final Object resolveLock = new Object();
private volatile ParseContext parseContext;
private Schema mainSchema;
private Protocol protocol;
private Map<String, Schema> namedSchemas;
private final List<String> warnings;
IdlFile(Protocol protocol, ParseContext context, List<String> warnings) {
this(context, null, protocol, warnings);
}
IdlFile(Schema mainSchema, ParseContext context, List<String> warnings) {
this(context, mainSchema, null, warnings);
}
private IdlFile(ParseContext context, Schema mainSchema, Protocol protocol, List<String> warnings) {
this.parseContext = context;
this.namedSchemas = new LinkedHashMap<>();
this.mainSchema = mainSchema;
this.protocol = protocol;
this.warnings = Collections.unmodifiableList(new ArrayList<>(warnings));
}
/**
* The (main) schema defined by the IDL file.
*/
public Schema getMainSchema() {
if (mainSchema == null) {
return null;
}
ensureSchemasAreResolved();
return mainSchema;
}
private void ensureSchemasAreResolved() {
if (parseContext != null) {
synchronized (resolveLock) {
if (parseContext != null) {
parseContext.commit();
List<Schema> schemas = parseContext.resolveAllSchemas();
schemas.forEach(schema -> namedSchemas.put(schema.getFullName(), schema));
if (mainSchema != null) {
mainSchema = parseContext.resolve(mainSchema);
}
if (protocol != null) {
protocol.setTypes(schemas);
Map<String, Protocol.Message> messages = protocol.getMessages();
for (Map.Entry<String, Protocol.Message> entry : messages.entrySet()) {
Protocol.Message oldValue = entry.getValue();
Protocol.Message newValue;
if (oldValue.isOneWay()) {
newValue = protocol.createMessage(oldValue.getName(), oldValue.getDoc(), oldValue,
parseContext.resolve(oldValue.getRequest()));
} else {
Schema request = parseContext.resolve(oldValue.getRequest());
Schema response = parseContext.resolve(oldValue.getResponse());
Schema errors = parseContext.resolve(oldValue.getErrors());
newValue = protocol.createMessage(oldValue.getName(), oldValue.getDoc(), oldValue, request, response,
errors);
}
entry.setValue(newValue);
}
}
}
}
}
}
/**
* The protocol defined by the IDL file.
*/
public Protocol getProtocol() {
if (protocol == null) {
return null;
}
ensureSchemasAreResolved();
return protocol;
}
public List<String> getWarnings() {
return warnings;
}
public List<String> getWarnings(String importFile) {
return warnings.stream()
.map(warning -> importFile + ' ' + Character.toLowerCase(warning.charAt(0)) + warning.substring(1))
.collect(Collectors.toList());
}
/**
* The named schemas defined by the IDL file, mapped by their full name.
*/
public Map<String, Schema> getNamedSchemas() {
ensureSchemasAreResolved();
return Collections.unmodifiableMap(namedSchemas);
}
/**
* Get a named schema defined by the IDL file, by name. The name can be a simple
* name in the default namespace of the IDL file (e.g., the namespace of the
* protocol), or a full name.
*
* @param name the full name of the schema, or a simple name
* @return the schema, or {@code null} if it does not exist
*/
public Schema getNamedSchema(String name) {
ensureSchemasAreResolved();
return namedSchemas.get(name);
}
// Visible for testing
String outputString() {
ensureSchemasAreResolved();
if (protocol != null) {
return protocol.toString();
}
if (mainSchema != null) {
return mainSchema.toString();
}
if (namedSchemas.isEmpty()) {
return "[]";
} else {
StringBuilder buffer = new StringBuilder();
for (Schema schema : namedSchemas.values()) {
buffer.append(',').append(schema);
}
buffer.append(']').setCharAt(0, '[');
return buffer.toString();
}
}
}
| IdlFile |
java | spring-projects__spring-framework | spring-beans/src/testFixtures/java/org/springframework/beans/testfixture/beans/factory/generator/deprecation/DeprecatedConstructor.java | {
"start": 872,
"end": 979
} | class ____ {
@Deprecated
public DeprecatedConstructor(Environment environment) {
}
}
| DeprecatedConstructor |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/configurers/HttpSecurityRequestMatchersTests.java | {
"start": 13713,
"end": 13820
} | class ____ {
@RequestMapping("/path")
String path() {
return "path";
}
}
}
}
| PathController |
java | resilience4j__resilience4j | resilience4j-vavr/src/main/java/io/github/resilience4j/retry/VavrRetry.java | {
"start": 858,
"end": 7448
} | interface ____ {
/**
* Creates a retryable supplier.
*
* @param retry the retry context
* @param supplier the original function
* @param <T> the type of results supplied by this supplier
* @return a retryable function
*/
static <T> CheckedFunction0<T> decorateCheckedSupplier(Retry retry,
CheckedFunction0<T> supplier) {
return () -> {
Retry.Context<T> context = retry.context();
do {
try {
T result = supplier.apply();
final boolean validationOfResult = context.onResult(result);
if (!validationOfResult) {
context.onComplete();
return result;
}
} catch (Exception exception) {
context.onError(exception);
}
} while (true);
};
}
/**
* Creates a retryable runnable.
*
* @param retry the retry context
* @param runnable the original runnable
* @return a retryable runnable
*/
static CheckedRunnable decorateCheckedRunnable(Retry retry, CheckedRunnable runnable) {
return () -> {
Retry.Context context = retry.context();
do {
try {
runnable.run();
context.onComplete();
break;
} catch (Exception exception) {
context.onError(exception);
}
} while (true);
};
}
/**
* Creates a retryable function.
*
* @param retry the retry context
* @param function the original function
* @param <T> the type of the input to the function
* @param <R> the result type of the function
* @return a retryable function
*/
static <T, R> CheckedFunction1<T, R> decorateCheckedFunction(Retry retry,
CheckedFunction1<T, R> function) {
return (T t) -> {
Retry.Context<R> context = retry.context();
do {
try {
R result = function.apply(t);
final boolean validationOfResult = context.onResult(result);
if (!validationOfResult) {
context.onComplete();
return result;
}
} catch (Exception exception) {
context.onError(exception);
}
} while (true);
};
}
/**
* Creates a retryable supplier.
*
* @param retry the retry context
* @param supplier the original function
* @param <T> the type of results supplied by this supplier
* @return a retryable function
*/
static <E extends Exception, T> Supplier<Either<E, T>> decorateEitherSupplier(Retry retry,
Supplier<Either<E, T>> supplier) {
return () -> {
Retry.Context<T> context = retry.context();
do {
Either<E, T> result = supplier.get();
if (result.isRight()) {
final boolean validationOfResult = context.onResult(result.get());
if (!validationOfResult) {
context.onComplete();
return result;
}
} else {
E exception = result.getLeft();
try {
context.onError(result.getLeft());
} catch (Exception e) {
return Either.left(exception);
}
}
} while (true);
};
}
/**
* Creates a retryable supplier.
*
* @param retry the retry context
* @param supplier the original function
* @param <T> the type of results supplied by this supplier
* @return a retryable function
*/
static <T> Supplier<Try<T>> decorateTrySupplier(Retry retry, Supplier<Try<T>> supplier) {
return () -> {
Retry.Context<T> context = retry.context();
do {
Try<T> result = supplier.get();
if (result.isSuccess()) {
final boolean validationOfResult = context.onResult(result.get());
if (!validationOfResult) {
context.onComplete();
return result;
}
} else {
Throwable cause = result.getCause();
if (cause instanceof Exception) {
try {
context.onError((Exception) result.getCause());
} catch (Exception e) {
return result;
}
} else {
return result;
}
}
} while (true);
};
}
/**
* Decorates and executes the decorated Supplier.
*
* @param checkedSupplier the original Supplier
* @param <T> the type of results supplied by this supplier
* @return the result of the decorated Supplier.
* @throws Throwable if something goes wrong applying this function to the given arguments
*/
static <T> T executeCheckedSupplier(Retry retry, CheckedFunction0<T> checkedSupplier) throws Throwable {
return decorateCheckedSupplier(retry, checkedSupplier).apply();
}
/**
* Decorates and executes the decorated Supplier.
*
* @param supplier the original Supplier
* @param <T> the type of results supplied by this supplier
* @return the result of the decorated Supplier.
*/
static <E extends Exception, T> Either<E, T> executeEitherSupplier(Retry retry, Supplier<Either<E, T>> supplier) {
return decorateEitherSupplier(retry, supplier).get();
}
/**
* Decorates and executes the decorated Supplier.
*
* @param supplier the original Supplier
* @param <T> the type of results supplied by this supplier
* @return the result of the decorated Supplier.
*/
static <T> Try<T> executeTrySupplier(Retry retry, Supplier<Try<T>> supplier) {
return decorateTrySupplier(retry, supplier).get();
}
}
| VavrRetry |
java | apache__avro | lang/java/avro/src/main/java/org/apache/avro/util/ClassUtils.java | {
"start": 3629,
"end": 4008
} | class ____ initializing it so we can distinguish between
// ClassNotFoundException and SecurityException (that may be thrown by the
// validator).
c = Class.forName(className, false, classLoader);
ClassSecurityValidator.validate(c);
} catch (ClassNotFoundException e) {
// Ignore and return null
}
}
return c;
}
}
| without |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleStats.java | {
"start": 1383,
"end": 12323
} | class ____ implements Writeable, ToXContentObject {
private final long retentionRun;
private final long retentionFailed;
private final long retentionTimedOut;
private final long retentionTimeMs;
private final Map<String, SnapshotPolicyStats> policyStats;
public static final ParseField RETENTION_RUNS = new ParseField("retention_runs");
public static final ParseField RETENTION_FAILED = new ParseField("retention_failed");
public static final ParseField RETENTION_TIMED_OUT = new ParseField("retention_timed_out");
public static final ParseField RETENTION_TIME = new ParseField("retention_deletion_time");
public static final ParseField RETENTION_TIME_MILLIS = new ParseField("retention_deletion_time_millis");
public static final ParseField POLICY_STATS = new ParseField("policy_stats");
public static final ParseField TOTAL_TAKEN = new ParseField("total_snapshots_taken");
public static final ParseField TOTAL_FAILED = new ParseField("total_snapshots_failed");
public static final ParseField TOTAL_DELETIONS = new ParseField("total_snapshots_deleted");
public static final ParseField TOTAL_DELETION_FAILURES = new ParseField("total_snapshot_deletion_failures");
@SuppressWarnings("unchecked")
private static final ConstructingObjectParser<SnapshotLifecycleStats, Void> PARSER = new ConstructingObjectParser<>(
"snapshot_policy_stats",
true,
a -> {
long runs = (long) a[0];
long failed = (long) a[1];
long timedOut = (long) a[2];
long timeMs = (long) a[3];
Map<String, SnapshotPolicyStats> policyStatsMap = ((List<SnapshotPolicyStats>) a[4]).stream()
.collect(Collectors.toMap(m -> m.policyId, Function.identity()));
return new SnapshotLifecycleStats(runs, failed, timedOut, timeMs, Collections.unmodifiableMap(policyStatsMap));
}
);
static {
PARSER.declareLong(ConstructingObjectParser.constructorArg(), RETENTION_RUNS);
PARSER.declareLong(ConstructingObjectParser.constructorArg(), RETENTION_FAILED);
PARSER.declareLong(ConstructingObjectParser.constructorArg(), RETENTION_TIMED_OUT);
PARSER.declareLong(ConstructingObjectParser.constructorArg(), RETENTION_TIME_MILLIS);
PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), SnapshotPolicyStats.PARSER, POLICY_STATS);
}
public SnapshotLifecycleStats() {
this(0, 0, 0, 0, Map.of());
}
// public for testing
public SnapshotLifecycleStats(
long retentionRuns,
long retentionFailed,
long retentionTimedOut,
long retentionTimeMs,
Map<String, SnapshotPolicyStats> policyStats
) {
this.retentionRun = retentionRuns;
this.retentionFailed = retentionFailed;
this.retentionTimedOut = retentionTimedOut;
this.retentionTimeMs = retentionTimeMs;
this.policyStats = Collections.unmodifiableMap(policyStats);
}
private SnapshotLifecycleStats(Map<String, SnapshotPolicyStats> policyStats) {
this(0, 0, 0, 0, policyStats);
}
public SnapshotLifecycleStats(StreamInput in) throws IOException {
this.policyStats = in.readImmutableMap(SnapshotPolicyStats::new);
this.retentionRun = in.readVLong();
this.retentionFailed = in.readVLong();
this.retentionTimedOut = in.readVLong();
this.retentionTimeMs = in.readVLong();
}
public static SnapshotLifecycleStats parse(XContentParser parser) {
return PARSER.apply(parser, null);
}
public SnapshotLifecycleStats merge(SnapshotLifecycleStats other) {
HashMap<String, SnapshotPolicyStats> newPolicyStats = new HashMap<>(this.policyStats);
// Merges the per-run stats (the stats in "other") with the stats already present
other.policyStats.forEach((policyId, perRunPolicyStats) -> {
newPolicyStats.compute(policyId, (k, existingPolicyMetrics) -> {
if (existingPolicyMetrics == null) {
return perRunPolicyStats;
} else {
return existingPolicyMetrics.merge(perRunPolicyStats);
}
});
});
return new SnapshotLifecycleStats(
this.retentionRun + other.retentionRun,
this.retentionFailed + other.retentionFailed,
this.retentionTimedOut + other.retentionTimedOut,
this.retentionTimeMs + other.retentionTimeMs,
Collections.unmodifiableMap(newPolicyStats)
);
}
public SnapshotLifecycleStats removePolicy(String policyId) {
Map<String, SnapshotPolicyStats> policyStatsCopy = new HashMap<>(this.policyStats);
policyStatsCopy.remove(policyId);
return new SnapshotLifecycleStats(
this.retentionRun,
this.retentionFailed,
this.retentionTimedOut,
this.retentionTimeMs,
Collections.unmodifiableMap(policyStatsCopy)
);
}
/**
* @return a map of per-policy stats for each SLM policy
*/
public Map<String, SnapshotPolicyStats> getMetrics() {
return this.policyStats;
}
/**
* Return new stats with number of times SLM retention has been run incremented
*/
public SnapshotLifecycleStats withRetentionRunIncremented() {
return new SnapshotLifecycleStats(retentionRun + 1, retentionFailed, retentionTimedOut, retentionTimeMs, policyStats);
}
/**
* Return new stats with number of times SLM retention has failed incremented
*/
public SnapshotLifecycleStats withRetentionFailedIncremented() {
return new SnapshotLifecycleStats(retentionRun, retentionFailed + 1, retentionTimedOut, retentionTimeMs, policyStats);
}
/**
* Return new stats the number of times that SLM retention timed out due to the max delete time
* window being exceeded incremented
*/
public SnapshotLifecycleStats withRetentionTimedOutIncremented() {
return new SnapshotLifecycleStats(retentionRun, retentionFailed, retentionTimedOut + 1, retentionTimeMs, policyStats);
}
/**
* Return new stats with the amount of time taken for deleting snapshots during SLM retention updated
*/
public SnapshotLifecycleStats withDeletionTimeUpdated(TimeValue elapsedTime) {
final long newRetentionTimeMs = retentionTimeMs + elapsedTime.millis();
return new SnapshotLifecycleStats(retentionRun, retentionFailed, retentionTimedOut, newRetentionTimeMs, policyStats);
}
/**
* Return new stats with the per-policy snapshot taken count for the given policy id incremented
*/
public SnapshotLifecycleStats withTakenIncremented(String slmPolicy) {
return merge(new SnapshotLifecycleStats(Map.of(slmPolicy, SnapshotPolicyStats.taken(slmPolicy))));
}
/**
* Return new stats with the per-policy snapshot failure count for the given policy id incremented
*/
public SnapshotLifecycleStats withFailedIncremented(String slmPolicy) {
return merge(new SnapshotLifecycleStats(Map.of(slmPolicy, SnapshotPolicyStats.failed(slmPolicy))));
}
/**
* Return new stats with the per-policy snapshot deleted count for the given policy id incremented
*/
public SnapshotLifecycleStats withDeletedIncremented(String slmPolicy) {
return merge(new SnapshotLifecycleStats(Map.of(slmPolicy, SnapshotPolicyStats.deleted(slmPolicy))));
}
/**
* Return new stats with the per-policy snapshot deletion failure count for the given policy id incremented
*/
public SnapshotLifecycleStats withDeleteFailureIncremented(String slmPolicy) {
return merge(new SnapshotLifecycleStats(Map.of(slmPolicy, SnapshotPolicyStats.deleteFailure(slmPolicy))));
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeMap(policyStats, StreamOutput::writeWriteable);
out.writeVLong(retentionRun);
out.writeVLong(retentionFailed);
out.writeVLong(retentionTimedOut);
out.writeVLong(retentionTimeMs);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(RETENTION_RUNS.getPreferredName(), this.retentionRun);
builder.field(RETENTION_FAILED.getPreferredName(), this.retentionFailed);
builder.field(RETENTION_TIMED_OUT.getPreferredName(), this.retentionTimedOut);
TimeValue retentionTime = TimeValue.timeValueMillis(this.retentionTimeMs);
builder.field(RETENTION_TIME.getPreferredName(), retentionTime);
builder.field(RETENTION_TIME_MILLIS.getPreferredName(), retentionTime.millis());
List<SnapshotPolicyStats> metrics = getMetrics().values()
.stream()
.sorted(Comparator.comparing(SnapshotPolicyStats::getPolicyId)) // maintain a consistent order when serializing
.toList();
long totalTaken = metrics.stream().mapToLong(s -> s.snapshotsTaken).sum();
long totalFailed = metrics.stream().mapToLong(s -> s.snapshotsFailed).sum();
long totalDeleted = metrics.stream().mapToLong(s -> s.snapshotsDeleted).sum();
long totalDeleteFailures = metrics.stream().mapToLong(s -> s.snapshotDeleteFailures).sum();
builder.field(TOTAL_TAKEN.getPreferredName(), totalTaken);
builder.field(TOTAL_FAILED.getPreferredName(), totalFailed);
builder.field(TOTAL_DELETIONS.getPreferredName(), totalDeleted);
builder.field(TOTAL_DELETION_FAILURES.getPreferredName(), totalDeleteFailures);
builder.startArray(POLICY_STATS.getPreferredName());
for (SnapshotPolicyStats stats : metrics) {
builder.startObject();
stats.toXContent(builder, params);
builder.endObject();
}
builder.endArray();
builder.endObject();
return builder;
}
@Override
public int hashCode() {
return Objects.hash(retentionRun, retentionFailed, retentionTimedOut, retentionTimeMs, policyStats);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (obj.getClass() != getClass()) {
return false;
}
SnapshotLifecycleStats other = (SnapshotLifecycleStats) obj;
return Objects.equals(retentionRun, other.retentionRun)
&& Objects.equals(retentionFailed, other.retentionFailed)
&& Objects.equals(retentionTimedOut, other.retentionTimedOut)
&& Objects.equals(retentionTimeMs, other.retentionTimeMs)
&& Objects.equals(policyStats, other.policyStats);
}
@Override
public String toString() {
return Strings.toString(this);
}
public static | SnapshotLifecycleStats |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/monitor/os/OsStats.java | {
"start": 4128,
"end": 6976
} | class ____ implements Writeable, ToXContentFragment {
private static final TransportVersion AVAILABLE_PROCESSORS_TRANSPORT_VERSION = TransportVersion.fromName(
"available_processors_in_os_stats"
);
private final short percent;
private final double[] loadAverage;
private final int availableProcessors;
public Cpu(short systemCpuPercent, double[] systemLoadAverage, int availableProcessors) {
this.percent = systemCpuPercent;
this.loadAverage = systemLoadAverage;
this.availableProcessors = availableProcessors;
}
public Cpu(StreamInput in) throws IOException {
this.percent = in.readShort();
if (in.readBoolean()) {
this.loadAverage = in.readDoubleArray();
} else {
this.loadAverage = null;
}
this.availableProcessors = in.getTransportVersion().supports(AVAILABLE_PROCESSORS_TRANSPORT_VERSION) ? in.readInt() : 0;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeShort(percent);
if (loadAverage == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
out.writeDoubleArray(loadAverage);
}
if (out.getTransportVersion().supports(AVAILABLE_PROCESSORS_TRANSPORT_VERSION)) {
out.writeInt(availableProcessors);
}
}
public short getPercent() {
return percent;
}
public double[] getLoadAverage() {
return loadAverage;
}
public int getAvailableProcessors() {
return availableProcessors;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.CPU);
builder.field(Fields.PERCENT, getPercent());
if (getLoadAverage() != null && Arrays.stream(getLoadAverage()).anyMatch(load -> load != -1)) {
builder.startObject(Fields.LOAD_AVERAGE);
if (getLoadAverage()[0] != -1) {
builder.field(Fields.LOAD_AVERAGE_1M, getLoadAverage()[0]);
}
if (getLoadAverage()[1] != -1) {
builder.field(Fields.LOAD_AVERAGE_5M, getLoadAverage()[1]);
}
if (getLoadAverage()[2] != -1) {
builder.field(Fields.LOAD_AVERAGE_15M, getLoadAverage()[2]);
}
builder.endObject();
}
builder.field(Fields.AVAILABLE_PROCESSORS, getAvailableProcessors());
builder.endObject();
return builder;
}
}
public static | Cpu |
java | resilience4j__resilience4j | resilience4j-spring-boot2/src/test/java/io/github/resilience4j/circuitbreaker/CircuitBreakerActuatorTest.java | {
"start": 1670,
"end": 7237
} | class ____ {
@Rule
public WireMockRule wireMockRule = new WireMockRule(8090);
@Autowired
CircuitBreakerRegistry circuitBreakerRegistry;
@Autowired
private TestRestTemplate restTemplate;
@Test
public void testUpdateCircuitBreakerState() {
// given
HttpHeaders headers = new HttpHeaders();
headers.setContentType(MediaType.APPLICATION_JSON);
// when
HttpEntity<String> forceOpenRequest = new HttpEntity<>("{\"updateState\":\"FORCE_OPEN\"}", headers);
final ResponseEntity<CircuitBreakerUpdateStateResponse> backendAState = restTemplate
.postForEntity("/actuator/circuitbreakers/backendA", forceOpenRequest, CircuitBreakerUpdateStateResponse.class);
// then
assertThat(backendAState.getBody()).isNotNull();
assertThat(backendAState.getBody().getCurrentState()).isEqualTo(CircuitBreaker.State.FORCED_OPEN.toString());
assertThat(circuitBreakerRegistry.circuitBreaker("backendA").getState()).isEqualTo(CircuitBreaker.State.FORCED_OPEN);
// when sending non valid state change
HttpEntity<String> nonValid = new HttpEntity<>("{\"updateState\":\"BLA_BLA\"}", headers);
final ResponseEntity<CircuitBreakerUpdateStateResponse> nonValidResponse = restTemplate
.postForEntity("/actuator/circuitbreakers/backendA", nonValid, CircuitBreakerUpdateStateResponse.class);
// then
assertThat(nonValidResponse.getBody()).isNotNull();
assertThat(nonValidResponse.getStatusCode()).isEqualTo(HttpStatus.BAD_REQUEST);
// when
HttpEntity<String> disableRequest = new HttpEntity<>("{\"updateState\":\"DISABLE\"}", headers);
final ResponseEntity<CircuitBreakerUpdateStateResponse> backendAStateDisabled = restTemplate
.postForEntity("/actuator/circuitbreakers/backendA", disableRequest, CircuitBreakerUpdateStateResponse.class);
// then
assertThat(backendAStateDisabled.getBody()).isNotNull();
assertThat(backendAStateDisabled.getBody().getCurrentState()).isEqualTo(CircuitBreaker.State.DISABLED.toString());
assertThat(circuitBreakerRegistry.circuitBreaker("backendA").getState()).isEqualTo(CircuitBreaker.State.DISABLED);
// when
HttpEntity<String> closeRequest = new HttpEntity<>("{\"updateState\":\"CLOSE\"}", headers);
final ResponseEntity<CircuitBreakerUpdateStateResponse> backendAStateClosed = restTemplate
.postForEntity("/actuator/circuitbreakers/backendA", closeRequest, CircuitBreakerUpdateStateResponse.class);
// then
assertThat(backendAStateClosed.getBody()).isNotNull();
assertThat(backendAStateClosed.getBody().getCurrentState()).isEqualTo(CircuitBreaker.State.CLOSED.toString());
assertThat(circuitBreakerRegistry.circuitBreaker("backendA").getState()).isEqualTo(CircuitBreaker.State.CLOSED);
}
@Test
public void testCircuitBreakerDetails() {
// given
HttpHeaders headers = new HttpHeaders();
headers.setContentType(MediaType.APPLICATION_JSON);
// when
HttpEntity<String> forceOpenRequest = new HttpEntity<>("{\"updateState\":\"CLOSE\"}", headers);
final ResponseEntity<CircuitBreakerUpdateStateResponse> backendAState = restTemplate
.postForEntity("/actuator/circuitbreakers/backendA", forceOpenRequest, CircuitBreakerUpdateStateResponse.class);
// then
assertThat(backendAState.getBody()).isNotNull();
assertThat(backendAState.getBody().getCurrentState()).isEqualTo(CircuitBreaker.State.CLOSED.toString());
assertThat(circuitBreakerRegistry.circuitBreaker("backendA").getState()).isEqualTo(CircuitBreaker.State.CLOSED);
// when get circuit breakers
final ResponseEntity<CircuitBreakerEndpointResponse> circuitBreakersResponse = restTemplate
.getForEntity("/actuator/circuitbreakers", CircuitBreakerEndpointResponse.class);
// then
assertThat(circuitBreakersResponse.getStatusCode()).isEqualTo(HttpStatus.OK);
assertThat(circuitBreakersResponse.getBody()).isNotNull();
assertThat(circuitBreakersResponse.getBody().getCircuitBreakers()).isNotNull();
final CircuitBreakerDetails cbDetailsA = circuitBreakersResponse.getBody().getCircuitBreakers().get("backendA");
final CircuitBreaker cbA = circuitBreakerRegistry.circuitBreaker("backendA");
final CircuitBreaker.Metrics metrics = cbA.getMetrics();
final CircuitBreakerConfig config = cbA.getCircuitBreakerConfig();
assertThat(cbDetailsA.getFailureRate()).isEqualTo(metrics.getFailureRate() + "%");
assertThat(cbDetailsA.getFailureRateThreshold()).isEqualTo(config.getFailureRateThreshold() + "%");
assertThat(cbDetailsA.getSlowCallRate()).isEqualTo(metrics.getSlowCallRate() + "%");
assertThat(cbDetailsA.getSlowCallRateThreshold()).isEqualTo(config.getSlowCallRateThreshold() + "%");
assertThat(cbDetailsA.getBufferedCalls()).isEqualTo(metrics.getNumberOfBufferedCalls());
assertThat(cbDetailsA.getSlowCalls()).isEqualTo(metrics.getNumberOfSlowCalls());
assertThat(cbDetailsA.getSlowFailedCalls()).isEqualTo(metrics.getNumberOfSlowFailedCalls());
assertThat(cbDetailsA.getFailedCalls()).isEqualTo(metrics.getNumberOfFailedCalls());
assertThat(cbDetailsA.getNotPermittedCalls()).isEqualTo(metrics.getNumberOfNotPermittedCalls());
assertThat(cbDetailsA.getState()).isEqualTo(cbA.getState());
}
}
| CircuitBreakerActuatorTest |
java | netty__netty | codec-protobuf/src/main/java/io/netty/handler/codec/protobuf/ProtobufDecoderNano.java | {
"start": 2461,
"end": 3456
} | class ____ extends MessageToMessageDecoder<ByteBuf> {
private final Class<? extends MessageNano> clazz;
/**
* Creates a new instance.
*/
public ProtobufDecoderNano(Class<? extends MessageNano> clazz) {
super(ByteBuf.class);
this.clazz = ObjectUtil.checkNotNull(clazz, "You must provide a Class");
}
@Override
protected void decode(ChannelHandlerContext ctx, ByteBuf msg, List<Object> out)
throws Exception {
final byte[] array;
final int offset;
final int length = msg.readableBytes();
if (msg.hasArray()) {
array = msg.array();
offset = msg.arrayOffset() + msg.readerIndex();
} else {
array = ByteBufUtil.getBytes(msg, msg.readerIndex(), length, false);
offset = 0;
}
MessageNano prototype = clazz.getConstructor().newInstance();
out.add(MessageNano.mergeFrom(prototype, array, offset, length));
}
}
| ProtobufDecoderNano |
java | elastic__elasticsearch | build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/MutedTestsBuildService.java | {
"start": 4240,
"end": 4623
} | class ____ parameterized tests
excludes.add(mutedTest.getClassName() + "." + method + " *");
}
}
} else if (mutedTest.getClassName() != null) {
excludes.add(mutedTest.getClassName() + ".*");
}
}
}
return excludes;
}
public | of |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/AbstractMockChecker.java | {
"start": 2950,
"end": 3390
} | interface ____ {
/**
* If the given type should not be mocked, provide an explanation why.
*
* @param type the type that is being mocked
* @return the reason it should not be mocked
*/
Optional<Reason> forbidReason(Type type, VisitorState state);
}
/**
* An explanation of what type should not be mocked, and the reason why.
*
* @param unmockableClass A Type object representing the | MockForbidder |
java | elastic__elasticsearch | test/framework/src/main/java/org/elasticsearch/action/MockResolvedIndices.java | {
"start": 686,
"end": 1333
} | class ____ extends ResolvedIndices {
public MockResolvedIndices(
Map<String, OriginalIndices> remoteClusterIndices,
OriginalIndices localIndices,
Map<Index, IndexMetadata> localIndexMetadata,
SearchContextId searchContextId
) {
super(remoteClusterIndices, localIndices, localIndexMetadata, searchContextId);
}
public MockResolvedIndices(
Map<String, OriginalIndices> remoteClusterIndices,
OriginalIndices localIndices,
Map<Index, IndexMetadata> localIndexMetadata
) {
super(remoteClusterIndices, localIndices, localIndexMetadata);
}
}
| MockResolvedIndices |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/metrics/util/MetricUtilsTest.java | {
"start": 2759,
"end": 16630
} | class ____ {
/** Container for local objects to keep them from gc runs. */
private final List<Object> referencedObjects = new ArrayList<>();
@AfterEach
void cleanupReferencedObjects() {
referencedObjects.clear();
}
/**
* Tests that the {@link MetricUtils#startRemoteMetricsRpcService(Configuration, String, String,
* RpcSystem)} respects the given {@link MetricOptions#QUERY_SERVICE_THREAD_PRIORITY}.
*/
@Test
void testStartMetricActorSystemRespectsThreadPriority() throws Exception {
final Configuration configuration = new Configuration();
final int expectedThreadPriority = 3;
configuration.set(MetricOptions.QUERY_SERVICE_THREAD_PRIORITY, expectedThreadPriority);
final RpcService rpcService =
MetricUtils.startRemoteMetricsRpcService(
configuration, "localhost", null, RpcSystem.load());
try {
final int threadPriority =
rpcService
.getScheduledExecutor()
.schedule(
() -> Thread.currentThread().getPriority(), 0, TimeUnit.SECONDS)
.get();
assertThat(threadPriority).isEqualTo(expectedThreadPriority);
} finally {
rpcService.closeAsync().get();
}
}
@Test
void testNonHeapMetricsCompleteness() {
final InterceptingOperatorMetricGroup nonHeapMetrics =
new InterceptingOperatorMetricGroup();
MetricUtils.instantiateNonHeapMemoryMetrics(nonHeapMetrics);
assertThat(nonHeapMetrics.get(MetricNames.MEMORY_USED)).isNotNull();
assertThat(nonHeapMetrics.get(MetricNames.MEMORY_COMMITTED)).isNotNull();
assertThat(nonHeapMetrics.get(MetricNames.MEMORY_MAX)).isNotNull();
}
@Test
void testMetaspaceCompleteness() {
assertThat(hasMetaspaceMemoryPool())
.withFailMessage("Requires JVM with Metaspace memory pool")
.isTrue();
final InterceptingOperatorMetricGroup metaspaceMetrics =
new InterceptingOperatorMetricGroup() {
@Override
public MetricGroup addGroup(String name) {
return this;
}
};
MetricUtils.instantiateMetaspaceMemoryMetrics(metaspaceMetrics);
assertThat(metaspaceMetrics.get(MetricNames.MEMORY_USED)).isNotNull();
assertThat(metaspaceMetrics.get(MetricNames.MEMORY_COMMITTED)).isNotNull();
assertThat(metaspaceMetrics.get(MetricNames.MEMORY_MAX)).isNotNull();
}
@Test
public void testGcMetricCompleteness() {
Map<String, InterceptingOperatorMetricGroup> addedGroups = new HashMap<>();
InterceptingOperatorMetricGroup gcGroup =
new InterceptingOperatorMetricGroup() {
@Override
public MetricGroup addGroup(String name) {
return addedGroups.computeIfAbsent(
name, k -> new InterceptingOperatorMetricGroup());
}
};
List<GarbageCollectorMXBean> garbageCollectors = new ArrayList<>();
garbageCollectors.add(new TestGcBean("gc1", 100, 500));
garbageCollectors.add(new TestGcBean("gc2", 50, 250));
MetricUtils.instantiateGarbageCollectorMetrics(gcGroup, garbageCollectors);
assertThat(addedGroups).containsOnlyKeys("gc1", "gc2", "All");
// Make sure individual collector metrics are correct
validateCollectorMetric(addedGroups.get("gc1"), 100, 500L);
validateCollectorMetric(addedGroups.get("gc2"), 50L, 250L);
// Make sure all/total collector metrics are correct
validateCollectorMetric(addedGroups.get("All"), 150L, 750L);
}
private static void validateCollectorMetric(
InterceptingOperatorMetricGroup group, long count, long time) {
assertThat(((Gauge) group.get("Count")).getValue()).isEqualTo(count);
assertThat(((Gauge) group.get("Time")).getValue()).isEqualTo(time);
MeterView perSecond = ((MeterView) group.get("TimeMsPerSecond"));
perSecond.update();
assertThat(perSecond.getRate()).isGreaterThan(0.);
}
@Test
void testHeapMetricsCompleteness() {
final InterceptingOperatorMetricGroup heapMetrics = new InterceptingOperatorMetricGroup();
MetricUtils.instantiateHeapMemoryMetrics(heapMetrics);
assertThat(heapMetrics.get(MetricNames.MEMORY_USED)).isNotNull();
assertThat(heapMetrics.get(MetricNames.MEMORY_COMMITTED)).isNotNull();
assertThat(heapMetrics.get(MetricNames.MEMORY_MAX)).isNotNull();
}
@Test
void testFileDescriptorMetricsCompleteness() {
final InterceptingOperatorMetricGroup heapMetrics = new InterceptingOperatorMetricGroup();
MetricUtils.instantiateFileDescriptorMetrics(heapMetrics);
assertThat(heapMetrics.get(MetricNames.FILE_DESCRIPTOR_MAX)).isNotNull();
assertThat(heapMetrics.get(MetricNames.FILE_DESCRIPTOR_OPEN)).isNotNull();
}
/**
* Tests that heap/non-heap metrics do not rely on a static MemoryUsage instance.
*
* <p>We can only check this easily for the currently used heap memory, so we use it this as a
* proxy for testing the functionality in general.
*/
@Test
void testHeapMetricUsageNotStatic() throws Exception {
final InterceptingOperatorMetricGroup heapMetrics = new InterceptingOperatorMetricGroup();
MetricUtils.instantiateHeapMemoryMetrics(heapMetrics);
@SuppressWarnings("unchecked")
final Gauge<Long> used = (Gauge<Long>) heapMetrics.get(MetricNames.MEMORY_USED);
runUntilMetricChanged("Heap", 10, () -> new byte[1024 * 1024 * 8], used);
}
@Test
void testMetaspaceMetricUsageNotStatic() throws Exception {
assertThat(hasMetaspaceMemoryPool())
.withFailMessage("Requires JVM with Metaspace memory pool")
.isTrue();
final InterceptingOperatorMetricGroup metaspaceMetrics =
new InterceptingOperatorMetricGroup() {
@Override
public MetricGroup addGroup(String name) {
return this;
}
};
MetricUtils.instantiateMetaspaceMemoryMetrics(metaspaceMetrics);
@SuppressWarnings("unchecked")
final Gauge<Long> used = (Gauge<Long>) metaspaceMetrics.get(MetricNames.MEMORY_USED);
runUntilMetricChanged("Metaspace", 10, MetricUtilsTest::redefineDummyClass, used);
}
@Test
void testNonHeapMetricUsageNotStatic() throws Exception {
final InterceptingOperatorMetricGroup nonHeapMetrics =
new InterceptingOperatorMetricGroup();
MetricUtils.instantiateNonHeapMemoryMetrics(nonHeapMetrics);
@SuppressWarnings("unchecked")
final Gauge<Long> used = (Gauge<Long>) nonHeapMetrics.get(MetricNames.MEMORY_USED);
runUntilMetricChanged("Non-heap", 10, MetricUtilsTest::redefineDummyClass, used);
}
@Test
void testManagedMemoryMetricsInitialization() throws MemoryAllocationException, FlinkException {
final int maxMemorySize = 16284;
final int numberOfAllocatedPages = 2;
final int pageSize = 4096;
final Object owner = new Object();
final MemoryManager memoryManager = MemoryManager.create(maxMemorySize, pageSize);
memoryManager.allocatePages(owner, numberOfAllocatedPages);
final TaskManagerServices taskManagerServices =
new TaskManagerServicesBuilder()
.setTaskSlotTable(
new TestingTaskSlotTable.TestingTaskSlotTableBuilder<Task>()
.memoryManagerGetterReturns(memoryManager)
.allActiveSlotAllocationIds(
() -> Sets.newHashSet(new AllocationID()))
.build())
.setManagedMemorySize(maxMemorySize)
.build();
try {
List<String> actualSubGroupPath = new ArrayList<>();
final InterceptingOperatorMetricGroup metricGroup =
new InterceptingOperatorMetricGroup() {
@Override
public MetricGroup addGroup(String name) {
actualSubGroupPath.add(name);
return this;
}
};
MetricUtils.instantiateFlinkMemoryMetricGroup(
metricGroup,
taskManagerServices.getTaskSlotTable(),
taskManagerServices::getManagedMemorySize);
Gauge<Number> usedMetric = (Gauge<Number>) metricGroup.get("Used");
Gauge<Number> maxMetric = (Gauge<Number>) metricGroup.get("Total");
assertThat(usedMetric.getValue().intValue())
.isEqualTo(numberOfAllocatedPages * pageSize);
assertThat(maxMetric.getValue().intValue()).isEqualTo(maxMemorySize);
assertThat(actualSubGroupPath)
.containsAnyElementsOf(
Arrays.asList(
METRIC_GROUP_FLINK,
METRIC_GROUP_MEMORY,
METRIC_GROUP_MANAGED_MEMORY));
} finally {
taskManagerServices.shutDown();
}
}
@Test
void testTruncateOperatorName() {
// test operator name is null
assertThat(MetricUtils.truncateOperatorName(null)).isNull();
// test operator name length less than 80
final String operatorNameLess = "testOperatorName";
assertThat(MetricUtils.truncateOperatorName(operatorNameLess)).isEqualTo(operatorNameLess);
// test operator name length less than 80 and end with : Writer
final String operatorNameLessEndWithWriter = "testOperatorName: Writer";
assertThat(MetricUtils.truncateOperatorName(operatorNameLessEndWithWriter))
.isEqualTo(operatorNameLessEndWithWriter);
// test operator name length less than 80 and end with : Committer
final String operatorNameLessEndWithCommitter = "testOperatorName: Committer";
assertThat(MetricUtils.truncateOperatorName(operatorNameLessEndWithCommitter))
.isEqualTo(operatorNameLessEndWithCommitter);
// test operator name length less than 80 and contains with : Writer
final String operatorNameLessAndContainsWriter = "test: WriterOperatorName";
assertThat(MetricUtils.truncateOperatorName(operatorNameLessAndContainsWriter))
.isEqualTo(operatorNameLessAndContainsWriter);
// test operator name length less than 80 and contains with : Committer
final String operatorNameLessAndContainsCommitter = "test: CommitterOperatorName";
assertThat(MetricUtils.truncateOperatorName(operatorNameLessAndContainsCommitter))
.isEqualTo(operatorNameLessAndContainsCommitter);
// test operator name length more than 80
final String operatorNameMore =
"testLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongOperatorName";
final String expectedOperatorNameMore =
"testLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongLong";
assertThat(MetricUtils.truncateOperatorName(operatorNameMore))
.isEqualTo(expectedOperatorNameMore);
// test operator name length more than 80 and end with : Writer
final String operatorNameMoreEndWithWriter =
"testLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongOperatorName: Writer";
final String expectedOperatorNameMoreEndWithWriter =
"testLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongLong: Writer";
assertThat(MetricUtils.truncateOperatorName(operatorNameMoreEndWithWriter))
.isEqualTo(expectedOperatorNameMoreEndWithWriter);
// test operator name length more than 80 and end with : Committer
final String operatorNameMoreEndWithCommitter =
"testLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongOperatorName: Committer";
final String expectedOperatorNameMoreEndWithCommitter =
"testLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongL: Committer";
assertThat(MetricUtils.truncateOperatorName(operatorNameMoreEndWithCommitter))
.isEqualTo(expectedOperatorNameMoreEndWithCommitter);
// test operator name length more than 80 and contains with : Writer
final String operatorNameMoreAndContainsWriter =
"testLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongLong: WriterOperatorName";
assertThat(MetricUtils.truncateOperatorName(operatorNameMoreAndContainsWriter))
.isEqualTo(expectedOperatorNameMore);
// test operator name length more than 80 and contains with : Committer
final String operatorNameMoreAndContainsCommitter =
"testLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongLong: CommitterOperatorName";
assertThat(MetricUtils.truncateOperatorName(operatorNameMoreAndContainsCommitter))
.isEqualTo(expectedOperatorNameMore);
}
// --------------- utility methods and classes ---------------
/**
* An extreme simple | MetricUtilsTest |
java | apache__camel | components/camel-jsch/src/generated/java/org/apache/camel/component/scp/ScpEndpointUriFactory.java | {
"start": 513,
"end": 3579
} | class ____ extends org.apache.camel.support.component.EndpointUriFactorySupport implements EndpointUriFactory {
private static final String BASE = ":host:port/directoryName";
private static final Set<String> PROPERTY_NAMES;
private static final Set<String> SECRET_PROPERTY_NAMES;
private static final Map<String, String> MULTI_VALUE_PREFIXES;
static {
Set<String> props = new HashSet<>(27);
props.add("allowNullBody");
props.add("browseLimit");
props.add("checksumFileAlgorithm");
props.add("chmod");
props.add("ciphers");
props.add("connectTimeout");
props.add("directoryName");
props.add("disconnect");
props.add("disconnectOnBatchComplete");
props.add("fileName");
props.add("flatten");
props.add("host");
props.add("jailStartingDirectory");
props.add("knownHostsFile");
props.add("lazyStartProducer");
props.add("moveExistingFileStrategy");
props.add("password");
props.add("port");
props.add("preferredAuthentications");
props.add("privateKeyBytes");
props.add("privateKeyFile");
props.add("privateKeyFilePassphrase");
props.add("soTimeout");
props.add("strictHostKeyChecking");
props.add("timeout");
props.add("useUserKnownHostsFile");
props.add("username");
PROPERTY_NAMES = Collections.unmodifiableSet(props);
Set<String> secretProps = new HashSet<>(7);
secretProps.add("knownHostsFile");
secretProps.add("password");
secretProps.add("preferredAuthentications");
secretProps.add("privateKeyBytes");
secretProps.add("privateKeyFile");
secretProps.add("privateKeyFilePassphrase");
secretProps.add("username");
SECRET_PROPERTY_NAMES = Collections.unmodifiableSet(secretProps);
MULTI_VALUE_PREFIXES = Collections.emptyMap();
}
@Override
public boolean isEnabled(String scheme) {
return "scp".equals(scheme);
}
@Override
public String buildUri(String scheme, Map<String, Object> properties, boolean encode) throws URISyntaxException {
String syntax = scheme + BASE;
String uri = syntax;
Map<String, Object> copy = new HashMap<>(properties);
uri = buildPathParameter(syntax, uri, "host", null, true, copy);
uri = buildPathParameter(syntax, uri, "port", null, false, copy);
uri = buildPathParameter(syntax, uri, "directoryName", null, false, copy);
uri = buildQueryParameters(uri, copy, encode);
return uri;
}
@Override
public Set<String> propertyNames() {
return PROPERTY_NAMES;
}
@Override
public Set<String> secretPropertyNames() {
return SECRET_PROPERTY_NAMES;
}
@Override
public Map<String, String> multiValuePrefixes() {
return MULTI_VALUE_PREFIXES;
}
@Override
public boolean isLenientProperties() {
return false;
}
}
| ScpEndpointUriFactory |
java | eclipse-vertx__vert.x | vertx-core/src/main/java/io/vertx/core/net/TrustOptions.java | {
"start": 722,
"end": 2399
} | interface ____ {
/**
* @return a copy of these options
*/
TrustOptions copy();
/**
* Create and return the trust manager factory for these options.
* <p>
* The returned trust manager factory should be already initialized and ready to use.
*
* @param vertx the vertx instance
* @return the trust manager factory
*/
TrustManagerFactory getTrustManagerFactory(Vertx vertx) throws Exception;
/**
* Returns a function that maps SNI server names to a {@link TrustManagerFactory} instance.
*
* The returned {@code TrustManagerFactory} must already be initialized and ready to use.
*
* The mapper is only used when the server has SNI enabled and the client indicated a server name.
* <p/>
* The returned function may return {@code null} in which case {@link #getTrustManagerFactory(Vertx)} is used as fallback.
*
* @param vertx the vertx instance
* @return the trustManager
*/
Function<String, TrustManager[]> trustManagerMapper(Vertx vertx) throws Exception;
/**
* Returns a {@link TrustOptions} from the provided {@link TrustManager}
*
* @param trustManager the trustManager instance
* @return the {@link TrustOptions}
*/
static TrustOptions wrap(TrustManager trustManager) {
return new TrustManagerFactoryOptions(trustManager);
}
/**
* Returns a {@link TrustOptions} from the provided {@link TrustManagerFactory}
*
* @param trustManagerFactory the trustManagerFactory instance
* @return the {@link TrustOptions}
*/
static TrustOptions wrap(TrustManagerFactory trustManagerFactory) {
return new TrustManagerFactoryOptions(trustManagerFactory);
}
}
| TrustOptions |
java | spring-projects__spring-boot | module/spring-boot-jdbc/src/main/java/org/springframework/boot/jdbc/docker/compose/PostgresEnvironment.java | {
"start": 1029,
"end": 2739
} | class ____ {
private static final String[] USERNAME_KEYS = new String[] { "POSTGRES_USER", "POSTGRESQL_USER",
"POSTGRESQL_USERNAME" };
private static final String DEFAULT_USERNAME = "postgres";
private static final String[] DATABASE_KEYS = new String[] { "POSTGRES_DB", "POSTGRESQL_DB",
"POSTGRESQL_DATABASE" };
private final String username;
private final @Nullable String password;
private final String database;
PostgresEnvironment(Map<String, @Nullable String> env) {
this.username = extract(env, USERNAME_KEYS, DEFAULT_USERNAME);
this.password = extractPassword(env);
this.database = extract(env, DATABASE_KEYS, this.username);
}
private String extract(Map<String, @Nullable String> env, String[] keys, String defaultValue) {
for (String key : keys) {
String value = env.get(key);
if (value != null) {
return value;
}
}
return defaultValue;
}
private @Nullable String extractPassword(Map<String, @Nullable String> env) {
if (isUsingTrustHostAuthMethod(env)) {
return null;
}
String password = env.getOrDefault("POSTGRES_PASSWORD", env.get("POSTGRESQL_PASSWORD"));
boolean allowEmpty = env.containsKey("ALLOW_EMPTY_PASSWORD");
Assert.state(allowEmpty || StringUtils.hasLength(password), "No PostgreSQL password found");
return (password != null) ? password : "";
}
private boolean isUsingTrustHostAuthMethod(Map<String, @Nullable String> env) {
String hostAuthMethod = env.get("POSTGRES_HOST_AUTH_METHOD");
return "trust".equals(hostAuthMethod);
}
String getUsername() {
return this.username;
}
@Nullable String getPassword() {
return this.password;
}
String getDatabase() {
return this.database;
}
}
| PostgresEnvironment |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/api/recursive/assertion/RecursiveAssertionAssert_allFieldsSatisfy_with_ignoringFieldsMatchingRegexes_Test.java | {
"start": 2154,
"end": 2394
} | class ____ {
String name;
String occupation;
int age;
Address address = new Address();
Person(String name, String occupation, int age) {
this.name = name;
this.occupation = occupation;
}
}
static | Person |
java | netty__netty | common/src/test/java/io/netty/util/internal/DefaultPriorityQueueTest.java | {
"start": 1273,
"end": 10450
} | class ____ {
@Test
public void testPoll() {
PriorityQueue<TestElement> queue = new DefaultPriorityQueue<TestElement>(TestElementComparator.INSTANCE, 0);
assertEmptyQueue(queue);
TestElement a = new TestElement(5);
TestElement b = new TestElement(10);
TestElement c = new TestElement(2);
TestElement d = new TestElement(7);
TestElement e = new TestElement(6);
assertOffer(queue, a);
assertOffer(queue, b);
assertOffer(queue, c);
assertOffer(queue, d);
// Remove the first element
assertSame(c, queue.peek());
assertSame(c, queue.poll());
assertEquals(3, queue.size());
// Test that offering another element preserves the priority queue semantics.
assertOffer(queue, e);
assertEquals(4, queue.size());
assertSame(a, queue.peek());
assertSame(a, queue.poll());
assertEquals(3, queue.size());
// Keep removing the remaining elements
assertSame(e, queue.peek());
assertSame(e, queue.poll());
assertEquals(2, queue.size());
assertSame(d, queue.peek());
assertSame(d, queue.poll());
assertEquals(1, queue.size());
assertSame(b, queue.peek());
assertSame(b, queue.poll());
assertEmptyQueue(queue);
}
@Test
public void testClear() {
PriorityQueue<TestElement> queue = new DefaultPriorityQueue<TestElement>(TestElementComparator.INSTANCE, 0);
assertEmptyQueue(queue);
TestElement a = new TestElement(5);
TestElement b = new TestElement(10);
TestElement c = new TestElement(2);
TestElement d = new TestElement(6);
assertOffer(queue, a);
assertOffer(queue, b);
assertOffer(queue, c);
assertOffer(queue, d);
queue.clear();
assertEmptyQueue(queue);
// Test that elements can be re-inserted after the clear operation
assertOffer(queue, a);
assertSame(a, queue.peek());
assertOffer(queue, b);
assertSame(a, queue.peek());
assertOffer(queue, c);
assertSame(c, queue.peek());
assertOffer(queue, d);
assertSame(c, queue.peek());
}
@Test
public void testClearIgnoringIndexes() {
PriorityQueue<TestElement> queue = new DefaultPriorityQueue<TestElement>(TestElementComparator.INSTANCE, 0);
assertEmptyQueue(queue);
TestElement a = new TestElement(5);
TestElement b = new TestElement(10);
TestElement c = new TestElement(2);
TestElement d = new TestElement(6);
TestElement e = new TestElement(11);
assertOffer(queue, a);
assertOffer(queue, b);
assertOffer(queue, c);
assertOffer(queue, d);
queue.clearIgnoringIndexes();
assertEmptyQueue(queue);
// Elements cannot be re-inserted but new ones can.
try {
queue.offer(a);
fail();
} catch (IllegalArgumentException t) {
// expected
}
assertOffer(queue, e);
assertSame(e, queue.peek());
}
@Test
public void testRemoval() {
testRemoval(false);
}
@Test
public void testRemovalTyped() {
testRemoval(true);
}
@Test
public void testRemovalFuzz() {
ThreadLocalRandom threadLocalRandom = ThreadLocalRandom.current();
final int numElements = threadLocalRandom.nextInt(0, 30);
final TestElement[] values = new TestElement[numElements];
PriorityQueue<TestElement> queue =
new DefaultPriorityQueue<>(TestElementComparator.INSTANCE, values.length);
for (int i = 0; i < values.length; ++i) {
do {
values[i] = new TestElement(threadLocalRandom.nextInt(0, numElements * 2));
} while (!queue.add(values[i]));
}
for (int i = 0; i < values.length; ++i) {
try {
assertTrue(queue.removeTyped(values[i]));
assertEquals(queue.size(), values.length - (i + 1));
} catch (Throwable cause) {
StringBuilder sb = new StringBuilder(values.length * 2);
sb.append("error on removal of index: ").append(i).append(" [");
for (TestElement value : values) {
sb.append(value).append(" ");
}
sb.append("]");
throw new AssertionError(sb.toString(), cause);
}
}
assertEmptyQueue(queue);
}
private static void testRemoval(boolean typed) {
PriorityQueue<TestElement> queue = new DefaultPriorityQueue<>(TestElementComparator.INSTANCE, 4);
assertEmptyQueue(queue);
TestElement a = new TestElement(5);
TestElement b = new TestElement(10);
TestElement c = new TestElement(2);
TestElement d = new TestElement(6);
TestElement notInQueue = new TestElement(-1);
assertOffer(queue, a);
assertOffer(queue, b);
assertOffer(queue, c);
assertOffer(queue, d);
// Remove an element that isn't in the queue.
assertFalse(typed ? queue.removeTyped(notInQueue) : queue.remove(notInQueue));
assertSame(c, queue.peek());
assertEquals(4, queue.size());
// Remove the last element in the array, when the array is non-empty.
assertTrue(typed ? queue.removeTyped(b) : queue.remove(b));
assertSame(c, queue.peek());
assertEquals(3, queue.size());
// Re-insert the element after removal
assertOffer(queue, b);
assertSame(c, queue.peek());
assertEquals(4, queue.size());
// Repeat remove the last element in the array, when the array is non-empty.
assertTrue(typed ? queue.removeTyped(d) : queue.remove(d));
assertSame(c, queue.peek());
assertEquals(3, queue.size());
assertTrue(typed ? queue.removeTyped(b) : queue.remove(b));
assertSame(c, queue.peek());
assertEquals(2, queue.size());
// Remove the head of the queue.
assertTrue(typed ? queue.removeTyped(c) : queue.remove(c));
assertSame(a, queue.peek());
assertEquals(1, queue.size());
assertTrue(typed ? queue.removeTyped(a) : queue.remove(a));
assertEmptyQueue(queue);
}
@Test
public void testZeroInitialSize() {
PriorityQueue<TestElement> queue = new DefaultPriorityQueue<>(TestElementComparator.INSTANCE, 0);
assertEmptyQueue(queue);
TestElement e = new TestElement(1);
assertOffer(queue, e);
assertSame(e, queue.peek());
assertEquals(1, queue.size());
assertFalse(queue.isEmpty());
assertSame(e, queue.poll());
assertEmptyQueue(queue);
}
@Test
public void testPriorityChange() {
PriorityQueue<TestElement> queue = new DefaultPriorityQueue<>(TestElementComparator.INSTANCE, 0);
assertEmptyQueue(queue);
TestElement a = new TestElement(10);
TestElement b = new TestElement(20);
TestElement c = new TestElement(30);
TestElement d = new TestElement(25);
TestElement e = new TestElement(23);
TestElement f = new TestElement(15);
queue.add(a);
queue.add(b);
queue.add(c);
queue.add(d);
queue.add(e);
queue.add(f);
e.value = 35;
queue.priorityChanged(e);
a.value = 40;
queue.priorityChanged(a);
a.value = 31;
queue.priorityChanged(a);
d.value = 10;
queue.priorityChanged(d);
f.value = 5;
queue.priorityChanged(f);
List<TestElement> expectedOrderList = new ArrayList<>(queue.size());
expectedOrderList.addAll(Arrays.asList(a, b, c, d, e, f));
expectedOrderList.sort(TestElementComparator.INSTANCE);
assertEquals(expectedOrderList.size(), queue.size());
assertEquals(expectedOrderList.isEmpty(), queue.isEmpty());
Iterator<TestElement> itr = expectedOrderList.iterator();
while (itr.hasNext()) {
TestElement next = itr.next();
TestElement poll = queue.poll();
assertEquals(next, poll);
itr.remove();
assertEquals(expectedOrderList.size(), queue.size());
assertEquals(expectedOrderList.isEmpty(), queue.isEmpty());
}
}
private static void assertOffer(PriorityQueue<TestElement> queue, TestElement a) {
assertTrue(queue.offer(a));
assertTrue(queue.contains(a));
assertTrue(queue.containsTyped(a));
try { // An element can not be inserted more than 1 time.
queue.offer(a);
fail();
} catch (IllegalArgumentException ignored) {
// ignored
}
}
private static void assertEmptyQueue(PriorityQueue<TestElement> queue) {
assertNull(queue.peek());
assertNull(queue.poll());
assertEquals(0, queue.size());
assertTrue(queue.isEmpty());
}
private static final | DefaultPriorityQueueTest |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/deser/bean/BeanDeserializerBase.java | {
"start": 1565,
"end": 42790
} | class ____.
*/
protected final JsonFormat.Shape _serializationShape;
/*
/**********************************************************************
/* Configuration for creating value instance
/**********************************************************************
*/
/**
* Object that handles details of constructing initial
* bean value (to which bind data to), unless instance
* is passed (via updateValue())
*/
protected final ValueInstantiator _valueInstantiator;
/**
* Deserializer that is used iff delegate-based creator is
* to be used for deserializing from JSON Object.
*<p>
* NOTE: cannot be {@code final} because we need to get it during
* {@code resolve()} method (and not contextualization).
*/
protected ValueDeserializer<Object> _delegateDeserializer;
/**
* Deserializer that is used iff array-delegate-based creator
* is to be used for deserializing from JSON Object.
*<p>
* NOTE: cannot be {@code final} because we need to get it during
* {@code resolve()} method (and not contextualization).
*/
protected ValueDeserializer<Object> _arrayDelegateDeserializer;
/**
* If the bean needs to be instantiated using constructor
* or factory method
* that takes one or more named properties as argument(s),
* this creator is used for instantiation.
* This value gets resolved during general resolution.
*/
protected PropertyBasedCreator _propertyBasedCreator;
/**
* Flag that is set to mark cases where deserialization from Object value
* using otherwise "standard" property binding will need to use non-default
* creation method: namely, either "full" delegation (array-delegation does
* not apply), or properties-based Creator method is used.
*<p>
* Note that flag is somewhat mis-named as it is not affected by scalar-delegating
* creators; it only has effect on Object Value binding.
*/
protected boolean _nonStandardCreation;
/**
* Flag that indicates that no "special features" whatsoever
* are enabled, so the simplest processing is possible.
*/
protected boolean _vanillaProcessing;
/*
/**********************************************************************
/* Property information, setters
/**********************************************************************
*/
/**
* Mapping of property names to properties, built when all properties
* to use have been successfully resolved.
*/
protected final BeanPropertyMap _beanProperties;
/**
* List of {@link ValueInjector}s, if any injectable values are
* expected by the bean; otherwise null.
* This includes injectors used for injecting values via setters
* and fields, but not ones passed through constructor parameters.
*/
protected final ValueInjector[] _injectables;
/**
* Fallback setter used for handling any properties that are not
* mapped to regular setters. If setter is not null, it will be
* called once for each such property.
*/
protected SettableAnyProperty _anySetter;
/**
* In addition to properties that are set, we will also keep
* track of recognized but ignorable properties: these will
* be skipped without errors or warnings.
*/
protected final Set<String> _ignorableProps;
/**
* Keep track of the the properties that needs to be specifically included.
*/
protected final Set<String> _includableProps;
/**
* Flag that can be set to ignore and skip unknown properties.
* If set, will not throw an exception for unknown properties.
*/
protected final boolean _ignoreAllUnknown;
/**
* Flag that indicates that some aspect of deserialization depends
* on active view used (if any)
*/
protected final boolean _needViewProcesing;
/**
* We may also have one or more back reference fields (usually
* zero or one).
*/
protected final Map<String, SettableBeanProperty> _backRefs;
/*
/**********************************************************************
/* Related handlers
/**********************************************************************
*/
/**
* Lazily constructed map used to contain deserializers needed
* for polymorphic subtypes.
* Note that this is <b>only needed</b> for polymorphic types,
* that is, when the actual type is not statically known.
* For other types this remains null.
*/
protected transient ConcurrentHashMap<ClassKey, ValueDeserializer<Object>> _subDeserializers;
/**
* If one of properties has "unwrapped" value, we need separate
* helper object
*/
protected UnwrappedPropertyHandler _unwrappedPropertyHandler;
/**
* Handler that we need if any of properties uses external
* type id.
*/
protected ExternalTypeHandler _externalTypeIdHandler;
/**
* If an Object Id is to be used for value handled by this
* deserializer, this reader is used for handling.
*/
protected final ObjectIdReader _objectIdReader;
/*
/**********************************************************************
/* Life-cycle, construction, initialization
/**********************************************************************
*/
/**
* Constructor used when initially building a deserializer
* instance, given a {@link BeanDeserializerBuilder} that
* contains configuration.
*/
protected BeanDeserializerBase(BeanDeserializerBuilder builder,
BeanDescription.Supplier beanDescRef,
BeanPropertyMap properties, Map<String, SettableBeanProperty> backRefs,
Set<String> ignorableProps, boolean ignoreAllUnknown,
Set<String> includableProps,
boolean hasViews)
{
super(beanDescRef.getType());
_beanType = beanDescRef.getType();
_valueInstantiator = builder.getValueInstantiator();
_delegateDeserializer = null;
_arrayDelegateDeserializer = null;
_propertyBasedCreator = null;
_beanProperties = properties;
_backRefs = backRefs;
_ignorableProps = ignorableProps;
_ignoreAllUnknown = ignoreAllUnknown;
_includableProps = includableProps;
_anySetter = builder.getAnySetter();
List<ValueInjector> injectables = builder.getInjectables();
_injectables = (injectables == null || injectables.isEmpty()) ? null
: injectables.toArray(new ValueInjector[0]);
_objectIdReader = builder.getObjectIdReader();
// 02-May-2020, tatu: This boolean setting is only used when binding from
// Object value, and hence does not consider "array-delegating" or various
// scalar-delegation cases. It is set when default (0-argument) constructor
// is NOT to be used when binding an Object value (or in case of
// POJO-as-array, Array value).
_nonStandardCreation = (_unwrappedPropertyHandler != null)
|| _valueInstantiator.canCreateUsingDelegate()
// [databind#2486]: as per above, array-delegating creator should not be considered
// as doing so will prevent use of Array-or-standard-Object deserialization
// || _valueInstantiator.canCreateUsingArrayDelegate()
|| _valueInstantiator.canCreateFromObjectWith()
|| !_valueInstantiator.canCreateUsingDefault()
;
// Any transformation we may need to apply?
_serializationShape = beanDescRef.findExpectedFormat(_beanType.getRawClass()).getShape();
_needViewProcesing = hasViews;
_vanillaProcessing = !_nonStandardCreation
&& (_injectables == null)
&& !_needViewProcesing
// also, may need to reorder stuff if we expect Object Id:
&& (_objectIdReader == null)
;
}
protected BeanDeserializerBase(BeanDeserializerBase src) {
this(src, src._ignoreAllUnknown);
}
protected BeanDeserializerBase(BeanDeserializerBase src, boolean ignoreAllUnknown)
{
super(src._beanType);
_beanType = src._beanType;
_valueInstantiator = src._valueInstantiator;
_delegateDeserializer = src._delegateDeserializer;
_arrayDelegateDeserializer = src._arrayDelegateDeserializer;
_propertyBasedCreator = src._propertyBasedCreator;
_beanProperties = src._beanProperties;
_backRefs = src._backRefs;
_ignorableProps = src._ignorableProps;
_ignoreAllUnknown = ignoreAllUnknown;
_includableProps = src._includableProps;
_anySetter = src._anySetter;
_injectables = src._injectables;
_objectIdReader = src._objectIdReader;
_nonStandardCreation = src._nonStandardCreation;
_unwrappedPropertyHandler = src._unwrappedPropertyHandler;
_needViewProcesing = src._needViewProcesing;
_serializationShape = src._serializationShape;
_vanillaProcessing = src._vanillaProcessing;
_externalTypeIdHandler = src._externalTypeIdHandler;
}
/**
* Constructor used in cases where unwrapping-with-name-change has been
* invoked and lookup indices need to be updated.
*/
protected BeanDeserializerBase(BeanDeserializerBase src,
UnwrappedPropertyHandler unwrapHandler, PropertyBasedCreator propertyBasedCreator,
BeanPropertyMap renamedProperties, boolean ignoreAllUnknown)
{
super(src._beanType);
_beanType = src._beanType;
_valueInstantiator = src._valueInstantiator;
_delegateDeserializer = src._delegateDeserializer;
_arrayDelegateDeserializer = src._arrayDelegateDeserializer;
_backRefs = src._backRefs;
_ignorableProps = src._ignorableProps;
_ignoreAllUnknown = ignoreAllUnknown;
_includableProps = src._includableProps;
_anySetter = src._anySetter;
_injectables = src._injectables;
_objectIdReader = src._objectIdReader;
_nonStandardCreation = src._nonStandardCreation;
_unwrappedPropertyHandler = unwrapHandler;
_propertyBasedCreator = propertyBasedCreator;
_beanProperties = renamedProperties;
_needViewProcesing = src._needViewProcesing;
_serializationShape = src._serializationShape;
// probably adds a twist, so:
_vanillaProcessing = false;
_externalTypeIdHandler = src._externalTypeIdHandler;
}
protected BeanDeserializerBase(BeanDeserializerBase src, ObjectIdReader oir)
{
super(src._beanType);
_beanType = src._beanType;
_valueInstantiator = src._valueInstantiator;
_delegateDeserializer = src._delegateDeserializer;
_arrayDelegateDeserializer = src._arrayDelegateDeserializer;
_propertyBasedCreator = src._propertyBasedCreator;
_backRefs = src._backRefs;
_ignorableProps = src._ignorableProps;
_ignoreAllUnknown = src._ignoreAllUnknown;
_includableProps = src._includableProps;
_anySetter = src._anySetter;
_injectables = src._injectables;
_nonStandardCreation = src._nonStandardCreation;
_unwrappedPropertyHandler = src._unwrappedPropertyHandler;
_needViewProcesing = src._needViewProcesing;
_serializationShape = src._serializationShape;
// then actual changes:
_objectIdReader = oir;
if (oir == null) {
_beanProperties = src._beanProperties;
_vanillaProcessing = src._vanillaProcessing;
} else {
// 18-Nov-2012, tatu: May or may not have annotations for id property;
// but no easy access. But hard to see id property being optional,
// so let's consider required at this point.
ObjectIdValueProperty idProp = new ObjectIdValueProperty(oir, PropertyMetadata.STD_REQUIRED);
_beanProperties = src._beanProperties.withProperty(idProp);
_vanillaProcessing = false;
}
_externalTypeIdHandler = src._externalTypeIdHandler;
}
/**
* @since 2.12
*/
public BeanDeserializerBase(BeanDeserializerBase src,
Set<String> ignorableProps, Set<String> includableProps)
{
super(src._beanType);
_beanType = src._beanType;
_valueInstantiator = src._valueInstantiator;
_delegateDeserializer = src._delegateDeserializer;
_arrayDelegateDeserializer = src._arrayDelegateDeserializer;
_propertyBasedCreator = src._propertyBasedCreator;
_backRefs = src._backRefs;
_ignorableProps = ignorableProps;
_ignoreAllUnknown = src._ignoreAllUnknown;
_includableProps = includableProps;
_anySetter = src._anySetter;
_injectables = src._injectables;
_nonStandardCreation = src._nonStandardCreation;
_unwrappedPropertyHandler = src._unwrappedPropertyHandler;
_needViewProcesing = src._needViewProcesing;
_serializationShape = src._serializationShape;
_vanillaProcessing = src._vanillaProcessing;
_objectIdReader = src._objectIdReader;
// 01-May-2016, tatu: [databind#1217]: Remove properties from mapping,
// to avoid them being deserialized
_beanProperties = src._beanProperties.withoutProperties(ignorableProps, includableProps);
_externalTypeIdHandler = src._externalTypeIdHandler;
}
protected BeanDeserializerBase(BeanDeserializerBase src, BeanPropertyMap beanProps)
{
super(src._beanType);
_beanType = src._beanType;
_valueInstantiator = src._valueInstantiator;
_delegateDeserializer = src._delegateDeserializer;
_arrayDelegateDeserializer = src._arrayDelegateDeserializer;
_propertyBasedCreator = src._propertyBasedCreator;
_beanProperties = beanProps;
_backRefs = src._backRefs;
_ignorableProps = src._ignorableProps;
_ignoreAllUnknown = src._ignoreAllUnknown;
_includableProps = src._includableProps;
_anySetter = src._anySetter;
_injectables = src._injectables;
_objectIdReader = src._objectIdReader;
_nonStandardCreation = src._nonStandardCreation;
_unwrappedPropertyHandler = src._unwrappedPropertyHandler;
_needViewProcesing = src._needViewProcesing;
_serializationShape = src._serializationShape;
_vanillaProcessing = src._vanillaProcessing;
_externalTypeIdHandler = src._externalTypeIdHandler;
}
public abstract BeanDeserializerBase withObjectIdReader(ObjectIdReader oir);
public abstract BeanDeserializerBase withByNameInclusion(Set<String> ignorableProps, Set<String> includableProps);
public abstract BeanDeserializerBase withIgnoreAllUnknown(boolean ignoreUnknown);
/**
* Mutant factory method that custom sub-classes must override; not left as
* abstract to prevent more drastic backwards compatibility problems.
*/
public BeanDeserializerBase withBeanProperties(BeanPropertyMap props) {
throw new UnsupportedOperationException("Class "+getClass().getName()
+" does not override `withBeanProperties()`, needs to");
}
@Override
public abstract ValueDeserializer<Object> unwrappingDeserializer(DeserializationContext ctxt,
NameTransformer unwrapper);
/**
* Fluent factory for creating a variant that can handle
* POJO output as a JSON Array. Implementations may ignore this request
* if no such input is possible.
*/
protected abstract BeanDeserializerBase asArrayDeserializer();
// @since 3.0
protected abstract void initNameMatcher(DeserializationContext ctxt);
/*
/**********************************************************************
/* Validation, post-processing
/**********************************************************************
*/
/**
* Method called to finalize setup of this deserializer,
* after deserializer itself has been registered.
* This is needed to handle recursive and transitive dependencies.
*/
@Override
public void resolve(DeserializationContext ctxt)
{
ExternalTypeHandler.Builder extTypes = null;
// if ValueInstantiator can use "creator" approach, need to resolve it here...
SettableBeanProperty[] creatorProps;
if (_valueInstantiator.canCreateFromObjectWith()) {
creatorProps = _valueInstantiator.getFromObjectArguments(ctxt.getConfig());
// 22-Jan-2018, tatu: May need to propagate "ignorable" status (from `Access.READ_ONLY`
// or perhaps class-ignorables) into Creator properties too. Cannot just delete,
// at this point, but is needed for further processing down the line
if (_ignorableProps != null || _includableProps != null) {
for (int i = 0, end = creatorProps.length; i < end; ++i) {
SettableBeanProperty prop = creatorProps[i];
if (IgnorePropertiesUtil.shouldIgnore(prop.getName(), _ignorableProps, _includableProps)) {
creatorProps[i].markAsIgnorable();
}
}
}
} else {
creatorProps = null;
}
UnwrappedPropertyHandler unwrapped = null;
// 24-Mar-2017, tatu: Looks like we may have to iterate over
// properties twice, to handle potential issues with recursive
// types (see [databind#1575] f.ex).
// First loop: find deserializer if not yet known, but do not yet
// contextualize (since that can lead to problems with self-references)
// 22-Jan-2018, tatu: NOTE! Need not check for `isIgnorable` as that can
// only happen for props in `creatorProps`
for (SettableBeanProperty prop : _beanProperties) {
// [databind#962]: no eager lookup for inject-only [creator] properties
if (prop.hasValueDeserializer() || prop.isInjectionOnly()) {
continue;
}
// [databind#125]: allow use of converters
ValueDeserializer<?> deser = _findConvertingDeserializer(ctxt, prop);
if (deser == null) {
deser = ctxt.findNonContextualValueDeserializer(prop.getType());
}
SettableBeanProperty newProp = prop.withValueDeserializer(deser);
if (prop != newProp) {
_replaceProperty(_beanProperties, creatorProps, prop, newProp);
}
}
// Second loop: contextualize, find other pieces
for (SettableBeanProperty origProp : _beanProperties) {
SettableBeanProperty prop = origProp;
ValueDeserializer<?> deser = prop.getValueDeserializer();
deser = ctxt.handlePrimaryContextualization(deser, prop, prop.getType());
prop = prop.withValueDeserializer(deser);
// Need to link managed references with matching back references
prop = _resolveManagedReferenceProperty(ctxt, prop);
// [databind#351]: need to wrap properties that require object id resolution.
if (!(prop instanceof ManagedReferenceProperty)) {
prop = _resolvedObjectIdProperty(ctxt, prop);
}
// Support unwrapped values (via @JsonUnwrapped)
NameTransformer xform = _findPropertyUnwrapper(ctxt, prop);
if (xform != null) {
ValueDeserializer<Object> orig = prop.getValueDeserializer();
ValueDeserializer<Object> unwrapping = orig.unwrappingDeserializer(ctxt, xform);
if ((unwrapping != orig) && (unwrapping != null)) {
prop = prop.withValueDeserializer(unwrapping);
if (unwrapped == null) {
unwrapped = new UnwrappedPropertyHandler();
}
if (prop instanceof CreatorProperty) {
unwrapped.addCreatorProperty(prop);
} else {
unwrapped.addProperty(prop);
}
// 12-Dec-2014, tatu: As per [databind#647], we will have problems if
// the original property is left in place. So let's remove it now.
// 25-Mar-2017, tatu: Wonder if this could be problematic wrt creators?
// (that is, should we remove it from creator too)
_beanProperties.remove(prop);
continue;
}
}
// 26-Oct-2016, tatu: Need to have access to value deserializer to know if
// merging needed, and now seems to be reasonable time to do that.
final PropertyMetadata md = prop.getMetadata();
prop = _resolveMergeAndNullSettings(ctxt, prop, md);
// non-static inner classes too:
prop = _resolveInnerClassValuedProperty(ctxt, prop);
if (prop != origProp) {
_replaceProperty(_beanProperties, creatorProps, origProp, prop);
}
// one more thing: if this property uses "external property" type inclusion,
// it needs different handling altogether
if (prop.hasValueTypeDeserializer()) {
TypeDeserializer typeDeser = prop.getValueTypeDeserializer();
if (typeDeser.getTypeInclusion() == JsonTypeInfo.As.EXTERNAL_PROPERTY) {
if (extTypes == null) {
extTypes = ExternalTypeHandler.builder(_beanType);
}
extTypes.addExternal(prop, typeDeser);
// In fact, remove from list of known properties to simplify later handling
_beanProperties.remove(prop);
continue;
}
}
}
// "any setter" may also need to be resolved now
if ((_anySetter != null) && !_anySetter.hasValueDeserializer()) {
_anySetter = _anySetter.withValueDeserializer(findDeserializer(ctxt,
_anySetter.getType(), _anySetter.getProperty()));
}
// as well as delegate-based constructor:
if (_valueInstantiator.canCreateUsingDelegate()) {
JavaType delegateType = _valueInstantiator.getDelegateType(ctxt.getConfig());
if (delegateType == null) {
ctxt.reportBadDefinition(_beanType, String.format(
"Invalid delegate-creator definition for %s: value instantiator (%s) returned true for 'canCreateUsingDelegate()', but null for 'getDelegateType()'",
ClassUtil.getTypeDescription(_beanType), ClassUtil.classNameOf(_valueInstantiator)));
}
_delegateDeserializer = _findDelegateDeserializer(ctxt, delegateType,
_valueInstantiator.getDelegateCreator());
}
// and array-delegate-based constructor:
if (_valueInstantiator.canCreateUsingArrayDelegate()) {
JavaType delegateType = _valueInstantiator.getArrayDelegateType(ctxt.getConfig());
if (delegateType == null) {
ctxt.reportBadDefinition(_beanType, String.format(
"Invalid delegate-creator definition for %s: value instantiator (%s) returned true for 'canCreateUsingArrayDelegate()', but null for 'getArrayDelegateType()'",
ClassUtil.getTypeDescription(_beanType), ClassUtil.classNameOf(_valueInstantiator)));
}
_arrayDelegateDeserializer = _findDelegateDeserializer(ctxt, delegateType,
_valueInstantiator.getArrayDelegateCreator());
}
// And now that we know CreatorProperty instances are also resolved can finally create the creator:
if (creatorProps != null) {
_propertyBasedCreator = PropertyBasedCreator.construct(ctxt, _valueInstantiator,
creatorProps, _beanProperties);
}
if (extTypes != null) {
// 21-Jun-2016, tatu: related to [databind#999], may need to link type ids too,
// so need to pass collected properties
_externalTypeIdHandler = extTypes.build(_beanProperties);
// we consider this non-standard, to offline handling
_nonStandardCreation = true;
}
_unwrappedPropertyHandler = unwrapped;
if (unwrapped != null) { // we consider this non-standard, to offline handling
_nonStandardCreation = true;
}
// may need to disable vanilla processing, if unwrapped handling was enabled...
_vanillaProcessing = _vanillaProcessing && !_nonStandardCreation;
}
protected void _replaceProperty(BeanPropertyMap props, SettableBeanProperty[] creatorProps,
SettableBeanProperty origProp, SettableBeanProperty newProp)
{
props.replace(origProp, newProp);
// [databind#795]: Make sure PropertyBasedCreator's properties stay in sync
if (creatorProps != null) {
// 18-May-2015, tatu: _Should_ start with consistent set. But can we really
// fully count on this? May need to revisit in future; seems to hold for now.
for (int i = 0, len = creatorProps.length; i < len; ++i) {
if (creatorProps[i] == origProp) {
creatorProps[i] = newProp;
return;
}
}
/*
// ... as per above, it is possible we'd need to add this as fallback
// if (but only if) identity check fails?
for (int i = 0, len = creatorProps.length; i < len; ++i) {
if (creatorProps[i].getName().equals(origProp.getName())) {
creatorProps[i] = newProp;
return;
}
}
*/
}
}
@SuppressWarnings("unchecked")
private ValueDeserializer<Object> _findDelegateDeserializer(DeserializationContext ctxt,
JavaType delegateType, AnnotatedWithParams delegateCreator)
{
// 27-Nov-2023, tatu: [databind#4200] Need to resolve PropertyMetadata.
// And all we have is the actual Creator method; but for annotations
// we actually need the one parameter -- if there is one
// (NOTE! This would not work for case of more than one parameter with
// delegation, others injected)
final BeanProperty property;
if ((delegateCreator != null) && (delegateCreator.getParameterCount() == 1)) {
AnnotatedMember delegator = delegateCreator.getParameter(0);
PropertyMetadata propMd = _getSetterInfo(ctxt, delegator, delegateType);
property = new BeanProperty.Std(TEMP_PROPERTY_NAME,
delegateType, null, delegator, propMd);
} else {
// No creator indicated; or Zero, or more than 2 arguments (since we don't
// know which one is the "real" delegating parameter. Although could possibly
// figure it out if someone provides actual use case
property = new BeanProperty.Std(TEMP_PROPERTY_NAME,
delegateType, null, delegateCreator,
PropertyMetadata.STD_OPTIONAL);
}
TypeDeserializer td = (TypeDeserializer) delegateType.getTypeHandler();
if (td == null) {
td = ctxt.findTypeDeserializer(delegateType);
}
// 04-May-2018, tatu: [databind#2021] check if there's custom deserializer attached
// to type (resolved from parameter)
ValueDeserializer<Object> dd = (ValueDeserializer<Object>) delegateType.getValueHandler();
if (dd == null) {
dd = findDeserializer(ctxt, delegateType, property);
} else {
dd = (ValueDeserializer<Object>) ctxt.handleSecondaryContextualization(dd, property, delegateType);
}
if (td != null) {
td = td.forProperty(property);
return new TypeWrappedDeserializer(td, dd);
}
return dd;
}
/**
* Method essentially copied from {@code BasicDeserializerFactory},
* needed to find {@link PropertyMetadata} for Delegating Creator,
* for access to annotation-derived info.
*/
protected PropertyMetadata _getSetterInfo(DeserializationContext ctxt,
AnnotatedMember accessor, JavaType type)
{
final AnnotationIntrospector intr = ctxt.getAnnotationIntrospector();
final DeserializationConfig config = ctxt.getConfig();
PropertyMetadata metadata = PropertyMetadata.STD_OPTIONAL;
boolean needMerge = true;
Nulls valueNulls = null;
Nulls contentNulls = null;
// NOTE: compared to `POJOPropertyBuilder`, we only have access to creator
// parameter, not other accessors, so code bit simpler
// Ok, first: does property itself have something to say?
if (intr != null) {
JsonSetter.Value setterInfo = intr.findSetterInfo(config, accessor);
if (setterInfo != null) {
valueNulls = setterInfo.nonDefaultValueNulls();
contentNulls = setterInfo.nonDefaultContentNulls();
}
}
// If not, config override?
if (needMerge || (valueNulls == null) || (contentNulls == null)) {
ConfigOverride co = config.getConfigOverride(type.getRawClass());
JsonSetter.Value setterInfo = co.getNullHandling();
if (setterInfo != null) {
if (valueNulls == null) {
valueNulls = setterInfo.nonDefaultValueNulls();
}
if (contentNulls == null) {
contentNulls = setterInfo.nonDefaultContentNulls();
}
}
}
if (needMerge || (valueNulls == null) || (contentNulls == null)) {
JsonSetter.Value setterInfo = config.getDefaultNullHandling();
if (valueNulls == null) {
valueNulls = setterInfo.nonDefaultValueNulls();
}
if (contentNulls == null) {
contentNulls = setterInfo.nonDefaultContentNulls();
}
}
if ((valueNulls != null) || (contentNulls != null)) {
metadata = metadata.withNulls(valueNulls, contentNulls);
}
return metadata;
}
/**
* Helper method that can be used to see if specified property is annotated
* to indicate use of a converter for property value (in case of container types,
* it is container type itself, not key or content type).
*<p>
* NOTE: returned deserializer is NOT yet contextualized, caller needs to take
* care to do that.
*/
protected ValueDeserializer<Object> _findConvertingDeserializer(DeserializationContext ctxt,
SettableBeanProperty prop)
{
final AnnotationIntrospector intr = ctxt.getAnnotationIntrospector();
if (intr != null) {
Object convDef = intr.findDeserializationConverter(ctxt.getConfig(), prop.getMember());
if (convDef != null) {
Converter<Object,Object> conv = ctxt.converterInstance(prop.getMember(), convDef);
JavaType delegateType = conv.getInputType(ctxt.getTypeFactory());
// 25-Mar-2017, tatu: should not yet contextualize
// ValueDeserializer<?> deser = ctxt.findContextualValueDeserializer(delegateType, prop);
ValueDeserializer<?> deser = ctxt.findNonContextualValueDeserializer(delegateType);
return new StdConvertingDeserializer<Object>(conv, delegateType, deser);
}
}
return null;
}
/**
* Although most of post-processing is done in resolve(), we only get
* access to referring property's annotations here; and this is needed
* to support per-property ObjectIds.
* We will also consider Shape transformations (read from Array) at this
* point, since it may come from either Class definition or property.
*/
@Override
public ValueDeserializer<?> createContextual(DeserializationContext ctxt,
BeanProperty property)
{
ObjectIdReader oir = _objectIdReader;
// First: may have an override for Object Id:
final AnnotationIntrospector intr = ctxt.getAnnotationIntrospector();
final DeserializationConfig config = ctxt.getConfig();
final AnnotatedMember accessor = _neitherNull(property, intr) ? property.getMember() : null;
if (accessor != null) {
ObjectIdInfo objectIdInfo = intr.findObjectIdInfo(config, accessor);
if (objectIdInfo != null) { // some code duplication here as well (from BeanDeserializerFactory)
// 2.1: allow modifications by "id ref" annotations as well:
objectIdInfo = intr.findObjectReferenceInfo(config, accessor, objectIdInfo);
Class<?> implClass = objectIdInfo.getGeneratorType();
// Property-based generator is trickier
JavaType idType;
SettableBeanProperty idProp;
ObjectIdGenerator<?> idGen;
ObjectIdResolver resolver = ctxt.objectIdResolverInstance(accessor, objectIdInfo);
if (implClass == ObjectIdGenerators.PropertyGenerator.class) {
PropertyName propName = objectIdInfo.getPropertyName();
idProp = findProperty(propName);
if (idProp == null) {
return ctxt.reportBadDefinition(_beanType, String.format(
"Invalid Object Id definition for %s: cannot find property with name %s",
ClassUtil.nameOf(handledType()), ClassUtil.name(propName)));
}
idType = idProp.getType();
idGen = new PropertyBasedObjectIdGenerator(objectIdInfo.getScope());
} else { // other types are to be simpler
JavaType type = ctxt.constructType(implClass);
idType = ctxt.getTypeFactory().findTypeParameters(type, ObjectIdGenerator.class)[0];
idProp = null;
idGen = ctxt.objectIdGeneratorInstance(accessor, objectIdInfo);
}
ValueDeserializer<?> deser = ctxt.findRootValueDeserializer(idType);
oir = ObjectIdReader.construct(idType, objectIdInfo.getPropertyName(),
idGen, deser, idProp, resolver);
}
}
// either way, need to resolve serializer:
BeanDeserializerBase contextual = this;
if (oir != null && oir != _objectIdReader) {
contextual = contextual.withObjectIdReader(oir);
}
// And possibly add more properties to ignore
if (accessor != null) {
contextual = _handleByNameInclusion(ctxt, intr, contextual, accessor);
}
// One more thing: are we asked to serialize POJO as array?
JsonFormat.Value format = findFormatOverrides(ctxt, property, handledType());
JsonFormat.Shape shape = null;
if (format != null) {
if (format.hasShape()) {
shape = format.getShape();
}
// 16-May-2016, tatu: How about per-property case-insensitivity?
Boolean B = format.getFeature(JsonFormat.Feature.ACCEPT_CASE_INSENSITIVE_PROPERTIES);
if (B != null) {
BeanPropertyMap propsOrig = _beanProperties;
BeanPropertyMap props = propsOrig.withCaseInsensitivity(B.booleanValue());
if (props != propsOrig) {
contextual = contextual.withBeanProperties(props);
}
}
}
contextual.initNameMatcher(ctxt);
if (shape == null) {
shape = _serializationShape;
}
if (shape == JsonFormat.Shape.ARRAY) {
contextual = contextual.asArrayDeserializer();
}
return contextual;
}
protected BeanDeserializerBase _handleByNameInclusion(DeserializationContext ctxt,
AnnotationIntrospector intr,
BeanDeserializerBase contextual,
AnnotatedMember accessor)
{
final DeserializationConfig config = ctxt.getConfig();
JsonIgnoreProperties.Value ignorals = intr.findPropertyIgnoralByName(config, accessor);
// 30-Mar-2020, tatu: As per [databind#2627], need to also allow
// per-property override to "ignore all unknown".
// NOTE: there is no way to override with `false` because annotation
// defaults to `false` (i.e. cannot know if `false` is explicit value)
if (ignorals.getIgnoreUnknown() && !_ignoreAllUnknown) {
contextual = contextual.withIgnoreAllUnknown(true);
}
final Set<String> namesToIgnore = ignorals.findIgnoredForDeserialization();
final Set<String> prevNamesToIgnore = contextual._ignorableProps;
final Set<String> newNamesToIgnore;
if (namesToIgnore.isEmpty()) {
newNamesToIgnore = prevNamesToIgnore;
} else if ((prevNamesToIgnore == null) || prevNamesToIgnore.isEmpty()) {
newNamesToIgnore = namesToIgnore;
} else {
newNamesToIgnore = new HashSet<String>(prevNamesToIgnore);
newNamesToIgnore.addAll(namesToIgnore);
}
final Set<String> prevNamesToInclude = contextual._includableProps;
final Set<String> newNamesToInclude = IgnorePropertiesUtil.combineNamesToInclude(prevNamesToInclude,
intr.findPropertyInclusionByName(config, accessor).getIncluded());
if ((newNamesToIgnore != prevNamesToIgnore)
|| (newNamesToInclude != prevNamesToInclude)) {
contextual = contextual.withByNameInclusion(newNamesToIgnore, newNamesToInclude);
}
return contextual;
}
/**
* Helper method called to see if given property is part of 'managed' property
* pair (managed + back reference), and if so, handle resolution details.
*/
protected SettableBeanProperty _resolveManagedReferenceProperty(DeserializationContext ctxt,
SettableBeanProperty prop)
{
String refName = prop.getManagedReferenceName();
if (refName == null) {
return prop;
}
ValueDeserializer<?> valueDeser = prop.getValueDeserializer();
SettableBeanProperty backProp = valueDeser.findBackReference(refName);
if (backProp == null) {
return ctxt.reportBadDefinition(_beanType, String.format(
"Cannot handle managed/back reference %s: no back reference property found from type %s",
ClassUtil.name(refName), ClassUtil.getTypeDescription(prop.getType())));
}
// also: verify that type is compatible
JavaType referredType = _beanType;
JavaType backRefType = backProp.getType();
boolean isContainer = prop.getType().isContainerType();
if (!backRefType.getRawClass().isAssignableFrom(referredType.getRawClass())) {
ctxt.reportBadDefinition(_beanType, String.format(
"Cannot handle managed/back reference %s: back reference type (%s) not compatible with managed type (%s)",
ClassUtil.name(refName), ClassUtil.getTypeDescription(backRefType),
referredType.getRawClass().getName()));
}
return new ManagedReferenceProperty(prop, refName, backProp, isContainer);
}
/**
* Method that wraps given property with {@link ObjectIdReferenceProperty}
* in case where object id resolution is required.
*/
protected SettableBeanProperty _resolvedObjectIdProperty(DeserializationContext ctxt,
SettableBeanProperty prop)
{
ObjectIdInfo objectIdInfo = prop.getObjectIdInfo();
ValueDeserializer<Object> valueDeser = prop.getValueDeserializer();
ObjectIdReader objectIdReader = (valueDeser == null) ? null : valueDeser.getObjectIdReader(ctxt);
if (objectIdInfo == null && objectIdReader == null) {
return prop;
}
return new ObjectIdReferenceProperty(prop, objectIdInfo);
}
/**
* Helper method called to see if given property might be so-called unwrapped
* property: these require special handling.
*/
protected NameTransformer _findPropertyUnwrapper(DeserializationContext ctxt,
SettableBeanProperty prop)
{
AnnotatedMember am = prop.getMember();
if (am != null) {
NameTransformer unwrapper = ctxt.getAnnotationIntrospector().findUnwrappingNameTransformer(
ctxt.getConfig(), am);
if (unwrapper != null) {
return unwrapper;
}
}
return null;
}
/**
* Helper method that will handle gruesome details of dealing with properties
* that have non-static inner | annotations |
java | micronaut-projects__micronaut-core | inject/src/main/java/io/micronaut/inject/DelegatingBeanDefinition.java | {
"start": 1300,
"end": 4636
} | interface ____<T> extends BeanDefinition<T> {
/**
* @return The target definition
*/
BeanDefinition<T> getTarget();
@Override
default boolean requiresMethodProcessing() {
return getTarget().requiresMethodProcessing();
}
@Override
default Optional<Class<? extends Annotation>> getScope() {
return getTarget().getScope();
}
@Override
default Optional<String> getScopeName() {
return getTarget().getScopeName();
}
@Override
default AnnotationMetadata getAnnotationMetadata() {
return getTarget().getAnnotationMetadata();
}
@Override
default <R> ExecutableMethod<T, R> getRequiredMethod(String name, Class<?>... argumentTypes) {
return getTarget().getRequiredMethod(name, argumentTypes);
}
@Override
default boolean isAbstract() {
return getTarget().isAbstract();
}
@Override
default boolean isSingleton() {
return getTarget().isSingleton();
}
@Override
default boolean isIterable() {
return getTarget().isIterable();
}
@Override
default Class<T> getBeanType() {
return getTarget().getBeanType();
}
@Override
default ConstructorInjectionPoint<T> getConstructor() {
return getTarget().getConstructor();
}
@Override
default Collection<Class<?>> getRequiredComponents() {
return getTarget().getRequiredComponents();
}
@Override
default Collection<MethodInjectionPoint<T, ?>> getInjectedMethods() {
return getTarget().getInjectedMethods();
}
@Override
default Collection<FieldInjectionPoint<T, ?>> getInjectedFields() {
return getTarget().getInjectedFields();
}
@Override
default Collection<MethodInjectionPoint<T, ?>> getPostConstructMethods() {
return getTarget().getPostConstructMethods();
}
@Override
default Collection<MethodInjectionPoint<T, ?>> getPreDestroyMethods() {
return getTarget().getPreDestroyMethods();
}
@Override
@NonNull
default String getName() {
return getTarget().getName();
}
@Override
default <R> Optional<ExecutableMethod<T, R>> findMethod(String name, Class<?>... argumentTypes) {
return getTarget().findMethod(name, argumentTypes);
}
@Override
default <R> Stream<ExecutableMethod<T, R>> findPossibleMethods(String name) {
return getTarget().findPossibleMethods(name);
}
@Override
default Collection<ExecutableMethod<T, ?>> getExecutableMethods() {
return getTarget().getExecutableMethods();
}
@Override
default boolean isPrimary() {
return getTarget().isPrimary();
}
@Override
default boolean isEnabled(BeanContext context) {
return getTarget().isEnabled(context);
}
@Override
default boolean isEnabled(@NonNull BeanContext context, @Nullable BeanResolutionContext resolutionContext) {
return getTarget().isEnabled(context, resolutionContext);
}
@Override
default Optional<Class<?>> getDeclaringType() {
return getTarget().getDeclaringType();
}
@Override
default @NonNull List<Argument<?>> getTypeArguments(String type) {
return getTarget().getTypeArguments(type);
}
}
| DelegatingBeanDefinition |
java | quarkusio__quarkus | extensions/security/deployment/src/main/java/io/quarkus/security/deployment/SecurityProcessor.java | {
"start": 8486,
"end": 21624
} | class ____ {
private static final Logger log = Logger.getLogger(SecurityProcessor.class);
private static final DotName STARTUP_EVENT_NAME = DotName.createSimple(StartupEvent.class.getName());
private static final Set<DotName> SECURITY_CHECK_ANNOTATIONS = Set.of(DotName.createSimple(RolesAllowed.class.getName()),
DotName.createSimple(PermissionsAllowed.class.getName()),
DotName.createSimple(PermissionsAllowed.List.class.getName()),
DotName.createSimple(Authenticated.class.getName()),
DotName.createSimple(DenyAll.class.getName()),
DotName.createSimple(PermitAll.class.getName()));
SecurityConfig security;
@BuildStep
SecurityTransformerBuildItem createSecurityTransformerBuildItem(
List<AdditionalSecurityAnnotationBuildItem> additionalSecurityAnnotationBuildItems) {
// collect security annotations
Map<AuthorizationType, Set<DotName>> authorizationTypeToSecurityAnnotations = new EnumMap<>(AuthorizationType.class);
authorizationTypeToSecurityAnnotations.put(SECURITY_CHECK, new HashSet<>(SECURITY_CHECK_ANNOTATIONS));
additionalSecurityAnnotationBuildItems.forEach(i -> authorizationTypeToSecurityAnnotations
.computeIfAbsent(i.getAuthorizationType(), k -> new HashSet<>()).add(i.getSecurityAnnotationName()));
return new SecurityTransformerBuildItem(authorizationTypeToSecurityAnnotations);
}
@BuildStep
List<AdditionalIndexedClassesBuildItem> registerAdditionalIndexedClassesBuildItem(
SecurityTransformerBuildItem securityTransformerBuildItem) {
// we need the combined index to contain security annotations in order to check for repeatable annotations
// (we do not hardcode here knowledge which annotation is repeatable and which one isn't, so we check all)
return List
.of(new AdditionalIndexedClassesBuildItem(securityTransformerBuildItem.getAllSecurityAnnotationNames()));
}
/**
* Create JCAProviderBuildItems for any configured provider names
*/
@BuildStep
void produceJcaSecurityProviders(BuildProducer<JCAProviderBuildItem> jcaProviders,
BuildProducer<BouncyCastleProviderBuildItem> bouncyCastleProvider,
BuildProducer<BouncyCastleJsseProviderBuildItem> bouncyCastleJsseProvider) {
Set<String> providers = security.securityProviders().orElse(Set.of());
for (String providerName : providers) {
if (SecurityProviderUtils.BOUNCYCASTLE_PROVIDER_NAME.equals(providerName)) {
bouncyCastleProvider.produce(new BouncyCastleProviderBuildItem());
} else if (SecurityProviderUtils.BOUNCYCASTLE_JSSE_PROVIDER_NAME.equals(providerName)) {
bouncyCastleJsseProvider.produce(new BouncyCastleJsseProviderBuildItem());
} else if (SecurityProviderUtils.BOUNCYCASTLE_FIPS_PROVIDER_NAME.equals(providerName)) {
bouncyCastleProvider.produce(new BouncyCastleProviderBuildItem(true));
} else if (SecurityProviderUtils.BOUNCYCASTLE_FIPS_JSSE_PROVIDER_NAME.equals(providerName)) {
bouncyCastleJsseProvider.produce(new BouncyCastleJsseProviderBuildItem(true));
} else {
jcaProviders
.produce(new JCAProviderBuildItem(providerName, security.securityProviderConfig().get(providerName)));
}
log.debugf("Added providerName: %s", providerName);
}
}
@BuildStep(onlyIf = NativeImageFutureDefault.RunTimeInitializeSecurityProvider.class)
void registerBouncyCastleReflection(CurateOutcomeBuildItem curateOutcomeBuildItem,
BuildProducer<ReflectiveClassBuildItem> reflection) {
if (curateOutcomeBuildItem.getApplicationModel().getDependencies().stream().anyMatch(
x -> x.getGroupId().equals("org.bouncycastle") && x.getArtifactId().startsWith("bcprov-"))) {
reflection.produce(ReflectiveClassBuildItem.builder("org.bouncycastle.jcajce.provider.symmetric.AES",
"org.bouncycastle.jcajce.provider.symmetric.AES$Mappings",
"org.bouncycastle.jcajce.provider.asymmetric.EC",
"org.bouncycastle.jcajce.provider.asymmetric.EC$Mappings",
"org.bouncycastle.jcajce.provider.asymmetric.RSA",
"org.bouncycastle.jcajce.provider.asymmetric.RSA$Mappings",
"org.bouncycastle.jcajce.provider.drbg.DRBG",
"org.bouncycastle.jcajce.provider.drbg.DRBG$Mappings").methods().fields()
.build());
}
}
/**
* Register the classes for reflection in the requested named providers
*
* @param classes - ReflectiveClassBuildItem producer
* @param jcaProviders - JCAProviderBuildItem for requested providers
* @throws URISyntaxException
* @throws MalformedURLException
*/
@BuildStep
void registerJCAProvidersForReflection(BuildProducer<ReflectiveClassBuildItem> classes,
List<JCAProviderBuildItem> jcaProviders,
BuildProducer<NativeImageSecurityProviderBuildItem> additionalProviders) throws IOException, URISyntaxException {
for (JCAProviderBuildItem provider : jcaProviders) {
List<String> providerClasses = registerProvider(provider.getProviderName(), provider.getProviderConfig(),
additionalProviders);
for (String className : providerClasses) {
classes.produce(ReflectiveClassBuildItem.builder(className).methods().fields().build());
log.debugf("Register JCA class: %s", className);
}
}
}
@BuildStep
void prepareBouncyCastleProviders(CurateOutcomeBuildItem curateOutcomeBuildItem,
BuildProducer<ReflectiveClassBuildItem> reflection,
BuildProducer<RuntimeInitializedClassBuildItem> runtimeReInitialized,
List<BouncyCastleProviderBuildItem> bouncyCastleProviders,
List<BouncyCastleJsseProviderBuildItem> bouncyCastleJsseProviders) throws Exception {
Optional<BouncyCastleJsseProviderBuildItem> bouncyCastleJsseProvider = getOne(bouncyCastleJsseProviders);
if (bouncyCastleJsseProvider.isPresent()) {
reflection.produce(
ReflectiveClassBuildItem.builder(SecurityProviderUtils.BOUNCYCASTLE_JSSE_PROVIDER_CLASS_NAME).methods()
.fields().build());
reflection.produce(
ReflectiveClassBuildItem.builder("org.bouncycastle.jsse.provider.DefaultSSLContextSpi$LazyManagers")
.methods().fields().build());
runtimeReInitialized
.produce(new RuntimeInitializedClassBuildItem(
"org.bouncycastle.jsse.provider.DefaultSSLContextSpi$LazyManagers"));
prepareBouncyCastleProvider(curateOutcomeBuildItem, reflection, runtimeReInitialized,
bouncyCastleJsseProvider.get().isInFipsMode());
} else {
Optional<BouncyCastleProviderBuildItem> bouncyCastleProvider = getOne(bouncyCastleProviders);
if (bouncyCastleProvider.isPresent()) {
prepareBouncyCastleProvider(curateOutcomeBuildItem, reflection, runtimeReInitialized,
bouncyCastleProvider.get().isInFipsMode());
}
}
}
private static void prepareBouncyCastleProvider(CurateOutcomeBuildItem curateOutcomeBuildItem,
BuildProducer<ReflectiveClassBuildItem> reflection,
BuildProducer<RuntimeInitializedClassBuildItem> runtimeReInitialized, boolean isFipsMode) {
reflection
.produce(
ReflectiveClassBuildItem
.builder(isFipsMode ? SecurityProviderUtils.BOUNCYCASTLE_FIPS_PROVIDER_CLASS_NAME
: SecurityProviderUtils.BOUNCYCASTLE_PROVIDER_CLASS_NAME)
.methods().fields().build());
if (curateOutcomeBuildItem.getApplicationModel().getDependencies().stream().anyMatch(
x -> x.getGroupId().equals("org.bouncycastle") && x.getArtifactId().startsWith("bcprov-"))) {
reflection.produce(ReflectiveClassBuildItem.builder("org.bouncycastle.jcajce.provider.symmetric.AES",
"org.bouncycastle.jcajce.provider.symmetric.AES$CBC",
"org.bouncycastle.crypto.paddings.PKCS7Padding",
"org.bouncycastle.jcajce.provider.asymmetric.ec.KeyFactorySpi",
"org.bouncycastle.jcajce.provider.asymmetric.ec.KeyFactorySpi$EC",
"org.bouncycastle.jcajce.provider.asymmetric.ec.KeyFactorySpi$ECDSA",
"org.bouncycastle.jcajce.provider.asymmetric.ec.KeyPairGeneratorSpi",
"org.bouncycastle.jcajce.provider.asymmetric.ec.KeyPairGeneratorSpi$EC",
"org.bouncycastle.jcajce.provider.asymmetric.ec.KeyPairGeneratorSpi$ECDSA",
"org.bouncycastle.jcajce.provider.asymmetric.rsa.KeyFactorySpi",
"org.bouncycastle.jcajce.provider.asymmetric.rsa.KeyPairGeneratorSpi",
"org.bouncycastle.jcajce.provider.asymmetric.rsa.PSSSignatureSpi",
"org.bouncycastle.jcajce.provider.asymmetric.rsa.PSSSignatureSpi$SHA256withRSA").methods().fields()
.build());
}
runtimeReInitialized
.produce(new RuntimeInitializedClassBuildItem("org.bouncycastle.crypto.CryptoServicesRegistrar"));
if (!isFipsMode) {
reflection.produce(ReflectiveClassBuildItem.builder("org.bouncycastle.jcajce.provider.drbg.DRBG$Default")
.methods().fields().build());
runtimeReInitialized
.produce(new RuntimeInitializedClassBuildItem("org.bouncycastle.jcajce.provider.drbg.DRBG$Default"));
runtimeReInitialized
.produce(new RuntimeInitializedClassBuildItem("org.bouncycastle.jcajce.provider.drbg.DRBG$NonceAndIV"));
// URLSeededEntropySourceProvider.seedStream may contain a reference to a 'FileInputStream' which includes
// references to FileDescriptors which aren't allowed in the image heap
runtimeReInitialized
.produce(new RuntimeInitializedClassBuildItem(
"org.bouncycastle.jcajce.provider.drbg.DRBG$URLSeededEntropySourceProvider"));
} else {
reflection.produce(ReflectiveClassBuildItem.builder("org.bouncycastle.crypto.general.AES")
.methods().fields().build());
runtimeReInitialized.produce(new RuntimeInitializedClassBuildItem("org.bouncycastle.crypto.general.AES"));
runtimeReInitialized
.produce(new RuntimeInitializedClassBuildItem(
"org.bouncycastle.crypto.asymmetric.NamedECDomainParameters"));
runtimeReInitialized
.produce(new RuntimeInitializedClassBuildItem("org.bouncycastle.crypto.asymmetric.CustomNamedCurves"));
runtimeReInitialized
.produce(new RuntimeInitializedClassBuildItem("org.bouncycastle.asn1.ua.DSTU4145NamedCurves"));
runtimeReInitialized
.produce(new RuntimeInitializedClassBuildItem("org.bouncycastle.asn1.sec.SECNamedCurves"));
runtimeReInitialized
.produce(new RuntimeInitializedClassBuildItem("org.bouncycastle.asn1.cryptopro.ECGOST3410NamedCurves"));
runtimeReInitialized
.produce(new RuntimeInitializedClassBuildItem("org.bouncycastle.asn1.x9.X962NamedCurves"));
runtimeReInitialized
.produce(new RuntimeInitializedClassBuildItem("org.bouncycastle.asn1.x9.ECNamedCurveTable"));
runtimeReInitialized
.produce(new RuntimeInitializedClassBuildItem("org.bouncycastle.asn1.anssi.ANSSINamedCurves"));
runtimeReInitialized
.produce(new RuntimeInitializedClassBuildItem("org.bouncycastle.asn1.teletrust.TeleTrusTNamedCurves"));
runtimeReInitialized.produce(new RuntimeInitializedClassBuildItem("org.bouncycastle.jcajce.spec.ECUtil"));
// start of BCFIPS 2.0
// started thread during initialization
runtimeReInitialized
.produce(new RuntimeInitializedClassBuildItem("org.bouncycastle.crypto.util.dispose.DisposalDaemon"));
// secure randoms
runtimeReInitialized.produce(new RuntimeInitializedClassBuildItem("org.bouncycastle.crypto.fips.FipsDRBG"));
runtimeReInitialized.produce(new RuntimeInitializedClassBuildItem("org.bouncycastle.crypto.fips.Utils"));
// re-detect JNI library availability
runtimeReInitialized.produce(new RuntimeInitializedClassBuildItem("org.bouncycastle.crypto.fips.NativeLoader"));
}
// Reinitialize | SecurityProcessor |
java | apache__flink | flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/mapred/utils/HadoopUtils.java | {
"start": 1294,
"end": 5255
} | class ____ {
private static final Logger LOG = LoggerFactory.getLogger(HadoopUtils.class);
/** Merge HadoopConfiguration into JobConf. This is necessary for the HDFS configuration. */
public static void mergeHadoopConf(JobConf jobConf) {
// we have to load the global configuration here, because the HadoopInputFormatBase does not
// have access to a Flink configuration object
org.apache.flink.configuration.Configuration flinkConfiguration =
GlobalConfiguration.loadConfiguration();
Configuration hadoopConf = getHadoopConfiguration(flinkConfiguration);
for (Map.Entry<String, String> e : hadoopConf) {
if (jobConf.get(e.getKey()) == null) {
jobConf.set(e.getKey(), e.getValue());
}
}
}
/**
* Returns a new Hadoop Configuration object using the path to the hadoop conf configured in the
* main configuration (config.yaml). This method is public because its being used in the
* HadoopDataSource.
*
* @param flinkConfiguration Flink configuration object
* @return A Hadoop configuration instance
*/
public static Configuration getHadoopConfiguration(
org.apache.flink.configuration.Configuration flinkConfiguration) {
Configuration retConf = new Configuration();
// We need to load both core-site.xml and hdfs-site.xml to determine the default fs path and
// the hdfs configuration
// Try to load HDFS configuration from Hadoop's own configuration files
// Approach environment variables
for (String possibleHadoopConfPath : possibleHadoopConfPaths(flinkConfiguration)) {
if (new File(possibleHadoopConfPath).exists()) {
if (new File(possibleHadoopConfPath + "/core-site.xml").exists()) {
retConf.addResource(
new org.apache.hadoop.fs.Path(
possibleHadoopConfPath + "/core-site.xml"));
if (LOG.isDebugEnabled()) {
LOG.debug(
"Adding "
+ possibleHadoopConfPath
+ "/core-site.xml to hadoop configuration");
}
}
if (new File(possibleHadoopConfPath + "/hdfs-site.xml").exists()) {
retConf.addResource(
new org.apache.hadoop.fs.Path(
possibleHadoopConfPath + "/hdfs-site.xml"));
if (LOG.isDebugEnabled()) {
LOG.debug(
"Adding "
+ possibleHadoopConfPath
+ "/hdfs-site.xml to hadoop configuration");
}
}
}
}
return retConf;
}
/**
* Get possible Hadoop conf dir paths, based on environment variables and flink configuration.
*
* @param flinkConfiguration The flink configuration that may contain the path to Hadoop conf
* dir.
* @return an array of possible paths
*/
public static String[] possibleHadoopConfPaths(
org.apache.flink.configuration.Configuration flinkConfiguration) {
String[] possiblePaths = new String[4];
possiblePaths[0] = System.getenv("HADOOP_CONF_DIR");
if (System.getenv("HADOOP_HOME") != null) {
possiblePaths[1] = System.getenv("HADOOP_HOME") + "/conf";
possiblePaths[2] = System.getenv("HADOOP_HOME") + "/etc/hadoop"; // hadoop 2.2
}
return Arrays.stream(possiblePaths).filter(Objects::nonNull).toArray(String[]::new);
}
/** Private constructor to prevent instantiation. */
private HadoopUtils() {
throw new RuntimeException();
}
}
| HadoopUtils |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/validation/beanvalidation/BeanValidationBeanRegistrationAotProcessorTests.java | {
"start": 7899,
"end": 8109
} | class ____ { }
@Constraint(validatedBy = { ExistsValidator.class })
@Target({ METHOD, FIELD, ANNOTATION_TYPE, CONSTRUCTOR, PARAMETER, TYPE_USE })
@Retention(RUNTIME)
@Repeatable(Exists.List.class)
@ | EmptyClass |
java | spring-projects__spring-data-jpa | spring-data-envers/src/test/java/org/springframework/data/envers/repository/support/DefaultRevisionMetadataUnitTests.java | {
"start": 1083,
"end": 1568
} | class ____ {
private static final Instant NOW = Instant.now();
@Test // #112
void createsLocalDateTimeFromTimestamp() {
DefaultRevisionEntity entity = new DefaultRevisionEntity();
entity.setTimestamp(NOW.toEpochMilli());
DefaultRevisionMetadata metadata = new DefaultRevisionMetadata(entity);
assertThat(metadata.getRevisionDate())
.hasValue(LocalDateTime.ofInstant(NOW, ZoneOffset.systemDefault()).truncatedTo(ChronoUnit.MILLIS));
}
}
| DefaultRevisionMetadataUnitTests |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/JsltEndpointBuilderFactory.java | {
"start": 1436,
"end": 1555
} | interface ____ {
/**
* Builder for endpoint for the JSLT component.
*/
public | JsltEndpointBuilderFactory |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/deser/jdk/PrimitiveArrayDeserializers.java | {
"start": 34542,
"end": 40106
} | class ____
extends PrimitiveArrayDeserializers<double[]>
{
public DoubleDeser() { super(double[].class); }
protected DoubleDeser(DoubleDeser base, NullValueProvider nuller, Boolean unwrapSingle) {
super(base, nuller, unwrapSingle);
}
@Override
protected PrimitiveArrayDeserializers<?> withResolved(NullValueProvider nuller,
Boolean unwrapSingle) {
return new DoubleDeser(this, nuller, unwrapSingle);
}
@Override
protected double[] _constructEmpty() {
return new double[0];
}
@Override
public double[] deserialize(JsonParser p, DeserializationContext ctxt) throws JacksonException
{
if (!p.isExpectedStartArrayToken()) {
double[] decoded = _deserializeBinaryVector(p, ctxt);
if (decoded != null) {
return decoded;
}
return handleNonArray(p, ctxt);
}
ArrayBuilders.DoubleBuilder builder = ctxt.getArrayBuilders().getDoubleBuilder();
double[] chunk = builder.resetAndStart();
int ix = 0;
try {
JsonToken t;
while ((t = p.nextToken()) != JsonToken.END_ARRAY) {
if (t == JsonToken.VALUE_NULL) {
if (_nuller != null) {
_nuller.getNullValue(ctxt);
continue;
}
}
double value = _parseDoublePrimitive(p, ctxt);
if (ix >= chunk.length) {
chunk = builder.appendCompletedChunk(chunk, ix);
ix = 0;
}
chunk[ix++] = value;
}
} catch (Exception e) {
throw DatabindException.wrapWithPath(ctxt, e,
new JacksonException.Reference(chunk, builder.bufferedSize() + ix));
}
return builder.completeAndClearBuffer(chunk, ix);
}
@Override
protected double[] handleSingleElementUnwrapped(JsonParser p,
DeserializationContext ctxt)
throws JacksonException
{
return new double[] { _parseDoublePrimitive(p, ctxt) };
}
@Override
protected double[] _concat(double[] oldValue, double[] newValue) {
int len1 = oldValue.length;
int len2 = newValue.length;
double[] result = Arrays.copyOf(oldValue, len1+len2);
System.arraycopy(newValue, 0, result, len1, len2);
return result;
}
// Decodes a "packed binary vector" representation of a double[]: either a
// base64 String (textual formats) or an embedded byte[]/double[] (binary
// formats). Returns null when the current token is none of those, letting the
// caller fall through to its normal handling.
private double[] _deserializeBinaryVector(JsonParser p, DeserializationContext ctxt)
    throws JacksonException
{
    JsonToken t = p.currentToken();
    byte[] packed = null;
    // Typical textual format case: base64 encoded String (for Packed Binary Vector)
    if (t == JsonToken.VALUE_STRING) {
        try {
            packed = p.getBinaryValue(ctxt.getBase64Variant());
        } catch (StreamReadException | DatabindException e) {
            // [databind#1425], try to convert to a more usable one, as it's not really
            // a JSON-level parse exception, but rather binding from JSON String into
            // base64 decoded binary data
            String msg = e.getOriginalMessage();
            if (msg.contains("base64")) {
                // Route through the "weird string" problem handler so callers can
                // recover (or get a properly typed mismatch failure).
                return (double[]) ctxt.handleWeirdStringValue(double[].class,
                        p.getString(), msg);
            }
            throw e;
        }
    } else if (t == JsonToken.VALUE_EMBEDDED_OBJECT) {
        // Typical for binary formats
        Object ob = p.getEmbeddedObject();
        if (ob instanceof byte[] byteArray) {
            packed = byteArray;
        } else if (ob == null || (ob instanceof double[])) {
            // Already the target type (or null): pass through unchanged.
            return (double[]) ob;
        }
    }
    // Packed Binary Vector case
    if (packed != null) {
        return _unpack(ctxt, packed);
    }
    return null;
}
// Unpacks a big-endian byte stream into IEEE-754 doubles, reporting an input
// mismatch when the byte count is not a multiple of 8.
private double[] _unpack(DeserializationContext ctxt, byte[] bytes) throws JacksonException
{
    final int bytesLen = bytes.length;
    // Each double occupies exactly 8 bytes; anything else is malformed input.
    if ((bytesLen & 7) != 0) {
        return (double[]) ctxt.reportInputMismatch(handledType(),
                "Vector length for Packed Binary Double Vector (%d) not a multiple of 8 bytes", bytesLen);
    }
    final double[] doubles = new double[bytesLen >> 3];
    // The packed layout is big-endian (most significant byte first), which is
    // ByteBuffer's default order — use the stdlib bulk decode instead of the
    // previous hand-rolled shift/mask loop (same results, simpler and safer).
    java.nio.ByteBuffer.wrap(bytes).asDoubleBuffer().get(doubles);
    return doubles;
}
}
}
| DoubleDeser |
java | spring-projects__spring-framework | spring-aop/src/main/java/org/springframework/aop/framework/autoproxy/AbstractBeanFactoryAwareAdvisingPostProcessor.java | {
"start": 1694,
"end": 3044
} | class ____ extends AbstractAdvisingBeanPostProcessor
implements BeanFactoryAware {
protected @Nullable ConfigurableListableBeanFactory beanFactory;
@Override
public void setBeanFactory(BeanFactory beanFactory) {
    // Keep the factory only when it exposes the configurable contract this
    // post-processor relies on; otherwise operate without one.
    if (beanFactory instanceof ConfigurableListableBeanFactory clbf) {
        this.beanFactory = clbf;
    }
    else {
        this.beanFactory = null;
    }
    AutoProxyUtils.applyDefaultProxyConfig(this, beanFactory);
}
@Override
protected ProxyFactory prepareProxyFactory(Object bean, String beanName) {
    // Record the bean's target class before the superclass builds the factory,
    // so later lookups can see through the eventual proxy.
    if (this.beanFactory != null) {
        AutoProxyUtils.exposeTargetClass(this.beanFactory, beanName, bean.getClass());
    }
    ProxyFactory proxyFactory = super.prepareProxyFactory(bean, beanName);
    if (this.beanFactory != null) {
        // Bean definition may force CGLIB (class-based) proxying...
        if (AutoProxyUtils.shouldProxyTargetClass(this.beanFactory, beanName)) {
            proxyFactory.setProxyTargetClass(true);
        }
        else {
            // ...or declare an explicit set of interfaces to expose; null means
            // "no preference" and leaves the superclass defaults in place.
            Class<?>[] ifcs = AutoProxyUtils.determineExposedInterfaces(this.beanFactory, beanName);
            if (ifcs != null) {
                proxyFactory.setProxyTargetClass(false);
                for (Class<?> ifc : ifcs) {
                    proxyFactory.addInterface(ifc);
                }
            }
        }
    }
    return proxyFactory;
}
@Override
protected boolean isEligible(Object bean, String beanName) {
    // Never advise the "original instance" companion bean; otherwise defer to
    // the superclass advisor-matching check.
    if (AutoProxyUtils.isOriginalInstance(beanName, bean.getClass())) {
        return false;
    }
    return super.isEligible(bean, beanName);
}
}
| AbstractBeanFactoryAwareAdvisingPostProcessor |
java | mockito__mockito | mockito-core/src/test/java/org/mockito/internal/configuration/plugins/PluginFinderTest.java | {
"start": 742,
"end": 3829
} | class ____ extends TestBase {
@Mock PluginSwitch switcher;
@InjectMocks PluginFinder finder;
public @Rule TemporaryFolder tmp = new TemporaryFolder();
@Test
public void empty_resources() {
    // No resource URLs at all -> no plugin class can be resolved.
    assertNull(finder.findPluginClass(Collections.<URL>emptyList()));
}
@Test
public void no_valid_impl() {
    // A resource containing only whitespace yields no plugin class.
    File f = tmp.newFile();
    // when
    IOUtil.writeText(" \n ", f);
    // then
    assertNull(finder.findPluginClass(asList(f.toURI().toURL())));
}
@Test
public void single_implementation() {
    // A single enabled implementation is returned trimmed of whitespace.
    File f = tmp.newFile();
    when(switcher.isEnabled("foo.Foo")).thenReturn(true);
    // when
    IOUtil.writeText(" foo.Foo ", f);
    // then
    assertEquals("foo.Foo", finder.findPluginClass(asList(f.toURI().toURL())));
}
@Test
public void single_implementation_disabled() {
    // An implementation vetoed by the PluginSwitch is not returned.
    File f = tmp.newFile();
    when(switcher.isEnabled("foo.Foo")).thenReturn(false);
    // when
    IOUtil.writeText(" foo.Foo ", f);
    // then
    assertEquals(null, finder.findPluginClass(asList(f.toURI().toURL())));
}
@Test
public void multiple_implementations_only_one_enabled() {
    // With two candidates, only the one the switch enables wins.
    File f1 = tmp.newFile();
    File f2 = tmp.newFile();
    when(switcher.isEnabled("Bar")).thenReturn(true);
    // when
    IOUtil.writeText("Foo", f1);
    IOUtil.writeText("Bar", f2);
    // then
    assertEquals("Bar", finder.findPluginClass(asList(f1.toURI().toURL(), f2.toURI().toURL())));
}
@Test
public void multiple_implementations_only_one_useful() {
    // Blank resources are skipped; the first non-blank class name is used.
    File f1 = tmp.newFile();
    File f2 = tmp.newFile();
    when(switcher.isEnabled(anyString())).thenReturn(true);
    // when
    IOUtil.writeText(" ", f1);
    IOUtil.writeText("X", f2);
    // then
    assertEquals("X", finder.findPluginClass(asList(f1.toURI().toURL(), f2.toURI().toURL())));
}
@Test
public void multiple_empty_implementations() {
    // All-blank resources leave nothing to load -> null result.
    File f1 = tmp.newFile();
    File f2 = tmp.newFile();
    when(switcher.isEnabled(anyString())).thenReturn(true);
    // when
    IOUtil.writeText(" ", f1);
    IOUtil.writeText("\n", f2);
    // then
    assertEquals(null, finder.findPluginClass(asList(f1.toURI().toURL(), f2.toURI().toURL())));
}
@Test
public void problems_loading_impl() {
    // A failure while consulting the switch is wrapped with the resource name
    // and the original cause is preserved.
    String fileName = "xxx";
    File f = tmp.newFile(fileName);
    // when
    IOUtil.writeText("Bar", f);
    when(switcher.isEnabled(anyString())).thenThrow(new RuntimeException("Boo!"));
    try {
        // when
        finder.findPluginClass(asList(f.toURI().toURL()));
        // then
        fail();
    } catch (Exception e) {
        assertThat(e).hasMessageContaining(fileName);
        assertThat(e.getCause()).hasMessage("Boo!");
    }
}
}
| PluginFinderTest |
java | apache__rocketmq | client/src/main/java/org/apache/rocketmq/client/producer/DefaultMQProducer.java | {
"start": 3477,
"end": 62167
} | class ____ extends ClientConfig implements MQProducer {
/**
* Wrapping internal implementations for virtually all methods presented in this class.
*/
protected final transient DefaultMQProducerImpl defaultMQProducerImpl;
private final Logger logger = LoggerFactory.getLogger(DefaultMQProducer.class);
private final Set<Integer> retryResponseCodes = new CopyOnWriteArraySet<>(Arrays.asList(
ResponseCode.TOPIC_NOT_EXIST,
ResponseCode.SERVICE_NOT_AVAILABLE,
ResponseCode.SYSTEM_ERROR,
ResponseCode.SYSTEM_BUSY,
ResponseCode.NO_PERMISSION,
ResponseCode.NO_BUYER_ID,
ResponseCode.NOT_IN_CURRENT_UNIT,
ResponseCode.GO_AWAY
));
/**
* Producer group conceptually aggregates all producer instances of exactly same role, which is particularly
* important when transactional messages are involved. </p>
* <p>
* For non-transactional messages, it does not matter as long as it's unique per process. </p>
* <p>
* See <a href="https://rocketmq.apache.org/docs/introduction/02concepts">core concepts</a> for more discussion.
*/
private String producerGroup;
/**
* Topics that need to be initialized for transaction producer
*/
private List<String> topics;
/**
* Just for testing or demo program
*/
private String createTopicKey = TopicValidator.AUTO_CREATE_TOPIC_KEY_TOPIC;
/**
* Number of queues to create per default topic.
*/
private volatile int defaultTopicQueueNums = 4;
/**
* Timeout for sending messages.
*/
private int sendMsgTimeout = 3000;
/**
* Max timeout for sending messages per request.
*/
private int sendMsgMaxTimeoutPerRequest = -1;
/**
* Compress message body threshold, namely, message body larger than 4k will be compressed on default.
*/
private int compressMsgBodyOverHowmuch = 1024 * 4;
/**
* Maximum number of retry to perform internally before claiming sending failure in synchronous mode. </p>
* <p>
* This may potentially cause message duplication which is up to application developers to resolve.
*/
private int retryTimesWhenSendFailed = 2;
/**
* Maximum number of retry to perform internally before claiming sending failure in asynchronous mode. </p>
* <p>
* This may potentially cause message duplication which is up to application developers to resolve.
*/
private int retryTimesWhenSendAsyncFailed = 2;
/**
* Indicate whether to retry another broker on sending failure internally.
*/
private boolean retryAnotherBrokerWhenNotStoreOK = false;
/**
* Maximum allowed message body size in bytes.
*/
private int maxMessageSize = 1024 * 1024 * 4; // 4M
/**
* Interface of asynchronous transfer data
*/
private TraceDispatcher traceDispatcher = null;
/**
* Switch flag instance for automatic batch message
*/
private boolean autoBatch = false;
/**
* Instance for batching message automatically
*/
private ProduceAccumulator produceAccumulator = null;
/**
* Indicate whether to block message when asynchronous sending traffic is too heavy.
*/
private boolean enableBackpressureForAsyncMode = false;
/**
* on BackpressureForAsyncMode, limit maximum number of on-going sending async messages
* default is 1024
*/
private int backPressureForAsyncSendNum = 1024;
/**
* on BackpressureForAsyncMode, limit maximum message size of on-going sending async messages
* default is 100M
*/
private int backPressureForAsyncSendSize = 100 * 1024 * 1024;
/**
* Maximum hold time of accumulator.
*/
private int batchMaxDelayMs = -1;
/**
* Maximum accumulation message body size for a single messageAccumulation.
*/
private long batchMaxBytes = -1;
/**
* Maximum message body size for produceAccumulator.
*/
private long totalBatchMaxBytes = -1;
private RPCHook rpcHook = null;
/**
* backPressureForAsyncSendNum is guaranteed to be modified at runtime and no new requests are allowed
*/
private final ReadWriteCASLock backPressureForAsyncSendNumLock = new ReadWriteCASLock();
/**
* backPressureForAsyncSendSize is guaranteed to be modified at runtime and no new requests are allowed
*/
private final ReadWriteCASLock backPressureForAsyncSendSizeLock = new ReadWriteCASLock();
/**
* Compress level of compress algorithm.
*/
private int compressLevel = Integer.parseInt(System.getProperty(MixAll.MESSAGE_COMPRESS_LEVEL, "5"));
/**
* Compress type of compress algorithm, default using ZLIB.
*/
private CompressionType compressType = CompressionType.of(System.getProperty(MixAll.MESSAGE_COMPRESS_TYPE, "ZLIB"));
/**
* Compressor of compress algorithm.
*/
private Compressor compressor = CompressorFactory.getCompressor(compressType);
/**
* Default constructor.
*/
public DefaultMQProducer() {
    // Delegate to the group-only constructor with the library default group.
    this(MixAll.DEFAULT_PRODUCER_GROUP);
}
/**
* Constructor specifying the RPC hook.
*
* @param rpcHook RPC hook to execute per each remoting command execution.
*/
public DefaultMQProducer(RPCHook rpcHook) {
    // Default group, custom RPC hook.
    this(MixAll.DEFAULT_PRODUCER_GROUP, rpcHook);
}
/**
* Constructor specifying producer group.
*
* @param producerGroup Producer group, see the name-sake field.
*/
public DefaultMQProducer(final String producerGroup) {
    // Cast disambiguates between the RPCHook and deprecated namespace overloads.
    this(producerGroup, (RPCHook) null);
}
/**
* Constructor specifying both producer group and RPC hook.
*
* @param producerGroup Producer group, see the name-sake field.
* @param rpcHook RPC hook to execute per each remoting command execution.
*/
public DefaultMQProducer(final String producerGroup, RPCHook rpcHook) {
    // No pre-initialized topics.
    this(producerGroup, rpcHook, null);
}
/**
* Constructor specifying namespace, producer group, topics and RPC hook.
*
* @param producerGroup Producer group, see the name-sake field.
* @param rpcHook RPC hook to execute per each remoting command execution.
* @param topics Topic that needs to be initialized for routing
*/
public DefaultMQProducer(final String producerGroup, RPCHook rpcHook,
    final List<String> topics) {
    // Message trace disabled, default trace topic.
    this(producerGroup, rpcHook, topics, false, null);
}
/**
* Constructor specifying producer group, enabled msgTrace flag and customized trace topic name.
*
* @param producerGroup Producer group, see the name-sake field.
* @param enableMsgTrace Switch flag instance for message trace.
* @param customizedTraceTopic The name value of message trace topic.If you don't config,you can use the default
* trace topic name.
*/
public DefaultMQProducer(final String producerGroup, boolean enableMsgTrace, final String customizedTraceTopic) {
    // No RPC hook.
    this(producerGroup, null, enableMsgTrace, customizedTraceTopic);
}
/**
* Constructor specifying producer group.
*
* @param producerGroup Producer group, see the name-sake field.
* @param rpcHook RPC hook to execute per each remoting command execution.
* @param enableMsgTrace Switch flag instance for message trace.
* @param customizedTraceTopic The name value of message trace topic.If you don't config,you can use the default
* trace topic name.
*/
public DefaultMQProducer(final String producerGroup, RPCHook rpcHook, boolean enableMsgTrace,
    final String customizedTraceTopic) {
    // No pre-initialized topics.
    this(producerGroup, rpcHook, null, enableMsgTrace, customizedTraceTopic);
}
/**
* Constructor specifying namespace, producer group, topics, RPC hook, enabled msgTrace flag and customized trace topic
* name.
*
* @param producerGroup Producer group, see the name-sake field.
* @param rpcHook RPC hook to execute per each remoting command execution.
* @param topics Topic that needs to be initialized for routing
* @param enableMsgTrace Switch flag instance for message trace.
* @param customizedTraceTopic The name value of message trace topic.If you don't config,you can use the default
* trace topic name.
*/
public DefaultMQProducer(final String producerGroup, RPCHook rpcHook, final List<String> topics,
    boolean enableMsgTrace, final String customizedTraceTopic) {
    // Primary constructor: all other non-deprecated constructors funnel here.
    this.producerGroup = producerGroup;
    this.rpcHook = rpcHook;
    this.topics = topics;
    this.enableTrace = enableMsgTrace;
    this.traceTopic = customizedTraceTopic;
    // The trace dispatcher itself is created lazily in start(), not here.
    defaultMQProducerImpl = new DefaultMQProducerImpl(this, rpcHook);
}
/**
* Constructor specifying producer group.
*
* @param namespace Namespace for this MQ Producer instance.
* @param producerGroup Producer group, see the name-sake field.
*/
@Deprecated
public DefaultMQProducer(final String namespace, final String producerGroup) {
    // Deprecated namespace-style constructor; no RPC hook.
    this(namespace, producerGroup, null);
}
/**
* Constructor specifying namespace, producer group and RPC hook.
*
* @param namespace Namespace for this MQ Producer instance.
* @param producerGroup Producer group, see the name-sake field.
* @param rpcHook RPC hook to execute per each remoting command execution.
*/
@Deprecated
public DefaultMQProducer(final String namespace, final String producerGroup, RPCHook rpcHook) {
    // Deprecated: sets the legacy namespace field directly rather than
    // going through the primary constructor.
    this.namespace = namespace;
    this.producerGroup = producerGroup;
    this.rpcHook = rpcHook;
    defaultMQProducerImpl = new DefaultMQProducerImpl(this, rpcHook);
}
/**
* Constructor specifying namespace, producer group, RPC hook, enabled msgTrace flag and customized trace topic
* name.
*
* @param namespace Namespace for this MQ Producer instance.
* @param producerGroup Producer group, see the name-sake field.
* @param rpcHook RPC hook to execute per each remoting command execution.
* @param enableMsgTrace Switch flag instance for message trace.
* @param customizedTraceTopic The name value of message trace topic.If you don't config,you can use the default
* trace topic name.
*/
@Deprecated
public DefaultMQProducer(final String namespace, final String producerGroup, RPCHook rpcHook,
    boolean enableMsgTrace, final String customizedTraceTopic) {
    this(namespace, producerGroup, rpcHook);
    //if client open the message trace feature
    this.enableTrace = enableMsgTrace;
    this.traceTopic = customizedTraceTopic;
}
/**
* Start this producer instance. </p>
*
* <strong> Much internal initializing procedures are carried out to make this instance prepared, thus, it's a must
* to invoke this method before sending or querying messages. </strong> </p>
*
* @throws MQClientException if there is any unexpected error.
*/
@Override
public void start() throws MQClientException {
    // Normalize the group with the namespace before the impl starts using it.
    this.setProducerGroup(withNamespace(this.producerGroup));
    this.defaultMQProducerImpl.start();
    if (this.produceAccumulator != null) {
        this.produceAccumulator.start();
    }
    if (enableTrace) {
        try {
            // Wire trace hooks so send/transaction events are reported asynchronously.
            AsyncTraceDispatcher dispatcher = new AsyncTraceDispatcher(producerGroup, TraceDispatcher.Type.PRODUCE, getTraceMsgBatchNum(), traceTopic, rpcHook);
            dispatcher.setHostProducer(this.defaultMQProducerImpl);
            dispatcher.setNamespaceV2(this.namespaceV2);
            traceDispatcher = dispatcher;
            this.defaultMQProducerImpl.registerSendMessageHook(
                new SendMessageTraceHookImpl(traceDispatcher));
            this.defaultMQProducerImpl.registerEndTransactionHook(
                new EndTransactionTraceHookImpl(traceDispatcher));
            this.defaultMQProducerImpl.getMqClientFactory().getMQClientAPIImpl().getRemotingClient()
                .registerRPCHook(new DefaultRecallMessageTraceHook(traceDispatcher));
        } catch (Throwable e) {
            // Fix: previously the throwable was silently dropped from the log
            // call, hiding the root cause of trace-hook init failures.
            logger.error("system mqtrace hook init failed ,maybe can't send msg trace data", e);
        }
    }
    if (null != traceDispatcher) {
        if (traceDispatcher instanceof AsyncTraceDispatcher) {
            ((AsyncTraceDispatcher) traceDispatcher).getTraceProducer().setUseTLS(isUseTLS());
        }
        try {
            // Trace startup failure must not prevent the producer from starting.
            traceDispatcher.start(this.getNamesrvAddr(), this.getAccessChannel());
        } catch (MQClientException e) {
            logger.warn("trace dispatcher start failed ", e);
        }
    }
}
/**
* This method shuts down this producer instance and releases related resources.
*/
@Override
public void shutdown() {
    // Shutdown order: core impl first, then the batching accumulator, and the
    // trace dispatcher last so in-flight trace data can still be flushed.
    this.defaultMQProducerImpl.shutdown();
    if (this.produceAccumulator != null) {
        this.produceAccumulator.shutdown();
    }
    if (null != traceDispatcher) {
        traceDispatcher.shutdown();
    }
}
/**
* Fetch message queues of topic <code>topic</code>, to which we may send/publish messages.
*
* @param topic Topic to fetch.
* @return List of message queues readily to send messages to
* @throws MQClientException if there is any client error.
*/
@Override
public List<MessageQueue> fetchPublishMessageQueues(String topic) throws MQClientException {
    // Topic is namespace-qualified before the route lookup.
    return this.defaultMQProducerImpl.fetchPublishMessageQueues(withNamespace(topic));
}
/**
 * Decides whether a message may be merged into an automatic batch.
 * Note: {@code tryAddMessage} both checks and reserves accumulator capacity.
 */
private boolean canBatch(Message msg) {
    // produceAccumulator is full
    if (!produceAccumulator.tryAddMessage(msg)) {
        return false;
    }
    // delay message do not support batch processing
    final boolean delayed = msg.getDelayTimeLevel() > 0
        || msg.getDelayTimeMs() > 0
        || msg.getDelayTimeSec() > 0
        || msg.getDeliverTimeMs() > 0;
    if (delayed) {
        return false;
    }
    // retry message do not support batch processing
    final boolean retryTopic = msg.getTopic().startsWith(MixAll.RETRY_GROUP_TOPIC_PREFIX);
    // message which have been assigned to producer group do not support batch processing
    final boolean boundToGroup = msg.getProperties().containsKey(MessageConst.PROPERTY_PRODUCER_GROUP);
    return !retryTopic && !boundToGroup;
}
/**
* Send message in synchronous mode. This method returns only when the sending procedure totally completes. </p>
*
* <strong>Warn:</strong> this method has internal retry-mechanism, that is, internal implementation will retry
* {@link #retryTimesWhenSendFailed} times before claiming failure. As a result, multiple messages may be potentially
* delivered to broker(s). It's up to the application developers to resolve potential duplication issue.
*
* @param msg Message to send.
* @return {@link SendResult} instance to inform senders details of the deliverable, say Message ID of the message,
* {@link SendStatus} indicating broker storage/replication status, message queue sent to, etc.
* @throws MQClientException if there is any client error.
* @throws RemotingException if there is any network-tier error.
* @throws MQBrokerException if there is any error with broker.
* @throws InterruptedException if the sending thread is interrupted.
*/
@Override
public SendResult send(
    Message msg) throws MQClientException, RemotingException, MQBrokerException, InterruptedException {
    msg.setTopic(withNamespace(msg.getTopic()));
    // Route through the accumulator when auto-batching is on, except for
    // explicit MessageBatch payloads which are already batched.
    if (this.getAutoBatch() && !(msg instanceof MessageBatch)) {
        return sendByAccumulator(msg, null, null);
    } else {
        return sendDirect(msg, null, null);
    }
}
/**
* Same to {@link #send(Message)} with send timeout specified in addition.
*
* @param msg Message to send.
* @param timeout send timeout.
* @return {@link SendResult} instance to inform senders details of the deliverable, say Message ID of the message,
* {@link SendStatus} indicating broker storage/replication status, message queue sent to, etc.
* @throws MQClientException if there is any client error.
* @throws RemotingException if there is any network-tier error.
* @throws MQBrokerException if there is any error with broker.
* @throws InterruptedException if the sending thread is interrupted.
*/
@Override
public SendResult send(Message msg,
    long timeout) throws MQClientException, RemotingException, MQBrokerException, InterruptedException {
    // Timeout overload bypasses the accumulator and goes straight to the impl.
    msg.setTopic(withNamespace(msg.getTopic()));
    return this.defaultMQProducerImpl.send(msg, timeout);
}
/**
* Send message to broker asynchronously. </p>
* <p>
* This method returns immediately. On sending completion, <code>sendCallback</code> will be executed. </p>
* <p>
* Similar to {@link #send(Message)}, internal implementation would potentially retry up to {@link
* #retryTimesWhenSendAsyncFailed} times before claiming sending failure, which may yield message duplication and
* application developers are the one to resolve this potential issue.
*
* @param msg Message to send.
* @param sendCallback Callback to execute on sending completed, either successful or unsuccessful.
* @throws MQClientException if there is any client error.
* @throws RemotingException if there is any network-tier error.
* @throws InterruptedException if the sending thread is interrupted.
*/
@Override
public void send(Message msg,
    SendCallback sendCallback) throws MQClientException, RemotingException, InterruptedException {
    msg.setTopic(withNamespace(msg.getTopic()));
    try {
        if (this.getAutoBatch() && !(msg instanceof MessageBatch)) {
            sendByAccumulator(msg, null, sendCallback);
        } else {
            sendDirect(msg, null, sendCallback);
        }
    } catch (Throwable e) {
        // Async contract: all failures are surfaced via the callback.
        sendCallback.onException(e);
    }
}
/**
* Same to {@link #send(Message, SendCallback)} with send timeout specified in addition.
*
* @param msg message to send.
* @param sendCallback Callback to execute.
* @param timeout send timeout.
* @throws MQClientException if there is any client error.
* @throws RemotingException if there is any network-tier error.
* @throws InterruptedException if the sending thread is interrupted.
*/
@Override
public void send(Message msg, SendCallback sendCallback, long timeout)
    throws MQClientException, RemotingException, InterruptedException {
    // Timeout overload bypasses the accumulator and goes straight to the impl.
    msg.setTopic(withNamespace(msg.getTopic()));
    this.defaultMQProducerImpl.send(msg, sendCallback, timeout);
}
/**
* Similar to <a href="https://en.wikipedia.org/wiki/User_Datagram_Protocol">UDP</a>, this method won't wait for
* acknowledgement from broker before return. Obviously, it has maximums throughput yet potentials of message loss.
*
* @param msg Message to send.
* @throws MQClientException if there is any client error.
* @throws RemotingException if there is any network-tier error.
* @throws InterruptedException if the sending thread is interrupted.
*/
@Override
public void sendOneway(Message msg) throws MQClientException, RemotingException, InterruptedException {
    // Fire-and-forget: no broker acknowledgement is awaited.
    msg.setTopic(withNamespace(msg.getTopic()));
    this.defaultMQProducerImpl.sendOneway(msg);
}
/**
* Same to {@link #send(Message)} with target message queue specified in addition.
*
* @param msg Message to send.
* @param mq Target message queue.
* @return {@link SendResult} instance to inform senders details of the deliverable, say Message ID of the message,
* {@link SendStatus} indicating broker storage/replication status, message queue sent to, etc.
* @throws MQClientException if there is any client error.
* @throws RemotingException if there is any network-tier error.
* @throws MQBrokerException if there is any error with broker.
* @throws InterruptedException if the sending thread is interrupted.
*/
@Override
public SendResult send(Message msg, MessageQueue mq)
    throws MQClientException, RemotingException, MQBrokerException, InterruptedException {
    // Both the topic and the target queue are namespace-qualified.
    msg.setTopic(withNamespace(msg.getTopic()));
    mq = queueWithNamespace(mq);
    if (this.getAutoBatch() && !(msg instanceof MessageBatch)) {
        return sendByAccumulator(msg, mq, null);
    } else {
        return sendDirect(msg, mq, null);
    }
}
/**
* Same to {@link #send(Message)} with target message queue and send timeout specified.
*
* @param msg Message to send.
* @param mq Target message queue.
* @param timeout send timeout.
* @return {@link SendResult} instance to inform senders details of the deliverable, say Message ID of the message,
* {@link SendStatus} indicating broker storage/replication status, message queue sent to, etc.
* @throws MQClientException if there is any client error.
* @throws RemotingException if there is any network-tier error.
* @throws MQBrokerException if there is any error with broker.
* @throws InterruptedException if the sending thread is interrupted.
*/
@Override
public SendResult send(Message msg, MessageQueue mq, long timeout)
    throws MQClientException, RemotingException, MQBrokerException, InterruptedException {
    // Timeout overload bypasses the accumulator and goes straight to the impl.
    msg.setTopic(withNamespace(msg.getTopic()));
    return this.defaultMQProducerImpl.send(msg, queueWithNamespace(mq), timeout);
}
/**
* Same to {@link #send(Message, SendCallback)} with target message queue specified.
*
* @param msg Message to send.
* @param mq Target message queue.
* @param sendCallback Callback to execute on sending completed, either successful or unsuccessful.
* @throws MQClientException if there is any client error.
* @throws RemotingException if there is any network-tier error.
* @throws InterruptedException if the sending thread is interrupted.
*/
@Override
public void send(Message msg, MessageQueue mq, SendCallback sendCallback)
    throws MQClientException, RemotingException, InterruptedException {
    msg.setTopic(withNamespace(msg.getTopic()));
    mq = queueWithNamespace(mq);
    try {
        if (this.getAutoBatch() && !(msg instanceof MessageBatch)) {
            sendByAccumulator(msg, mq, sendCallback);
        } else {
            sendDirect(msg, mq, sendCallback);
        }
    } catch (MQBrokerException e) {
        // Fix: surface broker failures through the callback instead of silently
        // swallowing them; this matches the exception handling of the other
        // async send overloads, which route failures to onException.
        sendCallback.onException(e);
    }
}
/**
* Same to {@link #send(Message, SendCallback)} with target message queue and send timeout specified.
*
* @param msg Message to send.
* @param mq Target message queue.
* @param sendCallback Callback to execute on sending completed, either successful or unsuccessful.
* @param timeout Send timeout.
* @throws MQClientException if there is any client error.
* @throws RemotingException if there is any network-tier error.
* @throws InterruptedException if the sending thread is interrupted.
*/
@Override
public void send(Message msg, MessageQueue mq, SendCallback sendCallback, long timeout)
    throws MQClientException, RemotingException, InterruptedException {
    // Timeout overload bypasses the accumulator and goes straight to the impl.
    msg.setTopic(withNamespace(msg.getTopic()));
    this.defaultMQProducerImpl.send(msg, queueWithNamespace(mq), sendCallback, timeout);
}
/**
* Same to {@link #sendOneway(Message)} with target message queue specified.
*
* @param msg Message to send.
* @param mq Target message queue.
* @throws MQClientException if there is any client error.
* @throws RemotingException if there is any network-tier error.
* @throws InterruptedException if the sending thread is interrupted.
*/
@Override
public void sendOneway(Message msg,
    MessageQueue mq) throws MQClientException, RemotingException, InterruptedException {
    // Fire-and-forget to a specific queue.
    msg.setTopic(withNamespace(msg.getTopic()));
    this.defaultMQProducerImpl.sendOneway(msg, queueWithNamespace(mq));
}
/**
* Same to {@link #send(Message)} with message queue selector specified.
*
* @param msg Message to send.
* @param selector Message queue selector, through which we get target message queue to deliver message to.
* @param arg Argument to work along with message queue selector.
* @return {@link SendResult} instance to inform senders details of the deliverable, say Message ID of the message,
* {@link SendStatus} indicating broker storage/replication status, message queue sent to, etc.
* @throws MQClientException if there is any client error.
* @throws RemotingException if there is any network-tier error.
* @throws MQBrokerException if there is any error with broker.
* @throws InterruptedException if the sending thread is interrupted.
*/
@Override
public SendResult send(Message msg, MessageQueueSelector selector, Object arg)
    throws MQClientException, RemotingException, MQBrokerException, InterruptedException {
    msg.setTopic(withNamespace(msg.getTopic()));
    // Resolve the target queue via the user-supplied selector before choosing
    // the batched or direct path.
    MessageQueue mq = this.defaultMQProducerImpl.invokeMessageQueueSelector(msg, selector, arg, this.getSendMsgTimeout());
    mq = queueWithNamespace(mq);
    if (this.getAutoBatch() && !(msg instanceof MessageBatch)) {
        return sendByAccumulator(msg, mq, null);
    } else {
        return sendDirect(msg, mq, null);
    }
}
/**
* Same to {@link #send(Message, MessageQueueSelector, Object)} with send timeout specified.
*
* @param msg Message to send.
* @param selector Message queue selector, through which we get target message queue to deliver message to.
* @param arg Argument to work along with message queue selector.
* @param timeout Send timeout.
* @return {@link SendResult} instance to inform senders details of the deliverable, say Message ID of the message,
* {@link SendStatus} indicating broker storage/replication status, message queue sent to, etc.
* @throws MQClientException if there is any client error.
* @throws RemotingException if there is any network-tier error.
* @throws MQBrokerException if there is any error with broker.
* @throws InterruptedException if the sending thread is interrupted.
*/
@Override
public SendResult send(Message msg, MessageQueueSelector selector, Object arg, long timeout)
    throws MQClientException, RemotingException, MQBrokerException, InterruptedException {
    // Timeout overload bypasses the accumulator and goes straight to the impl.
    msg.setTopic(withNamespace(msg.getTopic()));
    return this.defaultMQProducerImpl.send(msg, selector, arg, timeout);
}
/**
* Same to {@link #send(Message, SendCallback)} with message queue selector specified.
*
* @param msg Message to send.
* @param selector Message selector through which to get target message queue.
* @param arg Argument used along with message queue selector.
* @param sendCallback callback to execute on sending completion.
* @throws MQClientException if there is any client error.
* @throws RemotingException if there is any network-tier error.
* @throws InterruptedException if the sending thread is interrupted.
*/
@Override
public void send(Message msg, MessageQueueSelector selector, Object arg, SendCallback sendCallback)
    throws MQClientException, RemotingException, InterruptedException {
    msg.setTopic(withNamespace(msg.getTopic()));
    try {
        // Selector invocation can itself fail; that failure is also delivered
        // through the callback below.
        MessageQueue mq = this.defaultMQProducerImpl.invokeMessageQueueSelector(msg, selector, arg, this.getSendMsgTimeout());
        mq = queueWithNamespace(mq);
        if (this.getAutoBatch() && !(msg instanceof MessageBatch)) {
            sendByAccumulator(msg, mq, sendCallback);
        } else {
            sendDirect(msg, mq, sendCallback);
        }
    } catch (Throwable e) {
        sendCallback.onException(e);
    }
}
/**
* Same to {@link #send(Message, MessageQueueSelector, Object, SendCallback)} with timeout specified.
*
* @param msg Message to send.
* @param selector Message selector through which to get target message queue.
* @param arg Argument used along with message queue selector.
* @param sendCallback callback to execute on sending completion.
* @param timeout Send timeout.
* @throws MQClientException if there is any client error.
* @throws RemotingException if there is any network-tier error.
* @throws InterruptedException if the sending thread is interrupted.
*/
@Override
public void send(Message msg, MessageQueueSelector selector, Object arg, SendCallback sendCallback, long timeout)
    throws MQClientException, RemotingException, InterruptedException {
    // Timeout overload bypasses the accumulator and goes straight to the impl.
    msg.setTopic(withNamespace(msg.getTopic()));
    this.defaultMQProducerImpl.send(msg, selector, arg, sendCallback, timeout);
}
/**
 * Sends without accumulator batching. A null callback selects the synchronous
 * path (result returned); a non-null callback selects the asynchronous path
 * (result delivered via callback, method returns null). A null queue lets the
 * impl pick the queue itself.
 */
public SendResult sendDirect(Message msg, MessageQueue mq,
    SendCallback sendCallback) throws MQClientException, RemotingException, InterruptedException, MQBrokerException {
    final boolean sync = (sendCallback == null);
    final boolean hasQueue = (mq != null);
    if (sync) {
        // send in sync mode
        return hasQueue
            ? this.defaultMQProducerImpl.send(msg, mq)
            : this.defaultMQProducerImpl.send(msg);
    }
    if (hasQueue) {
        this.defaultMQProducerImpl.send(msg, mq, sendCallback);
    } else {
        this.defaultMQProducerImpl.send(msg, sendCallback);
    }
    return null;
}
// Sends through the produce accumulator when the message is batchable;
// otherwise falls back to a direct send. A null callback means synchronous.
public SendResult sendByAccumulator(Message msg, MessageQueue mq,
    SendCallback sendCallback) throws MQClientException, RemotingException, InterruptedException, MQBrokerException {
    // check whether it can batch
    if (!canBatch(msg)) {
        return sendDirect(msg, mq, sendCallback);
    } else {
        // Validate and assign the unique ID here, since the accumulator will
        // merge this message with others before the impl sees it.
        Validators.checkMessage(msg, this);
        MessageClientIDSetter.setUniqID(msg);
        if (sendCallback == null) {
            return this.produceAccumulator.send(msg, mq, this);
        } else {
            this.produceAccumulator.send(msg, mq, sendCallback, this);
            return null;
        }
    }
}
    /**
     * Send request message in synchronous mode. This method returns only when the consumer consume the request message and reply a message. </p>
     *
     * <strong>Warn:</strong> this method has internal retry-mechanism, that is, internal implementation will retry
     * {@link #retryTimesWhenSendFailed} times before claiming failure. As a result, multiple messages may be potentially
     * delivered to broker(s). It's up to the application developers to resolve potential duplication issue.
     *
     * @param msg request message to send
     * @param timeout request timeout
     * @return reply message
     * @throws MQClientException if there is any client error.
     * @throws RemotingException if there is any network-tier error.
     * @throws MQBrokerException if there is any broker error.
     * @throws InterruptedException if the thread is interrupted.
     * @throws RequestTimeoutException if request timeout.
     */
    @Override
    public Message request(final Message msg, final long timeout) throws RequestTimeoutException, MQClientException,
        RemotingException, MQBrokerException, InterruptedException {
        // Qualify the topic with this producer's namespace before delegating.
        msg.setTopic(withNamespace(msg.getTopic()));
        return this.defaultMQProducerImpl.request(msg, timeout);
    }
    /**
     * Request asynchronously. </p>
     * This method returns immediately. On receiving reply message, <code>requestCallback</code> will be executed. </p>
     * <p>
     * Similar to {@link #request(Message, long)}, internal implementation would potentially retry up to {@link
     * #retryTimesWhenSendAsyncFailed} times before claiming sending failure, which may yield message duplication and
     * application developers are the one to resolve this potential issue.
     *
     * @param msg request message to send
     * @param requestCallback callback to execute on request completion.
     * @param timeout request timeout
     * @throws MQClientException if there is any client error.
     * @throws RemotingException if there is any network-tier error.
     * @throws InterruptedException if the thread is interrupted.
     * @throws MQBrokerException if there is any broker error.
     */
    @Override
    public void request(final Message msg, final RequestCallback requestCallback, final long timeout)
        throws MQClientException, RemotingException, InterruptedException, MQBrokerException {
        // Qualify the topic with this producer's namespace before delegating.
        msg.setTopic(withNamespace(msg.getTopic()));
        this.defaultMQProducerImpl.request(msg, requestCallback, timeout);
    }
    /**
     * Same to {@link #request(Message, long)} with message queue selector specified.
     *
     * @param msg request message to send
     * @param selector message queue selector, through which we get target message queue to deliver message to.
     * @param arg argument to work along with message queue selector.
     * @param timeout timeout of request.
     * @return reply message
     * @throws MQClientException if there is any client error.
     * @throws RemotingException if there is any network-tier error.
     * @throws MQBrokerException if there is any broker error.
     * @throws InterruptedException if the thread is interrupted.
     * @throws RequestTimeoutException if request timeout.
     */
    @Override
    public Message request(final Message msg, final MessageQueueSelector selector, final Object arg,
        final long timeout) throws MQClientException, RemotingException, MQBrokerException,
        InterruptedException, RequestTimeoutException {
        // Qualify the topic with this producer's namespace before delegating.
        msg.setTopic(withNamespace(msg.getTopic()));
        return this.defaultMQProducerImpl.request(msg, selector, arg, timeout);
    }
    /**
     * Same to {@link #request(Message, RequestCallback, long)} with target message selector specified.
     *
     * @param msg request message to send
     * @param selector message queue selector, through which we get target message queue to deliver message to.
     * @param arg argument to work along with message queue selector.
     * @param requestCallback callback to execute on request completion.
     * @param timeout timeout of request.
     * @throws MQClientException if there is any client error.
     * @throws RemotingException if there is any network-tier error.
     * @throws InterruptedException if the thread is interrupted.
     * @throws MQBrokerException if there is any broker error.
     */
    @Override
    public void request(final Message msg, final MessageQueueSelector selector, final Object arg,
        final RequestCallback requestCallback, final long timeout) throws MQClientException, RemotingException,
        InterruptedException, MQBrokerException {
        // Qualify the topic with this producer's namespace before delegating.
        msg.setTopic(withNamespace(msg.getTopic()));
        this.defaultMQProducerImpl.request(msg, selector, arg, requestCallback, timeout);
    }
    /**
     * Same to {@link #request(Message, long)} with target message queue specified in addition.
     *
     * @param msg request message to send
     * @param mq target message queue.
     * @param timeout request timeout
     * @throws MQClientException if there is any client error.
     * @throws RemotingException if there is any network-tier error.
     * @throws MQBrokerException if there is any broker error.
     * @throws InterruptedException if the thread is interrupted.
     * @throws RequestTimeoutException if request timeout.
     */
    @Override
    public Message request(final Message msg, final MessageQueue mq, final long timeout)
        throws MQClientException, RemotingException, MQBrokerException, InterruptedException, RequestTimeoutException {
        // Qualify the topic with this producer's namespace before delegating.
        msg.setTopic(withNamespace(msg.getTopic()));
        return this.defaultMQProducerImpl.request(msg, mq, timeout);
    }
    /**
     * Same to {@link #request(Message, RequestCallback, long)} with target message queue specified.
     *
     * @param msg request message to send
     * @param mq target message queue.
     * @param requestCallback callback to execute on request completion.
     * @param timeout timeout of request.
     * @throws MQClientException if there is any client error.
     * @throws RemotingException if there is any network-tier error.
     * @throws InterruptedException if the thread is interrupted.
     * @throws MQBrokerException if there is any broker error.
     */
    @Override
    public void request(final Message msg, final MessageQueue mq, final RequestCallback requestCallback, long timeout)
        throws MQClientException, RemotingException, InterruptedException, MQBrokerException {
        // Qualify the topic with this producer's namespace before delegating.
        msg.setTopic(withNamespace(msg.getTopic()));
        this.defaultMQProducerImpl.request(msg, mq, requestCallback, timeout);
    }
    /**
     * Same to {@link #sendOneway(Message)} with message queue selector specified.
     *
     * @param msg Message to send.
     * @param selector Message queue selector, through which to determine target message queue to deliver message
     * @param arg Argument used along with message queue selector.
     * @throws MQClientException if there is any client error.
     * @throws RemotingException if there is any network-tier error.
     * @throws InterruptedException if the sending thread is interrupted.
     */
    @Override
    public void sendOneway(Message msg, MessageQueueSelector selector, Object arg)
        throws MQClientException, RemotingException, InterruptedException {
        // One-way: fire and forget, no broker acknowledgement is awaited.
        msg.setTopic(withNamespace(msg.getTopic()));
        this.defaultMQProducerImpl.sendOneway(msg, selector, arg);
    }
/**
* This method is used to send transactional messages.
*
* @param msg Transactional message to send.
* @param arg Argument used along with local transaction executor.
* @return Transaction result.
* @throws MQClientException
*/
@Override
public TransactionSendResult sendMessageInTransaction(Message msg,
Object arg) throws MQClientException {
throw new RuntimeException("sendMessageInTransaction not implement, please use TransactionMQProducer class");
}
/**
* This method will be removed in a certain version after April 5, 2020, so please do not use this method.
*
* @param key accessKey
* @param newTopic topic name
* @param queueNum topic's queue number
* @param attributes
* @throws MQClientException if there is any client error.
*/
@Deprecated
@Override
public void createTopic(String key, String newTopic, int queueNum,
Map<String, String> attributes) throws MQClientException {
createTopic(key, withNamespace(newTopic), queueNum, 0, null);
}
    /**
     * Create a topic on broker. This method will be removed in a certain version after April 5, 2020, so please do not
     * use this method.
     *
     * @param key accessKey
     * @param newTopic topic name
     * @param queueNum topic's queue number
     * @param topicSysFlag topic system flag
     * @param attributes topic attributes
     * @throws MQClientException if there is any client error.
     */
    @Deprecated
    @Override
    public void createTopic(String key, String newTopic, int queueNum, int topicSysFlag,
        Map<String, String> attributes) throws MQClientException {
        // NOTE(review): the attributes parameter is silently ignored -- only
        // key/topic/queueNum/topicSysFlag reach the impl. Confirm this is intentional
        // for this deprecated API.
        this.defaultMQProducerImpl.createTopic(key, withNamespace(newTopic), queueNum, topicSysFlag);
    }
    /**
     * Search consume queue offset of the given time stamp.
     *
     * @param mq Instance of MessageQueue
     * @param timestamp from when in milliseconds.
     * @return Consume queue offset.
     * @throws MQClientException if there is any client error.
     */
    @Override
    public long searchOffset(MessageQueue mq, long timestamp) throws MQClientException {
        // queueWithNamespace(...) applies this producer's namespace to the queue before delegating.
        return this.defaultMQProducerImpl.searchOffset(queueWithNamespace(mq), timestamp);
    }
    /**
     * Query maximum offset of the given message queue.
     * <p>
     * This method will be removed in a certain version after April 5, 2020, so please do not use this method.
     *
     * @param mq Instance of MessageQueue
     * @return maximum offset of the given consume queue.
     * @throws MQClientException if there is any client error.
     */
    @Deprecated
    @Override
    public long maxOffset(MessageQueue mq) throws MQClientException {
        return this.defaultMQProducerImpl.maxOffset(queueWithNamespace(mq));
    }
    /**
     * Query minimum offset of the given message queue.
     * <p>
     * This method will be removed in a certain version after April 5, 2020, so please do not use this method.
     *
     * @param mq Instance of MessageQueue
     * @return minimum offset of the given message queue.
     * @throws MQClientException if there is any client error.
     */
    @Deprecated
    @Override
    public long minOffset(MessageQueue mq) throws MQClientException {
        return this.defaultMQProducerImpl.minOffset(queueWithNamespace(mq));
    }
    /**
     * Query the earliest message store time.
     * <p>
     * This method will be removed in a certain version after April 5, 2020, so please do not use this method.
     *
     * @param mq Instance of MessageQueue
     * @return earliest message store time.
     * @throws MQClientException if there is any client error.
     */
    @Deprecated
    @Override
    public long earliestMsgStoreTime(MessageQueue mq) throws MQClientException {
        return this.defaultMQProducerImpl.earliestMsgStoreTime(queueWithNamespace(mq));
    }
    /**
     * Query message by key.
     * <p>
     * This method will be removed in a certain version after April 5, 2020, so please do not use this method.
     *
     * @param topic message topic
     * @param key message key index word
     * @param maxNum max message number
     * @param begin from when
     * @param end to when
     * @return QueryResult instance contains matched messages.
     * @throws MQClientException if there is any client error.
     * @throws InterruptedException if the thread is interrupted.
     */
    @Deprecated
    @Override
    public QueryResult queryMessage(String topic, String key, int maxNum, long begin, long end)
        throws MQClientException, InterruptedException {
        return this.defaultMQProducerImpl.queryMessage(withNamespace(topic), key, maxNum, begin, end);
    }
    /**
     * Query message of the given message ID.
     * <p>
     * This method will be removed in a certain version after April 5, 2020, so please do not use this method.
     *
     * @param topic Topic
     * @param msgId Message ID
     * @return Message specified.
     * @throws MQBrokerException if there is any broker error.
     * @throws MQClientException if there is any client error.
     * @throws RemotingException if there is any network-tier error.
     * @throws InterruptedException if the sending thread is interrupted.
     */
    @Deprecated
    @Override
    public MessageExt viewMessage(String topic,
        String msgId) throws RemotingException, MQBrokerException, InterruptedException, MQClientException {
        try {
            // First attempt: look the message up by its store-side id.
            return this.defaultMQProducerImpl.viewMessage(topic, msgId);
        } catch (Exception ignored) {
            // Deliberate best-effort: fall through and retry msgId as a client unique key.
        }
        // NOTE(review): the first lookup uses the raw topic while this fallback applies
        // withNamespace(topic) -- confirm the asymmetry is intentional.
        return this.defaultMQProducerImpl.queryMessageByUniqKey(withNamespace(topic), msgId);
    }
    /** Sends the whole collection as one batch, synchronously. */
    @Override
    public SendResult send(
        Collection<Message> msgs) throws MQClientException, RemotingException, MQBrokerException, InterruptedException {
        return this.defaultMQProducerImpl.send(batch(msgs));
    }
    /** Synchronous batch send with an explicit timeout. */
    @Override
    public SendResult send(Collection<Message> msgs,
        long timeout) throws MQClientException, RemotingException, MQBrokerException, InterruptedException {
        return this.defaultMQProducerImpl.send(batch(msgs), timeout);
    }
    /**
     * Synchronous batch send to a specific message queue.
     * NOTE(review): unlike the async queue overloads below, this one passes the queue
     * through without queueWithNamespace(...) -- confirm that is intentional.
     */
    @Override
    public SendResult send(Collection<Message> msgs,
        MessageQueue messageQueue) throws MQClientException, RemotingException, MQBrokerException, InterruptedException {
        return this.defaultMQProducerImpl.send(batch(msgs), messageQueue);
    }
    /** Synchronous batch send to a specific message queue with an explicit timeout (queue also not namespaced here). */
    @Override
    public SendResult send(Collection<Message> msgs, MessageQueue messageQueue,
        long timeout) throws MQClientException, RemotingException, MQBrokerException, InterruptedException {
        return this.defaultMQProducerImpl.send(batch(msgs), messageQueue, timeout);
    }
    /** Asynchronous batch send; the outcome is reported through {@code sendCallback}. */
    @Override
    public void send(Collection<Message> msgs,
        SendCallback sendCallback) throws MQClientException, RemotingException, MQBrokerException, InterruptedException {
        this.defaultMQProducerImpl.send(batch(msgs), sendCallback);
    }
    /** Asynchronous batch send with an explicit timeout. */
    @Override
    public void send(Collection<Message> msgs, SendCallback sendCallback,
        long timeout) throws MQClientException, RemotingException, MQBrokerException, InterruptedException {
        this.defaultMQProducerImpl.send(batch(msgs), sendCallback, timeout);
    }
    /** Asynchronous batch send to a specific, namespace-qualified message queue. */
    @Override
    public void send(Collection<Message> msgs, MessageQueue mq,
        SendCallback sendCallback) throws MQClientException, RemotingException, MQBrokerException, InterruptedException {
        this.defaultMQProducerImpl.send(batch(msgs), queueWithNamespace(mq), sendCallback);
    }
    /** Asynchronous batch send to a specific, namespace-qualified queue with an explicit timeout. */
    @Override
    public void send(Collection<Message> msgs, MessageQueue mq,
        SendCallback sendCallback,
        long timeout) throws MQClientException, RemotingException, MQBrokerException, InterruptedException {
        this.defaultMQProducerImpl.send(batch(msgs), queueWithNamespace(mq), sendCallback, timeout);
    }
    /** Recalls a previously sent message identified by its recall handle. */
    @Override
    public String recallMessage(String topic, String recallHandle)
        throws MQClientException, RemotingException, MQBrokerException, InterruptedException {
        return this.defaultMQProducerImpl.recallMessage(withNamespace(topic), recallHandle);
    }
    /**
     * Sets an Executor to be used for executing callback methods.
     *
     * @param callbackExecutor the instance of Executor
     */
    public void setCallbackExecutor(final ExecutorService callbackExecutor) {
        this.defaultMQProducerImpl.setCallbackExecutor(callbackExecutor);
    }
    /**
     * Sets an Executor to be used for executing asynchronous send.
     *
     * @param asyncSenderExecutor the instance of Executor
     */
    public void setAsyncSenderExecutor(final ExecutorService asyncSenderExecutor) {
        this.defaultMQProducerImpl.setAsyncSenderExecutor(asyncSenderExecutor);
    }
    /**
     * Add response code for retrying.
     *
     * @param responseCode response code, {@link ResponseCode}
     */
    public void addRetryResponseCode(int responseCode) {
        // Mutates the live set returned by getRetryResponseCodes().
        this.retryResponseCodes.add(responseCode);
    }
    /**
     * Collapses a collection of messages into a single {@code MessageBatch} ready for the wire.
     *
     * <p>Order matters: each member message is validated, given a unique client id and
     * namespaced BEFORE {@code encode()} runs, since the encoded body reflects the members
     * as they are at that point. The batch-level unique id is likewise set before encoding,
     * while the outer batch topic is namespaced afterwards (it is carried on the envelope,
     * not inside the encoded body).
     *
     * @param msgs messages to combine into one batch
     * @return the encoded batch
     * @throws MQClientException if generation, validation or encoding fails
     */
    private MessageBatch batch(Collection<Message> msgs) throws MQClientException {
        MessageBatch msgBatch;
        try {
            msgBatch = MessageBatch.generateFromList(msgs);
            for (Message message : msgBatch) {
                Validators.checkMessage(message, this);
                MessageClientIDSetter.setUniqID(message);
                message.setTopic(withNamespace(message.getTopic()));
            }
            MessageClientIDSetter.setUniqID(msgBatch);
            msgBatch.setBody(msgBatch.encode());
        } catch (Exception e) {
            // Preserve the original cause so callers can diagnose which step failed.
            throw new MQClientException("Failed to initiate the MessageBatch", e);
        }
        msgBatch.setTopic(withNamespace(msgBatch.getTopic()));
        return msgBatch;
    }
    // Batching limits delegate to the shared produce accumulator when one exists; the
    // local fields cache the requested values so initProduceAccumulator() can replay
    // them once the accumulator is created.
    public int getBatchMaxDelayMs() {
        if (this.produceAccumulator == null) {
            return 0;
        }
        return produceAccumulator.getBatchMaxDelayMs();
    }
    public void batchMaxDelayMs(int holdMs) {
        this.batchMaxDelayMs = holdMs;
        if (this.produceAccumulator != null) {
            this.produceAccumulator.batchMaxDelayMs(holdMs);
        }
    }
    public long getBatchMaxBytes() {
        if (this.produceAccumulator == null) {
            return 0;
        }
        return produceAccumulator.getBatchMaxBytes();
    }
    public void batchMaxBytes(long holdSize) {
        this.batchMaxBytes = holdSize;
        if (this.produceAccumulator != null) {
            this.produceAccumulator.batchMaxBytes(holdSize);
        }
    }
    public long getTotalBatchMaxBytes() {
        if (this.produceAccumulator == null) {
            return 0;
        }
        return produceAccumulator.getTotalBatchMaxBytes();
    }
    public void totalBatchMaxBytes(long totalHoldSize) {
        this.totalBatchMaxBytes = totalHoldSize;
        if (this.produceAccumulator != null) {
            this.produceAccumulator.totalBatchMaxBytes(totalHoldSize);
        }
    }
    // Auto-batching only takes effect once an accumulator exists; without one this
    // reports false regardless of the configured flag.
    public boolean getAutoBatch() {
        if (this.produceAccumulator == null) {
            return false;
        }
        return this.autoBatch;
    }
    public void setAutoBatch(boolean autoBatch) {
        this.autoBatch = autoBatch;
    }
    // --- Simple configuration accessors -------------------------------------------------
    public String getProducerGroup() {
        return producerGroup;
    }
    public void setProducerGroup(String producerGroup) {
        this.producerGroup = producerGroup;
    }
    public String getCreateTopicKey() {
        return createTopicKey;
    }
    public void setCreateTopicKey(String createTopicKey) {
        this.createTopicKey = createTopicKey;
    }
    public int getSendMsgTimeout() {
        return sendMsgTimeout;
    }
    public void setSendMsgTimeout(int sendMsgTimeout) {
        this.sendMsgTimeout = sendMsgTimeout;
    }
    public int getSendMsgMaxTimeoutPerRequest() {
        return sendMsgMaxTimeoutPerRequest;
    }
    public void setSendMsgMaxTimeoutPerRequest(int sendMsgMaxTimeoutPerRequest) {
        this.sendMsgMaxTimeoutPerRequest = sendMsgMaxTimeoutPerRequest;
    }
    public int getCompressMsgBodyOverHowmuch() {
        return compressMsgBodyOverHowmuch;
    }
    public void setCompressMsgBodyOverHowmuch(int compressMsgBodyOverHowmuch) {
        this.compressMsgBodyOverHowmuch = compressMsgBodyOverHowmuch;
    }
    // Exposes the internal implementation object; deprecated so callers use the public API.
    @Deprecated
    public DefaultMQProducerImpl getDefaultMQProducerImpl() {
        return defaultMQProducerImpl;
    }
    public boolean isRetryAnotherBrokerWhenNotStoreOK() {
        return retryAnotherBrokerWhenNotStoreOK;
    }
    public void setRetryAnotherBrokerWhenNotStoreOK(boolean retryAnotherBrokerWhenNotStoreOK) {
        this.retryAnotherBrokerWhenNotStoreOK = retryAnotherBrokerWhenNotStoreOK;
    }
    public int getMaxMessageSize() {
        return maxMessageSize;
    }
    public void setMaxMessageSize(int maxMessageSize) {
        this.maxMessageSize = maxMessageSize;
    }
    public int getDefaultTopicQueueNums() {
        return defaultTopicQueueNums;
    }
    public void setDefaultTopicQueueNums(int defaultTopicQueueNums) {
        this.defaultTopicQueueNums = defaultTopicQueueNums;
    }
    public int getRetryTimesWhenSendFailed() {
        return retryTimesWhenSendFailed;
    }
    public void setRetryTimesWhenSendFailed(int retryTimesWhenSendFailed) {
        this.retryTimesWhenSendFailed = retryTimesWhenSendFailed;
    }
    // "Send message with VIP channel" is an alias for the inherited vipChannelEnabled flag.
    public boolean isSendMessageWithVIPChannel() {
        return isVipChannelEnabled();
    }
    public void setSendMessageWithVIPChannel(final boolean sendMessageWithVIPChannel) {
        this.setVipChannelEnabled(sendMessageWithVIPChannel);
    }
    // The latency-fault-tolerance knobs below are stored on the impl, not on this facade.
    public long[] getNotAvailableDuration() {
        return this.defaultMQProducerImpl.getNotAvailableDuration();
    }
    public void setNotAvailableDuration(final long[] notAvailableDuration) {
        this.defaultMQProducerImpl.setNotAvailableDuration(notAvailableDuration);
    }
    public long[] getLatencyMax() {
        return this.defaultMQProducerImpl.getLatencyMax();
    }
    public void setLatencyMax(final long[] latencyMax) {
        this.defaultMQProducerImpl.setLatencyMax(latencyMax);
    }
    public boolean isSendLatencyFaultEnable() {
        return this.defaultMQProducerImpl.isSendLatencyFaultEnable();
    }
    public void setSendLatencyFaultEnable(final boolean sendLatencyFaultEnable) {
        this.defaultMQProducerImpl.setSendLatencyFaultEnable(sendLatencyFaultEnable);
    }
    public int getRetryTimesWhenSendAsyncFailed() {
        return retryTimesWhenSendAsyncFailed;
    }
    public void setRetryTimesWhenSendAsyncFailed(final int retryTimesWhenSendAsyncFailed) {
        this.retryTimesWhenSendAsyncFailed = retryTimesWhenSendAsyncFailed;
    }
    public TraceDispatcher getTraceDispatcher() {
        return traceDispatcher;
    }
    // Returns the live, mutable set (see addRetryResponseCode).
    public Set<Integer> getRetryResponseCodes() {
        return retryResponseCodes;
    }
    public boolean isEnableBackpressureForAsyncMode() {
        return enableBackpressureForAsyncMode;
    }
    public void setEnableBackpressureForAsyncMode(boolean enableBackpressureForAsyncMode) {
        this.enableBackpressureForAsyncMode = enableBackpressureForAsyncMode;
    }
    public int getBackPressureForAsyncSendNum() {
        return backPressureForAsyncSendNum;
    }
/**
* For user modify backPressureForAsyncSendNum at runtime
*/
public void setBackPressureForAsyncSendNum(int backPressureForAsyncSendNum) {
this.backPressureForAsyncSendNumLock.acquireWriteLock();
backPressureForAsyncSendNum = Math.max(backPressureForAsyncSendNum, 10);
int acquiredBackPressureForAsyncSendNum = this.backPressureForAsyncSendNum
- defaultMQProducerImpl.getSemaphoreAsyncSendNumAvailablePermits();
this.backPressureForAsyncSendNum = backPressureForAsyncSendNum;
defaultMQProducerImpl.setSemaphoreAsyncSendNum(backPressureForAsyncSendNum - acquiredBackPressureForAsyncSendNum);
this.backPressureForAsyncSendNumLock.releaseWriteLock();
}
public int getBackPressureForAsyncSendSize() {
return backPressureForAsyncSendSize;
}
/**
* For user modify backPressureForAsyncSendSize at runtime
*/
public void setBackPressureForAsyncSendSize(int backPressureForAsyncSendSize) {
this.backPressureForAsyncSendSizeLock.acquireWriteLock();
backPressureForAsyncSendSize = Math.max(backPressureForAsyncSendSize, 1024 * 1024);
int acquiredBackPressureForAsyncSendSize = this.backPressureForAsyncSendSize
- defaultMQProducerImpl.getSemaphoreAsyncSendSizeAvailablePermits();
this.backPressureForAsyncSendSize = backPressureForAsyncSendSize;
defaultMQProducerImpl.setSemaphoreAsyncSendSize(backPressureForAsyncSendSize - acquiredBackPressureForAsyncSendSize);
this.backPressureForAsyncSendSizeLock.releaseWriteLock();
}
    /**
     * Used for system internal adjust backPressureForAsyncSendSize
     */
    public void setBackPressureForAsyncSendSizeInsideAdjust(int backPressureForAsyncSendSize) {
        // NOTE(review): the "inside adjust" setters skip the write lock and the semaphore
        // resize; presumably the internal caller coordinates both -- confirm before
        // reusing these outside that path.
        this.backPressureForAsyncSendSize = backPressureForAsyncSendSize;
    }
    /**
     * Used for system internal adjust backPressureForAsyncSendNum
     */
    public void setBackPressureForAsyncSendNumInsideAdjust(int backPressureForAsyncSendNum) {
        this.backPressureForAsyncSendNum = backPressureForAsyncSendNum;
    }
    // Read-lock helpers so callers can read back-pressure state without racing a
    // concurrent reconfiguration (which takes the corresponding write lock).
    public void acquireBackPressureForAsyncSendSizeLock() {
        this.backPressureForAsyncSendSizeLock.acquireReadLock();
    }
    public void releaseBackPressureForAsyncSendSizeLock() {
        this.backPressureForAsyncSendSizeLock.releaseReadLock();
    }
    public void acquireBackPressureForAsyncSendNumLock() {
        this.backPressureForAsyncSendNumLock.acquireReadLock();
    }
    public void releaseBackPressureForAsyncSendNumLock() {
        this.backPressureForAsyncSendNumLock.releaseReadLock();
    }
    public List<String> getTopics() {
        return topics;
    }
    public void setTopics(List<String> topics) {
        this.topics = topics;
    }
    @Override
    public void setStartDetectorEnable(boolean startDetectorEnable) {
        // Keep the fault strategy's detector flag in sync with the base-class setting.
        super.setStartDetectorEnable(startDetectorEnable);
        this.defaultMQProducerImpl.getMqFaultStrategy().setStartDetectorEnable(startDetectorEnable);
    }
    public int getCompressLevel() {
        return compressLevel;
    }
    public void setCompressLevel(int compressLevel) {
        this.compressLevel = compressLevel;
    }
    public CompressionType getCompressType() {
        return compressType;
    }
    public void setCompressType(CompressionType compressType) {
        // Changing the compression type also swaps in the matching compressor instance.
        this.compressType = compressType;
        this.compressor = CompressorFactory.getCompressor(compressType);
    }
    public Compressor getCompressor() {
        return compressor;
    }
    /**
     * Obtains (or creates) the shared produce accumulator for this producer, then replays
     * any batching limits configured before the accumulator existed. Each cached limit is
     * only pushed down when its value is greater than -1 (i.e. it was explicitly set);
     * otherwise the accumulator keeps its own default.
     */
    public void initProduceAccumulator() {
        this.produceAccumulator = MQClientManager.getInstance().getOrCreateProduceAccumulator(this);
        if (this.batchMaxDelayMs > -1) {
            this.produceAccumulator.batchMaxDelayMs(this.batchMaxDelayMs);
        }
        if (this.batchMaxBytes > -1) {
            this.produceAccumulator.batchMaxBytes(this.batchMaxBytes);
        }
        if (this.totalBatchMaxBytes > -1) {
            this.produceAccumulator.totalBatchMaxBytes(this.totalBatchMaxBytes);
        }
    }
}
| DefaultMQProducer |
java | micronaut-projects__micronaut-core | http-tck/src/main/java/io/micronaut/http/tck/RequestSupplier.java | {
"start": 869,
"end": 949
} | interface ____ extends Function<ServerUnderTest, HttpRequest<?>> {
}
| RequestSupplier |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/generics/MultipleEmbeddedGenericsTest.java | {
"start": 6004,
"end": 6379
} | class ____<A extends GenericEmbeddableOne, B extends GenericEmbeddableTwo> {
@Embedded
private A firstEmbedded;
@Embedded
private B secondEmbedded;
public GenericEntity() {
}
public GenericEntity(A firstEmbedded, B secondEmbedded) {
this.firstEmbedded = firstEmbedded;
this.secondEmbedded = secondEmbedded;
}
}
@Embeddable
public static | GenericEntity |
java | redisson__redisson | redisson/src/main/java/org/redisson/api/RTopicReactive.java | {
"start": 1004,
"end": 3156
} | interface ____ {
/**
* Get topic channel names
*
* @return channel names
*/
List<String> getChannelNames();
/**
* Publish the message to all subscribers of this topic asynchronously
*
* @param message to send
* @return the <code>Future</code> object with number of clients that received the message
*/
Mono<Long> publish(Object message);
/**
* Subscribes to status changes of this topic
*
* @param listener for messages
* @return listener id
* @see org.redisson.api.listener.StatusListener
*/
Mono<Integer> addListener(StatusListener listener);
/**
* Subscribes to this topic.
* <code>MessageListener.onMessage</code> is called when any message
* is published on this topic.
*
* @param <M> type of message
* @param type - type of message
* @param listener for messages
* @return locally unique listener id
* @see org.redisson.api.listener.MessageListener
*/
<M> Mono<Integer> addListener(Class<M> type, MessageListener<M> listener);
/**
* Removes the listener by <code>id</code> for listening this topic
*
* @param listenerIds - message listener ids
* @return void
*/
Mono<Void> removeListener(Integer... listenerIds);
/**
* Removes the listener by <code>instance</code> for listening this topic
*
* @param listener - message listener
* @return void
*/
Mono<Void> removeListener(MessageListener<?> listener);
/**
* Returns continues stream of published messages.
*
* @param <M> type of message
* @param type - type of message to listen
* @return stream of messages
*/
<M> Flux<M> getMessages(Class<M> type);
/**
* Returns amount of subscribers to this topic across all Redisson instances.
* Each subscriber may have multiple listeners.
*
* @return amount of subscribers
*/
Mono<Long> countSubscribers();
/**
* Removes all listeners from this topic
*
* @return void
*/
Mono<Void> removeAllListeners();
}
| RTopicReactive |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java | {
"start": 10156,
"end": 17828
} | enum ____
* </ol>
* , {@code true} otherwise.
*/
private boolean validateKeyTypeIsHashable(TypeInformation<?> type) {
try {
return (type instanceof PojoTypeInfo)
? !type.getTypeClass()
.getMethod("hashCode")
.getDeclaringClass()
.equals(Object.class)
: !(isArrayType(type) || isEnumType(type));
} catch (NoSuchMethodException ignored) {
// this should never happen as we are just searching for the hashCode() method.
}
return false;
}
private static boolean isArrayType(TypeInformation<?> type) {
return type instanceof PrimitiveArrayTypeInfo
|| type instanceof BasicArrayTypeInfo
|| type instanceof ObjectArrayTypeInfo;
}
private static boolean isEnumType(TypeInformation<?> type) {
return type instanceof EnumTypeInfo;
}
// ------------------------------------------------------------------------
// properties
// ------------------------------------------------------------------------
/**
* Gets the key selector that can get the key by which the stream if partitioned from the
* elements.
*
* @return The key selector for the key.
*/
@Internal
public KeySelector<T, KEY> getKeySelector() {
return this.keySelector;
}
/**
* Gets the type of the key by which the stream is partitioned.
*
* @return The type of the key by which the stream is partitioned.
*/
@Internal
public TypeInformation<KEY> getKeyType() {
return keyType;
}
@Override
protected DataStream<T> setConnectionType(StreamPartitioner<T> partitioner) {
throw new UnsupportedOperationException("Cannot override partitioning for KeyedStream.");
}
// ------------------------------------------------------------------------
// basic transformations
// ------------------------------------------------------------------------
@Override
protected <R> SingleOutputStreamOperator<R> doTransform(
final String operatorName,
final TypeInformation<R> outTypeInfo,
final StreamOperatorFactory<R> operatorFactory) {
SingleOutputStreamOperator<R> returnStream =
super.doTransform(operatorName, outTypeInfo, operatorFactory);
// inject the key selector and key type
OneInputTransformation<T, R> transform =
(OneInputTransformation<T, R>) returnStream.getTransformation();
transform.setStateKeySelector(keySelector);
transform.setStateKeyType(keyType);
if (isEnableAsyncState) {
transform.enableAsyncState();
}
return returnStream;
}
@Override
public DataStreamSink<T> addSink(SinkFunction<T> sinkFunction) {
DataStreamSink<T> result = super.addSink(sinkFunction);
result.getLegacyTransformation().setStateKeySelector(keySelector);
result.getLegacyTransformation().setStateKeyType(keyType);
return result;
}
/**
* Applies the given {@link KeyedProcessFunction} on the input stream, thereby creating a
* transformed output stream.
*
* <p>The function will be called for every element in the input streams and can produce zero or
* more output elements. Contrary to the {@link DataStream#flatMap(FlatMapFunction)} function,
* this function can also query the time and set timers. When reacting to the firing of set
* timers the function can directly emit elements and/or register yet more timers.
*
* @param keyedProcessFunction The {@link KeyedProcessFunction} that is called for each element
* in the stream.
* @param <R> The type of elements emitted by the {@code KeyedProcessFunction}.
* @return The transformed {@link DataStream}.
*/
@PublicEvolving
public <R> SingleOutputStreamOperator<R> process(
KeyedProcessFunction<KEY, T, R> keyedProcessFunction) {
TypeInformation<R> outType =
TypeExtractor.getUnaryOperatorReturnType(
keyedProcessFunction,
KeyedProcessFunction.class,
1,
2,
TypeExtractor.NO_INDEX,
getType(),
Utils.getCallLocationName(),
true);
return process(keyedProcessFunction, outType);
}
/**
* Applies the given {@link KeyedProcessFunction} on the input stream, thereby creating a
* transformed output stream.
*
* <p>The function will be called for every element in the input streams and can produce zero or
* more output elements. Contrary to the {@link DataStream#flatMap(FlatMapFunction)} function,
* this function can also query the time and set timers. When reacting to the firing of set
* timers the function can directly emit elements and/or register yet more timers.
*
* @param keyedProcessFunction The {@link KeyedProcessFunction} that is called for each element
* in the stream.
* @param outputType {@link TypeInformation} for the result type of the function.
* @param <R> The type of elements emitted by the {@code KeyedProcessFunction}.
* @return The transformed {@link DataStream}.
*/
@Internal
public <R> SingleOutputStreamOperator<R> process(
KeyedProcessFunction<KEY, T, R> keyedProcessFunction, TypeInformation<R> outputType) {
OneInputStreamOperator<T, R> operator =
isEnableAsyncState()
? new AsyncKeyedProcessOperator<>(clean(keyedProcessFunction))
: new KeyedProcessOperator<>(clean(keyedProcessFunction));
return transform("KeyedProcess", outputType, operator);
}
// ------------------------------------------------------------------------
// Flat Map
// ------------------------------------------------------------------------
@Override
public <R> SingleOutputStreamOperator<R> flatMap(
FlatMapFunction<T, R> flatMapper, TypeInformation<R> outputType) {
OneInputStreamOperator<T, R> operator =
isEnableAsyncState()
? new AsyncStreamFlatMap<>(clean(flatMapper))
: new StreamFlatMap<>(clean(flatMapper));
return transform("Flat Map", outputType, operator);
}
// ------------------------------------------------------------------------
// Joining
// ------------------------------------------------------------------------
/**
* Join elements of this {@link KeyedStream} with elements of another {@link KeyedStream} over a
* time interval that can be specified with {@link IntervalJoin#between(Duration, Duration)}.
*
* @param otherStream The other keyed stream to join this keyed stream with
* @param <T1> Type parameter of elements in the other stream
* @return An instance of {@link IntervalJoin} with this keyed stream and the other keyed stream
*/
@PublicEvolving
public <T1> IntervalJoin<T, T1, KEY> intervalJoin(KeyedStream<T1, KEY> otherStream) {
return new IntervalJoin<>(this, otherStream);
}
/**
* Perform a join over a time interval.
*
* @param <T1> The type parameter of the elements in the first streams
* @param <T2> The type parameter of the elements in the second stream
*/
@PublicEvolving
public static | type |
java | elastic__elasticsearch | server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java | {
"start": 7382,
"end": 9507
} | class ____ extends Plugin {
private final List<NamedWriteableRegistry.Entry> namedWritables = new ArrayList<>();
private final List<NamedXContentRegistry.Entry> namedXContents = new ArrayList<>();
public TestCustomMetadataPlugin() {
registerBuiltinWritables();
}
private <T extends Metadata.ProjectCustom> void registerMetadataCustom(
String name,
Writeable.Reader<T> reader,
Writeable.Reader<NamedDiff<?>> diffReader,
CheckedFunction<XContentParser, T, IOException> parser
) {
namedWritables.add(new NamedWriteableRegistry.Entry(Metadata.ProjectCustom.class, name, reader));
namedWritables.add(new NamedWriteableRegistry.Entry(NamedDiff.class, name, diffReader));
namedXContents.add(new NamedXContentRegistry.Entry(Metadata.ProjectCustom.class, new ParseField(name), parser));
}
private void registerBuiltinWritables() {
Map.<String, Function<String, TestProjectCustomMetadata>>of(
SnapshotMetadata.TYPE,
SnapshotMetadata::new,
GatewayMetadata.TYPE,
GatewayMetadata::new,
ApiMetadata.TYPE,
ApiMetadata::new,
NonApiMetadata.TYPE,
NonApiMetadata::new
)
.forEach(
(type, constructor) -> registerMetadataCustom(
type,
in -> TestProjectCustomMetadata.readFrom(constructor, in),
in -> TestProjectCustomMetadata.readDiffFrom(type, in),
parser -> TestProjectCustomMetadata.fromXContent(constructor, parser)
)
);
}
@Override
public List<NamedWriteableRegistry.Entry> getNamedWriteables() {
return namedWritables;
}
@Override
public List<NamedXContentRegistry.Entry> getNamedXContent() {
return namedXContents;
}
}
private abstract static | TestCustomMetadataPlugin |
java | elastic__elasticsearch | build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/util/DependenciesUtils.java | {
"start": 1220,
"end": 4369
} | class ____ {
public static FileCollection createFileCollectionFromNonTransitiveArtifactsView(
Configuration configuration,
Spec<ComponentIdentifier> componentFilter
) {
return createNonTransitiveArtifactsView(configuration, componentFilter).getFiles();
}
public static ArtifactView createNonTransitiveArtifactsView(Configuration configuration) {
return createNonTransitiveArtifactsView(configuration, identifier -> true);
}
public static ArtifactView createNonTransitiveArtifactsView(Configuration configuration, Spec<ComponentIdentifier> componentFilter) {
ResolvableDependencies incoming = configuration.getIncoming();
return incoming.artifactView(viewConfiguration -> {
Provider<Set<ComponentIdentifier>> firstLevelDependencyComponents = incoming.getResolutionResult()
.getRootComponent()
.map(
rootComponent -> rootComponent.getDependencies()
.stream()
.filter(dependency -> dependency instanceof ResolvedDependencyResult)
.map(dependency -> (ResolvedDependencyResult) dependency)
.filter(dependency -> dependency.getSelected() instanceof ResolvedComponentResult)
.map(dependency -> dependency.getSelected().getId())
.collect(Collectors.toSet())
);
viewConfiguration.componentFilter(
new AndSpec<>(identifier -> firstLevelDependencyComponents.get().contains(identifier), componentFilter)
);
});
}
/**
* This method gives us an artifact view of a configuration that filters out all
* project dependencies that are not shadowed jars.
* Basically a thirdparty only view of the dependency tree.
*/
public static FileCollection thirdPartyDependenciesView(Configuration configuration) {
ResolvableDependencies incoming = configuration.getIncoming();
return incoming.artifactView(v -> {
// resolve componentIdentifier for all shadowed project dependencies
Provider<Set<ComponentIdentifier>> shadowedDependencies = incoming.getResolutionResult()
.getRootComponent()
.map(
root -> root.getDependencies()
.stream()
.filter(dep -> dep instanceof ResolvedDependencyResult)
.map(dep -> (ResolvedDependencyResult) dep)
.filter(dep -> dep.getResolvedVariant().getDisplayName() == ShadowBasePlugin.SHADOW)
.filter(dep -> dep.getSelected() instanceof ResolvedComponentResult)
.map(dep -> dep.getSelected().getId())
.collect(Collectors.toSet())
);
// filter out project dependencies if they are not a shadowed dependency
v.componentFilter(i -> (i instanceof ProjectComponentIdentifier == false || shadowedDependencies.get().contains(i)));
}).getFiles();
}
}
| DependenciesUtils |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/util/ReflectionUtils.java | {
"start": 26662,
"end": 27634
} | class ____ analyze
* @param fc the callback to invoke for each field
* @param ff the filter that determines the fields to apply the callback to
* @throws IllegalStateException if introspection fails
*/
public static void doWithFields(Class<?> clazz, FieldCallback fc, @Nullable FieldFilter ff) {
// Keep backing up the inheritance hierarchy.
Class<?> targetClass = clazz;
do {
for (Field field : getDeclaredFields(targetClass)) {
if (ff != null && !ff.matches(field)) {
continue;
}
try {
fc.doWith(field);
}
catch (IllegalAccessException ex) {
throw new IllegalStateException("Not allowed to access field '" + field.getName() + "': " + ex);
}
}
targetClass = targetClass.getSuperclass();
}
while (targetClass != null && targetClass != Object.class);
}
/**
* This variant retrieves {@link Class#getDeclaredFields()} from a local cache
* in order to avoid defensive array copying.
* @param clazz the | to |
java | mockito__mockito | mockito-core/src/test/java/org/mockito/internal/util/reflection/ParameterizedConstructorInstantiatorTest.java | {
"start": 5192,
"end": 5293
} | class ____ {
public OneConstructor(Observer observer) {}
}
private static | OneConstructor |
java | netty__netty | codec-socks/src/main/java/io/netty/handler/codec/socks/SocksRequest.java | {
"start": 1041,
"end": 1490
} | class ____ extends SocksMessage {
private final SocksRequestType requestType;
protected SocksRequest(SocksRequestType requestType) {
super(SocksMessageType.REQUEST);
this.requestType = ObjectUtil.checkNotNull(requestType, "requestType");
}
/**
* Returns socks request type
*
* @return socks request type
*/
public SocksRequestType requestType() {
return requestType;
}
}
| SocksRequest |
java | elastic__elasticsearch | x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/QuerySearchApplicationAction.java | {
"start": 421,
"end": 715
} | class ____ {
public static final String NAME = "indices:data/read/xpack/application/search_application/search";
public static final ActionType<SearchResponse> INSTANCE = new ActionType<>(NAME);
private QuerySearchApplicationAction() {/* no instances */}
}
| QuerySearchApplicationAction |
java | hibernate__hibernate-orm | hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/lock/internal/TeradataLockingSupport.java | {
"start": 497,
"end": 1224
} | class ____ implements LockingSupport, LockingSupport.Metadata {
@Override
public Metadata getMetadata() {
return this;
}
@Override
public LockTimeoutType getLockTimeoutType(Timeout timeout) {
if ( timeout.milliseconds() == Timeouts.NO_WAIT_MILLI ) {
return LockTimeoutType.QUERY;
}
// todo (db-locking) : maybe getConnectionLockTimeoutStrategy?
return LockTimeoutType.NONE;
}
@Override
public OuterJoinLockingType getOuterJoinLockingType() {
return OuterJoinLockingType.UNSUPPORTED;
}
@Override
public ConnectionLockTimeoutStrategy getConnectionLockTimeoutStrategy() {
// todo (db-locking) : not sure about this for Teradata...
return ConnectionLockTimeoutStrategy.NONE;
}
}
| TeradataLockingSupport |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/time/StronglyTypeTimeTest.java | {
"start": 10027,
"end": 10383
} | class ____ {
private static final long F1_RETRY_MILLISECONDS = 5000;
public Duration frobber() {
return Duration.ofMillis(F1_RETRY_MILLISECONDS);
}
}
""")
.addOutputLines(
"Test.java",
"""
import java.time.Duration;
| Test |
java | apache__camel | tooling/maven/camel-maven-plugin/src/main/java/org/apache/camel/maven/DebugMojo.java | {
"start": 1541,
"end": 2827
} | class ____ extends DevMojo {
/**
* Indicates whether the message processing done by Camel should be suspended as long as a debugger is not attached.
*/
@Parameter(property = "camel.suspend", defaultValue = "true")
private boolean suspend;
@Inject
public DebugMojo(RepositorySystem repositorySystem) {
super(repositorySystem);
}
@Override
protected void beforeBootstrapCamel() throws Exception {
super.beforeBootstrapCamel();
// Enable JMX
System.setProperty("org.apache.camel.jmx.disabled", "false");
// Enable the suspend mode.
System.setProperty(BacklogDebugger.SUSPEND_MODE_SYSTEM_PROP_NAME, Boolean.toString(suspend));
String suspendMode = System.getenv(BacklogDebugger.SUSPEND_MODE_ENV_VAR_NAME);
if (suspendMode != null && Boolean.parseBoolean(suspendMode) != suspend) {
throw new MojoExecutionException(
String.format(
"The environment variable %s has been set and prevents to configure the suspend mode. Please remove it first.",
BacklogDebugger.SUSPEND_MODE_ENV_VAR_NAME));
}
}
@Override
protected String goal() {
return "camel:debug";
}
}
| DebugMojo |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/customproviders/UniVoidRequestFilter.java | {
"start": 301,
"end": 1078
} | class ____ {
@ServerRequestFilter
Uni<Void> uniVoid(UriInfo uriInfo, HttpHeaders httpHeaders, ContainerRequestContext requestContext) {
String exceptionHeader = httpHeaders.getHeaderString("some-uni-exception-input");
if ((exceptionHeader != null) && !exceptionHeader.isEmpty()) {
return Uni.createFrom().failure(new UniException(exceptionHeader));
}
return Uni.createFrom().deferred(() -> {
String inputHeader = httpHeaders.getHeaderString("some-uni-input");
if (inputHeader != null) {
requestContext.getHeaders().putSingle("custom-uni-header", uriInfo.getPath() + "-" + inputHeader);
}
return Uni.createFrom().nullItem();
});
}
}
| UniVoidRequestFilter |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql2rel/RelDecorrelator.java | {
"start": 69282,
"end": 71551
} | class ____ extends RexShuttle {
private final RelNode currentRel;
private final Map<RelNode, Frame> map;
private final CorelMap cm;
private DecorrelateRexShuttle(RelNode currentRel, Map<RelNode, Frame> map, CorelMap cm) {
this.currentRel = requireNonNull(currentRel, "currentRel");
this.map = requireNonNull(map, "map");
this.cm = requireNonNull(cm, "cm");
}
@Override
public RexNode visitFieldAccess(RexFieldAccess fieldAccess) {
int newInputOutputOffset = 0;
for (RelNode input : currentRel.getInputs()) {
final Frame frame = map.get(input);
if (frame != null) {
// try to find in this input rel the position of corVar
final CorRef corRef = cm.mapFieldAccessToCorRef.get(fieldAccess);
if (corRef != null) {
Integer newInputPos = frame.corDefOutputs.get(corRef.def());
if (newInputPos != null) {
// This input does produce the corVar referenced.
return new RexInputRef(
newInputPos + newInputOutputOffset,
frame.r.getRowType().getFieldList().get(newInputPos).getType());
}
}
// this input does not produce the corVar needed
newInputOutputOffset += frame.r.getRowType().getFieldCount();
} else {
// this input is not rewritten
newInputOutputOffset += input.getRowType().getFieldCount();
}
}
return fieldAccess;
}
@Override
public RexNode visitInputRef(RexInputRef inputRef) {
final RexInputRef ref = getNewForOldInputRef(currentRel, map, inputRef);
if (ref.getIndex() == inputRef.getIndex() && ref.getType() == inputRef.getType()) {
return inputRef; // re-use old object, to prevent needless expr cloning
}
return ref;
}
}
/** Shuttle that removes correlations. */
private | DecorrelateRexShuttle |
java | alibaba__nacos | config/src/main/java/com/alibaba/nacos/config/server/service/repository/extrnal/ExternalConfigInfoGrayPersistServiceImpl.java | {
"start": 3657,
"end": 23391
} | class ____ implements ConfigInfoGrayPersistService {
private DataSourceService dataSourceService;
protected JdbcTemplate jt;
protected TransactionTemplate tjt;
private MapperManager mapperManager;
private HistoryConfigInfoPersistService historyConfigInfoPersistService;
public ExternalConfigInfoGrayPersistServiceImpl(
@Qualifier("externalHistoryConfigInfoPersistServiceImpl") HistoryConfigInfoPersistService historyConfigInfoPersistService) {
this.historyConfigInfoPersistService = historyConfigInfoPersistService;
this.dataSourceService = DynamicDataSource.getInstance().getDataSource();
this.jt = dataSourceService.getJdbcTemplate();
this.tjt = dataSourceService.getTransactionTemplate();
Boolean isDataSourceLogEnable = EnvUtil.getProperty(CommonConstant.NACOS_PLUGIN_DATASOURCE_LOG, Boolean.class,
false);
this.mapperManager = MapperManager.instance(isDataSourceLogEnable);
}
@Override
public <E> PaginationHelper<E> createPaginationHelper() {
return new ExternalStoragePaginationHelperImpl<>(jt);
}
@Override
public ConfigInfoStateWrapper findConfigInfo4GrayState(final String dataId, final String group, final String tenant,
String grayName) {
ConfigInfoGrayMapper configInfoGrayMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
TableConstant.CONFIG_INFO_GRAY);
String tenantTmp = StringUtils.isBlank(tenant) ? StringUtils.EMPTY : tenant;
String grayNameTmp = StringUtils.isBlank(grayName) ? StringUtils.EMPTY : grayName.trim();
try {
return this.jt.queryForObject(configInfoGrayMapper.select(
Arrays.asList("id", "data_id", "group_id", "tenant_id", "gray_rule", "gmt_modified"),
Arrays.asList("data_id", "group_id", "tenant_id", "gray_name")),
new Object[] {dataId, group, tenantTmp, grayNameTmp}, CONFIG_INFO_STATE_WRAPPER_ROW_MAPPER);
} catch (EmptyResultDataAccessException e) {
return null;
}
}
private ConfigOperateResult getGrayOperateResult(String dataId, String group, String tenant, String grayName) {
String tenantTmp = StringUtils.isBlank(tenant) ? StringUtils.EMPTY : tenant;
ConfigInfoStateWrapper configInfo4Gray = this.findConfigInfo4GrayState(dataId, group, tenantTmp, grayName);
if (configInfo4Gray == null) {
return new ConfigOperateResult(false);
}
return new ConfigOperateResult(configInfo4Gray.getId(), configInfo4Gray.getLastModified());
}
@Override
public ConfigOperateResult addConfigInfo4Gray(ConfigInfo configInfo, String grayName, String grayRule, String srcIp,
String srcUser) {
return tjt.execute(status -> {
String tenantTmp =
StringUtils.isBlank(configInfo.getTenant()) ? StringUtils.EMPTY : configInfo.getTenant().trim();
String grayNameTmp = StringUtils.isBlank(grayName) ? StringUtils.EMPTY : grayName.trim();
String grayRuleTmp = StringUtils.isBlank(grayRule) ? StringUtils.EMPTY : grayRule.trim();
try {
addConfigInfoGrayAtomic(-1, configInfo, grayNameTmp, grayRuleTmp, srcIp, srcUser);
if (!GRAY_MIGRATE_FLAG.get()) {
Timestamp now = new Timestamp(System.currentTimeMillis());
historyConfigInfoPersistService.insertConfigHistoryAtomic(0, configInfo, srcIp, srcUser, now, "I",
Constants.GRAY, grayNameTmp,
ConfigExtInfoUtil.getExtInfoFromGrayInfo(grayNameTmp, grayRuleTmp, srcUser));
}
return getGrayOperateResult(configInfo.getDataId(), configInfo.getGroup(), tenantTmp, grayNameTmp);
} catch (Exception e) {
LogUtil.FATAL_LOG.error("[db-error] " + e, e);
throw e;
}
});
}
@Override
public void addConfigInfoGrayAtomic(long configGrayId, ConfigInfo configInfo, String grayName, String grayRule,
String srcIp, String srcUser) {
String appNameTmp = StringUtils.defaultEmptyIfBlank(configInfo.getAppName());
String tenantTmp = StringUtils.defaultEmptyIfBlank(configInfo.getTenant());
String md5 = MD5Utils.md5Hex(configInfo.getContent(), Constants.ENCODE);
final String encryptedDataKey =
configInfo.getEncryptedDataKey() == null ? StringUtils.EMPTY : configInfo.getEncryptedDataKey();
ConfigInfoGrayMapper configInfoGrayMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
TableConstant.CONFIG_INFO_GRAY);
jt.update(configInfoGrayMapper.insert(
Arrays.asList("data_id", "group_id", "tenant_id", "gray_name", "gray_rule", "app_name", "content",
"encrypted_data_key", "md5", "src_ip", "src_user", "gmt_create@NOW()", "gmt_modified@NOW()")),
configInfo.getDataId(), configInfo.getGroup(), tenantTmp, grayName, grayRule, appNameTmp,
configInfo.getContent(), encryptedDataKey, md5, srcIp, srcUser);
}
@Override
public ConfigOperateResult insertOrUpdateGray(final ConfigInfo configInfo, final String grayName,
final String grayRule, final String srcIp, final String srcUser) {
if (findConfigInfo4GrayState(configInfo.getDataId(), configInfo.getGroup(), configInfo.getTenant(), grayName)
== null) {
return addConfigInfo4Gray(configInfo, grayName, grayRule, srcIp, srcUser);
} else {
return updateConfigInfo4Gray(configInfo, grayName, grayRule, srcIp, srcUser);
}
}
@Override
public ConfigOperateResult insertOrUpdateGrayCas(final ConfigInfo configInfo, final String grayName,
final String grayRule, final String srcIp, final String srcUser) {
if (findConfigInfo4GrayState(configInfo.getDataId(), configInfo.getGroup(), configInfo.getTenant(), grayName)
== null) {
return addConfigInfo4Gray(configInfo, grayName, grayRule, srcIp, srcUser);
} else {
return updateConfigInfo4GrayCas(configInfo, grayName, grayRule, srcIp, srcUser);
}
}
@Override
public void removeConfigInfoGray(final String dataId, final String group, final String tenant,
final String grayName, final String srcIp, final String srcUser) {
tjt.execute(new TransactionCallbackWithoutResult() {
@Override
protected void doInTransactionWithoutResult(TransactionStatus status) {
String tenantTmp = StringUtils.isBlank(tenant) ? StringUtils.EMPTY : tenant;
String grayNameTmp = StringUtils.isBlank(grayName) ? StringUtils.EMPTY : grayName;
try {
ConfigInfoGrayWrapper oldConfigAllInfo4Gray = findConfigInfo4Gray(dataId, group, tenantTmp,
grayNameTmp);
if (oldConfigAllInfo4Gray == null) {
return;
}
ConfigInfoGrayMapper configInfoGrayMapper = mapperManager.findMapper(
dataSourceService.getDataSourceType(), TableConstant.CONFIG_INFO_GRAY);
jt.update(
configInfoGrayMapper.delete(Arrays.asList("data_id", "group_id", "tenant_id", "gray_name")),
dataId, group, tenantTmp, grayNameTmp);
if (!GRAY_MIGRATE_FLAG.get()) {
Timestamp now = new Timestamp(System.currentTimeMillis());
historyConfigInfoPersistService.insertConfigHistoryAtomic(oldConfigAllInfo4Gray.getId(),
oldConfigAllInfo4Gray, srcIp, srcUser, now, "D", Constants.GRAY, grayNameTmp,
ConfigExtInfoUtil.getExtInfoFromGrayInfo(oldConfigAllInfo4Gray.getGrayName(),
oldConfigAllInfo4Gray.getGrayRule(), oldConfigAllInfo4Gray.getSrcUser()));
}
} catch (CannotGetJdbcConnectionException e) {
LogUtil.FATAL_LOG.error("[db-error] " + e, e);
throw e;
}
}
});
}
@Override
public ConfigOperateResult updateConfigInfo4Gray(ConfigInfo configInfo, String grayName, String grayRule,
String srcIp, String srcUser) {
return tjt.execute(status -> {
String appNameTmp = StringUtils.defaultEmptyIfBlank(configInfo.getAppName());
String tenantTmp = StringUtils.defaultEmptyIfBlank(configInfo.getTenant());
String grayNameTmp = StringUtils.isBlank(grayName) ? StringUtils.EMPTY : grayName.trim();
String grayRuleTmp = StringUtils.isBlank(grayRule) ? StringUtils.EMPTY : grayRule.trim();
try {
ConfigInfoGrayWrapper oldConfigAllInfo4Gray = findConfigInfo4Gray(configInfo.getDataId(),
configInfo.getGroup(), tenantTmp, grayNameTmp);
if (oldConfigAllInfo4Gray == null) {
if (LogUtil.FATAL_LOG.isErrorEnabled()) {
LogUtil.FATAL_LOG.error("expected config info[dataid:{}, group:{}, tenent:{}] but not found.",
configInfo.getDataId(), configInfo.getGroup(), configInfo.getTenant());
}
}
String md5 = MD5Utils.md5Hex(configInfo.getContent(), Constants.ENCODE);
ConfigInfoGrayMapper configInfoGrayMapper = mapperManager.findMapper(
dataSourceService.getDataSourceType(), TableConstant.CONFIG_INFO_GRAY);
jt.update(configInfoGrayMapper.update(
Arrays.asList("content", "encrypted_data_key", "md5", "src_ip", "src_user",
"gmt_modified@NOW()", "app_name", "gray_rule"),
Arrays.asList("data_id", "group_id", "tenant_id", "gray_name")), configInfo.getContent(),
configInfo.getEncryptedDataKey(), md5, srcIp, srcUser, appNameTmp, grayRuleTmp,
configInfo.getDataId(), configInfo.getGroup(), tenantTmp, grayNameTmp);
Timestamp now = new Timestamp(System.currentTimeMillis());
if (!GRAY_MIGRATE_FLAG.get()) {
historyConfigInfoPersistService.insertConfigHistoryAtomic(oldConfigAllInfo4Gray.getId(),
oldConfigAllInfo4Gray, srcIp, srcUser, now, "U", Constants.GRAY, grayNameTmp,
ConfigExtInfoUtil.getExtInfoFromGrayInfo(oldConfigAllInfo4Gray.getGrayName(),
oldConfigAllInfo4Gray.getGrayRule(), oldConfigAllInfo4Gray.getSrcUser()));
}
return getGrayOperateResult(configInfo.getDataId(), configInfo.getGroup(), tenantTmp, grayNameTmp);
} catch (CannotGetJdbcConnectionException e) {
LogUtil.FATAL_LOG.error("[db-error] " + e, e);
throw e;
}
});
}
@Override
public ConfigOperateResult updateConfigInfo4GrayCas(ConfigInfo configInfo, String grayName, String grayRule,
String srcIp, String srcUser) {
return tjt.execute(status -> {
String appNameTmp = StringUtils.defaultEmptyIfBlank(configInfo.getAppName());
String tenantTmp = StringUtils.defaultEmptyIfBlank(configInfo.getTenant());
String grayNameTmp = StringUtils.isBlank(grayName) ? StringUtils.EMPTY : grayName.trim();
String grayRuleTmp = StringUtils.isBlank(grayRule) ? StringUtils.EMPTY : grayRule.trim();
try {
String md5 = MD5Utils.md5Hex(configInfo.getContent(), Constants.ENCODE);
ConfigInfoGrayMapper configInfoGrayMapper = mapperManager.findMapper(
dataSourceService.getDataSourceType(), TableConstant.CONFIG_INFO_GRAY);
MapperContext context = new MapperContext();
context.putUpdateParameter(FieldConstant.CONTENT, configInfo.getContent());
context.putUpdateParameter(FieldConstant.MD5, md5);
context.putUpdateParameter(FieldConstant.SRC_IP, srcIp);
context.putUpdateParameter(FieldConstant.SRC_USER, srcUser);
context.putUpdateParameter(FieldConstant.APP_NAME, appNameTmp);
context.putWhereParameter(FieldConstant.DATA_ID, configInfo.getDataId());
context.putWhereParameter(FieldConstant.GROUP_ID, configInfo.getGroup());
context.putWhereParameter(FieldConstant.TENANT_ID, tenantTmp);
context.putWhereParameter(FieldConstant.GRAY_NAME, grayNameTmp);
context.putWhereParameter(FieldConstant.GRAY_RULE, grayRuleTmp);
context.putWhereParameter(FieldConstant.MD5, configInfo.getMd5());
final MapperResult mapperResult = configInfoGrayMapper.updateConfigInfo4GrayCas(context);
boolean success = jt.update(mapperResult.getSql(), mapperResult.getParamList().toArray()) > 0;
ConfigInfoGrayWrapper oldConfigAllInfo4Gray = findConfigInfo4Gray(configInfo.getDataId(),
configInfo.getGroup(), tenantTmp, grayNameTmp);
if (oldConfigAllInfo4Gray == null) {
if (LogUtil.FATAL_LOG.isErrorEnabled()) {
LogUtil.FATAL_LOG.error("expected config info[dataid:{}, group:{}, tenent:{}] but not found.",
configInfo.getDataId(), configInfo.getGroup(), configInfo.getTenant());
}
}
if (!GRAY_MIGRATE_FLAG.get()) {
Timestamp now = new Timestamp(System.currentTimeMillis());
historyConfigInfoPersistService.insertConfigHistoryAtomic(oldConfigAllInfo4Gray.getId(),
oldConfigAllInfo4Gray, srcIp, srcUser, now, "U", Constants.GRAY, grayNameTmp,
ConfigExtInfoUtil.getExtInfoFromGrayInfo(oldConfigAllInfo4Gray.getGrayName(),
oldConfigAllInfo4Gray.getGrayRule(), oldConfigAllInfo4Gray.getSrcUser()));
}
if (success) {
return getGrayOperateResult(configInfo.getDataId(), configInfo.getGroup(), tenantTmp, grayNameTmp);
} else {
return new ConfigOperateResult(false);
}
} catch (CannotGetJdbcConnectionException e) {
LogUtil.FATAL_LOG.error("[db-error] " + e, e);
throw e;
}
});
}
@Override
public ConfigInfoGrayWrapper findConfigInfo4Gray(final String dataId, final String group, final String tenant,
final String grayName) {
String tenantTmp = StringUtils.isBlank(tenant) ? StringUtils.EMPTY : tenant;
String grayNameTmp = StringUtils.isBlank(grayName) ? StringUtils.EMPTY : grayName.trim();
try {
ConfigInfoGrayMapper configInfoGrayMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
TableConstant.CONFIG_INFO_GRAY);
return this.jt.queryForObject(configInfoGrayMapper.select(
Arrays.asList("id", "data_id", "group_id", "tenant_id", "gray_name", "gray_rule", "app_name",
"content", "md5", "encrypted_data_key", "gmt_modified", "src_user"),
Arrays.asList("data_id", "group_id", "tenant_id", "gray_name")),
new Object[] {dataId, group, tenantTmp, grayNameTmp}, CONFIG_INFO_GRAY_WRAPPER_ROW_MAPPER);
} catch (EmptyResultDataAccessException e) { // Indicates that the data does not exist, returns null.
return null;
} catch (CannotGetJdbcConnectionException e) {
LogUtil.FATAL_LOG.error("[db-error] " + e, e);
throw e;
}
}
@Override
public int configInfoGrayCount() {
ConfigInfoGrayMapper configInfoGrayMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
TableConstant.CONFIG_INFO_GRAY);
String sql = configInfoGrayMapper.count(null);
Integer result = jt.queryForObject(sql, Integer.class);
if (result == null) {
throw new IllegalArgumentException("configInfoGrayCount error");
}
return result;
}
@Override
public Page<ConfigInfoGrayWrapper> findAllConfigInfoGrayForDumpAll(final int pageNo, final int pageSize) {
final int startRow = (pageNo - 1) * pageSize;
ConfigInfoGrayMapper configInfoGrayMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
TableConstant.CONFIG_INFO_GRAY);
String sqlCountRows = configInfoGrayMapper.count(null);
MapperResult sqlFetchRows = configInfoGrayMapper.findAllConfigInfoGrayForDumpAllFetchRows(
new MapperContext(startRow, pageSize));
PaginationHelper<ConfigInfoGrayWrapper> helper = createPaginationHelper();
try {
return helper.fetchPageLimit(sqlCountRows, sqlFetchRows.getSql(), sqlFetchRows.getParamList().toArray(),
pageNo, pageSize, CONFIG_INFO_GRAY_WRAPPER_ROW_MAPPER);
} catch (CannotGetJdbcConnectionException e) {
LogUtil.FATAL_LOG.error("[db-error] " + e, e);
throw e;
}
}
@Override
public List<ConfigInfoGrayWrapper> findChangeConfig(final Timestamp startTime, long lastMaxId, final int pageSize) {
try {
ConfigInfoGrayMapper configInfoMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
TableConstant.CONFIG_INFO_GRAY);
MapperContext context = new MapperContext();
context.putWhereParameter(FieldConstant.START_TIME, startTime);
context.putWhereParameter(FieldConstant.PAGE_SIZE, pageSize);
context.putWhereParameter(FieldConstant.LAST_MAX_ID, lastMaxId);
MapperResult mapperResult = configInfoMapper.findChangeConfig(context);
return jt.query(mapperResult.getSql(), mapperResult.getParamList().toArray(),
CONFIG_INFO_GRAY_WRAPPER_ROW_MAPPER);
} catch (DataAccessException e) {
LogUtil.FATAL_LOG.error("[db-error] " + e, e);
throw e;
}
}
@Override
public List<String> findConfigInfoGrays(final String dataId, final String group, final String tenant) {
String tenantTmp = StringUtils.isBlank(tenant) ? StringUtils.EMPTY : tenant;
ConfigInfoGrayMapper configInfoGrayMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
TableConstant.CONFIG_INFO_GRAY);
String selectSql = configInfoGrayMapper.select(Collections.singletonList("gray_name"),
Arrays.asList("data_id", "group_id", "tenant_id"));
return jt.queryForList(selectSql, new Object[] {dataId, group, tenantTmp}, String.class);
}
}
| ExternalConfigInfoGrayPersistServiceImpl |
java | google__gson | gson/src/test/java/com/google/gson/functional/ParameterizedTypesTest.java | {
"start": 17299,
"end": 19395
} | class ____<Q extends Quantity>
implements Measurable<Q>, Field<Amount<?>>, Serializable, Immutable {
private static final long serialVersionUID = -7560491093120970437L;
int value = 30;
}
@Test
public void testDeepParameterizedTypeSerialization() {
Amount<MyQuantity> amount = new Amount<>();
String json = gson.toJson(amount);
assertThat(json).contains("value");
assertThat(json).contains("30");
}
@Test
public void testDeepParameterizedTypeDeserialization() {
String json = "{value:30}";
Type type = new TypeToken<Amount<MyQuantity>>() {}.getType();
Amount<MyQuantity> amount = gson.fromJson(json, type);
assertThat(amount.value).isEqualTo(30);
}
// End: tests to reproduce issue 103
private static void assertCorrectlyDeserialized(Object object) {
@SuppressWarnings("unchecked")
List<Quantity> list = (List<Quantity>) object;
assertThat(list.size()).isEqualTo(1);
assertThat(list.get(0).q).isEqualTo(4);
}
@Test
public void testGsonFromJsonTypeToken() {
TypeToken<List<Quantity>> typeToken = new TypeToken<>() {};
Type type = typeToken.getType();
{
JsonObject jsonObject = new JsonObject();
jsonObject.addProperty("q", 4);
JsonArray jsonArray = new JsonArray();
jsonArray.add(jsonObject);
assertCorrectlyDeserialized(gson.fromJson(jsonArray, typeToken));
assertCorrectlyDeserialized(gson.fromJson(jsonArray, type));
}
String json = "[{\"q\":4}]";
{
assertCorrectlyDeserialized(gson.fromJson(json, typeToken));
assertCorrectlyDeserialized(gson.fromJson(json, type));
}
{
assertCorrectlyDeserialized(gson.fromJson(new StringReader(json), typeToken));
assertCorrectlyDeserialized(gson.fromJson(new StringReader(json), type));
}
{
JsonReader reader = new JsonReader(new StringReader(json));
assertCorrectlyDeserialized(gson.fromJson(reader, typeToken));
reader = new JsonReader(new StringReader(json));
assertCorrectlyDeserialized(gson.fromJson(reader, type));
}
}
}
| Amount |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/internal/longs/Longs_assertIsPositive_Test.java | {
"start": 1097,
"end": 2302
} | class ____ extends LongsBaseTest {
@Test
void should_succeed_since_actual_is_positive() {
longs.assertIsPositive(someInfo(), 6L);
}
@Test
void should_fail_since_actual_is_not_positive() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> longs.assertIsPositive(someInfo(), -6L))
.withMessage("%nExpecting actual:%n -6L%nto be greater than:%n 0L%n".formatted());
}
@Test
void should_succeed_since_actual_is_positive_according_to_custom_comparison_strategy() {
longsWithAbsValueComparisonStrategy.assertIsPositive(someInfo(), -1L);
}
@Test
void should_fail_since_actual_is_not_positive_according_to_custom_comparison_strategy() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> longsWithAbsValueComparisonStrategy.assertIsPositive(someInfo(),
0L))
.withMessage("%nExpecting actual:%n 0L%nto be greater than:%n 0L%nwhen comparing values using AbsValueComparator".formatted());
}
}
| Longs_assertIsPositive_Test |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/criteria/paths/FetchAndJoinTest.java | {
"start": 947,
"end": 1863
} | class ____ {
@Test
public void testImplicitJoinFromExplicitCollectionJoin(EntityManagerFactoryScope scope) {
scope.inTransaction( entityManager -> {
final CriteriaBuilder builder = entityManager.getCriteriaBuilder();
final CriteriaQuery<Entity1> criteria = builder.createQuery( Entity1.class );
final Root<Entity1> root = criteria.from( Entity1.class );
final Join<Entity1, Entity2> entity2Join = root.join( Entity1_.entity2,
JoinType.INNER ); // illegal with fetch join
final Fetch<Entity1, Entity2> entity2Fetch = root.fetch( Entity1_.entity2, JoinType.INNER ); // <=== REMOVE
entity2Fetch.fetch( Entity2_.entity3 ); // <=== REMOVE
criteria.where( builder.equal( root.get( Entity1_.value ), "test" ),
builder.equal( entity2Join.get( Entity2_.value ), "test" ) ); // illegal with fetch join
entityManager.createQuery( criteria ).getResultList();
} );
}
}
| FetchAndJoinTest |
java | google__auto | value/src/test/java/com/google/auto/value/processor/GeneratedDoesNotExistTest.java | {
"start": 4757,
"end": 5625
} | class ____<T> implements InvocationHandler {
final T original;
OverridableInvocationHandler(T original) {
this.original = original;
}
@Override
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
try {
Method override = getClass().getMethod(method.getName(), method.getParameterTypes());
if (override.getDeclaringClass() == getClass()) {
return override.invoke(this, args);
}
} catch (NoSuchMethodException ignored) {
// OK: we don't have an override for this method, so just invoke the original method.
}
return method.invoke(original, args);
}
}
private static <T> T partialProxy(Class<T> type, OverridableInvocationHandler<T> handler) {
return Reflection.newProxy(type, handler);
}
private static | OverridableInvocationHandler |
java | apache__kafka | clients/src/main/java/org/apache/kafka/common/security/ssl/DefaultSslEngineFactory.java | {
"start": 14759,
"end": 14919
} | interface ____ {
KeyStore get();
char[] keyPassword();
boolean modified();
}
// package access for testing
static | SecurityStore |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/context/annotation/ImportSelectorTests.java | {
"start": 17537,
"end": 19678
} | class ____ implements DeferredImportSelector.Group,
BeanClassLoaderAware, ResourceLoaderAware, BeanFactoryAware, EnvironmentAware {
static ClassLoader classLoader;
static ResourceLoader resourceLoader;
static BeanFactory beanFactory;
static Environment environment;
static AtomicInteger instancesCount = new AtomicInteger();
static MultiValueMap<AnnotationMetadata, String> imports = new LinkedMultiValueMap<>();
public TestImportGroup() {
TestImportGroup.instancesCount.incrementAndGet();
}
static void cleanup() {
TestImportGroup.classLoader = null;
TestImportGroup.beanFactory = null;
TestImportGroup.resourceLoader = null;
TestImportGroup.environment = null;
TestImportGroup.instancesCount = new AtomicInteger();
TestImportGroup.imports.clear();
}
static Map<String, List<String>> allImports() {
return TestImportGroup.imports.entrySet()
.stream()
.collect(Collectors.toMap(entry -> entry.getKey().getClassName(),
Map.Entry::getValue));
}
private final List<Entry> instanceImports = new ArrayList<>();
@Override
public void process(AnnotationMetadata metadata, DeferredImportSelector selector) {
for (String importClassName : selector.selectImports(metadata)) {
this.instanceImports.add(new Entry(metadata, importClassName));
}
TestImportGroup.imports.addAll(metadata,
Arrays.asList(selector.selectImports(metadata)));
}
@Override
public Iterable<Entry> selectImports() {
ArrayList<Entry> content = new ArrayList<>(this.instanceImports);
Collections.reverse(content);
return content;
}
@Override
public void setBeanClassLoader(ClassLoader classLoader) {
TestImportGroup.classLoader = classLoader;
}
@Override
public void setBeanFactory(BeanFactory beanFactory) throws BeansException {
TestImportGroup.beanFactory = beanFactory;
}
@Override
public void setResourceLoader(ResourceLoader resourceLoader) {
TestImportGroup.resourceLoader = resourceLoader;
}
@Override
public void setEnvironment(Environment environment) {
TestImportGroup.environment = environment;
}
}
}
| TestImportGroup |
java | elastic__elasticsearch | x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/TimeoutChecker.java | {
"start": 7961,
"end": 8562
} | class ____ {
final TimeValue timeout;
final AtomicBoolean registered;
final Collection<Matcher> matchers;
boolean timedOut;
WatchDogEntry(TimeValue timeout) {
this.timeout = timeout;
this.registered = new AtomicBoolean(false);
this.matchers = new CopyOnWriteArrayList<>();
}
private void timedOut() {
timedOut = true;
}
private boolean isTimedOut() {
return timedOut;
}
}
}
}
| WatchDogEntry |
java | spring-projects__spring-framework | spring-aop/src/main/java/org/springframework/aop/aspectj/DeclareParentsAdvisor.java | {
"start": 1292,
"end": 3667
} | class ____ implements IntroductionAdvisor {
private final Advice advice;
private final Class<?> introducedInterface;
private final ClassFilter typePatternClassFilter;
/**
* Create a new advisor for this DeclareParents field.
* @param interfaceType static field defining the introduction
* @param typePattern type pattern the introduction is restricted to
* @param defaultImpl the default implementation class
*/
public DeclareParentsAdvisor(Class<?> interfaceType, String typePattern, Class<?> defaultImpl) {
this(interfaceType, typePattern,
new DelegatePerTargetObjectIntroductionInterceptor(defaultImpl, interfaceType));
}
/**
* Create a new advisor for this DeclareParents field.
* @param interfaceType static field defining the introduction
* @param typePattern type pattern the introduction is restricted to
* @param delegateRef the delegate implementation object
*/
public DeclareParentsAdvisor(Class<?> interfaceType, String typePattern, Object delegateRef) {
this(interfaceType, typePattern, new DelegatingIntroductionInterceptor(delegateRef));
}
/**
* Private constructor to share common code between impl-based delegate and reference-based delegate
* (cannot use method such as init() to share common code, due the use of final fields).
* @param interfaceType static field defining the introduction
* @param typePattern type pattern the introduction is restricted to
* @param interceptor the delegation advice as {@link IntroductionInterceptor}
*/
private DeclareParentsAdvisor(Class<?> interfaceType, String typePattern, IntroductionInterceptor interceptor) {
this.advice = interceptor;
this.introducedInterface = interfaceType;
// Excludes methods implemented.
ClassFilter typePatternFilter = new TypePatternClassFilter(typePattern);
ClassFilter exclusion = (clazz -> !this.introducedInterface.isAssignableFrom(clazz));
this.typePatternClassFilter = ClassFilters.intersection(typePatternFilter, exclusion);
}
@Override
public ClassFilter getClassFilter() {
return this.typePatternClassFilter;
}
@Override
public void validateInterfaces() throws IllegalArgumentException {
// Do nothing
}
@Override
public Advice getAdvice() {
return this.advice;
}
@Override
public Class<?>[] getInterfaces() {
return new Class<?>[] {this.introducedInterface};
}
}
| DeclareParentsAdvisor |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.