language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | apache__rocketmq | proxy/src/main/java/org/apache/rocketmq/proxy/grpc/v2/common/ResponseWriter.java | {
"start": 1174,
"end": 3034
} | class ____ {
private static final Logger log = LoggerFactory.getLogger(LoggerName.PROXY_LOGGER_NAME);
protected static final Object INSTANCE_CREATE_LOCK = new Object();
protected static volatile ResponseWriter instance;
public static ResponseWriter getInstance() {
if (instance == null) {
synchronized (INSTANCE_CREATE_LOCK) {
if (instance == null) {
instance = new ResponseWriter();
}
}
}
return instance;
}
public <T> void write(StreamObserver<T> observer, final T response) {
if (writeResponse(observer, response)) {
observer.onCompleted();
}
}
public <T> boolean writeResponse(StreamObserver<T> observer, final T response) {
if (null == response) {
return false;
}
log.debug("start to write response. response: {}", response);
if (isCancelled(observer)) {
log.warn("client has cancelled the request. response to write: {}", response);
return false;
}
try {
observer.onNext(response);
} catch (StatusRuntimeException statusRuntimeException) {
if (Status.CANCELLED.equals(statusRuntimeException.getStatus())) {
log.warn("client has cancelled the request. response to write: {}", response);
return false;
}
throw statusRuntimeException;
}
return true;
}
public <T> boolean isCancelled(StreamObserver<T> observer) {
if (observer instanceof ServerCallStreamObserver) {
final ServerCallStreamObserver<T> serverCallStreamObserver = (ServerCallStreamObserver<T>) observer;
return serverCallStreamObserver.isCancelled();
}
return false;
}
}
| ResponseWriter |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/action/get/TransportMultiGetActionTests.java | {
"start": 3191,
"end": 11588
} | class ____ extends ESTestCase {
private static ThreadPool threadPool;
private static TransportService transportService;
private static ProjectResolver projectResolver;
private static ClusterService clusterService;
private static TransportMultiGetAction transportAction;
@BeforeClass
public static void beforeClass() throws Exception {
threadPool = new TestThreadPool(TransportMultiGetActionTests.class.getSimpleName());
transportService = new TransportService(
Settings.EMPTY,
mock(Transport.class),
threadPool,
TransportService.NOOP_TRANSPORT_INTERCEPTOR,
boundAddress -> DiscoveryNodeUtils.builder(randomBase64UUID())
.applySettings(Settings.builder().put("node.name", "node1").build())
.address(boundAddress.publishAddress())
.build(),
null,
emptySet()
);
ProjectId projectId = randomProjectIdOrDefault();
projectResolver = TestProjectResolvers.singleProject(projectId);
final Index index1 = new Index("index1", randomBase64UUID());
final Index index2 = new Index("index2", randomBase64UUID());
final ProjectMetadata project = ProjectMetadata.builder(projectId)
.put(
new IndexMetadata.Builder(index1.getName()).settings(
indexSettings(IndexVersion.current(), 1, 1).put(IndexMetadata.SETTING_INDEX_UUID, index1.getUUID())
)
.putMapping(
XContentHelper.convertToJson(
BytesReference.bytes(
XContentFactory.jsonBuilder()
.startObject()
.startObject("_doc")
.startObject("_routing")
.field("required", false)
.endObject()
.endObject()
.endObject()
),
true,
XContentType.JSON
)
)
)
.put(
new IndexMetadata.Builder(index2.getName()).settings(
indexSettings(IndexVersion.current(), 1, 1).put(IndexMetadata.SETTING_INDEX_UUID, index1.getUUID())
)
.putMapping(
XContentHelper.convertToJson(
BytesReference.bytes(
XContentFactory.jsonBuilder()
.startObject()
.startObject("_doc")
.startObject("_routing")
.field("required", true)
.endObject()
.endObject()
.endObject()
),
true,
XContentType.JSON
)
)
)
.build();
final ClusterState clusterState = ClusterState.builder(new ClusterName(TransportMultiGetActionTests.class.getSimpleName()))
.metadata(new Metadata.Builder().put(project))
.build();
final ShardIterator index1ShardIterator = new ShardIterator(new ShardId(index1, randomInt()), Collections.emptyList());
final ShardIterator index2ShardIterator = new ShardIterator(new ShardId(index2, randomInt()), Collections.emptyList());
final OperationRouting operationRouting = mock(OperationRouting.class);
when(
operationRouting.getShards(
eq(clusterState.projectState(projectId)),
eq(index1.getName()),
anyString(),
nullable(String.class),
nullable(String.class)
)
).thenReturn(index1ShardIterator);
when(
operationRouting.getShards(
eq(clusterState.projectState(projectId)),
eq(index2.getName()),
anyString(),
nullable(String.class),
nullable(String.class)
)
).thenReturn(index2ShardIterator);
clusterService = mock(ClusterService.class);
when(clusterService.localNode()).thenReturn(transportService.getLocalNode());
when(clusterService.state()).thenReturn(clusterState);
when(clusterService.operationRouting()).thenReturn(operationRouting);
}
@AfterClass
public static void afterClass() {
ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS);
threadPool = null;
transportService = null;
clusterService = null;
transportAction = null;
}
public void testTransportMultiGetAction() {
final Task task = createTask();
final NodeClient client = new NodeClient(Settings.EMPTY, threadPool, TestProjectResolvers.alwaysThrow());
final MultiGetRequestBuilder request = new MultiGetRequestBuilder(client);
request.add(new MultiGetRequest.Item("index1", "1"));
request.add(new MultiGetRequest.Item("index1", "2"));
final AtomicBoolean shardActionInvoked = new AtomicBoolean(false);
transportAction = new TransportMultiGetAction(
transportService,
clusterService,
client,
new ActionFilters(emptySet()),
projectResolver,
new Resolver(),
mock(IndicesService.class)
) {
@Override
protected void executeShardAction(
final ActionListener<MultiGetResponse> listener,
final AtomicArray<MultiGetItemResponse> responses,
final Map<ShardId, MultiGetShardRequest> shardRequests
) {
shardActionInvoked.set(true);
assertEquals(2, responses.length());
assertNull(responses.get(0));
assertNull(responses.get(1));
}
};
ActionTestUtils.execute(transportAction, task, request.request(), ActionListener.noop());
assertTrue(shardActionInvoked.get());
}
public void testTransportMultiGetAction_withMissingRouting() {
final Task task = createTask();
final NodeClient client = new NodeClient(Settings.EMPTY, threadPool, TestProjectResolvers.alwaysThrow());
final MultiGetRequestBuilder request = new MultiGetRequestBuilder(client);
request.add(new MultiGetRequest.Item("index2", "1").routing("1"));
request.add(new MultiGetRequest.Item("index2", "2"));
final AtomicBoolean shardActionInvoked = new AtomicBoolean(false);
transportAction = new TransportMultiGetAction(
transportService,
clusterService,
client,
new ActionFilters(emptySet()),
projectResolver,
new Resolver(),
mock(IndicesService.class)
) {
@Override
protected void executeShardAction(
final ActionListener<MultiGetResponse> listener,
final AtomicArray<MultiGetItemResponse> responses,
final Map<ShardId, MultiGetShardRequest> shardRequests
) {
shardActionInvoked.set(true);
assertEquals(2, responses.length());
assertNull(responses.get(0));
assertThat(responses.get(1).getFailure().getFailure(), instanceOf(RoutingMissingException.class));
assertThat(responses.get(1).getFailure().getFailure().getMessage(), equalTo("routing is required for [index2]/[_doc]/[2]"));
}
};
ActionTestUtils.execute(transportAction, task, request.request(), ActionListener.noop());
assertTrue(shardActionInvoked.get());
}
private static Task createTask() {
return new Task(
randomLong(),
"transport",
TransportMultiGetAction.NAME,
"description",
new TaskId(randomLong() + ":" + randomLong()),
emptyMap()
);
}
static | TransportMultiGetActionTests |
java | apache__flink | flink-core/src/main/java/org/apache/flink/core/memory/MemorySegment.java | {
"start": 2886,
"end": 74899
} | class ____ {
/** System property for activating multiple free segment check, for testing purpose. */
public static final String CHECK_MULTIPLE_FREE_PROPERTY =
"flink.tests.check-segment-multiple-free";
private static final boolean checkMultipleFree =
System.getProperties().containsKey(CHECK_MULTIPLE_FREE_PROPERTY);
/** The unsafe handle for transparent memory copied (heap / off-heap). */
@SuppressWarnings("restriction")
private static final sun.misc.Unsafe UNSAFE = MemoryUtils.UNSAFE;
/** The beginning of the byte array contents, relative to the byte array object. */
@SuppressWarnings("restriction")
private static final long BYTE_ARRAY_BASE_OFFSET = UNSAFE.arrayBaseOffset(byte[].class);
/**
* Constant that flags the byte order. Because this is a boolean constant, the JIT compiler can
* use this well to aggressively eliminate the non-applicable code paths.
*/
private static final boolean LITTLE_ENDIAN =
(ByteOrder.nativeOrder() == ByteOrder.LITTLE_ENDIAN);
// ------------------------------------------------------------------------
/**
* The heap byte array object relative to which we access the memory.
*
* <p>Is non-<tt>null</tt> if the memory is on the heap, and is <tt>null</tt>, if the memory is
* off the heap. If we have this buffer, we must never void this reference, or the memory
* segment will point to undefined addresses outside the heap and may in out-of-order execution
* cases cause segmentation faults.
*/
@Nullable private final byte[] heapMemory;
/**
* The direct byte buffer that wraps the off-heap memory. This memory segment holds a reference
* to that buffer, so as long as this memory segment lives, the memory will not be released.
*/
@Nullable private ByteBuffer offHeapBuffer;
/**
* The address to the data, relative to the heap memory byte array. If the heap memory byte
* array is <tt>null</tt>, this becomes an absolute memory address outside the heap.
*/
private long address;
/**
* The address one byte after the last addressable byte, i.e. <tt>address + size</tt> while the
* segment is not disposed.
*/
private final long addressLimit;
/** The size in bytes of the memory segment. */
private final int size;
/** Optional owner of the memory segment. */
@Nullable private final Object owner;
@Nullable private Runnable cleaner;
/**
* Wrapping is not allowed when the underlying memory is unsafe. Unsafe memory can be actively
* released, without reference counting. Therefore, access from wrapped buffers, which may not
* be aware of the releasing of memory, could be risky.
*/
private final boolean allowWrap;
private final AtomicBoolean isFreedAtomic;
/**
* Creates a new memory segment that represents the memory of the byte array.
*
* <p>Since the byte array is backed by on-heap memory, this memory segment holds its data on
* heap. The buffer must be at least of size 8 bytes.
*
* <p>The memory segment references the given owner.
*
* @param buffer The byte array whose memory is represented by this memory segment.
* @param owner The owner references by this memory segment.
*/
MemorySegment(@Nonnull byte[] buffer, @Nullable Object owner) {
this.heapMemory = buffer;
this.offHeapBuffer = null;
this.size = buffer.length;
this.address = BYTE_ARRAY_BASE_OFFSET;
this.addressLimit = this.address + this.size;
this.owner = owner;
this.allowWrap = true;
this.cleaner = null;
this.isFreedAtomic = new AtomicBoolean(false);
}
/**
* Creates a new memory segment that represents the memory backing the given direct byte buffer.
* Note that the given ByteBuffer must be direct {@link
* java.nio.ByteBuffer#allocateDirect(int)}, otherwise this method with throw an
* IllegalArgumentException.
*
* <p>The memory segment references the given owner.
*
* @param buffer The byte buffer whose memory is represented by this memory segment.
* @param owner The owner references by this memory segment.
* @throws IllegalArgumentException Thrown, if the given ByteBuffer is not direct.
*/
MemorySegment(@Nonnull ByteBuffer buffer, @Nullable Object owner) {
this(buffer, owner, true, null);
}
/**
* Creates a new memory segment that represents the memory backing the given direct byte buffer.
* Note that the given ByteBuffer must be direct {@link
* java.nio.ByteBuffer#allocateDirect(int)}, otherwise this method with throw an
* IllegalArgumentException.
*
* <p>The memory segment references the given owner.
*
* @param buffer The byte buffer whose memory is represented by this memory segment.
* @param owner The owner references by this memory segment.
* @param allowWrap Whether wrapping {@link ByteBuffer}s from the segment is allowed.
* @param cleaner The cleaner to be called on free segment.
* @throws IllegalArgumentException Thrown, if the given ByteBuffer is not direct.
*/
MemorySegment(
@Nonnull ByteBuffer buffer,
@Nullable Object owner,
boolean allowWrap,
@Nullable Runnable cleaner) {
this.heapMemory = null;
this.offHeapBuffer = buffer;
this.size = buffer.capacity();
this.address = getByteBufferAddress(buffer);
this.addressLimit = this.address + this.size;
this.owner = owner;
this.allowWrap = allowWrap;
this.cleaner = cleaner;
this.isFreedAtomic = new AtomicBoolean(false);
}
// ------------------------------------------------------------------------
// Memory Segment Operations
// ------------------------------------------------------------------------
/**
* Gets the size of the memory segment, in bytes.
*
* @return The size of the memory segment.
*/
public int size() {
return size;
}
/**
* Checks whether the memory segment was freed.
*
* @return <tt>true</tt>, if the memory segment has been freed, <tt>false</tt> otherwise.
*/
@VisibleForTesting
public boolean isFreed() {
return address > addressLimit;
}
/**
* Frees this memory segment.
*
* <p>After this operation has been called, no further operations are possible on the memory
* segment and will fail. The actual memory (heap or off-heap) will only be released after this
* memory segment object has become garbage collected.
*/
public void free() {
if (isFreedAtomic.getAndSet(true)) {
// the segment has already been freed
if (checkMultipleFree) {
throw new IllegalStateException("MemorySegment can be freed only once!");
}
} else {
// this ensures we can place no more data and trigger
// the checks for the freed segment
address = addressLimit + 1;
offHeapBuffer = null; // to enable GC of unsafe memory
if (cleaner != null) {
cleaner.run();
cleaner = null;
}
}
}
/**
* Checks whether this memory segment is backed by off-heap memory.
*
* @return <tt>true</tt>, if the memory segment is backed by off-heap memory, <tt>false</tt> if
* it is backed by heap memory.
*/
public boolean isOffHeap() {
return heapMemory == null;
}
/**
* Returns the byte array of on-heap memory segments.
*
* @return underlying byte array
* @throws IllegalStateException if the memory segment does not represent on-heap memory
*/
public byte[] getArray() {
if (heapMemory != null) {
return heapMemory;
} else {
throw new IllegalStateException("Memory segment does not represent heap memory");
}
}
/**
* Returns the off-heap buffer of memory segments.
*
* @return underlying off-heap buffer
* @throws IllegalStateException if the memory segment does not represent off-heap buffer
*/
public ByteBuffer getOffHeapBuffer() {
if (offHeapBuffer != null) {
return offHeapBuffer;
} else {
throw new IllegalStateException("Memory segment does not represent off-heap buffer");
}
}
/**
* Returns the memory address of off-heap memory segments.
*
* @return absolute memory address outside the heap
* @throws IllegalStateException if the memory segment does not represent off-heap memory
*/
public long getAddress() {
if (heapMemory == null) {
return address;
} else {
throw new IllegalStateException("Memory segment does not represent off heap memory");
}
}
/**
* Wraps the chunk of the underlying memory located between <tt>offset</tt> and <tt>offset +
* length</tt> in a NIO ByteBuffer. The ByteBuffer has the full segment as capacity and the
* offset and length parameters set the buffers position and limit.
*
* @param offset The offset in the memory segment.
* @param length The number of bytes to be wrapped as a buffer.
* @return A <tt>ByteBuffer</tt> backed by the specified portion of the memory segment.
* @throws IndexOutOfBoundsException Thrown, if offset is negative or larger than the memory
* segment size, or if the offset plus the length is larger than the segment size.
*/
public ByteBuffer wrap(int offset, int length) {
if (!allowWrap) {
throw new UnsupportedOperationException(
"Wrap is not supported by this segment. This usually indicates that the underlying memory is unsafe, thus transferring of ownership is not allowed.");
}
return wrapInternal(offset, length);
}
private ByteBuffer wrapInternal(int offset, int length) {
if (address <= addressLimit) {
if (heapMemory != null) {
return ByteBuffer.wrap(heapMemory, offset, length);
} else {
try {
ByteBuffer wrapper = Preconditions.checkNotNull(offHeapBuffer).duplicate();
wrapper.limit(offset + length);
wrapper.position(offset);
return wrapper;
} catch (IllegalArgumentException e) {
throw new IndexOutOfBoundsException();
}
}
} else {
throw new IllegalStateException("segment has been freed");
}
}
/**
* Gets the owner of this memory segment. Returns null, if the owner was not set.
*
* @return The owner of the memory segment, or null, if it does not have an owner.
*/
@Nullable
public Object getOwner() {
return owner;
}
// ------------------------------------------------------------------------
// Random Access get() and put() methods
// ------------------------------------------------------------------------
// ------------------------------------------------------------------------
// Notes on the implementation: We try to collapse as many checks as
// possible. We need to obey the following rules to make this safe
// against segfaults:
//
// - Grab mutable fields onto the stack before checking and using. This
// guards us against concurrent modifications which invalidate the
// pointers
// - Use subtractions for range checks, as they are tolerant
// ------------------------------------------------------------------------
/**
* Reads the byte at the given position.
*
* @param index The position from which the byte will be read
* @return The byte at the given position.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger or equal to the
* size of the memory segment.
*/
public byte get(int index) {
final long pos = address + index;
if (index >= 0 && pos < addressLimit) {
return UNSAFE.getByte(heapMemory, pos);
} else if (address > addressLimit) {
throw new IllegalStateException("segment has been freed");
} else {
// index is in fact invalid
throw new IndexOutOfBoundsException();
}
}
/**
* Writes the given byte into this buffer at the given position.
*
* @param index The index at which the byte will be written.
* @param b The byte value to be written.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger or equal to the
* size of the memory segment.
*/
public void put(int index, byte b) {
final long pos = address + index;
if (index >= 0 && pos < addressLimit) {
UNSAFE.putByte(heapMemory, pos, b);
} else if (address > addressLimit) {
throw new IllegalStateException("segment has been freed");
} else {
// index is in fact invalid
throw new IndexOutOfBoundsException();
}
}
/**
* Bulk get method. Copies dst.length memory from the specified position to the destination
* memory.
*
* @param index The position at which the first byte will be read.
* @param dst The memory into which the memory will be copied.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or too large that the
* data between the index and the memory segment end is not enough to fill the destination
* array.
*/
public void get(int index, byte[] dst) {
get(index, dst, 0, dst.length);
}
/**
* Bulk put method. Copies src.length memory from the source memory into the memory segment
* beginning at the specified position.
*
* @param index The index in the memory segment array, where the data is put.
* @param src The source array to copy the data from.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or too large such that
* the array size exceed the amount of memory between the index and the memory segment's
* end.
*/
public void put(int index, byte[] src) {
put(index, src, 0, src.length);
}
/**
* Bulk get method. Copies length memory from the specified position to the destination memory,
* beginning at the given offset.
*
* @param index The position at which the first byte will be read.
* @param dst The memory into which the memory will be copied.
* @param offset The copying offset in the destination memory.
* @param length The number of bytes to be copied.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or too large that the
* requested number of bytes exceed the amount of memory between the index and the memory
* segment's end.
*/
public void get(int index, byte[] dst, int offset, int length) {
// check the byte array offset and length and the status
if ((offset | length | (offset + length) | (dst.length - (offset + length))) < 0) {
throw new IndexOutOfBoundsException();
}
final long pos = address + index;
if (index >= 0 && pos <= addressLimit - length) {
final long arrayAddress = BYTE_ARRAY_BASE_OFFSET + offset;
UNSAFE.copyMemory(heapMemory, pos, dst, arrayAddress, length);
} else if (address > addressLimit) {
throw new IllegalStateException("segment has been freed");
} else {
throw new IndexOutOfBoundsException(
String.format(
"pos: %d, length: %d, index: %d, offset: %d",
pos, length, index, offset));
}
}
/**
* Bulk put method. Copies length memory starting at position offset from the source memory into
* the memory segment starting at the specified index.
*
* @param index The position in the memory segment array, where the data is put.
* @param src The source array to copy the data from.
* @param offset The offset in the source array where the copying is started.
* @param length The number of bytes to copy.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or too large such that
* the array portion to copy exceed the amount of memory between the index and the memory
* segment's end.
*/
public void put(int index, byte[] src, int offset, int length) {
// check the byte array offset and length
if ((offset | length | (offset + length) | (src.length - (offset + length))) < 0) {
throw new IndexOutOfBoundsException();
}
final long pos = address + index;
if (index >= 0 && pos <= addressLimit - length) {
final long arrayAddress = BYTE_ARRAY_BASE_OFFSET + offset;
UNSAFE.copyMemory(src, arrayAddress, heapMemory, pos, length);
} else if (address > addressLimit) {
throw new IllegalStateException("segment has been freed");
} else {
// index is in fact invalid
throw new IndexOutOfBoundsException();
}
}
/**
* Reads one byte at the given position and returns its boolean representation.
*
* @param index The position from which the memory will be read.
* @return The boolean value at the given position.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 1.
*/
public boolean getBoolean(int index) {
return get(index) != 0;
}
/**
* Writes one byte containing the byte value into this buffer at the given position.
*
* @param index The position at which the memory will be written.
* @param value The char value to be written.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 1.
*/
public void putBoolean(int index, boolean value) {
put(index, (byte) (value ? 1 : 0));
}
/**
* Reads a char value from the given position, in the system's native byte order.
*
* @param index The position from which the memory will be read.
* @return The char value at the given position.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 2.
*/
@SuppressWarnings("restriction")
public char getChar(int index) {
final long pos = address + index;
if (index >= 0 && pos <= addressLimit - 2) {
return UNSAFE.getChar(heapMemory, pos);
} else if (address > addressLimit) {
throw new IllegalStateException("This segment has been freed.");
} else {
// index is in fact invalid
throw new IndexOutOfBoundsException();
}
}
/**
* Reads a character value (16 bit, 2 bytes) from the given position, in little-endian byte
* order. This method's speed depends on the system's native byte order, and it is possibly
* slower than {@link #getChar(int)}. For most cases (such as transient storage in memory or
* serialization for I/O and network), it suffices to know that the byte order in which the
* value is written is the same as the one in which it is read, and {@link #getChar(int)} is the
* preferable choice.
*
* @param index The position from which the value will be read.
* @return The character value at the given position.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 2.
*/
public char getCharLittleEndian(int index) {
if (LITTLE_ENDIAN) {
return getChar(index);
} else {
return Character.reverseBytes(getChar(index));
}
}
/**
* Reads a character value (16 bit, 2 bytes) from the given position, in big-endian byte order.
* This method's speed depends on the system's native byte order, and it is possibly slower than
* {@link #getChar(int)}. For most cases (such as transient storage in memory or serialization
* for I/O and network), it suffices to know that the byte order in which the value is written
* is the same as the one in which it is read, and {@link #getChar(int)} is the preferable
* choice.
*
* @param index The position from which the value will be read.
* @return The character value at the given position.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 2.
*/
public char getCharBigEndian(int index) {
if (LITTLE_ENDIAN) {
return Character.reverseBytes(getChar(index));
} else {
return getChar(index);
}
}
/**
* Writes a char value to the given position, in the system's native byte order.
*
* @param index The position at which the memory will be written.
* @param value The char value to be written.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 2.
*/
@SuppressWarnings("restriction")
public void putChar(int index, char value) {
final long pos = address + index;
if (index >= 0 && pos <= addressLimit - 2) {
UNSAFE.putChar(heapMemory, pos, value);
} else if (address > addressLimit) {
throw new IllegalStateException("segment has been freed");
} else {
// index is in fact invalid
throw new IndexOutOfBoundsException();
}
}
/**
* Writes the given character (16 bit, 2 bytes) to the given position in little-endian byte
* order. This method's speed depends on the system's native byte order, and it is possibly
* slower than {@link #putChar(int, char)}. For most cases (such as transient storage in memory
* or serialization for I/O and network), it suffices to know that the byte order in which the
* value is written is the same as the one in which it is read, and {@link #putChar(int, char)}
* is the preferable choice.
*
* @param index The position at which the value will be written.
* @param value The char value to be written.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 2.
*/
public void putCharLittleEndian(int index, char value) {
if (LITTLE_ENDIAN) {
putChar(index, value);
} else {
putChar(index, Character.reverseBytes(value));
}
}
/**
* Writes the given character (16 bit, 2 bytes) to the given position in big-endian byte order.
* This method's speed depends on the system's native byte order, and it is possibly slower than
* {@link #putChar(int, char)}. For most cases (such as transient storage in memory or
* serialization for I/O and network), it suffices to know that the byte order in which the
* value is written is the same as the one in which it is read, and {@link #putChar(int, char)}
* is the preferable choice.
*
* @param index The position at which the value will be written.
* @param value The char value to be written.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 2.
*/
public void putCharBigEndian(int index, char value) {
if (LITTLE_ENDIAN) {
putChar(index, Character.reverseBytes(value));
} else {
putChar(index, value);
}
}
/**
* Reads a short integer value (16 bit, 2 bytes) from the given position, composing them into a
* short value according to the current byte order.
*
* @param index The position from which the memory will be read.
* @return The short value at the given position.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 2.
*/
public short getShort(int index) {
final long pos = address + index;
if (index >= 0 && pos <= addressLimit - 2) {
return UNSAFE.getShort(heapMemory, pos);
} else if (address > addressLimit) {
throw new IllegalStateException("segment has been freed");
} else {
// index is in fact invalid
throw new IndexOutOfBoundsException();
}
}
/**
* Reads a short integer value (16 bit, 2 bytes) from the given position, in little-endian byte
* order. This method's speed depends on the system's native byte order, and it is possibly
* slower than {@link #getShort(int)}. For most cases (such as transient storage in memory or
* serialization for I/O and network), it suffices to know that the byte order in which the
* value is written is the same as the one in which it is read, and {@link #getShort(int)} is
* the preferable choice.
*
* @param index The position from which the value will be read.
* @return The short value at the given position.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 2.
*/
public short getShortLittleEndian(int index) {
if (LITTLE_ENDIAN) {
return getShort(index);
} else {
return Short.reverseBytes(getShort(index));
}
}
/**
* Reads a short integer value (16 bit, 2 bytes) from the given position, in big-endian byte
* order. This method's speed depends on the system's native byte order, and it is possibly
* slower than {@link #getShort(int)}. For most cases (such as transient storage in memory or
* serialization for I/O and network), it suffices to know that the byte order in which the
* value is written is the same as the one in which it is read, and {@link #getShort(int)} is
* the preferable choice.
*
* @param index The position from which the value will be read.
* @return The short value at the given position.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 2.
*/
public short getShortBigEndian(int index) {
if (LITTLE_ENDIAN) {
return Short.reverseBytes(getShort(index));
} else {
return getShort(index);
}
}
/**
* Writes the given short value into this buffer at the given position, using the native byte
* order of the system.
*
* @param index The position at which the value will be written.
* @param value The short value to be written.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 2.
*/
public void putShort(int index, short value) {
final long pos = address + index;
if (index >= 0 && pos <= addressLimit - 2) {
UNSAFE.putShort(heapMemory, pos, value);
} else if (address > addressLimit) {
throw new IllegalStateException("segment has been freed");
} else {
// index is in fact invalid
throw new IndexOutOfBoundsException();
}
}
/**
* Writes the given short integer value (16 bit, 2 bytes) to the given position in little-endian
* byte order. This method's speed depends on the system's native byte order, and it is possibly
* slower than {@link #putShort(int, short)}. For most cases (such as transient storage in
* memory or serialization for I/O and network), it suffices to know that the byte order in
* which the value is written is the same as the one in which it is read, and {@link
* #putShort(int, short)} is the preferable choice.
*
* @param index The position at which the value will be written.
* @param value The short value to be written.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 2.
*/
public void putShortLittleEndian(int index, short value) {
if (LITTLE_ENDIAN) {
putShort(index, value);
} else {
putShort(index, Short.reverseBytes(value));
}
}
/**
* Writes the given short integer value (16 bit, 2 bytes) to the given position in big-endian
* byte order. This method's speed depends on the system's native byte order, and it is possibly
* slower than {@link #putShort(int, short)}. For most cases (such as transient storage in
* memory or serialization for I/O and network), it suffices to know that the byte order in
* which the value is written is the same as the one in which it is read, and {@link
* #putShort(int, short)} is the preferable choice.
*
* @param index The position at which the value will be written.
* @param value The short value to be written.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 2.
*/
public void putShortBigEndian(int index, short value) {
if (LITTLE_ENDIAN) {
putShort(index, Short.reverseBytes(value));
} else {
putShort(index, value);
}
}
/**
* Reads an int value (32bit, 4 bytes) from the given position, in the system's native byte
* order. This method offers the best speed for integer reading and should be used unless a
* specific byte order is required. In most cases, it suffices to know that the byte order in
* which the value is written is the same as the one in which it is read (such as transient
* storage in memory, or serialization for I/O and network), making this method the preferable
* choice.
*
* @param index The position from which the value will be read.
* @return The int value at the given position.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 4.
*/
public int getInt(int index) {
final long pos = address + index;
if (index >= 0 && pos <= addressLimit - 4) {
return UNSAFE.getInt(heapMemory, pos);
} else if (address > addressLimit) {
throw new IllegalStateException("segment has been freed");
} else {
// index is in fact invalid
throw new IndexOutOfBoundsException();
}
}
/**
* Reads an int value (32bit, 4 bytes) from the given position, in little-endian byte order.
* This method's speed depends on the system's native byte order, and it is possibly slower than
* {@link #getInt(int)}. For most cases (such as transient storage in memory or serialization
* for I/O and network), it suffices to know that the byte order in which the value is written
* is the same as the one in which it is read, and {@link #getInt(int)} is the preferable
* choice.
*
* @param index The position from which the value will be read.
* @return The int value at the given position.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 4.
*/
public int getIntLittleEndian(int index) {
if (LITTLE_ENDIAN) {
return getInt(index);
} else {
return Integer.reverseBytes(getInt(index));
}
}
/**
* Reads an int value (32bit, 4 bytes) from the given position, in big-endian byte order. This
* method's speed depends on the system's native byte order, and it is possibly slower than
* {@link #getInt(int)}. For most cases (such as transient storage in memory or serialization
* for I/O and network), it suffices to know that the byte order in which the value is written
* is the same as the one in which it is read, and {@link #getInt(int)} is the preferable
* choice.
*
* @param index The position from which the value will be read.
* @return The int value at the given position.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 4.
*/
public int getIntBigEndian(int index) {
if (LITTLE_ENDIAN) {
return Integer.reverseBytes(getInt(index));
} else {
return getInt(index);
}
}
/**
* Writes the given int value (32bit, 4 bytes) to the given position in the system's native byte
* order. This method offers the best speed for integer writing and should be used unless a
* specific byte order is required. In most cases, it suffices to know that the byte order in
* which the value is written is the same as the one in which it is read (such as transient
* storage in memory, or serialization for I/O and network), making this method the preferable
* choice.
*
* @param index The position at which the value will be written.
* @param value The int value to be written.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 4.
*/
public void putInt(int index, int value) {
final long pos = address + index;
if (index >= 0 && pos <= addressLimit - 4) {
UNSAFE.putInt(heapMemory, pos, value);
} else if (address > addressLimit) {
throw new IllegalStateException("segment has been freed");
} else {
// index is in fact invalid
throw new IndexOutOfBoundsException();
}
}
/**
* Writes the given int value (32bit, 4 bytes) to the given position in little endian byte
* order. This method's speed depends on the system's native byte order, and it is possibly
* slower than {@link #putInt(int, int)}. For most cases (such as transient storage in memory or
* serialization for I/O and network), it suffices to know that the byte order in which the
* value is written is the same as the one in which it is read, and {@link #putInt(int, int)} is
* the preferable choice.
*
* @param index The position at which the value will be written.
* @param value The int value to be written.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 4.
*/
public void putIntLittleEndian(int index, int value) {
if (LITTLE_ENDIAN) {
putInt(index, value);
} else {
putInt(index, Integer.reverseBytes(value));
}
}
/**
* Writes the given int value (32bit, 4 bytes) to the given position in big endian byte order.
* This method's speed depends on the system's native byte order, and it is possibly slower than
* {@link #putInt(int, int)}. For most cases (such as transient storage in memory or
* serialization for I/O and network), it suffices to know that the byte order in which the
* value is written is the same as the one in which it is read, and {@link #putInt(int, int)} is
* the preferable choice.
*
* @param index The position at which the value will be written.
* @param value The int value to be written.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 4.
*/
public void putIntBigEndian(int index, int value) {
if (LITTLE_ENDIAN) {
putInt(index, Integer.reverseBytes(value));
} else {
putInt(index, value);
}
}
/**
* Reads a long value (64bit, 8 bytes) from the given position, in the system's native byte
* order. This method offers the best speed for long integer reading and should be used unless a
* specific byte order is required. In most cases, it suffices to know that the byte order in
* which the value is written is the same as the one in which it is read (such as transient
* storage in memory, or serialization for I/O and network), making this method the preferable
* choice.
*
* @param index The position from which the value will be read.
* @return The long value at the given position.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 8.
*/
public long getLong(int index) {
final long pos = address + index;
if (index >= 0 && pos <= addressLimit - 8) {
return UNSAFE.getLong(heapMemory, pos);
} else if (address > addressLimit) {
throw new IllegalStateException("segment has been freed");
} else {
// index is in fact invalid
throw new IndexOutOfBoundsException();
}
}
/**
* Reads a long integer value (64bit, 8 bytes) from the given position, in little endian byte
* order. This method's speed depends on the system's native byte order, and it is possibly
* slower than {@link #getLong(int)}. For most cases (such as transient storage in memory or
* serialization for I/O and network), it suffices to know that the byte order in which the
* value is written is the same as the one in which it is read, and {@link #getLong(int)} is the
* preferable choice.
*
* @param index The position from which the value will be read.
* @return The long value at the given position.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 8.
*/
public long getLongLittleEndian(int index) {
if (LITTLE_ENDIAN) {
return getLong(index);
} else {
return Long.reverseBytes(getLong(index));
}
}
/**
* Reads a long integer value (64bit, 8 bytes) from the given position, in big endian byte
* order. This method's speed depends on the system's native byte order, and it is possibly
* slower than {@link #getLong(int)}. For most cases (such as transient storage in memory or
* serialization for I/O and network), it suffices to know that the byte order in which the
* value is written is the same as the one in which it is read, and {@link #getLong(int)} is the
* preferable choice.
*
* @param index The position from which the value will be read.
* @return The long value at the given position.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 8.
*/
public long getLongBigEndian(int index) {
if (LITTLE_ENDIAN) {
return Long.reverseBytes(getLong(index));
} else {
return getLong(index);
}
}
/**
* Writes the given long value (64bit, 8 bytes) to the given position in the system's native
* byte order. This method offers the best speed for long integer writing and should be used
* unless a specific byte order is required. In most cases, it suffices to know that the byte
* order in which the value is written is the same as the one in which it is read (such as
* transient storage in memory, or serialization for I/O and network), making this method the
* preferable choice.
*
* @param index The position at which the value will be written.
* @param value The long value to be written.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 8.
*/
public void putLong(int index, long value) {
final long pos = address + index;
if (index >= 0 && pos <= addressLimit - 8) {
UNSAFE.putLong(heapMemory, pos, value);
} else if (address > addressLimit) {
throw new IllegalStateException("segment has been freed");
} else {
// index is in fact invalid
throw new IndexOutOfBoundsException();
}
}
/**
* Writes the given long value (64bit, 8 bytes) to the given position in little endian byte
* order. This method's speed depends on the system's native byte order, and it is possibly
* slower than {@link #putLong(int, long)}. For most cases (such as transient storage in memory
* or serialization for I/O and network), it suffices to know that the byte order in which the
* value is written is the same as the one in which it is read, and {@link #putLong(int, long)}
* is the preferable choice.
*
* @param index The position at which the value will be written.
* @param value The long value to be written.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 8.
*/
public void putLongLittleEndian(int index, long value) {
if (LITTLE_ENDIAN) {
putLong(index, value);
} else {
putLong(index, Long.reverseBytes(value));
}
}
/**
* Writes the given long value (64bit, 8 bytes) to the given position in big endian byte order.
* This method's speed depends on the system's native byte order, and it is possibly slower than
* {@link #putLong(int, long)}. For most cases (such as transient storage in memory or
* serialization for I/O and network), it suffices to know that the byte order in which the
* value is written is the same as the one in which it is read, and {@link #putLong(int, long)}
* is the preferable choice.
*
* @param index The position at which the value will be written.
* @param value The long value to be written.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 8.
*/
public void putLongBigEndian(int index, long value) {
if (LITTLE_ENDIAN) {
putLong(index, Long.reverseBytes(value));
} else {
putLong(index, value);
}
}
/**
* Reads a single-precision floating point value (32bit, 4 bytes) from the given position, in
* the system's native byte order. This method offers the best speed for float reading and
* should be used unless a specific byte order is required. In most cases, it suffices to know
* that the byte order in which the value is written is the same as the one in which it is read
* (such as transient storage in memory, or serialization for I/O and network), making this
* method the preferable choice.
*
* @param index The position from which the value will be read.
* @return The float value at the given position.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 4.
*/
public float getFloat(int index) {
return Float.intBitsToFloat(getInt(index));
}
/**
* Reads a single-precision floating point value (32bit, 4 bytes) from the given position, in
* little endian byte order. This method's speed depends on the system's native byte order, and
* it is possibly slower than {@link #getFloat(int)}. For most cases (such as transient storage
* in memory or serialization for I/O and network), it suffices to know that the byte order in
* which the value is written is the same as the one in which it is read, and {@link
* #getFloat(int)} is the preferable choice.
*
* @param index The position from which the value will be read.
* @return The long value at the given position.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 4.
*/
public float getFloatLittleEndian(int index) {
return Float.intBitsToFloat(getIntLittleEndian(index));
}
/**
* Reads a single-precision floating point value (32bit, 4 bytes) from the given position, in
* big endian byte order. This method's speed depends on the system's native byte order, and it
* is possibly slower than {@link #getFloat(int)}. For most cases (such as transient storage in
* memory or serialization for I/O and network), it suffices to know that the byte order in
* which the value is written is the same as the one in which it is read, and {@link
* #getFloat(int)} is the preferable choice.
*
* @param index The position from which the value will be read.
* @return The long value at the given position.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 4.
*/
public float getFloatBigEndian(int index) {
return Float.intBitsToFloat(getIntBigEndian(index));
}
/**
* Writes the given single-precision float value (32bit, 4 bytes) to the given position in the
* system's native byte order. This method offers the best speed for float writing and should be
* used unless a specific byte order is required. In most cases, it suffices to know that the
* byte order in which the value is written is the same as the one in which it is read (such as
* transient storage in memory, or serialization for I/O and network), making this method the
* preferable choice.
*
* @param index The position at which the value will be written.
* @param value The float value to be written.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 4.
*/
public void putFloat(int index, float value) {
putInt(index, Float.floatToRawIntBits(value));
}
/**
* Writes the given single-precision float value (32bit, 4 bytes) to the given position in
* little endian byte order. This method's speed depends on the system's native byte order, and
* it is possibly slower than {@link #putFloat(int, float)}. For most cases (such as transient
* storage in memory or serialization for I/O and network), it suffices to know that the byte
* order in which the value is written is the same as the one in which it is read, and {@link
* #putFloat(int, float)} is the preferable choice.
*
* @param index The position at which the value will be written.
* @param value The long value to be written.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 4.
*/
public void putFloatLittleEndian(int index, float value) {
putIntLittleEndian(index, Float.floatToRawIntBits(value));
}
/**
* Writes the given single-precision float value (32bit, 4 bytes) to the given position in big
* endian byte order. This method's speed depends on the system's native byte order, and it is
* possibly slower than {@link #putFloat(int, float)}. For most cases (such as transient storage
* in memory or serialization for I/O and network), it suffices to know that the byte order in
* which the value is written is the same as the one in which it is read, and {@link
* #putFloat(int, float)} is the preferable choice.
*
* @param index The position at which the value will be written.
* @param value The long value to be written.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 4.
*/
public void putFloatBigEndian(int index, float value) {
putIntBigEndian(index, Float.floatToRawIntBits(value));
}
/**
* Reads a double-precision floating point value (64bit, 8 bytes) from the given position, in
* the system's native byte order. This method offers the best speed for double reading and
* should be used unless a specific byte order is required. In most cases, it suffices to know
* that the byte order in which the value is written is the same as the one in which it is read
* (such as transient storage in memory, or serialization for I/O and network), making this
* method the preferable choice.
*
* @param index The position from which the value will be read.
* @return The double value at the given position.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 8.
*/
public double getDouble(int index) {
return Double.longBitsToDouble(getLong(index));
}
/**
* Reads a double-precision floating point value (64bit, 8 bytes) from the given position, in
* little endian byte order. This method's speed depends on the system's native byte order, and
* it is possibly slower than {@link #getDouble(int)}. For most cases (such as transient storage
* in memory or serialization for I/O and network), it suffices to know that the byte order in
* which the value is written is the same as the one in which it is read, and {@link
* #getDouble(int)} is the preferable choice.
*
* @param index The position from which the value will be read.
* @return The long value at the given position.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 8.
*/
public double getDoubleLittleEndian(int index) {
return Double.longBitsToDouble(getLongLittleEndian(index));
}
/**
* Reads a double-precision floating point value (64bit, 8 bytes) from the given position, in
* big endian byte order. This method's speed depends on the system's native byte order, and it
* is possibly slower than {@link #getDouble(int)}. For most cases (such as transient storage in
* memory or serialization for I/O and network), it suffices to know that the byte order in
* which the value is written is the same as the one in which it is read, and {@link
* #getDouble(int)} is the preferable choice.
*
* @param index The position from which the value will be read.
* @return The long value at the given position.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 8.
*/
public double getDoubleBigEndian(int index) {
return Double.longBitsToDouble(getLongBigEndian(index));
}
/**
* Writes the given double-precision floating-point value (64bit, 8 bytes) to the given position
* in the system's native byte order. This method offers the best speed for double writing and
* should be used unless a specific byte order is required. In most cases, it suffices to know
* that the byte order in which the value is written is the same as the one in which it is read
* (such as transient storage in memory, or serialization for I/O and network), making this
* method the preferable choice.
*
* @param index The position at which the memory will be written.
* @param value The double value to be written.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 8.
*/
public void putDouble(int index, double value) {
putLong(index, Double.doubleToRawLongBits(value));
}
/**
* Writes the given double-precision floating-point value (64bit, 8 bytes) to the given position
* in little endian byte order. This method's speed depends on the system's native byte order,
* and it is possibly slower than {@link #putDouble(int, double)}. For most cases (such as
* transient storage in memory or serialization for I/O and network), it suffices to know that
* the byte order in which the value is written is the same as the one in which it is read, and
* {@link #putDouble(int, double)} is the preferable choice.
*
* @param index The position at which the value will be written.
* @param value The long value to be written.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 8.
*/
public void putDoubleLittleEndian(int index, double value) {
putLongLittleEndian(index, Double.doubleToRawLongBits(value));
}
/**
* Writes the given double-precision floating-point value (64bit, 8 bytes) to the given position
* in big endian byte order. This method's speed depends on the system's native byte order, and
* it is possibly slower than {@link #putDouble(int, double)}. For most cases (such as transient
* storage in memory or serialization for I/O and network), it suffices to know that the byte
* order in which the value is written is the same as the one in which it is read, and {@link
* #putDouble(int, double)} is the preferable choice.
*
* @param index The position at which the value will be written.
* @param value The long value to be written.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 8.
*/
public void putDoubleBigEndian(int index, double value) {
putLongBigEndian(index, Double.doubleToRawLongBits(value));
}
// -------------------------------------------------------------------------
// Bulk Read and Write Methods
// -------------------------------------------------------------------------
public void get(DataOutput out, int offset, int length) throws IOException {
if (address <= addressLimit) {
if (heapMemory != null) {
out.write(heapMemory, offset, length);
} else {
while (length >= 8) {
out.writeLong(getLongBigEndian(offset));
offset += 8;
length -= 8;
}
while (length > 0) {
out.writeByte(get(offset));
offset++;
length--;
}
}
} else {
throw new IllegalStateException("segment has been freed");
}
}
/**
* Bulk put method. Copies length memory from the given DataInput to the memory starting at
* position offset.
*
* @param in The DataInput to get the data from.
* @param offset The position in the memory segment to copy the chunk to.
* @param length The number of bytes to get.
* @throws IOException Thrown, if the DataInput encountered a problem upon reading, such as an
* End-Of-File.
*/
public void put(DataInput in, int offset, int length) throws IOException {
if (address <= addressLimit) {
if (heapMemory != null) {
in.readFully(heapMemory, offset, length);
} else {
while (length >= 8) {
putLongBigEndian(offset, in.readLong());
offset += 8;
length -= 8;
}
while (length > 0) {
put(offset, in.readByte());
offset++;
length--;
}
}
} else {
throw new IllegalStateException("segment has been freed");
}
}
/**
* Bulk get method. Copies {@code numBytes} bytes from this memory segment, starting at position
* {@code offset} to the target {@code ByteBuffer}. The bytes will be put into the target buffer
* starting at the buffer's current position. If this method attempts to write more bytes than
* the target byte buffer has remaining (with respect to {@link ByteBuffer#remaining()}), this
* method will cause a {@link java.nio.BufferOverflowException}.
*
* @param offset The position where the bytes are started to be read from in this memory
* segment.
* @param target The ByteBuffer to copy the bytes to.
* @param numBytes The number of bytes to copy.
* @throws IndexOutOfBoundsException If the offset is invalid, or this segment does not contain
* the given number of bytes (starting from offset), or the target byte buffer does not have
* enough space for the bytes.
* @throws ReadOnlyBufferException If the target buffer is read-only.
*/
    public void get(int offset, ByteBuffer target, int numBytes) {
        // Single OR of all three values is negative iff any of them is negative,
        // which also catches int overflow of (offset + numBytes).
        if ((offset | numBytes | (offset + numBytes)) < 0) {
            throw new IndexOutOfBoundsException();
        }
        if (target.isReadOnly()) {
            throw new ReadOnlyBufferException();
        }
        final int targetOffset = target.position();
        final int remaining = target.remaining();
        if (remaining < numBytes) {
            throw new BufferOverflowException();
        }
        if (target.isDirect()) {
            // Direct buffer: copy straight into its off-heap memory via Unsafe.
            final long targetPointer = getByteBufferAddress(target) + targetOffset;
            final long sourcePointer = address + offset;
            if (sourcePointer <= addressLimit - numBytes) {
                // heapMemory may be null (off-heap segment); copyMemory handles both.
                UNSAFE.copyMemory(heapMemory, sourcePointer, null, targetPointer, numBytes);
                target.position(targetOffset + numBytes);
            } else if (address > addressLimit) {
                throw new IllegalStateException("segment has been freed");
            } else {
                throw new IndexOutOfBoundsException();
            }
        } else if (target.hasArray()) {
            // move directly into the byte array
            get(offset, target.array(), targetOffset + target.arrayOffset(), numBytes);
            // this must be after the get() call to ensure that the byte buffer is not
            // modified in case the call fails
            target.position(targetOffset + numBytes);
        } else {
            // other types of byte buffers
            throw new IllegalArgumentException(
                    "The target buffer is not direct, and has no array.");
        }
    }
/**
* Bulk put method. Copies {@code numBytes} bytes from the given {@code ByteBuffer}, into this
* memory segment. The bytes will be read from the target buffer starting at the buffer's
* current position, and will be written to this memory segment starting at {@code offset}. If
* this method attempts to read more bytes than the target byte buffer has remaining (with
* respect to {@link ByteBuffer#remaining()}), this method will cause a {@link
* java.nio.BufferUnderflowException}.
*
* @param offset The position where the bytes are started to be written to in this memory
* segment.
* @param source The ByteBuffer to copy the bytes from.
* @param numBytes The number of bytes to copy.
* @throws IndexOutOfBoundsException If the offset is invalid, or the source buffer does not
* contain the given number of bytes, or this segment does not have enough space for the
* bytes (counting from offset).
*/
    public void put(int offset, ByteBuffer source, int numBytes) {
        // Single OR of all three values is negative iff any of them is negative,
        // which also catches int overflow of (offset + numBytes).
        if ((offset | numBytes | (offset + numBytes)) < 0) {
            throw new IndexOutOfBoundsException();
        }
        final int sourceOffset = source.position();
        final int remaining = source.remaining();
        if (remaining < numBytes) {
            throw new BufferUnderflowException();
        }
        if (source.isDirect()) {
            // Direct buffer: copy straight from its off-heap memory via Unsafe.
            final long sourcePointer = getByteBufferAddress(source) + sourceOffset;
            final long targetPointer = address + offset;
            if (targetPointer <= addressLimit - numBytes) {
                // heapMemory may be null (off-heap segment); copyMemory handles both.
                UNSAFE.copyMemory(null, sourcePointer, heapMemory, targetPointer, numBytes);
                source.position(sourceOffset + numBytes);
            } else if (address > addressLimit) {
                throw new IllegalStateException("segment has been freed");
            } else {
                throw new IndexOutOfBoundsException();
            }
        } else if (source.hasArray()) {
            // move directly into the byte array
            put(offset, source.array(), sourceOffset + source.arrayOffset(), numBytes);
            // this must be after the put() call to ensure that the byte buffer is not
            // modified in case the call fails
            source.position(sourceOffset + numBytes);
        } else {
            // Fallback for buffers that are neither direct nor array-backed:
            // byte-by-byte copy; this advances source's position as a side effect.
            for (int i = 0; i < numBytes; i++) {
                put(offset++, source.get());
            }
        }
    }
/**
* Bulk copy method. Copies {@code numBytes} bytes from this memory segment, starting at
* position {@code offset} to the target memory segment. The bytes will be put into the target
* segment starting at position {@code targetOffset}.
*
* @param offset The position where the bytes are started to be read from in this memory
* segment.
* @param target The memory segment to copy the bytes to.
* @param targetOffset The position in the target memory segment to copy the chunk to.
* @param numBytes The number of bytes to copy.
* @throws IndexOutOfBoundsException If either of the offsets is invalid, or the source segment
* does not contain the given number of bytes (starting from offset), or the target segment
* does not have enough space for the bytes (counting from targetOffset).
*/
    public void copyTo(int offset, MemorySegment target, int targetOffset, int numBytes) {
        final byte[] thisHeapRef = this.heapMemory;
        final byte[] otherHeapRef = target.heapMemory;
        final long thisPointer = this.address + offset;
        final long otherPointer = target.address + targetOffset;
        // The OR of the three args is negative iff any one of them is negative;
        // the two range checks then bound both the source and target regions.
        if ((numBytes | offset | targetOffset) >= 0
                && thisPointer <= this.addressLimit - numBytes
                && otherPointer <= target.addressLimit - numBytes) {
            // copyMemory treats (ref, pointer) uniformly for heap (non-null array
            // base + offset) and off-heap (null base + absolute address) segments.
            UNSAFE.copyMemory(thisHeapRef, thisPointer, otherHeapRef, otherPointer, numBytes);
        } else if (this.address > this.addressLimit) {
            throw new IllegalStateException("this memory segment has been freed.")
;
        } else if (target.address > target.addressLimit) {
            throw new IllegalStateException("target memory segment has been freed.");
        } else {
            throw new IndexOutOfBoundsException(
                    String.format(
                            "offset=%d, targetOffset=%d, numBytes=%d, address=%d, targetAddress=%d",
                            offset, targetOffset, numBytes, this.address, target.address));
        }
    }
/**
* Bulk copy method. Copies {@code numBytes} bytes to target unsafe object and pointer. NOTE:
* This is an unsafe method, no check here, please be careful.
*
* @param offset The position where the bytes are started to be read from in this memory
* segment.
* @param target The unsafe memory to copy the bytes to.
* @param targetPointer The position in the target unsafe memory to copy the chunk to.
* @param numBytes The number of bytes to copy.
* @throws IndexOutOfBoundsException If the source segment does not contain the given number of
* bytes (starting from offset).
*/
    public void copyToUnsafe(int offset, Object target, int targetPointer, int numBytes) {
        final long thisPointer = this.address + offset;
        // Only the source's upper bound is validated; negative offset/numBytes and the
        // target's bounds are NOT checked, per the "unsafe" contract in the javadoc.
        if (thisPointer + numBytes > addressLimit) {
            throw new IndexOutOfBoundsException(
                    String.format(
                            "offset=%d, numBytes=%d, address=%d", offset, numBytes, this.address));
        }
        UNSAFE.copyMemory(this.heapMemory, thisPointer, target, targetPointer, numBytes);
    }
/**
* Bulk copy method. Copies {@code numBytes} bytes from source unsafe object and pointer. NOTE:
* This is an unsafe method, no check here, please be careful.
*
* @param offset The position where the bytes are started to be write in this memory segment.
* @param source The unsafe memory to copy the bytes from.
* @param sourcePointer The position in the source unsafe memory to copy the chunk from.
* @param numBytes The number of bytes to copy.
* @throws IndexOutOfBoundsException If this segment can not contain the given number of bytes
* (starting from offset).
*/
    public void copyFromUnsafe(int offset, Object source, int sourcePointer, int numBytes) {
        final long thisPointer = this.address + offset;
        // Only this segment's upper bound is validated; negative offset/numBytes and the
        // source's bounds are NOT checked, per the "unsafe" contract in the javadoc.
        if (thisPointer + numBytes > addressLimit) {
            throw new IndexOutOfBoundsException(
                    String.format(
                            "offset=%d, numBytes=%d, address=%d", offset, numBytes, this.address));
        }
        UNSAFE.copyMemory(source, sourcePointer, this.heapMemory, thisPointer, numBytes);
    }
// -------------------------------------------------------------------------
// Comparisons & Swapping
// -------------------------------------------------------------------------
/**
* Compares two memory segment regions.
*
* @param seg2 Segment to compare this segment with
* @param offset1 Offset of this segment to start comparing
* @param offset2 Offset of seg2 to start comparing
* @param len Length of the compared memory region
* @return 0 if equal, -1 if seg1 < seg2, 1 otherwise
*/
public int compare(MemorySegment seg2, int offset1, int offset2, int len) {
while (len >= 8) {
long l1 = this.getLongBigEndian(offset1);
long l2 = seg2.getLongBigEndian(offset2);
if (l1 != l2) {
return (l1 < l2) ^ (l1 < 0) ^ (l2 < 0) ? -1 : 1;
}
offset1 += 8;
offset2 += 8;
len -= 8;
}
while (len > 0) {
int b1 = this.get(offset1) & 0xff;
int b2 = seg2.get(offset2) & 0xff;
int cmp = b1 - b2;
if (cmp != 0) {
return cmp;
}
offset1++;
offset2++;
len--;
}
return 0;
}
/**
* Compares two memory segment regions with different length.
*
* @param seg2 Segment to compare this segment with
* @param offset1 Offset of this segment to start comparing
* @param offset2 Offset of seg2 to start comparing
* @param len1 Length of this memory region to compare
* @param len2 Length of seg2 to compare
* @return 0 if equal, -1 if seg1 < seg2, 1 otherwise
*/
public int compare(MemorySegment seg2, int offset1, int offset2, int len1, int len2) {
final int minLength = Math.min(len1, len2);
int c = compare(seg2, offset1, offset2, minLength);
return c == 0 ? (len1 - len2) : c;
}
    /**
     * Swaps bytes between two memory segments, using the given auxiliary buffer.
     *
     * @param tempBuffer The auxiliary buffer in which to put data during triangle swap; must be at
     *     least {@code len} bytes long.
     * @param seg2 Segment to swap bytes with
     * @param offset1 Offset of this segment to start swapping
     * @param offset2 Offset of seg2 to start swapping
     * @param len Length of the swapped memory region
     */
    public void swapBytes(
            byte[] tempBuffer, MemorySegment seg2, int offset1, int offset2, int len) {
        // Single combined non-negativity check: any negative operand (including a too-small
        // temp buffer, via tempBuffer.length - len) makes the OR negative and skips the swap.
        if ((offset1 | offset2 | len | (tempBuffer.length - len)) >= 0) {
            final long thisPos = this.address + offset1;
            final long otherPos = seg2.address + offset2;
            // Both regions must lie fully inside their live segments.
            if (thisPos <= this.addressLimit - len && otherPos <= seg2.addressLimit - len) {
                // Triangle swap via the caller-supplied scratch buffer:
                // this -> temp buffer
                UNSAFE.copyMemory(
                        this.heapMemory, thisPos, tempBuffer, BYTE_ARRAY_BASE_OFFSET, len);
                // other -> this
                UNSAFE.copyMemory(seg2.heapMemory, otherPos, this.heapMemory, thisPos, len);
                // temp buffer -> other
                UNSAFE.copyMemory(
                        tempBuffer, BYTE_ARRAY_BASE_OFFSET, seg2.heapMemory, otherPos, len);
                return;
            } else if (this.address > this.addressLimit) {
                // address > addressLimit is the freed-segment marker.
                throw new IllegalStateException("this memory segment has been freed.");
            } else if (seg2.address > seg2.addressLimit) {
                throw new IllegalStateException("other memory segment has been freed.");
            }
        }
        // index is in fact invalid
        throw new IndexOutOfBoundsException(
                String.format(
                        "offset1=%d, offset2=%d, len=%d, bufferSize=%d, address1=%d, address2=%d",
                        offset1, offset2, len, tempBuffer.length, this.address, seg2.address));
    }
/**
* Equals two memory segment regions.
*
* @param seg2 Segment to equal this segment with
* @param offset1 Offset of this segment to start equaling
* @param offset2 Offset of seg2 to start equaling
* @param length Length of the equaled memory region
* @return true if equal, false otherwise
*/
public boolean equalTo(MemorySegment seg2, int offset1, int offset2, int length) {
int i = 0;
// we assume unaligned accesses are supported.
// Compare 8 bytes at a time.
while (i <= length - 8) {
if (getLong(offset1 + i) != seg2.getLong(offset2 + i)) {
return false;
}
i += 8;
}
// cover the last (length % 8) elements.
while (i < length) {
if (get(offset1 + i) != seg2.get(offset2 + i)) {
return false;
}
i += 1;
}
return true;
}
    /**
     * Gets the underlying heap byte array, if any.
     *
     * @return The backing byte array if this segment is on the heap, or {@code null} if the memory
     *     is off-heap.
     */
    public byte[] getHeapMemory() {
        return heapMemory;
    }
/**
* Applies the given process function on a {@link ByteBuffer} that represents this entire
* segment.
*
* <p>Note: The {@link ByteBuffer} passed into the process function is temporary and could
* become invalid after the processing. Thus, the process function should not try to keep any
* reference of the {@link ByteBuffer}.
*
* @param processFunction to be applied to the segment as {@link ByteBuffer}.
* @return the value that the process function returns.
*/
public <T> T processAsByteBuffer(Function<ByteBuffer, T> processFunction) {
return Preconditions.checkNotNull(processFunction).apply(wrapInternal(0, size));
}
/**
* Supplies a {@link ByteBuffer} that represents this entire segment to the given process
* consumer.
*
* <p>Note: The {@link ByteBuffer} passed into the process consumer is temporary and could
* become invalid after the processing. Thus, the process consumer should not try to keep any
* reference of the {@link ByteBuffer}.
*
* @param processConsumer to accept the segment as {@link ByteBuffer}.
*/
public void processAsByteBuffer(Consumer<ByteBuffer> processConsumer) {
Preconditions.checkNotNull(processConsumer).accept(wrapInternal(0, size));
}
}
| MemorySegment |
java | elastic__elasticsearch | x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationValidation.java | {
"start": 1141,
"end": 1261
} | class ____ used to define and handle specific validation rules or requirements within a configuration context.
*/
public | is |
java | processing__processing4 | app/src/processing/app/syntax/InputHandler.java | {
"start": 13616,
"end": 14318
} | class ____ implements ActionListener {
public void actionPerformed(ActionEvent evt) {
JEditTextArea textArea = getTextArea(evt);
if (!textArea.isEditable()) {
textArea.getToolkit().beep();
return;
}
if (textArea.getSelectionStart() != textArea.getSelectionStop()) {
textArea.setSelectedText("");
} else {
int caret = textArea.getCaretPosition();
if (caret == 0) {
textArea.getToolkit().beep();
return;
}
try {
textArea.getDocument().remove(caret - 1,1);
} catch(BadLocationException bl) {
bl.printStackTrace();
}
}
}
}
public static | backspace |
java | FasterXML__jackson-core | src/main/java/tools/jackson/core/util/JsonGeneratorDelegate.java | {
"start": 228,
"end": 17498
} | class ____ extends JsonGenerator
{
/**
* Delegate object that method calls are delegated to.
*/
protected JsonGenerator delegate;
/**
* Whether copy methods
* ({@link #copyCurrentEvent}, {@link #copyCurrentStructure},
* {@link #writeTree} and {@link #writePOJO})
* are to be called (true), or handled by this object (false).
*/
protected boolean delegateCopyMethods;
/*
/**********************************************************************
/* Construction, initialization
/**********************************************************************
*/
public JsonGeneratorDelegate(JsonGenerator d) {
this(d, true);
}
/**
* @param d Underlying generator to delegate calls to
* @param delegateCopyMethods Flag assigned to <code>delagateCopyMethod</code>
* and which defines whether copy methods are handled locally (false), or
* delegated to configured
*/
public JsonGeneratorDelegate(JsonGenerator d, boolean delegateCopyMethods) {
delegate = d;
this.delegateCopyMethods = delegateCopyMethods;
}
@Override
public Object currentValue() {
return delegate.currentValue();
}
@Override
public void assignCurrentValue(Object v) {
delegate.assignCurrentValue(v);
}
/*
/**********************************************************************
/* Public API, metadata
/**********************************************************************
*/
@Override public FormatSchema getSchema() { return delegate.getSchema(); }
@Override public Version version() { return delegate.version(); }
@Override public Object streamWriteOutputTarget() { return delegate.streamWriteOutputTarget(); }
@Override public int streamWriteOutputBuffered() { return delegate.streamWriteOutputBuffered(); }
/*
/**********************************************************************
/* Public API, capability introspection
/**********************************************************************
*/
@Override
public boolean canWriteTypeId() { return delegate.canWriteTypeId(); }
@Override
public boolean canWriteObjectId() { return delegate.canWriteObjectId(); }
@Override
public boolean canOmitProperties() { return delegate.canOmitProperties(); }
@Override
public boolean has(StreamWriteCapability capability) { return delegate.has(capability); }
@Override
public JacksonFeatureSet<StreamWriteCapability> streamWriteCapabilities() {
return delegate.streamWriteCapabilities();
}
/*
/**********************************************************************
/* Public API, configuration
/**********************************************************************
*/
@Override
public boolean isEnabled(StreamWriteFeature f) { return delegate.isEnabled(f); }
@Override
public int streamWriteFeatures() { return delegate.streamWriteFeatures(); }
@Override
public JsonGenerator configure(StreamWriteFeature f, boolean state) {
delegate.configure(f, state);
return this;
}
/*
/**********************************************************************
/* Configuring generator
/**********************************************************************
*/
@Override
public int getHighestNonEscapedChar() { return delegate.getHighestNonEscapedChar(); }
@Override
public CharacterEscapes getCharacterEscapes() { return delegate.getCharacterEscapes(); }
@Override
public PrettyPrinter getPrettyPrinter() { return delegate.getPrettyPrinter(); }
/*
/**********************************************************************
/* Public API, write methods, structural
/**********************************************************************
*/
@Override
public JsonGenerator writeStartArray() throws JacksonException {
delegate.writeStartArray();
return this;
}
@Override
public JsonGenerator writeStartArray(Object forValue) throws JacksonException {
delegate.writeStartArray(forValue);
return this;
}
@Override
public JsonGenerator writeStartArray(Object forValue, int size) throws JacksonException {
delegate.writeStartArray(forValue, size);
return this;
}
@Override
public JsonGenerator writeEndArray() throws JacksonException {
delegate.writeEndArray();
return this;
}
@Override
public JsonGenerator writeStartObject() throws JacksonException {
delegate.writeStartObject();
return this;
}
@Override
public JsonGenerator writeStartObject(Object forValue) throws JacksonException {
delegate.writeStartObject(forValue);
return this;
}
@Override
public JsonGenerator writeStartObject(Object forValue, int size) throws JacksonException {
delegate.writeStartObject(forValue, size);
return this;
}
@Override
public JsonGenerator writeEndObject() throws JacksonException {
delegate.writeEndObject();
return this;
}
@Override
public JsonGenerator writeName(String name) throws JacksonException {
delegate.writeName(name);
return this;
}
@Override
public JsonGenerator writeName(SerializableString name) throws JacksonException {
delegate.writeName(name);
return this;
}
@Override
public JsonGenerator writePropertyId(long id) throws JacksonException {
delegate.writePropertyId(id);
return this;
}
@Override
public JsonGenerator writeArray(int[] array, int offset, int length) throws JacksonException {
delegate.writeArray(array, offset, length);
return this;
}
@Override
public JsonGenerator writeArray(long[] array, int offset, int length) throws JacksonException {
delegate.writeArray(array, offset, length);
return this;
}
@Override
public JsonGenerator writeArray(double[] array, int offset, int length) throws JacksonException {
delegate.writeArray(array, offset, length);
return this;
}
@Override
public JsonGenerator writeArray(String[] array, int offset, int length) throws JacksonException {
delegate.writeArray(array, offset, length);
return this;
}
/*
/**********************************************************************
/* Public API, write methods, text/String values
/**********************************************************************
*/
@Override
public JsonGenerator writeString(String text) throws JacksonException {
delegate.writeString(text);
return this;
}
@Override
public JsonGenerator writeString(Reader reader, int len) throws JacksonException {
delegate.writeString(reader, len);
return this;
}
@Override
public JsonGenerator writeString(char[] text, int offset, int len) throws JacksonException {
delegate.writeString(text, offset, len);
return this;
}
@Override
public JsonGenerator writeString(SerializableString text) throws JacksonException {
delegate.writeString(text);
return this;
}
@Override
public JsonGenerator writeRawUTF8String(byte[] text, int offset, int length) throws JacksonException {
delegate.writeRawUTF8String(text, offset, length);
return this;
}
@Override
public JsonGenerator writeUTF8String(byte[] text, int offset, int length) throws JacksonException {
delegate.writeUTF8String(text, offset, length);
return this;
}
/*
/**********************************************************************
/* Public API, write methods, binary/raw content
/**********************************************************************
*/
@Override
public JsonGenerator writeRaw(String text) throws JacksonException {
delegate.writeRaw(text);
return this;
}
@Override
public JsonGenerator writeRaw(String text, int offset, int len) throws JacksonException {
delegate.writeRaw(text, offset, len);
return this;
}
@Override
public JsonGenerator writeRaw(SerializableString raw) throws JacksonException {
delegate.writeRaw(raw);
return this;
}
@Override
public JsonGenerator writeRaw(char[] text, int offset, int len) throws JacksonException {
delegate.writeRaw(text, offset, len);
return this;
}
@Override
public JsonGenerator writeRaw(char c) throws JacksonException {
delegate.writeRaw(c);
return this;
}
@Override
public JsonGenerator writeRawValue(String text) throws JacksonException {
delegate.writeRawValue(text);
return this;
}
@Override
public JsonGenerator writeRawValue(String text, int offset, int len) throws JacksonException {
delegate.writeRawValue(text, offset, len);
return this;
}
@Override
public JsonGenerator writeRawValue(char[] text, int offset, int len) throws JacksonException {
delegate.writeRawValue(text, offset, len);
return this;
}
@Override
public JsonGenerator writeBinary(Base64Variant b64variant, byte[] data, int offset, int len) throws JacksonException {
delegate.writeBinary(b64variant, data, offset, len);
return this;
}
@Override
public int writeBinary(Base64Variant b64variant, InputStream data, int dataLength) throws JacksonException {
return delegate.writeBinary(b64variant, data, dataLength);
}
/*
/**********************************************************************
/* Public API, write methods, other value types
/**********************************************************************
*/
@Override
public JsonGenerator writeNumber(short v) throws JacksonException {
delegate.writeNumber(v);
return this;
}
@Override
public JsonGenerator writeNumber(int v) throws JacksonException {
delegate.writeNumber(v);
return this;
}
@Override
public JsonGenerator writeNumber(long v) throws JacksonException {
delegate.writeNumber(v);
return this;
}
@Override
public JsonGenerator writeNumber(BigInteger v) throws JacksonException {
delegate.writeNumber(v);
return this;
}
@Override
public JsonGenerator writeNumber(double v) throws JacksonException {
delegate.writeNumber(v);
return this;
}
@Override
public JsonGenerator writeNumber(float v) throws JacksonException {
delegate.writeNumber(v);
return this;
}
@Override
public JsonGenerator writeNumber(BigDecimal v) throws JacksonException {
delegate.writeNumber(v);
return this;
}
@Override
public JsonGenerator writeNumber(String encodedValue) throws JacksonException {
delegate.writeNumber(encodedValue);
return this;
}
@Override
public JsonGenerator writeNumber(char[] encodedValueBuffer, int offset, int length) throws JacksonException {
delegate.writeNumber(encodedValueBuffer, offset, length);
return this;
}
@Override
public JsonGenerator writeBoolean(boolean state) throws JacksonException {
delegate.writeBoolean(state);
return this;
}
@Override
public JsonGenerator writeNull() throws JacksonException {
delegate.writeNull();
return this;
}
/*
/**********************************************************************
/* Public API, convenience property-write methods
/**********************************************************************
*/
// 04-Oct-2019, tatu: Reminder: these should NOT be delegated, unless matching
// methods in `FilteringGeneratorDelegate` are re-defined to "split" calls again
// public JsonGenerator writeBinaryProperty(String propName, byte[] data) throws JacksonException {
// public JsonGenerator writeBooleanProperty(String propName, boolean value) throws JacksonException {
// public JsonGenerator writeNullProperty(String propName) throws JacksonException {
// public JsonGenerator writeStringProperty(String propName, String value) throws JacksonException {
// public JsonGenerator writeNumberProperty(String propName, short value) throws JacksonException {
// public JsonGenerator writeArrayPropertyStart(String propName) throws JacksonException {
// public JsonGenerator writeObjectPropertyStart(String propName) throws JacksonException {
// public JsonGenerator writePOJOProperty(String propName, Object pojo) throws JacksonException {
// Sole exception being this method as it is not a "combo" method
@Override
public JsonGenerator writeOmittedProperty(String propName) throws JacksonException {
delegate.writeOmittedProperty(propName);
return this;
}
/*
/**********************************************************************
/* Public API, write methods, Native Ids
/**********************************************************************
*/
@Override
public JsonGenerator writeObjectId(Object id) throws JacksonException {
delegate.writeObjectId(id);
return this;
}
@Override
public JsonGenerator writeObjectRef(Object id) throws JacksonException {
delegate.writeObjectRef(id);
return this;
}
@Override
public JsonGenerator writeTypeId(Object id) throws JacksonException {
delegate.writeTypeId(id);
return this;
}
@Override
public JsonGenerator writeEmbeddedObject(Object object) throws JacksonException {
delegate.writeEmbeddedObject(object);
return this;
}
/*
/**********************************************************************
/* Public API, write methods, serializing Java objects
/**********************************************************************
*/
@Override
public JsonGenerator writePOJO(Object pojo) throws JacksonException {
if (delegateCopyMethods) {
delegate.writePOJO(pojo);
return this;
}
if (pojo == null) {
writeNull();
} else {
objectWriteContext().writeValue(this, pojo);
}
return this;
}
@Override
public JsonGenerator writeTree(TreeNode tree) throws JacksonException {
if (delegateCopyMethods) {
delegate.writeTree(tree);
return this;
}
// As with 'writeObject()', we are not check if write would work
if (tree == null) {
writeNull();
} else {
objectWriteContext().writeTree(this, tree);
}
return this;
}
/*
/**********************************************************************
/* Public API, convenience property write methods
/**********************************************************************
*/
// // These are fine, just delegate to other methods...
/*
/**********************************************************************
/* Public API, copy-through methods
/**********************************************************************
*/
@Override
public void copyCurrentEvent(JsonParser p) throws JacksonException {
if (delegateCopyMethods) delegate.copyCurrentEvent(p);
else super.copyCurrentEvent(p);
}
@Override
public void copyCurrentStructure(JsonParser p) throws JacksonException {
if (delegateCopyMethods) delegate.copyCurrentStructure(p);
else super.copyCurrentStructure(p);
}
/*
/**********************************************************************
/* Public API, context access
/**********************************************************************
*/
@Override public TokenStreamContext streamWriteContext() { return delegate.streamWriteContext(); }
@Override public ObjectWriteContext objectWriteContext() { return delegate.objectWriteContext(); }
/*
/**********************************************************************
/* Public API, buffer handling
/**********************************************************************
*/
@Override public void flush() { delegate.flush(); }
@Override public void close() { delegate.close(); }
/*
/**********************************************************************
/* Closeable implementation
/**********************************************************************
*/
@Override public boolean isClosed() { return delegate.isClosed(); }
/*
/**********************************************************************
/* Extended API
/**********************************************************************
*/
/**
* @return Underlying generator that calls are delegated to
*/
public JsonGenerator delegate() { return delegate; }
}
| JsonGeneratorDelegate |
java | apache__kafka | raft/src/test/java/org/apache/kafka/raft/RaftEventSimulationTest.java | {
"start": 24961,
"end": 25035
} | interface ____ {
void validate();
}
private static | Validation |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/HandlerInstantiationTest.java | {
"start": 2150,
"end": 2524
} | class ____ extends ValueDeserializer<MyBean>
{
public String _prefix = "";
public MyBeanDeserializer(String p) {
_prefix = p;
}
@Override
public MyBean deserialize(JsonParser jp, DeserializationContext ctxt)
{
return new MyBean(_prefix+jp.getString());
}
}
static | MyBeanDeserializer |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/inject/guice/OverridesGuiceInjectableMethodTest.java | {
"start": 5175,
"end": 5247
} | class ____ extends TestClass1 {}
}\
""")
.doTest();
}
}
| TestClass7 |
java | apache__hadoop | hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeFileSystemStatistics.java | {
"start": 1575,
"end": 4015
} | class ____ extends AbstractWasbTestWithTimeout{
@Test
public void test_001_NativeAzureFileSystemMocked() throws Exception {
AzureBlobStorageTestAccount testAccount = AzureBlobStorageTestAccount.createMock();
assumeNotNull(testAccount);
testStatisticsWithAccount(testAccount);
}
@Test
public void test_002_NativeAzureFileSystemPageBlobLive() throws Exception {
Configuration conf = new Configuration();
// Configure the page blob directories key so every file created is a page blob.
conf.set(AzureNativeFileSystemStore.KEY_PAGE_BLOB_DIRECTORIES, "/");
// Configure the atomic rename directories key so every folder will have
// atomic rename applied.
conf.set(AzureNativeFileSystemStore.KEY_ATOMIC_RENAME_DIRECTORIES, "/");
AzureBlobStorageTestAccount testAccount = AzureBlobStorageTestAccount.create(conf);
assumeNotNull(testAccount);
testStatisticsWithAccount(testAccount);
}
@Test
public void test_003_NativeAzureFileSystem() throws Exception {
AzureBlobStorageTestAccount testAccount = AzureBlobStorageTestAccount.create();
assumeNotNull(testAccount);
testStatisticsWithAccount(testAccount);
}
private void testStatisticsWithAccount(AzureBlobStorageTestAccount testAccount) throws Exception {
assumeNotNull(testAccount);
NativeAzureFileSystem fs = testAccount.getFileSystem();
testStatistics(fs);
cleanupTestAccount(testAccount);
}
/**
* When tests are ran in parallel, this tests will fail because
* FileSystem.Statistics is per FileSystem class.
*/
@SuppressWarnings("deprecation")
private void testStatistics(NativeAzureFileSystem fs) throws Exception {
FileSystem.clearStatistics();
FileSystem.Statistics stats = FileSystem.getStatistics("wasb",
NativeAzureFileSystem.class);
assertEquals(0, stats.getBytesRead());
assertEquals(0, stats.getBytesWritten());
Path newFile = new Path("testStats");
writeStringToFile(fs, newFile, "12345678");
assertEquals(8, stats.getBytesWritten());
assertEquals(0, stats.getBytesRead());
String readBack = readStringFromFile(fs, newFile);
assertEquals("12345678", readBack);
assertEquals(8, stats.getBytesRead());
assertEquals(8, stats.getBytesWritten());
assertTrue(fs.delete(newFile, true));
assertEquals(8, stats.getBytesRead());
assertEquals(8, stats.getBytesWritten());
}
}
| ITestNativeFileSystemStatistics |
java | apache__flink | flink-python/src/main/java/org/apache/flink/streaming/api/operators/python/embedded/EmbeddedPythonKeyedCoProcessOperator.java | {
"start": 7236,
"end": 7965
} | class ____ {
private final TimerService timerService;
private TimeDomain timeDomain;
private InternalTimer<K, VoidNamespace> timer;
OnTimerContextImpl(TimerService timerService) {
this.timerService = timerService;
}
public long timestamp() {
return timer.getTimestamp();
}
public TimerService timerService() {
return timerService;
}
public int timeDomain() {
return timeDomain.ordinal();
}
@SuppressWarnings("unchecked")
public Object getCurrentKey() {
return keyConverter.toExternal((K) ((Row) timer.getKey()).getField(0));
}
}
}
| OnTimerContextImpl |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/language/simple/myconverter/MyCustomDate.java | {
"start": 1024,
"end": 1444
} | class ____ {
private final int year;
private final int month;
private final int date;
public MyCustomDate(int year, int month, int date) {
this.year = year;
this.month = month;
this.date = date;
}
public int getYear() {
return year;
}
public int getMonth() {
return month;
}
public int getDate() {
return date;
}
}
| MyCustomDate |
java | apache__flink | flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/expressions/resolver/rules/ResolveCallByArgumentsRule.java | {
"start": 35495,
"end": 35983
} | class ____ implements ModelSemantics {
private final ModelReferenceExpression modelRef;
private TableApiModelSemantics(ModelReferenceExpression modelRef) {
this.modelRef = modelRef;
}
@Override
public DataType inputDataType() {
return modelRef.getInputDataType();
}
@Override
public DataType outputDataType() {
return modelRef.getOutputDataType();
}
}
}
| TableApiModelSemantics |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/AnnotationPositionTest.java | {
"start": 3079,
"end": 3324
} | interface ____ {
String value() default "";
}
""");
@Test
public void nonTypeAnnotation() {
refactoringHelper
.addInputLines(
"Test.java",
"""
| EitherUse |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/injectionstrategy/cdi/_default/CdiDefaultCompileOptionFieldMapperTest.java | {
"start": 1226,
"end": 2059
} | class ____ {
@RegisterExtension
final GeneratedSource generatedSource = new GeneratedSource();
@ProcessorTest
public void shouldHaveFieldInjection() {
generatedSource.forMapper( CustomerCdiDefaultCompileOptionFieldMapper.class )
.content()
.contains( "import javax.enterprise.context.ApplicationScoped;" )
.contains( "import javax.inject.Inject;" )
.contains( "@Inject" + lineSeparator() + " private GenderCdiDefaultCompileOptionFieldMapper" )
.contains( "@ApplicationScoped" + lineSeparator() + "public class" )
.doesNotContain( "public CustomerCdiDefaultCompileOptionFieldMapperImpl(" )
.doesNotContain( "jakarta.inject" )
.doesNotContain( "jakarta.enterprise" );
}
}
| CdiDefaultCompileOptionFieldMapperTest |
java | apache__flink | flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/typeutils/SortedMapSerializerSnapshot.java | {
"start": 1660,
"end": 4860
} | class ____<K, V> implements TypeSerializerSnapshot<SortedMap<K, V>> {
private Comparator<K> comparator;
private NestedSerializersSnapshotDelegate nestedSerializersSnapshotDelegate;
private static final int CURRENT_VERSION = 3;
@SuppressWarnings("unused")
public SortedMapSerializerSnapshot() {
// this constructor is used when restoring from a checkpoint/savepoint.
}
SortedMapSerializerSnapshot(SortedMapSerializer<K, V> sortedMapSerializer) {
this.comparator = sortedMapSerializer.getComparator();
TypeSerializer[] typeSerializers =
new TypeSerializer<?>[] {
sortedMapSerializer.getKeySerializer(), sortedMapSerializer.getValueSerializer()
};
this.nestedSerializersSnapshotDelegate =
new NestedSerializersSnapshotDelegate(typeSerializers);
}
@Override
public int getCurrentVersion() {
return CURRENT_VERSION;
}
@Override
public void writeSnapshot(DataOutputView out) throws IOException {
checkState(comparator != null, "Comparator cannot be null.");
InstantiationUtil.serializeObject(new DataOutputViewStream(out), comparator);
nestedSerializersSnapshotDelegate.writeNestedSerializerSnapshots(out);
}
@Override
public void readSnapshot(int readVersion, DataInputView in, ClassLoader userCodeClassLoader)
throws IOException {
try {
comparator =
InstantiationUtil.deserializeObject(
new DataInputViewStream(in), userCodeClassLoader);
} catch (ClassNotFoundException e) {
throw new IOException(e);
}
this.nestedSerializersSnapshotDelegate =
NestedSerializersSnapshotDelegate.readNestedSerializerSnapshots(
in, userCodeClassLoader);
}
@Override
public SortedMapSerializer restoreSerializer() {
TypeSerializer<?>[] nestedSerializers =
nestedSerializersSnapshotDelegate.getRestoredNestedSerializers();
@SuppressWarnings("unchecked")
TypeSerializer<K> keySerializer = (TypeSerializer<K>) nestedSerializers[0];
@SuppressWarnings("unchecked")
TypeSerializer<V> valueSerializer = (TypeSerializer<V>) nestedSerializers[1];
return new SortedMapSerializer(comparator, keySerializer, valueSerializer);
}
@Override
public TypeSerializerSchemaCompatibility<SortedMap<K, V>> resolveSchemaCompatibility(
TypeSerializerSnapshot<SortedMap<K, V>> oldSerializerSnapshot) {
if (!(oldSerializerSnapshot instanceof SortedMapSerializerSnapshot)) {
return TypeSerializerSchemaCompatibility.incompatible();
}
SortedMapSerializerSnapshot<K, V> oldSortedMapSerializerSnapshot =
(SortedMapSerializerSnapshot<K, V>) oldSerializerSnapshot;
if (!comparator.equals(oldSortedMapSerializerSnapshot.comparator)) {
return TypeSerializerSchemaCompatibility.incompatible();
} else {
return TypeSerializerSchemaCompatibility.compatibleAsIs();
}
}
}
| SortedMapSerializerSnapshot |
java | apache__dubbo | dubbo-compatible/src/test/java/org/apache/dubbo/metadata/annotation/processing/util/LoggerUtilsTest.java | {
"start": 1189,
"end": 1587
} | class ____ {
@Test
void testLogger() {
assertNotNull(LoggerUtils.LOGGER);
}
@Test
void testInfo() {
info("Hello,World");
info("Hello,%s", "World");
info("%s,%s", "Hello", "World");
}
@Test
void testWarn() {
warn("Hello,World");
warn("Hello,%s", "World");
warn("%s,%s", "Hello", "World");
}
}
| LoggerUtilsTest |
java | quarkusio__quarkus | extensions/smallrye-reactive-messaging/runtime/src/main/java/io/quarkus/smallrye/reactivemessaging/runtime/ContextualEmitterImpl.java | {
"start": 1123,
"end": 4852
} | class ____<T> extends AbstractEmitter<T> implements ContextualEmitter<T> {
public ContextualEmitterImpl(EmitterConfiguration configuration, long defaultBufferSize) {
super(configuration, defaultBufferSize);
}
@Override
public void sendAndAwait(T payload) {
sendMessage(Message.of(payload)).await().indefinitely();
}
@Override
public Cancellable sendAndForget(T payload) {
return send(payload).subscribe().with(x -> {
// Do nothing.
}, ProviderLogging.log::failureEmittingMessage);
}
@Override
public Uni<Void> send(T payload) {
return sendMessage(Message.of(payload));
}
@Override
public <M extends Message<? extends T>> void sendMessageAndAwait(M msg) {
sendMessage(msg).await().indefinitely();
}
@Override
public <M extends Message<? extends T>> Cancellable sendMessageAndForget(M msg) {
return sendMessage(msg).subscribe().with(x -> {
// Do nothing.
}, ProviderLogging.log::failureEmittingMessage);
}
@Override
@CheckReturnValue
public <M extends Message<? extends T>> Uni<Void> sendMessage(M msg) {
if (msg == null) {
throw ex.illegalArgumentForNullValue();
}
// If we are running on a Vert.x context, we need to capture the context to switch back
// during the emission.
Context context = Vertx.currentContext();
// context propagation capture and duplicate the context
var msgUni = Uni.createFrom().item(() -> createContextualMessage((Message<? extends T>) msg, context));
if (context != null) {
msgUni = msgUni.emitOn(r -> context.runOnContext(x -> r.run()));
}
// emit the message, skip context propagation as it is unnecessary here
Uni<Void> uni = transformToUni(msgUni, message -> ContextualEmitterImpl.emitter(e -> {
try {
emit(message
.withAck(() -> {
e.complete(null);
return msg.ack();
})
.withNack(t -> {
e.fail(t);
return msg.nack(t);
}));
} catch (Exception t) {
// Capture synchronous exception and nack the message.
msg.nack(t);
throw t;
}
}));
// switch back to the caller context
if (context != null) {
return uni.emitOn(r -> context.runOnContext(x -> r.run()));
} else {
return uni;
}
}
private static <T, M extends Message<T>> Message<T> createContextualMessage(M msg, Context context) {
if (context == null) {
// No context, return the message with a new context as is.
return ContextAwareMessage.withContextMetadata(msg);
} else {
// create new context and copy local data from previous context
ContextInternal internal = (ContextInternal) context;
ContextInternal newCtx = internal.duplicate();
newCtx.localContextData().putAll(internal.localContextData());
return msg.addMetadata(new LocalContextMetadata(newCtx));
}
}
public static <T> Uni<T> emitter(Consumer<UniEmitter<? super T>> emitter) {
return Infrastructure.onUniCreation(new UniCreateWithEmitter<>(emitter));
}
public static <T, R> Uni<R> transformToUni(Uni<T> upstream, Function<? super T, Uni<? extends R>> mapper) {
return Infrastructure.onUniCreation(new UniOnItemTransformToUni<>(upstream, mapper));
}
}
| ContextualEmitterImpl |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/test/scheduling/SpeculativeExecutionITCase.java | {
"start": 33018,
"end": 34802
} | class ____
implements OutputFormat<Long>, FinalizeOnMaster, SupportsConcurrentExecutionAttempts {
private static final long serialVersionUID = 1L;
private static volatile boolean foundSpeculativeAttempt;
private int taskNumber;
private boolean taskFailed;
private final Map<Long, Long> numberCountResult = new HashMap<>();
@Override
public void configure(Configuration parameters) {}
@Override
public void open(InitializationContext context) throws IOException {
taskNumber = context.getTaskNumber();
}
@Override
public void writeRecord(Long value) throws IOException {
try {
numberCountResult.merge(value, 1L, Long::sum);
if (taskNumber == 0) {
maybeSleep();
}
} catch (Throwable t) {
taskFailed = true;
}
}
@Override
public void close() throws IOException {
if (!taskFailed) {
numberCountResults.put(taskNumber, numberCountResult);
}
}
@Override
public void finalizeGlobal(FinalizationContext context) throws IOException {
for (int i = 0; i < context.getParallelism(); i++) {
if (context.getFinishedAttempt(i) != 0) {
foundSpeculativeAttempt = true;
}
}
}
}
private static void maybeSleep() {
if (slowTaskCounter.getAndDecrement() > 0) {
try {
Thread.sleep(Integer.MAX_VALUE);
} catch (Exception e) {
throw new RuntimeException();
}
}
}
}
| DummySpeculativeOutputFormat |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/io/network/api/serialization/RecordDeserializer.java | {
"start": 1138,
"end": 1253
} | interface ____<T extends IOReadableWritable> {
/** Status of the deserialization result. */
| RecordDeserializer |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-jackson/deployment/src/test/java/io/quarkus/resteasy/reactive/jackson/deployment/test/AbstractNonEmptySerializationTest.java | {
"start": 509,
"end": 1653
} | class ____ implements ObjectMapperCustomizer {
@Override
public void customize(ObjectMapper objectMapper) {
objectMapper
.enable(DeserializationFeature.FAIL_ON_NULL_CREATOR_PROPERTIES)
.disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES)
.setSerializationInclusion(JsonInclude.Include.NON_EMPTY);
}
}
@Test
public void testObject() {
RestAssured.get("/json-include/my-object")
.then()
.statusCode(200)
.contentType("application/json")
.body("name", Matchers.equalTo("name"))
.body("description", Matchers.equalTo("description"))
.body("map.test", Matchers.equalTo(1))
.body("strings[0]", Matchers.equalTo("test"));
}
@Test
public void testEmptyObject() {
RestAssured.get("/json-include/my-object-empty")
.then()
.statusCode(200)
.contentType("application/json")
.body(Matchers.is("{}"));
}
}
| NonEmptyObjectMapperCustomizer |
java | apache__maven | api/maven-api-core/src/main/java/org/apache/maven/api/cache/RequestCacheFactory.java | {
"start": 1169,
"end": 1464
} | interface ____ {
/**
* Creates a new RequestCache instance.
* The created cache should be configured according to the current Maven session
* and environment settings.
*
* @return A new RequestCache instance
*/
RequestCache createCache();
}
| RequestCacheFactory |
java | eclipse-vertx__vert.x | vertx-core/src/main/java/io/vertx/core/Completable.java | {
"start": 667,
"end": 2163
} | interface ____<T> {
/**
* Set the result. The instance will be marked as succeeded and completed.
* <p/>
*
* @param result the result
* @throws IllegalStateException when this instance is already completed or failed
*/
default void succeed(T result) {
complete(result, null);
}
/**
* Shortcut for {@code succeed(null)}
*
* @throws IllegalStateException when this instance is already completed or failed
*/
default void succeed() {
complete(null, null);
}
/**
* Set the failure. This instance will be marked as failed and completed.
*
* @param failure the failure
* @throws IllegalStateException when this instance is already completed or failed
*/
default void fail(Throwable failure) {
complete(null, failure);
}
/**
* Calls {@link #fail(Throwable)} with the {@code message}.
*
* @param message the failure message
* @throws IllegalStateException when this instance is already completed or failed
*/
default void fail(String message) {
complete(null, new NoStackTraceThrowable(message));
}
/**
* Complete this instance
*
* <ul>
* <li>when {@code failure} is {@code null}, a success is signaled</li>
* <li>otherwise a failure is signaled</li>
* </ul>
*
* @param result the result
* @param failure the failure
* @throws IllegalStateException when this instance is already completed
*/
void complete(T result, Throwable failure);
}
| Completable |
java | spring-projects__spring-boot | buildSrc/src/main/java/org/springframework/boot/build/bom/bomr/VersionOption.java | {
"start": 1495,
"end": 1872
} | class ____ extends VersionOption {
private final VersionAlignment alignedWith;
AlignedVersionOption(DependencyVersion version, VersionAlignment alignedWith) {
super(version);
this.alignedWith = alignedWith;
}
@Override
public String toString() {
return super.toString() + " (aligned with " + this.alignedWith + ")";
}
}
static final | AlignedVersionOption |
java | apache__hadoop | hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/types/yarn/PersistencePolicies.java | {
"start": 991,
"end": 1534
} | interface ____ {
/**
* The record persists until removed manually: {@value}.
*/
String PERMANENT = "permanent";
/**
* Remove when the YARN application defined in the id field
* terminates: {@value}.
*/
String APPLICATION = "application";
/**
* Remove when the current YARN application attempt ID finishes: {@value}.
*/
String APPLICATION_ATTEMPT = "application-attempt";
/**
* Remove when the YARN container in the ID field finishes: {@value}
*/
String CONTAINER = "container";
}
| PersistencePolicies |
java | elastic__elasticsearch | x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/InferenceUpgradeTestCase.java | {
"start": 1057,
"end": 6648
} | class ____ extends ParameterizedRollingUpgradeTestCase {
static final String MODELS_RENAMED_TO_ENDPOINTS_FEATURE = "gte_v8.15.0";
public InferenceUpgradeTestCase(@Name("upgradedNodes") int upgradedNodes) {
super(upgradedNodes);
}
@ClassRule
public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
.distribution(DistributionType.DEFAULT)
.version(getOldClusterVersion(), isOldClusterDetachedVersion())
.nodes(NODE_NUM)
.setting("xpack.security.enabled", "false")
.setting("xpack.license.self_generated.type", "trial")
.build();
@Override
protected ElasticsearchCluster getUpgradeCluster() {
return cluster;
}
protected static String getUrl(MockWebServer webServer) {
return format("http://%s:%s", webServer.getHostName(), webServer.getPort());
}
protected void delete(String inferenceId, TaskType taskType) throws IOException {
var request = new Request("DELETE", Strings.format("_inference/%s/%s", taskType, inferenceId));
var response = client().performRequest(request);
assertOK(response);
}
protected void delete(String inferenceId) throws IOException {
var request = new Request("DELETE", Strings.format("_inference/%s", inferenceId));
var response = client().performRequest(request);
assertOK(response);
}
protected Map<String, Object> getAll() throws IOException {
var request = new Request("GET", "_inference/_all");
var response = client().performRequest(request);
assertOK(response);
return entityAsMap(response);
}
protected Map<String, Object> get(String inferenceId) throws IOException {
var endpoint = Strings.format("_inference/%s", inferenceId);
var request = new Request("GET", endpoint);
var response = client().performRequest(request);
assertOK(response);
return entityAsMap(response);
}
protected Map<String, Object> get(TaskType taskType, String inferenceId) throws IOException {
var endpoint = Strings.format("_inference/%s/%s", taskType, inferenceId);
var request = new Request("GET", endpoint);
var response = client().performRequest(request);
assertOK(response);
return entityAsMap(response);
}
@SuppressWarnings("unchecked")
protected Map<String, Map<String, Object>> getMinimalConfigs() throws IOException {
var endpoint = "_cluster/state?filter_path=metadata.model_registry";
var request = new Request("GET", endpoint);
var response = client().performRequest(request);
assertOK(response);
return (Map<String, Map<String, Object>>) XContentMapValues.extractValue("metadata.model_registry.models", entityAsMap(response));
}
protected Map<String, Object> inference(String inferenceId, TaskType taskType, String input) throws IOException {
var endpoint = Strings.format("_inference/%s/%s", taskType, inferenceId);
var request = new Request("POST", endpoint);
request.setJsonEntity("{\"input\": [" + '"' + input + '"' + "]}");
var response = client().performRequest(request);
assertOK(response);
return entityAsMap(response);
}
protected Map<String, Object> rerank(String inferenceId, List<String> inputs, String query) throws IOException {
var endpoint = Strings.format("_inference/rerank/%s", inferenceId);
var request = new Request("POST", endpoint);
StringBuilder body = new StringBuilder("{").append("\"query\":\"").append(query).append("\",").append("\"input\":[");
for (int i = 0; i < inputs.size(); i++) {
body.append("\"").append(inputs.get(i)).append("\"");
if (i < inputs.size() - 1) {
body.append(",");
}
}
body.append("]}");
request.setJsonEntity(body.toString());
var response = client().performRequest(request);
assertOK(response);
return entityAsMap(response);
}
protected void put(String inferenceId, String modelConfig, TaskType taskType) throws IOException {
String endpoint = Strings.format("_inference/%s/%s?error_trace", taskType, inferenceId);
var request = new Request("PUT", endpoint);
request.setJsonEntity(modelConfig);
var response = client().performRequest(request);
assertOKAndConsume(response);
}
@SuppressWarnings("unchecked")
protected void deleteAll() throws IOException {
var endpoints = (List<Map<String, Object>>) get(TaskType.ANY, "*").get("endpoints");
for (var endpoint : endpoints) {
try {
delete((String) endpoint.get("inference_id"));
} catch (Exception exc) {
assertThat(exc.getMessage(), containsString("reserved inference endpoint"));
}
}
}
@SuppressWarnings("unchecked")
// in version 8.15, there was a breaking change where "models" was renamed to "endpoints"
LinkedList<Map<String, Object>> getConfigsWithBreakingChangeHandling(TaskType testTaskType, String oldClusterId) throws IOException {
var response = get(testTaskType, oldClusterId);
LinkedList<Map<String, Object>> configs;
configs = new LinkedList<>((List<Map<String, Object>>) response.getOrDefault("endpoints", List.of()));
configs.addAll((List<Map<String, Object>>) response.getOrDefault("models", List.of()));
return configs;
}
}
| InferenceUpgradeTestCase |
java | quarkusio__quarkus | independent-projects/qute/core/src/main/java/io/quarkus/qute/WhenSectionHelper.java | {
"start": 10648,
"end": 14706
} | class ____ {
private final SectionBlock block;
private final CaseOperator caseOperator;
private final List<Expression> params;
public CaseBlock(SectionBlock block, SectionInitContext context) {
this.block = block;
this.caseOperator = CaseOperator.from(block.parameters.values());
ImmutableList.Builder<Expression> builder = ImmutableList.builder();
Iterator<String> iterator = block.parameters.values().iterator();
if (block.parameters.size() > 1) {
// Skip the first param -> operator
iterator.next();
}
while (iterator.hasNext()) {
builder.add(context.parseValue(iterator.next()));
}
this.params = builder.build();
}
CompletionStage<Boolean> resolve(SectionResolutionContext context, Object value) {
if (params.isEmpty()) {
return CompletedStage.of(true);
} else if (params.size() == 1) {
Expression paramExpr = params.get(0);
if (paramExpr.isLiteral()) {
// A param is very often a literal, there's no need for async constructs
return CompletedStage.of(
caseOperator.evaluate(value, Collections.singletonList(paramExpr.getLiteral())));
}
return context.resolutionContext().evaluate(paramExpr)
.thenApply(p -> caseOperator.evaluate(value, Collections.singletonList(p)));
} else {
// in, not in
CompletableFuture<?>[] allResults = new CompletableFuture<?>[params.size()];
List<CompletableFuture<?>> results = new LinkedList<>();
int i = 0;
Iterator<Expression> it = params.iterator();
while (it.hasNext()) {
Expression expression = it.next();
CompletableFuture<Object> result = context.resolutionContext().evaluate(expression).toCompletableFuture();
allResults[i++] = result;
if (!expression.isLiteral()) {
results.add(result);
}
}
if (results.isEmpty()) {
// Parameters are literals only
return CompletedStage.of(caseOperator.evaluate(value,
Arrays.stream(allResults).map(t1 -> {
try {
return t1.get();
} catch (InterruptedException | ExecutionException e) {
throw new IllegalStateException(e);
}
}).collect(Collectors.toList())));
}
return CompletableFuture.allOf(results.toArray(new CompletableFuture[0]))
.thenApply(new Function<Void, Boolean>() {
@Override
public Boolean apply(Void t) {
return caseOperator.evaluate(value,
Arrays.stream(allResults).map(t1 -> {
try {
return t1.get();
} catch (InterruptedException | ExecutionException e) {
throw new IllegalStateException(e);
}
}).collect(Collectors.toList()));
}
});
}
}
boolean resolveEnum(SectionResolutionContext context, Object value) {
if (params.isEmpty()) {
return true;
}
String enumValue = value.toString();
if (params.size() == 1) {
// case | CaseBlock |
java | micronaut-projects__micronaut-core | inject/src/main/java/io/micronaut/context/event/StartupEvent.java | {
"start": 782,
"end": 1114
} | class ____ extends BeanContextEvent {
/**
* Constructs a prototypical Event.
*
* @param beanContext The object on which the Event initially occurred.
* @throws IllegalArgumentException if source is null.
*/
public StartupEvent(BeanContext beanContext) {
super(beanContext);
}
}
| StartupEvent |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUtils.java | {
"start": 1794,
"end": 2022
} | class ____ the record.
*/
@SuppressWarnings("unchecked")
public static <T extends BaseRecord>
Class<? extends BaseRecord> getRecordClass(final Class<T> clazz) {
// We ignore the Impl classes and go to the super | for |
java | alibaba__nacos | naming/src/main/java/com/alibaba/nacos/naming/misc/SwitchDomainSnapshotOperation.java | {
"start": 1270,
"end": 3891
} | class ____ extends AbstractSnapshotOperation {
private final String snapshotDir = "naming_persistent";
private final String snapshotArchive = "naming_persistent.zip";
private final SwitchManager switchManager;
private final Serializer serializer;
public SwitchDomainSnapshotOperation(ReentrantReadWriteLock lock, SwitchManager switchManager,
Serializer serializer) {
super(lock);
this.switchManager = switchManager;
this.serializer = serializer;
}
@Override
protected boolean writeSnapshot(Writer writer) throws Exception {
final String writePath = writer.getPath();
final String parentPath = Paths.get(writePath, snapshotDir).toString();
DiskUtils.deleteDirectory(parentPath);
DiskUtils.forceMkdir(parentPath);
this.switchManager.dumpSnapshot(parentPath);
final String outputFile = Paths.get(writePath, snapshotArchive).toString();
final Checksum checksum = new CRC64();
DiskUtils.compress(writePath, snapshotDir, outputFile, checksum);
DiskUtils.deleteDirectory(parentPath);
final LocalFileMeta meta = new LocalFileMeta();
meta.append(CHECK_SUM_KEY, Long.toHexString(checksum.getValue()));
return writer.addFile(snapshotArchive, meta);
}
@Override
protected boolean readSnapshot(Reader reader) throws Exception {
final String readerPath = reader.getPath();
final String sourceFile = Paths.get(readerPath, snapshotArchive).toString();
final Checksum checksum = new CRC64();
DiskUtils.decompress(sourceFile, readerPath, checksum);
LocalFileMeta fileMeta = reader.getFileMeta(snapshotArchive);
if (fileMeta.getFileMeta().containsKey(CHECK_SUM_KEY)) {
if (!Objects.equals(Long.toHexString(checksum.getValue()), fileMeta.get(CHECK_SUM_KEY))) {
throw new IllegalArgumentException("Snapshot checksum failed");
}
}
final String loadPath = Paths.get(readerPath, snapshotDir).toString();
Loggers.RAFT.info("snapshot load from : {}", loadPath);
this.switchManager.loadSnapshot(loadPath);
DiskUtils.deleteDirectory(loadPath);
return true;
}
@Override
protected String getSnapshotSaveTag() {
return SwitchDomainSnapshotOperation.class.getSimpleName() + ".SAVE";
}
@Override
protected String getSnapshotLoadTag() {
return SwitchDomainSnapshotOperation.class.getSimpleName() + ".LOAD";
}
}
| SwitchDomainSnapshotOperation |
java | apache__camel | dsl/camel-yaml-dsl/camel-yaml-dsl-deserializers/src/generated/java/org/apache/camel/dsl/yaml/deserializers/ModelDeserializers.java | {
"start": 411377,
"end": 413776
} | class ____ extends YamlDeserializerBase<GrokDataFormat> {
public GrokDataFormatDeserializer() {
super(GrokDataFormat.class);
}
@Override
protected GrokDataFormat newInstance() {
return new GrokDataFormat();
}
@Override
protected boolean setProperty(GrokDataFormat target, String propertyKey,
String propertyName, Node node) {
propertyKey = org.apache.camel.util.StringHelper.dashToCamelCase(propertyKey);
switch(propertyKey) {
case "allowMultipleMatchesPerLine": {
String val = asText(node);
target.setAllowMultipleMatchesPerLine(val);
break;
}
case "flattened": {
String val = asText(node);
target.setFlattened(val);
break;
}
case "id": {
String val = asText(node);
target.setId(val);
break;
}
case "namedOnly": {
String val = asText(node);
target.setNamedOnly(val);
break;
}
case "pattern": {
String val = asText(node);
target.setPattern(val);
break;
}
default: {
return false;
}
}
return true;
}
}
@YamlType(
nodes = "groovy",
inline = true,
types = org.apache.camel.model.language.GroovyExpression.class,
order = org.apache.camel.dsl.yaml.common.YamlDeserializerResolver.ORDER_LOWEST - 1,
displayName = "Groovy",
description = "Evaluates a Groovy script.",
deprecated = false,
properties = {
@YamlProperty(name = "expression", type = "string", required = true, description = "The expression value in your chosen language syntax", displayName = "Expression"),
@YamlProperty(name = "id", type = "string", description = "Sets the id of this node", displayName = "Id"),
@YamlProperty(name = "resultType", type = "string", description = "Sets the | GrokDataFormatDeserializer |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/bigdecimal/BigDecimalAssert_isCloseToPercentage_Test.java | {
"start": 937,
"end": 1429
} | class ____ extends BigDecimalAssertBaseTest {
private final Percentage percentage = withPercentage(5);
private final BigDecimal value = BigDecimal.TEN;
@Override
protected BigDecimalAssert invoke_api_method() {
return assertions.isCloseTo(value, percentage);
}
@Override
protected void verify_internal_effects() {
verify(bigDecimals).assertIsCloseToPercentage(getInfo(assertions), getActual(assertions), value, percentage);
}
}
| BigDecimalAssert_isCloseToPercentage_Test |
java | spring-projects__spring-framework | spring-aop/src/main/java/org/springframework/aop/scope/ScopedProxyBeanRegistrationAotProcessor.java | {
"start": 3666,
"end": 6572
} | class ____ extends BeanRegistrationCodeFragmentsDecorator {
private static final String REGISTERED_BEAN_PARAMETER_NAME = "registeredBean";
private final RegisteredBean registeredBean;
private final String targetBeanName;
private final BeanDefinition targetBeanDefinition;
ScopedProxyBeanRegistrationCodeFragments(BeanRegistrationCodeFragments delegate,
RegisteredBean registeredBean, String targetBeanName, BeanDefinition targetBeanDefinition) {
super(delegate);
this.registeredBean = registeredBean;
this.targetBeanName = targetBeanName;
this.targetBeanDefinition = targetBeanDefinition;
}
@Override
public ClassName getTarget(RegisteredBean registeredBean) {
return ClassName.get(this.targetBeanDefinition.getResolvableType().toClass());
}
@Override
public CodeBlock generateNewBeanDefinitionCode(GenerationContext generationContext,
ResolvableType beanType, BeanRegistrationCode beanRegistrationCode) {
return super.generateNewBeanDefinitionCode(generationContext,
this.targetBeanDefinition.getResolvableType(), beanRegistrationCode);
}
@Override
public CodeBlock generateSetBeanDefinitionPropertiesCode(
GenerationContext generationContext, BeanRegistrationCode beanRegistrationCode,
RootBeanDefinition beanDefinition, Predicate<String> attributeFilter) {
RootBeanDefinition processedBeanDefinition = new RootBeanDefinition(beanDefinition);
processedBeanDefinition.setTargetType(this.targetBeanDefinition.getResolvableType());
processedBeanDefinition.getPropertyValues().removePropertyValue("targetBeanName");
return super.generateSetBeanDefinitionPropertiesCode(generationContext,
beanRegistrationCode, processedBeanDefinition, attributeFilter);
}
@Override
public CodeBlock generateInstanceSupplierCode(
GenerationContext generationContext, BeanRegistrationCode beanRegistrationCode,
boolean allowDirectSupplierShortcut) {
GeneratedMethod generatedMethod = beanRegistrationCode.getMethods()
.add("getScopedProxyInstance", method -> {
method.addJavadoc("Create the scoped proxy bean instance for '$L'.",
this.registeredBean.getBeanName());
method.addModifiers(Modifier.PRIVATE, Modifier.STATIC);
method.returns(ScopedProxyFactoryBean.class);
method.addParameter(RegisteredBean.class, REGISTERED_BEAN_PARAMETER_NAME);
method.addStatement("$T factory = new $T()",
ScopedProxyFactoryBean.class, ScopedProxyFactoryBean.class);
method.addStatement("factory.setTargetBeanName($S)", this.targetBeanName);
method.addStatement("factory.setBeanFactory($L.getBeanFactory())",
REGISTERED_BEAN_PARAMETER_NAME);
method.addStatement("return factory");
});
return CodeBlock.of("$T.of($L)", InstanceSupplier.class,
generatedMethod.toMethodReference().toCodeBlock());
}
}
}
| ScopedProxyBeanRegistrationCodeFragments |
java | quarkusio__quarkus | extensions/jdbc/jdbc-db2/deployment/src/main/java/io/quarkus/jdbc/db2/deployment/JDBCDB2Processor.java | {
"start": 2335,
"end": 8274
} | class ____ {
private static final String DB2_DRIVER_CLASS = "com.ibm.db2.jcc.DB2Driver";
private static final DotName RESOURCE_BUNDLE_DOT_NAME = DotName.createSimple(ResourceBundle.class);
private static final DotName LIST_RESOURCE_BUNDLE_DOT_NAME = DotName.createSimple(ListResourceBundle.class);
private static final String DB2_DRIVER_ROOT_PACKAGE = "com.ibm.db2";
@BuildStep
FeatureBuildItem feature() {
return new FeatureBuildItem(Feature.JDBC_DB2);
}
@BuildStep
void indexDriver(BuildProducer<IndexDependencyBuildItem> indexDependencies,
BuildProducer<AdditionalIndexedClassesBuildItem> additionalIndexedClasses) {
indexDependencies.produce(new IndexDependencyBuildItem("com.ibm.db2", "jcc"));
additionalIndexedClasses.produce(new AdditionalIndexedClassesBuildItem(RESOURCE_BUNDLE_DOT_NAME.toString(),
LIST_RESOURCE_BUNDLE_DOT_NAME.toString()));
}
@BuildStep
void registerDriver(BuildProducer<JdbcDriverBuildItem> jdbcDriver,
SslNativeConfigBuildItem sslNativeConfigBuildItem) {
jdbcDriver.produce(new JdbcDriverBuildItem(DatabaseKind.DB2, DB2_DRIVER_CLASS,
"com.ibm.db2.jcc.DB2XADataSource"));
}
@BuildStep
DevServicesDatasourceConfigurationHandlerBuildItem devDbHandler() {
return DevServicesDatasourceConfigurationHandlerBuildItem.jdbc(DatabaseKind.DB2);
}
@BuildStep
void configureAgroalConnection(BuildProducer<AdditionalBeanBuildItem> additionalBeans,
Capabilities capabilities) {
if (capabilities.isPresent(Capability.AGROAL)) {
additionalBeans
.produce(new AdditionalBeanBuildItem.Builder().addBeanClass(DB2AgroalConnectionConfigurer.class)
.setDefaultScope(BuiltinScope.APPLICATION.getName())
.setUnremovable()
.build());
}
}
@BuildStep
void registerForReflection(BuildProducer<ReflectiveClassBuildItem> reflectiveClass) {
//Not strictly necessary when using Agroal, as it also registers
//any JDBC driver being configured explicitly through its configuration.
//We register it for the sake of people not using Agroal,
//for example when the driver is used with OpenTelemetry JDBC instrumentation.
reflectiveClass.produce(ReflectiveClassBuildItem.builder(DB2_DRIVER_CLASS)
.reason(getClass().getName() + " DB2 JDBC driver classes")
.build());
// register resource bundles for reflection (they are apparently classes...)
reflectiveClass.produce(ReflectiveClassBuildItem.builder(
Resources.class,
ResourceKeys.class,
SqljResources.class,
T2uResourceKeys.class,
T2uResources.class,
T2zResourceKeys.class,
T2zResources.class,
T4ResourceKeys.class,
T4Resources.class)
.reason(getClass().getName() + " DB2 JDBC driver classes")
.build());
reflectiveClass.produce(ReflectiveClassBuildItem.builder("com.ibm.pdq.cmx.client.DataSourceFactory")
.reason(getClass().getName() + " accessed reflectively by Db2 JDBC driver")
.build());
}
@BuildStep
void registerResources(CombinedIndexBuildItem index,
BuildProducer<NativeImageResourceBuildItem> resource,
BuildProducer<NativeImageResourceBundleBuildItem> resourceBundle) {
resource.produce(new NativeImageResourceBuildItem("pdq.properties"));
resource.produce(new NativeImageResourceBuildItem("com/ibm/db2/cmx/runtime/internal/resources/messages.properties"));
// we need to register for reflection all the classes of the driver that are ResourceBundles
for (ClassInfo bundle : index.getIndex().getAllKnownSubclasses(RESOURCE_BUNDLE_DOT_NAME)) {
if (!bundle.name().toString().startsWith(DB2_DRIVER_ROOT_PACKAGE)) {
continue;
}
resourceBundle.produce(new NativeImageResourceBundleBuildItem(bundle.name().toString()));
}
}
@BuildStep
NativeImageConfigBuildItem build() {
// The DB2 JDBC driver has been updated with conditional checks for the
// "QuarkusWithJcc" system property which will no-op some code paths that
// are not needed for T4 JDBC usage and are incompatible with native mode
return NativeImageConfigBuildItem.builder()
.addNativeImageSystemProperty("QuarkusWithJcc", "true")
.build();
}
@BuildStep
NativeImageEnableAllCharsetsBuildItem enableAllCharsets() {
// When connecting to DB2 on z/OS the Cp037 charset is required
return new NativeImageEnableAllCharsetsBuildItem();
}
@BuildStep
void registerServiceBinding(Capabilities capabilities,
BuildProducer<ServiceProviderBuildItem> serviceProvider,
BuildProducer<DefaultDataSourceDbKindBuildItem> dbKind) {
if (capabilities.isPresent(Capability.KUBERNETES_SERVICE_BINDING)) {
serviceProvider.produce(
new ServiceProviderBuildItem("io.quarkus.kubernetes.service.binding.runtime.ServiceBindingConverter",
DB2ServiceBindingConverter.class.getName()));
}
dbKind.produce(new DefaultDataSourceDbKindBuildItem(DatabaseKind.DB2));
}
@BuildStep
NativeImageAllowIncompleteClasspathBuildItem allowIncompleteClasspath() {
// The DB2 JDBC driver uses reflection to load classes that are not present in the classpath
// Without it, the following error is thrown:
// Discovered unresolved type during parsing: com.ibm.db2.jcc.licenses.ConParam. This error is reported at image build time because | JDBCDB2Processor |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestRouterAsyncNamenodeProtocol.java | {
"start": 1727,
"end": 5332
} | class ____ extends RouterAsyncProtocolTestBase {
private RouterAsyncNamenodeProtocol asyncNamenodeProtocol;
private RouterNamenodeProtocol namenodeProtocol;
@BeforeEach
public void setup() throws Exception {
asyncNamenodeProtocol = new RouterAsyncNamenodeProtocol(getRouterAsyncRpcServer());
namenodeProtocol = new RouterNamenodeProtocol(getRouterRpcServer());
}
@Test
public void getBlocks() throws Exception {
DatanodeInfo[] dns = getRouter().getClient()
.getNamenode().getDatanodeReport(HdfsConstants.DatanodeReportType.ALL);
DatanodeInfo dn0 = dns[0];
asyncNamenodeProtocol.getBlocks(dn0, 1024, 0, 0,
null);
BlocksWithLocations asyncRouterBlockLocations = syncReturn(BlocksWithLocations.class);
assertNotNull(asyncRouterBlockLocations);
BlocksWithLocations syncRouterBlockLocations = namenodeProtocol.getBlocks(dn0, 1024,
0, 0, null);
BlockWithLocations[] asyncRouterBlocks = asyncRouterBlockLocations.getBlocks();
BlockWithLocations[] syncRouterBlocks = syncRouterBlockLocations.getBlocks();
assertEquals(asyncRouterBlocks.length, syncRouterBlocks.length);
for (int i = 0; i < syncRouterBlocks.length; i++) {
assertEquals(
asyncRouterBlocks[i].getBlock().getBlockId(),
syncRouterBlocks[i].getBlock().getBlockId());
}
}
@Test
public void getBlockKeys() throws Exception {
asyncNamenodeProtocol.getBlockKeys();
ExportedBlockKeys asyncBlockKeys = syncReturn(ExportedBlockKeys.class);
assertNotNull(asyncBlockKeys);
ExportedBlockKeys syncBlockKeys = namenodeProtocol.getBlockKeys();
compareBlockKeys(asyncBlockKeys, syncBlockKeys);
}
@Test
public void getTransactionID() throws Exception {
asyncNamenodeProtocol.getTransactionID();
long asyncTransactionID = syncReturn(Long.class);
assertNotNull(asyncTransactionID);
long transactionID = namenodeProtocol.getTransactionID();
assertEquals(asyncTransactionID, transactionID);
}
@Test
public void getMostRecentCheckpointTxId() throws Exception {
asyncNamenodeProtocol.getMostRecentCheckpointTxId();
long asyncMostRecentCheckpointTxId = syncReturn(Long.class);
assertNotNull(asyncMostRecentCheckpointTxId);
long mostRecentCheckpointTxId = namenodeProtocol.getMostRecentCheckpointTxId();
assertEquals(asyncMostRecentCheckpointTxId, mostRecentCheckpointTxId);
}
@Test
public void versionRequest() throws Exception {
asyncNamenodeProtocol.versionRequest();
NamespaceInfo asyncNamespaceInfo = syncReturn(NamespaceInfo.class);
assertNotNull(asyncNamespaceInfo);
NamespaceInfo syncNamespaceInfo = namenodeProtocol.versionRequest();
compareVersion(asyncNamespaceInfo, syncNamespaceInfo);
}
private void compareBlockKeys(
ExportedBlockKeys blockKeys, ExportedBlockKeys otherBlockKeys) {
assertEquals(blockKeys.getCurrentKey(), otherBlockKeys.getCurrentKey());
assertEquals(blockKeys.getKeyUpdateInterval(), otherBlockKeys.getKeyUpdateInterval());
assertEquals(blockKeys.getTokenLifetime(), otherBlockKeys.getTokenLifetime());
}
private void compareVersion(NamespaceInfo version, NamespaceInfo otherVersion) {
assertEquals(version.getBlockPoolID(), otherVersion.getBlockPoolID());
assertEquals(version.getNamespaceID(), otherVersion.getNamespaceID());
assertEquals(version.getClusterID(), otherVersion.getClusterID());
assertEquals(version.getLayoutVersion(), otherVersion.getLayoutVersion());
assertEquals(version.getCTime(), otherVersion.getCTime());
}
} | TestRouterAsyncNamenodeProtocol |
java | apache__hadoop | hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestHttpSigner.java | {
"start": 2135,
"end": 5635
} | class ____ extends AbstractS3ATestBase {
private static final Logger LOG = LoggerFactory
.getLogger(ITestHttpSigner.class);
private static final String TEST_ID_KEY = "TEST_ID_KEY";
private static final String TEST_REGION_KEY = "TEST_REGION_KEY";
private final UserGroupInformation ugi1 = UserGroupInformation.createRemoteUser("user1");
private final UserGroupInformation ugi2 = UserGroupInformation.createRemoteUser("user2");
private String regionName;
private String endpoint;
@BeforeEach
@Override
public void setup() throws Exception {
super.setup();
final S3AFileSystem fs = getFileSystem();
final Configuration conf = fs.getConf();
// determine the endpoint -skipping the test.
endpoint = conf.getTrimmed(Constants.ENDPOINT, Constants.CENTRAL_ENDPOINT);
LOG.debug("Test endpoint is {}", endpoint);
regionName = conf.getTrimmed(Constants.AWS_REGION, "");
if (regionName.isEmpty()) {
regionName = determineRegion(fs.getBucket());
}
LOG.debug("Determined region name to be [{}] for bucket [{}]", regionName,
fs.getBucket());
}
private String determineRegion(String bucketName) throws IOException {
return getS3AInternals().getBucketLocation(bucketName);
}
@AfterEach
@Override
public void teardown() throws Exception {
super.teardown();
FileSystem.closeAllForUGI(ugi1);
FileSystem.closeAllForUGI(ugi2);
}
private Configuration createTestConfig(String identifier) {
Configuration conf = createConfiguration();
removeBaseAndBucketOverrides(conf,
CUSTOM_SIGNERS,
SIGNING_ALGORITHM_S3);
conf.setBoolean(HTTP_SIGNER_ENABLED, true);
conf.set(HTTP_SIGNER_CLASS_NAME, CustomHttpSigner.class.getName());
conf.set(TEST_ID_KEY, identifier);
conf.set(TEST_REGION_KEY, regionName);
// make absolutely sure there is no caching.
disableFilesystemCaching(conf);
return conf;
}
@Test
public void testCustomSignerAndInitializer()
throws IOException, InterruptedException {
final Path basePath = path(getMethodName());
FileSystem fs1 = runStoreOperationsAndVerify(ugi1,
new Path(basePath, "customsignerpath1"), "id1");
FileSystem fs2 = runStoreOperationsAndVerify(ugi2,
new Path(basePath, "customsignerpath2"), "id2");
}
private S3AFileSystem runStoreOperationsAndVerify(UserGroupInformation ugi,
Path finalPath, String identifier)
throws IOException, InterruptedException {
Configuration conf = createTestConfig(identifier);
return ugi.doAs((PrivilegedExceptionAction<S3AFileSystem>) () -> {
S3AFileSystem fs = (S3AFileSystem)finalPath.getFileSystem(conf);
fs.mkdirs(finalPath);
// now do some more operations to make sure all is good.
final Path subdir = new Path(finalPath, "year=1970/month=1/day=1");
fs.mkdirs(subdir);
final Path file1 = new Path(subdir, "file1");
ContractTestUtils.touch(fs, new Path(subdir, "file1"));
fs.listStatus(subdir);
fs.delete(file1, false);
ContractTestUtils.touch(fs, new Path(subdir, "file1"));
// create a magic file.
if (fs.isMagicCommitEnabled()) {
createMagicFile(fs, subdir);
ContentSummary summary = fs.getContentSummary(finalPath);
fs.getS3AInternals().abortMultipartUploads(subdir);
fs.rename(subdir, new Path(finalPath, "renamed"));
fs.delete(finalPath, true);
}
return fs;
});
}
}
| ITestHttpSigner |
java | apache__dubbo | dubbo-config/dubbo-config-spring/src/main/java/org/apache/dubbo/config/spring/context/DubboSpringInitializer.java | {
"start": 1821,
"end": 9557
} | class ____ {
private static final Logger logger = LoggerFactory.getLogger(DubboSpringInitializer.class);
private static final Map<BeanDefinitionRegistry, DubboSpringInitContext> REGISTRY_CONTEXT_MAP =
new ConcurrentHashMap<>();
public DubboSpringInitializer() {}
public static void initialize(BeanDefinitionRegistry registry) {
// prepare context and do customize
DubboSpringInitContext context = new DubboSpringInitContext();
// Spring ApplicationContext may not ready at this moment (e.g. load from xml), so use registry as key
if (REGISTRY_CONTEXT_MAP.putIfAbsent(registry, context) != null) {
return;
}
// find beanFactory
ConfigurableListableBeanFactory beanFactory = findBeanFactory(registry);
// init dubbo context
initContext(context, registry, beanFactory);
}
public static boolean remove(BeanDefinitionRegistry registry) {
return REGISTRY_CONTEXT_MAP.remove(registry) != null;
}
public static boolean remove(ApplicationContext springContext) {
AutowireCapableBeanFactory autowireCapableBeanFactory = springContext.getAutowireCapableBeanFactory();
for (Map.Entry<BeanDefinitionRegistry, DubboSpringInitContext> entry : REGISTRY_CONTEXT_MAP.entrySet()) {
DubboSpringInitContext initContext = entry.getValue();
if (initContext.getApplicationContext() == springContext
|| initContext.getBeanFactory() == autowireCapableBeanFactory
|| initContext.getRegistry() == autowireCapableBeanFactory) {
DubboSpringInitContext context = REGISTRY_CONTEXT_MAP.remove(entry.getKey());
logger.info("Unbind " + safeGetModelDesc(context.getModuleModel()) + " from spring container: "
+ ObjectUtils.identityToString(entry.getKey()));
return true;
}
}
return false;
}
static Map<BeanDefinitionRegistry, DubboSpringInitContext> getContextMap() {
return REGISTRY_CONTEXT_MAP;
}
static DubboSpringInitContext findBySpringContext(ApplicationContext applicationContext) {
for (DubboSpringInitContext initContext : REGISTRY_CONTEXT_MAP.values()) {
if (initContext.getApplicationContext() == applicationContext) {
return initContext;
}
}
return null;
}
private static void initContext(
DubboSpringInitContext context,
BeanDefinitionRegistry registry,
ConfigurableListableBeanFactory beanFactory) {
context.setRegistry(registry);
context.setBeanFactory(beanFactory);
// customize context, you can change the bind module model via DubboSpringInitCustomizer SPI
customize(context);
// init ModuleModel
ModuleModel moduleModel = context.getModuleModel();
if (moduleModel == null) {
ApplicationModel applicationModel;
if (findContextForApplication(ApplicationModel.defaultModel()) == null) {
// first spring context use default application instance
applicationModel = ApplicationModel.defaultModel();
logger.info("Use default application: " + applicationModel.getDesc());
} else {
// create a new application instance for later spring context
applicationModel = FrameworkModel.defaultModel().newApplication();
logger.info("Create new application: " + applicationModel.getDesc());
}
// init ModuleModel
moduleModel = applicationModel.getDefaultModule();
context.setModuleModel(moduleModel);
logger.info("Use default module model of target application: " + moduleModel.getDesc());
} else {
logger.info("Use module model from customizer: " + moduleModel.getDesc());
}
logger.info(
"Bind " + moduleModel.getDesc() + " to spring container: " + ObjectUtils.identityToString(registry));
// set module attributes
Map<String, Object> moduleAttributes = context.getModuleAttributes();
if (moduleAttributes.size() > 0) {
moduleModel.getAttributes().putAll(moduleAttributes);
}
// bind dubbo initialization context to spring context
registerContextBeans(beanFactory, context);
// mark context as bound
context.markAsBound();
moduleModel.setLifeCycleManagedExternally(true);
if (!AotWithSpringDetector.useGeneratedArtifacts()) {
// register common beans
DubboBeanUtils.registerCommonBeans(registry);
}
}
private static String safeGetModelDesc(ScopeModel scopeModel) {
return scopeModel != null ? scopeModel.getDesc() : null;
}
private static ConfigurableListableBeanFactory findBeanFactory(BeanDefinitionRegistry registry) {
ConfigurableListableBeanFactory beanFactory;
if (registry instanceof ConfigurableListableBeanFactory) {
beanFactory = (ConfigurableListableBeanFactory) registry;
} else if (registry instanceof GenericApplicationContext) {
GenericApplicationContext genericApplicationContext = (GenericApplicationContext) registry;
beanFactory = genericApplicationContext.getBeanFactory();
} else {
throw new IllegalStateException("Can not find Spring BeanFactory from registry: "
+ registry.getClass().getName());
}
return beanFactory;
}
private static void registerContextBeans(
ConfigurableListableBeanFactory beanFactory, DubboSpringInitContext context) {
// register singleton
if (!beanFactory.containsSingleton(DubboSpringInitContext.class.getName())) {
registerSingleton(beanFactory, context);
}
if (!beanFactory.containsSingleton(
context.getApplicationModel().getClass().getName())) {
registerSingleton(beanFactory, context.getApplicationModel());
}
if (!beanFactory.containsSingleton(context.getModuleModel().getClass().getName())) {
registerSingleton(beanFactory, context.getModuleModel());
}
}
private static void registerSingleton(ConfigurableListableBeanFactory beanFactory, Object bean) {
beanFactory.registerSingleton(bean.getClass().getName(), bean);
}
private static DubboSpringInitContext findContextForApplication(ApplicationModel applicationModel) {
for (DubboSpringInitContext initializationContext : REGISTRY_CONTEXT_MAP.values()) {
if (initializationContext.getApplicationModel() == applicationModel) {
return initializationContext;
}
}
return null;
}
private static void customize(DubboSpringInitContext context) {
// find initialization customizers
Set<DubboSpringInitCustomizer> customizers = FrameworkModel.defaultModel()
.getExtensionLoader(DubboSpringInitCustomizer.class)
.getSupportedExtensionInstances();
for (DubboSpringInitCustomizer customizer : customizers) {
customizer.customize(context);
}
// load customizers in thread local holder
DubboSpringInitCustomizerHolder customizerHolder = DubboSpringInitCustomizerHolder.get();
customizers = customizerHolder.getCustomizers();
for (DubboSpringInitCustomizer customizer : customizers) {
customizer.customize(context);
}
customizerHolder.clearCustomizers();
}
}
| DubboSpringInitializer |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/engine/extension/TestInstancePostProcessorAndPreDestroyCallbackTests.java | {
"start": 5288,
"end": 5638
} | class ____ {
ExceptionInTestClassConstructorTestCase() {
callSequence.add("exceptionThrowingConstructor");
throw new RuntimeException("in constructor");
}
@Test
void test() {
callSequence.add("test");
}
}
// -------------------------------------------------------------------------
static | ExceptionInTestClassConstructorTestCase |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/records/JsonIdentityOnRecord5238Test.java | {
"start": 401,
"end": 755
} | class ____
extends DatabindTestUtil
{
// Record-based data
record ExampleRecord(List<ThingRecord> allThings, ThingRecord selected) { }
@JsonIdentityInfo(generator = ObjectIdGenerators.PropertyGenerator.class, property = "id")
record ThingRecord(int id, String name) { }
// POJO-based data
static | JsonIdentityOnRecord5238Test |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java | {
"start": 4449,
"end": 30476
} | class ____ implements ImageLoader {
protected final DateFormat dateFormat =
new SimpleDateFormat("yyyy-MM-dd HH:mm");
private static int[] versions = { -16, -17, -18, -19, -20, -21, -22, -23,
-24, -25, -26, -27, -28, -30, -31, -32, -33, -34, -35, -36, -37, -38, -39,
-40, -41, -42, -43, -44, -45, -46, -47, -48, -49, -50, -51 };
private int imageVersion = 0;
private final Map<Long, Boolean> subtreeMap = new HashMap<Long, Boolean>();
private final Map<Long, String> dirNodeMap = new HashMap<Long, String>();
/* (non-Javadoc)
* @see ImageLoader#canProcessVersion(int)
*/
@Override
public boolean canLoadVersion(int version) {
for(int v : versions)
if(v == version) return true;
return false;
}
/* (non-Javadoc)
* @see ImageLoader#processImage(java.io.DataInputStream, ImageVisitor, boolean)
*/
@Override
public void loadImage(DataInputStream in, ImageVisitor v,
boolean skipBlocks) throws IOException {
boolean done = false;
try {
v.start();
v.visitEnclosingElement(ImageElement.FS_IMAGE);
imageVersion = in.readInt();
if( !canLoadVersion(imageVersion))
throw new IOException("Cannot process fslayout version " + imageVersion);
if (NameNodeLayoutVersion.supports(Feature.ADD_LAYOUT_FLAGS, imageVersion)) {
LayoutFlags.read(in);
}
v.visit(ImageElement.IMAGE_VERSION, imageVersion);
v.visit(ImageElement.NAMESPACE_ID, in.readInt());
long numInodes = in.readLong();
v.visit(ImageElement.GENERATION_STAMP, in.readLong());
if (NameNodeLayoutVersion.supports(Feature.SEQUENTIAL_BLOCK_ID, imageVersion)) {
v.visit(ImageElement.GENERATION_STAMP_V2, in.readLong());
v.visit(ImageElement.GENERATION_STAMP_V1_LIMIT, in.readLong());
v.visit(ImageElement.LAST_ALLOCATED_BLOCK_ID, in.readLong());
}
if (NameNodeLayoutVersion.supports(Feature.STORED_TXIDS, imageVersion)) {
v.visit(ImageElement.TRANSACTION_ID, in.readLong());
}
if (NameNodeLayoutVersion.supports(Feature.ADD_INODE_ID, imageVersion)) {
v.visit(ImageElement.LAST_INODE_ID, in.readLong());
}
boolean supportSnapshot = NameNodeLayoutVersion.supports(Feature.SNAPSHOT,
imageVersion);
if (supportSnapshot) {
v.visit(ImageElement.SNAPSHOT_COUNTER, in.readInt());
int numSnapshots = in.readInt();
v.visit(ImageElement.NUM_SNAPSHOTS_TOTAL, numSnapshots);
for (int i = 0; i < numSnapshots; i++) {
processSnapshot(in, v);
}
}
if (NameNodeLayoutVersion.supports(Feature.FSIMAGE_COMPRESSION, imageVersion)) {
boolean isCompressed = in.readBoolean();
v.visit(ImageElement.IS_COMPRESSED, String.valueOf(isCompressed));
if (isCompressed) {
String codecClassName = Text.readString(in);
v.visit(ImageElement.COMPRESS_CODEC, codecClassName);
CompressionCodecFactory codecFac = new CompressionCodecFactory(
new Configuration());
CompressionCodec codec = codecFac.getCodecByClassName(codecClassName);
if (codec == null) {
throw new IOException("Image compression codec not supported: "
+ codecClassName);
}
in = new DataInputStream(codec.createInputStream(in));
}
}
processINodes(in, v, numInodes, skipBlocks, supportSnapshot);
subtreeMap.clear();
dirNodeMap.clear();
processINodesUC(in, v, skipBlocks);
if (NameNodeLayoutVersion.supports(Feature.DELEGATION_TOKEN, imageVersion)) {
processDelegationTokens(in, v);
}
if (NameNodeLayoutVersion.supports(Feature.CACHING, imageVersion)) {
processCacheManagerState(in, v);
}
v.leaveEnclosingElement(); // FSImage
done = true;
} finally {
if (done) {
v.finish();
} else {
v.finishAbnormally();
}
}
}
/**
* Process CacheManager state from the fsimage.
*/
private void processCacheManagerState(DataInputStream in, ImageVisitor v)
throws IOException {
v.visit(ImageElement.CACHE_NEXT_ENTRY_ID, in.readLong());
final int numPools = in.readInt();
for (int i=0; i<numPools; i++) {
v.visit(ImageElement.CACHE_POOL_NAME, Text.readString(in));
processCachePoolPermission(in, v);
v.visit(ImageElement.CACHE_POOL_WEIGHT, in.readInt());
}
final int numEntries = in.readInt();
for (int i=0; i<numEntries; i++) {
v.visit(ImageElement.CACHE_ENTRY_PATH, Text.readString(in));
v.visit(ImageElement.CACHE_ENTRY_REPLICATION, in.readShort());
v.visit(ImageElement.CACHE_ENTRY_POOL_NAME, Text.readString(in));
}
}
/**
* Process the Delegation Token related section in fsimage.
*
* @param in DataInputStream to process
* @param v Visitor to walk over records
*/
private void processDelegationTokens(DataInputStream in, ImageVisitor v)
throws IOException {
v.visit(ImageElement.CURRENT_DELEGATION_KEY_ID, in.readInt());
int numDKeys = in.readInt();
v.visitEnclosingElement(ImageElement.DELEGATION_KEYS,
ImageElement.NUM_DELEGATION_KEYS, numDKeys);
for(int i =0; i < numDKeys; i++) {
DelegationKey key = new DelegationKey();
key.readFields(in);
v.visit(ImageElement.DELEGATION_KEY, key.toString());
}
v.leaveEnclosingElement();
v.visit(ImageElement.DELEGATION_TOKEN_SEQUENCE_NUMBER, in.readInt());
int numDTokens = in.readInt();
v.visitEnclosingElement(ImageElement.DELEGATION_TOKENS,
ImageElement.NUM_DELEGATION_TOKENS, numDTokens);
for(int i=0; i<numDTokens; i++){
DelegationTokenIdentifier id = new DelegationTokenIdentifier();
id.readFields(in);
long expiryTime = in.readLong();
v.visitEnclosingElement(ImageElement.DELEGATION_TOKEN_IDENTIFIER);
v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_KIND,
id.getKind().toString());
v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_SEQNO,
id.getSequenceNumber());
v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_OWNER,
id.getOwner().toString());
v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_RENEWER,
id.getRenewer().toString());
v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_REALUSER,
id.getRealUser().toString());
v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_ISSUE_DATE,
id.getIssueDate());
v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_MAX_DATE,
id.getMaxDate());
v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_EXPIRY_TIME,
expiryTime);
v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_MASTER_KEY_ID,
id.getMasterKeyId());
v.leaveEnclosingElement(); // DELEGATION_TOKEN_IDENTIFIER
}
v.leaveEnclosingElement(); // DELEGATION_TOKENS
}
/**
* Process the INodes under construction section of the fsimage.
*
* @param in DataInputStream to process
* @param v Visitor to walk over inodes
* @param skipBlocks Walk over each block?
*/
private void processINodesUC(DataInputStream in, ImageVisitor v,
boolean skipBlocks) throws IOException {
int numINUC = in.readInt();
v.visitEnclosingElement(ImageElement.INODES_UNDER_CONSTRUCTION,
ImageElement.NUM_INODES_UNDER_CONSTRUCTION, numINUC);
for(int i = 0; i < numINUC; i++) {
v.visitEnclosingElement(ImageElement.INODE_UNDER_CONSTRUCTION);
byte [] name = FSImageSerialization.readBytes(in);
String n = new String(name, StandardCharsets.UTF_8);
v.visit(ImageElement.INODE_PATH, n);
if (NameNodeLayoutVersion.supports(Feature.ADD_INODE_ID, imageVersion)) {
long inodeId = in.readLong();
v.visit(ImageElement.INODE_ID, inodeId);
}
v.visit(ImageElement.REPLICATION, in.readShort());
v.visit(ImageElement.MODIFICATION_TIME, formatDate(in.readLong()));
v.visit(ImageElement.PREFERRED_BLOCK_SIZE, in.readLong());
int numBlocks = in.readInt();
processBlocks(in, v, numBlocks, skipBlocks);
processPermission(in, v);
v.visit(ImageElement.CLIENT_NAME, FSImageSerialization.readString(in));
v.visit(ImageElement.CLIENT_MACHINE, FSImageSerialization.readString(in));
// Skip over the datanode descriptors, which are still stored in the
// file but are not used by the datanode or loaded into memory
int numLocs = in.readInt();
for(int j = 0; j < numLocs; j++) {
in.readShort();
in.readLong();
in.readLong();
in.readLong();
in.readInt();
FSImageSerialization.readString(in);
FSImageSerialization.readString(in);
WritableUtils.readEnum(in, AdminStates.class);
}
v.leaveEnclosingElement(); // INodeUnderConstruction
}
v.leaveEnclosingElement(); // INodesUnderConstruction
}
/**
* Process the blocks section of the fsimage.
*
* @param in Datastream to process
* @param v Visitor to walk over inodes
* @param skipBlocks Walk over each block?
*/
private void processBlocks(DataInputStream in, ImageVisitor v,
int numBlocks, boolean skipBlocks) throws IOException {
v.visitEnclosingElement(ImageElement.BLOCKS,
ImageElement.NUM_BLOCKS, numBlocks);
// directory or symlink or reference node, no blocks to process
if(numBlocks < 0) {
v.leaveEnclosingElement(); // Blocks
return;
}
if(skipBlocks) {
int bytesToSkip = ((Long.SIZE * 3 /* fields */) / 8 /*bits*/) * numBlocks;
if(in.skipBytes(bytesToSkip) != bytesToSkip)
throw new IOException("Error skipping over blocks");
} else {
for(int j = 0; j < numBlocks; j++) {
v.visitEnclosingElement(ImageElement.BLOCK);
v.visit(ImageElement.BLOCK_ID, in.readLong());
v.visit(ImageElement.NUM_BYTES, in.readLong());
v.visit(ImageElement.GENERATION_STAMP, in.readLong());
v.leaveEnclosingElement(); // Block
}
}
v.leaveEnclosingElement(); // Blocks
}
/**
* Extract the INode permissions stored in the fsimage file.
*
* @param in Datastream to process
* @param v Visitor to walk over inodes
*/
private void processPermission(DataInputStream in, ImageVisitor v)
throws IOException {
v.visitEnclosingElement(ImageElement.PERMISSIONS);
v.visit(ImageElement.USER_NAME, Text.readString(in));
v.visit(ImageElement.GROUP_NAME, Text.readString(in));
FsPermission fsp = new FsPermission(in.readShort());
v.visit(ImageElement.PERMISSION_STRING, fsp.toString());
v.leaveEnclosingElement(); // Permissions
}
/**
* Extract CachePool permissions stored in the fsimage file.
*
* @param in Datastream to process
* @param v Visitor to walk over inodes
*/
private void processCachePoolPermission(DataInputStream in, ImageVisitor v)
throws IOException {
v.visitEnclosingElement(ImageElement.PERMISSIONS);
v.visit(ImageElement.CACHE_POOL_OWNER_NAME, Text.readString(in));
v.visit(ImageElement.CACHE_POOL_GROUP_NAME, Text.readString(in));
FsPermission fsp = new FsPermission(in.readShort());
v.visit(ImageElement.CACHE_POOL_PERMISSION_STRING, fsp.toString());
v.leaveEnclosingElement(); // Permissions
}
/**
* Process the INode records stored in the fsimage.
*
* @param in Datastream to process
* @param v Visitor to walk over INodes
* @param numInodes Number of INodes stored in file
* @param skipBlocks Process all the blocks within the INode?
* @param supportSnapshot Whether or not the imageVersion supports snapshot
* @throws VisitException
* @throws IOException
*/
private void processINodes(DataInputStream in, ImageVisitor v,
long numInodes, boolean skipBlocks, boolean supportSnapshot)
throws IOException {
v.visitEnclosingElement(ImageElement.INODES,
ImageElement.NUM_INODES, numInodes);
if (NameNodeLayoutVersion.supports(Feature.FSIMAGE_NAME_OPTIMIZATION, imageVersion)) {
if (!supportSnapshot) {
processLocalNameINodes(in, v, numInodes, skipBlocks);
} else {
processLocalNameINodesWithSnapshot(in, v, skipBlocks);
}
} else { // full path name
processFullNameINodes(in, v, numInodes, skipBlocks);
}
v.leaveEnclosingElement(); // INodes
}
/**
* Process image with full path name
*
* @param in image stream
* @param v visitor
* @param numInodes number of indoes to read
* @param skipBlocks skip blocks or not
* @throws IOException if there is any error occurs
*/
private void processLocalNameINodes(DataInputStream in, ImageVisitor v,
long numInodes, boolean skipBlocks) throws IOException {
// process root
processINode(in, v, skipBlocks, "", false);
numInodes--;
while (numInodes > 0) {
numInodes -= processDirectory(in, v, skipBlocks);
}
}
private int processDirectory(DataInputStream in, ImageVisitor v,
boolean skipBlocks) throws IOException {
String parentName = FSImageSerialization.readString(in);
return processChildren(in, v, skipBlocks, parentName);
}
/**
* Process image with local path name and snapshot support
*
* @param in image stream
* @param v visitor
* @param skipBlocks skip blocks or not
*/
private void processLocalNameINodesWithSnapshot(DataInputStream in,
ImageVisitor v, boolean skipBlocks) throws IOException {
// process root
processINode(in, v, skipBlocks, "", false);
processDirectoryWithSnapshot(in, v, skipBlocks);
}
/**
* Process directories when snapshot is supported.
*/
private void processDirectoryWithSnapshot(DataInputStream in, ImageVisitor v,
boolean skipBlocks) throws IOException {
// 1. load dir node id
long inodeId = in.readLong();
String dirName = dirNodeMap.remove(inodeId);
Boolean visitedRef = subtreeMap.get(inodeId);
if (visitedRef != null) {
if (visitedRef.booleanValue()) { // the subtree has been visited
return;
} else { // first time to visit
subtreeMap.put(inodeId, true);
}
} // else the dir is not linked by a RefNode, thus cannot be revisited
// 2. load possible snapshots
processSnapshots(in, v, dirName);
// 3. load children nodes
processChildren(in, v, skipBlocks, dirName);
// 4. load possible directory diff list
processDirectoryDiffList(in, v, dirName);
// recursively process sub-directories
final int numSubTree = in.readInt();
for (int i = 0; i < numSubTree; i++) {
processDirectoryWithSnapshot(in, v, skipBlocks);
}
}
/**
* Process snapshots of a snapshottable directory
*/
private void processSnapshots(DataInputStream in, ImageVisitor v,
String rootName) throws IOException {
final int numSnapshots = in.readInt();
if (numSnapshots >= 0) {
v.visitEnclosingElement(ImageElement.SNAPSHOTS,
ImageElement.NUM_SNAPSHOTS, numSnapshots);
for (int i = 0; i < numSnapshots; i++) {
// process snapshot
v.visitEnclosingElement(ImageElement.SNAPSHOT);
v.visit(ImageElement.SNAPSHOT_ID, in.readInt());
v.leaveEnclosingElement();
}
v.visit(ImageElement.SNAPSHOT_QUOTA, in.readInt());
v.leaveEnclosingElement();
}
}
private void processSnapshot(DataInputStream in, ImageVisitor v)
throws IOException {
v.visitEnclosingElement(ImageElement.SNAPSHOT);
v.visit(ImageElement.SNAPSHOT_ID, in.readInt());
// process root of snapshot
v.visitEnclosingElement(ImageElement.SNAPSHOT_ROOT);
processINode(in, v, true, "", false);
v.leaveEnclosingElement();
v.leaveEnclosingElement();
}
private void processDirectoryDiffList(DataInputStream in, ImageVisitor v,
String currentINodeName) throws IOException {
final int numDirDiff = in.readInt();
if (numDirDiff >= 0) {
v.visitEnclosingElement(ImageElement.SNAPSHOT_DIR_DIFFS,
ImageElement.NUM_SNAPSHOT_DIR_DIFF, numDirDiff);
for (int i = 0; i < numDirDiff; i++) {
// process directory diffs in reverse chronological oder
processDirectoryDiff(in, v, currentINodeName);
}
v.leaveEnclosingElement();
}
}
private void processDirectoryDiff(DataInputStream in, ImageVisitor v,
String currentINodeName) throws IOException {
v.visitEnclosingElement(ImageElement.SNAPSHOT_DIR_DIFF);
int snapshotId = in.readInt();
v.visit(ImageElement.SNAPSHOT_DIFF_SNAPSHOTID, snapshotId);
v.visit(ImageElement.SNAPSHOT_DIR_DIFF_CHILDREN_SIZE, in.readInt());
// process snapshotINode
boolean useRoot = in.readBoolean();
if (!useRoot) {
if (in.readBoolean()) {
v.visitEnclosingElement(ImageElement.SNAPSHOT_INODE_DIRECTORY_ATTRIBUTES);
if (NameNodeLayoutVersion.supports(Feature.OPTIMIZE_SNAPSHOT_INODES, imageVersion)) {
processINodeDirectoryAttributes(in, v, currentINodeName);
} else {
processINode(in, v, true, currentINodeName, true);
}
v.leaveEnclosingElement();
}
}
// process createdList
int createdSize = in.readInt();
v.visitEnclosingElement(ImageElement.SNAPSHOT_DIR_DIFF_CREATEDLIST,
ImageElement.SNAPSHOT_DIR_DIFF_CREATEDLIST_SIZE, createdSize);
for (int i = 0; i < createdSize; i++) {
String createdNode = FSImageSerialization.readString(in);
v.visit(ImageElement.SNAPSHOT_DIR_DIFF_CREATED_INODE, createdNode);
}
v.leaveEnclosingElement();
// process deletedList
int deletedSize = in.readInt();
v.visitEnclosingElement(ImageElement.SNAPSHOT_DIR_DIFF_DELETEDLIST,
ImageElement.SNAPSHOT_DIR_DIFF_DELETEDLIST_SIZE, deletedSize);
for (int i = 0; i < deletedSize; i++) {
v.visitEnclosingElement(ImageElement.SNAPSHOT_DIR_DIFF_DELETED_INODE);
processINode(in, v, false, currentINodeName, true);
v.leaveEnclosingElement();
}
v.leaveEnclosingElement();
v.leaveEnclosingElement();
}
private void processINodeDirectoryAttributes(DataInputStream in, ImageVisitor v,
String parentName) throws IOException {
final String pathName = readINodePath(in, parentName);
v.visit(ImageElement.INODE_PATH, pathName);
processPermission(in, v);
v.visit(ImageElement.MODIFICATION_TIME, formatDate(in.readLong()));
v.visit(ImageElement.NS_QUOTA, in.readLong());
v.visit(ImageElement.DS_QUOTA, in.readLong());
}
/** Process children under a directory */
private int processChildren(DataInputStream in, ImageVisitor v,
boolean skipBlocks, String parentName) throws IOException {
int numChildren = in.readInt();
for (int i = 0; i < numChildren; i++) {
processINode(in, v, skipBlocks, parentName, false);
}
return numChildren;
}
/**
* Process image with full path name
*
* @param in image stream
* @param v visitor
* @param numInodes number of indoes to read
* @param skipBlocks skip blocks or not
* @throws IOException if there is any error occurs
*/
private void processFullNameINodes(DataInputStream in, ImageVisitor v,
long numInodes, boolean skipBlocks) throws IOException {
for(long i = 0; i < numInodes; i++) {
processINode(in, v, skipBlocks, null, false);
}
}
private String readINodePath(DataInputStream in, String parentName)
throws IOException {
String pathName = FSImageSerialization.readString(in);
if (parentName != null) { // local name
pathName = "/" + pathName;
if (!"/".equals(parentName)) { // children of non-root directory
pathName = parentName + pathName;
}
}
return pathName;
}
/**
* Process an INode
*
* @param in image stream
* @param v visitor
* @param skipBlocks skip blocks or not
* @param parentName the name of its parent node
* @param isSnapshotCopy whether or not the inode is a snapshot copy
* @throws IOException
*/
private void processINode(DataInputStream in, ImageVisitor v,
boolean skipBlocks, String parentName, boolean isSnapshotCopy)
throws IOException {
boolean supportSnapshot =
NameNodeLayoutVersion.supports(Feature.SNAPSHOT, imageVersion);
boolean supportInodeId =
NameNodeLayoutVersion.supports(Feature.ADD_INODE_ID, imageVersion);
v.visitEnclosingElement(ImageElement.INODE);
final String pathName = readINodePath(in, parentName);
v.visit(ImageElement.INODE_PATH, pathName);
long inodeId = HdfsConstants.GRANDFATHER_INODE_ID;
if (supportInodeId) {
inodeId = in.readLong();
v.visit(ImageElement.INODE_ID, inodeId);
}
v.visit(ImageElement.REPLICATION, in.readShort());
v.visit(ImageElement.MODIFICATION_TIME, formatDate(in.readLong()));
if(NameNodeLayoutVersion.supports(Feature.FILE_ACCESS_TIME, imageVersion))
v.visit(ImageElement.ACCESS_TIME, formatDate(in.readLong()));
v.visit(ImageElement.BLOCK_SIZE, in.readLong());
int numBlocks = in.readInt();
processBlocks(in, v, numBlocks, skipBlocks);
if (numBlocks >= 0) { // File
if (supportSnapshot) {
// make sure subtreeMap only contains entry for directory
subtreeMap.remove(inodeId);
// process file diffs
processFileDiffList(in, v, parentName);
if (isSnapshotCopy) {
boolean underConstruction = in.readBoolean();
if (underConstruction) {
v.visit(ImageElement.CLIENT_NAME,
FSImageSerialization.readString(in));
v.visit(ImageElement.CLIENT_MACHINE,
FSImageSerialization.readString(in));
}
}
}
processPermission(in, v);
} else if (numBlocks == -1) { // Directory
if (supportSnapshot && supportInodeId) {
dirNodeMap.put(inodeId, pathName);
}
v.visit(ImageElement.NS_QUOTA, in.readLong());
if (NameNodeLayoutVersion.supports(Feature.DISKSPACE_QUOTA,
imageVersion)) {
v.visit(ImageElement.DS_QUOTA, in.readLong());
}
if (supportSnapshot) {
boolean snapshottable = in.readBoolean();
if (!snapshottable) {
boolean withSnapshot = in.readBoolean();
v.visit(ImageElement.IS_WITHSNAPSHOT_DIR, Boolean.toString(withSnapshot));
} else {
v.visit(ImageElement.IS_SNAPSHOTTABLE_DIR, Boolean.toString(snapshottable));
}
}
processPermission(in, v);
} else if (numBlocks == -2) {
v.visit(ImageElement.SYMLINK, Text.readString(in));
processPermission(in, v);
} else if (numBlocks == -3) { // reference node
final boolean isWithName = in.readBoolean();
int snapshotId = in.readInt();
if (isWithName) {
v.visit(ImageElement.SNAPSHOT_LAST_SNAPSHOT_ID, snapshotId);
} else {
v.visit(ImageElement.SNAPSHOT_DST_SNAPSHOT_ID, snapshotId);
}
final boolean firstReferred = in.readBoolean();
if (firstReferred) {
// if a subtree is linked by multiple "parents", the corresponding dir
// must be referred by a reference node. we put the reference node into
// the subtreeMap here and let its value be false. when we later visit
// the subtree for the first time, we change the value to true.
subtreeMap.put(inodeId, false);
v.visitEnclosingElement(ImageElement.SNAPSHOT_REF_INODE);
processINode(in, v, skipBlocks, parentName, isSnapshotCopy);
v.leaveEnclosingElement(); // referred inode
} else {
v.visit(ImageElement.SNAPSHOT_REF_INODE_ID, in.readLong());
}
}
v.leaveEnclosingElement(); // INode
}
private void processINodeFileAttributes(DataInputStream in, ImageVisitor v,
String parentName) throws IOException {
final String pathName = readINodePath(in, parentName);
v.visit(ImageElement.INODE_PATH, pathName);
processPermission(in, v);
v.visit(ImageElement.MODIFICATION_TIME, formatDate(in.readLong()));
if(NameNodeLayoutVersion.supports(Feature.FILE_ACCESS_TIME, imageVersion)) {
v.visit(ImageElement.ACCESS_TIME, formatDate(in.readLong()));
}
v.visit(ImageElement.REPLICATION, in.readShort());
v.visit(ImageElement.BLOCK_SIZE, in.readLong());
}
private void processFileDiffList(DataInputStream in, ImageVisitor v,
String currentINodeName) throws IOException {
final int size = in.readInt();
if (size >= 0) {
v.visitEnclosingElement(ImageElement.SNAPSHOT_FILE_DIFFS,
ImageElement.NUM_SNAPSHOT_FILE_DIFF, size);
for (int i = 0; i < size; i++) {
processFileDiff(in, v, currentINodeName);
}
v.leaveEnclosingElement();
}
}
private void processFileDiff(DataInputStream in, ImageVisitor v,
String currentINodeName) throws IOException {
int snapshotId = in.readInt();
v.visitEnclosingElement(ImageElement.SNAPSHOT_FILE_DIFF,
ImageElement.SNAPSHOT_DIFF_SNAPSHOTID, snapshotId);
v.visit(ImageElement.SNAPSHOT_FILE_SIZE, in.readLong());
if (in.readBoolean()) {
v.visitEnclosingElement(ImageElement.SNAPSHOT_INODE_FILE_ATTRIBUTES);
if (NameNodeLayoutVersion.supports(Feature.OPTIMIZE_SNAPSHOT_INODES, imageVersion)) {
processINodeFileAttributes(in, v, currentINodeName);
} else {
processINode(in, v, true, currentINodeName, true);
}
v.leaveEnclosingElement();
}
v.leaveEnclosingElement();
}
/**
* Helper method to format dates during processing.
* @param date Date as read from image file
* @return String version of date format
*/
private String formatDate(long date) {
return dateFormat.format(new Date(date));
}
}
| ImageLoaderCurrent |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/BDDSoftAssertions_wasSuccess_Test.java | {
"start": 870,
"end": 3531
} | class ____ extends BaseAssertionsTest {
private BDDSoftAssertions softly;
@BeforeEach
void setup() {
Assertions.setRemoveAssertJRelatedElementsFromStackTrace(false);
softly = new BDDSoftAssertions();
}
@Test
void should_return_success_of_last_assertion() {
softly.then(true).isFalse();
softly.then(true).isEqualTo(true);
then(softly.wasSuccess()).isTrue();
}
@Test
void should_return_success_of_last_assertion_with_nested_calls() {
softly.then(true).isFalse();
softly.then(true).isTrue(); // isTrue() calls isEqualTo(true)
then(softly.wasSuccess()).isTrue();
}
@Test
void should_return_failure_of_last_assertion() {
softly.then(true).isTrue();
softly.then(true).isEqualTo(false);
then(softly.wasSuccess()).isFalse();
}
@Test
void should_return_failure_of_last_assertion_with_multilple_nested_calls() {
softly.then(true).isTrue();
softly.then(true).isFalse(); // isFalse() calls isEqualTo(false)
then(softly.wasSuccess()).isFalse();
}
@Test
void should_return_failure_of_last_assertion_with_nested_calls() {
// scenario to avoid:
// -- softly.then(true).isFalse()
// ----- proxied isFalse() -> calls isEqualTo(false) which is proxied
// ------- proxied isEqualTo(false) : catch AssertionError => wasSuccess = false, back to outer call
// ---- proxied isFalse() : no AssertionError caught => last result success = true
softly.then(true).isFalse();
then(softly.wasSuccess()).isFalse();
}
@Test
void should_return_failure_after_fail() {
// GIVEN
String failureMessage = "Should not reach here";
// WHEN
softly.fail(failureMessage);
// THEN
then(softly.wasSuccess()).isFalse();
then(softly.errorsCollected()).hasSize(1);
then(softly.errorsCollected().get(0)).hasMessageStartingWith(failureMessage);
}
@Test
void should_return_failure_after_fail_with_parameters() {
// GIVEN
String failureMessage = "Should not reach %s or %s";
// WHEN
softly.fail(failureMessage, "here", "here");
// THEN
then(softly.wasSuccess()).isFalse();
}
@Test
void should_return_failure_after_fail_with_throwable() {
// GIVEN
String failureMessage = "Should not reach here";
IllegalStateException realCause = new IllegalStateException();
// WHEN
softly.fail(failureMessage, realCause);
// THEN
then(softly.wasSuccess()).isFalse();
}
@Test
void should_return_failure_after_shouldHaveThrown() {
// WHEN
softly.shouldHaveThrown(IllegalArgumentException.class);
// THEN
then(softly.wasSuccess()).isFalse();
}
}
| BDDSoftAssertions_wasSuccess_Test |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhancement/refresh/RefreshEntityWithLazyPropertyTest.java | {
"start": 1761,
"end": 7162
} | class ____ {
private static final Long PERSON_ID = 1L;
private static final Long ASSISTANT_PROFESSOR_POSITION_ID = 1L;
private static final Long PROFESSOR_POSITION_ID = 2L;
private static final String ASSISTANT_POSITION_DESCRIPTION = "Assistant Professor";
private static final String POSITION_DESCRIPTION = "Professor";
private static final String PROFESSOR_FIRST_NAME = "John";
private static final String PROFESSOR_LAST_NAME = "Doe";
@BeforeEach
public void setUp(SessionFactoryScope scope) {
scope.inTransaction( session -> {
Position professorPosition = new Position( PROFESSOR_POSITION_ID, POSITION_DESCRIPTION );
session.persist( professorPosition );
Position assistantProfessor = new Position( ASSISTANT_PROFESSOR_POSITION_ID,
ASSISTANT_POSITION_DESCRIPTION );
session.persist( assistantProfessor );
Person person = new Person( PERSON_ID, PROFESSOR_FIRST_NAME, PROFESSOR_LAST_NAME, assistantProfessor,
professorPosition );
session.persist( person );
} );
}
@AfterEach
public void tearDown(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
}
@Test
public void testRefreshOfLazyField(SessionFactoryScope scope) {
scope.inTransaction( session -> {
Person p = session.find( Person.class, PERSON_ID );
assertThat( p.getLastName() ).isEqualTo( PROFESSOR_LAST_NAME );
String updatedLastName = "Johnson";
session.createMutationQuery( "update Person p " +
"set p.lastName = :lastName " +
"where p.id = :id"
)
.setParameter( "lastName", updatedLastName )
.setParameter( "id", PERSON_ID )
.executeUpdate();
session.refresh( p );
assertThat( p.getLastName() ).isEqualTo( updatedLastName );
} );
}
@Test
public void testRefreshOfLazyFormula(SessionFactoryScope scope) {
scope.inTransaction( session -> {
Person p = session.find( Person.class, PERSON_ID );
assertThat( p.getFullName() ).isEqualTo( "John Doe" );
p.setLastName( "Johnson" );
session.flush();
session.refresh( p );
assertThat( p.getFullName() ).isEqualTo( "John Johnson" );
} );
}
@Test
public void testRefreshOfLazyOneToMany(SessionFactoryScope scope) {
scope.inTransaction( session -> {
Person p = session.find( Person.class, PERSON_ID );
assertThat( p.getCourses().size() ).isEqualTo( 0 );
session.createMutationQuery( "insert into Course (id, title, person) values (:id, :title, :person) " )
.setParameter( "id", 0 )
.setParameter( "title", "Book Title" )
.setParameter( "person", p )
.executeUpdate();
session.refresh( p );
assertThat( p.getCourses().size() ).isEqualTo( 1 );
} );
}
@Test
public void testRefreshOfLazyManyToOne(SessionFactoryScope scope) {
scope.inTransaction( session -> {
Person p = session.find( Person.class, PERSON_ID );
assertThat( p.getPosition().id ).isEqualTo( ASSISTANT_PROFESSOR_POSITION_ID );
Position professorPosition = session.find( Position.class, PROFESSOR_POSITION_ID );
session.createMutationQuery(
"update Person p " +
"set p.position = :position " +
"where p.id = :personId "
)
.setParameter( "position", professorPosition )
.setParameter( "personId", p.getId() )
.executeUpdate();
session.refresh( p );
assertThat( p.getPosition().id ).isEqualTo( PROFESSOR_POSITION_ID );
} );
}
@Test
public void testRefreshOfLazyManyToOneCascadeRefresh(SessionFactoryScope scope) {
scope.inTransaction( session -> {
Person p = session.find( Person.class, PERSON_ID );
Position position = p.getPosition();
assertThat( position.getId() ).isEqualTo( ASSISTANT_PROFESSOR_POSITION_ID );
assertThat( position.getDescription() ).isEqualTo( ASSISTANT_POSITION_DESCRIPTION );
String newAssistantProfessorDescription = "Assistant Professor 2";
session.createMutationQuery(
"update Position " +
"set description = :description " +
"where id = :id "
)
.setParameter( "description", newAssistantProfessorDescription )
.setParameter( "id", ASSISTANT_PROFESSOR_POSITION_ID )
.executeUpdate();
session.refresh( p );
// the association has been refreshed because it's annotated with `cascade = CascadeType.REFRESH`
assertThat( p.getPosition().getDescription() ).isEqualTo( newAssistantProfessorDescription );
} );
}
@Test
public void testRefreshOfLazyManyToOneNoCascadeRefresh(SessionFactoryScope scope) {
scope.inTransaction( session -> {
Person p = session.find( Person.class, PERSON_ID );
Position position = p.getPreviousPosition();
assertThat( position.getId() ).isEqualTo( PROFESSOR_POSITION_ID );
assertThat( position.getDescription() ).isEqualTo( POSITION_DESCRIPTION );
String newAssistantProfessorDescription = "Assistant Professor 2";
session.createMutationQuery(
"update Position " +
"set description = :description " +
"where id = :id "
)
.setParameter( "description", newAssistantProfessorDescription )
.setParameter( "id", PROFESSOR_POSITION_ID )
.executeUpdate();
session.refresh( p );
// the association has not been refreshed because it's not annotated with `cascade = CascadeType.REFRESH`
assertThat( p.getPreviousPosition().getDescription() ).isEqualTo( POSITION_DESCRIPTION );
} );
}
@Entity(name = "Person")
public static | RefreshEntityWithLazyPropertyTest |
java | elastic__elasticsearch | x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/TransportMonitoringMigrateAlertsActionTests.java | {
"start": 2973,
"end": 30545
} | class ____ extends MonitoringIntegTestCase {
private MockWebServer webServer;
private MockWebServer createMockWebServer() throws IOException {
MockWebServer server = new MockWebServer();
server.start();
return server;
}
@Before
public void startWebServer() throws IOException {
webServer = createMockWebServer();
}
@After
public void stopWebServer() {
if (webServer != null) {
webServer.close();
}
}
@Override
protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
return Settings.builder()
// Parent conf
.put(super.nodeSettings(nodeOrdinal, otherSettings))
// Disable monitoring
.put("xpack.monitoring.collection.enabled", false)
.put("xpack.monitoring.collection.interval", "1s")
// X-Pack configuration
.put("xpack.license.self_generated.type", "trial")
.put(XPackSettings.MACHINE_LEARNING_ENABLED.getKey(), false)
.build();
}
private void stopMonitoring() {
// Clean up any persistent settings we have added
updateClusterSettings(
Settings.builder()
.putNull(MonitoringService.ENABLED.getKey())
.putNull("xpack.monitoring.elasticsearch.collection.enabled")
.putNull("xpack.monitoring.exporters._local.type")
.putNull("xpack.monitoring.exporters._local.enabled")
.putNull("xpack.monitoring.exporters._local.cluster_alerts.management.enabled")
.putNull("xpack.monitoring.exporters.remoteCluster.type")
.putNull("xpack.monitoring.exporters.remoteCluster.enabled")
.putNull("xpack.monitoring.exporters.remoteCluster.host")
.putNull("xpack.monitoring.exporters.remoteCluster.cluster_alerts.management.enabled")
);
// Make sure to clean up the migration setting if it is set
updateClusterSettings(Settings.builder().putNull(Monitoring.MIGRATION_DECOMMISSION_ALERTS.getKey()));
}
@TestLogging(
value = "org.elasticsearch.xpack.monitoring.exporter.local:trace",
reason = "to ensure we log local exporter on trace level"
)
public void testLocalAlertsRemoval() throws Exception {
try {
// start monitoring service
final Settings.Builder exporterSettings = Settings.builder()
.put(MonitoringService.ENABLED.getKey(), true)
.put("xpack.monitoring.exporters._local.type", LocalExporter.TYPE)
.put("xpack.monitoring.exporters._local.enabled", true)
.put("xpack.monitoring.exporters._local.cluster_alerts.management.enabled", true);
// enable local exporter
updateClusterSettings(exporterSettings);
// ensure resources exist
ensureInitialLocalResources();
// call migration api
MonitoringMigrateAlertsResponse response = client().execute(
MonitoringMigrateAlertsAction.INSTANCE,
new MonitoringMigrateAlertsRequest()
).actionGet();
// check response
assertThat(response.getExporters().size(), is(1));
MonitoringMigrateAlertsResponse.ExporterMigrationResult localExporterResult = response.getExporters().get(0);
assertThat(localExporterResult.getName(), is("_local"));
assertThat(localExporterResult.getType(), is(LocalExporter.TYPE));
assertThat(localExporterResult.isMigrationComplete(), is(true));
assertThat(localExporterResult.getReason(), nullValue());
// ensure no watches
assertWatchesExist(false);
} finally {
stopMonitoring();
}
}
@TestLogging(
value = "org.elasticsearch.xpack.monitoring.exporter.local:trace",
reason = "to ensure we log local exporter on trace level"
)
public void testRepeatedLocalAlertsRemoval() throws Exception {
try {
// start monitoring service
final Settings.Builder exporterSettings = Settings.builder()
.put(MonitoringService.ENABLED.getKey(), true)
.put("xpack.monitoring.exporters._local.type", LocalExporter.TYPE)
.put("xpack.monitoring.exporters._local.enabled", true)
.put("xpack.monitoring.exporters._local.cluster_alerts.management.enabled", true);
// enable local exporter
updateClusterSettings(exporterSettings);
// ensure resources exist
ensureInitialLocalResources();
// call migration api
MonitoringMigrateAlertsResponse response = client().execute(
MonitoringMigrateAlertsAction.INSTANCE,
new MonitoringMigrateAlertsRequest()
).actionGet();
// check response
assertThat(response.getExporters().size(), is(1));
MonitoringMigrateAlertsResponse.ExporterMigrationResult localExporterResult = response.getExporters().get(0);
assertThat(localExporterResult.getName(), is("_local"));
assertThat(localExporterResult.getType(), is(LocalExporter.TYPE));
assertThat(localExporterResult.isMigrationComplete(), is(true));
assertThat(localExporterResult.getReason(), nullValue());
// ensure no watches
assertWatchesExist(false);
// call migration api again
response = client().execute(MonitoringMigrateAlertsAction.INSTANCE, new MonitoringMigrateAlertsRequest()).actionGet();
// check second response
assertThat(response.getExporters().size(), is(1));
localExporterResult = response.getExporters().get(0);
assertThat(localExporterResult.getName(), is("_local"));
assertThat(localExporterResult.getType(), is(LocalExporter.TYPE));
assertThat(localExporterResult.isMigrationComplete(), is(true));
assertThat(localExporterResult.getReason(), nullValue());
} finally {
stopMonitoring();
}
}
public void testDisabledLocalExporterAlertsRemoval() throws Exception {
try {
// start monitoring service
final Settings.Builder exporterSettings = Settings.builder()
.put(MonitoringService.ENABLED.getKey(), true)
.put("xpack.monitoring.exporters._local.type", LocalExporter.TYPE)
.put("xpack.monitoring.exporters._local.enabled", true)
.put("xpack.monitoring.exporters._local.cluster_alerts.management.enabled", true);
// enable local exporter
updateClusterSettings(exporterSettings);
// ensure resources exist
ensureInitialLocalResources();
// new disable local exporter
final Settings.Builder disableSettings = Settings.builder()
.put(MonitoringService.ENABLED.getKey(), true)
.put("xpack.monitoring.exporters._local.type", LocalExporter.TYPE)
.put("xpack.monitoring.exporters._local.enabled", false)
.put("xpack.monitoring.exporters._local.cluster_alerts.management.enabled", true);
updateClusterSettings(disableSettings);
// call migration api
MonitoringMigrateAlertsResponse response = client().execute(
MonitoringMigrateAlertsAction.INSTANCE,
new MonitoringMigrateAlertsRequest()
).actionGet();
// check response
assertThat(response.getExporters().size(), is(1));
MonitoringMigrateAlertsResponse.ExporterMigrationResult localExporterResult = response.getExporters().get(0);
assertThat(localExporterResult.getName(), is("_local"));
assertThat(localExporterResult.getType(), is(LocalExporter.TYPE));
assertThat(localExporterResult.isMigrationComplete(), is(true));
assertThat(localExporterResult.getReason(), nullValue());
// ensure no watches
assertWatchesExist(false);
} finally {
stopMonitoring();
}
}
public void testLocalExporterWithAlertingDisabled() throws Exception {
try {
// start monitoring service
final Settings.Builder exporterSettings = Settings.builder()
.put(MonitoringService.ENABLED.getKey(), true)
.put("xpack.monitoring.exporters._local.type", LocalExporter.TYPE)
.put("xpack.monitoring.exporters._local.enabled", true)
.put("xpack.monitoring.exporters._local.cluster_alerts.management.enabled", true);
// enable local exporter
updateClusterSettings(exporterSettings);
// ensure resources exist
ensureInitialLocalResources();
// new disable local exporter's cluster alerts
final Settings.Builder disableSettings = Settings.builder()
.put(MonitoringService.ENABLED.getKey(), true)
.put("xpack.monitoring.exporters._local.type", LocalExporter.TYPE)
.put("xpack.monitoring.exporters._local.enabled", true)
.put("xpack.monitoring.exporters._local.cluster_alerts.management.enabled", false);
updateClusterSettings(disableSettings);
// call migration api
MonitoringMigrateAlertsResponse response = client().execute(
MonitoringMigrateAlertsAction.INSTANCE,
new MonitoringMigrateAlertsRequest()
).actionGet();
// check response
assertThat(response.getExporters().size(), is(1));
MonitoringMigrateAlertsResponse.ExporterMigrationResult localExporterResult = response.getExporters().get(0);
assertThat(localExporterResult.getName(), is("_local"));
assertThat(localExporterResult.getType(), is(LocalExporter.TYPE));
assertThat(localExporterResult.isMigrationComplete(), is(false));
assertThat(localExporterResult.getReason(), notNullValue());
assertThat(localExporterResult.getReason().getMessage(), is("cannot manage cluster alerts because alerting is disabled"));
} finally {
stopMonitoring();
}
}
public void testRemoteAlertsRemoval() throws Exception {
try {
// start monitoring service
final Settings.Builder exporterSettings = Settings.builder()
.put(MonitoringService.ENABLED.getKey(), true)
// Make sure to not collect ES stats in background. Our web server expects requests in a particular order.
.put("xpack.monitoring.elasticsearch.collection.enabled", false)
.put("xpack.monitoring.exporters.remoteCluster.type", HttpExporter.TYPE)
.put("xpack.monitoring.exporters.remoteCluster.enabled", true)
.put("xpack.monitoring.exporters.remoteCluster.host", webServer.getHostName() + ":" + webServer.getPort())
.put("xpack.monitoring.exporters.remoteCluster.cluster_alerts.management.enabled", true);
// enable http exporter
updateClusterSettings(exporterSettings);
// enqueue delete request expectations for alerts
enqueueWatcherResponses(webServer, true);
// call migration api
MonitoringMigrateAlertsResponse response = client().execute(
MonitoringMigrateAlertsAction.INSTANCE,
new MonitoringMigrateAlertsRequest()
).actionGet();
// check that all "remote watches" were deleted by the exporter
assertThat(response.getExporters().size(), is(1));
MonitoringMigrateAlertsResponse.ExporterMigrationResult localExporterResult = response.getExporters().get(0);
assertThat(localExporterResult.getName(), is("remoteCluster"));
assertThat(localExporterResult.getType(), is(HttpExporter.TYPE));
assertThat(localExporterResult.isMigrationComplete(), is(true));
assertThat(localExporterResult.getReason(), nullValue());
// ensure no watches
assertMonitorWatches(webServer, true);
} finally {
stopMonitoring();
webServer.clearRequests();
}
}
public void testDisabledRemoteAlertsRemoval() throws Exception {
try {
// start monitoring service
final Settings.Builder exporterSettings = Settings.builder()
.put(MonitoringService.ENABLED.getKey(), true)
// Make sure to not collect ES stats in background. Our web server expects requests in a particular order.
.put("xpack.monitoring.elasticsearch.collection.enabled", false)
.put("xpack.monitoring.exporters.remoteCluster.type", HttpExporter.TYPE)
.put("xpack.monitoring.exporters.remoteCluster.enabled", false)
.put("xpack.monitoring.exporters.remoteCluster.host", webServer.getHostName() + ":" + webServer.getPort())
.put("xpack.monitoring.exporters.remoteCluster.cluster_alerts.management.enabled", true);
// configure disabled http exporter
updateClusterSettings(exporterSettings);
// enqueue delete request expectations for alerts
enqueueWatcherResponses(webServer, true);
// call migration api
MonitoringMigrateAlertsResponse response = client().execute(
MonitoringMigrateAlertsAction.INSTANCE,
new MonitoringMigrateAlertsRequest()
).actionGet();
// check that the disabled http exporter was enabled this one time in order to remove watches
assertThat(response.getExporters().size(), is(1));
MonitoringMigrateAlertsResponse.ExporterMigrationResult localExporterResult = response.getExporters().get(0);
assertThat(localExporterResult.getName(), is("remoteCluster"));
assertThat(localExporterResult.getType(), is(HttpExporter.TYPE));
assertThat(localExporterResult.isMigrationComplete(), is(true));
assertThat(localExporterResult.getReason(), nullValue());
// ensure no watches
assertMonitorWatches(webServer, true);
} finally {
stopMonitoring();
webServer.clearRequests();
}
}
public void testRemoteAlertsRemovalWhenOriginalMonitoringClusterIsGone() throws Exception {
try {
// start monitoring service
final Settings.Builder exporterSettings = Settings.builder()
.put(MonitoringService.ENABLED.getKey(), true)
// Make sure to not collect ES stats in background. Our web server expects requests in a particular order.
.put("xpack.monitoring.elasticsearch.collection.enabled", false)
.put("xpack.monitoring.exporters.remoteCluster.type", HttpExporter.TYPE)
.put("xpack.monitoring.exporters.remoteCluster.enabled", false)
.put("xpack.monitoring.exporters.remoteCluster.host", webServer.getHostName() + ":" + webServer.getPort())
.put("xpack.monitoring.exporters.remoteCluster.cluster_alerts.management.enabled", true);
// create a disabled http exporter
updateClusterSettings(exporterSettings);
// call migration api
MonitoringMigrateAlertsResponse response = client().execute(
MonitoringMigrateAlertsAction.INSTANCE,
new MonitoringMigrateAlertsRequest()
).actionGet();
// check that migration failed due to monitoring cluster not responding
assertThat(response.getExporters().size(), is(1));
MonitoringMigrateAlertsResponse.ExporterMigrationResult localExporterResult = response.getExporters().get(0);
assertThat(localExporterResult.getName(), is("remoteCluster"));
assertThat(localExporterResult.getType(), is(HttpExporter.TYPE));
assertThat(localExporterResult.isMigrationComplete(), is(false));
// this might be a messier exception in practice like connection refused, but hey, testability
assertThat(localExporterResult.getReason().getMessage(), is("Connection is closed"));
} finally {
stopMonitoring();
webServer.clearRequests();
}
}
public void testRemoteAlertsRemovalFailure() throws Exception {
try {
// start monitoring service
final Settings.Builder exporterSettings = Settings.builder()
.put(MonitoringService.ENABLED.getKey(), true)
// Make sure to not collect ES stats in background. Our web server expects requests in a particular order.
.put("xpack.monitoring.elasticsearch.collection.enabled", false)
.put("xpack.monitoring.exporters.remoteCluster.type", HttpExporter.TYPE)
.put("xpack.monitoring.exporters.remoteCluster.enabled", true)
.put("xpack.monitoring.exporters.remoteCluster.host", webServer.getHostName() + ":" + webServer.getPort())
.put("xpack.monitoring.exporters.remoteCluster.cluster_alerts.management.enabled", true);
// enable http exporter
updateClusterSettings(exporterSettings);
// enqueue a "watcher available" response, but then a "failure to delete watch" response
enqueueResponse(webServer, 200, """
{"features":{"watcher":{"available":true,"enabled":true}}}""");
enqueueResponse(webServer, 500, "{\"error\":{}}");
// call migration api
MonitoringMigrateAlertsResponse response = client().execute(
MonitoringMigrateAlertsAction.INSTANCE,
new MonitoringMigrateAlertsRequest()
).actionGet();
// check that an error is reported while trying to remove a remote watch
assertThat(response.getExporters().size(), is(1));
MonitoringMigrateAlertsResponse.ExporterMigrationResult localExporterResult = response.getExporters().get(0);
assertThat(localExporterResult.getName(), is("remoteCluster"));
assertThat(localExporterResult.getType(), is(HttpExporter.TYPE));
assertThat(localExporterResult.isMigrationComplete(), is(false));
assertThat(localExporterResult.getReason().getMessage(), startsWith("method [DELETE], host ["));
assertThat(
localExporterResult.getReason().getMessage(),
endsWith("status line [HTTP/1.1 500 Internal Server Error]\n{\"error\":{}}")
);
} finally {
stopMonitoring();
webServer.clearRequests();
}
}
public void testRemoteAlertsRemoteDisallowsWatcher() throws Exception {
try {
// start monitoring service
final Settings.Builder exporterSettings = Settings.builder()
.put(MonitoringService.ENABLED.getKey(), true)
// Make sure to not collect ES stats in background. Our web server expects requests in a particular order.
.put("xpack.monitoring.elasticsearch.collection.enabled", false)
.put("xpack.monitoring.exporters.remoteCluster.type", HttpExporter.TYPE)
.put("xpack.monitoring.exporters.remoteCluster.enabled", true)
.put("xpack.monitoring.exporters.remoteCluster.host", webServer.getHostName() + ":" + webServer.getPort())
.put("xpack.monitoring.exporters.remoteCluster.cluster_alerts.management.enabled", true);
// enable http exporter
updateClusterSettings(exporterSettings);
// enqueue a "watcher available" response, but then a "failure to delete watch" response
enqueueWatcherResponses(webServer, false);
// call migration api
MonitoringMigrateAlertsResponse response = client().execute(
MonitoringMigrateAlertsAction.INSTANCE,
new MonitoringMigrateAlertsRequest()
).actionGet();
// Migration is marked as complete since watcher is disabled on remote cluster.
assertThat(response.getExporters().size(), is(1));
MonitoringMigrateAlertsResponse.ExporterMigrationResult localExporterResult = response.getExporters().get(0);
assertThat(localExporterResult.getName(), is("remoteCluster"));
assertThat(localExporterResult.getType(), is(HttpExporter.TYPE));
assertThat(localExporterResult.isMigrationComplete(), is(true));
// ensure responses
assertMonitorWatches(webServer, false);
} finally {
stopMonitoring();
webServer.clearRequests();
}
}
private void ensureInitialLocalResources() throws Exception {
// Should trigger setting up alert watches via LocalExporter#openBulk(...) and
// then eventually to LocalExporter#setupIfElectedMaster(...)
// Sometimes this last method doesn't install watches, because elected master node doesn't export monitor documents.
// and then these assertions here fail.
{
MonitoringBulkRequest request = new MonitoringBulkRequest();
request.add(LocalExporterIntegTests.createMonitoringBulkDoc());
String masterNode = internalCluster().getMasterName();
MonitoringBulkResponse response = client(masterNode).execute(MonitoringBulkAction.INSTANCE, request).actionGet();
assertThat(response.status(), equalTo(RestStatus.OK));
}
waitForWatcherIndices();
assertBusy(() -> {
assertThat(indexExists(".monitoring-*"), is(true));
ensureYellowAndNoInitializingShards(".monitoring-*");
checkMonitoringTemplates();
assertWatchesExist(true);
}, 20, TimeUnit.SECONDS); // Watcher can be slow to allocate all watches required
}
/**
* Checks that the monitoring templates have been created by the local exporter
*/
private void checkMonitoringTemplates() {
final Set<String> templates = new HashSet<>();
templates.add(".monitoring-alerts-7");
templates.add(".monitoring-es");
templates.add(".monitoring-kibana");
templates.add(".monitoring-logstash");
templates.add(".monitoring-beats");
GetIndexTemplatesResponse response = client().admin().indices().prepareGetTemplates(TEST_REQUEST_TIMEOUT, ".monitoring-*").get();
Set<String> actualTemplates = response.getIndexTemplates().stream().map(IndexTemplateMetadata::getName).collect(Collectors.toSet());
assertEquals(templates, actualTemplates);
}
private void assertWatchesExist(boolean exist) {
// Check if watches index exists
if (client().admin().indices().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(".watches").get().getIndices().length == 0) {
fail("Expected [.watches] index with cluster alerts present, but no [.watches] index was found");
}
Arrays.stream(ClusterAlertsUtil.WATCH_IDS)
.map(n -> ClusterAlertsUtil.createUniqueWatchId(clusterService(), n))
.map(watch -> client().execute(GetWatchAction.INSTANCE, new GetWatchRequest(watch)).actionGet())
.filter(r -> r.isFound() != exist)
.findAny()
.ifPresent(r -> fail((exist ? "missing" : "found") + " watch [" + r.getId() + "]"));
}
protected List<String> monitoringTemplateNames() {
return Arrays.stream(MonitoringTemplateRegistry.TEMPLATE_NAMES).collect(Collectors.toList());
}
private void enqueueWatcherResponses(final MockWebServer mockWebServer, final boolean remoteClusterAllowsWatcher) throws IOException {
// if the remote cluster doesn't allow watcher, then we only check for it and we're done
if (remoteClusterAllowsWatcher) {
// X-Pack exists and Watcher can be used
enqueueResponse(mockWebServer, 200, """
{"features":{"watcher":{"available":true,"enabled":true}}}""");
// add delete responses
enqueueDeleteClusterAlertResponses(mockWebServer);
} else {
// X-Pack exists but Watcher just cannot be used
if (randomBoolean()) {
final String responseBody = randomFrom("""
{"features":{"watcher":{"available":false,"enabled":true}}}""", """
{"features":{"watcher":{"available":true,"enabled":false}}}""", "{}");
enqueueResponse(mockWebServer, 200, responseBody);
} else {
// X-Pack is not installed
enqueueResponse(mockWebServer, 404, "{}");
}
}
}
private void enqueueDeleteClusterAlertResponses(final MockWebServer mockWebServer) throws IOException {
for (final String watchId : ClusterAlertsUtil.WATCH_IDS) {
enqueueDeleteClusterAlertResponse(mockWebServer, watchId);
}
}
private void enqueueDeleteClusterAlertResponse(final MockWebServer mockWebServer, final String watchId) throws IOException {
if (randomBoolean()) {
enqueueResponse(mockWebServer, 404, "watch [" + watchId + "] did not exist");
} else {
enqueueResponse(mockWebServer, 200, "watch [" + watchId + "] deleted");
}
}
private void enqueueResponse(MockWebServer mockWebServer, int responseCode, String body) throws IOException {
mockWebServer.enqueue(new MockResponse().setResponseCode(responseCode).setBody(body));
}
private String watcherCheckQueryString() {
return "filter_path=" + WATCHER_CHECK_PARAMETERS.get("filter_path");
}
private String resourceClusterAlertQueryString() {
return "filter_path=" + CLUSTER_ALERT_VERSION_PARAMETERS.get("filter_path");
}
private void assertMonitorWatches(final MockWebServer mockWebServer, final boolean remoteClusterAllowsWatcher) {
MockRequest request = mockWebServer.takeRequest();
// GET /_xpack
assertThat(request.getMethod(), equalTo("GET"));
assertThat(request.getUri().getPath(), equalTo("/_xpack"));
assertThat(request.getUri().getQuery(), equalTo(watcherCheckQueryString()));
if (remoteClusterAllowsWatcher) {
for (final Tuple<String, String> watch : monitoringWatches()) {
final String uniqueWatchId = ClusterAlertsUtil.createUniqueWatchId(clusterService(), watch.v1());
request = mockWebServer.takeRequest();
// GET / PUT if we are allowed to use it
assertThat(request.getMethod(), equalTo("DELETE"));
assertThat(request.getUri().getPath(), equalTo("/_watcher/watch/" + uniqueWatchId));
assertThat(request.getUri().getQuery(), equalTo(resourceClusterAlertQueryString()));
}
}
}
protected void waitForWatcherIndices() throws Exception {
awaitIndexExists(Watch.INDEX);
assertBusy(() -> ensureYellowAndNoInitializingShards(Watch.INDEX));
}
}
| TransportMonitoringMigrateAlertsActionTests |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamWrapperQueryBuilder.java | {
"start": 1042,
"end": 3418
} | class ____ implements QueryBuilder {
public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(
QueryBuilder.class,
"planwrapper",
PlanStreamWrapperQueryBuilder::new
);
private static final TransportVersion ESQL_FIXED_INDEX_LIKE = TransportVersion.fromName("esql_fixed_index_like");
private final Configuration configuration;
private final QueryBuilder next;
public PlanStreamWrapperQueryBuilder(Configuration configuration, QueryBuilder next) {
this.configuration = configuration;
this.next = next;
}
public PlanStreamWrapperQueryBuilder(StreamInput in) throws IOException {
configuration = Configuration.readWithoutTables(in);
PlanStreamInput planStreamInput = new PlanStreamInput(in, in.namedWriteableRegistry(), configuration);
next = planStreamInput.readNamedWriteable(QueryBuilder.class);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
configuration.withoutTables().writeTo(out);
new PlanStreamOutput(out, configuration).writeNamedWriteable(next);
}
@Override
public TransportVersion getMinimalSupportedVersion() {
return ESQL_FIXED_INDEX_LIKE;
}
@Override
public boolean supportsVersion(TransportVersion version) {
return expressionTransportSupported(version);
}
@Override
public Query toQuery(SearchExecutionContext context) throws IOException {
return next.toQuery(context);
}
@Override
public QueryBuilder queryName(String queryName) {
next.queryName(queryName);
return this;
}
@Override
public String queryName() {
return next.queryName();
}
@Override
public float boost() {
return next.boost();
}
@Override
public QueryBuilder boost(float boost) {
next.boost(boost);
return this;
}
@Override
public String getName() {
return getWriteableName();
}
@Override
public String getWriteableName() {
return ENTRY.name;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return next.toXContent(builder, params);
}
public QueryBuilder next() {
return next;
}
}
| PlanStreamWrapperQueryBuilder |
java | apache__camel | core/camel-api/src/main/java/org/apache/camel/component/extension/ComponentVerifierExtensionHelper.java | {
"start": 4301,
"end": 4521
} | class ____ extends ErrorAttribute implements GroupAttribute {
GroupErrorAttribute(String name) {
super(name);
}
}
private ComponentVerifierExtensionHelper() {
}
}
| GroupErrorAttribute |
java | apache__logging-log4j2 | log4j-perf-test/src/main/java/org/apache/logging/log4j/layout/template/json/JsonTemplateLayoutBenchmarkReport.java | {
"start": 2055,
"end": 2576
} | enum ____ {
;
private static final Charset CHARSET = StandardCharsets.UTF_8;
public static void main(final String[] args) throws Exception {
final CliArgs cliArgs = CliArgs.parseArgs(args);
final JmhSetup jmhSetup = JmhSetup.ofJmhResult(cliArgs.jmhResultJsonFile);
final List<JmhSummary> jmhSummaries = JmhSummary.ofJmhResult(cliArgs.jmhResultJsonFile);
dumpReport(cliArgs.outputAdocFile, jmhSetup, jmhSummaries);
}
private static final | JsonTemplateLayoutBenchmarkReport |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/aggregator/AggregateProcessorTest.java | {
"start": 1998,
"end": 21277
} | class ____ extends ContextTestSupport {
private ExecutorService executorService;
@Override
public boolean isUseRouteBuilder() {
return false;
}
@Override
@BeforeEach
public void setUp() throws Exception {
super.setUp();
executorService = Executors.newSingleThreadExecutor();
}
@Test
public void testAggregateProcessorCompletionPredicate() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("A+B+END");
mock.expectedPropertyReceived(Exchange.AGGREGATED_COMPLETED_BY, "predicate");
AsyncProcessor done = new SendProcessor(context.getEndpoint("mock:result"));
Expression corr = header("id");
AggregationStrategy as = new BodyInAggregatingStrategy();
Predicate complete = body().contains("END");
AggregateProcessor ap = new AggregateProcessor(context, done, corr, as, executorService, true);
ap.setCompletionPredicate(complete);
ap.setEagerCheckCompletion(false);
ap.start();
Exchange e1 = new DefaultExchange(context);
e1.getIn().setBody("A");
e1.getIn().setHeader("id", 123);
Exchange e2 = new DefaultExchange(context);
e2.getIn().setBody("B");
e2.getIn().setHeader("id", 123);
Exchange e3 = new DefaultExchange(context);
e3.getIn().setBody("END");
e3.getIn().setHeader("id", 123);
Exchange e4 = new DefaultExchange(context);
e4.getIn().setBody("D");
e4.getIn().setHeader("id", 123);
ap.process(e1);
ap.process(e2);
ap.process(e3);
ap.process(e4);
assertMockEndpointsSatisfied();
ap.stop();
}
@Test
public void testAggregateProcessorCompletionPredicateEager() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("A+B+END");
mock.expectedPropertyReceived(Exchange.AGGREGATED_COMPLETED_BY, "predicate");
AsyncProcessor done = new SendProcessor(context.getEndpoint("mock:result"));
Expression corr = header("id");
AggregationStrategy as = new BodyInAggregatingStrategy();
Predicate complete = body().isEqualTo("END");
AggregateProcessor ap = new AggregateProcessor(context, done, corr, as, executorService, true);
ap.setCompletionPredicate(complete);
ap.setEagerCheckCompletion(true);
ap.start();
Exchange e1 = new DefaultExchange(context);
e1.getIn().setBody("A");
e1.getIn().setHeader("id", 123);
Exchange e2 = new DefaultExchange(context);
e2.getIn().setBody("B");
e2.getIn().setHeader("id", 123);
Exchange e3 = new DefaultExchange(context);
e3.getIn().setBody("END");
e3.getIn().setHeader("id", 123);
Exchange e4 = new DefaultExchange(context);
e4.getIn().setBody("D");
e4.getIn().setHeader("id", 123);
ap.process(e1);
ap.process(e2);
ap.process(e3);
ap.process(e4);
assertMockEndpointsSatisfied();
ap.stop();
}
@Test
public void testAggregateProcessorCompletionAggregatedSize() throws Exception {
doTestAggregateProcessorCompletionAggregatedSize(false);
}
@Test
public void testAggregateProcessorCompletionAggregatedSizeEager() throws Exception {
doTestAggregateProcessorCompletionAggregatedSize(true);
}
private void doTestAggregateProcessorCompletionAggregatedSize(boolean eager) throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("A+B+C");
mock.expectedPropertyReceived(Exchange.AGGREGATED_COMPLETED_BY, "size");
AsyncProcessor done = new SendProcessor(context.getEndpoint("mock:result"));
Expression corr = header("id");
AggregationStrategy as = new BodyInAggregatingStrategy();
AggregateProcessor ap = new AggregateProcessor(context, done, corr, as, executorService, true);
ap.setCompletionSize(3);
ap.setEagerCheckCompletion(eager);
ap.start();
Exchange e1 = new DefaultExchange(context);
e1.getIn().setBody("A");
e1.getIn().setHeader("id", 123);
Exchange e2 = new DefaultExchange(context);
e2.getIn().setBody("B");
e2.getIn().setHeader("id", 123);
Exchange e3 = new DefaultExchange(context);
e3.getIn().setBody("C");
e3.getIn().setHeader("id", 123);
Exchange e4 = new DefaultExchange(context);
e4.getIn().setBody("D");
e4.getIn().setHeader("id", 123);
ap.process(e1);
ap.process(e2);
ap.process(e3);
ap.process(e4);
assertMockEndpointsSatisfied();
ap.stop();
}
@Test
public void testAggregateProcessorCompletionTimeout() throws Exception {
doTestAggregateProcessorCompletionTimeout(false);
}
@Test
public void testAggregateProcessorCompletionTimeoutEager() throws Exception {
doTestAggregateProcessorCompletionTimeout(true);
}
private void doTestAggregateProcessorCompletionTimeout(boolean eager) throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("A+B+C");
mock.expectedPropertyReceived(Exchange.AGGREGATED_COMPLETED_BY, "timeout");
AsyncProcessor done = new SendProcessor(context.getEndpoint("mock:result"));
Expression corr = header("id");
AggregationStrategy as = new BodyInAggregatingStrategy();
AggregateProcessor ap = new AggregateProcessor(context, done, corr, as, executorService, true);
ap.setCompletionTimeout(100);
ap.setEagerCheckCompletion(eager);
ap.setCompletionTimeoutCheckerInterval(10);
ap.start();
Exchange e1 = new DefaultExchange(context);
e1.getIn().setBody("A");
e1.getIn().setHeader("id", 123);
Exchange e2 = new DefaultExchange(context);
e2.getIn().setBody("B");
e2.getIn().setHeader("id", 123);
Exchange e3 = new DefaultExchange(context);
e3.getIn().setBody("C");
e3.getIn().setHeader("id", 123);
Exchange e4 = new DefaultExchange(context);
e4.getIn().setBody("D");
e4.getIn().setHeader("id", 123);
ap.process(e1);
Thread.sleep(5);
ap.process(e2);
Thread.sleep(10);
ap.process(e3);
Thread.sleep(150);
ap.process(e4);
assertMockEndpointsSatisfied();
ap.stop();
}
@Test
public void testAggregateCompletionInterval() throws Exception {
// camel context must be started
context.start();
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("A+B+C", "D");
mock.expectedPropertyReceived(Exchange.AGGREGATED_COMPLETED_BY, "interval");
AsyncProcessor done = new SendProcessor(context.getEndpoint("mock:result"));
Expression corr = header("id");
AggregationStrategy as = new BodyInAggregatingStrategy();
AggregateProcessor ap = new AggregateProcessor(context, done, corr, as, executorService, true);
ap.setCompletionInterval(100);
ap.setCompletionTimeoutCheckerInterval(10);
ap.start();
Exchange e1 = new DefaultExchange(context);
e1.getIn().setBody("A");
e1.getIn().setHeader("id", 123);
Exchange e2 = new DefaultExchange(context);
e2.getIn().setBody("B");
e2.getIn().setHeader("id", 123);
Exchange e3 = new DefaultExchange(context);
e3.getIn().setBody("C");
e3.getIn().setHeader("id", 123);
Exchange e4 = new DefaultExchange(context);
e4.getIn().setBody("D");
e4.getIn().setHeader("id", 123);
ap.process(e1);
ap.process(e2);
ap.process(e3);
Thread.sleep(250);
ap.process(e4);
assertMockEndpointsSatisfied();
ap.stop();
}
@Test
public void testAggregateIgnoreInvalidCorrelationKey() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("A+C+END");
AsyncProcessor done = new SendProcessor(context.getEndpoint("mock:result"));
Expression corr = header("id");
AggregationStrategy as = new BodyInAggregatingStrategy();
Predicate complete = body().contains("END");
AggregateProcessor ap = new AggregateProcessor(context, done, corr, as, executorService, true);
ap.setCompletionPredicate(complete);
ap.setIgnoreInvalidCorrelationKeys(true);
ap.start();
Exchange e1 = new DefaultExchange(context);
e1.getIn().setBody("A");
e1.getIn().setHeader("id", 123);
Exchange e2 = new DefaultExchange(context);
e2.getIn().setBody("B");
Exchange e3 = new DefaultExchange(context);
e3.getIn().setBody("C");
e3.getIn().setHeader("id", 123);
Exchange e4 = new DefaultExchange(context);
e4.getIn().setBody("END");
e4.getIn().setHeader("id", 123);
ap.process(e1);
ap.process(e2);
ap.process(e3);
ap.process(e4);
assertMockEndpointsSatisfied();
ap.stop();
}
@Test
public void testAggregateBadCorrelationKey() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("A+C+END");
AsyncProcessor done = new SendProcessor(context.getEndpoint("mock:result"));
Expression corr = header("id");
AggregationStrategy as = new BodyInAggregatingStrategy();
Predicate complete = body().contains("END");
AggregateProcessor ap = new AggregateProcessor(context, done, corr, as, executorService, true);
ap.setCompletionPredicate(complete);
ap.start();
Exchange e1 = new DefaultExchange(context);
e1.getIn().setBody("A");
e1.getIn().setHeader("id", 123);
Exchange e2 = new DefaultExchange(context);
e2.getIn().setBody("B");
Exchange e3 = new DefaultExchange(context);
e3.getIn().setBody("C");
e3.getIn().setHeader("id", 123);
Exchange e4 = new DefaultExchange(context);
e4.getIn().setBody("END");
e4.getIn().setHeader("id", 123);
ap.process(e1);
ap.process(e2);
Exception e = e2.getException();
assertNotNull(e);
assertTrue(e.getMessage().startsWith("Invalid correlation key."));
ap.process(e3);
ap.process(e4);
assertMockEndpointsSatisfied();
ap.stop();
}
@Test
public void testAggregateCloseCorrelationKeyOnCompletion() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("A+B+END");
AsyncProcessor done = new SendProcessor(context.getEndpoint("mock:result"));
Expression corr = header("id");
AggregationStrategy as = new BodyInAggregatingStrategy();
Predicate complete = body().contains("END");
AggregateProcessor ap = new AggregateProcessor(context, done, corr, as, executorService, true);
ap.setCompletionPredicate(complete);
ap.setCloseCorrelationKeyOnCompletion(1000);
ap.start();
Exchange e1 = new DefaultExchange(context);
e1.getIn().setBody("A");
e1.getIn().setHeader("id", 123);
Exchange e2 = new DefaultExchange(context);
e2.getIn().setBody("B");
e2.getIn().setHeader("id", 123);
Exchange e3 = new DefaultExchange(context);
e3.getIn().setBody("END");
e3.getIn().setHeader("id", 123);
Exchange e4 = new DefaultExchange(context);
e4.getIn().setBody("C");
e4.getIn().setHeader("id", 123);
ap.process(e1);
ap.process(e2);
ap.process(e3);
ap.process(e4);
Exception e = e4.getException();
assertNotNull(e);
assertTrue(e.getMessage().startsWith("The correlation key [123] has been closed."));
assertMockEndpointsSatisfied();
ap.stop();
}
@Test
public void testAggregateUseBatchSizeFromConsumer() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("A+B", "C+D+E");
mock.expectedPropertyReceived(Exchange.AGGREGATED_COMPLETED_BY, "consumer");
AsyncProcessor done = new SendProcessor(context.getEndpoint("mock:result"));
Expression corr = header("id");
AggregationStrategy as = new BodyInAggregatingStrategy();
AggregateProcessor ap = new AggregateProcessor(context, done, corr, as, executorService, true);
ap.setCompletionSize(100);
ap.setCompletionFromBatchConsumer(true);
ap.start();
Exchange e1 = new DefaultExchange(context);
e1.getIn().setBody("A");
e1.getIn().setHeader("id", 123);
e1.setProperty(Exchange.BATCH_INDEX, 0);
e1.setProperty(Exchange.BATCH_SIZE, 2);
e1.setProperty(Exchange.BATCH_COMPLETE, false);
Exchange e2 = new DefaultExchange(context);
e2.getIn().setBody("B");
e2.getIn().setHeader("id", 123);
e2.setProperty(Exchange.BATCH_INDEX, 1);
e2.setProperty(Exchange.BATCH_SIZE, 2);
e2.setProperty(Exchange.BATCH_COMPLETE, true);
Exchange e3 = new DefaultExchange(context);
e3.getIn().setBody("C");
e3.getIn().setHeader("id", 123);
e3.setProperty(Exchange.BATCH_INDEX, 0);
e3.setProperty(Exchange.BATCH_SIZE, 3);
e3.setProperty(Exchange.BATCH_COMPLETE, false);
Exchange e4 = new DefaultExchange(context);
e4.getIn().setBody("D");
e4.getIn().setHeader("id", 123);
e4.setProperty(Exchange.BATCH_INDEX, 1);
e4.setProperty(Exchange.BATCH_SIZE, 3);
e4.setProperty(Exchange.BATCH_COMPLETE, false);
Exchange e5 = new DefaultExchange(context);
e5.getIn().setBody("E");
e5.getIn().setHeader("id", 123);
e5.setProperty(Exchange.BATCH_INDEX, 2);
e5.setProperty(Exchange.BATCH_SIZE, 3);
e5.setProperty(Exchange.BATCH_COMPLETE, true);
ap.process(e1);
ap.process(e2);
ap.process(e3);
ap.process(e4);
ap.process(e5);
assertMockEndpointsSatisfied();
ap.stop();
}
@Test
public void testAggregateLogFailedExchange() throws Exception {
doTestAggregateLogFailedExchange(null);
}
@Test
public void testAggregateHandleFailedExchange() throws Exception {
final AtomicBoolean tested = new AtomicBoolean();
ExceptionHandler myHandler = new ExceptionHandler() {
public void handleException(Throwable exception) {
}
public void handleException(String message, Throwable exception) {
}
public void handleException(String message, Exchange exchange, Throwable exception) {
assertEquals("Error processing aggregated exchange", message);
assertEquals("B+Kaboom+END", exchange.getIn().getBody());
assertEquals("Damn", exception.getMessage());
tested.set(true);
}
};
doTestAggregateLogFailedExchange(myHandler);
assertTrue(tested.get());
}
private void doTestAggregateLogFailedExchange(ExceptionHandler handler) throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("A+END");
AsyncProcessor done = new AsyncProcessorSupport() {
public boolean process(Exchange exchange, AsyncCallback callback) {
try {
if (exchange.getIn().getBody(String.class).contains("Kaboom")) {
throw new IllegalArgumentException("Damn");
} else {
SendProcessor send = new SendProcessor(context.getEndpoint("mock:result"));
send.start();
send.process(exchange, callback);
}
} catch (Exception e) {
exchange.setException(e);
callback.done(false);
}
return false;
}
};
Expression corr = header("id");
AggregationStrategy as = new BodyInAggregatingStrategy();
AggregateProcessor ap = new AggregateProcessor(context, done, corr, as, executorService, true);
ap.setEagerCheckCompletion(true);
ap.setCompletionPredicate(body().isEqualTo("END"));
if (handler != null) {
ap.setExceptionHandler(handler);
}
ap.start();
Exchange e1 = new DefaultExchange(context);
e1.getIn().setBody("A");
e1.getIn().setHeader("id", 123);
Exchange e2 = new DefaultExchange(context);
e2.getIn().setBody("B");
e2.getIn().setHeader("id", 456);
Exchange e3 = new DefaultExchange(context);
e3.getIn().setBody("Kaboom");
e3.getIn().setHeader("id", 456);
Exchange e4 = new DefaultExchange(context);
e4.getIn().setBody("END");
e4.getIn().setHeader("id", 456);
Exchange e5 = new DefaultExchange(context);
e5.getIn().setBody("END");
e5.getIn().setHeader("id", 123);
ap.process(e1);
ap.process(e2);
ap.process(e3);
ap.process(e4);
ap.process(e5);
assertMockEndpointsSatisfied();
ap.stop();
}
@Test
public void testAggregateForceCompletion() throws Exception {
// camel context must be started
context.start();
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceivedInAnyOrder("B+END", "A+END");
mock.expectedPropertyReceived(Exchange.AGGREGATED_COMPLETED_BY, "force");
AsyncProcessor done = new SendProcessor(context.getEndpoint("mock:result"));
Expression corr = header("id");
AggregationStrategy as = new BodyInAggregatingStrategy();
AggregateProcessor ap = new AggregateProcessor(context, done, corr, as, executorService, true);
ap.setCompletionSize(10);
ap.start();
Exchange e1 = new DefaultExchange(context);
e1.getIn().setBody("A");
e1.getIn().setHeader("id", 123);
Exchange e2 = new DefaultExchange(context);
e2.getIn().setBody("B");
e2.getIn().setHeader("id", 456);
Exchange e3 = new DefaultExchange(context);
e3.getIn().setBody("END");
e3.getIn().setHeader("id", 123);
Exchange e4 = new DefaultExchange(context);
e4.getIn().setBody("END");
e4.getIn().setHeader("id", 456);
ap.process(e1);
ap.process(e2);
ap.process(e3);
ap.process(e4);
assertEquals(0, mock.getExchanges().size(), "should not have completed yet");
ap.forceCompletionOfAllGroups();
assertMockEndpointsSatisfied();
ap.stop();
}
}
| AggregateProcessorTest |
java | apache__kafka | server-common/src/main/java/org/apache/kafka/server/share/persister/PersisterStateManager.java | {
"start": 4147,
"end": 4362
} | class ____ various handler classes corresponding to share
* state RPCs. It also holds an {@link InterBrokerSendThread} specialization
* which manages the sending the RPC requests over the network.
* This | encapsulates |
java | micronaut-projects__micronaut-core | inject-java/src/main/java/io/micronaut/annotation/processing/visitor/JavaAnnotationElement.java | {
"start": 944,
"end": 1508
} | class ____ extends JavaClassElement implements AnnotationElement {
/**
* @param nativeElement The native element
* @param annotationMetadataFactory The annotation metadata factory
* @param visitorContext The visitor context
*/
JavaAnnotationElement(JavaNativeElement.Class nativeElement,
ElementAnnotationMetadataFactory annotationMetadataFactory,
JavaVisitorContext visitorContext) {
super(nativeElement, annotationMetadataFactory, visitorContext);
}
}
| JavaAnnotationElement |
java | apache__camel | core/camel-core-catalog/src/main/java/org/apache/camel/catalog/impl/AbstractCamelCatalog.java | {
"start": 54307,
"end": 73878
} | enum ____ style
if (value.equalsIgnoreCase(s) || dashEC.equalsIgnoreCase(s) || valueEC.equalsIgnoreCase(s)) {
found = true;
break;
}
}
if (!found) {
handleNotFound(result, value, longKey, enums);
}
}
String javaType = row.getJavaType();
if (!optionPlaceholder && !lookup && javaType != null
&& (javaType.startsWith("java.util.Map") || javaType.startsWith("java.util.Properties"))) {
// there must be a valid suffix
if (isValidSuffix(suffix)) {
result.addInvalidMap(longKey, value);
} else if (suffix.startsWith("[") && !suffix.contains("]")) {
result.addInvalidMap(longKey, value);
}
}
if (!optionPlaceholder && !lookup && javaType != null && "array".equals(row.getType())) {
// there must be a suffix and it must be using [] style
if (isValidSuffix(suffix)) {
result.addInvalidArray(longKey, value);
} else if (!suffix.startsWith("[") && !suffix.contains("]")) {
result.addInvalidArray(longKey, value);
} else {
String index = StringHelper.before(suffix.substring(1), "]");
// value must be an integer
boolean valid = validateInteger(index);
if (!valid) {
result.addInvalidInteger(longKey, index);
}
}
}
}
}
private static boolean isValidSuffix(String suffix) {
return suffix == null || suffix.isEmpty() || suffix.equals(".");
}
private static boolean acceptConfigurationPropertyKey(String key) {
if (key == null) {
return false;
}
return key.startsWith("camel.component.")
|| key.startsWith("camel.dataformat.")
|| key.startsWith("camel.language.")
|| key.startsWith("camel.main.")
|| key.startsWith("camel.resilience4j.")
|| key.startsWith("camel.faulttolerance.")
|| key.startsWith("camel.threadpool.")
|| key.startsWith("camel.health.")
|| key.startsWith("camel.lra.")
|| key.startsWith("camel.rest.");
}
private LanguageValidationResult doValidateSimple(ClassLoader classLoader, String simple, boolean predicate) {
if (classLoader == null) {
classLoader = getClass().getClassLoader();
}
// if there are {{ }}} property placeholders then we need to resolve them to something else
// as the simple parse cannot resolve them before parsing as we dont run the actual Camel application
// with property placeholders setup so we need to dummy this by replace the {{ }} to something else
// therefore we use a more unlikely character: {{XXX}} to ~^XXX^~
String resolved = simple.replaceAll("\\{\\{(.+)\\}\\}", "~^$1^~");
LanguageValidationResult answer = new LanguageValidationResult(simple);
Object context;
Object instance = null;
Class<?> clazz;
try {
// need a simple camel context for the simple language parser to be able to parse
clazz = classLoader.loadClass("org.apache.camel.impl.engine.SimpleCamelContext");
context = clazz.getDeclaredConstructor(boolean.class).newInstance(false);
clazz = classLoader.loadClass("org.apache.camel.language.simple.SimpleLanguage");
instance = clazz.getDeclaredConstructor().newInstance();
clazz = classLoader.loadClass("org.apache.camel.CamelContext");
instance.getClass().getMethod("setCamelContext", clazz).invoke(instance, context);
} catch (Exception e) {
clazz = null;
answer.setError(e.getMessage());
}
if (clazz != null) {
Throwable cause = null;
try {
if (predicate) {
instance.getClass().getMethod("createPredicate", String.class).invoke(instance, resolved);
} else {
instance.getClass().getMethod("createExpression", String.class).invoke(instance, resolved);
}
} catch (InvocationTargetException e) {
cause = e.getTargetException();
} catch (Exception e) {
cause = e;
}
if (cause != null) {
// reverse ~^XXX^~ back to {{XXX}}
String errMsg = cause.getMessage();
errMsg = errMsg.replaceAll("\\~\\^(.+)\\^\\~", "{{$1}}");
answer.setError(errMsg);
// is it simple parser exception then we can grab the index where the problem is
if (cause.getClass().getName().equals("org.apache.camel.language.simple.types.SimpleIllegalSyntaxException")
|| cause.getClass().getName().equals("org.apache.camel.language.simple.types.SimpleParserException")) {
try {
// we need to grab the index field from those simple parser exceptions
Method method = cause.getClass().getMethod("getIndex");
Object result = method.invoke(cause);
if (result != null) {
int index = (int) result;
answer.setIndex(index);
}
} catch (Exception i) {
// ignore
}
}
// we need to grab the short message field from this simple syntax exception
if (cause.getClass().getName().equals("org.apache.camel.language.simple.types.SimpleIllegalSyntaxException")) {
try {
Method method = cause.getClass().getMethod("getShortMessage");
Object result = method.invoke(cause);
if (result != null) {
String msg = (String) result;
answer.setShortError(msg);
}
} catch (Exception i) {
// ignore
}
if (answer.getShortError() == null) {
// fallback and try to make existing message short instead
String msg = answer.getError();
// grab everything before " at location " which would be regarded as the short message
int idx = msg.indexOf(" at location ");
if (idx > 0) {
msg = msg.substring(0, idx);
answer.setShortError(msg);
}
}
}
}
}
return answer;
}
private LanguageValidationResult doValidateGroovy(ClassLoader classLoader, String groovy, boolean predicate) {
if (classLoader == null) {
classLoader = getClass().getClassLoader();
}
// if there are {{ }}} property placeholders then we need to resolve them to something else
// as the simple parse cannot resolve them before parsing as we dont run the actual Camel application
// with property placeholders setup so we need to dummy this by replace the {{ }} to something else
// therefore we use a more unlikely character: {{XXX}} to ~^XXX^~
String resolved = groovy.replaceAll("\\{\\{(.+)\\}\\}", "~^$1^~");
LanguageValidationResult answer = new LanguageValidationResult(groovy);
Object context;
Object instance = null;
Class<?> clazz;
try {
// need a simple camel context for the groovy language parser to be able to parse
clazz = classLoader.loadClass("org.apache.camel.impl.engine.SimpleCamelContext");
context = clazz.getDeclaredConstructor(boolean.class).newInstance(false);
clazz = classLoader.loadClass("org.apache.camel.language.groovy.GroovyLanguage");
instance = clazz.getDeclaredConstructor().newInstance();
clazz = classLoader.loadClass("org.apache.camel.CamelContext");
instance.getClass().getMethod("setCamelContext", clazz).invoke(instance, context);
} catch (Exception e) {
clazz = null;
answer.setError(e.getMessage());
}
if (clazz != null) {
Throwable cause = null;
try {
if (predicate) {
instance.getClass().getMethod("validatePredicate", String.class).invoke(instance, resolved);
} else {
instance.getClass().getMethod("validateExpression", String.class).invoke(instance, resolved);
}
} catch (InvocationTargetException e) {
cause = e.getTargetException();
} catch (Exception e) {
cause = e;
}
if (cause != null) {
// reverse ~^XXX^~ back to {{XXX}}
String errMsg = cause.getMessage();
errMsg = errMsg.replaceAll("\\~\\^(.+)\\^\\~", "{{$1}}");
answer.setError(errMsg);
// is it simple parser exception then we can grab the index where the problem is
if (cause.getClass().getName().equals("org.apache.camel.language.groovy.GroovyValidationException")) {
try {
// we need to grab the index field from those simple parser exceptions
Method method = cause.getClass().getMethod("getIndex");
Object result = method.invoke(cause);
if (result != null) {
int index = (int) result;
answer.setIndex(index);
}
} catch (Exception i) {
// ignore
}
}
// we need to grab the short message field from this simple syntax exception
if (answer.getShortError() == null) {
// fallback and try to make existing message short instead
String msg = answer.getError();
// grab everything before " @ " which would be regarded as the short message
LineNumberReader lnr = new LineNumberReader(new StringReader(msg));
try {
String line = lnr.readLine();
do {
if (line.contains(" @ ")) {
// skip leading Scrip_xxxx.groovy: N:
if (line.startsWith("Script_") && StringHelper.countChar(line, ':') > 2) {
line = StringHelper.after(line, ":", line);
line = StringHelper.after(line, ":", line);
line = line.trim();
}
answer.setShortError(line);
break;
}
line = lnr.readLine();
} while (line != null);
} catch (Exception e) {
// ignore
}
}
}
}
return answer;
}
public LanguageValidationResult validateLanguagePredicate(ClassLoader classLoader, String language, String text) {
if ("simple".equals(language)) {
return doValidateSimple(classLoader, text, true);
} else if ("groovy".equals(language)) {
return doValidateGroovy(classLoader, text, true);
} else {
return doValidateLanguage(classLoader, language, text, true);
}
}
public LanguageValidationResult validateLanguageExpression(ClassLoader classLoader, String language, String text) {
if ("simple".equals(language)) {
return doValidateSimple(classLoader, text, false);
} else if ("groovy".equals(language)) {
return doValidateGroovy(classLoader, text, false);
} else {
return doValidateLanguage(classLoader, language, text, false);
}
}
private LanguageValidationResult doValidateLanguage(
ClassLoader classLoader, String language, String text, boolean predicate) {
if (classLoader == null) {
classLoader = getClass().getClassLoader();
}
LanguageValidationResult answer = new LanguageValidationResult(text);
Map<String, Object> options = null;
if (language.contains("?")) {
String query = URISupport.extractQuery(language);
language = StringHelper.before(language, "?");
try {
options = URISupport.parseQuery(query);
} catch (Exception e) {
answer.setError("Cannot parse language options: " + query);
return answer;
}
}
LanguageModel model = languageModel(language);
if (model == null) {
answer.setError("Unknown language " + language);
return answer;
}
String className = model.getJavaType();
if (className == null) {
answer.setError("Cannot find javaType for language " + language);
return answer;
}
Object instance = null;
Class<?> clazz = null;
try {
clazz = classLoader.loadClass(className);
instance = clazz.getDeclaredConstructor().newInstance();
} catch (Exception e) {
// ignore
}
// set options on the language
if (options != null) {
final Map<String, Object> fOptions = options;
final Object fInstance = instance;
ReflectionHelper.doWithFields(clazz, field -> {
Object value = fOptions.get(field.getName());
if (value != null) {
ReflectionHelper.setField(field, fInstance, value);
}
});
}
if (clazz != null && instance != null) {
Throwable cause = null;
Object obj;
try {
try {
// favour using the validate method if present as this is for tooling usage
if (predicate) {
instance.getClass().getMethod("validatePredicate", String.class).invoke(instance, text);
} else {
instance.getClass().getMethod("validateExpression", String.class).invoke(instance, text);
}
return answer;
} catch (NoSuchMethodException e) {
// ignore
}
if (predicate) {
instance.getClass().getMethod("createPredicate", String.class).invoke(instance, text);
} else {
instance.getClass().getMethod("createExpression", String.class).invoke(instance, text);
}
} catch (InvocationTargetException e) {
cause = e.getTargetException();
} catch (Exception e) {
cause = e;
}
if (cause != null) {
answer.setError(cause.getMessage());
}
}
return answer;
}
/**
* Special logic for log endpoints to deal when showAll=true
*/
private Map<String, String> filterProperties(String scheme, Map<String, String> options) {
if ("log".equals(scheme)) {
String showAll = options.get("showAll");
if ("true".equals(showAll)) {
Map<String, String> filtered = new LinkedHashMap<>();
// remove all the other showXXX options when showAll=true
for (Map.Entry<String, String> entry : options.entrySet()) {
String key = entry.getKey();
boolean skip = key.startsWith("show") && !key.equals("showAll");
if (!skip) {
filtered.put(key, entry.getValue());
}
}
return filtered;
}
}
// use as-is
return options;
}
private static boolean validateInteger(String value) {
boolean valid = false;
try {
Integer.parseInt(value);
valid = true;
} catch (Exception e) {
// ignore
}
return valid;
}
private static boolean validateDuration(String value) {
boolean valid = false;
try {
Long.parseLong(value);
valid = true;
} catch (Exception e) {
// ignore
}
if (!valid) {
try {
if (value.startsWith("P") || value.startsWith("-P") || value.startsWith("p") || value.startsWith("-p")) {
// its a duration
Duration.parse(value);
} else {
// it may be a time pattern, such as 5s for 5 seconds = 5000
TimePatternConverter.toMilliSeconds(value);
}
valid = true;
} catch (Exception e) {
// ignore
}
}
return valid;
}
private static String stripOptionalPrefixFromName(Map<String, BaseOptionModel> rows, String name) {
for (BaseOptionModel row : rows.values()) {
String optionalPrefix = row.getOptionalPrefix();
if (optionalPrefix != null && !optionalPrefix.isEmpty() && name.startsWith(optionalPrefix)) {
// try again
return stripOptionalPrefixFromName(rows, name.substring(optionalPrefix.length()));
} else {
if (name.equalsIgnoreCase(row.getName())) {
break;
}
}
}
return name;
}
private static String getPropertyNameFromNameWithPrefix(Map<String, BaseOptionModel> rows, String name) {
for (BaseOptionModel row : rows.values()) {
String prefix = row.getPrefix();
if (prefix != null && !prefix.isEmpty() && name.startsWith(prefix)) {
return row.getName();
}
}
return null;
}
/**
* Converts the string from dash format into camel case (hello-great-world -> helloGreatWorld)
*
* @param text the string
* @return the string camel cased
*/
private static String dashToCamelCase(String text) {
if (text == null) {
return null;
}
if (!isDashed(text)) {
return text;
}
StringBuilder sb = new StringBuilder();
for (int i = 0; i < text.length(); i++) {
char c = text.charAt(i);
if (c == '-') {
i++;
sb.append(Character.toUpperCase(text.charAt(i)));
} else {
sb.append(c);
}
}
return sb.toString();
}
}
| naming |
java | netty__netty | transport/src/main/java/io/netty/channel/ChannelException.java | {
"start": 1888,
"end": 2401
} | class ____ extends ChannelException {
private static final long serialVersionUID = -6384642137753538579L;
StacklessChannelException(String message, Throwable cause, boolean shared) {
super(message, cause, shared);
}
// Override fillInStackTrace() so we not populate the backtrace via a native call and so leak the
// Classloader.
@Override
public Throwable fillInStackTrace() {
return this;
}
}
}
| StacklessChannelException |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/action/support/replication/ReplicationSplitHelperTests.java | {
"start": 28847,
"end": 29031
} | class ____ extends ReplicationResponse {
TestResponse() {
setShardInfo(ReplicationResponse.ShardInfo.EMPTY);
}
}
private abstract static | TestResponse |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerJsonReport.java | {
"start": 1264,
"end": 2650
} | class ____ {
@JsonProperty("SlowNode")
private final String slowNode;
@JsonProperty("SlowPeerLatencyWithReportingNodes")
private final SortedSet<SlowPeerLatencyWithReportingNode> slowPeerLatencyWithReportingNodes;
SlowPeerJsonReport(
@JsonProperty("SlowNode")
String slowNode,
@JsonProperty("SlowPeerLatencyWithReportingNodes")
SortedSet<SlowPeerLatencyWithReportingNode> slowPeerLatencyWithReportingNodes) {
this.slowNode = slowNode;
this.slowPeerLatencyWithReportingNodes = slowPeerLatencyWithReportingNodes;
}
public String getSlowNode() {
return slowNode;
}
public SortedSet<SlowPeerLatencyWithReportingNode> getSlowPeerLatencyWithReportingNodes() {
return slowPeerLatencyWithReportingNodes;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
SlowPeerJsonReport that = (SlowPeerJsonReport) o;
return new EqualsBuilder()
.append(slowNode, that.slowNode)
.append(slowPeerLatencyWithReportingNodes, that.slowPeerLatencyWithReportingNodes)
.isEquals();
}
@Override
public int hashCode() {
return new HashCodeBuilder(17, 37)
.append(slowNode)
.append(slowPeerLatencyWithReportingNodes)
.toHashCode();
}
}
| SlowPeerJsonReport |
java | spring-projects__spring-security | oauth2/oauth2-client/src/main/java/org/springframework/security/oauth2/client/jackson2/OAuth2AuthorizedClientMixin.java | {
"start": 1279,
"end": 1907
} | class ____ used to serialize/deserialize {@link OAuth2AuthorizedClient}.
*
* @author Joe Grandja
* @since 5.3
* @see OAuth2AuthorizedClient
* @see OAuth2ClientJackson2Module
* @deprecated as of 7.0 in favor of
* {@code org.springframework.security.oauth2.client.jackson.OAuth2AuthorizedClientMixin}
* based on Jackson 3
*/
@Deprecated(forRemoval = true)
@JsonTypeInfo(use = JsonTypeInfo.Id.CLASS)
@JsonAutoDetect(fieldVisibility = JsonAutoDetect.Visibility.ANY, getterVisibility = JsonAutoDetect.Visibility.NONE,
isGetterVisibility = JsonAutoDetect.Visibility.NONE)
@JsonIgnoreProperties(ignoreUnknown = true)
abstract | is |
java | micronaut-projects__micronaut-core | core-processor/src/main/java/io/micronaut/inject/writer/BeanDefinitionWriter.java | {
"start": 257459,
"end": 259584
} | interface ____ {
boolean hasInjectScope();
}
private record InjectMethodBuildCommand(TypedElement declaringType, MethodElement methodElement,
boolean requiresReflection, int methodIndex) {
}
private record InjectMethodSignature(
VariableDef.This aThis,
List<VariableDef.MethodParameter> methodParameters,
VariableDef beanResolutionContext,
VariableDef beanContext,
VariableDef instanceVar
) {
private InjectMethodSignature(VariableDef.This aThis,
List<VariableDef.MethodParameter> methodParameters,
VariableDef instanceVar) {
this(aThis, methodParameters, methodParameters.get(0), methodParameters.get(1), instanceVar);
}
}
private record StaticBlock(@NonNull
StatementDef statement,
@NonNull
FieldDef annotationMetadataField,
@NonNull
FieldDef failedInitializationField,
@NonNull
FieldDef constructorRefField,
@Nullable
FieldDef injectionMethodsField,
@Nullable
FieldDef injectionFieldsField,
@Nullable
FieldDef annotationInjectionsFieldType,
@Nullable
FieldDef typeArgumentsField,
@Nullable
FieldDef executableMethodsField,
@NonNull
FieldDef precalculatedInfoField,
@Nullable
FieldDef preStartConditionsField,
@Nullable
FieldDef postStartConditionsField) {
}
}
| InjectMethodCommand |
java | lettuce-io__lettuce-core | src/test/java/io/lettuce/scenario/ConnectionInterruptionReactiveTest.java | {
"start": 2812,
"end": 6894
} | class
____ tracker = ReconnectionTimingTracker.withName("Reactive Commands").trackWithEventBus(client);
StatefulRedisConnection<String, String> connection = client.connect();
// Also track with state listener for comparison
tracker.trackWithStateListener(connection);
RedisReactiveCommands<String, String> reactive = connection.reactive();
String keyName = "counter";
// Setup: Set initial counter value
StepVerifier.create(reactive.set(keyName, "0")).expectNext("OK").verifyComplete();
AtomicLong commandsSubmitted = new AtomicLong();
List<Throwable> capturedExceptions = new CopyOnWriteArrayList<>();
// Start a flux that imitates an application using the client
Disposable subscription = Flux.interval(Duration.ofMillis(100)).flatMap(i -> reactive.incr(keyName)
// We should count all attempts, because Lettuce retransmits failed commands
.doFinally(value -> {
commandsSubmitted.incrementAndGet();
log.debug("Commands submitted {}", commandsSubmitted.get());
}).onErrorResume(e -> {
log.warn("Error executing command", e);
capturedExceptions.add(e);
return Mono.empty();
})).subscribe();
// Trigger the fault injection
Map<String, Object> params = new HashMap<>();
params.put("bdb_id", standalone.getBdbId());
Mono<Boolean> actionCompleted = faultClient.triggerActionAndWait(triggerAction, params, CHECK_INTERVAL, DELAY_AFTER,
DEFAULT_TIMEOUT);
StepVerifier.create(actionCompleted).expectNext(true).verifyComplete();
// Stop the command execution
subscription.dispose();
// Verify results
StepVerifier.create(reactive.get(keyName).map(Long::parseLong)).consumeNextWith(value -> {
log.info("Final counter value: {}, commands submitted: {}", value, commandsSubmitted.get());
}).verifyComplete();
log.info("Captured exceptions: {}", capturedExceptions);
// Log and assert reconnection timing metrics using the tracker
tracker.logStats();
assertThat(tracker.hasReconnections()).isTrue();
assertThat(tracker.getStats().getTotalEventBusReconnectionTime())
.isLessThan(Duration.ofSeconds(expectedReconnectionDurationInSeconds));
assertThat(tracker.getStats().getTotalStateListenerReconnectionTime())
.isLessThan(Duration.ofSeconds(expectedReconnectionDurationInSeconds));
tracker.dispose();
connection.close();
client.shutdown();
}
@ParameterizedTest(name = "PubSub Reconnection on {0}")
@CsvSource({ "dmc_restart,3", "network_failure,5" })
@DisplayName("PubSub connections should automatically reconnect and resume message delivery during failures")
public void testWithPubSub(String triggerAction, int expectedReconnectionDurationInSeconds) {
RedisURI uri = RedisURI.builder(RedisURI.create(standalone.getEndpoints().get(0)))
.withAuthentication(standalone.getUsername(), standalone.getPassword()).build();
RedisClient subscriberClient = RedisClient.create(uri);
subscriberClient.setOptions(RecommendedSettingsProvider.forConnectionInterruption());
RedisClient publisherClient = RedisClient.create(uri);
publisherClient.setOptions(RecommendedSettingsProvider.forConnectionInterruption());
StatefulRedisConnection<String, String> publisherConnection = publisherClient.connect();
RedisReactiveCommands<String, String> publisherReactive = publisherConnection.reactive();
AtomicLong messagesSent = new AtomicLong();
AtomicLong messagesReceived = new AtomicLong();
List<Throwable> subscriberExceptions = new CopyOnWriteArrayList<>();
List<String> receivedMessages = new CopyOnWriteArrayList<>();
// Track reconnection timing using the utility | ReconnectionTimingTracker |
java | apache__camel | core/camel-core-model/src/main/java/org/apache/camel/model/BasicOutputExpressionNode.java | {
"start": 1529,
"end": 2599
} | class ____ extends BasicExpressionNode<BasicOutputExpressionNode>
implements Block, OutputNode {
@XmlElementRef
private List<ProcessorDefinition<?>> outputs = new ArrayList<>();
public BasicOutputExpressionNode() {
}
public BasicOutputExpressionNode(BasicOutputExpressionNode source) {
super(source);
this.outputs = ProcessorDefinitionHelper.deepCopyDefinitions(source.outputs);
}
public BasicOutputExpressionNode(ExpressionDefinition expression) {
super(expression);
}
public BasicOutputExpressionNode(Expression expression) {
super(expression);
}
public BasicOutputExpressionNode(Predicate predicate) {
super(predicate);
}
@Override
public List<ProcessorDefinition<?>> getOutputs() {
return outputs;
}
public void setOutputs(List<ProcessorDefinition<?>> outputs) {
this.outputs = outputs;
}
@Override
public void addOutput(ProcessorDefinition<?> output) {
this.outputs.add(output);
}
}
| BasicOutputExpressionNode |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/impl/LanguageCamelContextAwareTest.java | {
"start": 1224,
"end": 1824
} | class ____ extends ContextTestSupport {
private final MyLanguage my = new MyLanguage();
@Override
protected Registry createCamelRegistry() throws Exception {
Registry registry = super.createCamelRegistry();
registry.bind("my", my);
return registry;
}
@Test
public void testLanguageCamelContextAware() {
Language lan = context.resolveLanguage("my");
assertNotNull(lan);
MyLanguage me = assertIsInstanceOf(MyLanguage.class, lan);
assertNotNull(me.getCamelContext());
}
private static | LanguageCamelContextAwareTest |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/test/typeserializerupgrade/PojoSerializerUpgradeTest.java | {
"start": 5729,
"end": 6638
} | class ____ { "
+ "private String b; "
+ "private long a; "
+ "public long getA() { return a;} "
+ "public void setA(long value) { a = value; }"
+ "public String getB() { return b; }"
+ "public void setB(String value) { b = value; }"
+ "@Override public boolean equals(Object obj) { if (obj instanceof Pojo) { Pojo other = (Pojo) obj; return a == other.a && b.equals(other.b);} else { return false; }}"
+ "@Override public int hashCode() { return Objects.hash(a, b); } "
+ "@Override public String toString() {return \"(\" + a + \", \" + b + \")\";}}";
// changed type of a field which should not be recoverable
private static final String SOURCE_C =
"import java.util.Objects;"
+ "public | Pojo |
java | elastic__elasticsearch | x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/jinaai/JinaAIService.java | {
"start": 15030,
"end": 17261
} | class ____ {
public static InferenceServiceConfiguration get() {
return configuration.getOrCompute();
}
private static final LazyInitializable<InferenceServiceConfiguration, RuntimeException> configuration = new LazyInitializable<>(
() -> {
var configurationMap = new HashMap<String, SettingsConfiguration>();
configurationMap.put(
JinaAIServiceSettings.MODEL_ID,
new SettingsConfiguration.Builder(supportedTaskTypes).setDescription(
"The name of the model to use for the inference task."
)
.setLabel("Model ID")
.setRequired(true)
.setSensitive(false)
.setUpdatable(false)
.setType(SettingsConfigurationFieldType.STRING)
.build()
);
configurationMap.put(
DIMENSIONS,
new SettingsConfiguration.Builder(EnumSet.of(TaskType.TEXT_EMBEDDING)).setDescription(
"The number of dimensions the resulting embeddings should have. For more information refer to "
+ "https://api.jina.ai/redoc#tag/embeddings/operation/create_embedding_v1_embeddings_post."
)
.setLabel("Dimensions")
.setRequired(false)
.setSensitive(false)
.setUpdatable(false)
.setType(SettingsConfigurationFieldType.INTEGER)
.build()
);
configurationMap.putAll(DefaultSecretSettings.toSettingsConfiguration(supportedTaskTypes));
configurationMap.putAll(RateLimitSettings.toSettingsConfiguration(supportedTaskTypes));
return new InferenceServiceConfiguration.Builder().setService(NAME)
.setName(SERVICE_NAME)
.setTaskTypes(supportedTaskTypes)
.setConfigurations(configurationMap)
.build();
}
);
}
}
| Configuration |
java | square__retrofit | retrofit/java-test/src/test/java/retrofit2/RequestFactoryTest.java | {
"start": 12248,
"end": 12748
} | class ____ {
@PUT("/{a}") //
Call<ResponseBody> method(@Path("a") int a, @Path("b") int b, @Body int c) {
return null;
}
}
try {
buildRequest(Example.class);
fail();
} catch (IllegalArgumentException e) {
assertThat(e)
.hasMessageThat()
.isEqualTo(
"URL \"/{a}\" does not contain \"{b}\". (parameter 'b')\n for method Example.method");
}
}
@Test
public void parameterWithoutAnnotation() {
| Example |
java | apache__dubbo | dubbo-config/dubbo-config-spring/src/main/java/org/apache/dubbo/config/spring/ReferenceBean.java | {
"start": 13199,
"end": 17066
} | interface ____ locally
}
}
if (NativeDetector.inNativeImage()) {
generateFromJdk(interfaces);
}
if (this.lazyProxy == null
&& (StringUtils.isEmpty(this.proxy) || CommonConstants.DEFAULT_PROXY.equalsIgnoreCase(this.proxy))) {
generateFromJavassistFirst(interfaces);
}
if (this.lazyProxy == null) {
generateFromJdk(interfaces);
}
}
private void generateFromJavassistFirst(List<Class<?>> interfaces) {
try {
this.lazyProxy = Proxy.getProxy(interfaces.toArray(new Class[0]))
.newInstance(new LazyTargetInvocationHandler(new DubboReferenceLazyInitTargetSource()));
} catch (Throwable fromJavassist) {
// try fall back to JDK proxy factory
try {
this.lazyProxy = java.lang.reflect.Proxy.newProxyInstance(
beanClassLoader,
interfaces.toArray(new Class[0]),
new LazyTargetInvocationHandler(new DubboReferenceLazyInitTargetSource()));
logger.error(
PROXY_FAILED,
"",
"",
"Failed to generate proxy by Javassist failed. Fallback to use JDK proxy success. "
+ "Interfaces: " + interfaces,
fromJavassist);
} catch (Throwable fromJdk) {
logger.error(
PROXY_FAILED,
"",
"",
"Failed to generate proxy by Javassist failed. Fallback to use JDK proxy is also failed. "
+ "Interfaces: " + interfaces + " Javassist Error.",
fromJavassist);
logger.error(
PROXY_FAILED,
"",
"",
"Failed to generate proxy by Javassist failed. Fallback to use JDK proxy is also failed. "
+ "Interfaces: " + interfaces + " JDK Error.",
fromJdk);
throw fromJavassist;
}
}
}
private void generateFromJdk(List<Class<?>> interfaces) {
try {
this.lazyProxy = java.lang.reflect.Proxy.newProxyInstance(
beanClassLoader,
interfaces.toArray(new Class[0]),
new LazyTargetInvocationHandler(new DubboReferenceLazyInitTargetSource()));
} catch (Throwable fromJdk) {
logger.error(
PROXY_FAILED,
"",
"",
"Failed to generate proxy by Javassist failed. Fallback to use JDK proxy is also failed. "
+ "Interfaces: " + interfaces + " JDK Error.",
fromJdk);
throw fromJdk;
}
}
private Object getCallProxy() throws Exception {
if (referenceConfig == null) {
synchronized (LockUtils.getSingletonMutex(applicationContext)) {
if (referenceConfig == null) {
referenceBeanManager.initReferenceBean(this);
applicationContext
.getBean(
DubboConfigApplicationListener.class.getName(),
DubboConfigApplicationListener.class)
.init();
logger.warn(
CONFIG_DUBBO_BEAN_INITIALIZER,
"",
"",
"ReferenceBean is not ready yet, please make sure to "
+ "call reference | class |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/RedundantTest.java | {
"start": 1736,
"end": 2108
} | class ____ {
private int id;
private Map<String, Object> attributes = new HashMap<String, Object>();
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
public Map<String, Object> getAttributes() {
return attributes;
}
}
}
| VO |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopIntIntAggregator.java | {
"start": 1412,
"end": 3116
} | class ____ {
public static SingleState initSingle(BigArrays bigArrays, int limit, boolean ascending) {
return new SingleState(bigArrays, limit, ascending);
}
public static void combine(SingleState state, int v, int outputValue) {
state.add(v, outputValue);
}
public static void combineIntermediate(SingleState state, IntBlock values, IntBlock outputValues) {
int start = values.getFirstValueIndex(0);
int end = start + values.getValueCount(0);
for (int i = start; i < end; i++) {
combine(state, values.getInt(i), outputValues.getInt(i));
}
}
public static Block evaluateFinal(SingleState state, DriverContext driverContext) {
return state.toBlock(driverContext.blockFactory());
}
public static GroupingState initGrouping(BigArrays bigArrays, int limit, boolean ascending) {
return new GroupingState(bigArrays, limit, ascending);
}
public static void combine(GroupingState state, int groupId, int v, int outputValue) {
state.add(groupId, v, outputValue);
}
public static void combineIntermediate(GroupingState state, int groupId, IntBlock values, IntBlock outputValues, int position) {
int start = values.getFirstValueIndex(position);
int end = start + values.getValueCount(position);
for (int i = start; i < end; i++) {
combine(state, groupId, values.getInt(i), outputValues.getInt(i));
}
}
public static Block evaluateFinal(GroupingState state, IntVector selected, GroupingAggregatorEvaluationContext ctx) {
return state.toBlock(ctx.blockFactory(), selected);
}
public static | TopIntIntAggregator |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/fetchprofile/Customer6.java | {
"start": 622,
"end": 1091
} | class ____ {
@Id
@GeneratedValue
private long id;
private String name;
@OneToOne(fetch = FetchType.LAZY)
private Address address;
public long getId() {
return id;
}
public void setId(long id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Address getAddress() {
return address;
}
public void setAddress(Address address) {
this.address = address;
}
}
| Customer6 |
java | spring-projects__spring-framework | spring-webflux/src/test/java/org/springframework/web/reactive/result/method/annotation/ResponseEntityResultHandlerTests.java | {
"start": 24146,
"end": 24991
} | class ____ {
ResponseEntity<String> responseEntityString() { return null; }
ResponseEntity<Void> responseEntityVoid() { return null; }
ResponseEntity<Person> responseEntityPerson() { return null; }
ErrorResponse errorResponse() { return null; }
ProblemDetail problemDetail() { return null; }
HttpHeaders httpHeaders() { return null; }
Mono<ResponseEntity<String>> mono() { return null; }
Single<ResponseEntity<String>> single() { return null; }
CompletableFuture<ResponseEntity<String>> completableFuture() { return null; }
String string() { return null; }
Completable completable() { return null; }
Mono<ResponseEntity<?>> monoResponseEntityWildcard() { return null; }
Flux<?> fluxWildcard() { return null; }
Object object() { return null; }
}
@SuppressWarnings("unused")
private static | TestController |
java | processing__processing4 | java/libraries/io/src/processing/io/I2C.java | {
"start": 2421,
"end": 2452
} | interface ____ master
*/
public | as |
java | google__guava | android/guava/src/com/google/common/collect/ImmutableListMultimap.java | {
"start": 9101,
"end": 19470
} | class ____<K, V> extends ImmutableMultimap.Builder<K, V> {
/**
* Creates a new builder. The returned builder is equivalent to the builder generated by {@link
* ImmutableListMultimap#builder}.
*/
public Builder() {}
/** Creates a new builder with a hint for the number of distinct keys. */
Builder(int expectedKeys) {
super(expectedKeys);
}
/**
* {@inheritDoc}
*
* @since 33.3.0
*/
@CanIgnoreReturnValue
@Override
public Builder<K, V> expectedValuesPerKey(int expectedValuesPerKey) {
super.expectedValuesPerKey(expectedValuesPerKey);
return this;
}
@CanIgnoreReturnValue
@Override
public Builder<K, V> put(K key, V value) {
super.put(key, value);
return this;
}
/**
* {@inheritDoc}
*
* @since 11.0
*/
@CanIgnoreReturnValue
@Override
public Builder<K, V> put(Entry<? extends K, ? extends V> entry) {
super.put(entry);
return this;
}
/**
* {@inheritDoc}
*
* @since 19.0
*/
@CanIgnoreReturnValue
@Override
public Builder<K, V> putAll(Iterable<? extends Entry<? extends K, ? extends V>> entries) {
super.putAll(entries);
return this;
}
@CanIgnoreReturnValue
@Override
public Builder<K, V> putAll(K key, Iterable<? extends V> values) {
super.putAll(key, values);
return this;
}
@CanIgnoreReturnValue
@Override
public Builder<K, V> putAll(K key, V... values) {
super.putAll(key, values);
return this;
}
@CanIgnoreReturnValue
@Override
public Builder<K, V> putAll(Multimap<? extends K, ? extends V> multimap) {
super.putAll(multimap);
return this;
}
@CanIgnoreReturnValue
@Override
Builder<K, V> combine(ImmutableMultimap.Builder<K, V> other) {
super.combine(other);
return this;
}
/**
* {@inheritDoc}
*
* @since 8.0
*/
@CanIgnoreReturnValue
@Override
public Builder<K, V> orderKeysBy(Comparator<? super K> keyComparator) {
super.orderKeysBy(keyComparator);
return this;
}
/**
* {@inheritDoc}
*
* @since 8.0
*/
@CanIgnoreReturnValue
@Override
public Builder<K, V> orderValuesBy(Comparator<? super V> valueComparator) {
super.orderValuesBy(valueComparator);
return this;
}
/** Returns a newly-created immutable list multimap. */
@Override
public ImmutableListMultimap<K, V> build() {
return (ImmutableListMultimap<K, V>) super.build();
}
}
/**
* Returns an immutable multimap containing the same mappings as {@code multimap}. The generated
* multimap's key and value orderings correspond to the iteration ordering of the {@code
* multimap.asMap()} view.
*
* <p>Despite the method name, this method attempts to avoid actually copying the data when it is
* safe to do so. The exact circumstances under which a copy will or will not be performed are
* undocumented and subject to change.
*
* @throws NullPointerException if any key or value in {@code multimap} is null
*/
public static <K, V> ImmutableListMultimap<K, V> copyOf(
Multimap<? extends K, ? extends V> multimap) {
if (multimap.isEmpty()) {
return of();
}
// TODO(lowasser): copy ImmutableSetMultimap by using asList() on the sets
if (multimap instanceof ImmutableListMultimap) {
@SuppressWarnings("unchecked") // safe since multimap is not writable
ImmutableListMultimap<K, V> kvMultimap = (ImmutableListMultimap<K, V>) multimap;
if (!kvMultimap.isPartialView()) {
return kvMultimap;
}
}
return fromMapEntries(multimap.asMap().entrySet(), null);
}
/**
* Returns an immutable multimap containing the specified entries. The returned multimap iterates
* over keys in the order they were first encountered in the input, and the values for each key
* are iterated in the order they were encountered.
*
* @throws NullPointerException if any key, value, or entry is null
* @since 19.0
*/
public static <K, V> ImmutableListMultimap<K, V> copyOf(
Iterable<? extends Entry<? extends K, ? extends V>> entries) {
return new Builder<K, V>().putAll(entries).build();
}
/** Creates an ImmutableListMultimap from an asMap.entrySet. */
static <K, V> ImmutableListMultimap<K, V> fromMapEntries(
Collection<? extends Map.Entry<? extends K, ? extends Collection<? extends V>>> mapEntries,
@Nullable Comparator<? super V> valueComparator) {
if (mapEntries.isEmpty()) {
return of();
}
ImmutableMap.Builder<K, ImmutableList<V>> builder =
new ImmutableMap.Builder<>(mapEntries.size());
int size = 0;
for (Entry<? extends K, ? extends Collection<? extends V>> entry : mapEntries) {
K key = entry.getKey();
Collection<? extends V> values = entry.getValue();
ImmutableList<V> list =
(valueComparator == null)
? ImmutableList.copyOf(values)
: ImmutableList.sortedCopyOf(valueComparator, values);
if (!list.isEmpty()) {
builder.put(key, list);
size += list.size();
}
}
return new ImmutableListMultimap<>(builder.buildOrThrow(), size);
}
/** Creates an ImmutableListMultimap from an asMap.entrySet. */
static <K, V> ImmutableListMultimap<K, V> fromMapBuilderEntries(
Collection<? extends Map.Entry<K, ImmutableCollection.Builder<V>>> mapEntries,
@Nullable Comparator<? super V> valueComparator) {
if (mapEntries.isEmpty()) {
return of();
}
ImmutableMap.Builder<K, ImmutableList<V>> builder =
new ImmutableMap.Builder<>(mapEntries.size());
int size = 0;
for (Entry<K, ImmutableCollection.Builder<V>> entry : mapEntries) {
K key = entry.getKey();
ImmutableList.Builder<V> values = (ImmutableList.Builder<V>) entry.getValue();
ImmutableList<V> list =
(valueComparator == null) ? values.build() : values.buildSorted(valueComparator);
builder.put(key, list);
size += list.size();
}
return new ImmutableListMultimap<>(builder.buildOrThrow(), size);
}
ImmutableListMultimap(ImmutableMap<K, ImmutableList<V>> map, int size) {
super(map, size);
}
// views
/**
* Returns an immutable list of the values for the given key. If no mappings in the multimap have
* the provided key, an empty immutable list is returned. The values are in the same order as the
* parameters used to build this multimap.
*/
@Override
public ImmutableList<V> get(K key) {
// This cast is safe as its type is known in constructor.
ImmutableList<V> list = (ImmutableList<V>) map.get(key);
return (list == null) ? ImmutableList.of() : list;
}
@LazyInit @RetainedWith private transient @Nullable ImmutableListMultimap<V, K> inverse;
/**
* {@inheritDoc}
*
* <p>Because an inverse of a list multimap can contain multiple pairs with the same key and
* value, this method returns an {@code ImmutableListMultimap} rather than the {@code
* ImmutableMultimap} specified in the {@code ImmutableMultimap} class.
*
* @since 11.0
*/
@Override
public ImmutableListMultimap<V, K> inverse() {
ImmutableListMultimap<V, K> result = inverse;
return (result == null) ? (inverse = invert()) : result;
}
private ImmutableListMultimap<V, K> invert() {
Builder<V, K> builder = builder();
for (Entry<K, V> entry : entries()) {
builder.put(entry.getValue(), entry.getKey());
}
ImmutableListMultimap<V, K> invertedMultimap = builder.build();
invertedMultimap.inverse = this;
return invertedMultimap;
}
/**
* Guaranteed to throw an exception and leave the multimap unmodified.
*
* @throws UnsupportedOperationException always
* @deprecated Unsupported operation.
*/
@CanIgnoreReturnValue
@Deprecated
@Override
@DoNotCall("Always throws UnsupportedOperationException")
public final ImmutableList<V> removeAll(@Nullable Object key) {
throw new UnsupportedOperationException();
}
/**
* Guaranteed to throw an exception and leave the multimap unmodified.
*
* @throws UnsupportedOperationException always
* @deprecated Unsupported operation.
*/
@CanIgnoreReturnValue
@Deprecated
@Override
@DoNotCall("Always throws UnsupportedOperationException")
public final ImmutableList<V> replaceValues(K key, Iterable<? extends V> values) {
throw new UnsupportedOperationException();
}
/**
* @serialData number of distinct keys, and then for each distinct key: the key, the number of
* values for that key, and the key's values
*/
@GwtIncompatible
@J2ktIncompatible
private void writeObject(ObjectOutputStream stream) throws IOException {
stream.defaultWriteObject();
Serialization.writeMultimap(this, stream);
}
@GwtIncompatible
@J2ktIncompatible
private void readObject(ObjectInputStream stream) throws IOException, ClassNotFoundException {
stream.defaultReadObject();
int keyCount = stream.readInt();
if (keyCount < 0) {
throw new InvalidObjectException("Invalid key count " + keyCount);
}
ImmutableMap.Builder<Object, ImmutableList<Object>> builder = ImmutableMap.builder();
int tmpSize = 0;
for (int i = 0; i < keyCount; i++) {
Object key = requireNonNull(stream.readObject());
int valueCount = stream.readInt();
if (valueCount <= 0) {
throw new InvalidObjectException("Invalid value count " + valueCount);
}
ImmutableList.Builder<Object> valuesBuilder = ImmutableList.builder();
for (int j = 0; j < valueCount; j++) {
valuesBuilder.add(requireNonNull(stream.readObject()));
}
builder.put(key, valuesBuilder.build());
tmpSize += valueCount;
}
ImmutableMap<Object, ImmutableList<Object>> tmpMap;
try {
tmpMap = builder.buildOrThrow();
} catch (IllegalArgumentException e) {
throw (InvalidObjectException) new InvalidObjectException(e.getMessage()).initCause(e);
}
FieldSettersHolder.MAP_FIELD_SETTER.set(this, tmpMap);
FieldSettersHolder.SIZE_FIELD_SETTER.set(this, tmpSize);
}
@GwtIncompatible @J2ktIncompatible private static final long serialVersionUID = 0;
}
| Builder |
java | elastic__elasticsearch | test/test-clusters/src/main/java/org/elasticsearch/test/cluster/FeatureFlag.java | {
"start": 803,
"end": 2144
} | enum ____ {
TIME_SERIES_MODE("es.index_mode_feature_flag_registered=true", Version.fromString("8.0.0"), null),
DOC_VALUES_SKIPPER("es.doc_values_skipper_feature_flag_enabled=true", Version.fromString("8.18.1"), null),
LOGS_STREAM("es.logs_stream_feature_flag_enabled=true", Version.fromString("9.1.0"), null),
SYNTHETIC_VECTORS("es.mapping_synthetic_vectors=true", Version.fromString("9.2.0"), null),
INDEX_DIMENSIONS_TSID_OPTIMIZATION_FEATURE_FLAG(
"es.index_dimensions_tsid_optimization_feature_flag_enabled=true",
Version.fromString("9.2.0"),
null
),
RANDOM_SAMPLING("es.random_sampling_feature_flag_enabled=true", Version.fromString("9.2.0"), null),
INFERENCE_API_CCM("es.inference_api_ccm_feature_flag_enabled=true", Version.fromString("9.3.0"), null),
GENERIC_VECTOR_FORMAT("es.generic_vector_format_feature_flag_enabled=true", Version.fromString("9.3.0"), null),
TSDB_SYNTHETIC_ID_FEATURE_FLAG("es.tsdb_synthetic_id_feature_flag_enabled=true", Version.fromString("9.3.0"), null);
public final String systemProperty;
public final Version from;
public final Version until;
FeatureFlag(String systemProperty, Version from, Version until) {
this.systemProperty = systemProperty;
this.from = from;
this.until = until;
}
}
| FeatureFlag |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/derby/DerbySelectTest.java | {
"start": 155,
"end": 399
} | class ____ extends TestCase {
public void test_for_derby() throws Exception {
String sql = "select * from sys_user offset ? rows fetch next ? rows only";
SQLUtils.parseSingleStatement(sql, DbType.derby);
}
}
| DerbySelectTest |
java | apache__hadoop | hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/translator/validator/ParserValidator.java | {
"start": 1055,
"end": 1373
} | class ____ {
/**
* Validates the input parameters for the {@link LogParser}.
*
* @param logs input log streams to the {@link LogParser}.
* @return whether the input parameters are valid or not.
*/
public final boolean validate(final InputStream logs) {
// TODO
return true;
}
}
| ParserValidator |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/RedundantOverrideTest.java | {
"start": 1509,
"end": 1801
} | class ____ {
public boolean frob(Object o) {
return false;
}
}
""")
.doTest();
}
@Test
public void addingJavadoc() {
testHelper
.addSourceLines(
"Test.java",
"""
| Bar |
java | apache__hadoop | hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/NullBlockAliasMap.java | {
"start": 1403,
"end": 2837
} | class ____ extends BlockAliasMap<FileRegion> {
@Override
public Reader<FileRegion> getReader(Reader.Options opts, String blockPoolID)
throws IOException {
return new Reader<FileRegion>() {
@Override
public Iterator<FileRegion> iterator() {
return new Iterator<FileRegion>() {
@Override
public boolean hasNext() {
return false;
}
@Override
public FileRegion next() {
throw new NoSuchElementException();
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
}
@Override
public void close() throws IOException {
// do nothing
}
@Override
public Optional<FileRegion> resolve(Block ident) throws IOException {
throw new UnsupportedOperationException();
}
};
}
@Override
public Writer getWriter(Writer.Options opts, String blockPoolID)
throws IOException {
return new Writer<FileRegion>() {
@Override
public void store(FileRegion token) throws IOException {
// do nothing
}
@Override
public void close() throws IOException {
// do nothing
}
};
}
@Override
public void refresh() throws IOException {
// do nothing
}
@Override
public void close() throws IOException {
}
}
| NullBlockAliasMap |
java | apache__camel | dsl/camel-jbang/camel-jbang-core/src/test/java/org/apache/camel/dsl/jbang/core/commands/ExportMainJkubeTest.java | {
"start": 1625,
"end": 4580
} | class ____ {
private File workingDir;
private File profile = new File(".", "application.properties");
@BeforeEach
public void setup() throws IOException {
Path base = Paths.get("target");
workingDir = Files.createTempDirectory(base, "camel-export").toFile();
}
@AfterEach
public void end() throws IOException {
// force removing, since deleteOnExit is not removing.
FileUtil.removeDir(workingDir);
FileUtil.deleteFile(profile);
}
private static Stream<Arguments> runtimeProvider() {
return Stream.of(
Arguments.of(RuntimeType.main));
}
@ParameterizedTest
@MethodSource("runtimeProvider")
public void shouldGenerateProjectWithJib(RuntimeType rt) throws Exception {
// prepare as we need application.properties that contains jkube settings
Files.copy(new File("src/test/resources/application-jkube.properties").toPath(), profile.toPath(),
StandardCopyOption.REPLACE_EXISTING);
Export command = new Export(new CamelJBangMain());
CommandLine.populateCommand(command, "--gav=examples:route:1.0.0", "--dir=" + workingDir,
"--runtime=%s".formatted(rt.runtime()), "target/test-classes/route.yaml");
int exit = command.doCall();
Assertions.assertEquals(0, exit);
Model model = readMavenModel();
Assertions.assertEquals("examples", model.getGroupId());
Assertions.assertEquals("route", model.getArtifactId());
Assertions.assertEquals("1.0.0", model.getVersion());
Assertions.assertEquals("21", model.getProperties().getProperty("java.version"));
Assertions.assertEquals("abc", model.getProperties().getProperty("jib.label"));
Assertions.assertEquals("mirror.gcr.io/library/eclipse-temurin:21-jre",
model.getProperties().getProperty("jib.from.image"));
// should contain jib and jkube plugin
Assertions.assertEquals(5, model.getBuild().getPlugins().size());
Plugin p = model.getBuild().getPlugins().get(3);
Assertions.assertEquals("com.google.cloud.tools", p.getGroupId());
Assertions.assertEquals("jib-maven-plugin", p.getArtifactId());
p = model.getBuild().getPlugins().get(4);
Assertions.assertEquals("org.eclipse.jkube", p.getGroupId());
Assertions.assertEquals("kubernetes-maven-plugin", p.getArtifactId());
Assertions.assertEquals("1.18.1", p.getVersion());
command.printConfigurationValues("export command");
}
private Model readMavenModel() throws Exception {
File f = workingDir.toPath().resolve("pom.xml").toFile();
Assertions.assertTrue(f.isFile(), "Not a pom.xml file: " + f);
MavenXpp3Reader mavenReader = new MavenXpp3Reader();
Model model = mavenReader.read(new FileReader(f));
model.setPomFile(f);
return model;
}
}
| ExportMainJkubeTest |
java | bumptech__glide | library/src/main/java/com/bumptech/glide/util/GlideSuppliers.java | {
"start": 145,
"end": 272
} | class ____ {
/**
* Produces a non-null instance of {@code T}.
*
* @param <T> The data type
*/
public | GlideSuppliers |
java | google__guice | extensions/grapher/src/com/google/inject/grapher/Edge.java | {
"start": 798,
"end": 1654
} | class ____ {
private final NodeId fromId;
private final NodeId toId;
protected Edge(NodeId fromId, NodeId toId) {
this.fromId = fromId;
this.toId = toId;
}
public NodeId getFromId() {
return fromId;
}
public NodeId getToId() {
return toId;
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof Edge)) {
return false;
}
Edge other = (Edge) obj;
return Objects.equal(fromId, other.fromId) && Objects.equal(toId, other.toId);
}
@Override
public int hashCode() {
return Objects.hashCode(fromId, toId);
}
/**
* Returns a copy of the edge with new node IDs.
*
* @param fromId new ID of the 'from' node
* @param toId new ID of the 'to' node
* @return copy of the edge with the new node IDs
*/
public abstract Edge copy(NodeId fromId, NodeId toId);
}
| Edge |
java | apache__flink | flink-core/src/main/java/org/apache/flink/util/concurrent/Executors.java | {
"start": 2001,
"end": 2173
} | enum ____ implements Executor {
INSTANCE;
@Override
public void execute(Runnable command) {
command.run();
}
}
}
| DirectExecutor |
java | apache__flink | flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/test/util/FileUtils.java | {
"start": 1446,
"end": 4034
} | class ____ {
private static final Logger LOG = LoggerFactory.getLogger(FileUtils.class);
private static final ParameterProperty<Path> PROJECT_ROOT_DIRECTORY =
new ParameterProperty<>("rootDir", Paths::get);
private static final ParameterProperty<Path> DISTRIBUTION_DIRECTORY =
new ParameterProperty<>("distDir", Paths::get);
/**
* Matches the given {@link Pattern} against all lines in the given file, and replaces all
* matches with the replacement generated by the given {@link Function}. All unmatched lines and
* provided replacements are written into the file, with the order corresponding to the original
* content. Newlines are automatically added to each line; this implies that an empty
* replacement string will result in an empty line to be written.
*/
public static void replace(Path file, Pattern pattern, Function<Matcher, String> replacer)
throws IOException {
final List<String> fileLines = Files.readAllLines(file);
try (PrintWriter pw =
new PrintWriter(
new OutputStreamWriter(
Files.newOutputStream(file, StandardOpenOption.TRUNCATE_EXISTING),
StandardCharsets.UTF_8.name()))) {
for (String fileLine : fileLines) {
Matcher matcher = pattern.matcher(fileLine);
if (matcher.matches()) {
String replacement = replacer.apply(matcher);
pw.println(replacement);
} else {
pw.println(fileLine);
}
}
}
}
public static Path findFlinkDist() {
Optional<Path> distributionDirectory = DISTRIBUTION_DIRECTORY.get();
if (!distributionDirectory.isPresent()) {
LOG.debug(
"The '{}' property was not set; attempting to automatically determine distribution location.",
DISTRIBUTION_DIRECTORY.getPropertyName());
Path projectRootPath;
Optional<Path> projectRoot = PROJECT_ROOT_DIRECTORY.get();
if (projectRoot.isPresent()) {
// running with maven
projectRootPath = projectRoot.get();
} else {
// running in the IDE; working directory is test module
Optional<Path> projectRootDirectory =
findProjectRootDirectory(Paths.get("").toAbsolutePath());
// this distinction is required in case this | FileUtils |
java | grpc__grpc-java | core/src/test/java/io/grpc/internal/MetricRecorderImplTest.java | {
"start": 1808,
"end": 14035
} | class ____ {
private static final String DESCRIPTION = "description";
private static final String UNIT = "unit";
private static final boolean ENABLED = true;
private static final ImmutableList<String> REQUIRED_LABEL_KEYS = ImmutableList.of("KEY1", "KEY2");
private static final ImmutableList<String> OPTIONAL_LABEL_KEYS = ImmutableList.of(
"OPTIONAL_KEY_1");
private static final ImmutableList<String> REQUIRED_LABEL_VALUES = ImmutableList.of("VALUE1",
"VALUE2");
private static final ImmutableList<String> OPTIONAL_LABEL_VALUES = ImmutableList.of(
"OPTIONAL_VALUE_1");
private MetricSink mockSink = mock(MetricSink.class);
private List<MetricSink> sinks = Arrays.asList(mockSink, mockSink);
private MetricInstrumentRegistry registry =
MetricInstrumentRegistryAccessor.createMetricInstrumentRegistry();
private final DoubleCounterMetricInstrument doubleCounterInstrument =
registry.registerDoubleCounter("counter0", DESCRIPTION, UNIT, REQUIRED_LABEL_KEYS,
OPTIONAL_LABEL_KEYS, ENABLED);
private final LongCounterMetricInstrument longCounterInstrument =
registry.registerLongCounter("counter1", DESCRIPTION, UNIT, REQUIRED_LABEL_KEYS,
OPTIONAL_LABEL_KEYS, ENABLED);
private final DoubleHistogramMetricInstrument doubleHistogramInstrument =
registry.registerDoubleHistogram("histogram1", DESCRIPTION, UNIT,
Collections.emptyList(), REQUIRED_LABEL_KEYS, OPTIONAL_LABEL_KEYS, ENABLED);
private final LongHistogramMetricInstrument longHistogramInstrument =
registry.registerLongHistogram("histogram2", DESCRIPTION, UNIT,
Collections.emptyList(), REQUIRED_LABEL_KEYS, OPTIONAL_LABEL_KEYS, ENABLED);
private final LongGaugeMetricInstrument longGaugeInstrument =
registry.registerLongGauge("gauge0", DESCRIPTION, UNIT, REQUIRED_LABEL_KEYS,
OPTIONAL_LABEL_KEYS, ENABLED);
private final LongUpDownCounterMetricInstrument longUpDownCounterInstrument =
registry.registerLongUpDownCounter("upDownCounter0", DESCRIPTION, UNIT,
REQUIRED_LABEL_KEYS, OPTIONAL_LABEL_KEYS, ENABLED);
private MetricRecorder recorder;
@Before
public void setUp() {
recorder = new MetricRecorderImpl(sinks, registry);
}
@Test
public void addCounter() {
when(mockSink.getMeasuresSize()).thenReturn(6);
recorder.addDoubleCounter(doubleCounterInstrument, 1.0, REQUIRED_LABEL_VALUES,
OPTIONAL_LABEL_VALUES);
verify(mockSink, times(2)).addDoubleCounter(eq(doubleCounterInstrument), eq(1D),
eq(REQUIRED_LABEL_VALUES), eq(OPTIONAL_LABEL_VALUES));
recorder.addLongCounter(longCounterInstrument, 1, REQUIRED_LABEL_VALUES,
OPTIONAL_LABEL_VALUES);
verify(mockSink, times(2)).addLongCounter(eq(longCounterInstrument), eq(1L),
eq(REQUIRED_LABEL_VALUES), eq(OPTIONAL_LABEL_VALUES));
recorder.addLongUpDownCounter(longUpDownCounterInstrument, -10, REQUIRED_LABEL_VALUES,
OPTIONAL_LABEL_VALUES);
verify(mockSink, times(2))
.addLongUpDownCounter(eq(longUpDownCounterInstrument), eq(-10L),
eq(REQUIRED_LABEL_VALUES), eq(OPTIONAL_LABEL_VALUES));
verify(mockSink, never()).updateMeasures(registry.getMetricInstruments());
}
@Test
public void recordHistogram() {
when(mockSink.getMeasuresSize()).thenReturn(4);
recorder.recordDoubleHistogram(doubleHistogramInstrument, 99.0, REQUIRED_LABEL_VALUES,
OPTIONAL_LABEL_VALUES);
verify(mockSink, times(2)).recordDoubleHistogram(eq(doubleHistogramInstrument),
eq(99D), eq(REQUIRED_LABEL_VALUES), eq(OPTIONAL_LABEL_VALUES));
recorder.recordLongHistogram(longHistogramInstrument, 99, REQUIRED_LABEL_VALUES,
OPTIONAL_LABEL_VALUES);
verify(mockSink, times(2)).recordLongHistogram(eq(longHistogramInstrument), eq(99L),
eq(REQUIRED_LABEL_VALUES), eq(OPTIONAL_LABEL_VALUES));
verify(mockSink, never()).updateMeasures(registry.getMetricInstruments());
}
@Test
public void recordCallback() {
MetricSink.Registration mockRegistration = mock(MetricSink.Registration.class);
when(mockSink.getMeasuresSize()).thenReturn(5);
when(mockSink.registerBatchCallback(any(Runnable.class), eq(longGaugeInstrument)))
.thenReturn(mockRegistration);
MetricRecorder.Registration registration = recorder.registerBatchCallback((recorder) -> {
recorder.recordLongGauge(
longGaugeInstrument, 99, REQUIRED_LABEL_VALUES, OPTIONAL_LABEL_VALUES);
}, longGaugeInstrument);
ArgumentCaptor<Runnable> callbackCaptor = ArgumentCaptor.forClass(Runnable.class);
verify(mockSink, times(2))
.registerBatchCallback(callbackCaptor.capture(), eq(longGaugeInstrument));
callbackCaptor.getValue().run();
// Only once, for the one sink that called the callback.
verify(mockSink).recordLongGauge(
longGaugeInstrument, 99, REQUIRED_LABEL_VALUES, OPTIONAL_LABEL_VALUES);
verify(mockRegistration, never()).close();
registration.close();
verify(mockRegistration, times(2)).close();
verify(mockSink, never()).updateMeasures(registry.getMetricInstruments());
}
@Test
public void newRegisteredMetricUpdateMeasures() {
// Sink is initialized with zero measures, should trigger updateMeasures() on sinks
when(mockSink.getMeasuresSize()).thenReturn(0);
// Double Counter
recorder.addDoubleCounter(doubleCounterInstrument, 1.0, REQUIRED_LABEL_VALUES,
OPTIONAL_LABEL_VALUES);
verify(mockSink, times(2)).updateMeasures(anyList());
verify(mockSink, times(2)).addDoubleCounter(eq(doubleCounterInstrument), eq(1D),
eq(REQUIRED_LABEL_VALUES), eq(OPTIONAL_LABEL_VALUES));
// Long Counter
recorder.addLongCounter(longCounterInstrument, 1, REQUIRED_LABEL_VALUES,
OPTIONAL_LABEL_VALUES);
verify(mockSink, times(4)).updateMeasures(anyList());
verify(mockSink, times(2)).addLongCounter(eq(longCounterInstrument), eq(1L),
eq(REQUIRED_LABEL_VALUES), eq(OPTIONAL_LABEL_VALUES));
// Double Histogram
recorder.recordDoubleHistogram(doubleHistogramInstrument, 99.0, REQUIRED_LABEL_VALUES,
OPTIONAL_LABEL_VALUES);
verify(mockSink, times(6)).updateMeasures(anyList());
verify(mockSink, times(2)).recordDoubleHistogram(eq(doubleHistogramInstrument),
eq(99D), eq(REQUIRED_LABEL_VALUES), eq(OPTIONAL_LABEL_VALUES));
// Long Histogram
recorder.recordLongHistogram(longHistogramInstrument, 99, REQUIRED_LABEL_VALUES,
OPTIONAL_LABEL_VALUES);
verify(mockSink, times(8)).updateMeasures(registry.getMetricInstruments());
verify(mockSink, times(2)).recordLongHistogram(eq(longHistogramInstrument), eq(99L),
eq(REQUIRED_LABEL_VALUES), eq(OPTIONAL_LABEL_VALUES));
// Callback
when(mockSink.registerBatchCallback(any(Runnable.class), eq(longGaugeInstrument)))
.thenReturn(mock(MetricSink.Registration.class));
MetricRecorder.Registration registration = recorder.registerBatchCallback(
(recorder) -> { }, longGaugeInstrument);
verify(mockSink, times(10)).updateMeasures(registry.getMetricInstruments());
verify(mockSink, times(2))
.registerBatchCallback(any(Runnable.class), eq(longGaugeInstrument));
registration.close();
// Long UpDown Counter
recorder.addLongUpDownCounter(longUpDownCounterInstrument, -10, REQUIRED_LABEL_VALUES,
OPTIONAL_LABEL_VALUES);
verify(mockSink, times(12)).updateMeasures(anyList());
verify(mockSink, times(2)).addLongUpDownCounter(eq(longUpDownCounterInstrument), eq(-10L),
eq(REQUIRED_LABEL_VALUES), eq(OPTIONAL_LABEL_VALUES));
}
@Test(expected = IllegalArgumentException.class)
public void addDoubleCounterMismatchedRequiredLabelValues() {
when(mockSink.getMeasuresSize()).thenReturn(4);
recorder.addDoubleCounter(doubleCounterInstrument, 1.0, ImmutableList.of(),
OPTIONAL_LABEL_VALUES);
}
@Test(expected = IllegalArgumentException.class)
public void addLongCounterMismatchedRequiredLabelValues() {
when(mockSink.getMeasuresSize()).thenReturn(4);
recorder.addLongCounter(longCounterInstrument, 1, ImmutableList.of(),
OPTIONAL_LABEL_VALUES);
}
@Test(expected = IllegalArgumentException.class)
public void addLongUpDownCounterMismatchedRequiredLabelValues() {
when(mockSink.getMeasuresSize()).thenReturn(6);
recorder.addLongUpDownCounter(longUpDownCounterInstrument, 1, ImmutableList.of(),
OPTIONAL_LABEL_VALUES);
}
@Test(expected = IllegalArgumentException.class)
public void recordDoubleHistogramMismatchedRequiredLabelValues() {
when(mockSink.getMeasuresSize()).thenReturn(4);
recorder.recordDoubleHistogram(doubleHistogramInstrument, 99.0, ImmutableList.of(),
OPTIONAL_LABEL_VALUES);
}
@Test(expected = IllegalArgumentException.class)
public void recordLongHistogramMismatchedRequiredLabelValues() {
when(mockSink.getMeasuresSize()).thenReturn(4);
recorder.recordLongHistogram(longHistogramInstrument, 99, ImmutableList.of(),
OPTIONAL_LABEL_VALUES);
}
@Test
public void recordLongGaugeMismatchedRequiredLabelValues() {
when(mockSink.getMeasuresSize()).thenReturn(4);
when(mockSink.registerBatchCallback(any(Runnable.class), eq(longGaugeInstrument)))
.thenReturn(mock(MetricSink.Registration.class));
MetricRecorder.Registration registration = recorder.registerBatchCallback((recorder) -> {
assertThrows(
IllegalArgumentException.class,
() -> recorder.recordLongGauge(
longGaugeInstrument, 99, ImmutableList.of(), OPTIONAL_LABEL_VALUES));
}, longGaugeInstrument);
ArgumentCaptor<Runnable> callbackCaptor = ArgumentCaptor.forClass(Runnable.class);
verify(mockSink, times(2))
.registerBatchCallback(callbackCaptor.capture(), eq(longGaugeInstrument));
callbackCaptor.getValue().run();
registration.close();
}
@Test(expected = IllegalArgumentException.class)
public void addDoubleCounterMismatchedOptionalLabelValues() {
when(mockSink.getMeasuresSize()).thenReturn(4);
recorder.addDoubleCounter(doubleCounterInstrument, 1.0, REQUIRED_LABEL_VALUES,
ImmutableList.of());
}
@Test(expected = IllegalArgumentException.class)
public void addLongCounterMismatchedOptionalLabelValues() {
when(mockSink.getMeasuresSize()).thenReturn(4);
recorder.addLongCounter(longCounterInstrument, 1, REQUIRED_LABEL_VALUES,
ImmutableList.of());
}
@Test(expected = IllegalArgumentException.class)
public void addLongUpDownCounterMismatchedOptionalLabelValues() {
when(mockSink.getMeasuresSize()).thenReturn(6);
recorder.addLongUpDownCounter(longUpDownCounterInstrument, 1, REQUIRED_LABEL_VALUES,
ImmutableList.of());
}
@Test(expected = IllegalArgumentException.class)
public void recordDoubleHistogramMismatchedOptionalLabelValues() {
when(mockSink.getMeasuresSize()).thenReturn(4);
recorder.recordDoubleHistogram(doubleHistogramInstrument, 99.0, REQUIRED_LABEL_VALUES,
ImmutableList.of());
}
@Test(expected = IllegalArgumentException.class)
public void recordLongHistogramMismatchedOptionalLabelValues() {
when(mockSink.getMeasuresSize()).thenReturn(4);
recorder.recordLongHistogram(longHistogramInstrument, 99, REQUIRED_LABEL_VALUES,
ImmutableList.of());
}
@Test
public void recordLongGaugeMismatchedOptionalLabelValues() {
when(mockSink.getMeasuresSize()).thenReturn(4);
when(mockSink.registerBatchCallback(any(Runnable.class), eq(longGaugeInstrument)))
.thenReturn(mock(MetricSink.Registration.class));
MetricRecorder.Registration registration = recorder.registerBatchCallback((recorder) -> {
assertThrows(
IllegalArgumentException.class,
() -> recorder.recordLongGauge(
longGaugeInstrument, 99, REQUIRED_LABEL_VALUES, ImmutableList.of()));
}, longGaugeInstrument);
ArgumentCaptor<Runnable> callbackCaptor = ArgumentCaptor.forClass(Runnable.class);
verify(mockSink, times(2))
.registerBatchCallback(callbackCaptor.capture(), eq(longGaugeInstrument));
callbackCaptor.getValue().run();
registration.close();
}
}
| MetricRecorderImplTest |
java | apache__camel | components/camel-joor/src/main/java/org/apache/camel/language/joor/JoorAnnotationExpressionFactory.java | {
"start": 1105,
"end": 2149
} | class ____ extends DefaultAnnotationExpressionFactory {
@Override
public Expression createExpression(
CamelContext camelContext, Annotation annotation,
LanguageAnnotation languageAnnotation, Class<?> expressionReturnType) {
Object[] params = new Object[3];
Class<?> resultType = getResultType(annotation);
if (resultType.equals(Object.class)) {
resultType = expressionReturnType;
}
params[0] = resultType;
if (annotation instanceof Joor) {
Joor joorAnnotation = (Joor) annotation;
params[1] = joorAnnotation.preCompile();
params[2] = joorAnnotation.singleQuotes();
}
String expression = getExpressionFromAnnotation(annotation);
return camelContext.resolveLanguage("joor").createExpression(expression, params);
}
private Class<?> getResultType(Annotation annotation) {
return (Class<?>) getAnnotationObjectValue(annotation, "resultType");
}
}
| JoorAnnotationExpressionFactory |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java | {
"start": 2440,
"end": 13050
} | class ____ extends RemoteConnectionStrategy {
static final int CHANNELS_PER_CONNECTION = 6;
private static final TimeValue SNIFF_REQUEST_TIMEOUT = TimeValue.THIRTY_SECONDS; // TODO make configurable?
private final List<String> configuredSeedNodes;
private final List<Supplier<DiscoveryNode>> seedNodes;
private final int maxNumRemoteConnections;
private final Predicate<DiscoveryNode> nodePredicate;
private final SetOnce<ClusterName> remoteClusterName = new SetOnce<>();
private final String proxyAddress;
private final Executor managementExecutor;
SniffConnectionStrategy(SniffLinkedProjectConfig config, TransportService transportService, RemoteConnectionManager connectionManager) {
this(
config,
config.seedNodes()
.stream()
.map(
seedAddress -> (Supplier<DiscoveryNode>) () -> resolveSeedNode(
config.linkedProjectAlias(),
seedAddress,
config.proxyAddress()
)
)
.toList(),
transportService,
connectionManager
);
}
SniffConnectionStrategy(
SniffLinkedProjectConfig config,
List<Supplier<DiscoveryNode>> seedNodesSupplier,
TransportService transportService,
RemoteConnectionManager connectionManager
) {
super(config, transportService, connectionManager);
this.proxyAddress = config.proxyAddress();
this.maxNumRemoteConnections = config.maxNumConnections();
this.nodePredicate = config.nodePredicate();
this.configuredSeedNodes = config.seedNodes();
this.seedNodes = seedNodesSupplier;
this.managementExecutor = transportService.getThreadPool().executor(ThreadPool.Names.MANAGEMENT);
}
static Writeable.Reader<RemoteConnectionInfo.ModeInfo> infoReader() {
return SniffModeInfo::new;
}
@Override
protected boolean shouldOpenMoreConnections() {
return connectionManager.size() < maxNumRemoteConnections;
}
@Override
protected boolean strategyMustBeRebuilt(LinkedProjectConfig config) {
assert config instanceof SniffLinkedProjectConfig : "expected config to be of type " + SniffLinkedProjectConfig.class;
final var sniffConfig = (SniffLinkedProjectConfig) config;
return sniffConfig.maxNumConnections() != maxNumRemoteConnections
|| seedsChanged(configuredSeedNodes, sniffConfig.seedNodes())
|| proxyChanged(proxyAddress, sniffConfig.proxyAddress());
}
@Override
protected ConnectionStrategy strategyType() {
return ConnectionStrategy.SNIFF;
}
@Override
protected void connectImpl(ActionListener<Void> listener) {
collectRemoteNodes(seedNodes.iterator(), listener);
}
@Override
protected RemoteConnectionInfo.ModeInfo getModeInfo() {
return new SniffModeInfo(configuredSeedNodes, maxNumRemoteConnections, connectionManager.size());
}
private void collectRemoteNodes(Iterator<Supplier<DiscoveryNode>> seedNodesSuppliers, ActionListener<Void> listener) {
if (Thread.currentThread().isInterrupted()) {
listener.onFailure(new InterruptedException("remote connect thread got interrupted"));
return;
}
if (seedNodesSuppliers.hasNext()) {
final Consumer<Exception> onFailure = e -> {
if (isRetryableException(e) && seedNodesSuppliers.hasNext()) {
logger.debug(() -> "fetching nodes from external cluster [" + clusterAlias + "] failed moving to next seed node", e);
collectRemoteNodes(seedNodesSuppliers, listener);
} else {
listener.onFailure(e);
}
};
final DiscoveryNode seedNode = seedNodesSuppliers.next().get();
logger.trace("[{}] opening transient connection to seed node: [{}]", clusterAlias, seedNode);
final ListenableFuture<Transport.Connection> openConnectionStep = new ListenableFuture<>();
try {
connectionManager.openConnection(seedNode, null, openConnectionStep);
} catch (Exception e) {
onFailure.accept(e);
}
final ListenableFuture<TransportService.HandshakeResponse> handshakeStep = new ListenableFuture<>();
openConnectionStep.addListener(ActionListener.wrap(connection -> {
ConnectionProfile connectionProfile = connectionManager.getConnectionProfile();
transportService.handshake(
connection,
connectionProfile.getHandshakeTimeout(),
getRemoteClusterNamePredicate(),
handshakeStep
);
}, onFailure));
final ListenableFuture<Void> fullConnectionStep = new ListenableFuture<>();
handshakeStep.addListener(ActionListener.wrap(handshakeResponse -> {
final DiscoveryNode handshakeNode = handshakeResponse.getDiscoveryNode();
if (nodePredicate.test(handshakeNode) && shouldOpenMoreConnections()) {
logger.trace(
"[{}] opening managed connection to seed node: [{}] proxy address: [{}]",
clusterAlias,
handshakeNode,
proxyAddress
);
final DiscoveryNode handshakeNodeWithProxy = maybeAddProxyAddress(proxyAddress, handshakeNode);
connectionManager.connectToRemoteClusterNode(
handshakeNodeWithProxy,
getConnectionValidator(handshakeNodeWithProxy),
fullConnectionStep
);
} else {
fullConnectionStep.onResponse(null);
}
}, e -> {
final Transport.Connection connection = openConnectionStep.result();
final DiscoveryNode node = connection.getNode();
logger.debug(() -> format("[%s] failed to handshake with seed node: [%s]", clusterAlias, node), e);
IOUtils.closeWhileHandlingException(connection);
onFailure.accept(e);
}));
fullConnectionStep.addListener(ActionListener.wrap(aVoid -> {
if (remoteClusterName.get() == null) {
TransportService.HandshakeResponse handshakeResponse = handshakeStep.result();
assert handshakeResponse.getClusterName().value() != null;
remoteClusterName.set(handshakeResponse.getClusterName());
}
final Transport.Connection connection = openConnectionStep.result();
// here we pass on the connection since we can only close it once the sendRequest returns otherwise
// due to the async nature (it will return before it's actually sent) this can cause the request to fail
// due to an already closed connection.
ThreadPool threadPool = transportService.getThreadPool();
ThreadContext threadContext = threadPool.getThreadContext();
final String action;
final TransportRequest request;
final AbstractSniffResponseHandler<?> sniffResponseHandler;
// Use different action to collect nodes information depending on the connection model
if (REMOTE_CLUSTER_PROFILE.equals(connectionManager.getConnectionProfile().getTransportProfile())) {
action = RemoteClusterNodesAction.TYPE.name();
request = RemoteClusterNodesAction.Request.REMOTE_CLUSTER_SERVER_NODES;
sniffResponseHandler = new RemoteClusterNodesSniffResponseHandler(connection, listener, seedNodesSuppliers);
} else {
action = ClusterStateAction.NAME;
final RemoteClusterStateRequest clusterStateRequest = new RemoteClusterStateRequest(SNIFF_REQUEST_TIMEOUT);
clusterStateRequest.clear();
clusterStateRequest.nodes(true);
request = clusterStateRequest;
sniffResponseHandler = new ClusterStateSniffResponseHandler(connection, listener, seedNodesSuppliers);
}
try (var ignored = threadContext.newEmptySystemContext()) {
// we stash any context here since this is an internal execution and should not leak any existing context information.
transportService.sendRequest(
connection,
action,
request,
TransportRequestOptions.EMPTY,
new TransportService.ContextRestoreResponseHandler<>(
threadContext.newRestorableContext(false),
sniffResponseHandler
)
);
}
}, e -> {
final Transport.Connection connection = openConnectionStep.result();
final DiscoveryNode node = connection.getNode();
logger.debug(() -> format("[%s] failed to open managed connection to seed node: [%s]", clusterAlias, node), e);
IOUtils.closeWhileHandlingException(connection);
onFailure.accept(e);
}));
} else {
listener.onFailure(new NoSeedNodeLeftException("no seed node left for cluster: [" + clusterAlias + "]"));
}
}
private ConnectionManager.ConnectionValidator getConnectionValidator(DiscoveryNode node) {
return (connection, profile, listener) -> {
assert profile.getTransportProfile().equals(connectionManager.getConnectionProfile().getTransportProfile())
: "transport profile must be consistent between the connection manager and the actual profile";
transportService.connectionValidator(node)
.validate(
RemoteConnectionManager.wrapConnectionWithRemoteClusterInfo(
connection,
clusterAlias,
connectionManager.getCredentialsManager()
),
profile,
listener
);
};
}
private | SniffConnectionStrategy |
java | google__dagger | javatests/dagger/internal/codegen/ScopingValidationTest.java | {
"start": 1735,
"end": 2150
} | class ____ {",
" @Inject ScopedType(String s, long l, float f) {}",
"}");
Source moduleFile =
CompilerTests.javaSource(
"test.ScopedModule",
"package test;",
"",
"import dagger.Module;",
"import dagger.Provides;",
"import javax.inject.Singleton;",
"",
"@Module",
" | ScopedType |
java | apache__camel | core/camel-core-model/src/main/java/org/apache/camel/model/errorhandler/DefaultErrorHandlerDefinition.java | {
"start": 1927,
"end": 37261
} | class ____ extends BaseErrorHandlerDefinition {
@XmlTransient
private CamelLogger loggerBean;
@XmlTransient
private Processor onRedeliveryProcessor;
@XmlTransient
private Processor onPrepareFailureProcessor;
@XmlTransient
private Processor onExceptionOccurredProcessor;
@XmlTransient
private ScheduledExecutorService executorServiceBean;
@XmlTransient
private Predicate retryWhilePredicate;
// commonly used should be first
@XmlElement
private RedeliveryPolicyDefinition redeliveryPolicy;
@XmlAttribute
@Metadata(javaType = "java.lang.Boolean")
private String useOriginalMessage;
@XmlAttribute
@Metadata(javaType = "java.lang.Boolean")
private String useOriginalBody;
@XmlAttribute
@Metadata(label = "advanced", javaType = "org.apache.camel.processor.errorhandler.RedeliveryPolicy")
private String redeliveryPolicyRef;
@XmlAttribute
@Metadata(label = "advanced")
private String loggerRef;
@XmlAttribute
@Metadata(label = "advanced", javaType = "org.apache.camel.LoggingLevel", defaultValue = "ERROR",
enums = "TRACE,DEBUG,INFO,WARN,ERROR,OFF")
private String level;
@XmlAttribute
@Metadata(label = "advanced")
private String logName;
@XmlAttribute
@Metadata(label = "advanced", javaType = "org.apache.camel.Processor")
private String onRedeliveryRef;
@XmlAttribute
@Metadata(label = "advanced", javaType = "org.apache.camel.Processor")
private String onExceptionOccurredRef;
@XmlAttribute
@Metadata(label = "advanced", javaType = "org.apache.camel.Processor")
private String onPrepareFailureRef;
@XmlAttribute
@Metadata(label = "advanced", javaType = "org.apache.camel.Processor")
private String retryWhileRef;
@XmlAttribute
@Metadata(label = "advanced", javaType = "java.util.concurrent.ScheduledExecutorService")
private String executorServiceRef;
public DefaultErrorHandlerDefinition() {
}
protected DefaultErrorHandlerDefinition(DefaultErrorHandlerDefinition source) {
this.loggerBean = source.loggerBean;
this.onRedeliveryProcessor = source.onRedeliveryProcessor;
this.onPrepareFailureProcessor = source.onPrepareFailureProcessor;
this.onExceptionOccurredProcessor = source.onExceptionOccurredProcessor;
this.executorServiceBean = source.executorServiceBean;
this.retryWhilePredicate = source.retryWhilePredicate;
this.redeliveryPolicy = source.redeliveryPolicy;
this.useOriginalMessage = source.useOriginalMessage;
this.useOriginalBody = source.useOriginalBody;
this.redeliveryPolicyRef = source.redeliveryPolicyRef;
this.loggerRef = source.loggerRef;
this.level = source.level;
this.logName = source.logName;
this.onRedeliveryRef = source.onRedeliveryRef;
this.onExceptionOccurredRef = source.onExceptionOccurredRef;
this.onPrepareFailureRef = source.onPrepareFailureRef;
this.retryWhileRef = source.retryWhileRef;
this.executorServiceRef = source.executorServiceRef;
}
    // Creates a copy of this definition via the (shallow) copy constructor.
    @Override
    public DefaultErrorHandlerDefinition copyDefinition() {
        return new DefaultErrorHandlerDefinition(this);
    }
    // This error handler does not participate in transactions.
    @Override
    public boolean supportTransacted() {
        return false;
    }
    // Creates a fresh definition and copies all builder-configurable state onto it.
    @Override
    public ErrorHandlerFactory cloneBuilder() {
        DefaultErrorHandlerDefinition answer = new DefaultErrorHandlerDefinition();
        cloneBuilder(answer);
        return answer;
    }
    // Copies each configurable option onto the given target. Unlike the copy constructor,
    // the redelivery policy is deep-copied (copy()) so the clone can be mutated independently.
    protected void cloneBuilder(DefaultErrorHandlerDefinition other) {
        other.setExecutorServiceBean(getExecutorServiceBean());
        other.setExecutorServiceRef(getExecutorServiceRef());
        other.setLevel(getLevel());
        other.setLogName(getLogName());
        other.setLoggerBean(getLoggerBean());
        other.setLoggerRef(getLoggerRef());
        other.setOnExceptionOccurredProcessor(getOnExceptionOccurredProcessor());
        other.setOnExceptionOccurredRef(getOnExceptionOccurredRef());
        other.setOnPrepareFailureProcessor(getOnPrepareFailureProcessor());
        other.setOnPrepareFailureRef(getOnPrepareFailureRef());
        other.setOnRedeliveryProcessor(getOnRedeliveryProcessor());
        other.setOnRedeliveryRef(getOnRedeliveryRef());
        other.setRedeliveryPolicyRef(getRedeliveryPolicyRef());
        other.setRetryWhilePredicate(getRetryWhilePredicate());
        other.setRetryWhileRef(getRetryWhileRef());
        other.setUseOriginalBody(getUseOriginalBody());
        other.setUseOriginalMessage(getUseOriginalMessage());
        if (hasRedeliveryPolicy()) {
            other.setRedeliveryPolicy(getRedeliveryPolicy().copy());
        }
    }
    // Protected factory method for the redelivery policy; overridable by subclasses.
    protected RedeliveryPolicyDefinition createRedeliveryPolicy() {
        return new RedeliveryPolicyDefinition();
    }
    /** @return the configured logger reference, or null if not set */
    public String getLoggerRef() {
        return loggerRef;
    }
    /**
     * References to a logger to use as logger for the error handler
     */
    public void setLoggerRef(String loggerRef) {
        this.loggerRef = loggerRef;
    }
    /** @return the logger bean instance, or null if not set */
    public CamelLogger getLoggerBean() {
        return loggerBean;
    }
    /**
     * Sets the logger bean instance to use as logger for the error handler
     */
    public void setLoggerBean(CamelLogger loggerBean) {
        this.loggerBean = loggerBean;
    }
    /** @return the configured logging level (as String), or null if not set */
    public String getLevel() {
        return level;
    }
    /**
     * Logging level to use by error handler
     */
    public void setLevel(String level) {
        this.level = level;
    }
    /** @return the configured logger name, or null if not set */
    public String getLogName() {
        return logName;
    }
    /**
     * Name of the logger to use by the error handler
     */
    public void setLogName(String logName) {
        this.logName = logName;
    }
    /** @return the configured useOriginalMessage option (as String), or null if not set */
    public String getUseOriginalMessage() {
        return useOriginalMessage;
    }
/**
* Will use the original input {@link org.apache.camel.Message} (original body and headers) when an
* {@link org.apache.camel.Exchange} is moved to the dead letter queue.
* <p/>
* <b>Notice:</b> this only applies when all redeliveries attempt have failed and the
* {@link org.apache.camel.Exchange} is doomed for failure. <br/>
* Instead of using the current inprogress {@link org.apache.camel.Exchange} IN message we use the original IN
* message instead. This allows you to store the original input in the dead letter queue instead of the inprogress
* snapshot of the IN message. For instance if you route transform the IN body during routing and then failed. With
* the original exchange store in the dead letter queue it might be easier to manually re submit the
* {@link org.apache.camel.Exchange} again as the IN message is the same as when Camel received it. So you should be
* able to send the {@link org.apache.camel.Exchange} to the same input.
* <p/>
* The difference between useOriginalMessage and useOriginalBody is that the former includes both the original body
* and headers, where as the latter only includes the original body. You can use the latter to enrich the message
* with custom headers and include the original message body. The former wont let you do this, as its using the
* original message body and headers as they are. You cannot enable both useOriginalMessage and useOriginalBody.
* <p/>
* The original input message is defensively copied, and the copied message body is converted to
* {@link org.apache.camel.StreamCache} if possible (stream caching is enabled, can be disabled globally or on the
* original route), to ensure the body can be read when the original message is being used later. If the body is
* converted to {@link org.apache.camel.StreamCache} then the message body on the current
* {@link org.apache.camel.Exchange} is replaced with the {@link org.apache.camel.StreamCache} body. If the body is
* not converted to {@link org.apache.camel.StreamCache} then the body will not be able to re-read when accessed
* later.
* <p/>
* <b>Important:</b> The original input means the input message that are bounded by the current
* {@link org.apache.camel.spi.UnitOfWork}. An unit of work typically spans one route, or multiple routes if they
* are connected using internal endpoints such as direct or seda. When messages is passed via external endpoints
* such as JMS or HTTP then the consumer will create a new unit of work, with the message it received as input as
* the original input. Also some EIP patterns such as splitter, multicast, will create a new unit of work boundary
* for the messages in their sub-route (eg the splitted message); however these EIPs have an option named
* <tt>shareUnitOfWork</tt> which allows to combine with the parent unit of work in regard to error handling and
* therefore use the parent original message.
* <p/>
* By default this feature is off.
*/
    public void setUseOriginalMessage(String useOriginalMessage) {
        this.useOriginalMessage = useOriginalMessage; // stored verbatim; see the javadoc above for semantics
    }
    /** @return the configured useOriginalBody option (as String), or null if not set */
    public String getUseOriginalBody() {
        return useOriginalBody;
    }
/**
* Will use the original input {@link org.apache.camel.Message} body (original body only) when an
* {@link org.apache.camel.Exchange} is moved to the dead letter queue.
* <p/>
* <b>Notice:</b> this only applies when all redeliveries attempt have failed and the
* {@link org.apache.camel.Exchange} is doomed for failure. <br/>
* Instead of using the current inprogress {@link org.apache.camel.Exchange} IN message we use the original IN
* message instead. This allows you to store the original input in the dead letter queue instead of the inprogress
* snapshot of the IN message. For instance if you route transform the IN body during routing and then failed. With
* the original exchange store in the dead letter queue it might be easier to manually re submit the
* {@link org.apache.camel.Exchange} again as the IN message is the same as when Camel received it. So you should be
* able to send the {@link org.apache.camel.Exchange} to the same input.
* <p/>
* The difference between useOriginalMessage and useOriginalBody is that the former includes both the original body
* and headers, where as the latter only includes the original body. You can use the latter to enrich the message
* with custom headers and include the original message body. The former wont let you do this, as its using the
* original message body and headers as they are. You cannot enable both useOriginalMessage and useOriginalBody.
* <p/>
* The original input message is defensively copied, and the copied message body is converted to
* {@link org.apache.camel.StreamCache} if possible (stream caching is enabled, can be disabled globally or on the
* original route), to ensure the body can be read when the original message is being used later. If the body is
* converted to {@link org.apache.camel.StreamCache} then the message body on the current
* {@link org.apache.camel.Exchange} is replaced with the {@link org.apache.camel.StreamCache} body. If the body is
* not converted to {@link org.apache.camel.StreamCache} then the body will not be able to re-read when accessed
* later.
* <p/>
* <b>Important:</b> The original input means the input message that are bounded by the current
* {@link org.apache.camel.spi.UnitOfWork}. An unit of work typically spans one route, or multiple routes if they
* are connected using internal endpoints such as direct or seda. When messages is passed via external endpoints
* such as JMS or HTTP then the consumer will create a new unit of work, with the message it received as input as
* the original input. Also some EIP patterns such as splitter, multicast, will create a new unit of work boundary
* for the messages in their sub-route (eg the splitted message); however these EIPs have an option named
* <tt>shareUnitOfWork</tt> which allows to combine with the parent unit of work in regard to error handling and
* therefore use the parent original message.
* <p/>
* By default this feature is off.
*/
    public void setUseOriginalBody(String useOriginalBody) {
        this.useOriginalBody = useOriginalBody; // stored verbatim; see the javadoc above for semantics
    }
    /** @return the configured on-redelivery processor reference, or null if not set */
    public String getOnRedeliveryRef() {
        return onRedeliveryRef;
    }
    /**
     * Sets a reference to a processor that should be processed <b>before</b> a redelivery attempt.
     * <p/>
     * Can be used to change the {@link org.apache.camel.Exchange} <b>before</b> its being redelivered.
     */
    public void setOnRedeliveryRef(String onRedeliveryRef) {
        this.onRedeliveryRef = onRedeliveryRef;
    }
    /** @return the on-redelivery processor instance, or null if not set */
    public Processor getOnRedeliveryProcessor() {
        return onRedeliveryProcessor;
    }
    /**
     * Sets a processor that should be processed <b>before</b> a redelivery attempt.
     * <p/>
     * Can be used to change the {@link org.apache.camel.Exchange} <b>before</b> its being redelivered.
     */
    public void setOnRedeliveryProcessor(Processor onRedeliveryProcessor) {
        this.onRedeliveryProcessor = onRedeliveryProcessor;
    }
    /** @return the on-exception-occurred processor reference, or null if not set */
    public String getOnExceptionOccurredRef() {
        return onExceptionOccurredRef;
    }
    /**
     * Sets a reference to a processor that should be processed <b>just after</b> an exception occurred. Can be used to
     * perform custom logging about the occurred exception at the exact time it happened.
     * <p/>
     * Important: Any exception thrown from this processor will be ignored.
     */
    public void setOnExceptionOccurredRef(String onExceptionOccurredRef) {
        this.onExceptionOccurredRef = onExceptionOccurredRef;
    }
    /** @return the on-exception-occurred processor instance, or null if not set */
    public Processor getOnExceptionOccurredProcessor() {
        return onExceptionOccurredProcessor;
    }
    /**
     * Sets a processor that should be processed <b>just after</b> an exception occurred. Can be used to perform custom
     * logging about the occurred exception at the exact time it happened.
     * <p/>
     * Important: Any exception thrown from this processor will be ignored.
     */
    public void setOnExceptionOccurredProcessor(Processor onExceptionOccurredProcessor) {
        this.onExceptionOccurredProcessor = onExceptionOccurredProcessor;
    }
    /** @return the on-prepare-failure processor reference, or null if not set */
    public String getOnPrepareFailureRef() {
        return onPrepareFailureRef;
    }
    /**
     * Sets a reference to a processor to prepare the {@link org.apache.camel.Exchange} before handled by the failure
     * processor / dead letter channel. This allows for example to enrich the message before sending to a dead letter
     * queue.
     */
    public void setOnPrepareFailureRef(String onPrepareFailureRef) {
        this.onPrepareFailureRef = onPrepareFailureRef;
    }
    /** @return the on-prepare-failure processor instance, or null if not set */
    public Processor getOnPrepareFailureProcessor() {
        return onPrepareFailureProcessor;
    }
    /**
     * Sets a processor to prepare the {@link org.apache.camel.Exchange} before handled by the failure processor / dead
     * letter channel. This allows for example to enrich the message before sending to a dead letter queue.
     */
    public void setOnPrepareFailureProcessor(Processor onPrepareFailureProcessor) {
        this.onPrepareFailureProcessor = onPrepareFailureProcessor;
    }
    /** @return the retry-while reference, or null if not set */
    public String getRetryWhileRef() {
        return retryWhileRef;
    }
    /**
     * Sets a retry while predicate.
     *
     * Will continue retrying until the predicate evaluates to false.
     */
    public void setRetryWhileRef(String retryWhileRef) {
        this.retryWhileRef = retryWhileRef;
    }
    /** @return the redelivery policy reference, or null if not set */
    public String getRedeliveryPolicyRef() {
        return redeliveryPolicyRef;
    }
    /**
     * Sets a reference to a {@link RedeliveryPolicy} to be used for redelivery settings.
     */
    public void setRedeliveryPolicyRef(String redeliveryPolicyRef) {
        this.redeliveryPolicyRef = redeliveryPolicyRef;
    }
    /** @return the executor service reference, or null if not set */
    public String getExecutorServiceRef() {
        return executorServiceRef;
    }
    /**
     * Sets a reference to a thread pool to be used by the error handler
     */
    public void setExecutorServiceRef(String executorServiceRef) {
        this.executorServiceRef = executorServiceRef;
    }
    /** @return the executor service bean instance, or null if not set */
    public ScheduledExecutorService getExecutorServiceBean() {
        return executorServiceBean;
    }
    /**
     * Sets a thread pool to be used by the error handler
     */
    public void setExecutorServiceBean(ScheduledExecutorService executorServiceBean) {
        this.executorServiceBean = executorServiceBean;
    }
    /** @return the retry-while predicate instance, or null if not set */
    public Predicate getRetryWhilePredicate() {
        return retryWhilePredicate;
    }
    /**
     * Sets a retry while predicate.
     *
     * Will continue retrying until the predicate evaluates to false.
     */
    public void setRetryWhilePredicate(Predicate retryWhilePredicate) {
        this.retryWhilePredicate = retryWhilePredicate;
    }
    // Lazily creates the redelivery policy on first access (never returns null).
    public RedeliveryPolicyDefinition getRedeliveryPolicy() {
        if (redeliveryPolicy == null) {
            redeliveryPolicy = createRedeliveryPolicy();
        }
        return redeliveryPolicy;
    }
    /** @return true if a redelivery policy has been created/configured (without triggering lazy creation) */
    public boolean hasRedeliveryPolicy() {
        return redeliveryPolicy != null;
    }
    /**
     * Sets the redelivery settings
     */
    public void setRedeliveryPolicy(RedeliveryPolicyDefinition redeliveryPolicy) {
        this.redeliveryPolicy = redeliveryPolicy;
    }
// Builder methods
// -------------------------------------------------------------------------
    /** Sets the back-off multiplier on the redelivery policy. */
    public DefaultErrorHandlerDefinition backOffMultiplier(double backOffMultiplier) {
        getRedeliveryPolicy().backOffMultiplier(backOffMultiplier);
        return this;
    }
    /** Sets the collision avoidance percent on the redelivery policy. */
    public DefaultErrorHandlerDefinition collisionAvoidancePercent(double collisionAvoidancePercent) {
        getRedeliveryPolicy().collisionAvoidancePercent(collisionAvoidancePercent);
        return this;
    }
    /** Sets the delay (in millis) between redelivery attempts on the redelivery policy. */
    public DefaultErrorHandlerDefinition redeliveryDelay(long delay) {
        getRedeliveryPolicy().redeliveryDelay(delay);
        return this;
    }
    /** Sets the delay pattern on the redelivery policy. */
    public DefaultErrorHandlerDefinition delayPattern(String delayPattern) {
        getRedeliveryPolicy().delayPattern(delayPattern);
        return this;
    }
    /** Sets the maximum number of redelivery attempts on the redelivery policy. */
    public DefaultErrorHandlerDefinition maximumRedeliveries(int maximumRedeliveries) {
        getRedeliveryPolicy().maximumRedeliveries(maximumRedeliveries);
        return this;
    }
    /** Disables redelivery by setting maximum redeliveries to 0. */
    public DefaultErrorHandlerDefinition disableRedelivery() {
        getRedeliveryPolicy().maximumRedeliveries(0);
        return this;
    }
    /** Sets the upper bound (in millis) for the redelivery delay on the redelivery policy. */
    public DefaultErrorHandlerDefinition maximumRedeliveryDelay(long maximumRedeliveryDelay) {
        getRedeliveryPolicy().maximumRedeliveryDelay(maximumRedeliveryDelay);
        return this;
    }
    /** Enables collision avoidance on the redelivery policy. */
    public DefaultErrorHandlerDefinition useCollisionAvoidance() {
        getRedeliveryPolicy().useCollisionAvoidance();
        return this;
    }
    /** Enables exponential back-off on the redelivery policy. */
    public DefaultErrorHandlerDefinition useExponentialBackOff() {
        getRedeliveryPolicy().useExponentialBackOff();
        return this;
    }
    /** Sets the logging level used when all redelivery attempts have been exhausted. */
    public DefaultErrorHandlerDefinition retriesExhaustedLogLevel(LoggingLevel retriesExhaustedLogLevel) {
        getRedeliveryPolicy().setRetriesExhaustedLogLevel(retriesExhaustedLogLevel.name());
        return this;
    }
    /** Sets the logging level used when a redelivery attempt is made. */
    public DefaultErrorHandlerDefinition retryAttemptedLogLevel(LoggingLevel retryAttemptedLogLevel) {
        getRedeliveryPolicy().setRetryAttemptedLogLevel(retryAttemptedLogLevel.name());
        return this;
    }
    /** Sets the interval (every n-th attempt) at which retry attempts are logged. */
    public DefaultErrorHandlerDefinition retryAttemptedLogInterval(int retryAttemptedLogInterval) {
        getRedeliveryPolicy().setRetryAttemptedLogInterval(String.valueOf(retryAttemptedLogInterval));
        return this;
    }
public DefaultErrorHandlerDefinition logStackTrace(boolean logStackTrace) {
getRedeliveryPolicy().setLogStackTrace(logStackTrace ? "true" : "false");
return this;
}
public DefaultErrorHandlerDefinition logRetryStackTrace(boolean logRetryStackTrace) {
getRedeliveryPolicy().setLogRetryStackTrace(logRetryStackTrace ? "true" : "false");
return this;
}
public DefaultErrorHandlerDefinition logHandled(boolean logHandled) {
getRedeliveryPolicy().setLogHandled(logHandled ? "true" : "false");
return this;
}
public DefaultErrorHandlerDefinition logNewException(boolean logNewException) {
getRedeliveryPolicy().setLogNewException(logNewException ? "true" : "false");
return this;
}
public DefaultErrorHandlerDefinition logExhausted(boolean logExhausted) {
getRedeliveryPolicy().setLogExhausted(logExhausted ? "true" : "false");
return this;
}
public DefaultErrorHandlerDefinition logRetryAttempted(boolean logRetryAttempted) {
getRedeliveryPolicy().setLogRetryAttempted(logRetryAttempted ? "true" : "false");
return this;
}
public DefaultErrorHandlerDefinition logExhaustedMessageHistory(boolean logExhaustedMessageHistory) {
getRedeliveryPolicy().setLogExhaustedMessageHistory(logExhaustedMessageHistory ? "true" : "false");
return this;
}
public DefaultErrorHandlerDefinition logExhaustedMessageBody(boolean logExhaustedMessageBody) {
getRedeliveryPolicy().setLogExhaustedMessageBody(logExhaustedMessageBody ? "true" : "false");
return this;
}
    /** Sets a reference to a custom exchange formatter on the redelivery policy. */
    public DefaultErrorHandlerDefinition exchangeFormatterRef(String exchangeFormatterRef) {
        getRedeliveryPolicy().setExchangeFormatterRef(exchangeFormatterRef);
        return this;
    }
    /**
     * Will allow asynchronous delayed redeliveries. The route, in particular the consumer's component, must support the
     * Asynchronous Routing Engine (e.g. seda)
     *
     * @see RedeliveryPolicy#setAsyncDelayedRedelivery(boolean)
     * @return the builder
     */
    public DefaultErrorHandlerDefinition asyncDelayedRedelivery() {
        getRedeliveryPolicy().setAsyncDelayedRedelivery("true");
        return this;
    }
/**
* Controls whether to allow redelivery while stopping/shutting down a route that uses error handling.
*
* @param allowRedeliveryWhileStopping <tt>true</tt> to allow redelivery, <tt>false</tt> to reject redeliveries
* @return the builder
*/
public DefaultErrorHandlerDefinition allowRedeliveryWhileStopping(boolean allowRedeliveryWhileStopping) {
getRedeliveryPolicy().setAllowRedeliveryWhileStopping(allowRedeliveryWhileStopping ? "true" : "false");
return this;
}
    /**
     * Sets the thread pool to be used for redelivery.
     *
     * @param executorService the scheduled thread pool to use
     * @return the builder.
     */
    public DefaultErrorHandlerDefinition executorService(ScheduledExecutorService executorService) {
        setExecutorServiceBean(executorService);
        return this;
    }
    /**
     * Sets a reference to a thread pool to be used for redelivery.
     *
     * @param ref reference to a scheduled thread pool
     * @return the builder.
     */
    public DefaultErrorHandlerDefinition executorServiceRef(String ref) {
        setExecutorServiceRef(ref);
        return this;
    }
    /**
     * Sets the logger used for caught exceptions
     *
     * @param logger the logger
     * @return the builder
     */
    public DefaultErrorHandlerDefinition logger(CamelLogger logger) {
        // replaces any previously configured logger bean
        setLoggerBean(logger);
        return this;
    }
    /**
     * Sets the logging level of exceptions caught
     *
     * @param level the logging level
     * @return the builder
     */
    public DefaultErrorHandlerDefinition loggingLevel(String level) {
        setLevel(level);
        return this;
    }
    /**
     * Sets the logging level of exceptions caught
     *
     * @param level the logging level
     * @return the builder
     */
    public DefaultErrorHandlerDefinition loggingLevel(LoggingLevel level) {
        // stored as the enum name, matching the String overload
        setLevel(level.name());
        return this;
    }
/**
* Sets the log used for caught exceptions
*
* @param log the logger
* @return the builder
*/
public DefaultErrorHandlerDefinition log(org.slf4j.Logger log) {
if (loggerBean == null) {
loggerBean = new CamelLogger(LoggerFactory.getLogger(DefaultErrorHandler.class), LoggingLevel.ERROR);
}
loggerBean.setLog(log);
return this;
}
    /**
     * Sets the log used for caught exceptions
     *
     * @param log the log name
     * @return the builder
     */
    public DefaultErrorHandlerDefinition log(String log) {
        // delegates to log(org.slf4j.Logger) after resolving the named logger
        return log(LoggerFactory.getLogger(log));
    }
    /**
     * Sets the log used for caught exceptions
     *
     * @param log the log class
     * @return the builder
     */
    public DefaultErrorHandlerDefinition log(Class<?> log) {
        // delegates to log(org.slf4j.Logger) after resolving a logger for the class
        return log(LoggerFactory.getLogger(log));
    }
    /**
     * Sets a processor that should be processed <b>before</b> a redelivery attempt.
     * <p/>
     * Can be used to change the {@link org.apache.camel.Exchange} <b>before</b> its being redelivered.
     *
     * @param processor the processor
     * @return the builder
     */
    public DefaultErrorHandlerDefinition onRedelivery(Processor processor) {
        setOnRedeliveryProcessor(processor);
        return this;
    }
    /**
     * Sets a reference for the processor to use <b>before</b> a redelivery attempt.
     *
     * @param onRedeliveryRef the processor's reference
     * @return the builder
     * @see #onRedelivery(Processor)
     */
    public DefaultErrorHandlerDefinition onRedeliveryRef(String onRedeliveryRef) {
        setOnRedeliveryRef(onRedeliveryRef);
        return this;
    }
    /**
     * Sets the retry while expression.
     * <p/>
     * Will continue retrying until expression evaluates to <tt>false</tt>.
     *
     * @param retryWhile expression that determines when to stop retrying
     * @return the builder
     */
    public DefaultErrorHandlerDefinition retryWhile(Expression retryWhile) {
        setRetryWhilePredicate(ExpressionToPredicateAdapter.toPredicate(retryWhile));
        return this;
    }
    /**
     * Sets a reference to a retry-while predicate.
     *
     * @param retryWhileRef the predicate's reference
     * @return the builder
     * @see #retryWhile(Expression)
     */
    public DefaultErrorHandlerDefinition retryWhileRef(String retryWhileRef) {
        setRetryWhileRef(retryWhileRef);
        return this;
    }
/**
* Will use the original input {@link org.apache.camel.Message} (original body and headers) when an
* {@link org.apache.camel.Exchange} is moved to the dead letter queue.
* <p/>
* <b>Notice:</b> this only applies when all redeliveries attempt have failed and the
* {@link org.apache.camel.Exchange} is doomed for failure. <br/>
* Instead of using the current inprogress {@link org.apache.camel.Exchange} IN message we use the original IN
* message instead. This allows you to store the original input in the dead letter queue instead of the inprogress
* snapshot of the IN message. For instance if you route transform the IN body during routing and then failed. With
* the original exchange store in the dead letter queue it might be easier to manually re submit the
* {@link org.apache.camel.Exchange} again as the IN message is the same as when Camel received it. So you should be
* able to send the {@link org.apache.camel.Exchange} to the same input.
* <p/>
* The difference between useOriginalMessage and useOriginalBody is that the former includes both the original body
* and headers, where as the latter only includes the original body. You can use the latter to enrich the message
* with custom headers and include the original message body. The former wont let you do this, as its using the
* original message body and headers as they are. You cannot enable both useOriginalMessage and useOriginalBody.
* <p/>
* The original input message is defensively copied, and the copied message body is converted to
* {@link org.apache.camel.StreamCache} if possible (stream caching is enabled, can be disabled globally or on the
* original route), to ensure the body can be read when the original message is being used later. If the body is
* converted to {@link org.apache.camel.StreamCache} then the message body on the current
* {@link org.apache.camel.Exchange} is replaced with the {@link org.apache.camel.StreamCache} body. If the body is
* not converted to {@link org.apache.camel.StreamCache} then the body will not be able to re-read when accessed
* later.
* <p/>
* <b>Important:</b> The original input means the input message that are bounded by the current
* {@link org.apache.camel.spi.UnitOfWork}. An unit of work typically spans one route, or multiple routes if they
* are connected using internal endpoints such as direct or seda. When messages is passed via external endpoints
* such as JMS or HTTP then the consumer will create a new unit of work, with the message it received as input as
* the original input. Also some EIP patterns such as splitter, multicast, will create a new unit of work boundary
* for the messages in their sub-route (eg the split message); however these EIPs have an option named
* <tt>shareUnitOfWork</tt> which allows to combine with the parent unit of work in regard to error handling and
* therefore use the parent original message.
* <p/>
* By default this feature is off.
*
* @return the builder
* @see #useOriginalBody()
*/
    public DefaultErrorHandlerDefinition useOriginalMessage() {
        // enables the option ("true"); see the javadoc above for the full semantics
        setUseOriginalMessage("true");
        return this;
    }
/**
* Will use the original input {@link org.apache.camel.Message} body (original body only) when an
* {@link org.apache.camel.Exchange} is moved to the dead letter queue.
* <p/>
* <b>Notice:</b> this only applies when all redeliveries attempt have failed and the
* {@link org.apache.camel.Exchange} is doomed for failure. <br/>
* Instead of using the current inprogress {@link org.apache.camel.Exchange} IN message we use the original IN
* message instead. This allows you to store the original input in the dead letter queue instead of the inprogress
* snapshot of the IN message. For instance if you route transform the IN body during routing and then failed. With
* the original exchange store in the dead letter queue it might be easier to manually re submit the
* {@link org.apache.camel.Exchange} again as the IN message is the same as when Camel received it. So you should be
* able to send the {@link org.apache.camel.Exchange} to the same input.
* <p/>
* The difference between useOriginalMessage and useOriginalBody is that the former includes both the original body
* and headers, where as the latter only includes the original body. You can use the latter to enrich the message
* with custom headers and include the original message body. The former wont let you do this, as its using the
* original message body and headers as they are. You cannot enable both useOriginalMessage and useOriginalBody.
* <p/>
* The original input message is defensively copied, and the copied message body is converted to
* {@link org.apache.camel.StreamCache} if possible, to ensure the body can be read when the original message is
* being used later. If the body is not converted to {@link org.apache.camel.StreamCache} then the body will not be
* able to re-read when accessed later.
* <p/>
* <b>Important:</b> The original input means the input message that are bounded by the current
* {@link org.apache.camel.spi.UnitOfWork}. An unit of work typically spans one route, or multiple routes if they
* are connected using internal endpoints such as direct or seda. When messages is passed via external endpoints
* such as JMS or HTTP then the consumer will create a new unit of work, with the message it received as input as
* the original input. Also some EIP patterns such as splitter, multicast, will create a new unit of work boundary
* for the messages in their sub-route (eg the split message); however these EIPs have an option named
* <tt>shareUnitOfWork</tt> which allows to combine with the parent unit of work in regard to error handling and
* therefore use the parent original message.
* <p/>
* By default this feature is off.
*
* @return the builder
* @see #useOriginalMessage()
*/
    public DefaultErrorHandlerDefinition useOriginalBody() {
        // enables the option ("true"); see the javadoc above for the full semantics
        setUseOriginalBody("true");
        return this;
    }
    /**
     * Sets a custom {@link org.apache.camel.Processor} to prepare the {@link org.apache.camel.Exchange} before handled
     * by the failure processor / dead letter channel. This allows for example to enrich the message before sending to a
     * dead letter queue.
     *
     * @param processor the processor
     * @return the builder
     */
    public DefaultErrorHandlerDefinition onPrepareFailure(Processor processor) {
        setOnPrepareFailureProcessor(processor);
        return this;
    }
    /**
     * Sets a reference for the processor to use before handled by the failure processor.
     *
     * @param onPrepareFailureRef the processor's reference
     * @return the builder
     * @see #onPrepareFailure(Processor)
     */
    public DefaultErrorHandlerDefinition onPrepareFailureRef(String onPrepareFailureRef) {
        setOnPrepareFailureRef(onPrepareFailureRef);
        return this;
    }
    /**
     * Sets a custom {@link org.apache.camel.Processor} to process the {@link org.apache.camel.Exchange} just after an
     * exception was thrown. This allows to execute the processor at the same time the exception was thrown.
     * <p/>
     * Important: Any exception thrown from this processor will be ignored.
     *
     * @param processor the processor
     * @return the builder
     */
    public DefaultErrorHandlerDefinition onExceptionOccurred(Processor processor) {
        setOnExceptionOccurredProcessor(processor);
        return this;
    }
    /**
     * Sets a reference for the processor to use just after an exception was thrown.
     *
     * @param onExceptionOccurredRef the processor's reference
     * @return the builder
     * @see #onExceptionOccurred(Processor)
     */
    public DefaultErrorHandlerDefinition onExceptionOccurredRef(String onExceptionOccurredRef) {
        setOnExceptionOccurredRef(onExceptionOccurredRef);
        return this;
    }
    /**
     * Sets a reference to a {@link RedeliveryPolicy} to be used for redelivery settings.
     *
     * @param redeliveryPolicyRef the redelivery policy reference
     * @return the builder
     */
    public DefaultErrorHandlerDefinition redeliveryPolicyRef(String redeliveryPolicyRef) {
        setRedeliveryPolicyRef(redeliveryPolicyRef);
        return this;
    }
}
| DefaultErrorHandlerDefinition |
java | mapstruct__mapstruct | processor/src/test/resources/fixtures/org/mapstruct/ap/test/conversion/string/SourceTargetMapperImpl.java | {
"start": 431,
"end": 4342
} | class ____ implements SourceTargetMapper {
    // Maps each Source property to its String representation on Target. Primitive properties
    // are converted unconditionally; wrapper/object properties are null-guarded so the target
    // property stays null when the source value is absent.
    // NOTE(review): this looks like MapStruct-generated fixture output — if the fixture is
    // compared byte-for-byte against generator output, the code must not be reformatted.
    @Override
    public Target sourceToTarget(Source source) {
        // generated null-check contract: null in, null out
        if ( source == null ) {
            return null;
        }
        Target target = new Target();
        target.setB( String.valueOf( source.getB() ) );
        if ( source.getBb() != null ) {
            target.setBb( String.valueOf( source.getBb() ) );
        }
        target.setS( String.valueOf( source.getS() ) );
        if ( source.getSs() != null ) {
            target.setSs( String.valueOf( source.getSs() ) );
        }
        target.setI( String.valueOf( source.getI() ) );
        if ( source.getIi() != null ) {
            target.setIi( String.valueOf( source.getIi() ) );
        }
        target.setL( String.valueOf( source.getL() ) );
        if ( source.getLl() != null ) {
            target.setLl( String.valueOf( source.getLl() ) );
        }
        target.setF( String.valueOf( source.getF() ) );
        if ( source.getFf() != null ) {
            target.setFf( String.valueOf( source.getFf() ) );
        }
        target.setD( String.valueOf( source.getD() ) );
        if ( source.getDd() != null ) {
            target.setDd( String.valueOf( source.getDd() ) );
        }
        target.setBool( String.valueOf( source.getBool() ) );
        if ( source.getBoolBool() != null ) {
            target.setBoolBool( String.valueOf( source.getBoolBool() ) );
        }
        target.setC( String.valueOf( source.getC() ) );
        if ( source.getCc() != null ) {
            // Character/StringBuilder use toString() rather than String.valueOf
            target.setCc( source.getCc().toString() );
        }
        if ( source.getSb() != null ) {
            target.setSb( source.getSb().toString() );
        }
        return target;
    }
    // Inverse mapping: parses each String property on Target back into the corresponding
    // primitive/wrapper property on Source. All conversions are null-guarded; numeric parses
    // will throw NumberFormatException on malformed input (generated code performs no
    // validation). Char properties take charAt(0); the object property is passed through.
    // NOTE(review): looks like MapStruct-generated fixture output — keep formatting as-is.
    @Override
    public Source targetToSource(Target target) {
        // generated null-check contract: null in, null out
        if ( target == null ) {
            return null;
        }
        Source source = new Source();
        if ( target.getB() != null ) {
            source.setB( Byte.parseByte( target.getB() ) );
        }
        if ( target.getBb() != null ) {
            source.setBb( Byte.parseByte( target.getBb() ) );
        }
        if ( target.getS() != null ) {
            source.setS( Short.parseShort( target.getS() ) );
        }
        if ( target.getSs() != null ) {
            source.setSs( Short.parseShort( target.getSs() ) );
        }
        if ( target.getI() != null ) {
            source.setI( Integer.parseInt( target.getI() ) );
        }
        if ( target.getIi() != null ) {
            source.setIi( Integer.parseInt( target.getIi() ) );
        }
        if ( target.getL() != null ) {
            source.setL( Long.parseLong( target.getL() ) );
        }
        if ( target.getLl() != null ) {
            source.setLl( Long.parseLong( target.getLl() ) );
        }
        if ( target.getF() != null ) {
            source.setF( Float.parseFloat( target.getF() ) );
        }
        if ( target.getFf() != null ) {
            source.setFf( Float.parseFloat( target.getFf() ) );
        }
        if ( target.getD() != null ) {
            source.setD( Double.parseDouble( target.getD() ) );
        }
        if ( target.getDd() != null ) {
            source.setDd( Double.parseDouble( target.getDd() ) );
        }
        if ( target.getBool() != null ) {
            source.setBool( Boolean.parseBoolean( target.getBool() ) );
        }
        if ( target.getBoolBool() != null ) {
            source.setBoolBool( Boolean.parseBoolean( target.getBoolBool() ) );
        }
        if ( target.getC() != null ) {
            // charAt(0) will throw StringIndexOutOfBoundsException on an empty string
            source.setC( target.getC().charAt( 0 ) );
        }
        if ( target.getCc() != null ) {
            source.setCc( target.getCc().charAt( 0 ) );
        }
        source.setObject( target.getObject() );
        if ( target.getSb() != null ) {
            // defensive copy into a fresh StringBuilder
            source.setSb( new StringBuilder( target.getSb() ) );
        }
        return source;
    }
}
| SourceTargetMapperImpl |
java | qos-ch__slf4j | slf4j-api/src/main/java/org/slf4j/Logger.java | {
"start": 3443,
"end": 30868
} | interface ____ {
    /**
     * Case-insensitive String constant used to retrieve the name of the root logger.
     * Value: {@value}.
     *
     * @since 1.3
     */
    final public String ROOT_LOGGER_NAME = "ROOT";
    /**
     * Return the name of this <code>Logger</code> instance.
     * @return name of this logger instance
     */
    public String getName();
/**
* <p>Make a new {@link LoggingEventBuilder} instance as appropriate for this logger implementation.
* This default implementation always returns a new instance of {@link DefaultLoggingEventBuilder}.</p>
* <p></p>
* <p>This method is intended to be used by logging systems implementing the SLF4J API and <b>not</b>
* by end users.</p>
* <p></p>
* <p>Also note that a {@link LoggingEventBuilder} instance should be built for all levels,
* independently of the level argument. In other words, this method is an <b>unconditional</b>
* constructor for the {@link LoggingEventBuilder} appropriate for this logger implementation.</p>
* <p></p>
* @param level desired level for the event builder
* @return a new {@link LoggingEventBuilder} instance as appropriate for <b>this</b> logger
* @since 2.0
*/
default public LoggingEventBuilder makeLoggingEventBuilder(Level level) {
return new DefaultLoggingEventBuilder(this, level);
}
/**
* Make a new {@link LoggingEventBuilder} instance as appropriate for this logger and the
* desired {@link Level} passed as parameter. If this Logger is disabled for the given Level, then
* a {@link NOPLoggingEventBuilder} is returned.
*
*
* @param level desired level for the event builder
* @return a new {@link LoggingEventBuilder} instance as appropriate for this logger
* @since 2.0
*/
@CheckReturnValue
default public LoggingEventBuilder atLevel(Level level) {
if (isEnabledForLevel(level)) {
return makeLoggingEventBuilder(level);
} else {
return NOPLoggingEventBuilder.singleton();
}
}
/**
* Returns whether this Logger is enabled for a given {@link Level}.
*
* @param level
* @return true if enabled, false otherwise.
*/
default public boolean isEnabledForLevel(Level level) {
int levelInt = level.toInt();
switch (levelInt) {
case (TRACE_INT):
return isTraceEnabled();
case (DEBUG_INT):
return isDebugEnabled();
case (INFO_INT):
return isInfoEnabled();
case (WARN_INT):
return isWarnEnabled();
case (ERROR_INT):
return isErrorEnabled();
default:
throw new IllegalArgumentException("Level [" + level + "] not recognized.");
}
}
/**
* Is the logger instance enabled for the TRACE level?
*
* @return True if this Logger is enabled for the TRACE level,
* false otherwise.
* @since 1.4
*/
public boolean isTraceEnabled();
/**
* Log a message at the TRACE level.
*
* @param msg the message string to be logged
* @since 1.4
*/
public void trace(String msg);
/**
* Log a message at the TRACE level according to the specified format
* and argument.
*
* <p>This form avoids superfluous object creation when the logger
* is disabled for the TRACE level.
*
* @param format the format string
* @param arg the argument
* @since 1.4
*/
public void trace(String format, Object arg);
/**
* Log a message at the TRACE level according to the specified format
* and arguments.
*
* <p>This form avoids superfluous object creation when the logger
* is disabled for the TRACE level.
*
* @param format the format string
* @param arg1 the first argument
* @param arg2 the second argument
* @since 1.4
*/
public void trace(String format, Object arg1, Object arg2);
/**
* Log a message at the TRACE level according to the specified format
* and arguments.
*
* <p>This form avoids superfluous string concatenation when the logger
* is disabled for the TRACE level. However, this variant incurs the hidden
* (and relatively small) cost of creating an <code>Object[]</code> before invoking the method,
* even if this logger is disabled for TRACE. The variants taking {@link #trace(String, Object) one} and
* {@link #trace(String, Object, Object) two} arguments exist solely in order to avoid this hidden cost.
*
* @param format the format string
* @param arguments a list of 3 or more arguments
* @since 1.4
*/
public void trace(String format, Object... arguments);
/**
* Log an exception (throwable) at the TRACE level with an
* accompanying message.
*
* @param msg the message accompanying the exception
* @param t the exception (throwable) to log
* @since 1.4
*/
public void trace(String msg, Throwable t);
/**
* Similar to {@link #isTraceEnabled()} method except that the
* marker data is also taken into account.
*
* @param marker The marker data to take into consideration
* @return True if this Logger is enabled for the TRACE level,
* false otherwise.
*
* @since 1.4
*/
public boolean isTraceEnabled(Marker marker);
/**
* Entry point for fluent-logging for {@link org.slf4j.event.Level#TRACE} level.
*
* @return LoggingEventBuilder instance as appropriate for level TRACE
* @since 2.0
*/
@CheckReturnValue
default public LoggingEventBuilder atTrace() {
if (isTraceEnabled()) {
return makeLoggingEventBuilder(TRACE);
} else {
return NOPLoggingEventBuilder.singleton();
}
}
/**
* Log a message with the specific Marker at the TRACE level.
*
* @param marker the marker data specific to this log statement
* @param msg the message string to be logged
* @since 1.4
*/
public void trace(Marker marker, String msg);
/**
* This method is similar to {@link #trace(String, Object)} method except that the
* marker data is also taken into consideration.
*
* @param marker the marker data specific to this log statement
* @param format the format string
* @param arg the argument
* @since 1.4
*/
public void trace(Marker marker, String format, Object arg);
/**
* This method is similar to {@link #trace(String, Object, Object)}
* method except that the marker data is also taken into
* consideration.
*
* @param marker the marker data specific to this log statement
* @param format the format string
* @param arg1 the first argument
* @param arg2 the second argument
* @since 1.4
*/
public void trace(Marker marker, String format, Object arg1, Object arg2);
/**
* This method is similar to {@link #trace(String, Object...)}
* method except that the marker data is also taken into
* consideration.
*
* @param marker the marker data specific to this log statement
* @param format the format string
* @param argArray an array of arguments
* @since 1.4
*/
public void trace(Marker marker, String format, Object... argArray);
/**
* This method is similar to {@link #trace(String, Throwable)} method except that the
* marker data is also taken into consideration.
*
* @param marker the marker data specific to this log statement
* @param msg the message accompanying the exception
* @param t the exception (throwable) to log
* @since 1.4
*/
public void trace(Marker marker, String msg, Throwable t);
/**
* Is the logger instance enabled for the DEBUG level?
*
* @return True if this Logger is enabled for the DEBUG level,
* false otherwise.
*/
public boolean isDebugEnabled();
/**
* Log a message at the DEBUG level.
*
* @param msg the message string to be logged
*/
public void debug(String msg);
/**
* Log a message at the DEBUG level according to the specified format
* and argument.
*
* <p>This form avoids superfluous object creation when the logger
* is disabled for the DEBUG level.
*
* @param format the format string
* @param arg the argument
*/
public void debug(String format, Object arg);
/**
* Log a message at the DEBUG level according to the specified format
* and arguments.
*
* <p>This form avoids superfluous object creation when the logger
* is disabled for the DEBUG level.
*
* @param format the format string
* @param arg1 the first argument
* @param arg2 the second argument
*/
public void debug(String format, Object arg1, Object arg2);
/**
* Log a message at the DEBUG level according to the specified format
* and arguments.
*
* <p>This form avoids superfluous string concatenation when the logger
* is disabled for the DEBUG level. However, this variant incurs the hidden
* (and relatively small) cost of creating an <code>Object[]</code> before invoking the method,
* even if this logger is disabled for DEBUG. The variants taking
* {@link #debug(String, Object) one} and {@link #debug(String, Object, Object) two}
* arguments exist solely in order to avoid this hidden cost.
*
* @param format the format string
* @param arguments a list of 3 or more arguments
*/
public void debug(String format, Object... arguments);
/**
* Log an exception (throwable) at the DEBUG level with an
* accompanying message.
*
* @param msg the message accompanying the exception
* @param t the exception (throwable) to log
*/
public void debug(String msg, Throwable t);
/**
* Similar to {@link #isDebugEnabled()} method except that the
* marker data is also taken into account.
*
* @param marker The marker data to take into consideration
* @return True if this Logger is enabled for the DEBUG level,
* false otherwise.
*/
public boolean isDebugEnabled(Marker marker);
/**
* Log a message with the specific Marker at the DEBUG level.
*
* @param marker the marker data specific to this log statement
* @param msg the message string to be logged
*/
public void debug(Marker marker, String msg);
/**
* This method is similar to {@link #debug(String, Object)} method except that the
* marker data is also taken into consideration.
*
* @param marker the marker data specific to this log statement
* @param format the format string
* @param arg the argument
*/
public void debug(Marker marker, String format, Object arg);
/**
* This method is similar to {@link #debug(String, Object, Object)}
* method except that the marker data is also taken into
* consideration.
*
* @param marker the marker data specific to this log statement
* @param format the format string
* @param arg1 the first argument
* @param arg2 the second argument
*/
public void debug(Marker marker, String format, Object arg1, Object arg2);
/**
* This method is similar to {@link #debug(String, Object...)}
* method except that the marker data is also taken into
* consideration.
*
* @param marker the marker data specific to this log statement
* @param format the format string
* @param arguments a list of 3 or more arguments
*/
public void debug(Marker marker, String format, Object... arguments);
/**
* This method is similar to {@link #debug(String, Throwable)} method except that the
* marker data is also taken into consideration.
*
* @param marker the marker data specific to this log statement
* @param msg the message accompanying the exception
* @param t the exception (throwable) to log
*/
public void debug(Marker marker, String msg, Throwable t);
/**
* Entry point for fluent-logging for {@link org.slf4j.event.Level#DEBUG} level.
*
* @return LoggingEventBuilder instance as appropriate for level DEBUG
* @since 2.0
*/
@CheckReturnValue
default public LoggingEventBuilder atDebug() {
if (isDebugEnabled()) {
return makeLoggingEventBuilder(DEBUG);
} else {
return NOPLoggingEventBuilder.singleton();
}
}
/**
* Is the logger instance enabled for the INFO level?
*
* @return True if this Logger is enabled for the INFO level,
* false otherwise.
*/
public boolean isInfoEnabled();
/**
* Log a message at the INFO level.
*
* @param msg the message string to be logged
*/
public void info(String msg);
/**
* Log a message at the INFO level according to the specified format
* and argument.
*
* <p>This form avoids superfluous object creation when the logger
* is disabled for the INFO level.
*
* @param format the format string
* @param arg the argument
*/
public void info(String format, Object arg);
/**
* Log a message at the INFO level according to the specified format
* and arguments.
*
* <p>This form avoids superfluous object creation when the logger
* is disabled for the INFO level.
*
* @param format the format string
* @param arg1 the first argument
* @param arg2 the second argument
*/
public void info(String format, Object arg1, Object arg2);
/**
* Log a message at the INFO level according to the specified format
* and arguments.
*
* <p>This form avoids superfluous string concatenation when the logger
* is disabled for the INFO level. However, this variant incurs the hidden
* (and relatively small) cost of creating an <code>Object[]</code> before invoking the method,
* even if this logger is disabled for INFO. The variants taking
* {@link #info(String, Object) one} and {@link #info(String, Object, Object) two}
* arguments exist solely in order to avoid this hidden cost.
*
* @param format the format string
* @param arguments a list of 3 or more arguments
*/
public void info(String format, Object... arguments);
/**
* Log an exception (throwable) at the INFO level with an
* accompanying message.
*
* @param msg the message accompanying the exception
* @param t the exception (throwable) to log
*/
public void info(String msg, Throwable t);
/**
* Similar to {@link #isInfoEnabled()} method except that the marker
* data is also taken into consideration.
*
* @param marker The marker data to take into consideration
* @return true if this Logger is enabled for the INFO level,
* false otherwise.
*/
public boolean isInfoEnabled(Marker marker);
/**
* Log a message with the specific Marker at the INFO level.
*
* @param marker The marker specific to this log statement
* @param msg the message string to be logged
*/
public void info(Marker marker, String msg);
/**
* This method is similar to {@link #info(String, Object)} method except that the
* marker data is also taken into consideration.
*
* @param marker the marker data specific to this log statement
* @param format the format string
* @param arg the argument
*/
public void info(Marker marker, String format, Object arg);
/**
* This method is similar to {@link #info(String, Object, Object)}
* method except that the marker data is also taken into
* consideration.
*
* @param marker the marker data specific to this log statement
* @param format the format string
* @param arg1 the first argument
* @param arg2 the second argument
*/
public void info(Marker marker, String format, Object arg1, Object arg2);
/**
* This method is similar to {@link #info(String, Object...)}
* method except that the marker data is also taken into
* consideration.
*
* @param marker the marker data specific to this log statement
* @param format the format string
* @param arguments a list of 3 or more arguments
*/
public void info(Marker marker, String format, Object... arguments);
/**
* This method is similar to {@link #info(String, Throwable)} method
* except that the marker data is also taken into consideration.
*
* @param marker the marker data for this log statement
* @param msg the message accompanying the exception
* @param t the exception (throwable) to log
*/
public void info(Marker marker, String msg, Throwable t);
/**
* Entry point for fluent-logging for {@link org.slf4j.event.Level#INFO} level.
*
* @return LoggingEventBuilder instance as appropriate for level INFO
* @since 2.0
*/
@CheckReturnValue
default public LoggingEventBuilder atInfo() {
if (isInfoEnabled()) {
return makeLoggingEventBuilder(INFO);
} else {
return NOPLoggingEventBuilder.singleton();
}
}
/**
* Is the logger instance enabled for the WARN level?
*
* @return True if this Logger is enabled for the WARN level,
* false otherwise.
*/
public boolean isWarnEnabled();
/**
* Log a message at the WARN level.
*
* @param msg the message string to be logged
*/
public void warn(String msg);
/**
* Log a message at the WARN level according to the specified format
* and argument.
*
* <p>This form avoids superfluous object creation when the logger
* is disabled for the WARN level.
*
* @param format the format string
* @param arg the argument
*/
public void warn(String format, Object arg);
/**
* Log a message at the WARN level according to the specified format
* and arguments.
*
* <p>This form avoids superfluous string concatenation when the logger
* is disabled for the WARN level. However, this variant incurs the hidden
* (and relatively small) cost of creating an <code>Object[]</code> before invoking the method,
* even if this logger is disabled for WARN. The variants taking
* {@link #warn(String, Object) one} and {@link #warn(String, Object, Object) two}
* arguments exist solely in order to avoid this hidden cost.
*
* @param format the format string
* @param arguments a list of 3 or more arguments
*/
public void warn(String format, Object... arguments);
/**
* Log a message at the WARN level according to the specified format
* and arguments.
*
* <p>This form avoids superfluous object creation when the logger
* is disabled for the WARN level.
*
* @param format the format string
* @param arg1 the first argument
* @param arg2 the second argument
*/
public void warn(String format, Object arg1, Object arg2);
/**
* Log an exception (throwable) at the WARN level with an
* accompanying message.
*
* @param msg the message accompanying the exception
* @param t the exception (throwable) to log
*/
public void warn(String msg, Throwable t);
/**
* Similar to {@link #isWarnEnabled()} method except that the marker
* data is also taken into consideration.
*
* @param marker The marker data to take into consideration
* @return True if this Logger is enabled for the WARN level,
* false otherwise.
*/
public boolean isWarnEnabled(Marker marker);
/**
* Log a message with the specific Marker at the WARN level.
*
* @param marker The marker specific to this log statement
* @param msg the message string to be logged
*/
public void warn(Marker marker, String msg);
/**
* This method is similar to {@link #warn(String, Object)} method except that the
* marker data is also taken into consideration.
*
* @param marker the marker data specific to this log statement
* @param format the format string
* @param arg the argument
*/
public void warn(Marker marker, String format, Object arg);
/**
* This method is similar to {@link #warn(String, Object, Object)}
* method except that the marker data is also taken into
* consideration.
*
* @param marker the marker data specific to this log statement
* @param format the format string
* @param arg1 the first argument
* @param arg2 the second argument
*/
public void warn(Marker marker, String format, Object arg1, Object arg2);
/**
* This method is similar to {@link #warn(String, Object...)}
* method except that the marker data is also taken into
* consideration.
*
* @param marker the marker data specific to this log statement
* @param format the format string
* @param arguments a list of 3 or more arguments
*/
public void warn(Marker marker, String format, Object... arguments);
/**
* This method is similar to {@link #warn(String, Throwable)} method
* except that the marker data is also taken into consideration.
*
* @param marker the marker data for this log statement
* @param msg the message accompanying the exception
* @param t the exception (throwable) to log
*/
public void warn(Marker marker, String msg, Throwable t);
/**
* Entry point for fluent-logging for {@link org.slf4j.event.Level#WARN} level.
*
* @return LoggingEventBuilder instance as appropriate for level WARN
* @since 2.0
*/
@CheckReturnValue
default public LoggingEventBuilder atWarn() {
if (isWarnEnabled()) {
return makeLoggingEventBuilder(WARN);
} else {
return NOPLoggingEventBuilder.singleton();
}
}
/**
* Is the logger instance enabled for the ERROR level?
*
* @return True if this Logger is enabled for the ERROR level,
* false otherwise.
*/
public boolean isErrorEnabled();
/**
* Log a message at the ERROR level.
*
* @param msg the message string to be logged
*/
public void error(String msg);
/**
* Log a message at the ERROR level according to the specified format
* and argument.
*
* <p>This form avoids superfluous object creation when the logger
* is disabled for the ERROR level.
*
* @param format the format string
* @param arg the argument
*/
public void error(String format, Object arg);
/**
* Log a message at the ERROR level according to the specified format
* and arguments.
*
* <p>This form avoids superfluous object creation when the logger
* is disabled for the ERROR level.
*
* @param format the format string
* @param arg1 the first argument
* @param arg2 the second argument
*/
public void error(String format, Object arg1, Object arg2);
/**
* Log a message at the ERROR level according to the specified format
* and arguments.
*
* <p>This form avoids superfluous string concatenation when the logger
* is disabled for the ERROR level. However, this variant incurs the hidden
* (and relatively small) cost of creating an <code>Object[]</code> before invoking the method,
* even if this logger is disabled for ERROR. The variants taking
* {@link #error(String, Object) one} and {@link #error(String, Object, Object) two}
* arguments exist solely in order to avoid this hidden cost.
*
* @param format the format string
* @param arguments a list of 3 or more arguments
*/
public void error(String format, Object... arguments);
/**
* Log an exception (throwable) at the ERROR level with an
* accompanying message.
*
* @param msg the message accompanying the exception
* @param t the exception (throwable) to log
*/
public void error(String msg, Throwable t);
/**
* Similar to {@link #isErrorEnabled()} method except that the
* marker data is also taken into consideration.
*
* @param marker The marker data to take into consideration
* @return True if this Logger is enabled for the ERROR level,
* false otherwise.
*/
public boolean isErrorEnabled(Marker marker);
/**
* Log a message with the specific Marker at the ERROR level.
*
* @param marker The marker specific to this log statement
* @param msg the message string to be logged
*/
public void error(Marker marker, String msg);
/**
* This method is similar to {@link #error(String, Object)} method except that the
* marker data is also taken into consideration.
*
* @param marker the marker data specific to this log statement
* @param format the format string
* @param arg the argument
*/
public void error(Marker marker, String format, Object arg);
/**
* This method is similar to {@link #error(String, Object, Object)}
* method except that the marker data is also taken into
* consideration.
*
* @param marker the marker data specific to this log statement
* @param format the format string
* @param arg1 the first argument
* @param arg2 the second argument
*/
public void error(Marker marker, String format, Object arg1, Object arg2);
/**
* This method is similar to {@link #error(String, Object...)}
* method except that the marker data is also taken into
* consideration.
*
* @param marker the marker data specific to this log statement
* @param format the format string
* @param arguments a list of 3 or more arguments
*/
public void error(Marker marker, String format, Object... arguments);
/**
* This method is similar to {@link #error(String, Throwable)}
* method except that the marker data is also taken into
* consideration.
*
* @param marker the marker data specific to this log statement
* @param msg the message accompanying the exception
* @param t the exception (throwable) to log
*/
public void error(Marker marker, String msg, Throwable t);
/**
* Entry point for fluent-logging for {@link org.slf4j.event.Level#ERROR} level.
*
* @return LoggingEventBuilder instance as appropriate for level ERROR
* @since 2.0
*/
@CheckReturnValue
default public LoggingEventBuilder atError() {
if (isErrorEnabled()) {
return makeLoggingEventBuilder(ERROR);
} else {
return NOPLoggingEventBuilder.singleton();
}
}
}
| Logger |
java | alibaba__nacos | common/src/main/java/com/alibaba/nacos/common/model/RestResultUtils.java | {
"start": 820,
"end": 2416
} | class ____ {
public static <T> RestResult<T> success() {
return RestResult.<T>builder().withCode(200).build();
}
public static <T> RestResult<T> success(T data) {
return RestResult.<T>builder().withCode(200).withData(data).build();
}
public static <T> RestResult<T> success(String msg, T data) {
return RestResult.<T>builder().withCode(200).withMsg(msg).withData(data).build();
}
public static <T> RestResult<T> success(int code, T data) {
return RestResult.<T>builder().withCode(code).withData(data).build();
}
public static <T> RestResult<T> failed() {
return RestResult.<T>builder().withCode(500).build();
}
public static <T> RestResult<T> failed(String errMsg) {
return RestResult.<T>builder().withCode(500).withMsg(errMsg).build();
}
public static <T> RestResult<T> failed(int code, T data) {
return RestResult.<T>builder().withCode(code).withData(data).build();
}
public static <T> RestResult<T> failed(int code, T data, String errMsg) {
return RestResult.<T>builder().withCode(code).withData(data).withMsg(errMsg).build();
}
public static <T> RestResult<T> failedWithMsg(int code, String errMsg) {
return RestResult.<T>builder().withCode(code).withMsg(errMsg).build();
}
public static <T> RestResult<T> buildResult(IResultCode resultCode, T data) {
return RestResult.<T>builder().withCode(resultCode.getCode()).withMsg(resultCode.getCodeMsg()).withData(data).build();
}
}
| RestResultUtils |
java | apache__kafka | metadata/src/main/java/org/apache/kafka/controller/PeriodicTaskControlManager.java | {
"start": 2191,
"end": 2413
} | interface ____ {
void scheduleDeferred(
String tag,
long deadlineNs,
Supplier<ControllerResult<Void>> op
);
void cancelDeferred(String tag);
}
| QueueAccessor |
java | apache__maven | compat/maven-compat/src/test/java/org/apache/maven/project/inheritance/t02/ProjectInheritanceTest.java | {
"start": 2059,
"end": 6427
} | class ____ extends AbstractProjectInheritanceTestCase {
// ----------------------------------------------------------------------
//
// p4 inherits from p3
// p3 inherits from p2
// p2 inherits from p1
// p1 inherits from p0
// p0 inherits from super model
//
// or we can show it graphically as:
//
// p4 ---> p3 ---> p2 ---> p1 ---> p0 --> super model
//
// ----------------------------------------------------------------------
@Test
@DisabledOnOs(value = WINDOWS, disabledReason = "need to investigate why it fails on windows")
void testProjectInheritance() throws Exception {
File localRepo = getLocalRepositoryPath();
System.out.println("Local repository is at: " + localRepo.getAbsolutePath());
File pom0 = new File(localRepo, "p0/pom.xml");
File pom1 = new File(pom0.getParentFile(), "p1/pom.xml");
File pom2 = new File(pom1.getParentFile(), "p2/pom.xml");
File pom3 = new File(pom2.getParentFile(), "p3/pom.xml");
File pom4 = new File(pom3.getParentFile(), "p4/pom.xml");
File pom5 = new File(pom4.getParentFile(), "p5/pom.xml");
System.out.println("Location of project-4's POM: " + pom4.getPath());
// load everything...
MavenProject project0 = getProject(pom0);
MavenProject project1 = getProject(pom1);
MavenProject project2 = getProject(pom2);
MavenProject project3 = getProject(pom3);
MavenProject project4 = getProject(pom4);
MavenProject project5 = getProject(pom5);
assertEquals("p4", project4.getName());
// ----------------------------------------------------------------------
// Value inherited from p3
// ----------------------------------------------------------------------
assertEquals("2000", project4.getInceptionYear());
// ----------------------------------------------------------------------
// Value taken from p2
// ----------------------------------------------------------------------
assertEquals("mailing-list", project4.getMailingLists().get(0).getName());
// ----------------------------------------------------------------------
// Value taken from p1
// ----------------------------------------------------------------------
assertEquals("scm-url/p2/p3/p4", project4.getScm().getUrl());
// ----------------------------------------------------------------------
// Value taken from p4
// ----------------------------------------------------------------------
assertEquals("Codehaus", project4.getOrganization().getName());
// ----------------------------------------------------------------------
// Value taken from super model
// ----------------------------------------------------------------------
assertEquals("4.0.0", project4.getModelVersion());
Build build = project4.getBuild();
List<Plugin> plugins = build.getPlugins();
Map<String, Integer> validPluginCounts = new HashMap<>();
String testPluginArtifactId = "maven-compiler-plugin";
// this is the plugin we're looking for.
validPluginCounts.put(testPluginArtifactId, 0);
// these are injected if -DperformRelease=true
validPluginCounts.put("maven-deploy-plugin", 0);
validPluginCounts.put("maven-javadoc-plugin", 0);
validPluginCounts.put("maven-source-plugin", 0);
Plugin testPlugin = null;
for (Plugin plugin : plugins) {
String pluginArtifactId = plugin.getArtifactId();
assertTrue(validPluginCounts.containsKey(pluginArtifactId), "Illegal plugin found: " + pluginArtifactId);
if (pluginArtifactId.equals(testPluginArtifactId)) {
testPlugin = plugin;
}
Integer count = validPluginCounts.get(pluginArtifactId);
assertEquals(0, (int) count, "Multiple copies of plugin: " + pluginArtifactId + " found in POM.");
count = count + 1;
validPluginCounts.put(pluginArtifactId, count);
}
assertNotNull(testPlugin);
List<PluginExecution> executions = testPlugin.getExecutions();
assertEquals(1, executions.size());
}
}
| ProjectInheritanceTest |
java | apache__rocketmq | auth/src/main/java/org/apache/rocketmq/auth/authentication/manager/AuthenticationMetadataManagerImpl.java | {
"start": 1780,
"end": 9425
} | class ____ implements AuthenticationMetadataManager {
private final AuthenticationMetadataProvider authenticationMetadataProvider;
private final AuthorizationMetadataProvider authorizationMetadataProvider;
public AuthenticationMetadataManagerImpl(AuthConfig authConfig) {
this.authenticationMetadataProvider = AuthenticationFactory.getMetadataProvider(authConfig);
this.authorizationMetadataProvider = AuthorizationFactory.getMetadataProvider(authConfig);
this.initUser(authConfig);
}
@Override
public void shutdown() {
if (this.authenticationMetadataProvider != null) {
this.authenticationMetadataProvider.shutdown();
}
if (this.authorizationMetadataProvider != null) {
this.authorizationMetadataProvider.shutdown();
}
}
@Override
public void initUser(AuthConfig authConfig) {
if (authConfig == null) {
return;
}
if (StringUtils.isNotBlank(authConfig.getInitAuthenticationUser())) {
try {
User initUser = JSON.parseObject(authConfig.getInitAuthenticationUser(), User.class);
initUser.setUserType(UserType.SUPER);
this.getUser(initUser.getUsername()).thenCompose(user -> {
if (user != null) {
return CompletableFuture.completedFuture(null);
}
return this.createUser(initUser);
}).join();
} catch (Exception e) {
throw new AuthenticationException("Init authentication user error.", e);
}
}
if (StringUtils.isNotBlank(authConfig.getInnerClientAuthenticationCredentials())) {
try {
SessionCredentials credentials = JSON.parseObject(authConfig.getInnerClientAuthenticationCredentials(), SessionCredentials.class);
User innerUser = User.of(credentials.getAccessKey(), credentials.getSecretKey(), UserType.SUPER);
this.getUser(innerUser.getUsername()).thenCompose(user -> {
if (user != null) {
return CompletableFuture.completedFuture(null);
}
return this.createUser(innerUser);
}).join();
} catch (Exception e) {
throw new AuthenticationException("Init inner client authentication credentials error", e);
}
}
}
@Override
public CompletableFuture<Void> createUser(User user) {
CompletableFuture<Void> result = new CompletableFuture<>();
try {
this.validate(user, true);
if (user.getUserType() == null) {
user.setUserType(UserType.NORMAL);
}
if (user.getUserStatus() == null) {
user.setUserStatus(UserStatus.ENABLE);
}
result = this.getAuthenticationMetadataProvider().getUser(user.getUsername()).thenCompose(old -> {
if (old != null) {
throw new AuthenticationException("The user is existed");
}
return this.getAuthenticationMetadataProvider().createUser(user);
});
} catch (Exception e) {
this.handleException(e, result);
}
return result;
}
@Override
public CompletableFuture<Void> updateUser(User user) {
CompletableFuture<Void> result = new CompletableFuture<>();
try {
this.validate(user, false);
result = this.getAuthenticationMetadataProvider().getUser(user.getUsername()).thenCompose(old -> {
if (old == null) {
throw new AuthenticationException("The user is not exist");
}
if (StringUtils.isNotBlank(user.getPassword())) {
old.setPassword(user.getPassword());
}
if (user.getUserType() != null) {
old.setUserType(user.getUserType());
}
if (user.getUserStatus() != null) {
old.setUserStatus(user.getUserStatus());
}
return this.getAuthenticationMetadataProvider().updateUser(old);
});
} catch (Exception e) {
this.handleException(e, result);
}
return result;
}
@Override
public CompletableFuture<Void> deleteUser(String username) {
CompletableFuture<Void> result = new CompletableFuture<>();
try {
if (StringUtils.isBlank(username)) {
throw new AuthenticationException("username can not be blank");
}
CompletableFuture<Void> deleteUser = this.getAuthenticationMetadataProvider().deleteUser(username);
CompletableFuture<Void> deleteAcl = this.getAuthorizationMetadataProvider().deleteAcl(User.of(username));
return CompletableFuture.allOf(deleteUser, deleteAcl);
} catch (Exception e) {
this.handleException(e, result);
}
return result;
}
@Override
public CompletableFuture<User> getUser(String username) {
CompletableFuture<User> result = new CompletableFuture<>();
try {
if (StringUtils.isBlank(username)) {
throw new AuthenticationException("username can not be blank");
}
result = this.getAuthenticationMetadataProvider().getUser(username);
} catch (Exception e) {
this.handleException(e, result);
}
return result;
}
@Override
public CompletableFuture<List<User>> listUser(String filter) {
CompletableFuture<List<User>> result = new CompletableFuture<>();
try {
result = this.getAuthenticationMetadataProvider().listUser(filter);
} catch (Exception e) {
this.handleException(e, result);
}
return result;
}
@Override
public CompletableFuture<Boolean> isSuperUser(String username) {
return this.getUser(username).thenApply(user -> {
if (user == null) {
throw new AuthenticationException("User:{} is not found", username);
}
return user.getUserType() == UserType.SUPER;
});
}
private void validate(User user, boolean isCreate) {
if (user == null) {
throw new AuthenticationException("user can not be null");
}
if (StringUtils.isBlank(user.getUsername())) {
throw new AuthenticationException("username can not be blank");
}
if (isCreate && StringUtils.isBlank(user.getPassword())) {
throw new AuthenticationException("password can not be blank");
}
}
private void handleException(Exception e, CompletableFuture<?> result) {
Throwable throwable = ExceptionUtils.getRealException(e);
result.completeExceptionally(throwable);
}
private AuthenticationMetadataProvider getAuthenticationMetadataProvider() {
if (authenticationMetadataProvider == null) {
throw new IllegalStateException("The authenticationMetadataProvider is not configured.");
}
return authenticationMetadataProvider;
}
private AuthorizationMetadataProvider getAuthorizationMetadataProvider() {
if (authorizationMetadataProvider == null) {
throw new IllegalStateException("The authorizationMetadataProvider is not configured.");
}
return authorizationMetadataProvider;
}
}
| AuthenticationMetadataManagerImpl |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/leaderelection/TestingLeaderElectionDriver.java | {
"start": 1465,
"end": 7186
} | class ____ implements LeaderElectionDriver {
private final Function<ReentrantLock, Boolean> hasLeadershipFunction;
private final TriConsumer<ReentrantLock, String, LeaderInformation>
publishLeaderInformationConsumer;
private final BiConsumer<ReentrantLock, String> deleteLeaderInformationConsumer;
private final ThrowingConsumer<ReentrantLock, Exception> closeConsumer;
private final ReentrantLock lock = new ReentrantLock();
public TestingLeaderElectionDriver(
Function<ReentrantLock, Boolean> hasLeadershipFunction,
TriConsumer<ReentrantLock, String, LeaderInformation> publishLeaderInformationConsumer,
BiConsumer<ReentrantLock, String> deleteLeaderInformationConsumer,
ThrowingConsumer<ReentrantLock, Exception> closeConsumer) {
this.hasLeadershipFunction = hasLeadershipFunction;
this.publishLeaderInformationConsumer = publishLeaderInformationConsumer;
this.deleteLeaderInformationConsumer = deleteLeaderInformationConsumer;
this.closeConsumer = closeConsumer;
}
@Override
public boolean hasLeadership() {
return hasLeadershipFunction.apply(lock);
}
@Override
public void publishLeaderInformation(String componentId, LeaderInformation leaderInformation) {
publishLeaderInformationConsumer.accept(lock, componentId, leaderInformation);
}
@Override
public void deleteLeaderInformation(String componentId) {
deleteLeaderInformationConsumer.accept(lock, componentId);
}
@Override
public void close() throws Exception {
closeConsumer.accept(lock);
}
public static Builder newNoOpBuilder() {
return new Builder();
}
public ReentrantLock getLock() {
return lock;
}
public static Builder newBuilder(AtomicBoolean grantLeadership) {
return newBuilder(grantLeadership, new AtomicReference<>(), new AtomicBoolean());
}
/**
* Returns a {@code Builder} that comes with a basic default implementation of the {@link
* LeaderElectionDriver} contract using the passed parameters for information storage.
*
* @param hasLeadership saves the current leadership state of the instance that is created from
* the {@code Builder}.
* @param storedLeaderInformation saves the leader information that would be otherwise stored in
* some external storage.
* @param isClosed saves the running state of the driver.
*/
public static Builder newBuilder(
AtomicBoolean hasLeadership,
AtomicReference<LeaderInformationRegister> storedLeaderInformation,
AtomicBoolean isClosed) {
Preconditions.checkState(
storedLeaderInformation.get() == null
|| !storedLeaderInformation
.get()
.getRegisteredComponentIds()
.iterator()
.hasNext(),
"Initial state check for storedLeaderInformation failed.");
Preconditions.checkState(!isClosed.get(), "Initial state check for isClosed failed.");
return newNoOpBuilder()
.setHasLeadershipFunction(
lock -> {
try {
lock.lock();
return hasLeadership.get();
} finally {
lock.unlock();
}
})
.setPublishLeaderInformationConsumer(
(lock, componentId, leaderInformation) -> {
try {
lock.lock();
if (hasLeadership.get()) {
storedLeaderInformation.getAndUpdate(
oldData ->
LeaderInformationRegister.merge(
oldData,
componentId,
leaderInformation));
}
} finally {
lock.unlock();
}
})
.setDeleteLeaderInformationConsumer(
(lock, componentId) -> {
try {
lock.lock();
if (hasLeadership.get()) {
storedLeaderInformation.getAndUpdate(
oldData ->
LeaderInformationRegister.clear(
oldData, componentId));
}
} finally {
lock.unlock();
}
})
.setCloseConsumer(
lock -> {
try {
lock.lock();
isClosed.set(true);
} finally {
lock.unlock();
}
});
}
/**
* {@code Factory} implements {@link LeaderElectionDriverFactory} for the {@code
* TestingLeaderElectionDriver}.
*/
public static | TestingLeaderElectionDriver |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/core/env/AbstractEnvironment.java | {
"start": 7460,
"end": 8319
} | class ____ extends Level1Environment {
* @Override
* protected void customizePropertySources(MutablePropertySources propertySources) {
* super.customizePropertySources(propertySources); // add all from superclass
* propertySources.addLast(new PropertySourceC(...));
* propertySources.addLast(new PropertySourceD(...));
* }
* }
* </pre>
*
* <p>In this arrangement, properties will be resolved against sources A, B, C, D in that
* order. That is to say that property source "A" has precedence over property source
* "D". If the {@code Level2Environment} subclass wished to give property sources C
* and D higher precedence than A and B, it could simply call
* {@code super.customizePropertySources} after, rather than before adding its own:
*
* <pre class="code">
* public | Level2Environment |
java | netty__netty | handler/src/main/java/io/netty/handler/logging/LoggingHandler.java | {
"start": 4613,
"end": 4844
} | class ____ use for the logger
*/
public LoggingHandler(String name) {
this(name, DEFAULT_LEVEL);
}
/**
* Creates a new instance with the specified logger name.
*
* @param name the name of the | to |
java | FasterXML__jackson-core | src/main/java/tools/jackson/core/io/schubfach/MathUtils.java | {
"start": 1180,
"end": 1397
} | class ____ package private utilities for other classes.
* Thus, all methods are assumed to be invoked with correct arguments,
* so these are not checked at all.
*
* @author Raffaello Giulietti
*/
public final | exposes |
java | quarkusio__quarkus | extensions/oidc/runtime/src/main/java/io/quarkus/oidc/runtime/OidcJwtCallerPrincipal.java | {
"start": 275,
"end": 1256
} | class ____ extends DefaultJWTCallerPrincipal {
private final JwtClaims claims;
private final String principalClaim;
private final TokenCredential credential;
public OidcJwtCallerPrincipal(final JwtClaims claims, TokenCredential credential) {
this(claims, credential, null);
}
public OidcJwtCallerPrincipal(final JwtClaims claims, TokenCredential credential, String principalClaim) {
super(claims);
this.claims = claims;
this.credential = credential;
this.principalClaim = principalClaim;
}
public JwtClaims getClaims() {
return claims;
}
public TokenCredential getCredential() {
return credential;
}
@Override
public String getName() {
if (principalClaim != null) {
Optional<String> claim = super.claim(principalClaim);
return claim.orElse(null);
} else {
return super.getName();
}
}
}
| OidcJwtCallerPrincipal |
java | spring-projects__spring-boot | module/spring-boot-security/src/test/java/org/springframework/boot/security/autoconfigure/web/servlet/PathRequestTests.java | {
"start": 1446,
"end": 2719
} | class ____ {
@Test
void toStaticResourcesShouldReturnStaticResourceRequest() {
assertThat(PathRequest.toStaticResources()).isInstanceOf(StaticResourceRequest.class);
}
@Test
void toH2ConsoleShouldMatchH2ConsolePath() {
RequestMatcher matcher = PathRequest.toH2Console();
assertMatcher(matcher).matches("/h2-console");
assertMatcher(matcher).matches("/h2-console/subpath");
assertMatcher(matcher).doesNotMatch("/js/file.js");
}
@Test
void toH2ConsoleWhenManagementContextShouldNeverMatch() {
RequestMatcher matcher = PathRequest.toH2Console();
assertMatcher(matcher, "management").doesNotMatch("/h2-console");
assertMatcher(matcher, "management").doesNotMatch("/h2-console/subpath");
assertMatcher(matcher, "management").doesNotMatch("/js/file.js");
}
private RequestMatcherAssert assertMatcher(RequestMatcher matcher) {
return assertMatcher(matcher, null);
}
private RequestMatcherAssert assertMatcher(RequestMatcher matcher, @Nullable String serverNamespace) {
TestWebApplicationContext context = new TestWebApplicationContext(serverNamespace);
context.registerBean(ServerProperties.class);
context.registerBean(H2ConsoleProperties.class);
return assertThat(new RequestMatcherAssert(context, matcher));
}
static | PathRequestTests |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/sql/ast/statement/SQLForeignKeyImpl.java | {
"start": 4338,
"end": 4629
} | enum ____ {
FULL("FULL"), PARTIAL("PARTIAL"), SIMPLE("SIMPLE");
public final String name;
public final String nameLCase;
Match(String name) {
this.name = name;
this.nameLCase = name.toLowerCase();
}
}
public static | Match |
java | google__dagger | dagger-compiler/main/java/dagger/internal/codegen/writing/InjectionMethods.java | {
"start": 5053,
"end": 8287
} | class ____ {
/**
* Invokes the injection method for {@code binding}, with the dependencies transformed with the
* {@code dependencyUsage} function.
*/
static XCodeBlock invoke(
ContributionBinding binding,
Function<DependencyRequest, XCodeBlock> injectedDependencyUsage,
Function<XExecutableParameterElement, XCodeBlock> assistedDependencyUsage,
XClassName requestingClass,
Optional<XCodeBlock> moduleReference,
CompilerOptions compilerOptions) {
ImmutableList.Builder<XCodeBlock> arguments = ImmutableList.builder();
moduleReference.ifPresent(arguments::add);
invokeArguments(binding, injectedDependencyUsage, assistedDependencyUsage)
.forEach(arguments::add);
XClassName enclosingClass = generatedClassNameForBinding(binding);
String methodName = generatedProxyMethodName(binding);
return invokeMethod(
methodName,
methodTypeArguments(binding, compilerOptions),
arguments.build(),
enclosingClass,
requestingClass);
}
static ImmutableList<XCodeBlock> invokeArguments(
ContributionBinding binding,
Function<DependencyRequest, XCodeBlock> injectedDependencyUsage,
Function<XExecutableParameterElement, XCodeBlock> assistedDependencyUsage) {
ImmutableMap<XExecutableParameterElement, DependencyRequest> dependencyRequestMap =
provisionDependencies(binding).stream()
.collect(
toImmutableMap(
request -> asMethodParameter(request.requestElement().get().xprocessing()),
request -> request));
ImmutableList.Builder<XCodeBlock> arguments = ImmutableList.builder();
XExecutableElement method = asExecutable(binding.bindingElement().get());
for (XExecutableParameterElement parameter : method.getParameters()) {
if (isAssistedParameter(parameter)) {
arguments.add(assistedDependencyUsage.apply(parameter));
} else if (dependencyRequestMap.containsKey(parameter)) {
DependencyRequest request = dependencyRequestMap.get(parameter);
arguments.add(injectedDependencyUsage.apply(request));
} else {
throw new AssertionError("Unexpected parameter: " + parameter);
}
}
return arguments.build();
}
private static ImmutableSet<DependencyRequest> provisionDependencies(
ContributionBinding binding) {
switch (binding.kind()) {
case INJECTION:
return ((InjectionBinding) binding).constructorDependencies();
case ASSISTED_INJECTION:
return ((AssistedInjectionBinding) binding).constructorDependencies();
case PROVISION:
return ((ProvisionBinding) binding).dependencies();
default:
throw new AssertionError("Unexpected binding kind: " + binding.kind());
}
}
}
/**
* A static method that injects one member of an instance of a type. Its first parameter is an
* instance of the type to be injected. The remaining parameters match the dependency requests for
* the injection site.
*
* <p>Example:
*
* <pre><code>
* | ProvisionMethod |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/plugins/ShutdownAwarePlugin.java | {
"start": 1013,
"end": 1606
} | interface ____ {
/**
* Whether the plugin is considered safe to shut down. This method is called when the status of
* a shutdown is retrieved via the API, and it is only called on the master node.
*/
boolean safeToShutdown(String nodeId, SingleNodeShutdownMetadata.Type shutdownType);
/**
* A trigger to notify the plugin that a shutdown for the nodes has been triggered. This method
* will be called on every node for each cluster state, so it should return quickly.
*/
void signalShutdown(Collection<String> shutdownNodeIds);
}
| ShutdownAwarePlugin |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/web/server/OAuth2LoginTests.java | {
"start": 37193,
"end": 37456
} | class ____ {
@Bean
InMemoryReactiveClientRegistrationRepository clientRegistrationRepository() {
return new InMemoryReactiveClientRegistrationRepository(github);
}
}
@Configuration
@EnableWebFluxSecurity
static | OAuth2LoginWithSingleClientRegistrations |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/server/runtime/src/main/java/org/jboss/resteasy/reactive/server/util/MethodId.java | {
"start": 119,
"end": 789
} | class ____ {
private MethodId() {
}
public static String get(String methodName, String declaringClassName, String... parameterClassNames) {
return declaringClassName + '#' + methodName + '(' + Arrays.toString(parameterClassNames) + ')';
}
public static String get(String methodName, Class declaringClass, Class... parameterClasses) {
String[] parameterClassNames = new String[parameterClasses.length];
for (int i = 0; i < parameterClasses.length; i++) {
parameterClassNames[i] = parameterClasses[i].getName();
}
return get(methodName, declaringClass.getName(), parameterClassNames);
}
}
| MethodId |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.