language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
FasterXML__jackson-core
|
src/main/java/tools/jackson/core/util/VersionUtil.java
|
{
"start": 1160,
"end": 1837
}
|
class ____ not be found or does not have a public
* static Version field named "VERSION", returns "empty" {@link Version}
* returned by {@link Version#unknownVersion()}.
*
* @param cls Class for which to look version information
*
* @return Version information discovered if any;
* {@link Version#unknownVersion()} if none
*/
public static Version versionFor(Class<?> cls)
{
Version v = null;
try {
String versionInfoClassName = cls.getPackage().getName() + ".PackageVersion";
Class<?> vClass = Class.forName(versionInfoClassName, true, cls.getClassLoader());
// However, if
|
could
|
java
|
lettuce-io__lettuce-core
|
src/main/java/io/lettuce/core/BitFieldArgs.java
|
{
"start": 1871,
"end": 16384
}
|
class ____ {
/**
* Utility constructor.
*/
private Builder() {
}
/**
* Create a new {@code GET} subcommand.
*
* @param bitFieldType the bit field type, must not be {@code null}.
* @param offset bitfield offset, supports up to {@code 2^32-1} using {@link Integer#toUnsignedString(int)}.
* @return a new {@code GET} subcommand for the given {@code bitFieldType} and {@code offset}.
*/
public static BitFieldArgs get(BitFieldType bitFieldType, int offset) {
return new BitFieldArgs().get(bitFieldType, offset);
}
/**
* Create a new {@code GET} subcommand.
*
* @param bitFieldType the bit field type, must not be {@code null}.
* @param offset bitfield offset, must not be {@code null}.
* @return a new {@code GET} subcommand for the given {@code bitFieldType} and {@code offset}.
* @since 4.3
*/
public static BitFieldArgs get(BitFieldType bitFieldType, Offset offset) {
return new BitFieldArgs().get(bitFieldType, offset);
}
/**
* Create a new {@code SET} subcommand.
*
* @param bitFieldType the bit field type, must not be {@code null}.
* @param offset bitfield offset, supports up to {@code 2^32-1} using {@link Integer#toUnsignedString(int)}.
* @param value the value
* @return a new {@code SET} subcommand for the given {@code bitFieldType}, {@code offset} and {@code value}.
*/
public static BitFieldArgs set(BitFieldType bitFieldType, int offset, long value) {
return new BitFieldArgs().set(bitFieldType, offset, value);
}
/**
* Create a new {@code SET} subcommand.
*
* @param bitFieldType the bit field type, must not be {@code null}.
* @param offset bitfield offset, must not be {@code null}.
* @param value the value
* @return a new {@code SET} subcommand for the given {@code bitFieldType}, {@code offset} and {@code value}.
* @since 4.3
*/
public static BitFieldArgs set(BitFieldType bitFieldType, Offset offset, long value) {
return new BitFieldArgs().set(bitFieldType, offset, value);
}
/**
* Create a new {@code INCRBY} subcommand.
*
* @param bitFieldType the bit field type, must not be {@code null}.
* @param offset bitfield offset, supports up to {@code 2^32-1} using {@link Integer#toUnsignedString(int)}.
* @param value the value
* @return a new {@code INCRBY} subcommand for the given {@code bitFieldType}, {@code offset} and {@code value} .
*/
public static BitFieldArgs incrBy(BitFieldType bitFieldType, int offset, long value) {
return new BitFieldArgs().incrBy(bitFieldType, offset, value);
}
/**
* Create a new {@code INCRBY} subcommand.
*
* @param bitFieldType the bit field type, must not be {@code null}.
* @param offset bitfield offset, must not be {@code null}.
* @param value the value
* @return a new {@code INCRBY} subcommand for the given {@code bitFieldType}, {@code offset} and {@code value} .
* @since 4.3
*/
public static BitFieldArgs incrBy(BitFieldType bitFieldType, Offset offset, long value) {
return new BitFieldArgs().incrBy(bitFieldType, offset, value);
}
/**
* Adds a new {@code OVERFLOW} subcommand.
*
* @param overflowType type of overflow, must not be {@code null}.
* @return a new {@code OVERFLOW} subcommand for the given {@code overflowType}.
*/
public static BitFieldArgs overflow(OverflowType overflowType) {
return new BitFieldArgs().overflow(overflowType);
}
}
/**
* Creates a new signed {@link BitFieldType} for the given number of {@code bits}.
*
* Redis allows up to {@code 64} bits for unsigned integers.
*
* @param bits number of bits to define the integer type width.
* @return the {@link BitFieldType}.
*/
public static BitFieldType signed(int bits) {
return new BitFieldType(true, bits);
}
/**
* Creates a new unsigned {@link BitFieldType} for the given number of {@code bits}. Redis allows up to {@code 63} bits for
* unsigned integers.
*
* @param bits number of bits to define the integer type width.
* @return the {@link BitFieldType}.
*/
public static BitFieldType unsigned(int bits) {
return new BitFieldType(false, bits);
}
/**
* Creates a new {@link Offset} for the given {@code offset}.
*
* @param offset zero-based offset, supports up to {@code 2^32-1} using {@link Integer#toUnsignedString(int)}.
* @return the {@link Offset}.
* @since 4.3
*/
public static Offset offset(int offset) {
return new Offset(false, offset);
}
/**
* Creates a new {@link Offset} for the given {@code offset} that is multiplied by the integer type width used in the sub
* command.
*
* @param offset offset to be multiplied by the integer type width.
* @return the {@link Offset}.
* @since 4.3
*/
public static Offset typeWidthBasedOffset(int offset) {
return new Offset(true, offset);
}
/**
* Adds a new {@link SubCommand} to the {@code BITFIELD} execution.
*
* @param subCommand must not be {@code null}.
*/
private BitFieldArgs addSubCommand(SubCommand subCommand) {
LettuceAssert.notNull(subCommand, "SubCommand must not be null");
commands.add(subCommand);
return this;
}
/**
* Adds a new {@code GET} subcommand using offset {@code 0} and the field type of the previous command.
*
* @return a new {@code GET} subcommand for the given {@code bitFieldType} and {@code offset}.
* @throws IllegalStateException if no previous field type was found
*/
public BitFieldArgs get() {
return get(previousFieldType());
}
/**
* Adds a new {@code GET} subcommand using offset {@code 0}.
*
* @param bitFieldType the bit field type, must not be {@code null}.
* @return a new {@code GET} subcommand for the given {@code bitFieldType} and {@code offset}.
*/
public BitFieldArgs get(BitFieldType bitFieldType) {
return get(bitFieldType, 0);
}
/**
* Adds a new {@code GET} subcommand.
*
* @param bitFieldType the bit field type, must not be {@code null}.
* @param offset bitfield offset, supports up to {@code 2^32-1} using {@link Integer#toUnsignedString(int)}.
* @return a new {@code GET} subcommand for the given {@code bitFieldType} and {@code offset}.
*/
public BitFieldArgs get(BitFieldType bitFieldType, int offset) {
return addSubCommand(new Get(bitFieldType, false, offset));
}
/**
* Adds a new {@code GET} subcommand.
*
* @param bitFieldType the bit field type, must not be {@code null}.
* @param offset bitfield offset
* @return a new {@code GET} subcommand for the given {@code bitFieldType} and {@code offset}.
* @since 4.3
*/
public BitFieldArgs get(BitFieldType bitFieldType, Offset offset) {
LettuceAssert.notNull(offset, "BitFieldOffset must not be null");
return addSubCommand(new Get(bitFieldType, offset.isMultiplyByTypeWidth(), offset.getOffset()));
}
/**
* Adds a new {@code GET} subcommand using the field type of the previous command.
*
* @param offset bitfield offset, supports up to {@code 2^32-1} using {@link Integer#toUnsignedString(int)}.
* @return a new {@code GET} subcommand for the given {@code bitFieldType} and {@code offset}.
* @throws IllegalStateException if no previous field type was found
*/
public BitFieldArgs get(int offset) {
return get(previousFieldType(), offset);
}
/**
* Adds a new {@code SET} subcommand using offset {@code 0} and the field type of the previous command.
*
* @param value the value
* @return a new {@code SET} subcommand for the given {@code bitFieldType}, {@code offset} and {@code value}.
* @throws IllegalStateException if no previous field type was found
*/
public BitFieldArgs set(long value) {
return set(previousFieldType(), value);
}
/**
* Adds a new {@code SET} subcommand using offset {@code 0}.
*
* @param bitFieldType the bit field type, must not be {@code null}.
* @param value the value
* @return a new {@code SET} subcommand for the given {@code bitFieldType}, {@code offset} and {@code value}.
*/
public BitFieldArgs set(BitFieldType bitFieldType, long value) {
return set(bitFieldType, 0, value);
}
/**
* Adds a new {@code SET} subcommand using the field type of the previous command.
*
* @param offset bitfield offset, supports up to {@code 2^32-1} using {@link Integer#toUnsignedString(int)}.
* @param value the value
* @return a new {@code SET} subcommand for the given {@code bitFieldType}, {@code offset} and {@code value}.
* @throws IllegalStateException if no previous field type was found
*/
public BitFieldArgs set(int offset, long value) {
return set(previousFieldType(), offset, value);
}
/**
* Adds a new {@code SET} subcommand.
*
* @param bitFieldType the bit field type, must not be {@code null}.
* @param offset bitfield offset, supports up to {@code 2^32-1} using {@link Integer#toUnsignedString(int)}.
* @param value the value
* @return a new {@code SET} subcommand for the given {@code bitFieldType}, {@code offset} and {@code value}.
*/
public BitFieldArgs set(BitFieldType bitFieldType, int offset, long value) {
return addSubCommand(new Set(bitFieldType, false, offset, value));
}
/**
* Adds a new {@code SET} subcommand.
*
* @param bitFieldType the bit field type, must not be {@code null}.
* @param offset bitfield offset, must not be {@code null}.
* @param value the value
* @return a new {@code SET} subcommand for the given {@code bitFieldType}, {@code offset} and {@code value}.
* @since 4.3
*/
public BitFieldArgs set(BitFieldType bitFieldType, Offset offset, long value) {
LettuceAssert.notNull(offset, "BitFieldOffset must not be null");
return addSubCommand(new Set(bitFieldType, offset.isMultiplyByTypeWidth(), offset.getOffset(), value));
}
/**
* Adds a new {@code INCRBY} subcommand using offset {@code 0} and the field type of the previous command.
*
* @param value the value
* @return a new {@code INCRBY} subcommand for the given {@code bitFieldType}, {@code offset} and {@code value}.
* @throws IllegalStateException if no previous field type was found
*/
public BitFieldArgs incrBy(long value) {
return incrBy(previousFieldType(), value);
}
/**
* Adds a new {@code INCRBY} subcommand using offset {@code 0}.
*
* @param bitFieldType the bit field type, must not be {@code null}.
* @param value the value
* @return a new {@code INCRBY} subcommand for the given {@code bitFieldType}, {@code offset} and {@code value}.
*/
public BitFieldArgs incrBy(BitFieldType bitFieldType, long value) {
return incrBy(bitFieldType, 0, value);
}
/**
* Adds a new {@code INCRBY} subcommand using the field type of the previous command.
*
* @param offset bitfield offset, supports up to {@code 2^32-1} using {@link Integer#toUnsignedString(int)}.
* @param value the value
* @return a new {@code INCRBY} subcommand for the given {@code bitFieldType}, {@code offset} and {@code value}.
* @throws IllegalStateException if no previous field type was found
*/
public BitFieldArgs incrBy(int offset, long value) {
return incrBy(previousFieldType(), offset, value);
}
/**
* Adds a new {@code INCRBY} subcommand.
*
* @param bitFieldType the bit field type, must not be {@code null}.
* @param offset bitfield offset, supports up to {@code 2^32-1} using {@link Integer#toUnsignedString(int)}.
* @param value the value
* @return a new {@code INCRBY} subcommand for the given {@code bitFieldType}, {@code offset} and {@code value}.
*/
public BitFieldArgs incrBy(BitFieldType bitFieldType, int offset, long value) {
return addSubCommand(new IncrBy(bitFieldType, false, offset, value));
}
/**
* Adds a new {@code INCRBY} subcommand.
*
* @param bitFieldType the bit field type, must not be {@code null}.
* @param offset bitfield offset, must not be {@code null}.
* @param value the value
* @return a new {@code INCRBY} subcommand for the given {@code bitFieldType}, {@code offset} and {@code value}.
* @since 4.3
*/
public BitFieldArgs incrBy(BitFieldType bitFieldType, Offset offset, long value) {
LettuceAssert.notNull(offset, "BitFieldOffset must not be null");
return addSubCommand(new IncrBy(bitFieldType, offset.isMultiplyByTypeWidth(), offset.getOffset(), value));
}
/**
* Adds a new {@code OVERFLOW} subcommand.
*
* @param overflowType type of overflow, must not be {@code null}.
* @return a new {@code OVERFLOW} subcommand for the given {@code overflowType}.
*/
public BitFieldArgs overflow(OverflowType overflowType) {
return addSubCommand(new Overflow(overflowType));
}
private BitFieldType previousFieldType() {
List<SubCommand> list = new ArrayList<>(commands);
Collections.reverse(list);
for (SubCommand command : list) {
if (command instanceof Get) {
return ((Get) command).bitFieldType;
}
if (command instanceof Set) {
return ((Set) command).bitFieldType;
}
if (command instanceof IncrBy) {
return ((IncrBy) command).bitFieldType;
}
}
throw new IllegalStateException("No previous field type found");
}
/**
* Representation for the {@code SET} subcommand for {@code BITFIELD}.
*/
private static
|
Builder
|
java
|
spring-projects__spring-boot
|
core/spring-boot/src/test/java/org/springframework/boot/web/servlet/ServletContextInitializerBeansTests.java
|
{
"start": 14412,
"end": 14639
}
|
class ____ {
@Bean
TestServletAndFilterAndListener testServletAndFilterAndListener() {
return new TestServletAndFilterAndListener();
}
}
@Configuration(proxyBeanMethods = false)
static
|
MultipleInterfacesConfiguration
|
java
|
netty__netty
|
transport-classes-io_uring/src/main/java/io/netty/channel/uring/MsgHdrMemory.java
|
{
"start": 909,
"end": 7004
}
|
class ____ {
private static final byte[] EMPTY_SOCKADDR_STORAGE = new byte[Native.SIZEOF_SOCKADDR_STORAGE];
// It is not possible to have a zero length buffer in sendFd,
// so we use a 1 byte buffer here.
private static final int GLOBAL_IOV_LEN = 1;
private static final ByteBuffer GLOBAL_IOV_BASE = Buffer.allocateDirectWithNativeOrder(GLOBAL_IOV_LEN);
private static final long GLOBAL_IOV_BASE_ADDRESS = Buffer.memoryAddress(GLOBAL_IOV_BASE);
private final CleanableDirectBuffer msgHdrMemoryCleanable;
private final CleanableDirectBuffer socketAddrMemoryCleanable;
private final CleanableDirectBuffer iovMemoryCleanable;
private final CleanableDirectBuffer cmsgDataMemoryCleanable;
private final ByteBuffer msgHdrMemory;
private final ByteBuffer socketAddrMemory;
private final ByteBuffer iovMemory;
private final ByteBuffer cmsgDataMemory;
private final long msgHdrMemoryAddress;
private final short idx;
private final int cmsgDataOffset;
MsgHdrMemory(short idx) {
this.idx = idx;
msgHdrMemoryCleanable = Buffer.allocateDirectBufferWithNativeOrder(Native.SIZEOF_MSGHDR);
socketAddrMemoryCleanable = Buffer.allocateDirectBufferWithNativeOrder(Native.SIZEOF_SOCKADDR_STORAGE);
iovMemoryCleanable = Buffer.allocateDirectBufferWithNativeOrder(Native.SIZEOF_IOVEC);
cmsgDataMemoryCleanable = Buffer.allocateDirectBufferWithNativeOrder(Native.CMSG_SPACE);
msgHdrMemory = msgHdrMemoryCleanable.buffer();
socketAddrMemory = socketAddrMemoryCleanable.buffer();
iovMemory = iovMemoryCleanable.buffer();
cmsgDataMemory = cmsgDataMemoryCleanable.buffer();
msgHdrMemoryAddress = Buffer.memoryAddress(msgHdrMemory);
long cmsgDataMemoryAddr = Buffer.memoryAddress(cmsgDataMemory);
long cmsgDataAddr = Native.cmsghdrData(cmsgDataMemoryAddr);
cmsgDataOffset = (int) (cmsgDataAddr - cmsgDataMemoryAddr);
}
MsgHdrMemory() {
this.idx = 0;
// jdk will memset the memory to 0, so we don't need to do it here.
msgHdrMemoryCleanable = Buffer.allocateDirectBufferWithNativeOrder(Native.SIZEOF_MSGHDR);
socketAddrMemoryCleanable = null;
iovMemoryCleanable = Buffer.allocateDirectBufferWithNativeOrder(Native.SIZEOF_IOVEC);
cmsgDataMemoryCleanable = Buffer.allocateDirectBufferWithNativeOrder(Native.CMSG_SPACE_FOR_FD);
msgHdrMemory = msgHdrMemoryCleanable.buffer();
socketAddrMemory = null;
iovMemory = iovMemoryCleanable.buffer();
cmsgDataMemory = cmsgDataMemoryCleanable.buffer();
msgHdrMemoryAddress = Buffer.memoryAddress(msgHdrMemory);
// These two parameters must be set to valid values and cannot be 0,
// otherwise the fd we get in io_uring_recvmsg is 0
Iov.set(iovMemory, GLOBAL_IOV_BASE_ADDRESS, GLOBAL_IOV_LEN);
long cmsgDataMemoryAddr = Buffer.memoryAddress(cmsgDataMemory);
long cmsgDataAddr = Native.cmsghdrData(cmsgDataMemoryAddr);
cmsgDataOffset = (int) (cmsgDataAddr - cmsgDataMemoryAddr);
}
void set(LinuxSocket socket, InetSocketAddress address, long bufferAddress , int length, short segmentSize) {
int addressLength;
if (address == null) {
addressLength = socket.isIpv6() ? Native.SIZEOF_SOCKADDR_IN6 : Native.SIZEOF_SOCKADDR_IN;
socketAddrMemory.mark();
try {
socketAddrMemory.put(EMPTY_SOCKADDR_STORAGE);
} finally {
socketAddrMemory.reset();
}
} else {
addressLength = SockaddrIn.set(socket.isIpv6(), socketAddrMemory, address);
}
Iov.set(iovMemory, bufferAddress, length);
MsgHdr.set(msgHdrMemory, socketAddrMemory, addressLength, iovMemory, 1, cmsgDataMemory,
cmsgDataOffset, segmentSize);
}
void set(long iovArray, int length) {
MsgHdr.set(msgHdrMemory, iovArray, length);
}
void setScmRightsFd(int fd) {
MsgHdr.prepSendFd(msgHdrMemory, fd, cmsgDataMemory, cmsgDataOffset, iovMemory, 1);
}
int getScmRightsFd() {
return MsgHdr.getCmsgData(msgHdrMemory, cmsgDataMemory, cmsgDataOffset);
}
void prepRecvReadFd() {
MsgHdr.prepReadFd(msgHdrMemory, cmsgDataMemory, cmsgDataOffset, iovMemory, 1);
}
boolean hasPort(IoUringDatagramChannel channel) {
if (channel.socket.isIpv6()) {
return SockaddrIn.hasPortIpv6(socketAddrMemory);
}
return SockaddrIn.hasPortIpv4(socketAddrMemory);
}
DatagramPacket get(IoUringDatagramChannel channel, IoUringIoHandler handler, ByteBuf buffer, int bytesRead) {
InetSocketAddress sender;
if (channel.socket.isIpv6()) {
byte[] ipv6Bytes = handler.inet6AddressArray();
byte[] ipv4bytes = handler.inet4AddressArray();
sender = SockaddrIn.getIPv6(socketAddrMemory, ipv6Bytes, ipv4bytes);
} else {
byte[] bytes = handler.inet4AddressArray();
sender = SockaddrIn.getIPv4(socketAddrMemory, bytes);
}
long bufferAddress = Iov.getBufferAddress(iovMemory);
int bufferLength = Iov.getBufferLength(iovMemory);
// reconstruct the reader index based on the memoryAddress of the buffer and the bufferAddress that was used
// in the iovec.
long memoryAddress = IoUring.memoryAddress(buffer);
int readerIndex = (int) (bufferAddress - memoryAddress);
ByteBuf slice = buffer.slice(readerIndex, bufferLength)
.writerIndex(bytesRead);
return new DatagramPacket(slice.retain(), channel.localAddress(), sender);
}
short idx() {
return idx;
}
long address() {
return msgHdrMemoryAddress;
}
void release() {
msgHdrMemoryCleanable.clean();
if (socketAddrMemoryCleanable != null) {
socketAddrMemoryCleanable.clean();
}
iovMemoryCleanable.clean();
cmsgDataMemoryCleanable.clean();
}
}
|
MsgHdrMemory
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/JarBuilder.java
|
{
"start": 974,
"end": 1296
}
|
class ____ generating job.jar
* for Hadoop Streaming jobs. It includes the files specified
* with the -file option and includes them in the jar. Also,
* hadoop-streaming is a user level appplication, so all the classes
* with hadoop-streaming that are needed in the job are also included
* in the job.jar.
*/
public
|
for
|
java
|
apache__camel
|
core/camel-core-xml/src/main/java/org/apache/camel/core/xml/util/jsse/AbstractJsseUtilFactoryBean.java
|
{
"start": 925,
"end": 1140
}
|
class ____<T> extends AbstractCamelFactoryBean<T> {
@Override
public abstract T getObject() throws Exception;
@Override
public abstract Class<? extends T> getObjectType();
}
|
AbstractJsseUtilFactoryBean
|
java
|
apache__camel
|
components/camel-kafka/src/main/java/org/apache/camel/processor/resume/kafka/RecordError.java
|
{
"start": 989,
"end": 1422
}
|
class ____ {
private final RecordMetadata recordMetadata;
private final Exception exception;
public RecordError(RecordMetadata recordMetadata, Exception exception) {
this.recordMetadata = recordMetadata;
this.exception = exception;
}
public RecordMetadata getRecordMetadata() {
return recordMetadata;
}
public Exception getException() {
return exception;
}
}
|
RecordError
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/ser/filter/TestSimpleSerializationIgnore.java
|
{
"start": 434,
"end": 581
}
|
class ____
extends DatabindTestUtil
{
// Class for testing enabled {@link JsonIgnore} annotation
final static
|
TestSimpleSerializationIgnore
|
java
|
apache__flink
|
flink-core/src/main/java/org/apache/flink/api/common/typeutils/SingleThreadAccessCheckingTypeSerializer.java
|
{
"start": 5303,
"end": 6791
}
|
class ____<T>
extends CompositeTypeSerializerSnapshot<
T, SingleThreadAccessCheckingTypeSerializer<T>> {
@SuppressWarnings("unused")
public SingleThreadAccessCheckingTypeSerializerSnapshot() {}
SingleThreadAccessCheckingTypeSerializerSnapshot(
SingleThreadAccessCheckingTypeSerializer<T> serializerInstance) {
super(serializerInstance);
}
@Override
protected int getCurrentOuterSnapshotVersion() {
return 1;
}
@Override
protected TypeSerializer<?>[] getNestedSerializers(
SingleThreadAccessCheckingTypeSerializer<T> outerSerializer) {
return new TypeSerializer[] {outerSerializer.originalSerializer};
}
@SuppressWarnings("unchecked")
@Override
protected SingleThreadAccessCheckingTypeSerializer<T>
createOuterSerializerWithNestedSerializers(TypeSerializer<?>[] nestedSerializers) {
return new SingleThreadAccessCheckingTypeSerializer<>(
(TypeSerializer<T>) nestedSerializers[0]);
}
}
private void writeObject(ObjectOutputStream outputStream) throws IOException {
try (SingleThreadAccessCheck ignored =
singleThreadAccessChecker.startSingleThreadAccessCheck()) {
outputStream.defaultWriteObject();
}
}
private static
|
SingleThreadAccessCheckingTypeSerializerSnapshot
|
java
|
micronaut-projects__micronaut-core
|
http/src/main/java/io/micronaut/http/MutableHttpResponse.java
|
{
"start": 1226,
"end": 6530
}
|
interface ____<B> extends HttpResponse<B>, MutableHttpMessage<B> {
/**
* Adds the specified cookie to the response. This method can be called multiple times to set more than one cookie.
*
* @param cookie the Cookie to return to the client
* @return This response object
*/
MutableHttpResponse<B> cookie(Cookie cookie);
/**
* Adds the specified cookies to the response.
*
* @param cookies the Set of Cookies to return to the client
* @return This response object
*/
default MutableHttpResponse<B> cookies(Set<Cookie> cookies) {
for (Cookie cookie: cookies) {
cookie(cookie);
}
return this;
}
/**
* Sets the body.
*
* @param body The body
* @return This response object
*/
@Override
<T> MutableHttpResponse<T> body(@Nullable T body);
/**
* Sets the response status.
*
* @param status The status
* @param message The message
* @return This response object
*/
default MutableHttpResponse<B> status(HttpStatus status, CharSequence message) {
if (message == null) {
message = status.getReason();
}
return status(status.getCode(), message);
}
@Override
default MutableHttpResponse<B> headers(Consumer<MutableHttpHeaders> headers) {
return (MutableHttpResponse<B>) MutableHttpMessage.super.headers(headers);
}
@Override
default MutableHttpResponse<B> header(CharSequence name, CharSequence value) {
return (MutableHttpResponse<B>) MutableHttpMessage.super.header(name, value);
}
@Override
default MutableHttpResponse<B> headers(Map<CharSequence, CharSequence> namesAndValues) {
return (MutableHttpResponse<B>) MutableHttpMessage.super.headers(namesAndValues);
}
/**
* Sets the HTTP Cache-Control header.
* @param cacheControl Cache Control
* @return This response object
* @since 4.9.0
*/
@NonNull
default MutableHttpResponse<B> cacheControl(@NonNull CacheControl cacheControl) {
return header(HttpHeaders.CACHE_CONTROL, Objects.requireNonNull(cacheControl, "Cache Control parameter cannot be null").toString());
}
/**
* Sets the response encoding. Should be called after {@link #contentType(MediaType)}.
*
* @param encoding The encoding to use
* @return This response object
*/
default MutableHttpResponse<B> characterEncoding(CharSequence encoding) {
if (encoding != null) {
getContentType().ifPresent(mediaType ->
contentType(new MediaType(mediaType.toString(), Collections.singletonMap(MediaType.CHARSET_PARAMETER, encoding.toString())))
);
}
return this;
}
/**
* Sets the response encoding.
*
* @param encoding The encoding to use
* @return The encoded reponse object
*/
default MutableHttpResponse<B> characterEncoding(Charset encoding) {
return characterEncoding(encoding.toString());
}
@Override
default MutableHttpResponse<B> contentLength(long length) {
return (MutableHttpResponse<B>) MutableHttpMessage.super.contentLength(length);
}
@Override
default MutableHttpResponse<B> contentType(CharSequence contentType) {
return (MutableHttpResponse<B>) MutableHttpMessage.super.contentType(contentType);
}
@Override
default MutableHttpResponse<B> contentType(MediaType mediaType) {
return (MutableHttpResponse<B>) MutableHttpMessage.super.contentType(mediaType);
}
@Override
default MutableHttpResponse<B> contentEncoding(CharSequence encoding) {
return (MutableHttpResponse<B>) MutableHttpMessage.super.contentEncoding(encoding);
}
/**
* Sets the locale to use and will apply the appropriate {@link HttpHeaders#CONTENT_LANGUAGE} header to the response.
*
* @param locale The locale
* @return This response object
*/
default MutableHttpResponse<B> locale(Locale locale) {
getHeaders().add(HttpHeaders.CONTENT_LANGUAGE, locale.toString());
return this;
}
/**
* Sets the response status.
*
* @param status The status
* @return This response object
*/
default MutableHttpResponse<B> status(int status) {
return status(status, null);
}
/**
* Sets the response status.
*
* @param status The status
* @param message The message
* @return This response object
*/
MutableHttpResponse<B> status(int status, CharSequence message);
/**
* Sets the response status.
*
* @param status The status
* @return This response object
*/
default MutableHttpResponse<B> status(HttpStatus status) {
return status(status, null);
}
/**
* Sets an attribute on the response.
* @param name The attribute name
* @param value The attribute value
* @return This response object
*/
default MutableHttpResponse<B> attribute(CharSequence name, Object value) {
return (MutableHttpResponse<B>) setAttribute(name, value);
}
@Override
default MutableHttpResponse<?> toMutableResponse() {
return this;
}
}
|
MutableHttpResponse
|
java
|
spring-projects__spring-framework
|
spring-test/src/main/java/org/springframework/test/context/junit4/rules/SpringMethodRule.java
|
{
"start": 4778,
"end": 9518
}
|
class ____ implements MethodRule {
private static final Log logger = LogFactory.getLog(SpringMethodRule.class);
/**
* Apply <em>instance-level</em> and <em>method-level</em> features of
* the <em>Spring TestContext Framework</em> to the supplied {@code base}
* statement.
* <p>Specifically, this method invokes the
* {@link TestContextManager#prepareTestInstance prepareTestInstance()},
* {@link TestContextManager#beforeTestMethod beforeTestMethod()}, and
* {@link TestContextManager#afterTestMethod afterTestMethod()} methods
* on the {@code TestContextManager}, potentially with Spring timeouts
* and repetitions.
* <p>In addition, this method checks whether the test is enabled in
* the current execution environment. This prevents methods with a
* non-matching {@code @IfProfileValue} annotation from running altogether,
* even skipping the execution of {@code prepareTestInstance()} methods
* in {@code TestExecutionListeners}.
* @param base the base {@code Statement} that this rule should be applied to
* @param frameworkMethod the method which is about to be invoked on the test instance
* @param testInstance the current test instance
* @return a statement that wraps the supplied {@code base} with instance-level
* and method-level features of the Spring TestContext Framework
* @see #withBeforeTestMethodCallbacks
* @see #withAfterTestMethodCallbacks
* @see #withPotentialRepeat
* @see #withPotentialTimeout
* @see #withTestInstancePreparation
* @see #withProfileValueCheck
*/
@Override
public Statement apply(Statement base, FrameworkMethod frameworkMethod, Object testInstance) {
Method testMethod = frameworkMethod.getMethod();
if (logger.isDebugEnabled()) {
logger.debug("Applying SpringMethodRule to test method [" + testMethod + "]");
}
Class<?> testClass = testInstance.getClass();
TestContextManager testContextManager = SpringClassRule.getTestContextManager(testClass);
Statement statement = base;
statement = withBeforeTestMethodCallbacks(statement, testMethod, testInstance, testContextManager);
statement = withAfterTestMethodCallbacks(statement, testMethod, testInstance, testContextManager);
statement = withTestInstancePreparation(statement, testInstance, testContextManager);
statement = withPotentialRepeat(statement, testMethod, testInstance);
statement = withPotentialTimeout(statement, testMethod, testInstance);
statement = withProfileValueCheck(statement, testMethod, testInstance);
return statement;
}
/**
* Wrap the supplied {@link Statement} with a {@code RunBeforeTestMethodCallbacks} statement.
* @see RunBeforeTestMethodCallbacks
*/
private Statement withBeforeTestMethodCallbacks(Statement next, Method testMethod,
Object testInstance, TestContextManager testContextManager) {
return new RunBeforeTestMethodCallbacks(
next, testInstance, testMethod, testContextManager);
}
/**
* Wrap the supplied {@link Statement} with a {@code RunAfterTestMethodCallbacks} statement.
* @see RunAfterTestMethodCallbacks
*/
private Statement withAfterTestMethodCallbacks(Statement next, Method testMethod,
Object testInstance, TestContextManager testContextManager) {
return new RunAfterTestMethodCallbacks(
next, testInstance, testMethod, testContextManager);
}
/**
* Wrap the supplied {@link Statement} with a {@code RunPrepareTestInstanceCallbacks} statement.
* @see RunPrepareTestInstanceCallbacks
*/
private Statement withTestInstancePreparation(
Statement next, Object testInstance, TestContextManager testContextManager) {
return new RunPrepareTestInstanceCallbacks(next, testInstance, testContextManager);
}
/**
* Wrap the supplied {@link Statement} with a {@code SpringRepeat} statement.
* <p>Supports Spring's {@link org.springframework.test.annotation.Repeat @Repeat}
* annotation.
* @see SpringRepeat
*/
private Statement withPotentialRepeat(Statement next, Method testMethod, Object testInstance) {
return new SpringRepeat(next, testMethod);
}
/**
* Wrap the supplied {@link Statement} with a {@code SpringFailOnTimeout} statement.
* <p>Supports Spring's {@link org.springframework.test.annotation.Timed @Timed}
* annotation.
* @see SpringFailOnTimeout
*/
private Statement withPotentialTimeout(Statement next, Method testMethod, Object testInstance) {
return new SpringFailOnTimeout(next, testMethod);
}
/**
* Wrap the supplied {@link Statement} with a {@code ProfileValueChecker} statement.
* @see ProfileValueChecker
*/
private Statement withProfileValueCheck(Statement next, Method testMethod, Object testInstance) {
return new ProfileValueChecker(next, testInstance.getClass(), testMethod);
}
}
|
SpringMethodRule
|
java
|
elastic__elasticsearch
|
test/fixtures/aws-fixture-utils/src/main/java/fixture/aws/AwsFixtureUtils.java
|
{
"start": 798,
"end": 2585
}
|
enum ____ {
;
/**
* @return an {@link InetSocketAddress} for a test fixture running on {@code localhost} which binds to any available port.
*/
public static InetSocketAddress getLocalFixtureAddress() {
try {
return new InetSocketAddress(InetAddress.getByName("localhost"), 0);
} catch (UnknownHostException e) {
throw new RuntimeException(e);
}
}
/**
* Send an XML-formatted error response typical of an AWS service.
*/
public static void sendError(final HttpExchange exchange, final RestStatus status, final String errorCode, final String message)
throws IOException {
final Headers headers = exchange.getResponseHeaders();
headers.add("Content-Type", "application/xml");
final String requestId = exchange.getRequestHeaders().getFirst("x-amz-request-id");
if (requestId != null) {
headers.add("x-amz-request-id", requestId);
}
if (errorCode == null || "HEAD".equals(exchange.getRequestMethod())) {
exchange.sendResponseHeaders(status.getStatus(), -1L);
exchange.close();
} else {
final byte[] response = ("<?xml version=\"1.0\" encoding=\"UTF-8\"?><Error>"
+ "<Code>"
+ errorCode
+ "</Code>"
+ "<Message>"
+ message
+ "</Message>"
+ "<RequestId>"
+ requestId
+ "</RequestId>"
+ "</Error>").getBytes(StandardCharsets.UTF_8);
exchange.sendResponseHeaders(status.getStatus(), response.length);
exchange.getResponseBody().write(response);
exchange.close();
}
}
}
|
AwsFixtureUtils
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/gateway/MetadataStateFormat.java
|
{
"start": 13428,
"end": 13680
}
|
class ____ theirs specific state.
*/
public abstract void toXContent(XContentBuilder builder, T state) throws IOException;
/**
* Reads a new instance of the state from the given XContentParser
* Subclasses need to implement this
|
for
|
java
|
micronaut-projects__micronaut-core
|
inject-java/src/test/groovy/io/micronaut/inject/foreach/OuterProperties.java
|
{
"start": 823,
"end": 1238
}
|
class ____ {
private String name;
private List<InnerEach> inner;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public List<InnerEach> getInner() {
return inner;
}
public void setInner(List<InnerEach> inner) {
this.inner = inner;
}
@EachProperty("inner")
public static
|
OuterProperties
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/processor/aggregator/AggregateNewExchangeAndConfirmTest.java
|
{
"start": 2621,
"end": 3112
}
|
class ____ implements AggregationStrategy {
@Override
public Exchange aggregate(Exchange oldExchange, Exchange newExchange) {
String body = "";
if (oldExchange != null) {
body = oldExchange.getIn().getBody(String.class);
}
body += newExchange.getIn().getBody(String.class);
newExchange.getIn().setBody(body);
return newExchange;
}
}
private
|
MyNewExchangeAggregationStrategy
|
java
|
google__guava
|
guava-gwt/src/com/google/common/DummyJavadocClass.java
|
{
"start": 645,
"end": 821
}
|
class ____ that the Maven Javadoc plugin will produce a jar. If it doesn't produce a jar,
* then the Sonatype repository manager issues an error.
*
* @author Chris Povirk
*/
|
so
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
|
{
"start": 13788,
"end": 13989
}
|
class ____ extends IOException {
private static final long serialVersionUID = 1L;
private LogHeaderCorruptException(String msg) {
super(msg);
}
}
private
|
LogHeaderCorruptException
|
java
|
FasterXML__jackson-core
|
src/main/java/tools/jackson/core/io/MergedStream.java
|
{
"start": 480,
"end": 3020
}
|
class ____ extends InputStream
{
final private IOContext _ctxt;
final private InputStream _in;
private byte[] _b;
private int _ptr;
final private int _end;
public MergedStream(IOContext ctxt, InputStream in, byte[] buf, int start, int end) {
_ctxt = ctxt;
_in = in;
_b = buf;
_ptr = start;
_end = end;
}
@Override
public int available() throws IOException {
if (_b != null) {
return _end - _ptr;
}
return _in.available();
}
@Override public void close() throws IOException {
_free();
_in.close();
}
@Override public synchronized void mark(int readlimit) {
if (_b == null) { _in.mark(readlimit); }
}
@Override public boolean markSupported() {
// Only supports marks past the initial rewindable section...
return (_b == null) && _in.markSupported();
}
@Override public int read() throws IOException {
if (_b != null) {
int c = _b[_ptr++] & 0xFF;
if (_ptr >= _end) {
_free();
}
return c;
}
return _in.read();
}
@Override public int read(byte[] b) throws IOException {
return read(b, 0, b.length);
}
@Override
public int read(byte[] b, int off, int len) throws IOException {
if (_b != null) {
int avail = _end - _ptr;
if (len > avail) {
len = avail;
}
System.arraycopy(_b, _ptr, b, off, len);
_ptr += len;
if (_ptr >= _end) {
_free();
}
return len;
}
return _in.read(b, off, len);
}
@Override
public synchronized void reset() throws IOException {
if (_b == null) { _in.reset(); }
}
@Override
public long skip(long n) throws IOException {
long count = 0L;
if (_b != null) {
int amount = _end - _ptr;
if (amount > n) { // all in pushed back segment?
_ptr += (int) n;
return n;
}
_free();
count += amount;
n -= amount;
}
if (n > 0) { count += _in.skip(n); }
return count;
}
private void _free() {
byte[] buf = _b;
if (buf != null) {
_b = null;
if (_ctxt != null) {
_ctxt.releaseReadIOBuffer(buf);
}
}
}
}
|
MergedStream
|
java
|
netty__netty
|
transport/src/test/java/io/netty/channel/DefaultChannelPipelineTailTest.java
|
{
"start": 1217,
"end": 7892
}
|
class ____ {
private static EventLoopGroup GROUP;
@BeforeAll
public static void init() {
GROUP = new MultiThreadIoEventLoopGroup(1, LocalIoHandler.newFactory());
}
@AfterAll
public static void destroy() {
GROUP.shutdownGracefully();
}
@Test
public void testOnUnhandledInboundChannelActive() throws Exception {
final CountDownLatch latch = new CountDownLatch(1);
MyChannel myChannel = new MyChannel() {
@Override
protected void onUnhandledInboundChannelActive() {
latch.countDown();
}
};
Bootstrap bootstrap = new Bootstrap()
.channelFactory(new MyChannelFactory(myChannel))
.group(GROUP)
.handler(new ChannelInboundHandlerAdapter())
.remoteAddress(new InetSocketAddress(0));
Channel channel = bootstrap.connect()
.sync().channel();
try {
assertTrue(latch.await(1L, TimeUnit.SECONDS));
} finally {
channel.close();
}
}
@Test
public void testOnUnhandledInboundChannelInactive() throws Exception {
final CountDownLatch latch = new CountDownLatch(1);
MyChannel myChannel = new MyChannel() {
@Override
protected void onUnhandledInboundChannelInactive() {
latch.countDown();
}
};
Bootstrap bootstrap = new Bootstrap()
.channelFactory(new MyChannelFactory(myChannel))
.group(GROUP)
.handler(new ChannelInboundHandlerAdapter())
.remoteAddress(new InetSocketAddress(0));
Channel channel = bootstrap.connect()
.sync().channel();
channel.close().syncUninterruptibly();
assertTrue(latch.await(1L, TimeUnit.SECONDS));
}
@Test
public void testOnUnhandledInboundException() throws Exception {
final AtomicReference<Throwable> causeRef = new AtomicReference<Throwable>();
final CountDownLatch latch = new CountDownLatch(1);
MyChannel myChannel = new MyChannel() {
@Override
protected void onUnhandledInboundException(Throwable cause) {
causeRef.set(cause);
latch.countDown();
}
};
Bootstrap bootstrap = new Bootstrap()
.channelFactory(new MyChannelFactory(myChannel))
.group(GROUP)
.handler(new ChannelInboundHandlerAdapter())
.remoteAddress(new InetSocketAddress(0));
Channel channel = bootstrap.connect()
.sync().channel();
try {
IOException ex = new IOException("testOnUnhandledInboundException");
channel.pipeline().fireExceptionCaught(ex);
assertTrue(latch.await(1L, TimeUnit.SECONDS));
assertSame(ex, causeRef.get());
} finally {
channel.close();
}
}
@Test
public void testOnUnhandledInboundMessage() throws Exception {
final CountDownLatch latch = new CountDownLatch(1);
MyChannel myChannel = new MyChannel() {
@Override
protected void onUnhandledInboundMessage(Object msg) {
latch.countDown();
}
};
Bootstrap bootstrap = new Bootstrap()
.channelFactory(new MyChannelFactory(myChannel))
.group(GROUP)
.handler(new ChannelInboundHandlerAdapter())
.remoteAddress(new InetSocketAddress(0));
Channel channel = bootstrap.connect()
.sync().channel();
try {
channel.pipeline().fireChannelRead("testOnUnhandledInboundMessage");
assertTrue(latch.await(1L, TimeUnit.SECONDS));
} finally {
channel.close();
}
}
@Test
public void testOnUnhandledInboundReadComplete() throws Exception {
final CountDownLatch latch = new CountDownLatch(1);
MyChannel myChannel = new MyChannel() {
@Override
protected void onUnhandledInboundReadComplete() {
latch.countDown();
}
};
Bootstrap bootstrap = new Bootstrap()
.channelFactory(new MyChannelFactory(myChannel))
.group(GROUP)
.handler(new ChannelInboundHandlerAdapter())
.remoteAddress(new InetSocketAddress(0));
Channel channel = bootstrap.connect()
.sync().channel();
try {
channel.pipeline().fireChannelReadComplete();
assertTrue(latch.await(1L, TimeUnit.SECONDS));
} finally {
channel.close();
}
}
@Test
public void testOnUnhandledInboundUserEventTriggered() throws Exception {
final CountDownLatch latch = new CountDownLatch(1);
MyChannel myChannel = new MyChannel() {
@Override
protected void onUnhandledInboundUserEventTriggered(Object evt) {
latch.countDown();
}
};
Bootstrap bootstrap = new Bootstrap()
.channelFactory(new MyChannelFactory(myChannel))
.group(GROUP)
.handler(new ChannelInboundHandlerAdapter())
.remoteAddress(new InetSocketAddress(0));
Channel channel = bootstrap.connect()
.sync().channel();
try {
channel.pipeline().fireUserEventTriggered("testOnUnhandledInboundUserEventTriggered");
assertTrue(latch.await(1L, TimeUnit.SECONDS));
} finally {
channel.close();
}
}
@Test
public void testOnUnhandledInboundWritabilityChanged() throws Exception {
final CountDownLatch latch = new CountDownLatch(1);
MyChannel myChannel = new MyChannel() {
@Override
protected void onUnhandledInboundWritabilityChanged() {
latch.countDown();
}
};
Bootstrap bootstrap = new Bootstrap()
.channelFactory(new MyChannelFactory(myChannel))
.group(GROUP)
.handler(new ChannelInboundHandlerAdapter())
.remoteAddress(new InetSocketAddress(0));
Channel channel = bootstrap.connect()
.sync().channel();
try {
channel.pipeline().fireChannelWritabilityChanged();
assertTrue(latch.await(1L, TimeUnit.SECONDS));
} finally {
channel.close();
}
}
private static
|
DefaultChannelPipelineTailTest
|
java
|
spring-projects__spring-boot
|
configuration-metadata/spring-boot-configuration-processor/src/test/java/org/springframework/boot/configurationprocessor/MetadataGenerationEnvironmentFactory.java
|
{
"start": 1079,
"end": 2573
}
|
class ____ implements Function<ProcessingEnvironment, MetadataGenerationEnvironment> {
@Override
public MetadataGenerationEnvironment apply(ProcessingEnvironment environment) {
Set<String> endpointAnnotations = new HashSet<>(
Arrays.asList(TestConfigurationMetadataAnnotationProcessor.CONTROLLER_ENDPOINT_ANNOTATION,
TestConfigurationMetadataAnnotationProcessor.ENDPOINT_ANNOTATION,
TestConfigurationMetadataAnnotationProcessor.REST_CONTROLLER_ENDPOINT_ANNOTATION,
TestConfigurationMetadataAnnotationProcessor.SERVLET_ENDPOINT_ANNOTATION,
TestConfigurationMetadataAnnotationProcessor.WEB_ENDPOINT_ANNOTATION));
return new MetadataGenerationEnvironment(environment,
TestConfigurationMetadataAnnotationProcessor.CONFIGURATION_PROPERTIES_ANNOTATION,
TestConfigurationMetadataAnnotationProcessor.CONFIGURATION_PROPERTIES_SOURCE_ANNOTATION,
TestConfigurationMetadataAnnotationProcessor.NESTED_CONFIGURATION_PROPERTY_ANNOTATION,
TestConfigurationMetadataAnnotationProcessor.DEPRECATED_CONFIGURATION_PROPERTY_ANNOTATION,
TestConfigurationMetadataAnnotationProcessor.CONSTRUCTOR_BINDING_ANNOTATION,
TestConfigurationMetadataAnnotationProcessor.AUTOWIRED_ANNOTATION,
TestConfigurationMetadataAnnotationProcessor.DEFAULT_VALUE_ANNOTATION, endpointAnnotations,
TestConfigurationMetadataAnnotationProcessor.READ_OPERATION_ANNOTATION,
TestConfigurationMetadataAnnotationProcessor.NAME_ANNOTATION);
}
}
|
MetadataGenerationEnvironmentFactory
|
java
|
apache__camel
|
components/camel-twilio/src/generated/java/org/apache/camel/component/twilio/internal/UsageRecordMonthlyApiMethod.java
|
{
"start": 676,
"end": 1741
}
|
enum ____ implements ApiMethod {
READER(
com.twilio.rest.api.v2010.account.usage.record.MonthlyReader.class,
"reader"),
READER_1(
com.twilio.rest.api.v2010.account.usage.record.MonthlyReader.class,
"reader",
arg("pathAccountSid", String.class));
private final ApiMethod apiMethod;
UsageRecordMonthlyApiMethod(Class<?> resultType, String name, ApiMethodArg... args) {
this.apiMethod = new ApiMethodImpl(Monthly.class, resultType, name, args);
}
@Override
public String getName() { return apiMethod.getName(); }
@Override
public Class<?> getResultType() { return apiMethod.getResultType(); }
@Override
public List<String> getArgNames() { return apiMethod.getArgNames(); }
@Override
public List<String> getSetterArgNames() { return apiMethod.getSetterArgNames(); }
@Override
public List<Class<?>> getArgTypes() { return apiMethod.getArgTypes(); }
@Override
public Method getMethod() { return apiMethod.getMethod(); }
}
|
UsageRecordMonthlyApiMethod
|
java
|
spring-projects__spring-security
|
core/src/main/java/org/springframework/security/access/hierarchicalroles/RoleHierarchyImpl.java
|
{
"start": 10178,
"end": 10266
}
|
class ____ constructing child roles within a role hierarchy branch.
*/
public final
|
for
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/jpa/query/RegisterNamedQueryWithParameterTest.java
|
{
"start": 760,
"end": 2029
}
|
class ____ {
private static final String QUERY_NAME = "ENTITY_BY_NAME";
private static final String QUERY = "select t.id from TEST_ENTITY t where t.anInteger = :value";
@BeforeAll
public void setUp(EntityManagerFactoryScope scope) {
scope.inTransaction(
entityManager -> {
Query query = entityManager.createNativeQuery( QUERY );
scope.getEntityManagerFactory().addNamedQuery( "ENTITY_BY_NAME", query );
TestEntity entity = new TestEntity( 1L, "And", 1 );
TestEntity entity2 = new TestEntity( 2L, "Fab", 2 );
entityManager.persist( entity );
entityManager.persist( entity2 );
}
);
}
@AfterAll
public void tearDown(EntityManagerFactoryScope scope) {
scope.inTransaction(
entityManager -> entityManager.createQuery( "delete from TestEntity" ).executeUpdate()
);
}
@Test
public void testExecuteNativeQuery(EntityManagerFactoryScope scope) {
scope.inTransaction(
entityManager -> {
Query query = entityManager.createNamedQuery( QUERY_NAME );
query.setParameter( "value", 1 );
List<?> results = query.getResultList();
assertThat( results.size() ).isEqualTo( 1 );
}
);
}
@Entity(name = "TestEntity")
@Table(name = "TEST_ENTITY")
public static
|
RegisterNamedQueryWithParameterTest
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/SecondarySort.java
|
{
"start": 4200,
"end": 4604
}
|
class ____ extends Partitioner<IntPair,IntWritable>{
@Override
public int getPartition(IntPair key, IntWritable value,
int numPartitions) {
return Math.abs(key.getFirst() * 127) % numPartitions;
}
}
/**
* Compare only the first part of the pair, so that reduce is called once
* for each value of the first part.
*/
public static
|
FirstPartitioner
|
java
|
apache__commons-lang
|
src/main/java/org/apache/commons/lang3/reflect/FieldUtils.java
|
{
"start": 1775,
"end": 1839
}
|
class ____ {
/**
* Gets all fields of the given
|
FieldUtils
|
java
|
apache__logging-log4j2
|
log4j-core-test/src/test/java/org/apache/logging/log4j/core/LoggerMessageFactoryCustomizationTest.java
|
{
"start": 3175,
"end": 3454
}
|
class ____ extends AbstractMessageFactory {
@Override
public Message newMessage(final String message, final Object... params) {
return ParameterizedMessageFactory.INSTANCE.newMessage(message, params);
}
}
public static
|
TestMessageFactory
|
java
|
elastic__elasticsearch
|
x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/jinaai/JinaAIEmbeddingsRequestManager.java
|
{
"start": 1375,
"end": 2938
}
|
class ____ extends JinaAIRequestManager {
private static final Logger logger = LogManager.getLogger(JinaAIEmbeddingsRequestManager.class);
private static final ResponseHandler HANDLER = createEmbeddingsHandler();
private static ResponseHandler createEmbeddingsHandler() {
return new JinaAIResponseHandler("jinaai text embedding", JinaAIEmbeddingsResponseEntity::fromResponse);
}
public static JinaAIEmbeddingsRequestManager of(JinaAIEmbeddingsModel model, ThreadPool threadPool) {
return new JinaAIEmbeddingsRequestManager(Objects.requireNonNull(model), Objects.requireNonNull(threadPool));
}
private final JinaAIEmbeddingsModel model;
private JinaAIEmbeddingsRequestManager(JinaAIEmbeddingsModel model, ThreadPool threadPool) {
super(threadPool, model);
this.model = Objects.requireNonNull(model);
}
@Override
public void execute(
InferenceInputs inferenceInputs,
RequestSender requestSender,
Supplier<Boolean> hasRequestCompletedFunction,
ActionListener<InferenceServiceResults> listener
) {
EmbeddingsInput input = inferenceInputs.castTo(EmbeddingsInput.class);
List<String> docsInput = input.getTextInputs();
InputType inputType = input.getInputType();
JinaAIEmbeddingsRequest request = new JinaAIEmbeddingsRequest(docsInput, inputType, model);
execute(new ExecutableInferenceRequest(requestSender, logger, request, HANDLER, hasRequestCompletedFunction, listener));
}
}
|
JinaAIEmbeddingsRequestManager
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/calendars/ScheduledEvent.java
|
{
"start": 1627,
"end": 10690
}
|
class ____ implements ToXContentObject, Writeable {
public static final ParseField DESCRIPTION = new ParseField("description");
public static final ParseField START_TIME = new ParseField("start_time");
public static final ParseField END_TIME = new ParseField("end_time");
public static final ParseField SKIP_RESULT = new ParseField("skip_result");
public static final ParseField SKIP_MODEL_UPDATE = new ParseField("skip_model_update");
public static final ParseField FORCE_TIME_SHIFT = new ParseField("force_time_shift");
public static final ParseField TYPE = new ParseField("type");
public static final ParseField EVENT_ID = new ParseField("event_id");
public static final ParseField RESULTS_FIELD = new ParseField("events");
public static final String SCHEDULED_EVENT_TYPE = "scheduled_event";
public static final String DOCUMENT_ID_PREFIX = "event_";
public static final ObjectParser<ScheduledEvent.Builder, Void> STRICT_PARSER = createParser(false);
public static final ObjectParser<ScheduledEvent.Builder, Void> LENIENT_PARSER = createParser(true);
private static ObjectParser<ScheduledEvent.Builder, Void> createParser(boolean ignoreUnknownFields) {
ObjectParser<ScheduledEvent.Builder, Void> parser = new ObjectParser<>("scheduled_event", ignoreUnknownFields, Builder::new);
parser.declareString(ScheduledEvent.Builder::description, DESCRIPTION);
parser.declareField(
ScheduledEvent.Builder::startTime,
p -> TimeUtils.parseTimeFieldToInstant(p, START_TIME.getPreferredName()),
START_TIME,
ObjectParser.ValueType.VALUE
);
parser.declareField(
ScheduledEvent.Builder::endTime,
p -> TimeUtils.parseTimeFieldToInstant(p, END_TIME.getPreferredName()),
END_TIME,
ObjectParser.ValueType.VALUE
);
parser.declareBoolean(ScheduledEvent.Builder::skipResult, SKIP_RESULT);
parser.declareBoolean(ScheduledEvent.Builder::skipModelUpdate, SKIP_MODEL_UPDATE);
parser.declareInt(ScheduledEvent.Builder::forceTimeShift, FORCE_TIME_SHIFT);
parser.declareString(ScheduledEvent.Builder::calendarId, Calendar.ID);
parser.declareString((builder, s) -> {}, TYPE);
return parser;
}
public static String documentId(String eventId) {
return DOCUMENT_ID_PREFIX + eventId;
}
private final String description;
private final Instant startTime;
private final Instant endTime;
private final Boolean skipResult;
private final Boolean skipModelUpdate;
private final Integer forceTimeShift;
private final String calendarId;
private final String eventId;
ScheduledEvent(
String description,
Instant startTime,
Instant endTime,
Boolean skipResult,
Boolean skipModelUpdate,
@Nullable Integer forceTimeShift,
String calendarId,
@Nullable String eventId
) {
this.description = Objects.requireNonNull(description);
this.startTime = Instant.ofEpochMilli(Objects.requireNonNull(startTime).toEpochMilli());
this.endTime = Instant.ofEpochMilli(Objects.requireNonNull(endTime).toEpochMilli());
this.skipResult = Objects.requireNonNull(skipResult);
this.skipModelUpdate = Objects.requireNonNull(skipModelUpdate);
this.forceTimeShift = forceTimeShift;
this.calendarId = Objects.requireNonNull(calendarId);
this.eventId = eventId;
}
public ScheduledEvent(StreamInput in) throws IOException {
description = in.readString();
startTime = in.readInstant();
endTime = in.readInstant();
if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
skipResult = in.readBoolean();
skipModelUpdate = in.readBoolean();
forceTimeShift = in.readOptionalInt();
} else {
skipResult = true;
skipModelUpdate = true;
forceTimeShift = null;
}
calendarId = in.readString();
eventId = in.readOptionalString();
}
public String getDescription() {
return description;
}
public Instant getStartTime() {
return startTime;
}
public Instant getEndTime() {
return endTime;
}
public String getCalendarId() {
return calendarId;
}
public Boolean getSkipResult() {
return skipResult;
}
public Boolean getSkipModelUpdate() {
return skipModelUpdate;
}
public Integer getForceTimeShift() {
return forceTimeShift;
}
public String getEventId() {
return eventId;
}
/**
* Convert the scheduled event to a detection rule.
* The rule will have 2 time based conditions for the start and
* end of the event.
*
* The rule's start and end times are aligned with the bucket span
* so the start time is rounded down to a bucket interval and the
* end time rounded up.
*
* @param bucketSpan Bucket span to align to
* @return The event as a detection rule.
*/
public DetectionRule toDetectionRule(TimeValue bucketSpan) {
List<RuleCondition> conditions = new ArrayList<>();
long bucketSpanSecs = bucketSpan.getSeconds();
long bucketStartTime = Intervals.alignToFloor(getStartTime().getEpochSecond(), bucketSpanSecs);
conditions.add(RuleCondition.createTime(Operator.GTE, bucketStartTime));
long bucketEndTime = Intervals.alignToCeil(getEndTime().getEpochSecond(), bucketSpanSecs);
conditions.add(RuleCondition.createTime(Operator.LT, bucketEndTime));
DetectionRule.Builder builder = new DetectionRule.Builder(conditions);
List<String> ruleActions = new ArrayList<>();
if (skipResult) {
ruleActions.add(RuleAction.SKIP_RESULT.toString());
builder.setActions(RuleAction.SKIP_RESULT);
}
if (skipModelUpdate) {
ruleActions.add(RuleAction.SKIP_MODEL_UPDATE.toString());
}
if (forceTimeShift != null) {
ruleActions.add(RuleAction.FORCE_TIME_SHIFT.toString());
builder.setParams(new RuleParams(new RuleParamsForForceTimeShift(forceTimeShift)));
}
builder.setActions(ruleActions);
return builder.build();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(description);
out.writeInstant(startTime);
out.writeInstant(endTime);
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeBoolean(skipResult);
out.writeBoolean(skipModelUpdate);
out.writeOptionalInt(forceTimeShift);
}
out.writeString(calendarId);
out.writeOptionalString(eventId);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(DESCRIPTION.getPreferredName(), description);
builder.timestampFieldsFromUnixEpochMillis(
START_TIME.getPreferredName(),
START_TIME.getPreferredName() + "_string",
startTime.toEpochMilli()
);
builder.timestampFieldsFromUnixEpochMillis(
END_TIME.getPreferredName(),
END_TIME.getPreferredName() + "_string",
endTime.toEpochMilli()
);
builder.field(SKIP_RESULT.getPreferredName(), skipResult);
builder.field(SKIP_MODEL_UPDATE.getPreferredName(), skipModelUpdate);
if (forceTimeShift != null) {
builder.field(FORCE_TIME_SHIFT.getPreferredName(), forceTimeShift);
}
builder.field(Calendar.ID.getPreferredName(), calendarId);
if (eventId != null) {
builder.field(EVENT_ID.getPreferredName(), eventId);
}
if (params.paramAsBoolean(ToXContentParams.FOR_INTERNAL_STORAGE, false)) {
builder.field(TYPE.getPreferredName(), SCHEDULED_EVENT_TYPE);
}
builder.endObject();
return builder;
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
if ((obj instanceof ScheduledEvent) == false) {
return false;
}
ScheduledEvent other = (ScheduledEvent) obj;
return description.equals(other.description)
&& Objects.equals(startTime, other.startTime)
&& Objects.equals(endTime, other.endTime)
&& Objects.equals(skipResult, other.skipResult)
&& Objects.equals(skipModelUpdate, other.skipModelUpdate)
&& Objects.equals(forceTimeShift, other.forceTimeShift)
&& calendarId.equals(other.calendarId);
}
@Override
public int hashCode() {
return Objects.hash(description, startTime, endTime, skipResult, skipModelUpdate, forceTimeShift, calendarId);
}
public static
|
ScheduledEvent
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
|
{
"start": 61788,
"end": 62894
}
|
class ____
implements MultipleArcTransition
<ContainerImpl, ContainerEvent, ContainerState> {
@SuppressWarnings("unchecked")
@Override
public ContainerState transition(
ContainerImpl container, ContainerEvent event) {
ContainerResourceLocalizedEvent rsrcEvent =
(ContainerResourceLocalizedEvent) event;
container.reInitContext.newResourceSet.resourceLocalized(
rsrcEvent.getResource(), rsrcEvent.getLocation());
// Check if all ResourceLocalization has completed
if (container.reInitContext.newResourceSet.getPendingResources()
.isEmpty()) {
// Kill the current container.
container.dispatcher.getEventHandler().handle(
new ContainersLauncherEvent(container,
ContainersLauncherEventType.CLEANUP_CONTAINER_FOR_REINIT));
return ContainerState.REINITIALIZING_AWAITING_KILL;
}
return ContainerState.REINITIALIZING;
}
}
/**
* Resource is localized while the container is running - create symlinks.
*/
static
|
ResourceLocalizedWhileReInitTransition
|
java
|
elastic__elasticsearch
|
server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java
|
{
"start": 6424,
"end": 48114
}
|
class ____ extends ESSingleNodeTestCase {
private static final Logger logger = LogManager.getLogger(IndexShardIT.class);
@Override
protected Collection<Class<? extends Plugin>> getPlugins() {
return pluginList(InternalSettingsPlugin.class, BogusEstimatedHeapUsagePlugin.class);
}
public void testLockTryingToDelete() throws Exception {
createIndex("test");
ensureGreen();
NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class);
ClusterService cs = getInstanceFromNode(ClusterService.class);
final Index index = cs.state().metadata().getProject().index("test").getIndex();
Path[] shardPaths = env.availableShardPaths(new ShardId(index, 0));
logger.info("--> paths: [{}]", (Object) shardPaths);
// Should not be able to acquire the lock because it's already open
try {
NodeEnvironment.acquireFSLockForPaths(IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), shardPaths);
fail("should not have been able to acquire the lock");
} catch (LockObtainFailedException e) {
assertTrue("msg: " + e.getMessage(), e.getMessage().contains("unable to acquire write.lock"));
}
// Test without the regular shard lock to assume we can acquire it
// (worst case, meaning that the shard lock could be acquired and
// we're green to delete the shard's directory)
final ShardLock sLock = new DummyShardLock(new ShardId(index, 0));
final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", Settings.EMPTY);
final LockObtainFailedException exception = expectThrows(
LockObtainFailedException.class,
() -> env.deleteShardDirectoryUnderLock(sLock, indexSettings, indexPaths -> {
assert false : "should not be called " + indexPaths;
})
);
assertThat(exception.getMessage(), exception.getMessage(), containsString("unable to acquire write.lock"));
}
public void testDurableFlagHasEffect() {
createIndex("test");
ensureGreen();
prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get();
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService(resolveIndex("test"));
IndexShard shard = test.getShardOrNull(0);
Translog translog = getTranslog(shard);
Predicate<Translog> needsSync = (tlog) -> {
// we can't use tlog.needsSync() here since it also takes the global checkpoint into account
// we explicitly want to check here if our durability checks are taken into account so we only
// check if we are synced upto the current write location
Translog.Location lastWriteLocation = tlog.getLastWriteLocation();
try {
// the lastWriteLocaltion has a Integer.MAX_VALUE size so we have to create a new one
return tlog.ensureSynced(
new Translog.Location(lastWriteLocation.generation(), lastWriteLocation.translogLocation(), 0),
SequenceNumbers.UNASSIGNED_SEQ_NO
);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
};
setDurability(shard, Translog.Durability.REQUEST);
assertThat(needsSync, falseWith(translog));
setDurability(shard, Translog.Durability.ASYNC);
prepareIndex("test").setId("2").setSource("{}", XContentType.JSON).get();
assertThat(needsSync, trueWith(translog));
setDurability(shard, Translog.Durability.REQUEST);
client().prepareDelete("test", "1").get();
assertThat(needsSync, falseWith(translog));
setDurability(shard, Translog.Durability.ASYNC);
client().prepareDelete("test", "2").get();
assertTrue(translog.syncNeeded());
setDurability(shard, Translog.Durability.REQUEST);
assertNoFailures(
client().prepareBulk()
.add(prepareIndex("test").setId("3").setSource("{}", XContentType.JSON))
.add(client().prepareDelete("test", "1"))
.get()
);
assertThat(needsSync, falseWith(translog));
setDurability(shard, Translog.Durability.ASYNC);
assertNoFailures(
client().prepareBulk()
.add(prepareIndex("test").setId("4").setSource("{}", XContentType.JSON))
.add(client().prepareDelete("test", "3"))
.get()
);
setDurability(shard, Translog.Durability.REQUEST);
assertThat(needsSync, trueWith(translog));
}
private void setDurability(IndexShard shard, Translog.Durability durability) {
indicesAdmin().prepareUpdateSettings(shard.shardId().getIndexName())
.setSettings(Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), durability.name()).build())
.get();
assertEquals(durability, shard.getTranslogDurability());
}
public void testUpdatePriority() {
assertAcked(indicesAdmin().prepareCreate("test").setSettings(Settings.builder().put(IndexMetadata.SETTING_PRIORITY, 200)));
IndexService indexService = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test"));
assertEquals(200, indexService.getIndexSettings().getSettings().getAsInt(IndexMetadata.SETTING_PRIORITY, 0).intValue());
indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexMetadata.SETTING_PRIORITY, 400).build()).get();
assertEquals(400, indexService.getIndexSettings().getSettings().getAsInt(IndexMetadata.SETTING_PRIORITY, 0).intValue());
}
public void testIndexDirIsDeletedWhenShardRemoved() throws Exception {
Environment env = getInstanceFromNode(Environment.class);
Path idxPath = env.sharedDataDir().resolve(randomAlphaOfLength(10));
logger.info("--> idxPath: [{}]", idxPath);
Settings idxSettings = Settings.builder().put(IndexMetadata.SETTING_DATA_PATH, idxPath).build();
createIndex("test", idxSettings);
ensureGreen("test");
prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get();
assertHitCount(client().prepareSearch("test"), 1L);
indicesAdmin().prepareDelete("test").get();
awaitIndexShardCloseAsyncTasks();
assertAllIndicesRemovedAndDeletionCompleted(Collections.singleton(getInstanceFromNode(IndicesService.class)));
assertPathHasBeenCleared(idxPath);
}
// Verifies that after a ClusterInfo refresh both the shard store size and the shard
// data-set size are reported (and positive) for a primary that holds documents.
public void testExpectedShardSizeIsPresent() throws InterruptedException {
assertAcked(indicesAdmin().prepareCreate("test").setSettings(indexSettings(1, 0)));
// Index enough docs that the shard has a non-zero on-disk size.
for (int i = 0; i < 50; i++) {
prepareIndex("test").setSource("{}", XContentType.JSON).get();
}
ensureGreen("test");
InternalClusterInfoService clusterInfoService = (InternalClusterInfoService) getInstanceFromNode(ClusterInfoService.class);
// Force a synchronous stats collection rather than waiting for the periodic refresh.
ClusterInfoServiceUtils.refresh(clusterInfoService);
ClusterState state = getInstanceFromNode(ClusterService.class).state();
ShardRouting shardRouting = state.getRoutingTable().index("test").shard(0).primaryShard();
Long test = clusterInfoService.getClusterInfo().getShardSize(shardRouting);
assertNotNull(test);
assertTrue(test > 0);
Optional<Long> dataSetSize = clusterInfoService.getClusterInfo().getShardDataSetSize(shardRouting.shardId());
assertTrue(dataSetSize.isPresent());
assertThat(dataSetSize.get(), greaterThan(0L));
}
// Verifies that estimated heap usages are collected only when the estimated-heap
// threshold decider setting is enabled, and that every node then reports a usage whose
// free bytes do not exceed its total bytes. The setting is reset in a finally block so
// other tests are unaffected.
public void testHeapUsageEstimateIsPresent() {
InternalClusterInfoService clusterInfoService = (InternalClusterInfoService) getInstanceFromNode(ClusterInfoService.class);
ClusterInfoServiceUtils.refresh(clusterInfoService);
Map<String, EstimatedHeapUsage> estimatedHeapUsages = clusterInfoService.getClusterInfo().getEstimatedHeapUsages();
assertNotNull(estimatedHeapUsages);
// Not collecting yet because it is disabled
assertTrue(estimatedHeapUsages.isEmpty());
// Enable collection for estimated heap usages
updateClusterSettings(
Settings.builder()
.put(InternalClusterInfoService.CLUSTER_ROUTING_ALLOCATION_ESTIMATED_HEAP_THRESHOLD_DECIDER_ENABLED.getKey(), true)
.build()
);
try {
// With the decider enabled, a refresh must produce one entry per node.
ClusterInfoServiceUtils.refresh(clusterInfoService);
ClusterState state = getInstanceFromNode(ClusterService.class).state();
estimatedHeapUsages = clusterInfoService.getClusterInfo().getEstimatedHeapUsages();
assertEquals(state.nodes().size(), estimatedHeapUsages.size());
for (DiscoveryNode node : state.nodes()) {
assertTrue(estimatedHeapUsages.containsKey(node.getId()));
EstimatedHeapUsage estimatedHeapUsage = estimatedHeapUsages.get(node.getId());
// Sanity bound: free heap can never exceed total heap.
assertThat(estimatedHeapUsage.estimatedFreeBytes(), lessThanOrEqualTo(estimatedHeapUsage.totalBytes()));
}
} finally {
// Restore the cluster default so later tests see collection disabled again.
updateClusterSettings(
Settings.builder()
.putNull(InternalClusterInfoService.CLUSTER_ROUTING_ALLOCATION_ESTIMATED_HEAP_THRESHOLD_DECIDER_ENABLED.getKey())
.build()
);
}
}
// Verifies that a ClusterInfo refresh collects per-node thread pool usage stats and that
// each node reports WRITE pool stats with non-negative thread count, utilization and
// queue latency.
public void testNodeWriteLoadsArePresent() {
InternalClusterInfoService clusterInfoService = (InternalClusterInfoService) getInstanceFromNode(ClusterInfoService.class);
// Force a ClusterInfo refresh to run collection of the node thread pool usage stats.
ClusterInfoServiceUtils.refresh(clusterInfoService);
Map<String, NodeUsageStatsForThreadPools> nodeThreadPoolStats = clusterInfoService.getClusterInfo()
.getNodeUsageStatsForThreadPools();
assertNotNull(nodeThreadPoolStats);
// Verify that each node has usage stats reported.
ClusterState state = getInstanceFromNode(ClusterService.class).state();
assertEquals(state.nodes().size(), nodeThreadPoolStats.size());
for (DiscoveryNode node : state.nodes()) {
assertTrue(nodeThreadPoolStats.containsKey(node.getId()));
NodeUsageStatsForThreadPools nodeUsageStatsForThreadPools = nodeThreadPoolStats.get(node.getId());
assertThat(nodeUsageStatsForThreadPools.nodeId(), equalTo(node.getId()));
// Only the WRITE pool's stats are asserted here; other pools are not checked.
NodeUsageStatsForThreadPools.ThreadPoolUsageStats writeThreadPoolStats = nodeUsageStatsForThreadPools.threadPoolUsageStatsMap()
.get(ThreadPool.Names.WRITE);
assertNotNull(writeThreadPoolStats);
assertThat(writeThreadPoolStats.totalThreadPoolThreads(), greaterThanOrEqualTo(0));
assertThat(writeThreadPoolStats.averageThreadPoolUtilization(), greaterThanOrEqualTo(0.0f));
assertThat(writeThreadPoolStats.maxThreadPoolQueueLatencyMillis(), greaterThanOrEqualTo(0L));
}
}
// Verifies that per-shard write loads are collected only when the write-load decider is
// enabled (ENABLED or LOW_THRESHOLD_ONLY), that every shard in the default project gets
// an entry, and that each index that received writes shows a positive load. The decider
// setting is cleared in a finally block.
public void testShardWriteLoadsArePresent() {
// Create some indices and some write-load
final int numIndices = randomIntBetween(1, 5);
final String indexPrefix = randomIdentifier();
IntStream.range(0, numIndices).forEach(i -> {
final String indexName = indexPrefix + "_" + i;
createIndex(indexName, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 3)).build());
IntStream.range(0, randomIntBetween(1, 500))
.forEach(j -> prepareIndex(indexName).setSource("foo", randomIdentifier(), "bar", randomIdentifier()).get());
});
final InternalClusterInfoService clusterInfoService = (InternalClusterInfoService) getInstanceFromNode(ClusterInfoService.class);
try {
// Explicitly disable write load decider
setWriteLoadDeciderEnablement(WriteLoadConstraintSettings.WriteLoadDeciderStatus.DISABLED);
// Stats should not be collected when the decider is disabled
{
ClusterInfoServiceUtils.refresh(clusterInfoService);
final Map<ShardId, Double> shardWriteLoads = clusterInfoService.getClusterInfo().getShardWriteLoads();
assertNotNull(shardWriteLoads);
assertTrue(shardWriteLoads.isEmpty());
}
// Turn on collection of write-load stats.
setWriteLoadDeciderEnablement(
randomBoolean()
? WriteLoadConstraintSettings.WriteLoadDeciderStatus.ENABLED
: WriteLoadConstraintSettings.WriteLoadDeciderStatus.LOW_THRESHOLD_ONLY
);
// Force a ClusterInfo refresh to run collection of the write-load stats.
ClusterInfoServiceUtils.refresh(clusterInfoService);
final Map<ShardId, Double> shardWriteLoads = clusterInfoService.getClusterInfo().getShardWriteLoads();
// Verify that each shard has write-load reported.
final ClusterState state = getInstanceFromNode(ClusterService.class).state();
assertEquals(state.projectState(ProjectId.DEFAULT).metadata().getTotalNumberOfShards(), shardWriteLoads.size());
for (IndexMetadata indexMetadata : state.projectState(ProjectId.DEFAULT).metadata()) {
double maximumLoadRecorded = 0;
for (int i = 0; i < indexMetadata.getNumberOfShards(); i++) {
final ShardId shardId = new ShardId(indexMetadata.getIndex(), i);
assertTrue(shardWriteLoads.containsKey(shardId));
maximumLoadRecorded = Math.max(shardWriteLoads.get(shardId), maximumLoadRecorded);
}
// Each index should have seen some write-load
assertThat(maximumLoadRecorded, greaterThan(0.0));
}
} finally {
clearWriteLoadDeciderEnablementSetting();
}
}
/** Removes the write-load decider enablement setting so the cluster default applies again. */
private void clearWriteLoadDeciderEnablementSetting() {
final String settingKey = WriteLoadConstraintSettings.WRITE_LOAD_DECIDER_ENABLED_SETTING.getKey();
updateClusterSettings(Settings.builder().putNull(settingKey).build());
}
// Verifies that a ClusterInfo refresh reports a positive max heap size for every node in
// the cluster state.
public void testMaxHeapPerNodeIsPresent() {
InternalClusterInfoService clusterInfoService = (InternalClusterInfoService) getInstanceFromNode(ClusterInfoService.class);
// Force collection instead of waiting for the periodic refresh.
ClusterInfoServiceUtils.refresh(clusterInfoService);
Map<String, ByteSizeValue> maxHeapSizePerNode = clusterInfoService.getClusterInfo().getMaxHeapSizePerNode();
assertNotNull(maxHeapSizePerNode);
ClusterState state = getInstanceFromNode(ClusterService.class).state();
// One entry per node, each strictly greater than zero bytes.
assertEquals(state.nodes().size(), maxHeapSizePerNode.size());
for (DiscoveryNode node : state.nodes()) {
assertTrue(maxHeapSizePerNode.containsKey(node.getId()));
assertThat(maxHeapSizePerNode.get(node.getId()), greaterThan(ByteSizeValue.ZERO));
}
}
// Verifies that a closed index can have its custom index.data_path updated: the on-disk
// contents are copied to a new directory, the setting is changed while the index is
// closed, and the reopened index still serves its document. Finally the index is deleted
// and the new path is asserted to be cleaned up.
public void testIndexCanChangeCustomDataPath() throws Exception {
final String index = "test-custom-data-path";
final Path sharedDataPath = getInstanceFromNode(Environment.class).sharedDataDir().resolve(randomAsciiLettersOfLength(10));
final Path indexDataPath = sharedDataPath.resolve("start-" + randomAsciiLettersOfLength(10));
logger.info("--> creating index [{}] with data_path [{}]", index, indexDataPath);
createIndex(index, Settings.builder().put(IndexMetadata.SETTING_DATA_PATH, indexDataPath.toAbsolutePath().toString()).build());
prepareIndex(index).setId("1").setSource("foo", "bar").setRefreshPolicy(IMMEDIATE).get();
ensureGreen(index);
assertHitCount(client().prepareSearch(index).setSize(0), 1L);
// Sanity check: a plain close/reopen cycle keeps the document searchable.
logger.info("--> closing the index [{}]", index);
assertAcked(indicesAdmin().prepareClose(index));
logger.info("--> index closed, re-opening...");
assertAcked(indicesAdmin().prepareOpen(index));
logger.info("--> index re-opened");
ensureGreen(index);
assertHitCount(client().prepareSearch(index).setSize(0), 1L);
// Now, try closing and changing the settings
logger.info("--> closing the index [{}] before updating data_path", index);
assertAcked(indicesAdmin().prepareClose(index));
// Wait for async shard close so files are no longer held open before copying.
awaitIndexShardCloseAsyncTasks();
final Path newIndexDataPath = sharedDataPath.resolve("end-" + randomAlphaOfLength(10));
IOUtils.rm(newIndexDataPath);
logger.info("--> copying data on disk from [{}] to [{}]", indexDataPath, newIndexDataPath);
assert Files.exists(newIndexDataPath) == false : "new index data path directory should not exist!";
// Copy the shard files to the new location, skipping Lucene lock files.
try (Stream<Path> stream = Files.walk(indexDataPath)) {
stream.forEach(path -> {
try {
if (path.endsWith(".lock") == false) {
Files.copy(path, newIndexDataPath.resolve(indexDataPath.relativize(path)));
}
} catch (final Exception e) {
logger.error("Failed to copy data path directory", e);
fail();
}
});
}
logger.info("--> updating data_path to [{}] for index [{}]", newIndexDataPath, index);
// data_path can only be updated on a closed index; allow closed indices in options.
assertAcked(
indicesAdmin().prepareUpdateSettings(index)
.setSettings(Settings.builder().put(IndexMetadata.SETTING_DATA_PATH, newIndexDataPath.toAbsolutePath().toString()).build())
.setIndicesOptions(IndicesOptions.fromOptions(true, false, true, true))
);
logger.info("--> settings updated and files moved, re-opening index")
|
public void testMaybeFlush() throws Exception {
createIndex(
"test",
Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.REQUEST).build()
);
ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService(resolveIndex("test"));
IndexShard shard = test.getShardOrNull(0);
assertFalse(shard.shouldPeriodicallyFlush());
indicesAdmin().prepareUpdateSettings("test")
.setSettings(
Settings.builder()
.put(
IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(),
ByteSizeValue.of(135 /* size of the operation + one generation header&footer*/, ByteSizeUnit.BYTES)
)
.build()
)
.get();
prepareIndex("test").setId("0").setSource("{}", XContentType.JSON).setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get();
assertFalse(shard.shouldPeriodicallyFlush());
shard.applyIndexOperationOnPrimary(
Versions.MATCH_ANY,
VersionType.INTERNAL,
new SourceToParse("1", new BytesArray("{}"), XContentType.JSON),
SequenceNumbers.UNASSIGNED_SEQ_NO,
0,
IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP,
false
);
assertTrue(shard.shouldPeriodicallyFlush());
final Translog translog = getTranslog(shard);
assertEquals(2, translog.stats().getUncommittedOperations());
assertThat(shard.flushStats().getTotal(), equalTo(0L));
prepareIndex("test").setId("2").setSource("{}", XContentType.JSON).setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get();
assertThat(shard.getLastKnownGlobalCheckpoint(), equalTo(2L));
assertBusy(() -> { // this is async
assertFalse(shard.shouldPeriodicallyFlush());
assertThat(shard.flushStats().getPeriodic(), equalTo(1L));
assertThat(shard.flushStats().getTotal(), equalTo(1L));
});
shard.sync();
assertThat(shard.getLastSyncedGlobalCheckpoint(), equalTo(2L));
assertThat("last commit [" + shard.commitStats().getUserData() + "]", translog.stats().getUncommittedOperations(), equalTo(0));
long size = Math.max(translog.stats().getUncommittedSizeInBytes(), Translog.DEFAULT_HEADER_SIZE_IN_BYTES + 1);
logger.info(
"--> current translog size: [{}] num_ops [{}] generation [{}]",
translog.stats().getUncommittedSizeInBytes(),
translog.stats().getUncommittedOperations(),
translog.getGeneration()
);
indicesAdmin().prepareUpdateSettings("test")
.setSettings(
Settings.builder()
.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), ByteSizeValue.of(size, ByteSizeUnit.BYTES))
.build()
)
.get();
client().prepareDelete("test", "2").get();
logger.info(
"--> translog size after delete: [{}] num_ops [{}] generation [{}]",
translog.stats().getUncommittedSizeInBytes(),
translog.stats().getUncommittedOperations(),
translog.getGeneration()
);
assertBusy(() -> { // this is async
final TranslogStats translogStats = translog.stats();
final CommitStats commitStats = shard.commitStats();
final FlushStats flushStats = shard.flushStats();
logger.info(
"--> translog stats [{}] gen [{}] commit_stats [{}] flush_stats [{}/{}]",
Strings.toString(translogStats),
translog.getGeneration().translogFileGeneration(),
commitStats.getUserData(),
flushStats.getPeriodic(),
flushStats.getTotal()
);
assertFalse(shard.shouldPeriodicallyFlush());
});
shard.sync();
assertEquals(0, translog.stats().getUncommittedOperations());
}
public void testMaybeRollTranslogGeneration() throws Exception {
final int generationThreshold = randomIntBetween(64, 512);
final Settings settings = Settings.builder()
.put("index.number_of_shards", 1)
.put("index.translog.generation_threshold_size", generationThreshold + "b")
.build();
createIndex("test", settings, "test");
ensureGreen("test");
final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
final IndexService test = indicesService.indexService(resolveIndex("test"));
final IndexShard shard = test.getShardOrNull(0);
int rolls = 0;
final Translog translog = getTranslog(shard);
final long generation = translog.currentFileGeneration();
final int numberOfDocuments = randomIntBetween(32, 128);
for (int i = 0; i < numberOfDocuments; i++) {
assertThat(translog.currentFileGeneration(), equalTo(generation + rolls));
final Engine.IndexResult result = shard.applyIndexOperationOnPrimary(
Versions.MATCH_ANY,
VersionType.INTERNAL,
new SourceToParse("1", new BytesArray("{}"), XContentType.JSON),
SequenceNumbers.UNASSIGNED_SEQ_NO,
0,
IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP,
false
);
final Translog.Location location = result.getTranslogLocation();
shard.afterWriteOperation();
if (location.translogLocation() + location.size() > generationThreshold) {
// wait until the roll completes
assertBusy(() -> assertFalse(shard.shouldRollTranslogGeneration()));
rolls++;
assertThat(translog.currentFileGeneration(), equalTo(generation + rolls));
}
}
}
public void testStressMaybeFlushOrRollTranslogGeneration() throws Exception {
createIndex("test");
ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService(resolveIndex("test"));
final IndexShard shard = test.getShardOrNull(0);
assertFalse(shard.shouldPeriodicallyFlush());
final boolean flush = randomBoolean();
final Settings settings;
if (flush) {
// size of the operation plus the overhead of one generation.
settings = Settings.builder().put("index.translog.flush_threshold_size", "125b").build();
} else {
// size of the operation plus header and footer
settings = Settings.builder().put("index.translog.generation_threshold_size", "117b").build();
}
indicesAdmin().prepareUpdateSettings("test").setSettings(settings).get();
prepareIndex("test").setId("0").setSource("{}", XContentType.JSON).setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get();
assertFalse(shard.shouldPeriodicallyFlush());
final AtomicBoolean running = new AtomicBoolean(true);
final int numThreads = randomIntBetween(2, 4);
final Thread[] threads = new Thread[numThreads];
final CyclicBarrier barrier = new CyclicBarrier(numThreads + 1);
for (int i = 0; i < threads.length; i++) {
threads[i] = new Thread(() -> {
try {
barrier.await();
} catch (final InterruptedException | BrokenBarrierException e) {
throw new RuntimeException(e);
}
while (running.get()) {
shard.afterWriteOperation();
}
});
threads[i].start();
}
barrier.await();
final CheckedRunnable<Exception> check;
if (flush) {
final FlushStats initialStats = shard.flushStats();
prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get();
check = () -> {
assertFalse(shard.shouldPeriodicallyFlush());
final FlushStats currentStats = shard.flushStats();
String msg = String.format(
Locale.ROOT,
"flush stats: total=[%d vs %d], periodic=[%d vs %d]",
initialStats.getTotal(),
currentStats.getTotal(),
initialStats.getPeriodic(),
currentStats.getPeriodic()
);
assertThat(
msg,
currentStats.getPeriodic(),
either(equalTo(initialStats.getPeriodic() + 1)).or(equalTo(initialStats.getPeriodic() + 2))
);
assertThat(
msg,
currentStats.getTotal(),
either(equalTo(initialStats.getTotal() + 1)).or(equalTo(initialStats.getTotal() + 2))
);
};
} else {
final long generation = getTranslog(shard).currentFileGeneration();
prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get();
check = () -> {
assertFalse(shard.shouldRollTranslogGeneration());
assertEquals(generation + 1, getTranslog(shard).currentFileGeneration());
};
}
assertBusy(check);
running.set(false);
for (int i = 0; i < threads.length; i++) {
threads[i].join();
}
check.run();
}
public void testFlushStats() throws Exception {
final IndexService indexService = createIndex("test");
ensureGreen();
Settings settings = Settings.builder().put("index.translog.flush_threshold_size", "" + between(200, 300) + "b").build();
indicesAdmin().prepareUpdateSettings("test").setSettings(settings).get();
final int numDocs = between(10, 100);
for (int i = 0; i < numDocs; i++) {
prepareIndex("test").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get();
}
// A flush stats may include the new total count but the old period count - assert eventually.
assertBusy(() -> {
final FlushStats flushStats = indicesAdmin().prepareStats("test").clear().setFlush(true).get().getTotal().flush;
assertThat(flushStats.getPeriodic(), allOf(equalTo(flushStats.getTotal()), greaterThan(0L)));
});
assertBusy(() -> assertThat(indexService.getShard(0).shouldPeriodicallyFlush(), equalTo(false)));
settings = Settings.builder().put("index.translog.flush_threshold_size", (String) null).build();
indicesAdmin().prepareUpdateSettings("test").setSettings(settings).get();
prepareIndex("test").setId(UUIDs.randomBase64UUID()).setSource("{}", XContentType.JSON).get();
indicesAdmin().prepareFlush("test").setForce(randomBoolean()).setWaitIfOngoing(true).get();
final FlushStats flushStats = indicesAdmin().prepareStats("test").clear().setFlush(true).get().getTotal().flush;
assertThat(flushStats.getTotal(), greaterThan(flushStats.getPeriodic()));
}
public void testShardHasMemoryBufferOnTranslogRecover() throws Throwable {
createIndex("test");
ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService indexService = indicesService.indexService(resolveIndex("test"));
IndexShard shard = indexService.getShardOrNull(0);
prepareIndex("test").setId("0").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get();
client().prepareDelete("test", "0").get();
prepareIndex("test").setId("1").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get();
CheckedFunction<DirectoryReader, DirectoryReader, IOException> wrapper = directoryReader -> directoryReader;
closeShardNoCheck(shard);
AtomicReference<IndexShard> shardRef = new AtomicReference<>();
List<Exception> failures = new ArrayList<>();
IndexingOperationListener listener = new IndexingOperationListener() {
@Override
public void postIndex(ShardId shardId, Engine.Index index, Engine.IndexResult result) {
try {
assertNotNull(shardRef.get());
// this is all IMC needs to do - check current memory and refresh
assertTrue(shardRef.get().getIndexBufferRAMBytesUsed() > 0);
shardRef.get().refresh("test");
} catch (Exception e) {
failures.add(e);
throw e;
}
}
@Override
public void postDelete(ShardId shardId, Engine.Delete delete, Engine.DeleteResult result) {
try {
assertNotNull(shardRef.get());
// this is all IMC needs to do - check current memory and refresh
assertTrue(shardRef.get().getIndexBufferRAMBytesUsed() > 0);
shardRef.get().refresh("test");
} catch (Exception e) {
failures.add(e);
throw e;
}
}
};
final IndexShard newShard = newIndexShard(indexService, shard, wrapper, getInstanceFromNode(CircuitBreakerService.class), listener);
shardRef.set(newShard);
recoverShard(newShard);
try {
ExceptionsHelper.rethrowAndSuppress(failures);
} finally {
closeShardNoCheck(newShard, randomBoolean());
}
}
private void setWriteLoadDeciderEnablement(WriteLoadConstraintSettings.WriteLoadDeciderStatus status) {
updateClusterSettings(
Settings.builder().put(WriteLoadConstraintSettings.WRITE_LOAD_DECIDER_ENABLED_SETTING.getKey(), status).build()
);
}
public static final IndexShard recoverShard(IndexShard newShard) throws IOException {
DiscoveryNode localNode = DiscoveryNodeUtils.builder("foo").roles(emptySet()).build();
newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null));
recoverFromStore(newShard);
IndexShardTestCase.updateRoutingEntry(
newShard,
newShard.routingEntry().moveToStarted(ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE)
);
return newShard;
}
public static final IndexShard newIndexShard(
final IndexService indexService,
final IndexShard shard,
CheckedFunction<DirectoryReader, DirectoryReader, IOException> wrapper,
final CircuitBreakerService cbs,
final IndexingOperationListener... listeners
) throws IOException {
ShardRouting initializingShardRouting = getInitializingShardRouting(shard.routingEntry());
return new IndexShard(
initializingShardRouting,
indexService.getIndexSettings(),
shard.shardPath(),
shard.store(),
indexService.getIndexSortSupplier(),
indexService.cache(),
indexService.mapperService(),
indexService.similarityService(),
shard.getEngineFactory(),
indexService.getIndexEventListener(),
wrapper,
indexService.getThreadPool(),
indexService.getThreadPoolMergeExecutorService(),
indexService.getBigArrays(),
null,
Collections.emptyList(),
Arrays.asList(listeners),
IndexShardTestCase.NOOP_GCP_SYNCER,
RetentionLeaseSyncer.EMPTY,
cbs,
IndexModule.DEFAULT_SNAPSHOT_COMMIT_SUPPLIER,
System::nanoTime,
null,
MapperMetrics.NOOP,
new IndexingStatsSettings(ClusterSettings.createBuiltInClusterSettings()),
new SearchStatsSettings(ClusterSettings.createBuiltInClusterSettings()),
MergeMetrics.NOOP
);
}
private static ShardRouting getInitializingShardRouting(ShardRouting existingShardRouting) {
ShardRouting shardRouting = shardRoutingBuilder(
existingShardRouting.shardId(),
existingShardRouting.currentNodeId(),
existingShardRouting.primary(),
ShardRoutingState.INITIALIZING
).withAllocationId(existingShardRouting.allocationId()).build();
shardRouting = shardRouting.updateUnassigned(
new UnassignedInfo(UnassignedInfo.Reason.INDEX_REOPENED, "fake recovery"),
RecoverySource.ExistingStoreRecoverySource.INSTANCE
);
return shardRouting;
}
public void testInvalidateIndicesRequestCacheWhenRollbackEngine() throws Exception {
createIndex("test", indexSettings(1, 0).put("index.refresh_interval", -1).build());
ensureGreen();
final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
final IndexShard shard = indicesService.getShardOrNull(new ShardId(resolveIndex("test"), 0));
final SearchRequest countRequest = new SearchRequest("test").source(new SearchSourceBuilder().size(0));
final long numDocs = between(10, 20);
for (int i = 0; i < numDocs; i++) {
prepareIndex("test").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get();
if (randomBoolean()) {
shard.refresh("test");
}
}
shard.refresh("test");
assertHitCount(client().search(countRequest), numDocs);
assertThat(shard.getLocalCheckpoint(), equalTo(shard.seqNoStats().getMaxSeqNo()));
final CountDownLatch engineResetLatch = new CountDownLatch(1);
shard.acquireAllPrimaryOperationsPermits(ActionListener.wrap(r -> {
try {
shard.rollbackEngineToGlobalCheckpoint();
} finally {
r.close();
engineResetLatch.countDown();
}
}, Assert::assertNotNull), TimeValue.timeValueMinutes(1L));
engineResetLatch.await();
final long moreDocs = between(10, 20);
for (int i = 0; i < moreDocs; i++) {
prepareIndex("test").setId(Long.toString(i + numDocs)).setSource("{}", XContentType.JSON).get();
if (randomBoolean()) {
shard.refresh("test");
}
}
shard.refresh("test");
try (Engine.Searcher searcher = shard.acquireSearcher("test")) {
assertThat(
"numDocs=" + numDocs + " moreDocs=" + moreDocs,
(long) searcher.getIndexReader().numDocs(),
equalTo(numDocs + moreDocs)
);
}
assertHitCount(client().search(countRequest), numDocs + moreDocs);
}
public void testShardChangesWithDefaultDocType() throws Exception {
Settings settings = indexSettings(1, 0).put("index.translog.flush_threshold_size", "512mb") // do not flush
.put("index.soft_deletes.enabled", true)
.build();
IndexService indexService = createIndex("index", settings, "user_doc", "title", "type=keyword");
int numOps = between(1, 10);
for (int i = 0; i < numOps; i++) {
if (randomBoolean()) {
prepareIndex("index").setId(randomFrom("1", "2")).setSource("{}", XContentType.JSON).get();
} else {
client().prepareDelete("index", randomFrom("1", "2")).get();
}
}
IndexShard shard = indexService.getShard(0);
try (
Translog.Snapshot luceneSnapshot = shard.newChangesSnapshot(
"test",
0,
numOps - 1,
true,
randomBoolean(),
randomBoolean(),
randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes())
);
Translog.Snapshot translogSnapshot = getTranslog(shard).newSnapshot()
) {
List<Translog.Operation> opsFromLucene = TestTranslog.drainSnapshot(luceneSnapshot, true);
List<Translog.Operation> opsFromTranslog = TestTranslog.drainSnapshot(translogSnapshot, true);
assertThat(opsFromLucene, equalTo(opsFromTranslog));
}
}
/**
* Test that the {@link org.elasticsearch.index.engine.NoOpEngine} takes precedence over other
* engine factories if the index is closed.
*/
public void testNoOpEngineFactoryTakesPrecedence() {
final String indexName = "closed-index";
createIndex(indexName, indexSettings(1, 0).build());
ensureGreen();
assertAcked(indicesAdmin().prepareClose(indexName));
final ClusterService clusterService = getInstanceFromNode(ClusterService.class);
final ClusterState clusterState = clusterService.state();
final IndexMetadata indexMetadata = clusterState.metadata().getProject().index(indexName);
final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
final IndexService indexService = indicesService.indexServiceSafe(indexMetadata.getIndex());
for (IndexShard indexShard : indexService) {
assertThat(indexShard.getEngine(), instanceOf(NoOpEngine.class));
}
}
/**
* Asserts that there are no files in the specified path
*/
private void assertPathHasBeenCleared(Path path) {
logger.info("--> checking that [{}] has been cleared", path);
int count = 0;
StringBuilder sb = new StringBuilder();
sb.append("[");
if (Files.exists(path)) {
try (DirectoryStream<Path> stream = Files.newDirectoryStream(path)) {
for (Path file : stream) {
// Skip files added by Lucene's ExtraFS
if (file.getFileName().toString().startsWith("extra")) {
continue;
}
logger.info("--> found file: [{}]", file.toAbsolutePath().toString());
if (Files.isDirectory(file)) {
assertPathHasBeenCleared(file);
} else if (Files.isRegularFile(file)) {
count++;
sb.append(file.toAbsolutePath().toString());
sb.append("\n");
}
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
sb.append("]");
assertThat(count + " files exist that should have been cleaned:\n" + sb.toString(), count, equalTo(0));
}
private static void assertAllIndicesRemovedAndDeletionCompleted(Iterable<IndicesService> indicesServices) throws Exception {
for (IndicesService indicesService : indicesServices) {
assertBusy(() -> assertFalse(indicesService.iterator().hasNext()), 1, TimeUnit.MINUTES);
assertBusy(() -> assertFalse(indicesService.hasUncompletedPendingDeletes()), 1, TimeUnit.MINUTES);
}
}
public static
|
IndexShardIT
|
java
|
quarkusio__quarkus
|
extensions/oidc/deployment/src/test/java/io/quarkus/oidc/test/OpaqueTokenVerificationWithUserInfoValidationTest.java
|
{
"start": 451,
"end": 1965
}
|
class ____ {
@RegisterExtension
static final QuarkusUnitTest test = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addAsResource(new StringAsset(
"# Disable Dev Services, we use a test resource manager\n" +
"quarkus.keycloak.devservices.enabled=false\n" +
"quarkus.oidc.token.verify-access-token-with-user-info=true\n"
+ "quarkus.oidc.authentication.user-info-required=false\n"),
"application.properties"))
.assertException(t -> {
Throwable e = t;
ConfigurationException te = null;
while (e != null) {
if (e instanceof ConfigurationException) {
te = (ConfigurationException) e;
break;
}
e = e.getCause();
}
assertNotNull(te);
// assert UserInfo is required
assertTrue(
te.getMessage()
.contains(
"UserInfo is not required but 'quarkus.oidc.token.verify-access-token-with-user-info' is enabled"),
te.getMessage());
});
@Test
public void test() {
Assertions.fail();
}
}
|
OpaqueTokenVerificationWithUserInfoValidationTest
|
java
|
redisson__redisson
|
redisson-spring-data/redisson-spring-data-26/src/main/java/org/redisson/spring/data/connection/RedissonReactivePubSubCommands.java
|
{
"start": 1298,
"end": 2534
}
|
class ____ extends RedissonBaseReactive implements ReactivePubSubCommands {
RedissonReactivePubSubCommands(CommandReactiveExecutor executorService) {
super(executorService);
}
@Override
public Flux<Long> publish(Publisher<ChannelMessage<ByteBuffer, ByteBuffer>> messageStream) {
return execute(messageStream, msg -> {
return write(toByteArray(msg.getChannel()), StringCodec.INSTANCE, RedisCommands.PUBLISH, toByteArray(msg.getChannel()), toByteArray(msg.getMessage()));
});
}
@Override
public Mono<Void> subscribe(ByteBuffer... channels) {
throw new UnsupportedOperationException("Subscribe through ReactiveSubscription object created by createSubscription method");
}
@Override
public Mono<Void> pSubscribe(ByteBuffer... patterns) {
throw new UnsupportedOperationException("Subscribe through ReactiveSubscription object created by createSubscription method");
}
@Override
public Mono<ReactiveSubscription> createSubscription(SubscriptionListener subscriptionListener) {
return Mono.just(new RedissonReactiveSubscription(executorService.getConnectionManager(), subscriptionListener));
}
}
|
RedissonReactivePubSubCommands
|
java
|
micronaut-projects__micronaut-core
|
inject/src/main/java/io/micronaut/inject/BeanDefinitionMethodReference.java
|
{
"start": 904,
"end": 1676
}
|
interface ____<T, R> extends ExecutableMethod<T, R> {
/**
* @return The {@link BeanDefinition} associated with this method.
*/
BeanDefinition<T> getBeanDefinition();
/**
* Create a {@link BeanDefinitionMethodReference} for the given {@link BeanDefinition} and {@link ExecutableMethod}.
*
* @param definition The definition
* @param method The method
* @param <T1> The type
* @param <R1> The result
* @return The {@link BeanDefinitionMethodReference}
*/
static <T1, R1> BeanDefinitionMethodReference<T1, R1> of(BeanDefinition<T1> definition, ExecutableMethod<T1, R1> method) {
return new DefaultBeanDefinitionMethodReference<>(definition, method);
}
}
|
BeanDefinitionMethodReference
|
java
|
apache__avro
|
lang/java/ipc/src/test/java/org/apache/avro/ipc/stats/TestHistogram.java
|
{
"start": 1200,
"end": 3554
}
|
class ____ {
// End-to-end check of Histogram: counting, bucket distribution, string form,
// bucket/boundary label enumeration, entry iteration, and recent-addition tracking.
@Test
void basicOperation() {
    // Buckets: [0,1) [1,2) [2,4) [4,8) [8,16) [16,infinity)
    Segmenter<String, Integer> s = new Histogram.TreeMapSegmenter<>(new TreeSet<>(Arrays.asList(0, 1, 2, 4, 8, 16)));
    Histogram<String, Integer> h = new Histogram<>(s);
    for (int i = 0; i < 20; ++i) {
        h.add(i);
    }
    assertEquals(20, h.getCount());
    // Values 0..19 fall into the buckets with these counts.
    assertArrayEquals(new int[] { 1, 1, 2, 4, 8, 4 }, h.getHistogram());
    assertEquals("[0,1)=1;[1,2)=1;[2,4)=2;[4,8)=4;[8,16)=8;[16,infinity)=4", h.toString());
    String[] correctBucketLabels = { "[0,1)", "[1,2)", "[2,4)", "[4,8)", "[8,16)", "[16,infinity)" };
    // test bucket iterator
    int pos = 0;
    Iterator<String> it = h.getSegmenter().getBuckets();
    while (it.hasNext()) {
        assertEquals(correctBucketLabels[pos], it.next());
        pos = pos + 1;
    }
    assertEquals(correctBucketLabels.length, pos);
    // Bucket labels as a list must match the iterator's view.
    List<String> labels = h.getSegmenter().getBucketLabels();
    assertEquals(correctBucketLabels.length, labels.size());
    if (labels.size() == correctBucketLabels.length) {
        for (int i = 0; i < labels.size(); i++) {
            assertEquals(correctBucketLabels[i], labels.get(i));
        }
    }
    // Boundary labels are the raw segment boundaries.
    String[] correctBoundryLabels = { "0", "1", "2", "4", "8", "16" };
    List<String> boundryLabels = h.getSegmenter().getBoundaryLabels();
    assertEquals(correctBoundryLabels.length, boundryLabels.size());
    if (boundryLabels.size() == correctBoundryLabels.length) {
        for (int i = 0; i < boundryLabels.size(); i++) {
            assertEquals(correctBoundryLabels[i], boundryLabels.get(i));
        }
    }
    // entries() exposes (bucket, count) pairs in bucket order.
    List<Entry<String>> entries = new ArrayList<>();
    for (Entry<String> entry : h.entries()) {
        entries.add(entry);
    }
    assertEquals("[0,1)", entries.get(0).bucket);
    assertEquals(4, entries.get(5).count);
    assertEquals(6, entries.size());
    // Recently added values are retained and queryable.
    h.add(1010);
    h.add(9191);
    List<Integer> recent = h.getRecentAdditions();
    assertTrue(recent.contains(1010));
    assertTrue(recent.contains(9191));
}
// A value below the lowest segment boundary has no bucket and must be rejected.
@Test
void badValue() {
    assertThrows(Histogram.SegmenterException.class, () -> {
        Segmenter<String, Long> s = new Histogram.TreeMapSegmenter<>(
            new TreeSet<>(Arrays.asList(0L, 1L, 2L, 4L, 8L, 16L)));
        Histogram<String, Long> h = new Histogram<>(s);
        h.add(-1L);  // -1 < 0, outside all buckets
    });
}
/** Only has one bucket */
static
|
TestHistogram
|
java
|
apache__flink
|
flink-test-utils-parent/flink-test-utils-connector/src/main/java/org/apache/flink/test/util/source/AbstractTestSource.java
|
{
"start": 1444,
"end": 1712
}
|
/**
 * Test source base that uses {@link Void} as its enumerator checkpoint type,
 * for sources whose enumerator keeps no checkpointable state.
 *
 * @param <T> the record type produced by the source
 */
class ____<T> extends AbstractTestSourceBase<T, Void> {
    private static final long serialVersionUID = 1L;

    @Override
    public SimpleVersionedSerializer<Void> getEnumeratorCheckpointSerializer() {
        // Nothing to serialize — delegate to the shared Void serializer.
        return VoidSerializer.INSTANCE;
    }
}
|
AbstractTestSource
|
java
|
apache__camel
|
components/camel-jms/src/test/java/org/apache/camel/component/jms/JmsHeaderFilteringTest.java
|
{
"start": 1817,
"end": 4927
}
|
class ____ extends AbstractJMSTest {
@Order(2)
@RegisterExtension
public static CamelContextExtension camelContextExtension = new DefaultCamelContextExtension();
private static final String IN_FILTER_PATTERN = "(org_apache_camel)[_|a-z|A-Z|0-9]*(test)[_|a-z|A-Z|0-9]*";
protected CamelContext context;
protected ProducerTemplate template;
protected ConsumerTemplate consumer;
private final String componentName = "jms";
private final String testQueueEndpointA = componentName + ":queue:JmsHeaderFilteringTest.test..a";
private final String testQueueEndpointB = componentName + ":queue:JmsHeaderFilteringTest.test.b";
private final String assertionReceiver = "mock:errors";
private final CountDownLatch latch = new CountDownLatch(2);
// Sends a message with a mix of filtered and passing headers; the route's
// processors (OutHeaderChecker / InHeaderChecker) assert filtering, and any
// AssertionError is routed to the mock error endpoint, which must stay empty.
@Test
public void testHeaderFilters() throws Exception {
    MockEndpoint errors = this.resolveMandatoryEndpoint(assertionReceiver, MockEndpoint.class);
    errors.expectedMessageCount(0);
    template.send(testQueueEndpointA, ExchangePattern.InOnly, exchange -> {
        exchange.getIn().setHeader("org.foo.jms", 10000);
        exchange.getIn().setHeader("org.foo.test.jms", 20000);
        exchange.getIn().setHeader("testheader", 1020);
        exchange.getIn().setHeader("anotherheader", 1030);
        exchange.getIn().setHeader("JMSXAppID", "myApp");
    });
    // make sure that the latch reached zero and that timeout did not elapse
    assertTrue(latch.await(2, TimeUnit.SECONDS));
    errors.assertIsSatisfied();
}
// The JMS component name ("jms") used to build the test endpoint URIs.
@Override
public String getComponentName() {
    return componentName;
}
// Configures the JMS component with a header filter strategy:
// "testheader" filtered inbound, "anotherheader" filtered outbound,
// plus a regex in-filter pattern matching org.apache.camel*test* style names.
@Override
protected JmsComponent setupComponent(CamelContext camelContext, ArtemisService service, String componentName) {
    final JmsComponent component = super.setupComponent(camelContext, service, componentName);
    JmsHeaderFilterStrategy filter = new JmsHeaderFilterStrategy();
    filter.getInFilter().add("testheader");
    filter.getOutFilter().add("anotherheader");
    // add a regular expression pattern filter, notice that dots are encoded to '_DOT_' in jms headers
    filter.setInFilterPattern(IN_FILTER_PATTERN);
    component.setHeaderFilterStrategy(filter);
    return component;
}
// Route: A -> (assert out-filtering) -> B -> (assert in-filtering).
// Failed assertions are caught via onException and sent to the mock endpoint.
@Override
protected RouteBuilder createRouteBuilder() {
    return new RouteBuilder() {
        public void configure() {
            onException(AssertionError.class).to(assertionReceiver);
            from(testQueueEndpointA).process(new OutHeaderChecker()).to(testQueueEndpointB);
            from(testQueueEndpointB).process(new InHeaderChecker());
        }
    };
}
// Exposes the shared JUnit 5 extension that manages the Camel context lifecycle.
@Override
public CamelContextExtension getCamelContextExtension() {
    return camelContextExtension;
}
// Pulls the context and templates from the extension before each test.
@BeforeEach
void setUpRequirements() {
    context = camelContextExtension.getContext();
    template = camelContextExtension.getProducerTemplate();
    consumer = camelContextExtension.getConsumerTemplate();
}
|
JmsHeaderFilteringTest
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/action/support/RetryableAction.java
|
{
"start": 1445,
"end": 4863
}
|
class ____<Response> {
private final Logger logger;
private final AtomicBoolean isDone = new AtomicBoolean(false);
private final ThreadPool threadPool;
private final long initialDelayMillis;
private final long maxDelayBoundMillis;
private final long timeoutMillis;
private final long startMillis;
private final ActionListener<Response> finalListener;
private final Executor executor;
private volatile Scheduler.ScheduledCancellable retryTask;
/**
 * Convenience constructor with an unbounded maximum delay
 * (delegates with {@link TimeValue#MAX_VALUE} as the delay bound).
 */
public RetryableAction(
    Logger logger,
    ThreadPool threadPool,
    TimeValue initialDelay,
    TimeValue timeoutValue,
    ActionListener<Response> listener,
    Executor executor
) {
    this(logger, threadPool, initialDelay, TimeValue.MAX_VALUE, timeoutValue, listener, executor);
}
/**
 * Full constructor.
 *
 * @param initialDelay first retry delay; must be at least 1 millisecond
 * @param maxDelayBound upper bound for the growing retry delay; must not be
 *        smaller than {@code initialDelay}
 * @param timeoutValue total time budget after which retries stop
 * @throws IllegalArgumentException if the delay arguments violate the above
 */
public RetryableAction(
    Logger logger,
    ThreadPool threadPool,
    TimeValue initialDelay,
    TimeValue maxDelayBound,
    TimeValue timeoutValue,
    ActionListener<Response> listener,
    Executor executor
) {
    this.logger = logger;
    this.threadPool = threadPool;
    this.initialDelayMillis = initialDelay.getMillis();
    this.maxDelayBoundMillis = maxDelayBound.getMillis();
    // Validate after capturing the millis so the messages show original values.
    if (initialDelayMillis < 1) {
        throw new IllegalArgumentException("Initial delay was less than 1 millisecond: " + initialDelay);
    }
    if (maxDelayBoundMillis < initialDelayMillis) {
        throw new IllegalArgumentException(
            "Max delay bound [" + maxDelayBound + "] cannot be less than the initial delay [" + initialDelay + "]"
        );
    }
    this.timeoutMillis = timeoutValue.getMillis();
    // Start of the timeout window, measured against the thread pool's clock.
    this.startMillis = threadPool.relativeTimeInMillis();
    // assertOnce guards against the final listener being notified twice.
    this.finalListener = ActionListener.assertOnce(listener);
    this.executor = executor;
}
// Kicks off the first attempt on the configured executor; subsequent retries
// are driven by the RetryingListener created here.
public void run() {
    final RetryingListener retryingListener = new RetryingListener(initialDelayMillis, null);
    final Runnable runnable = createRunnable(retryingListener);
    executor.execute(runnable);
}
// Cancels the action: flips isDone exactly once, cancels any scheduled retry,
// runs the onFinished hook, then fails the final listener with the given cause.
public void cancel(Exception e) {
    if (isDone.compareAndSet(false, true)) {
        // Snapshot the volatile field — a retry may be racing to clear it.
        Scheduler.ScheduledCancellable localRetryTask = this.retryTask;
        if (localRetryTask != null) {
            localRetryTask.cancel();
        }
        onFinished();
        finalListener.onFailure(e);
    }
}
// Wraps one attempt: clears the pending retry handle, then invokes tryAction
// unless the whole action has already completed or been cancelled.
private Runnable createRunnable(RetryingListener retryingListener) {
    return new ActionRunnable<>(retryingListener) {
        @Override
        protected void doRun() {
            retryTask = null;
            // It is possible that the task was cancelled in between the retry being dispatched and now
            if (isDone.get() == false) {
                tryAction(listener);
            }
        }

        @Override
        public void onRejection(Exception e) {
            retryTask = null;
            // Executor rejection is treated like any other attempt failure.
            onFailure(e);
        }
    };
}
/** Performs one attempt, reporting success or failure to the given listener. */
public abstract void tryAction(ActionListener<Response> listener);

/** Decides whether the given failure should trigger another attempt. */
public abstract boolean shouldRetry(Exception e);

/** Exponential backoff: doubles the bound, capped at maxDelayBoundMillis. */
protected long calculateDelayBound(long previousDelayBound) {
    return Math.min(previousDelayBound * 2, maxDelayBoundMillis);
}

/** Hook invoked once when the action finishes or is cancelled; no-op by default. */
public void onFinished() {}
private
|
RetryableAction
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/processor/BodyInPreCompleteSizeAggregatingStrategy.java
|
{
"start": 927,
"end": 1950
}
|
/**
 * Aggregation strategy that joins String bodies with {@code "+"} and uses
 * pre-completion to close a group at a size that depends on the correlation
 * key: key "123" completes after more than 2 messages, "456" after more than
 * 3, and any other key completes immediately.
 */
class BodyInPreCompleteSizeAggregatingStrategy implements AggregationStrategy {

    @Override
    public Exchange aggregate(Exchange oldExchange, Exchange newExchange) {
        // First message of the group — nothing accumulated yet.
        if (oldExchange == null) {
            return newExchange;
        }
        // Append the incoming body to the accumulated one, separated by '+'.
        String merged = oldExchange.getIn().getBody(String.class)
                + "+"
                + newExchange.getIn().getBody(String.class);
        oldExchange.getIn().setBody(merged);
        return oldExchange;
    }

    @Override
    public boolean canPreComplete() {
        // Opt in: preComplete() is consulted before each aggregation.
        return true;
    }

    @Override
    public boolean preComplete(Exchange oldExchange, Exchange newExchange) {
        String key = newExchange.getProperty(Exchange.AGGREGATED_CORRELATION_KEY, String.class);
        int size = newExchange.getProperty(Exchange.AGGREGATED_SIZE, int.class);
        // Per-key completion thresholds; unknown keys complete right away.
        // equals() is called on the literal so a null key is handled safely.
        if ("123".equals(key)) {
            return size > 2;
        }
        if ("456".equals(key)) {
            return size > 3;
        }
        return true;
    }
}
|
BodyInPreCompleteSizeAggregatingStrategy
|
java
|
quarkusio__quarkus
|
extensions/qute/runtime/src/main/java/io/quarkus/qute/runtime/PropertyNotFoundNoop.java
|
{
"start": 183,
"end": 537
}
|
/**
 * {@link ResultMapper} that silently renders unresolved property expressions
 * as an empty string instead of surfacing an error marker.
 */
class PropertyNotFoundNoop implements ResultMapper {

    @Override
    public boolean appliesTo(Origin origin, Object result) {
        // Only applies to "not found" results produced by the engine.
        return Results.isNotFound(result);
    }

    @Override
    public String map(Object result, Expression expression) {
        // Swallow the missing property — emit nothing.
        return "";
    }

    @Override
    public int getPriority() {
        return 10;
    }
}
|
PropertyNotFoundNoop
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetReservationsHomeSubClusterRequestPBImpl.java
|
{
"start": 1419,
"end": 2657
}
|
/**
 * Protocol-buffer backed implementation of
 * {@link GetReservationsHomeSubClusterRequest}. Follows the standard YARN
 * PBImpl pattern: the object is either backed by an immutable proto
 * ({@code viaProto == true}) or by a mutable builder.
 */
class ____
    extends GetReservationsHomeSubClusterRequest {

  // Backing proto; starts as the default instance.
  private GetReservationsHomeSubClusterRequestProto proto =
      GetReservationsHomeSubClusterRequestProto.getDefaultInstance();
  // Mutable builder, only populated when constructed empty.
  private GetReservationsHomeSubClusterRequestProto.Builder builder = null;
  // True when 'proto' is authoritative; false while building.
  private boolean viaProto = false;

  /** Creates an empty request backed by a fresh builder. */
  public GetReservationsHomeSubClusterRequestPBImpl() {
    builder = GetReservationsHomeSubClusterRequestProto.newBuilder();
  }

  /** Wraps an existing proto; the instance becomes proto-backed. */
  public GetReservationsHomeSubClusterRequestPBImpl(
      GetReservationsHomeSubClusterRequestProto proto) {
    this.proto = proto;
    viaProto = true;
  }

  /** Builds (if needed) and returns the underlying proto, freezing this impl. */
  public GetReservationsHomeSubClusterRequestProto getProto() {
    proto = viaProto ? proto : builder.build();
    viaProto = true;
    return proto;
  }

  @Override
  public int hashCode() {
    return getProto().hashCode();
  }

  @Override
  public boolean equals(Object other) {
    if (other == null) {
      return false;
    }
    // NOTE(review): isAssignableFrom in this direction makes equals asymmetric
    // for subclasses; kept as-is since it matches the other PBImpl classes.
    if (other.getClass().isAssignableFrom(this.getClass())) {
      return this.getProto().equals(this.getClass().cast(other).getProto());
    }
    return false;
  }

  @Override
  public String toString() {
    return TextFormat.shortDebugString(getProto());
  }
}
|
GetReservationsHomeSubClusterRequestPBImpl
|
java
|
apache__dubbo
|
dubbo-common/src/main/java/org/apache/dubbo/common/logger/helpers/FormattingTuple.java
|
{
"start": 1406,
"end": 2079
}
|
/**
 * Holds the result of formatting a parameterized log message: the formatted
 * message text, the original argument array, and an optional {@link Throwable}
 * extracted from the arguments.
 *
 * <p>Instances are immutable. Fixes over the previous version: fields are
 * {@code final}, the shared {@code NULL} constant is {@code final}, and
 * modifier order follows convention ({@code public static} rather than
 * {@code static public}).
 */
class FormattingTuple {

    /** Shared instance representing a {@code null} message with no arguments or throwable. */
    public static final FormattingTuple NULL = new FormattingTuple(null);

    private final String message;
    private final Throwable throwable;
    private final Object[] argArray;

    /**
     * Creates a tuple carrying only a message.
     *
     * @param message the formatted message, may be {@code null}
     */
    public FormattingTuple(String message) {
        this(message, null, null);
    }

    /**
     * Creates a fully populated tuple.
     *
     * @param message   the formatted message, may be {@code null}
     * @param argArray  the original format arguments, may be {@code null}
     * @param throwable the throwable extracted from the arguments, may be {@code null}
     */
    public FormattingTuple(String message, Object[] argArray, Throwable throwable) {
        this.message = message;
        this.throwable = throwable;
        this.argArray = argArray;
    }

    /** @return the formatted message, possibly {@code null} */
    public String getMessage() {
        return message;
    }

    /** @return the original argument array, possibly {@code null}; not copied */
    public Object[] getArgArray() {
        return argArray;
    }

    /** @return the extracted throwable, possibly {@code null} */
    public Throwable getThrowable() {
        return throwable;
    }
}
|
FormattingTuple
|
java
|
spring-projects__spring-boot
|
module/spring-boot-jms/src/main/java/org/springframework/boot/jms/autoconfigure/JmsPoolConnectionFactoryFactory.java
|
{
"start": 970,
"end": 2599
}
|
/**
 * Factory that wraps a plain {@link ConnectionFactory} in a
 * {@link JmsPoolConnectionFactory} configured from
 * {@link JmsPoolConnectionFactoryProperties}.
 */
class ____ {

    private final JmsPoolConnectionFactoryProperties properties;

    public JmsPoolConnectionFactoryFactory(JmsPoolConnectionFactoryProperties properties) {
        this.properties = properties;
    }

    /**
     * Create a {@link JmsPoolConnectionFactory} based on the specified
     * {@link ConnectionFactory}.
     * @param connectionFactory the connection factory to wrap
     * @return a pooled connection factory
     */
    public JmsPoolConnectionFactory createPooledConnectionFactory(ConnectionFactory connectionFactory) {
        JmsPoolConnectionFactory pooledConnectionFactory = new JmsPoolConnectionFactory();
        pooledConnectionFactory.setConnectionFactory(connectionFactory);
        pooledConnectionFactory.setBlockIfSessionPoolIsFull(this.properties.isBlockIfFull());
        // Optional settings are only applied when explicitly configured, so
        // the pool's own defaults are preserved otherwise.
        if (this.properties.getBlockIfFullTimeout() != null) {
            pooledConnectionFactory
                .setBlockIfSessionPoolIsFullTimeout(this.properties.getBlockIfFullTimeout().toMillis());
        }
        if (this.properties.getIdleTimeout() != null) {
            // Pool API takes an int of milliseconds.
            pooledConnectionFactory.setConnectionIdleTimeout((int) this.properties.getIdleTimeout().toMillis());
        }
        pooledConnectionFactory.setMaxConnections(this.properties.getMaxConnections());
        pooledConnectionFactory.setMaxSessionsPerConnection(this.properties.getMaxSessionsPerConnection());
        if (this.properties.getTimeBetweenExpirationCheck() != null) {
            pooledConnectionFactory
                .setConnectionCheckInterval(this.properties.getTimeBetweenExpirationCheck().toMillis());
        }
        pooledConnectionFactory.setUseAnonymousProducers(this.properties.isUseAnonymousProducers());
        return pooledConnectionFactory;
    }
}
|
JmsPoolConnectionFactoryFactory
|
java
|
alibaba__nacos
|
naming/src/main/java/com/alibaba/nacos/naming/core/v2/client/manager/impl/PersistentIpPortClientManager.java
|
{
"start": 1931,
"end": 5626
}
|
/**
 * {@link ClientManager} for persistent ip:port based clients. Because
 * persistent instances rely on the Raft protocol, every node is considered
 * responsible for every client (see {@link #isResponsibleClient}).
 */
class ____ implements ClientManager {

    private final ClientFactory<IpPortBasedClient> clientFactory;

    // Replaced wholesale in loadFromSnapshot, hence not final.
    private ConcurrentMap<String, IpPortBasedClient> clients = new ConcurrentHashMap<>();

    public PersistentIpPortClientManager() {
        clientFactory = ClientFactoryHolder.getInstance().findClientFactory(ClientConstants.PERSISTENT_IP_PORT);
    }

    @Override
    public boolean clientConnected(String clientId, ClientAttributes attributes) {
        return clientConnected(clientFactory.newClient(clientId, attributes));
    }

    @Override
    public boolean clientConnected(final Client client) {
        // computeIfAbsent ensures init() runs at most once per client id.
        clients.computeIfAbsent(client.getClientId(), s -> {
            Loggers.SRV_LOG.info("Client connection {} connect", client.getClientId());
            IpPortBasedClient ipPortBasedClient = (IpPortBasedClient) client;
            ipPortBasedClient.init();
            return ipPortBasedClient;
        });
        return true;
    }

    @Override
    public boolean syncClientConnected(String clientId, ClientAttributes attributes) {
        // Sync-connect is not applicable to persistent clients.
        throw new UnsupportedOperationException("");
    }

    @Override
    public boolean clientDisconnected(String clientId) {
        Loggers.SRV_LOG.info("Persistent client connection {} disconnect", clientId);
        IpPortBasedClient client = clients.remove(clientId);
        if (null == client) {
            return true;
        }
        // Event order matters: disconnect event, release resources, then release event.
        boolean isResponsible = isResponsibleClient(client);
        NotifyCenter.publishEvent(new ClientEvent.ClientDisconnectEvent(client, isResponsible));
        client.release();
        NotifyCenter.publishEvent(new ClientOperationEvent.ClientReleaseEvent(client, isResponsible));
        return true;
    }

    @Override
    public Client getClient(String clientId) {
        return clients.get(clientId);
    }

    @Override
    public boolean contains(String clientId) {
        return clients.containsKey(clientId);
    }

    @Override
    public Collection<String> allClientId() {
        // client id is unique in the application
        // use set to replace array list
        // it will improve the performance
        Collection<String> clientIds = new HashSet<>(clients.size());
        clientIds.addAll(clients.keySet());
        return clientIds;
    }

    /**
     * Because the persistence instance relies on the Raft algorithm, any node can process the request.
     *
     * @param client client
     * @return true
     */
    @Override
    public boolean isResponsibleClient(Client client) {
        return true;
    }

    @Override
    public boolean verifyClient(DistroClientVerifyInfo verifyData) {
        // Distro verification does not apply to Raft-backed persistent clients.
        throw new UnsupportedOperationException("");
    }

    /** Read-only view of the current client map. */
    public Map<String, IpPortBasedClient> showClients() {
        return Collections.unmodifiableMap(clients);
    }

    /**
     * Load persistent clients from snapshot.
     *
     * @param clients clients snapshot
     */
    public void loadFromSnapshot(ConcurrentMap<String, IpPortBasedClient> clients) {
        // Swap the map reference first, then clear the old one.
        ConcurrentMap<String, IpPortBasedClient> oldClients = this.clients;
        this.clients = clients;
        oldClients.clear();
    }

    /**
     * add client directly.
     *
     * @param client client
     */
    public void addSyncClient(IpPortBasedClient client) {
        clients.put(client.getClientId(), client);
    }

    /**
     * remove client.
     *
     * @param clientId client id
     */
    public void removeAndRelease(String clientId) {
        IpPortBasedClient client = clients.remove(clientId);
        if (client != null) {
            client.release();
        }
    }
}
|
PersistentIpPortClientManager
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundToDouble8Evaluator.java
|
{
"start": 1094,
"end": 4679
}
|
class ____ implements EvalOperator.ExpressionEvaluator {
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(RoundToDouble8Evaluator.class);
private final Source source;
private final EvalOperator.ExpressionEvaluator field;
private final double p0;
private final double p1;
private final double p2;
private final double p3;
private final double p4;
private final double p5;
private final double p6;
private final double p7;
private final DriverContext driverContext;
private Warnings warnings;
// Generated evaluator constructor: captures the input expression evaluator,
// the 8 fixed rounding points p0..p7, and the driver context.
public RoundToDouble8Evaluator(Source source, EvalOperator.ExpressionEvaluator field, double p0,
    double p1, double p2, double p3, double p4, double p5, double p6, double p7,
    DriverContext driverContext) {
  this.source = source;
  this.field = field;
  this.p0 = p0;
  this.p1 = p1;
  this.p2 = p2;
  this.p3 = p3;
  this.p4 = p4;
  this.p5 = p5;
  this.p6 = p6;
  this.p7 = p7;
  this.driverContext = driverContext;
}
// Evaluates the input; takes the fast vector path when the block has no
// nulls/multi-values, otherwise the null-aware block path.
@Override
public Block eval(Page page) {
  try (DoubleBlock fieldBlock = (DoubleBlock) field.eval(page)) {
    DoubleVector fieldVector = fieldBlock.asVector();
    if (fieldVector == null) {
      return eval(page.getPositionCount(), fieldBlock);
    }
    return eval(page.getPositionCount(), fieldVector).asBlock();
  }
}
// Shallow size of this evaluator plus its child evaluator's base usage.
@Override
public long baseRamBytesUsed() {
  long baseRamBytesUsed = BASE_RAM_BYTES_USED;
  baseRamBytesUsed += field.baseRamBytesUsed();
  return baseRamBytesUsed;
}
// Slow path: handles null positions and rejects multi-valued positions
// (those emit a warning and become null).
public DoubleBlock eval(int positionCount, DoubleBlock fieldBlock) {
  try(DoubleBlock.Builder result = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) {
    position: for (int p = 0; p < positionCount; p++) {
      switch (fieldBlock.getValueCount(p)) {
        case 0:
          result.appendNull();
          continue position;
        case 1:
          break;
        default:
          warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value"));
          result.appendNull();
          continue position;
      }
      double field = fieldBlock.getDouble(fieldBlock.getFirstValueIndex(p));
      result.appendDouble(RoundToDouble.process(field, this.p0, this.p1, this.p2, this.p3, this.p4, this.p5, this.p6, this.p7));
    }
    return result.build();
  }
}
// Fast path: every position is a single non-null value.
public DoubleVector eval(int positionCount, DoubleVector fieldVector) {
  try(DoubleVector.FixedBuilder result = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) {
    position: for (int p = 0; p < positionCount; p++) {
      double field = fieldVector.getDouble(p);
      result.appendDouble(p, RoundToDouble.process(field, this.p0, this.p1, this.p2, this.p3, this.p4, this.p5, this.p6, this.p7));
    }
    return result.build();
  }
}
// Debug representation including all configured rounding points.
@Override
public String toString() {
  return "RoundToDouble8Evaluator[" + "field=" + field + ", p0=" + p0 + ", p1=" + p1 + ", p2=" + p2 + ", p3=" + p3 + ", p4=" + p4 + ", p5=" + p5 + ", p6=" + p6 + ", p7=" + p7 + "]";
}
// Releases the child evaluator; must not throw.
@Override
public void close() {
  Releasables.closeExpectNoException(field);
}
// Lazily builds the Warnings sink from the source location; created on first
// use so evaluators that never warn pay nothing.
private Warnings warnings() {
  if (warnings == null) {
    this.warnings = Warnings.createWarnings(
        driverContext.warningsMode(),
        source.source().getLineNumber(),
        source.source().getColumnNumber(),
        source.text()
      );
  }
  return warnings;
}
static
|
RoundToDouble8Evaluator
|
java
|
spring-projects__spring-boot
|
module/spring-boot-data-redis/src/main/java/org/springframework/boot/data/redis/autoconfigure/LettuceClientConfigurationBuilderCustomizer.java
|
{
"start": 1369,
"end": 1642
}
|
/**
 * Callback interface for customizing the
 * {@link LettuceClientConfigurationBuilder} before the client configuration
 * is built.
 */
interface ____ {

    /**
     * Customize the {@link LettuceClientConfigurationBuilder}.
     * @param clientConfigurationBuilder the builder to customize
     */
    void customize(LettuceClientConfigurationBuilder clientConfigurationBuilder);

}
|
LettuceClientConfigurationBuilderCustomizer
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/jobgraph/IntermediateDataSet.java
|
{
"start": 1332,
"end": 5600
}
|
/**
 * An intermediate data set produced by one {@link JobVertex} and consumed by
 * one or more {@link JobEdge}s. All consumers must share the same
 * distribution pattern, broadcast flag, and forward flag; these are locked in
 * by the first consumer (or by {@link #configure}) and checked for every
 * subsequent one.
 */
class ____ implements java.io.Serializable {

    private static final long serialVersionUID = 1L;

    private final IntermediateDataSetID id; // the identifier

    private final JobVertex producer; // the operation that produced this data set

    // All consumers must have the same partitioner and parallelism
    private final List<JobEdge> consumers = new ArrayList<>();

    // The type of partition to use at runtime
    private final ResultPartitionType resultType;

    // Null until the first consumer (or configure()) sets it.
    private DistributionPattern distributionPattern;

    private boolean isBroadcast;

    private boolean isForward;

    /** The number of job edges that need to be created. */
    private int numJobEdgesToCreate;

    // --------------------------------------------------------------------------------------------

    public IntermediateDataSet(
            IntermediateDataSetID id, ResultPartitionType resultType, JobVertex producer) {
        this.id = checkNotNull(id);
        this.producer = checkNotNull(producer);
        this.resultType = checkNotNull(resultType);
    }

    // --------------------------------------------------------------------------------------------

    public IntermediateDataSetID getId() {
        return id;
    }

    public JobVertex getProducer() {
        return producer;
    }

    public List<JobEdge> getConsumers() {
        return this.consumers;
    }

    /** True once every announced edge (numJobEdgesToCreate) has been added. */
    public boolean areAllConsumerVerticesCreated() {
        return numJobEdgesToCreate == consumers.size();
    }

    public boolean isBroadcast() {
        return isBroadcast;
    }

    public boolean isForward() {
        return isForward;
    }

    public DistributionPattern getDistributionPattern() {
        return distributionPattern;
    }

    public ResultPartitionType getResultType() {
        return resultType;
    }

    // --------------------------------------------------------------------------------------------

    /**
     * Adds a consuming edge. The first edge fixes the output pattern; later
     * edges must match it exactly.
     */
    public void addConsumer(JobEdge edge) {
        // sanity check
        checkState(id.equals(edge.getSourceId()), "Incompatible dataset id.");

        if (consumers.isEmpty() && distributionPattern == null) {
            distributionPattern = edge.getDistributionPattern();
            isBroadcast = edge.isBroadcast();
            isForward = edge.isForward();
        } else {
            checkState(
                    distributionPattern == edge.getDistributionPattern(),
                    "Incompatible distribution pattern.");
            checkState(isBroadcast == edge.isBroadcast(), "Incompatible broadcast type.");
            checkState(isForward == edge.isForward(), "Incompatible forward type.");
        }
        consumers.add(edge);
    }

    /**
     * Pre-sets (or re-asserts) the output pattern before any edges exist;
     * must agree with any previously configured pattern.
     */
    public void configure(
            DistributionPattern distributionPattern, boolean isBroadcast, boolean isForward) {
        checkState(consumers.isEmpty(), "The output job edges have already been added.");
        if (this.distributionPattern == null) {
            this.distributionPattern = distributionPattern;
            this.isBroadcast = isBroadcast;
            this.isForward = isForward;
        } else {
            checkState(
                    this.distributionPattern == distributionPattern,
                    "Incompatible distribution pattern.");
            checkState(this.isBroadcast == isBroadcast, "Incompatible broadcast type.");
            checkState(this.isForward == isForward, "Incompatible forward type.");
        }
    }

    /**
     * Overwrites the output pattern; only legal before edges are added and
     * when exactly one edge is expected (i.e. the output is not reused).
     */
    public void updateOutputPattern(
            DistributionPattern distributionPattern, boolean isBroadcast, boolean isForward) {
        checkState(consumers.isEmpty(), "The output job edges have already been added.");
        checkState(
                numJobEdgesToCreate == 1,
                "Modification is not allowed when the subscribing output is reused.");

        this.distributionPattern = distributionPattern;
        this.isBroadcast = isBroadcast;
        this.isForward = isForward;
    }

    /** Announces one more edge that will be added later via addConsumer. */
    public void increaseNumJobEdgesToCreate() {
        this.numJobEdgesToCreate++;
    }

    // --------------------------------------------------------------------------------------------

    @Override
    public String toString() {
        return "Intermediate Data Set (" + id + ")";
    }
}
|
IntermediateDataSet
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesMethod.java
|
{
"start": 823,
"end": 892
}
|
enum ____ the methods for calculating percentiles
*/
public
|
representing
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java
|
{
"start": 2220,
"end": 27305
}
|
class ____ extends AbstractSortTestCase<GeoDistanceSortBuilder> {
// Supplies the randomized builder used by the shared AbstractSortTestCase machinery.
@Override
protected GeoDistanceSortBuilder createTestItem() {
    return randomGeoDistanceSortBuilder();
}
// Builds a random GeoDistanceSortBuilder, choosing one of three point-input
// styles (geohashes, lat/lon pair, point array) and randomizing each optional
// setting independently.
public static GeoDistanceSortBuilder randomGeoDistanceSortBuilder() {
    String fieldName = randomAlphaOfLengthBetween(1, 10);
    GeoDistanceSortBuilder result = null;
    int id = randomIntBetween(0, 2);
    switch (id) {
        case 0 -> {
            // Geohash-string constructor.
            int count = randomIntBetween(1, 10);
            String[] geohashes = new String[count];
            for (int i = 0; i < count; i++) {
                geohashes[i] = RandomGeoGenerator.randomPoint(random()).geohash();
            }
            result = new GeoDistanceSortBuilder(fieldName, geohashes);
        }
        case 1 -> {
            // Single lat/lon constructor.
            GeoPoint pt = RandomGeoGenerator.randomPoint(random());
            result = new GeoDistanceSortBuilder(fieldName, pt.getLat(), pt.getLon());
        }
        // GeoPoint-array constructor.
        case 2 -> result = new GeoDistanceSortBuilder(fieldName, points(new GeoPoint[0]));
        default -> throw new IllegalStateException("one of three geo initialisation strategies must be used");
    }
    if (randomBoolean()) {
        result.geoDistance(geoDistance(result.geoDistance()));
    }
    if (randomBoolean()) {
        result.unit(randomValueOtherThan(result.unit(), () -> randomFrom(DistanceUnit.values())));
    }
    if (randomBoolean()) {
        result.order(randomFrom(SortOrder.values()));
    }
    if (randomBoolean()) {
        // SUM is not a valid geo-distance sort mode, so it is excluded.
        result.sortMode(randomValueOtherThan(SortMode.SUM, () -> randomFrom(SortMode.values())));
    }
    if (randomBoolean()) {
        result.validation(randomValueOtherThan(result.validation(), () -> randomFrom(GeoValidationMethod.values())));
    }
    if (randomBoolean()) {
        NestedSortBuilder nestedSort = new NestedSortBuilder("path");
        nestedSort.setFilter(new MatchAllQueryBuilder());
        result.setNestedSort(nestedSort);
    }
    if (randomBoolean()) {
        result.ignoreUnmapped(result.ignoreUnmapped() == false);
    }
    return result;
}
// Maps the field name "double" to a numeric type; everything else is a geo_point.
@Override
protected MappedFieldType provideMappedFieldType(String name) {
    if (name.equals("double")) {
        return new NumberFieldMapper.NumberFieldType(name, NumberFieldMapper.NumberType.DOUBLE);
    }
    return new GeoPointFieldMapper.GeoPointFieldType(name);
}
// Generates a random point array guaranteed to differ from 'original'
// (loops until the generated array is not deep-equal to it).
private static GeoPoint[] points(GeoPoint[] original) {
    GeoPoint[] result = null;
    while (result == null || Arrays.deepEquals(original, result)) {
        int count = randomIntBetween(1, 10);
        result = new GeoPoint[count];
        for (int i = 0; i < count; i++) {
            result[i] = RandomGeoGenerator.randomPoint(random());
        }
    }
    return result;
}
// Picks a random GeoDistance different from 'original' (by ordinal).
private static GeoDistance geoDistance(GeoDistance original) {
    int id = -1;
    while (id == -1 || (original != null && original.ordinal() == id)) {
        id = randomIntBetween(0, GeoDistance.values().length - 1);
    }
    return GeoDistance.values()[id];
}
// Produces a copy that differs from the original in exactly one randomly
// chosen property — used by equals/hashCode round-trip testing.
@Override
protected GeoDistanceSortBuilder mutate(GeoDistanceSortBuilder original) throws IOException {
    GeoDistanceSortBuilder result = new GeoDistanceSortBuilder(original);
    int parameter = randomIntBetween(0, 8);
    switch (parameter) {
        case 0:
            // Append points until the point list actually differs.
            while (Arrays.deepEquals(original.points(), result.points())) {
                GeoPoint pt = RandomGeoGenerator.randomPoint(random());
                result.point(pt.getLat(), pt.getLon());
            }
            break;
        case 1:
            result.points(points(original.points()));
            break;
        case 2:
            result.geoDistance(geoDistance(original.geoDistance()));
            break;
        case 3:
            result.unit(randomValueOtherThan(result.unit(), () -> randomFrom(DistanceUnit.values())));
            break;
        case 4:
            result.order(randomValueOtherThan(original.order(), () -> randomFrom(SortOrder.values())));
            break;
        case 5:
            // Exclude SUM (unsupported) and the current mode.
            result.sortMode(
                randomValueOtherThanMany(Arrays.asList(SortMode.SUM, result.sortMode())::contains, () -> randomFrom(SortMode.values()))
            );
            break;
        case 6:
            result.setNestedSort(
                randomValueOtherThan(original.getNestedSort(), () -> NestedSortBuilderTests.createRandomNestedSort(3))
            );
            break;
        case 7:
            result.validation(randomValueOtherThan(result.validation(), () -> randomFrom(GeoValidationMethod.values())));
            break;
        case 8:
            result.ignoreUnmapped(result.ignoreUnmapped() == false);
            break;
    }
    return result;
}
// Only order (reverse flag) and field name are generically assertable here.
@Override
protected void sortFieldAssertions(GeoDistanceSortBuilder builder, SortField sortField, DocValueFormat format) throws IOException {
    assertEquals(builder.order() == SortOrder.ASC ? false : true, sortField.getReverse());
    assertEquals(builder.fieldName(), sortField.getField());
}
// The Java-API setter must reject SortMode.SUM for geo-distance sorts.
public void testSortModeSumIsRejectedInSetter() {
    GeoDistanceSortBuilder builder = new GeoDistanceSortBuilder("testname", -1, -1);
    GeoPoint point = RandomGeoGenerator.randomPoint(random());
    builder.point(point.getLat(), point.getLon());
    try {
        builder.sortMode(SortMode.SUM);
        fail("sort mode sum should not be supported");
    } catch (IllegalArgumentException e) {
        // all good
    }
}
// The JSON parser must likewise reject mode SUM, with a descriptive message.
public void testSortModeSumIsRejectedInJSON() throws IOException {
    String json = """
        {
          "testname" : [ {
            "lat" : -6.046997540714173,
            "lon" : -51.94128329747579
          } ],
          "unit" : "m",
          "distance_type" : "arc",
          "mode" : "SUM"
        }""";
    try (XContentParser itemParser = createParser(JsonXContent.jsonXContent, json)) {
        itemParser.nextToken();
        IllegalArgumentException e = expectThrows(
            IllegalArgumentException.class,
            () -> GeoDistanceSortBuilder.fromXContent(itemParser, "")
        );
        assertEquals("sort_mode [sum] isn't supported for sorting by geo distance", e.getMessage());
    }
}
// Geohash strings in the JSON must be decoded to the expected lat/lon pairs.
public void testGeoDistanceSortCanBeParsedFromGeoHash() throws IOException {
    String json = """
        {
            "VDcvDuFjE" : [ "7umzzv8eychg", "dmdgmt5z13uw", "ezu09wxw6v4c", "kc7s3515p6k6", "jgeuvjwrmfzn", "kcpcfj7ruyf8" ],
            "unit" : "m",
            "distance_type" : "arc",
            "mode" : "MAX",
            "nested" : {
              "filter" : {
                "ids" : {
                  "values" : [ ],
                  "boost" : 5.711116
                }
              }
            },
            "validation_method" : "STRICT"
          }""";
    try (XContentParser itemParser = createParser(JsonXContent.jsonXContent, json)) {
        itemParser.nextToken();
        GeoDistanceSortBuilder result = GeoDistanceSortBuilder.fromXContent(itemParser, json);
        // Expected decoded coordinates of the six geohashes above.
        assertEquals(
            "[-19.700583312660456, -2.8225036337971687, "
                + "31.537466906011105, -74.63590376079082, "
                + "43.71844606474042, -5.548660643398762, "
                + "-37.20467280596495, 38.71751043945551, "
                + "-69.44606635719538, 84.25200328230858, "
                + "-39.03717711567879, 44.74099852144718]",
            Arrays.toString(result.points())
        );
    }
}
// Exercises every accepted point syntax — arrays of [lon,lat] arrays, GeoPoint
// values, "lat,lon" strings, geohashes, a bare [lon,lat] pair, single values,
// and a mixed array — and verifies each parses without throwing.
public void testGeoDistanceSortParserManyPointsNoException() throws Exception {
    // array of [lon, lat] arrays
    XContentBuilder sortBuilder = jsonBuilder();
    sortBuilder.startObject();
    sortBuilder.startArray("location");
    sortBuilder.startArray().value(1.2).value(3).endArray().startArray().value(5).value(6).endArray();
    sortBuilder.endArray();
    sortBuilder.field("order", "desc");
    sortBuilder.field("unit", "km");
    sortBuilder.field("mode", "max");
    sortBuilder.endObject();
    parse(sortBuilder);

    // array of GeoPoint values
    sortBuilder = jsonBuilder();
    sortBuilder.startObject();
    sortBuilder.startArray("location");
    sortBuilder.value(new GeoPoint(1.2, 3)).value(new GeoPoint(1.2, 3));
    sortBuilder.endArray();
    sortBuilder.field("order", "desc");
    sortBuilder.field("unit", "km");
    sortBuilder.field("mode", "max");
    sortBuilder.endObject();
    parse(sortBuilder);

    // array of "lat,lon" strings
    sortBuilder = jsonBuilder();
    sortBuilder.startObject();
    sortBuilder.startArray("location");
    sortBuilder.value("1,2").value("3,4");
    sortBuilder.endArray();
    sortBuilder.field("order", "desc");
    sortBuilder.field("unit", "km");
    sortBuilder.field("mode", "max");
    sortBuilder.endObject();
    parse(sortBuilder);

    // array of geohash strings
    sortBuilder = jsonBuilder();
    sortBuilder.startObject();
    sortBuilder.startArray("location");
    sortBuilder.value("s3y0zh7w1z0g").value("s6wjr4et3f8v");
    sortBuilder.endArray();
    sortBuilder.field("order", "desc");
    sortBuilder.field("unit", "km");
    sortBuilder.field("mode", "max");
    sortBuilder.endObject();
    parse(sortBuilder);

    // single bare [lon, lat] pair
    sortBuilder = jsonBuilder();
    sortBuilder.startObject();
    sortBuilder.startArray("location");
    sortBuilder.value(1.2).value(3);
    sortBuilder.endArray();
    sortBuilder.field("order", "desc");
    sortBuilder.field("unit", "km");
    sortBuilder.field("mode", "max");
    sortBuilder.endObject();
    parse(sortBuilder);

    // single GeoPoint value
    sortBuilder = jsonBuilder();
    sortBuilder.startObject();
    sortBuilder.field("location", new GeoPoint(1, 2));
    sortBuilder.field("order", "desc");
    sortBuilder.field("unit", "km");
    sortBuilder.field("mode", "max");
    sortBuilder.endObject();
    parse(sortBuilder);

    // single "lat,lon" string
    sortBuilder = jsonBuilder();
    sortBuilder.startObject();
    sortBuilder.field("location", "1,2");
    sortBuilder.field("order", "desc");
    sortBuilder.field("unit", "km");
    sortBuilder.field("mode", "max");
    sortBuilder.endObject();
    parse(sortBuilder);

    // single geohash string
    sortBuilder = jsonBuilder();
    sortBuilder.startObject();
    sortBuilder.field("location", "s3y0zh7w1z0g");
    sortBuilder.field("order", "desc");
    sortBuilder.field("unit", "km");
    sortBuilder.field("mode", "max");
    sortBuilder.endObject();
    parse(sortBuilder);

    // mixed array of all of the above
    sortBuilder = jsonBuilder();
    sortBuilder.startObject();
    sortBuilder.startArray("location");
    sortBuilder.value(new GeoPoint(1, 2)).value("s3y0zh7w1z0g").startArray().value(1).value(2).endArray().value("1,2");
    sortBuilder.endArray();
    sortBuilder.field("order", "desc");
    sortBuilder.field("unit", "km");
    sortBuilder.field("mode", "max");
    sortBuilder.endObject();
    parse(sortBuilder);
}
/**
 * The legacy {@code sort_mode} field name must still parse successfully but emit a
 * deprecation warning pointing at the replacement {@code mode} field.
 */
public void testGeoDistanceSortDeprecatedSortModeException() throws Exception {
XContentBuilder sortBuilder = jsonBuilder();
sortBuilder.startObject();
sortBuilder.startArray("location");
sortBuilder.startArray().value(1.2).value(3).endArray().startArray().value(5).value(6).endArray();
sortBuilder.endArray();
sortBuilder.field("order", "desc");
sortBuilder.field("unit", "km");
// deprecated spelling of "mode"; parsing must not fail on it
sortBuilder.field("sort_mode", "max");
sortBuilder.endObject();
parse(sortBuilder);
assertWarnings("Deprecated field [sort_mode] used, expected [mode] instead");
}
/** Round-trips the given JSON through {@code GeoDistanceSortBuilder.fromXContent} with no field name. */
private GeoDistanceSortBuilder parse(XContentBuilder sortBuilder) throws Exception {
try (XContentParser parser = createParser(sortBuilder)) {
parser.nextToken(); // position the parser on the first token before handing it to the production parser
return GeoDistanceSortBuilder.fromXContent(parser, null);
}
}
@Override
protected GeoDistanceSortBuilder fromXContent(XContentParser parser, String fieldName) throws IOException {
// Hook required by the abstract base test class; delegates to the production parser.
return GeoDistanceSortBuilder.fromXContent(parser, fieldName);
}
/**
 * The "plain" geo-distance sort (single point, default unit/order, no nested sort) must be
 * optimized into Lucene's {@code LatLonDocValuesField} distance sort; any extra option falls
 * back to a generic {@code SortField} with a custom comparator.
 */
public void testCommonCaseIsOptimized() throws IOException {
// make sure the below tests test something...
assertFalse(SortField.class.equals(LatLonDocValuesField.newDistanceSort("random_field_name", 3.5, 2.1).getClass()));
SearchExecutionContext context = createMockSearchExecutionContext();
// The common case should use LatLonDocValuesField.newDistanceSort
// NOTE(review): the field name here is "" while every other builder in this test uses
// "random_field_name"; the assertion compares classes only, but confirm "" is intentional.
GeoDistanceSortBuilder builder = new GeoDistanceSortBuilder("", new GeoPoint(3.5, 2.1));
SortFieldAndFormat sort = builder.build(context);
assertEquals(LatLonDocValuesField.newDistanceSort("random_field_name", 3.5, 2.1).getClass(), sort.field().getClass());
// however this might be disabled by fancy options
builder = new GeoDistanceSortBuilder("random_field_name", new GeoPoint(3.5, 2.1), new GeoPoint(3.0, 4));
sort = builder.build(context);
assertEquals(SortField.class, sort.field().getClass()); // 2 points -> plain SortField with a custom comparator
builder = new GeoDistanceSortBuilder("random_field_name", new GeoPoint(3.5, 2.1));
builder.unit(DistanceUnit.KILOMETERS);
sort = builder.build(context);
assertEquals(SortField.class, sort.field().getClass()); // km rather than m -> plain SortField with a custom comparator
builder = new GeoDistanceSortBuilder("random_field_name", new GeoPoint(3.5, 2.1));
builder.order(SortOrder.DESC);
sort = builder.build(context);
assertEquals(SortField.class, sort.field().getClass()); // descending means the max value should be considered rather than min
builder = new GeoDistanceSortBuilder("random_field_name", new GeoPoint(3.5, 2.1));
builder.setNestedSort(new NestedSortBuilder("path"));
sort = builder.build(context);
assertEquals(SortField.class, sort.field().getClass()); // can't use LatLon optimized sorting with nested fields
// NOTE(review): this section repeats the SortOrder.DESC case above verbatim — looks like a
// copy/paste leftover; consider removing one of the two.
builder = new GeoDistanceSortBuilder("random_field_name", new GeoPoint(3.5, 2.1));
builder.order(SortOrder.DESC);
sort = builder.build(context);
assertEquals(SortField.class, sort.field().getClass()); // can't use LatLon optimized sorting with DESC sorting
}
/**
 * Test that the sort builder order gets transferred correctly to the SortField.
 * The default (no explicit order) must behave like ASC, i.e. not reversed.
 */
public void testBuildSortFieldOrder() throws IOException {
SearchExecutionContext searchExecutionContext = createMockSearchExecutionContext();
GeoDistanceSortBuilder geoDistanceSortBuilder = new GeoDistanceSortBuilder("fieldName", 1.0, 1.0);
// default order -> not reversed
assertEquals(false, geoDistanceSortBuilder.build(searchExecutionContext).field().getReverse());
geoDistanceSortBuilder.order(SortOrder.ASC);
assertEquals(false, geoDistanceSortBuilder.build(searchExecutionContext).field().getReverse());
geoDistanceSortBuilder.order(SortOrder.DESC);
// DESC maps to a reversed SortField
assertEquals(true, geoDistanceSortBuilder.build(searchExecutionContext).field().getReverse());
}
/**
 * Test that the sort builder mode gets transferred correctly to the SortField, and that when no
 * explicit mode is set the effective mode is derived from the sort order (DESC -> MAX, ASC -> MIN).
 */
public void testMultiValueMode() throws IOException {
SearchExecutionContext searchExecutionContext = createMockSearchExecutionContext();
GeoDistanceSortBuilder geoDistanceSortBuilder = new GeoDistanceSortBuilder("fieldName", 1.0, 1.0);
geoDistanceSortBuilder.sortMode(SortMode.MAX);
SortField sortField = geoDistanceSortBuilder.build(searchExecutionContext).field();
assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class));
XFieldComparatorSource comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource();
assertEquals(MultiValueMode.MAX, comparatorSource.sortMode());
// also use MultiValueMode.Max if no Mode set but order is DESC
geoDistanceSortBuilder = new GeoDistanceSortBuilder("fieldName", 1.0, 1.0);
geoDistanceSortBuilder.order(SortOrder.DESC);
sortField = geoDistanceSortBuilder.build(searchExecutionContext).field();
assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class));
comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource();
assertEquals(MultiValueMode.MAX, comparatorSource.sortMode());
// use MultiValueMode.Min if no Mode and order is ASC
geoDistanceSortBuilder = new GeoDistanceSortBuilder("fieldName", 1.0, 1.0);
// need to use distance unit other than Meters to not get back a LatLonPointSortField
geoDistanceSortBuilder.order(SortOrder.ASC).unit(DistanceUnit.INCH);
sortField = geoDistanceSortBuilder.build(searchExecutionContext).field();
assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class));
comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource();
assertEquals(MultiValueMode.MIN, comparatorSource.sortMode());
geoDistanceSortBuilder = new GeoDistanceSortBuilder("fieldName", 1.0, 1.0);
// need to use distance unit other than Meters to not get back a LatLonPointSortField
geoDistanceSortBuilder.sortMode(SortMode.MIN).unit(DistanceUnit.INCH);
sortField = geoDistanceSortBuilder.build(searchExecutionContext).field();
assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class));
comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource();
assertEquals(MultiValueMode.MIN, comparatorSource.sortMode());
// remaining modes are set explicitly on the same builder
geoDistanceSortBuilder.sortMode(SortMode.AVG);
sortField = geoDistanceSortBuilder.build(searchExecutionContext).field();
assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class));
comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource();
assertEquals(MultiValueMode.AVG, comparatorSource.sortMode());
geoDistanceSortBuilder.sortMode(SortMode.MEDIAN);
sortField = geoDistanceSortBuilder.build(searchExecutionContext).field();
assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class));
comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource();
assertEquals(MultiValueMode.MEDIAN, comparatorSource.sortMode());
}
/**
 * Test that the sort builder nested object gets created in the SortField: the nested sort's
 * filter (or, absent a filter, a term query on the nested path) must become the inner query.
 */
public void testBuildNested() throws IOException {
SearchExecutionContext searchExecutionContext = createMockSearchExecutionContext();
GeoDistanceSortBuilder sortBuilder = new GeoDistanceSortBuilder("fieldName", 1.0, 1.0).setNestedSort(
new NestedSortBuilder("path").setFilter(QueryBuilders.matchAllQuery())
);
SortField sortField = sortBuilder.build(searchExecutionContext).field();
assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class));
XFieldComparatorSource comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource();
Nested nested = comparatorSource.nested();
assertNotNull(nested);
assertEquals(new MatchAllDocsQuery(), nested.getInnerQuery());
// without an explicit filter the inner query is a term query on the nested path field
sortBuilder = new GeoDistanceSortBuilder("fieldName", 1.0, 1.0).setNestedSort(new NestedSortBuilder("path"));
sortField = sortBuilder.build(searchExecutionContext).field();
assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class))
;
comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource();
nested = comparatorSource.nested();
assertNotNull(nested);
assertEquals(new TermQuery(new Term(NestedPathFieldMapper.NAME, "path")), nested.getInnerQuery());
// NOTE(review): this section is identical to the first one above — likely a copy/paste
// leftover; consider removing it or varying the filter.
sortBuilder = new GeoDistanceSortBuilder("fieldName", 1.0, 1.0).setNestedSort(
new NestedSortBuilder("path").setFilter(QueryBuilders.matchAllQuery())
);
sortField = sortBuilder.build(searchExecutionContext).field();
assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class));
comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource();
nested = comparatorSource.nested();
assertNotNull(nested);
assertEquals(new MatchAllDocsQuery(), nested.getInnerQuery());
}
/**
 * Test that if coercion is used, a point gets normalized at build time but the original
 * (out-of-range) values stored in the builder are unchanged.
 */
public void testBuildCoerce() throws IOException {
SearchExecutionContext searchExecutionContext = createMockSearchExecutionContext();
// lat -180 / lon -360 are out of range; COERCE must normalize them rather than fail
GeoDistanceSortBuilder sortBuilder = new GeoDistanceSortBuilder("fieldName", -180.0, -360.0);
sortBuilder.validation(GeoValidationMethod.COERCE);
// builder still reports the raw, un-normalized input
assertEquals(-180.0, sortBuilder.points()[0].getLat(), 0.0);
assertEquals(-360.0, sortBuilder.points()[0].getLon(), 0.0);
SortField sortField = sortBuilder.build(searchExecutionContext).field();
// the built sort uses the normalized point (0.0, 180.0)
assertEquals(LatLonDocValuesField.newDistanceSort("fieldName", 0.0, 180.0), sortField);
}
/**
 * Test that if validation is strict, invalid points (out-of-range latitude/longitude) throw a
 * parse exception at build time, and that a non-geo field type is rejected outright.
 */
public void testBuildInvalidPoints() throws IOException {
SearchExecutionContext searchExecutionContext = createMockSearchExecutionContext();
{
// latitude out of [-90, 90]
GeoDistanceSortBuilder sortBuilder = new GeoDistanceSortBuilder("fieldName", -180.0, 0.0);
sortBuilder.validation(GeoValidationMethod.STRICT);
ElasticsearchParseException ex = expectThrows(
ElasticsearchParseException.class,
() -> sortBuilder.build(searchExecutionContext)
);
assertEquals("illegal latitude value [-180.0] for [GeoDistanceSort] for field [fieldName].", ex.getMessage());
}
{
// longitude out of [-180, 180]
GeoDistanceSortBuilder sortBuilder = new GeoDistanceSortBuilder("fieldName", 0.0, -360.0);
sortBuilder.validation(GeoValidationMethod.STRICT);
ElasticsearchParseException ex = expectThrows(
ElasticsearchParseException.class,
() -> sortBuilder.build(searchExecutionContext)
);
assertEquals("illegal longitude value [-360.0] for [GeoDistanceSort] for field [fieldName].", ex.getMessage());
}
{
// geo-distance sorting is only defined on geo fields, not numeric ones
GeoDistanceSortBuilder sortBuilder = new GeoDistanceSortBuilder("double", 0.0, 180.0);
sortBuilder.validation(GeoValidationMethod.STRICT);
IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> sortBuilder.build(searchExecutionContext));
assertEquals("unable to apply geo distance sort to field [double] of type [double]", ex.getMessage());
}
}
/**
 * Test the nested Filter gets rewritten: rewriting the sort builder must rewrite the nested
 * sort's filter query, producing a different query instance.
 */
public void testNestedRewrites() throws IOException {
GeoDistanceSortBuilder sortBuilder = new GeoDistanceSortBuilder("fieldName", 0.0, 0.0);
// query whose rewrite always yields a different instance (MatchNoneQueryBuilder)
RangeQueryBuilder rangeQuery = new RangeQueryBuilder("fieldName") {
@Override
public QueryBuilder doSearchRewrite(SearchExecutionContext context) {
return new MatchNoneQueryBuilder();
}
};
sortBuilder.setNestedSort(new NestedSortBuilder("path").setFilter(rangeQuery));
GeoDistanceSortBuilder rewritten = sortBuilder.rewrite(createMockSearchExecutionContext());
// rewrite must have replaced the filter, not returned it as-is
assertNotSame(rangeQuery, rewritten.getNestedSort().getFilter());
}
/**
 * Test the nested sort gets rewritten.
 * NOTE(review): this test is a verbatim duplicate of {@code testNestedRewrites} above —
 * consider removing it or making it exercise a distinct rewrite path.
 */
public void testNestedSortRewrites() throws IOException {
GeoDistanceSortBuilder sortBuilder = new GeoDistanceSortBuilder("fieldName", 0.0, 0.0);
RangeQueryBuilder rangeQuery = new RangeQueryBuilder("fieldName") {
@Override
public QueryBuilder doSearchRewrite(SearchExecutionContext context) {
return new MatchNoneQueryBuilder();
}
};
sortBuilder.setNestedSort(new NestedSortBuilder("path").setFilter(rangeQuery));
GeoDistanceSortBuilder rewritten = sortBuilder.rewrite(createMockSearchExecutionContext());
assertNotSame(rangeQuery, rewritten.getNestedSort().getFilter());
}
}
|
GeoDistanceSortBuilderTests
|
java
|
apache__flink
|
flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/functions/scalar/ArraySliceFunction.java
|
{
"start": 1470,
"end": 3410
}
|
class ____ extends BuiltInScalarFunction {
// Type-agnostic accessor for elements of the input array.
private final ArrayData.ElementGetter elementGetter;
public ArraySliceFunction(SpecializedFunction.SpecializedContext context) {
super(BuiltInFunctionDefinitions.ARRAY_SLICE, context);
// The element type of the first argument (the array) drives element access.
final DataType dataType =
((CollectionDataType) context.getCallContext().getArgumentDataTypes().get(0))
.getElementDataType();
elementGetter = ArrayData.createElementGetter(dataType.getLogicalType());
}
/**
 * Returns the slice of {@code array} between 1-based positions {@code start} and {@code end},
 * both inclusive. Negative positions count from the end of the array (-1 is the last element).
 * Returns {@code null} if any argument is {@code null}.
 */
public @Nullable ArrayData eval(
@Nullable ArrayData array, @Nullable Integer start, @Nullable Integer end) {
try {
if (array == null || start == null || end == null) {
// SQL three-valued logic: null in, null out
return null;
}
if (array.size() == 0) {
return array;
}
int startIndex = start;
int endIndex = end;
// Map negative positions to 1-based positions from the end: -1 -> size, -size -> 1.
startIndex += startIndex < 0 ? array.size() + 1 : 0;
endIndex += endIndex < 0 ? array.size() + 1 : 0;
// Clamp into the valid 1-based range; an end of 0 is treated as position 1.
startIndex = Math.max(1, startIndex);
endIndex = endIndex == 0 ? 1 : Math.min(endIndex, array.size());
if (endIndex < startIndex) {
// Inverted range yields an empty array, not null.
return new GenericArrayData(new Object[0]);
}
if (startIndex == 1 && endIndex == array.size()) {
// Whole array requested; avoid copying.
return array;
}
List<Object> slicedArray = new ArrayList<>();
// Convert 1-based positions back to 0-based element indices.
for (int i = startIndex - 1; i <= endIndex - 1; i++) {
slicedArray.add(elementGetter.getElementOrNull(array, i));
}
return new GenericArrayData(slicedArray.toArray());
} catch (Throwable t) {
throw new FlinkRuntimeException(t);
}
}
/** Two-argument variant: slices from {@code start} to the end of the array. */
public @Nullable ArrayData eval(@Nullable ArrayData array, @Nullable Integer start) {
return array == null ? null : eval(array, start, array.size());
}
}
|
ArraySliceFunction
|
java
|
spring-projects__spring-framework
|
spring-websocket/src/test/java/org/springframework/web/socket/adapter/standard/ConvertingEncoderDecoderSupportTests.java
|
{
"start": 8208,
"end": 8317
}
|
class ____ extends
ConvertingEncoderDecoderSupport.BinaryEncoder<MyType> {
}
public static
|
MyBinaryEncoder
|
java
|
apache__dubbo
|
dubbo-common/src/test/java/org/apache/dubbo/metadata/definition/common/TestService.java
|
{
"start": 883,
"end": 1346
}
|
interface ____ {
/**
 * Exercises a parameter whose type is a nested inner class.
 *
 * @param innerClass instance of {@code OuterClass.InnerClass}
 */
void m1(OuterClass.InnerClass innerClass);
/**
 * Exercises a primitive-array parameter.
 *
 * @param a int array
 */
void m2(int[] a);
/**
 * Exercises a return type that exposes raw (unparameterized) collections.
 *
 * @param s1 input string
 * @return result object carrying raw collections
 */
ResultWithRawCollections m3(String s1);
/**
 * Exercises an enum parameter.
 *
 * @param color enum constant
 */
void m4(ColorEnum color);
/**
 * Exercises a return type that extends {@code Map}.
 *
 * @param s1 input string
 * @return instance of the map-extending class
 */
ClassExtendsMap m5(String s1);
}
|
TestService
|
java
|
quarkusio__quarkus
|
extensions/panache/hibernate-reactive-rest-data-panache/deployment/src/main/java/io/quarkus/hibernate/reactive/rest/data/panache/deployment/HibernateReactiveResourceMethodListenerImplementor.java
|
{
"start": 504,
"end": 2512
}
|
class ____ extends ResourceMethodListenerImplementor {
public HibernateReactiveResourceMethodListenerImplementor(ClassCreator cc, List<ClassInfo> resourceMethodListeners) {
super(cc, resourceMethodListeners);
}
// Chains the onAfterAdd listener callbacks onto the given Uni, passing the emitted entity.
public ResultHandle onAfterAdd(BytecodeCreator methodCreator, ResultHandle uni) {
return invokeUniMethodUsingEntity(ON_AFTER_ADD_METHOD_NAME, methodCreator, uni);
}
// Chains the onAfterUpdate listener callbacks onto the given Uni, passing the emitted entity.
public ResultHandle onAfterUpdate(BytecodeCreator methodCreator, ResultHandle uni) {
return invokeUniMethodUsingEntity(ON_AFTER_UPDATE_METHOD_NAME, methodCreator, uni);
}
// Chains the onAfterDelete listener callbacks onto the given Uni, passing the entity id.
public ResultHandle onAfterDelete(BytecodeCreator methodCreator, ResultHandle uni, ResultHandle id) {
return invokeUniMethodUsingId(ON_AFTER_DELETE_METHOD_NAME, methodCreator, uni, id);
}
// Emits bytecode that, on Uni completion, forwards the emitted item to every listener
// implementing the named method; returns the Uni unchanged when no listener implements it.
protected ResultHandle invokeUniMethodUsingEntity(String methodName, BytecodeCreator methodCreator, ResultHandle uni) {
if (!hasListenerForMethod(methodName)) {
return uni;
}
return UniImplementor.invoke(methodCreator, uni,
(lambda, item) -> processEventListener(methodName, lambda, methodCreator.getThis(), item));
}
// Same as invokeUniMethodUsingEntity, but the listeners receive the supplied id instead of the
// (void) item emitted by the Uni.
protected ResultHandle invokeUniMethodUsingId(String methodName, BytecodeCreator methodCreator, ResultHandle uni,
ResultHandle id) {
if (!hasListenerForMethod(methodName)) {
return uni;
}
return UniImplementor.invoke(methodCreator, uni,
(lambda, voidItem) -> processEventListener(methodName, lambda, methodCreator.getThis(), id));
}
// True if any registered listener class declares a method with the given name.
private boolean hasListenerForMethod(String methodName) {
for (Map.Entry<FieldDescriptor, ClassInfo> eventListenerEntry : listenerFields.entrySet()) {
MethodInfo method = findMethodByName(eventListenerEntry.getValue(), methodName);
if (method != null) {
return true;
}
}
return false;
}
}
|
HibernateReactiveResourceMethodListenerImplementor
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/NotificationsIndex.java
|
{
"start": 468,
"end": 1574
}
|
class ____ {
public static final String NOTIFICATIONS_INDEX_PREFIX = ".ml-notifications-";
public static final String NOTIFICATIONS_INDEX_VERSION = "000002";
// Concrete index name: ".ml-notifications-000002".
public static final String NOTIFICATIONS_INDEX = NOTIFICATIONS_INDEX_PREFIX + NOTIFICATIONS_INDEX_VERSION;
public static final String NOTIFICATIONS_INDEX_WRITE_ALIAS = ".ml-notifications-write";
// Classpath location of the bundled mapping template.
private static final String RESOURCE_PATH = "/ml/";
private static final String MAPPINGS_VERSION_VARIABLE = "xpack.ml.version";
public static final int NOTIFICATIONS_INDEX_MAPPINGS_VERSION = 1;
public static final int NOTIFICATIONS_INDEX_TEMPLATE_VERSION = 1;
// Utility holder for constants and the mapping loader; not instantiable.
private NotificationsIndex() {}
/**
 * Loads the notifications index mappings from the bundled JSON template, substituting the
 * version variables before returning the mapping source.
 */
public static String mapping() {
return TemplateUtils.loadTemplate(
RESOURCE_PATH + "notifications_index_mappings.json",
MlIndexAndAlias.BWC_MAPPINGS_VERSION, // Only needed for BWC with pre-8.10.0 nodes
MAPPINGS_VERSION_VARIABLE,
Map.of("xpack.ml.managed.index.version", Integer.toString(NOTIFICATIONS_INDEX_MAPPINGS_VERSION))
);
}
}
|
NotificationsIndex
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/time/DurationTemporalUnitTest.java
|
{
"start": 9856,
"end": 10569
}
|
class ____ {",
" private static final TemporalUnit random = ",
" new Random().nextBoolean() ? YEARS : SECONDS;",
// Since we don't know at compile time what 'random' is, we can't flag this
" private static final Duration D1 = Duration.of(1, random);",
"}")
.doTest();
}
@Test
public void durationOfWithAliasedTemporalUnit() {
helper
.addSourceLines(
"TestClass.java",
"import static java.time.temporal.ChronoUnit.YEARS;",
"import java.time.Duration;",
"import java.time.temporal.Temporal;",
"import java.time.temporal.TemporalUnit;",
"public
|
TestClass
|
java
|
google__guava
|
guava/src/com/google/common/collect/ImmutableMultiset.java
|
{
"start": 13488,
"end": 14940
}
|
// Serialization proxy for the entry set: serializes the backing multiset and re-derives the
// entry set on deserialization via readResolve().
class ____<E> implements Serializable {
final ImmutableMultiset<E> multiset;
EntrySetSerializedForm(ImmutableMultiset<E> multiset) {
this.multiset = multiset;
}
Object readResolve() {
return multiset.entrySet();
}
}
@GwtIncompatible
@J2ktIncompatible
@Override
// Serializes via the SerializedForm proxy rather than this class's default serialized form.
Object writeReplace() {
return new SerializedForm(this);
}
@GwtIncompatible
@J2ktIncompatible
// Direct deserialization is forbidden; only the SerializedForm proxy may be deserialized.
private void readObject(ObjectInputStream stream) throws InvalidObjectException {
throw new InvalidObjectException("Use SerializedForm");
}
/**
 * Returns a new builder. The generated builder is equivalent to the builder created by the {@link
 * Builder} constructor.
 *
 * @return a fresh, empty builder
 */
public static <E> Builder<E> builder() {
return new Builder<>();
}
/**
* A builder for creating immutable multiset instances, especially {@code public static final}
* multisets ("constant multisets"). Example:
*
* {@snippet :
* public static final ImmutableMultiset<Bean> BEANS =
* new ImmutableMultiset.Builder<Bean>()
* .addCopies(Bean.COCOA, 4)
* .addCopies(Bean.GARDEN, 6)
* .addCopies(Bean.RED, 8)
* .addCopies(Bean.BLACK_EYED, 10)
* .build();
* }
*
* <p>Builder instances can be reused; it is safe to call {@link #build} multiple times to build
* multiple multisets in series.
*
* @since 2.0
*/
public static
|
EntrySetSerializedForm
|
java
|
mockito__mockito
|
mockito-core/src/test/java/org/mockitousage/examples/use/ArticleManager.java
|
{
"start": 191,
"end": 1291
}
|
class ____ {
private final ArticleCalculator calculator;
private final ArticleDatabase database;
/**
 * @param calculator computes article statistics for a newspaper
 * @param database persistence for articles and their counters
 */
public ArticleManager(ArticleCalculator calculator, ArticleDatabase database) {
this.calculator = calculator;
this.database = database;
}
/**
 * Recomputes and stores the total, Polish, and English article counters for the given newspaper.
 */
public void updateArticleCounters(String newspaper) {
int articles = calculator.countArticles(newspaper);
int polishArticles = calculator.countArticlesInPolish(newspaper);
database.updateNumberOfArticles(newspaper, articles);
database.updateNumberOfPolishArticles(newspaper, polishArticles);
// English counter is derived: every article that is not Polish.
database.updateNumberOfEnglishArticles(newspaper, articles - polishArticles);
}
/**
 * Recomputes and persists the related-articles count for every stored article of the given
 * newspaper.
 */
public void updateRelatedArticlesCounters(String newspaper) {
// Fix: the newspaper parameter was previously ignored — articles were always fetched for
// the hardcoded "Guardian".
List<Article> articles = database.getArticlesFor(newspaper);
for (Article article : articles) {
int numberOfRelatedArticles = calculator.countNumberOfRelatedArticles(article);
article.setNumberOfRelatedArticles(numberOfRelatedArticles);
database.save(article);
}
}
}
|
ArticleManager
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/CxfRsEndpointBuilderFactory.java
|
{
"start": 1631,
"end": 3028
}
|
interface ____
extends
EndpointConsumerBuilder {
default AdvancedCxfRsEndpointConsumerBuilder advanced() {
return (AdvancedCxfRsEndpointConsumerBuilder) this;
}
/**
* Set the feature list to the CxfRs endpoint.
*
* The option is a:
* <code>java.util.List<org.apache.cxf.feature.Feature></code>
* type.
*
* Group: common
*
* @param features the value to set
* @return the dsl builder
*/
default CxfRsEndpointConsumerBuilder features(List<org.apache.cxf.feature.Feature> features) {
doSetProperty("features", features);
return this;
}
/**
* Set the feature list to the CxfRs endpoint.
*
* The option will be converted to a
* <code>java.util.List<org.apache.cxf.feature.Feature></code>
* type.
*
* Group: common
*
* @param features the value to set
* @return the dsl builder
*/
default CxfRsEndpointConsumerBuilder features(String features) {
doSetProperty("features", features);
return this;
}
/**
* This option is used to specify the model file which is useful for the
* resource
|
CxfRsEndpointConsumerBuilder
|
java
|
netty__netty
|
codec-base/src/main/java/io/netty/handler/codec/DefaultHeaders.java
|
{
"start": 40000,
"end": 42441
}
|
// Header map entry that participates in two intrusive linked lists at once: a per-bucket
// singly-linked list ('next') and a global insertion-order doubly-linked list ('before'/'after').
class ____<K, V> implements Entry<K, V> {
protected final int hash;
protected final K key;
protected V value;
/**
 * In bucket linked list
 */
protected HeaderEntry<K, V> next;
/**
 * Overall insertion order linked list
 */
protected HeaderEntry<K, V> before, after;
protected HeaderEntry(int hash, K key) {
this.hash = hash;
this.key = key;
}
// Creates an entry linked at the tail of the insertion-order list (just before 'head').
HeaderEntry(int hash, K key, V value, HeaderEntry<K, V> next, HeaderEntry<K, V> head) {
this.hash = hash;
this.key = key;
this.value = value;
this.next = next;
after = head;
before = head.before;
pointNeighborsToThis();
}
// Sentinel head constructor: no key/value (hash -1), insertion-order links point to itself.
HeaderEntry() {
hash = -1;
key = null;
before = after = this;
}
// Completes an insertion by making both insertion-order neighbors point at this entry.
protected final void pointNeighborsToThis() {
before.after = this;
after.before = this;
}
public final HeaderEntry<K, V> before() {
return before;
}
public final HeaderEntry<K, V> after() {
return after;
}
// Unlinks this entry from the insertion-order list; the bucket list is the caller's job.
protected void remove() {
before.after = after;
after.before = before;
}
@Override
public final K getKey() {
return key;
}
@Override
public final V getValue() {
return value;
}
@Override
public final V setValue(V value) {
checkNotNull(value, "value");
V oldValue = this.value;
this.value = value;
return oldValue;
}
@Override
public final String toString() {
return key.toString() + '=' + value.toString();
}
@Override
public boolean equals(Object o) {
if (!(o instanceof Map.Entry)) {
return false;
}
Entry<?, ?> other = (Entry<?, ?>) o;
// Null-safe key/value comparison against any Map.Entry implementation.
return (getKey() == null ? other.getKey() == null : getKey().equals(other.getKey())) &&
(getValue() == null ? other.getValue() == null : getValue().equals(other.getValue()));
}
@Override
public int hashCode() {
// XOR of key and value hashes, matching the Map.Entry hashCode contract.
return (key == null ? 0 : key.hashCode()) ^ (value == null ? 0 : value.hashCode());
}
}
}
|
HeaderEntry
|
java
|
spring-projects__spring-security
|
config/src/test/java/org/springframework/security/config/annotation/method/configuration/GlobalMethodSecurityConfigurationTests.java
|
{
"start": 12315,
"end": 12637
}
|
class ____ extends GlobalMethodSecurityConfiguration {
@Bean
@Override
protected MethodSecurityMetadataSource customMethodSecurityMetadataSource() {
return mock(MethodSecurityMetadataSource.class);
}
}
@Configuration
@EnableGlobalMethodSecurity(prePostEnabled = true)
public static
|
CustomMetadataSourceConfig
|
java
|
elastic__elasticsearch
|
modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientsManager.java
|
{
"start": 12132,
"end": 17244
}
|
// Holder of cached S3 clients keyed by K. The cache map is replaced wholesale (copy-on-write)
// under the holder's monitor; reads are lock-free via the volatile field.
class ____<K> implements Closeable {
protected volatile Map<K, AmazonS3Reference> clientsCache = Collections.emptyMap();
private final AtomicBoolean closed = new AtomicBoolean(false);
/**
 * Determine the client key for retrieving the cached client.
 * @param repositoryMetadata The repository metadata
 * @return Key to a cached client.
 */
abstract K clientKey(RepositoryMetadata repositoryMetadata);
/**
 * Get the client settings for a given client key.
 */
abstract S3ClientSettings singleClientSettings(K clientKey);
/**
 * Get a map of client name to client settings for all clients.
 */
abstract Map<String, S3ClientSettings> allClientSettings();
/**
 * Get the project id for which this clients holder is associated with.
 */
abstract ProjectId projectId();
/**
 * Similar to {@link #singleClientSettings(K)} but from the given repository metadata.
 */
S3ClientSettings singleClientSettings(RepositoryMetadata repositoryMetadata) {
return singleClientSettings(clientKey(repositoryMetadata));
}
/**
 * Retrieves an {@link AmazonS3Reference} for the given repository metadata. If a cached client exists and can be
 * referenced, it is returned. Otherwise, a new client is created, added to the cache, and returned.
 *
 * @param repositoryMetadata The metadata of the repository for which the Amazon S3 client is required.
 * @return An {@link AmazonS3Reference} instance corresponding to the repository metadata.
 * @throws IllegalArgumentException If no client settings exist for the given repository metadata.
 * @throws AlreadyClosedException If either the clients manager or the holder is closed
 */
final AmazonS3Reference client(RepositoryMetadata repositoryMetadata) {
final var clientKey = clientKey(repositoryMetadata);
// Fast path: lock-free read of the volatile cache map.
final var clientReference = clientsCache.get(clientKey);
// It is ok to retrieve an existing client when the cache is being cleared or the holder is closing.
// As long as there are paired incRef/decRef calls, the client will be closed when the last reference is released
// by either the caller of this method or the clearCache() method.
if (clientReference != null && clientReference.tryIncRef()) {
return clientReference;
}
final var settings = singleClientSettings(clientKey);
// Slow path: build and publish a new client under the holder's monitor.
synchronized (this) {
// Re-check: another thread may have populated the cache while we waited for the lock.
final var existing = clientsCache.get(clientKey);
if (existing != null && existing.tryIncRef()) {
return existing;
}
if (closed.get()) {
// Not adding a new client once the clients holder is closed since there won't be anything to close it
throw new AlreadyClosedException("Project [" + projectId() + "] clients holder is closed");
}
if (managerClosed.get()) {
// This clients holder must be added after the manager is closed. It must have no cached clients.
assert clientsCache.isEmpty() : "expect empty cache, but got " + clientsCache;
throw new AlreadyClosedException("s3 clients manager is closed");
}
// The close() method maybe called after we checked it, it is ok since we are already inside the synchronized block.
// The close method calls clearCache() which will clear the newly added client.
final var newClientReference = clientBuilder.apply(settings);
clientsCache = Maps.copyMapWithAddedEntry(clientsCache, clientKey, newClientReference);
return newClientReference;
}
}
/**
 * Clear the cache by closing and clearing out all clients. Subsequent {@link #client(RepositoryMetadata)} calls will recreate
 * the clients and populate the cache again.
 */
final synchronized void clearCache() {
// the clients will shutdown when they will not be used anymore
IOUtils.closeWhileHandlingException(clientsCache.values());
// clear previously cached clients, they will be built lazily
clientsCache = Collections.emptyMap();
doClearCache();
}
// Hook for subclasses to clear any additional per-holder state; no-op by default.
void doClearCache() {}
/**
 * Similar to {@link #clearCache()} but also flag the holder to be closed so that no new client can be created.
 */
public final void close() {
// compareAndSet makes close() idempotent: only the first caller clears the cache.
if (closed.compareAndSet(false, true)) {
clearCache();
}
}
// visible for tests
final boolean isClosed() {
return closed.get();
}
}
/**
* S3 clients holder for a single project. The client cache is keyed by the client name.
*/
final
|
ClientsHolder
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/plan/logical/UnresolvedRelation.java
|
{
"start": 712,
"end": 2974
}
|
/**
 * Leaf logical plan representing a relation (table/index) that could not be resolved.
 * Carries the original table identifier, an optional alias, the frozen flag, and the message
 * explaining why resolution failed.
 */
class ____ extends LeafPlan implements Unresolvable {
private final TableIdentifier table;
private final boolean frozen;
private final String alias;
private final String unresolvedMsg;
public UnresolvedRelation(Source source, TableIdentifier table, String alias, boolean frozen) {
this(source, table, alias, frozen, null);
}
/**
 * @param unresolvedMessage custom failure message; when {@code null} a default
 *     "Unknown index [...]" message derived from the table index is used
 */
public UnresolvedRelation(Source source, TableIdentifier table, String alias, boolean frozen, String unresolvedMessage) {
super(source);
this.table = table;
this.alias = alias;
this.frozen = frozen;
this.unresolvedMsg = unresolvedMessage == null ? "Unknown index [" + table.index() + "]" : unresolvedMessage;
}
@Override
protected NodeInfo<UnresolvedRelation> info() {
return NodeInfo.create(this, UnresolvedRelation::new, table, alias, frozen, unresolvedMsg);
}
public TableIdentifier table() {
return table;
}
public String alias() {
return alias;
}
public boolean frozen() {
return frozen;
}
// An unresolved relation is, by definition, never resolved.
@Override
public boolean resolved() {
return false;
}
@Override
public boolean expressionsResolved() {
return false;
}
@Override
public List<Attribute> output() {
return Collections.emptyList();
}
@Override
public String unresolvedMessage() {
return unresolvedMsg;
}
@Override
public int hashCode() {
// Fix: hash the same fields equals() compares. 'frozen' participates in equality but was
// previously omitted here, causing avoidable collisions for objects differing only in it.
return Objects.hash(source(), table, alias, frozen, unresolvedMsg);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
UnresolvedRelation other = (UnresolvedRelation) obj;
return Objects.equals(table, other.table)
&& Objects.equals(alias, other.alias)
&& Objects.equals(frozen, other.frozen)
&& Objects.equals(unresolvedMsg, other.unresolvedMsg);
}
@Override
public List<Object> nodeProperties() {
return singletonList(table);
}
@Override
public String toString() {
return UNRESOLVED_PREFIX + table.index();
}
}
|
UnresolvedRelation
|
java
|
elastic__elasticsearch
|
modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java
|
{
"start": 2630,
"end": 8494
}
|
class ____ extends ESTestCase {
private ThreadPool threadPool;
@Before
public void setUpThreadPool() {
threadPool = new TestThreadPool(getTestName());
}
@After
public void tearDownThreadPool() {
terminate(threadPool);
}
// ensure we test the happy path on every build.
public void testStartScrollDone() throws InterruptedException {
dotestBasicsWithRetry(0, 0, 0, e -> fail());
}
public void testRetrySuccess() throws InterruptedException {
int retries = randomIntBetween(1, 10);
dotestBasicsWithRetry(retries, 0, retries, e -> fail());
}
public void testRetryFail() throws InterruptedException {
final int retries = randomInt(10);
final var exceptionRef = new AtomicReference<Exception>();
dotestBasicsWithRetry(retries, retries + 1, retries + 1, exceptionRef::set);
assertThat(exceptionRef.get(), instanceOf(EsRejectedExecutionException.class));
}
// Drives a full start -> scroll -> retry cycle against a MockClient.
// retries: retry budget given to the BackoffPolicy; minFailures/maxFailures:
// bounds for how many simulated rejections to inject per request; failureHandler:
// invoked by the hit source when the retry budget is exhausted.
// If more failures are injected than the budget allows, the method returns early
// after the failure handler path has been exercised.
private void dotestBasicsWithRetry(int retries, int minFailures, int maxFailures, Consumer<Exception> failureHandler)
throws InterruptedException {
BlockingQueue<ScrollableHitSource.AsyncResponse> responses = new ArrayBlockingQueue<>(100);
MockClient client = new MockClient(threadPool);
TaskId parentTask = new TaskId("thenode", randomInt());
// Counts retry callbacks actually fired by the hit source; compared against
// expectedSearchRetries (incremented by this test) at the end.
AtomicInteger actualSearchRetries = new AtomicInteger();
int expectedSearchRetries = 0;
ClientScrollableHitSource hitSource = new ClientScrollableHitSource(
logger,
BackoffPolicy.constantBackoff(TimeValue.ZERO, retries),
threadPool,
actualSearchRetries::incrementAndGet,
responses::add,
failureHandler,
new ParentTaskAssigningClient(client, parentTask),
new SearchRequest().scroll(TimeValue.timeValueMinutes(1))
);
hitSource.start();
// Inject rejections into the initial search; once the injected failure count
// exceeds the retry budget the failure handler fires and the test is done.
for (int retry = 0; retry < randomIntBetween(minFailures, maxFailures); ++retry) {
client.fail(TransportSearchAction.TYPE, new EsRejectedExecutionException());
if (retry >= retries) {
return;
}
// Wait for the hit source to re-issue the search before failing it again.
client.awaitOperation();
++expectedSearchRetries;
}
// The hit source must disable partial results on the initial search.
client.validateRequest(TransportSearchAction.TYPE, (SearchRequest r) -> assertTrue(r.allowPartialSearchResults() == Boolean.FALSE));
SearchResponse searchResponse = createSearchResponse();
try {
client.respond(TransportSearchAction.TYPE, searchResponse);
// Consume a random number of scroll pages, injecting rejections between
// pages; each page's hits must match the simulated response exactly.
for (int i = 0; i < randomIntBetween(1, 10); ++i) {
ScrollableHitSource.AsyncResponse asyncResponse = responses.poll(10, TimeUnit.SECONDS);
assertNotNull(asyncResponse);
// Only one outstanding response at a time: the queue must be drained.
assertEquals(responses.size(), 0);
assertSameHits(asyncResponse.response().getHits(), searchResponse.getHits().getHits());
asyncResponse.done(TimeValue.ZERO);
for (int retry = 0; retry < randomIntBetween(minFailures, maxFailures); ++retry) {
client.fail(TransportSearchScrollAction.TYPE, new EsRejectedExecutionException());
client.awaitOperation();
++expectedSearchRetries;
}
// Release the previous response's ref before swapping in a fresh one.
searchResponse.decRef();
searchResponse = createSearchResponse();
client.respond(TransportSearchScrollAction.TYPE, searchResponse);
}
assertEquals(actualSearchRetries.get(), expectedSearchRetries);
} finally {
searchResponse.decRef();
}
}
public void testScrollKeepAlive() {
MockClient client = new MockClient(threadPool);
TaskId parentTask = new TaskId("thenode", randomInt());
ClientScrollableHitSource hitSource = new ClientScrollableHitSource(
logger,
BackoffPolicy.constantBackoff(TimeValue.ZERO, 0),
threadPool,
() -> fail(),
r -> fail(),
e -> fail(),
new ParentTaskAssigningClient(client, parentTask),
// Set the base for the scroll to wait - this is added to the figure we calculate below
new SearchRequest().scroll(timeValueSeconds(10))
);
hitSource.startNextScroll(timeValueSeconds(100));
client.validateRequest(TransportSearchScrollAction.TYPE, (SearchScrollRequest r) -> assertEquals(r.scroll().seconds(), 110));
}
private SearchResponse createSearchResponse() {
// create a simulated response.
SearchHit hit = SearchHit.unpooled(0, "id").sourceRef(new BytesArray("{}"));
SearchHits hits = SearchHits.unpooled(
IntStream.range(0, randomIntBetween(0, 20)).mapToObj(i -> hit).toArray(SearchHit[]::new),
new TotalHits(0, TotalHits.Relation.EQUAL_TO),
0
);
return SearchResponseUtils.response(hits).scrollId(randomSimpleString(random(), 1, 10)).shards(5, 4, 0).build();
}
private void assertSameHits(List<? extends ScrollableHitSource.Hit> actual, SearchHit[] expected) {
assertEquals(actual.size(), expected.length);
for (int i = 0; i < actual.size(); ++i) {
assertThat(expected[i].getSourceRef(), equalBytes(actual.get(i).getSource()));
assertEquals(actual.get(i).getIndex(), expected[i].getIndex());
assertEquals(actual.get(i).getVersion(), expected[i].getVersion());
assertEquals(actual.get(i).getPrimaryTerm(), expected[i].getPrimaryTerm());
assertEquals(actual.get(i).getSeqNo(), expected[i].getSeqNo());
assertEquals(actual.get(i).getId(), expected[i].getId());
assertEquals(actual.get(i).getIndex(), expected[i].getIndex());
}
}
private static
|
ClientScrollableHitSourceTests
|
java
|
spring-projects__spring-framework
|
spring-web/src/test/java/org/springframework/web/util/ServletContextPropertyUtilsTests.java
|
{
"start": 881,
"end": 1615
}
|
class ____ {
// Placeholder is resolved from a ServletContext init parameter; the init
// parameter wins over the ":foo" default embedded in the placeholder.
@Test
void resolveAsServletContextInitParameter() {
MockServletContext servletContext = new MockServletContext();
servletContext.setInitParameter("test.prop", "bar");
String resolved = ServletContextPropertyUtils.resolvePlaceholders("${test.prop:foo}", servletContext);
assertThat(resolved).isEqualTo("bar");
}
// When the ServletContext has no matching init parameter, resolution falls
// back to JVM system properties.
@Test
void fallbackToSystemProperties() {
MockServletContext servletContext = new MockServletContext();
System.setProperty("test.prop", "bar");
try {
String resolved = ServletContextPropertyUtils.resolvePlaceholders("${test.prop:foo}", servletContext);
assertThat(resolved).isEqualTo("bar");
}
finally {
// Always clear the system property so other tests are unaffected.
System.clearProperty("test.prop");
}
}
}
|
ServletContextPropertyUtilsTests
|
java
|
apache__camel
|
tooling/maven/camel-package-maven-plugin/src/main/java/org/apache/camel/maven/packaging/CamelTestInfraGenerateMetadataMojo.java
|
{
"start": 2477,
"end": 3635
}
|
class ____ extends AbstractGeneratorMojo {
@Parameter(property = "project", required = true, readonly = true)
protected MavenProject project;
@Parameter(defaultValue = "${project.basedir}/src/generated/resources")
protected File generatedResourcesOutputDir;
public static final DotName INFRA_SERVICE = DotName.createSimple(InfraService.class.getName());
@Inject
protected CamelTestInfraGenerateMetadataMojo(MavenProjectHelper projectHelper, BuildContext buildContext) {
super(projectHelper, buildContext);
}
@Override
public void execute() throws MojoExecutionException, MojoFailureException {
Set<InfrastructureServiceModel> models = new LinkedHashSet<>();
for (AnnotationInstance ai : PackagePluginUtils.readJandexIndexQuietly(project).getAnnotations(INFRA_SERVICE)) {
InfrastructureServiceModel infrastructureServiceModel = new InfrastructureServiceModel();
String targetClass = ai.target().toString();
infrastructureServiceModel.setImplementation(targetClass);
try {
// Search for target
|
CamelTestInfraGenerateMetadataMojo
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/QuotaUsage.java
|
{
"start": 1508,
"end": 11476
}
|
// Fluent builder for QuotaUsage. Quota fields default to -1 (meaning "no
// quota set"); consumed fields default to 0.
class ____ {
public Builder() {
this.quota = -1L;
this.spaceQuota = -1L;
// One slot per StorageType ordinal; consumed defaults to 0, quota to -1.
typeConsumed = new long[StorageType.values().length];
typeQuota = new long[StorageType.values().length];
Arrays.fill(typeQuota, -1L);
}
// Sets the combined file-and-directory count.
public Builder fileAndDirectoryCount(long count) {
this.fileAndDirectoryCount = count;
return this;
}
// Sets the namespace (name) quota.
public Builder quota(long quota){
this.quota = quota;
return this;
}
// Sets the disk space consumed, in bytes.
public Builder spaceConsumed(long spaceConsumed) {
this.spaceConsumed = spaceConsumed;
return this;
}
// Sets the disk space quota, in bytes.
public Builder spaceQuota(long spaceQuota) {
this.spaceQuota = spaceQuota;
return this;
}
// Bulk-sets per-storage-type consumption, indexed by StorageType ordinal.
// NOTE(review): copies typeConsumed.length entries — an argument longer than
// StorageType.values().length would throw; confirm callers always pass a
// correctly sized array.
public Builder typeConsumed(long[] typeConsumed) {
System.arraycopy(typeConsumed, 0, this.typeConsumed, 0,
typeConsumed.length);
return this;
}
// Sets the quota for a single storage type.
public Builder typeQuota(StorageType type, long quota) {
this.typeQuota[type.ordinal()] = quota;
return this;
}
// Sets the consumption for a single storage type.
public Builder typeConsumed(StorageType type, long consumed) {
this.typeConsumed[type.ordinal()] = consumed;
return this;
}
// Bulk-sets per-storage-type quotas, indexed by StorageType ordinal.
public Builder typeQuota(long[] typeQuota) {
System.arraycopy(typeQuota, 0, this.typeQuota, 0, typeQuota.length);
return this;
}
// Builds the immutable QuotaUsage snapshot from the accumulated fields.
public QuotaUsage build() {
return new QuotaUsage(this);
}
private long fileAndDirectoryCount;
private long quota;
private long spaceConsumed;
private long spaceQuota;
private long[] typeConsumed;
private long[] typeQuota;
}
// Make it protected for the deprecated ContentSummary constructor.
protected QuotaUsage() { }
/** Build the instance based on the builder.
* @param builder bulider.
*/
protected QuotaUsage(Builder builder) {
this.fileAndDirectoryCount = builder.fileAndDirectoryCount;
this.quota = builder.quota;
this.spaceConsumed = builder.spaceConsumed;
this.spaceQuota = builder.spaceQuota;
this.typeConsumed = builder.typeConsumed;
this.typeQuota = builder.typeQuota;
}
protected void setQuota(long quota) {
this.quota = quota;
}
protected void setSpaceConsumed(long spaceConsumed) {
this.spaceConsumed = spaceConsumed;
}
protected void setSpaceQuota(long spaceQuota) {
this.spaceQuota = spaceQuota;
}
/**
* Return the directory count.
*
* @return file and directory count.
*/
public long getFileAndDirectoryCount() {
return fileAndDirectoryCount;
}
/**
* Return the directory quota.
*
* @return quota.
*/
public long getQuota() {
return quota;
}
/**
* Return (disk) space consumed.
*
* @return space consumed.
*/
public long getSpaceConsumed() {
return spaceConsumed;
}
/**
* Return (disk) space quota.
*
* @return space quota.
*/
public long getSpaceQuota() {
return spaceQuota;
}
/**
* Return storage type quota.
*
* @param type storage type.
* @return type quota.
*/
public long getTypeQuota(StorageType type) {
return (typeQuota != null) ? typeQuota[type.ordinal()] : -1L;
}
/**
* Return storage type consumed.
*
* @param type storage type.
* @return type consumed.
*/
public long getTypeConsumed(StorageType type) {
return (typeConsumed != null) ? typeConsumed[type.ordinal()] : 0L;
}
/**
* Return true if any storage type quota has been set.
*
* @return if any storage type quota has been set true, not false.
* */
public boolean isTypeQuotaSet() {
if (typeQuota != null) {
for (StorageType t : StorageType.getTypesSupportingQuota()) {
if (typeQuota[t.ordinal()] > 0L) {
return true;
}
}
}
return false;
}
/**
* Return true if any storage type consumption information is available.
*
* @return if any storage type consumption information
* is available, not false.
*/
public boolean isTypeConsumedAvailable() {
if (typeConsumed != null) {
for (StorageType t : StorageType.getTypesSupportingQuota()) {
if (typeConsumed[t.ordinal()] > 0L) {
return true;
}
}
}
return false;
}
@Override
public int hashCode() {
    // 31-based polynomial combine over all fields. Longs are folded with
    // Long.hashCode, which is exactly (int) (v ^ (v >>> 32)), so the result
    // is bit-identical to the classic hand-expanded form.
    final int prime = 31;
    int result = 1;
    result = prime * result + Long.hashCode(fileAndDirectoryCount);
    result = prime * result + Long.hashCode(quota);
    result = prime * result + Long.hashCode(spaceConsumed);
    result = prime * result + Long.hashCode(spaceQuota);
    result = prime * result + Arrays.hashCode(typeConsumed);
    result = prime * result + Arrays.hashCode(typeQuota);
    return result;
}
@Override
public boolean equals(Object obj) {
    if (this == obj) {
        return true;
    }
    // instanceof (rather than getClass()) deliberately allows subclass
    // instances to compare equal, matching the original implementation.
    if (!(obj instanceof QuotaUsage)) {
        return false;
    }
    QuotaUsage that = (QuotaUsage) obj;
    // All scalar fields plus element-wise array comparison of the
    // per-storage-type counters.
    return fileAndDirectoryCount == that.fileAndDirectoryCount
        && quota == that.quota
        && spaceConsumed == that.spaceConsumed
        && spaceQuota == that.spaceQuota
        && Arrays.equals(typeConsumed, that.typeConsumed)
        && Arrays.equals(typeQuota, that.typeQuota);
}
/**
* Output format:
* |----12----| |----15----| |----15----| |----15----| |-------18-------|
* QUOTA REMAINING_QUOTA SPACE_QUOTA SPACE_QUOTA_REM FILE_NAME
*/
protected static final String QUOTA_STRING_FORMAT = "%12s %15s ";
protected static final String SPACE_QUOTA_STRING_FORMAT = "%15s %15s ";
protected static final String[] QUOTA_HEADER_FIELDS = new String[] {"QUOTA",
"REM_QUOTA", "SPACE_QUOTA", "REM_SPACE_QUOTA"};
protected static final String QUOTA_HEADER = String.format(
QUOTA_STRING_FORMAT + SPACE_QUOTA_STRING_FORMAT,
(Object[]) QUOTA_HEADER_FIELDS);
/**
* Output format:
* |-----14-----| |-------18------| |-----14-----| |-------18------|
* SSD_QUOTA REM_SSD_QUOTA DISK_QUOTA REM_DISK_QUOTA
* |-----14-----| |-------18------| |-----14-----| |-------18------|
* ARCHIVE_QUOTA REM_ARCHIVE_QUOTA PROVIDED_QUOTA REM_PROVIDED_QUOTA
* |-----14-----| |-------18------| |-------18------|
* NVDIMM_QUOTA REM_NVDIMM_QUOTA PATHNAME
*/
private static final String STORAGE_TYPE_SUMMARY_FORMAT = "%14s %18s ";
/** Return the header of the output.
* @return the header of the output
*/
public static String getHeader() {
return QUOTA_HEADER;
}
/** default quota display string */
private static final String QUOTA_NONE = "none";
private static final String QUOTA_INF = "inf";
@Override
public String toString() {
return toString(false);
}
public String toString(boolean hOption) {
return toString(hOption, false, null);
}
/**
* Return the string representation of the object in the output format.
* if hOption is false file sizes are returned in bytes
* if hOption is true file sizes are returned in human readable
*
* @param hOption a flag indicating if human readable output if to be used
* @param tOption type option.
* @param types storage types.
* @return the string representation of the object.
*/
public String toString(boolean hOption,
boolean tOption, List<StorageType> types) {
if (tOption) {
return getTypesQuotaUsage(hOption, types);
}
return getQuotaUsage(hOption);
}
// Formats the name quota and space quota columns. Unset quotas render as
// "none" with remaining quota "inf"; set quotas render the limit and the
// remaining amount (limit minus usage). hOption selects human-readable sizes.
protected String getQuotaUsage(boolean hOption) {
String quotaStr = QUOTA_NONE;
String quotaRem = QUOTA_INF;
String spaceQuotaStr = QUOTA_NONE;
String spaceQuotaRem = QUOTA_INF;
// NOTE(review): name quota uses "> 0" but space quota uses ">= 0" — a space
// quota of exactly 0 is displayed while a name quota of 0 is not; confirm
// this asymmetry is intentional.
if (quota > 0L) {
quotaStr = formatSize(quota, hOption);
quotaRem = formatSize(quota-fileAndDirectoryCount, hOption);
}
if (spaceQuota >= 0L) {
spaceQuotaStr = formatSize(spaceQuota, hOption);
spaceQuotaRem = formatSize(spaceQuota - spaceConsumed, hOption);
}
return String.format(QUOTA_STRING_FORMAT + SPACE_QUOTA_STRING_FORMAT,
quotaStr, quotaRem, spaceQuotaStr, spaceQuotaRem);
}
// Formats one quota/remaining column pair per requested storage type, in the
// order given, using STORAGE_TYPE_SUMMARY_FORMAT. Types without a quota
// (negative) render as "none"/"inf". hOption selects human-readable sizes.
protected String getTypesQuotaUsage(boolean hOption,
List<StorageType> types) {
StringBuilder content = new StringBuilder();
for (StorageType st : types) {
long typeQuota = getTypeQuota(st);
long typeConsumed = getTypeConsumed(st);
String quotaStr = QUOTA_NONE;
String quotaRem = QUOTA_INF;
if (typeQuota >= 0L) {
quotaStr = formatSize(typeQuota, hOption);
quotaRem = formatSize(typeQuota - typeConsumed, hOption);
}
content.append(
String.format(STORAGE_TYPE_SUMMARY_FORMAT, quotaStr, quotaRem));
}
return content.toString();
}
/**
* return the header of with the StorageTypes.
*
* @param storageTypes storage types.
* @return storage header string
*/
public static String getStorageTypeHeader(List<StorageType> storageTypes) {
StringBuilder header = new StringBuilder();
for (StorageType st : storageTypes) {
/* the field length is 13/17 for quota and remain quota
* as the max length for quota name is ARCHIVE_QUOTA
* and remain quota name REM_ARCHIVE_QUOTA */
String storageName = st.toString();
header.append(String.format(STORAGE_TYPE_SUMMARY_FORMAT,
storageName + "_QUOTA", "REM_" + storageName + "_QUOTA"));
}
return header.toString();
}
/**
* Formats a size to be human readable or in bytes.
* @param size value to be formatted
* @param humanReadable flag indicating human readable or not
* @return String representation of the size
*/
private String formatSize(long size, boolean humanReadable) {
return humanReadable
? StringUtils.TraditionalBinaryPrefix.long2String(size, "", 1)
: String.valueOf(size);
}
}
|
Builder
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/jpa/spi/JpaCompliance.java
|
{
"start": 341,
"end": 5112
}
|
interface ____ {
/**
* Controls whether Hibernate's handling of JPA's
* {@link jakarta.persistence.Query} (JPQL, Criteria and native-query)
* should strictly follow the JPA spec. This includes parsing and
* translating a query as JPQL instead of HQL, as well as whether calls
* to the {@link jakarta.persistence.Query} methods always throw the
* exceptions defined by the specification.
* <p>
* Deviations result in an exception, if enabled.
*
* @return {@code true} indicates to behave in the spec-defined way
*
* @see org.hibernate.cfg.AvailableSettings#JPA_QUERY_COMPLIANCE
*/
boolean isJpaQueryComplianceEnabled();
/**
* Indicates that Hibernate's {@link Transaction} should behave as
* defined by the specification for JPA's
* {@link jakarta.persistence.EntityTransaction} since it extends it.
*
* @return {@code true} indicates to behave in the spec-defined way
*
* @see org.hibernate.cfg.AvailableSettings#JPA_TRANSACTION_COMPLIANCE
*/
boolean isJpaTransactionComplianceEnabled();
/**
* JPA defines specific exceptions on specific methods when called on
* {@link jakarta.persistence.EntityManager} and
* {@link jakarta.persistence.EntityManagerFactory} when those objects
* have been closed. This setting controls whether the spec defined
* behavior or Hibernate's behavior will be used.
* <p>
* If enabled Hibernate will operate in the JPA specified way throwing
* exceptions when the spec says it should with regard to close checking
*
* @return {@code true} indicates to behave in the spec-defined way
*
* @see org.hibernate.cfg.AvailableSettings#JPA_CLOSED_COMPLIANCE
*/
boolean isJpaClosedComplianceEnabled();
/**
* @deprecated No longer has any effect.
*/
@Deprecated(since = "7.0")
boolean isJpaCascadeComplianceEnabled();
/**
* JPA spec says that an {@link jakarta.persistence.EntityNotFoundException}
* should be thrown when accessing an entity proxy which does not have
* an associated table row in the database.
* <p>
* Traditionally, Hibernate does not initialize an entity Proxy when
* accessing its identifier since we already know the identifier value,
* hence we can save a database round trip.
* <p>
* If enabled Hibernate will initialize the entity proxy even when
* accessing its identifier.
*
* @return {@code true} indicates to behave in the spec-defined way
*
* @see org.hibernate.cfg.AvailableSettings#JPA_PROXY_COMPLIANCE
*/
boolean isJpaProxyComplianceEnabled();
/**
* Should Hibernate comply with all aspects of caching as defined by JPA?
* Or can it deviate to perform things it believes will be "better"?
*
* @implNote Effects include marking all secondary tables as non-optional.
* The reason being that optional secondary tables can lead to entity cache
* being invalidated rather than updated.
*
* @return {@code true} indicates to behave in the spec-defined way
*
* @see org.hibernate.cfg.AvailableSettings#JPA_CACHING_COMPLIANCE
* @see org.hibernate.persister.entity.AbstractEntityPersister#isCacheInvalidationRequired()
*/
boolean isJpaCacheComplianceEnabled();
/**
* Should the scope of {@link jakarta.persistence.TableGenerator#name()}
* and {@link jakarta.persistence.SequenceGenerator#name()} be considered
* globally or locally defined?
*
* @return {@code true} if the generator name scope is considered global
*
* @see org.hibernate.cfg.AvailableSettings#JPA_ID_GENERATOR_GLOBAL_SCOPE_COMPLIANCE
*/
boolean isGlobalGeneratorScopeEnabled();
/**
* Should we strictly handle {@link jakarta.persistence.OrderBy} expressions?
* <p>
* JPA says the order-items can only be attribute references whereas
* Hibernate supports a wide range of items. With this enabled, Hibernate
* will throw a compliance error when a non-attribute-reference is used.
*
* @see org.hibernate.cfg.AvailableSettings#JPA_ORDER_BY_MAPPING_COMPLIANCE
*/
boolean isJpaOrderByMappingComplianceEnabled();
/**
* JPA says that the id passed to
* {@link jakarta.persistence.EntityManager#getReference} and
* {@link jakarta.persistence.EntityManager#find} should be exactly the
* expected type, allowing no type coercion.
* <p>
* Historically, Hibernate behaved the same way. Since 6.0 however,
* Hibernate has the ability to coerce the passed type to the expected
* type. For example, an {@link Integer} may be widened to {@link Long}.
* Coercion is performed by calling
* {@link org.hibernate.type.descriptor.java.JavaType#coerce}.
* <p>
* This setting controls whether such coercion should be allowed.
*
* @see org.hibernate.cfg.AvailableSettings#JPA_LOAD_BY_ID_COMPLIANCE
*
* @since 6.0
*/
boolean isLoadByIdComplianceEnabled();
}
|
JpaCompliance
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUsage.java
|
{
"start": 1392,
"end": 1669
}
|
class ____ extends QuotaUsage {
/** Default quota usage count. */
public static final long QUOTA_USAGE_COUNT_DEFAULT = 0;
private RouterQuotaUsage(Builder builder) {
super(builder);
}
/** Build the instance based on the builder. */
public static
|
RouterQuotaUsage
|
java
|
apache__flink
|
flink-tests/src/test/java/org/apache/flink/test/runtime/entrypoint/StreamingNoop.java
|
{
"start": 1706,
"end": 2624
}
|
// Test entry point: builds a trivial streaming dataflow (watch "input/" for
// new files, discard everything) and serializes its JobGraph to a file
// instead of executing it. Used by entrypoint tests that need a job graph
// artifact on disk.
class ____ {
public static void main(String[] args) throws Exception {
ParameterTool params = ParameterTool.fromArgs(args);
// define the dataflow
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.setParallelism(2);
// Fixed-delay restart: up to 10 attempts, 1000 ms apart.
RestartStrategyUtils.configureFixedDelayRestartStrategy(env, 10, 1000);
// Poll "input/" every 60 s for newly appearing files; sink discards records.
env.readFileStream("input/", 60000, FileMonitoringFunction.WatchType.ONLY_NEW_FILES)
.sinkTo(new DiscardingSink<>());
// generate a job graph
final JobGraph jobGraph = env.getStreamGraph().getJobGraph();
// Output path comes from --output, defaulting to "job.graph".
File jobGraphFile = new File(params.get("output", "job.graph"));
try (FileOutputStream output = new FileOutputStream(jobGraphFile);
ObjectOutputStream obOutput = new ObjectOutputStream(output)) {
obOutput.writeObject(jobGraph);
}
}
}
|
StreamingNoop
|
java
|
spring-projects__spring-boot
|
module/spring-boot-jdbc/src/main/java/org/springframework/boot/jdbc/health/DataSourceHealthIndicator.java
|
{
"start": 1907,
"end": 5516
}
|
class ____ extends AbstractHealthIndicator implements InitializingBean {
private @Nullable DataSource dataSource;
private @Nullable String query;
private @Nullable JdbcTemplate jdbcTemplate;
/**
* Create a new {@link DataSourceHealthIndicator} instance.
*/
public DataSourceHealthIndicator() {
this(null, null);
}
/**
* Create a new {@link DataSourceHealthIndicator} using the specified
* {@link DataSource}.
* @param dataSource the data source
*/
public DataSourceHealthIndicator(@Nullable DataSource dataSource) {
this(dataSource, null);
}
/**
* Create a new {@link DataSourceHealthIndicator} using the specified
* {@link DataSource} and validation query.
* @param dataSource the data source
* @param query the validation query to use (can be {@code null})
*/
public DataSourceHealthIndicator(@Nullable DataSource dataSource, @Nullable String query) {
super("DataSource health check failed");
this.dataSource = dataSource;
this.query = query;
this.jdbcTemplate = (dataSource != null) ? new JdbcTemplate(dataSource) : null;
}
@Override
public void afterPropertiesSet() throws Exception {
Assert.state(this.dataSource != null, "DataSource for DataSourceHealthIndicator must be specified");
}
@Override
protected void doHealthCheck(Health.Builder builder) throws Exception {
if (this.dataSource == null) {
builder.up().withDetail("database", "unknown");
}
else {
doDataSourceHealthCheck(builder);
}
}
// Runs the actual check: records the database product name, then either
// executes the configured validation query (recording its single-column
// result) or, when no query is set, relies on Connection.isValid(0) to
// decide UP vs DOWN.
private void doDataSourceHealthCheck(Health.Builder builder) {
Assert.state(this.jdbcTemplate != null, "'jdbcTemplate' must not be null");
builder.up().withDetail("database", getProduct(this.jdbcTemplate));
String validationQuery = this.query;
if (StringUtils.hasText(validationQuery)) {
builder.withDetail("validationQuery", validationQuery);
// Avoid calling getObject as it breaks MySQL on Java 7 and later
List<Object> results = this.jdbcTemplate.query(validationQuery, new SingleColumnRowMapper());
// Exactly one row is required; anything else raises a DataAccessException
// which the caller surfaces as a DOWN health status.
Object result = DataAccessUtils.requiredSingleResult(results);
builder.withDetail("result", result);
}
else {
builder.withDetail("validationQuery", "isValid()");
boolean valid = isConnectionValid(this.jdbcTemplate);
builder.status((valid) ? Status.UP : Status.DOWN);
}
}
private String getProduct(JdbcTemplate jdbcTemplate) {
return jdbcTemplate.execute((ConnectionCallback<String>) this::getProduct);
}
private String getProduct(Connection connection) throws SQLException {
return connection.getMetaData().getDatabaseProductName();
}
private Boolean isConnectionValid(JdbcTemplate jdbcTemplate) {
return jdbcTemplate.execute((ConnectionCallback<Boolean>) this::isConnectionValid);
}
private Boolean isConnectionValid(Connection connection) throws SQLException {
return connection.isValid(0);
}
/**
* Set the {@link DataSource} to use.
* @param dataSource the data source
*/
public void setDataSource(DataSource dataSource) {
this.dataSource = dataSource;
this.jdbcTemplate = new JdbcTemplate(dataSource);
}
/**
* Set a specific validation query to use to validate a connection. If none is set, a
* validation based on {@link Connection#isValid(int)} is used.
* @param query the validation query to use
*/
public void setQuery(String query) {
this.query = query;
}
/**
* Return the validation query or {@code null}.
* @return the query
*/
public @Nullable String getQuery() {
return this.query;
}
/**
* {@link RowMapper} that expects and returns results from a single column.
*/
private static final
|
DataSourceHealthIndicator
|
java
|
apache__kafka
|
streams/src/main/java/org/apache/kafka/streams/errors/BrokerNotFoundException.java
|
{
"start": 1145,
"end": 1574
}
|
class ____ extends StreamsException {
private static final long serialVersionUID = 1L;
public BrokerNotFoundException(final String message) {
super(message);
}
public BrokerNotFoundException(final String message, final Throwable throwable) {
super(message, throwable);
}
public BrokerNotFoundException(final Throwable throwable) {
super(throwable);
}
}
|
BrokerNotFoundException
|
java
|
apache__kafka
|
connect/runtime/src/test/resources/test-plugins/multiple-plugins-in-jar/test/plugins/ThingTwo.java
|
{
"start": 1328,
"end": 1771
}
|
// Stub Converter used by plugin-loading tests (one of multiple plugins
// packaged in the same jar). Serialization always yields the fixed bytes
// "Thing two"; deserialization is intentionally unimplemented.
class ____ implements Converter {
@Override
public void configure(final Map<String, ?> configs, final boolean isKey) {
// No configuration needed for this stub.
}
@Override
public byte[] fromConnectData(final String topic, final Schema schema, final Object value) {
// Ignores the input entirely; the constant payload identifies the plugin.
return "Thing two".getBytes(StandardCharsets.UTF_8);
}
@Override
public SchemaAndValue toConnectData(final String topic, final byte[] value) {
// Deserialization is not exercised by the tests.
return null;
}
}
|
ThingTwo
|
java
|
spring-projects__spring-security
|
config/src/test/java/org/springframework/security/config/annotation/web/configurers/saml2/Saml2LogoutConfigurerTests.java
|
{
"start": 30902,
"end": 31539
}
|
// Test configuration: wires a mock LogoutHandler into the logout DSL so the
// tests can verify it is invoked, with SAML 2.0 login/logout at defaults.
class ____ {
LogoutHandler mockLogoutHandler = mock(LogoutHandler.class);
@Bean
SecurityFilterChain web(HttpSecurity http) throws Exception {
// @formatter:off
http
.authorizeHttpRequests((authorize) -> authorize.anyRequest().authenticated())
.logout((logout) -> logout.addLogoutHandler(this.mockLogoutHandler))
.saml2Login(withDefaults())
.saml2Logout(withDefaults());
return http.build();
// @formatter:on
}
// Exposes the same mock as a bean so tests can obtain and verify it.
@Bean
LogoutHandler logoutHandler() {
return this.mockLogoutHandler;
}
}
@Configuration
@EnableWebSecurity
@Import(Saml2LoginConfigBeans.class)
static
|
Saml2LogoutDefaultsConfig
|
java
|
apache__camel
|
core/camel-management/src/main/java/org/apache/camel/management/mbean/StatisticDelta.java
|
{
"start": 902,
"end": 1583
}
|
// Statistic whose reported value is the delta between the two most recent
// updates: getValue() returns current minus previous sample.
class ____ extends Statistic {
private final AtomicLong value = new AtomicLong();
private final AtomicLong lastValue = new AtomicLong();
@Override
public void updateValue(long newValue) {
// Shift the current sample into lastValue, then store the new sample.
// NOTE(review): the two stores are not atomic as a pair — a concurrent
// getValue() between them could observe a transient delta; presumably
// acceptable for management statistics, confirm if strict accuracy matters.
lastValue.set(value.longValue());
value.set(newValue);
}
@Override
public long getValue() {
// The delta since the previous update, not the absolute value.
return value.get() - lastValue.get();
}
@Override
public String toString() {
// NOTE(review): prints the raw current sample, not the delta returned by
// getValue() — confirm this asymmetry is intentional.
return Long.toString(value.get());
}
@Override
public boolean isUpdated() {
// this is okay
return true;
}
@Override
public void reset() {
// Clear both samples so the next delta starts from zero.
value.set(0);
lastValue.set(0);
}
}
|
StatisticDelta
|
java
|
playframework__playframework
|
core/play/src/main/java/play/http/ActionCreator.java
|
{
"start": 271,
"end": 357
}
|
interface ____ creating Java actions from Java methods. */
@FunctionalInterface
public
|
for
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/serializer/FloatTest.java
|
{
"start": 2093,
"end": 2647
}
|
// Simple bean used by float/double serialization tests. Both fields default
// to NaN so the test can exercise how NaN values are (de)serialized.
class ____ {
private float f1 = Float.NaN;
private double f2 = Double.NaN;
public Bean() {
}
public Bean(float f1, double f2) {
this.f1 = f1;
this.f2 = f2;
}
public float getF1() {
return f1;
}
public void setF1(float f1) {
this.f1 = f1;
}
public double getF2() {
return f2;
}
public void setF2(double f2) {
this.f2 = f2;
}
}
}
|
Bean
|
java
|
spring-projects__spring-framework
|
spring-context/src/main/java/org/springframework/jmx/export/assembler/AutodetectCapableMBeanInfoAssembler.java
|
{
"start": 1387,
"end": 1558
}
|
class ____ the bean (might be a proxy class)
* @param beanName the name of the bean in the bean factory
*/
boolean includeBean(Class<?> beanClass, String beanName);
}
|
of
|
java
|
junit-team__junit5
|
junit-jupiter-api/src/main/java/org/junit/jupiter/api/TestInstance.java
|
{
"start": 3179,
"end": 3320
}
|
interface ____ {
/**
* Enumeration of test instance lifecycle <em>modes</em>.
*
* @see #PER_METHOD
* @see #PER_CLASS
*/
|
TestInstance
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/rest/client/reactive/CustomMessageBodyReaderUsesAnnotationsTest.java
|
{
"start": 1781,
"end": 2051
}
|
// Minimal JAX-RS resource used by the REST client test: a single GET that
// returns a fixed Person payload.
class ____ {
@GET
public Person get() {
return new Person("dummy");
}
}
// Payload record exchanged between the test endpoint and the client.
public record Person(String name) {
}
@Documented
@Target({ ElementType.METHOD })
@Retention(RetentionPolicy.RUNTIME)
public @
|
Endpoint
|
java
|
redisson__redisson
|
redisson/src/main/java/org/redisson/api/RBitSetAsync.java
|
{
"start": 775,
"end": 12858
}
|
interface ____ extends RExpirableAsync {
/**
* Returns signed number at specified
* <code>offset</code> and <code>size</code>
*
* @param size - size of signed number up to 64 bits
* @param offset - offset of signed number
* @return signed number
*/
RFuture<Long> getSignedAsync(int size, long offset);
/**
* Returns previous value of signed number and replaces it
* with defined <code>value</code> at specified <code>offset</code>
*
* @param size - size of signed number up to 64 bits
* @param offset - offset of signed number
* @param value - value of signed number
* @return previous value of signed number
*/
RFuture<Long> setSignedAsync(int size, long offset, long value);
/**
* Increments current signed value by
* defined <code>increment</code> value and <code>size</code>
* at specified <code>offset</code>
* and returns result.
*
* @param size - size of signed number up to 64 bits
* @param offset - offset of signed number
* @param increment - increment value
* @return result value
*/
RFuture<Long> incrementAndGetSignedAsync(int size, long offset, long increment);
/**
* Returns unsigned number at specified
* <code>offset</code> and <code>size</code>
*
* @param size - size of unsigned number up to 63 bits
* @param offset - offset of unsigned number
* @return unsigned number
*/
RFuture<Long> getUnsignedAsync(int size, long offset);
/**
* Returns previous value of unsigned number and replaces it
* with defined <code>value</code> at specified <code>offset</code>
*
* @param size - size of unsigned number up to 63 bits
* @param offset - offset of unsigned number
* @param value - value of unsigned number
* @return previous value of unsigned number
*/
RFuture<Long> setUnsignedAsync(int size, long offset, long value);
/**
* Increments current unsigned value by
* defined <code>increment</code> value and <code>size</code>
* at specified <code>offset</code>
* and returns result.
*
* @param size - size of unsigned number up to 63 bits
* @param offset - offset of unsigned number
* @param increment - increment value
* @return result value
*/
RFuture<Long> incrementAndGetUnsignedAsync(int size, long offset, long increment);
/**
* Returns byte number at specified <code>offset</code>
*
* @param offset - offset of number
* @return number
*/
RFuture<Byte> getByteAsync(long offset);
/**
* Returns previous value of byte number and replaces it
* with defined <code>value</code> at specified <code>offset</code>
*
* @param offset - offset of number
* @param value - value of number
* @return previous value of number
*/
RFuture<Byte> setByteAsync(long offset, byte value);
/**
* Increments current byte value on defined <code>increment</code> value at specified <code>offset</code>
* and returns result.
*
* @param offset - offset of number
* @param increment - increment value
* @return result value
*/
RFuture<Byte> incrementAndGetByteAsync(long offset, byte increment);
/**
* Returns short number at specified <code>offset</code>
*
* @param offset - offset of number
* @return number
*/
RFuture<Short> getShortAsync(long offset);
/**
* Returns previous value of short number and replaces it
* with defined <code>value</code> at specified <code>offset</code>
*
* @param offset - offset of number
* @param value - value of number
* @return previous value of number
*/
RFuture<Short> setShortAsync(long offset, short value);
/**
* Increments current short value on defined <code>increment</code> value at specified <code>offset</code>
* and returns result.
*
* @param offset - offset of number
* @param increment - increment value
* @return result value
*/
RFuture<Short> incrementAndGetShortAsync(long offset, short increment);
/**
* Returns integer number at specified <code>offset</code>
*
* @param offset - offset of number
* @return number
*/
RFuture<Integer> getIntegerAsync(long offset);
/**
* Returns previous value of integer number and replaces it
* with defined <code>value</code> at specified <code>offset</code>
*
* @param offset - offset of number
* @param value - value of number
* @return previous value of number
*/
RFuture<Integer> setIntegerAsync(long offset, int value);
/**
* Increments current integer value on defined <code>increment</code> value at specified <code>offset</code>
* and returns result.
*
* @param offset - offset of number
* @param increment - increment value
* @return result value
*/
RFuture<Integer> incrementAndGetIntegerAsync(long offset, int increment);
/**
* Returns long number at specified <code>offset</code>
*
* @param offset - offset of number
* @return number
*/
RFuture<Long> getLongAsync(long offset);
/**
* Returns previous value of long number and replaces it
* with defined <code>value</code> at specified <code>offset</code>
*
* @param offset - offset of number
* @param value - value of number
* @return previous value of number
*/
RFuture<Long> setLongAsync(long offset, long value);
/**
* Increments current long value on defined <code>increment</code> value at specified <code>offset</code>
* and returns result.
*
* @param offset - offset of number
* @param increment - increment value
* @return result value
*/
RFuture<Long> incrementAndGetLongAsync(long offset, long increment);
RFuture<byte[]> toByteArrayAsync();
/**
* Returns "logical size" = index of highest set bit plus one.
* Returns zero if there are no any set bit.
*
* @return "logical size" = index of highest set bit plus one
*/
RFuture<Long> lengthAsync();
/**
* Set all bits to <code>value</code> from <code>fromIndex</code> (inclusive) to <code>toIndex</code> (exclusive)
*
* @param fromIndex inclusive
* @param toIndex exclusive
* @param value true = 1, false = 0
* @return void
*
*/
RFuture<Void> setAsync(long fromIndex, long toIndex, boolean value);
/**
* Set all bits to zero from <code>fromIndex</code> (inclusive) to <code>toIndex</code> (exclusive)
*
* @param fromIndex inclusive
* @param toIndex exclusive
* @return void
*
*/
RFuture<Void> clearAsync(long fromIndex, long toIndex);
/**
* Copy bits state of source BitSet object to this object
*
* @param bs - BitSet source
* @return void
*/
RFuture<Void> setAsync(BitSet bs);
/**
* Executes NOT operation over all bits
*
* @return length in bytes of the destination key
*/
RFuture<Long> notAsync();
/**
* Set all bits to one from <code>fromIndex</code> (inclusive) to <code>toIndex</code> (exclusive)
*
* @param fromIndex inclusive
* @param toIndex exclusive
* @return void
*/
RFuture<Void> setAsync(long fromIndex, long toIndex);
/**
* Returns number of set bits.
*
* @return number of set bits.
*/
RFuture<Long> sizeAsync();
/**
* Returns <code>true</code> if bit set to one and <code>false</code> overwise.
*
* @param bitIndex - index of bit
* @return <code>true</code> if bit set to one and <code>false</code> overwise.
*/
RFuture<Boolean> getAsync(long bitIndex);
/**
* Returns a boolean array where each element of the array corresponds to the query result of the input parameters.
*
* @param bitIndexes indexes of bit
* @return Returns a boolean array where each element of the array corresponds to the query result of the input parameters.
*/
RFuture<boolean[]> getAsync(long... bitIndexes);
/**
* Set bit to one at specified bitIndex
*
* @param bitIndex - index of bit
* @return <code>true</code> - if previous value was true,
* <code>false</code> - if previous value was false
*/
RFuture<Boolean> setAsync(long bitIndex);
/**
* Set bit to <code>value</code> at specified <code>bitIndex</code>
*
* @param bitIndex - index of bit
* @param value true = 1, false = 0
* @return <code>true</code> - if previous value was true,
* <code>false</code> - if previous value was false
*/
RFuture<Boolean> setAsync(long bitIndex, boolean value);
/**
* Set all bits to <code>value</code> which index in indexArray
*
* @param indexArray The index array of bits that needs to be set to <code>value</code>
* @param value true = 1, false = 0
*/
RFuture<Void> setAsync(long[] indexArray, boolean value);
/**
* Returns the number of bits set to one.
*
* @return number of bits
*/
RFuture<Long> cardinalityAsync();
/**
* Set bit to zero at specified <code>bitIndex</code>
*
* @param bitIndex - index of bit
* @return <code>true</code> - if previous value was true,
* <code>false</code> - if previous value was false
*/
RFuture<Boolean> clearAsync(long bitIndex);
/**
* Set all bits to zero
*
* @return void
*/
RFuture<Void> clearAsync();
/**
* Executes OR operation over this object and specified bitsets.
* Stores result into this object.
*
* @param bitSetNames - name of stored bitsets
* @return length in bytes of the destination key
*/
RFuture<Long> orAsync(String... bitSetNames);
/**
* Executes AND operation over this object and specified bitsets.
* Stores result into this object.
*
* @param bitSetNames - name of stored bitsets
* @return length in bytes of the destination key
*/
RFuture<Long> andAsync(String... bitSetNames);
/**
* Executes XOR operation over this object and specified bitsets.
* Stores result into this object.
*
* @param bitSetNames - name of stored bitsets
* @return length in bytes of the destination key
*/
RFuture<Long> xorAsync(String... bitSetNames);
/**
* Executes bitwise DIFF operation over this object and specified bitsets.
* Sets bits that are set in this object but not in any of the other bitsets.
* Stores result into this object.
*
* @param bitSetNames name of stored bitsets
* @return length in bytes of the destination key
*/
RFuture<Long> diffAsync(String... bitSetNames);
/**
* Executes bitwise DIFF1 operation over this object and specified bitsets.
* Sets bits that are set in one or more of the other bitsets but not in this object.
* Stores result into this object.
*
* @param bitSetNames name of stored bitsets
* @return length in bytes of the destination key
*/
RFuture<Long> diffInverseAsync(String... bitSetNames);
/**
* Executes bitwise ANDOR operation over this object and specified bitsets.
* Sets bits that are set in this object AND also in one or more of the other bitsets.
* Stores result into this object.
*
* @param bitSetNames name of stored bitsets
* @return length in bytes of the destination key
*/
RFuture<Long> andOrAsync(String... bitSetNames);
/**
* Executes bitwise ONE operation over this object and specified bitsets.
* Sets bits that are set in exactly one of the provided bitsets.
* Stores result into this object.
*
* @param bitSetNames name of stored bitsets
* @return length in bytes of the destination key
*/
RFuture<Long> setExclusiveAsync(String... bitSetNames);
}
|
RBitSetAsync
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/deser/inject/InjectableWithoutDeser962Test.java
|
{
"start": 551,
"end": 622
}
|
class ____
{
// [databind#962]
static
|
InjectableWithoutDeser962Test
|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/processors/PublishProcessor.java
|
{
"start": 6522,
"end": 13198
}
|
class ____<@NonNull T> extends FlowableProcessor<T> {
/** The terminated indicator for the subscribers array. */
@SuppressWarnings("rawtypes")
static final PublishSubscription[] TERMINATED = new PublishSubscription[0];
/** An empty subscribers array to avoid allocating it all the time. */
@SuppressWarnings("rawtypes")
static final PublishSubscription[] EMPTY = new PublishSubscription[0];
/** The array of currently subscribed subscribers. */
final AtomicReference<PublishSubscription<T>[]> subscribers;
/** The error, write before terminating and read after checking subscribers. */
Throwable error;
/**
* Constructs a PublishProcessor.
* @param <T> the value type
* @return the new PublishProcessor
*/
@CheckReturnValue
@NonNull
public static <T> PublishProcessor<T> create() {
return new PublishProcessor<>();
}
/**
* Constructs a PublishProcessor.
* @since 2.0
*/
@SuppressWarnings("unchecked")
PublishProcessor() {
subscribers = new AtomicReference<>(EMPTY);
}
@Override
protected void subscribeActual(@NonNull Subscriber<? super T> t) {
PublishSubscription<T> ps = new PublishSubscription<>(t, this);
t.onSubscribe(ps);
if (add(ps)) {
// if cancellation happened while a successful add, the remove() didn't work
// so we need to do it again
if (ps.isCancelled()) {
remove(ps);
}
} else {
Throwable ex = error;
if (ex != null) {
t.onError(ex);
} else {
t.onComplete();
}
}
}
/**
* Tries to add the given subscriber to the subscribers array atomically
* or returns false if this processor has terminated.
* @param ps the subscriber to add
* @return true if successful, false if this processor has terminated
*/
boolean add(PublishSubscription<T> ps) {
for (;;) {
PublishSubscription<T>[] a = subscribers.get();
if (a == TERMINATED) {
return false;
}
int n = a.length;
@SuppressWarnings("unchecked")
PublishSubscription<T>[] b = new PublishSubscription[n + 1];
System.arraycopy(a, 0, b, 0, n);
b[n] = ps;
if (subscribers.compareAndSet(a, b)) {
return true;
}
}
}
/**
* Atomically removes the given subscriber if it is subscribed to this processor.
* @param ps the subscription wrapping a subscriber to remove
*/
@SuppressWarnings("unchecked")
void remove(PublishSubscription<T> ps) {
for (;;) {
PublishSubscription<T>[] a = subscribers.get();
if (a == TERMINATED || a == EMPTY) {
return;
}
int n = a.length;
int j = -1;
for (int i = 0; i < n; i++) {
if (a[i] == ps) {
j = i;
break;
}
}
if (j < 0) {
return;
}
PublishSubscription<T>[] b;
if (n == 1) {
b = EMPTY;
} else {
b = new PublishSubscription[n - 1];
System.arraycopy(a, 0, b, 0, j);
System.arraycopy(a, j + 1, b, j, n - j - 1);
}
if (subscribers.compareAndSet(a, b)) {
return;
}
}
}
@Override
public void onSubscribe(@NonNull Subscription s) {
if (subscribers.get() == TERMINATED) {
s.cancel();
return;
}
// PublishProcessor doesn't bother with request coordination.
s.request(Long.MAX_VALUE);
}
@Override
public void onNext(@NonNull T t) {
ExceptionHelper.nullCheck(t, "onNext called with a null value.");
for (PublishSubscription<T> s : subscribers.get()) {
s.onNext(t);
}
}
@SuppressWarnings("unchecked")
@Override
public void onError(@NonNull Throwable t) {
ExceptionHelper.nullCheck(t, "onError called with a null Throwable.");
if (subscribers.get() == TERMINATED) {
RxJavaPlugins.onError(t);
return;
}
error = t;
for (PublishSubscription<T> s : subscribers.getAndSet(TERMINATED)) {
s.onError(t);
}
}
@SuppressWarnings("unchecked")
@Override
public void onComplete() {
if (subscribers.get() == TERMINATED) {
return;
}
for (PublishSubscription<T> s : subscribers.getAndSet(TERMINATED)) {
s.onComplete();
}
}
/**
* Tries to emit the item to all currently subscribed {@link Subscriber}s if all of them
* has requested some value, returns {@code false} otherwise.
* <p>
* This method should be called in a sequential manner just like the {@code onXXX} methods
* of this {@code PublishProcessor}.
* <p>History: 2.0.8 - experimental
* @param t the item to emit, not {@code null}
* @return {@code true} if the item was emitted to all {@code Subscriber}s
* @throws NullPointerException if {@code t} is {@code null}
* @since 2.2
*/
@CheckReturnValue
public boolean offer(@NonNull T t) {
ExceptionHelper.nullCheck(t, "offer called with a null value.");
PublishSubscription<T>[] array = subscribers.get();
for (PublishSubscription<T> s : array) {
if (s.isFull()) {
return false;
}
}
for (PublishSubscription<T> s : array) {
s.onNext(t);
}
return true;
}
@Override
@CheckReturnValue
public boolean hasSubscribers() {
return subscribers.get().length != 0;
}
@Override
@Nullable
@CheckReturnValue
public Throwable getThrowable() {
if (subscribers.get() == TERMINATED) {
return error;
}
return null;
}
@Override
@CheckReturnValue
public boolean hasThrowable() {
return subscribers.get() == TERMINATED && error != null;
}
@Override
@CheckReturnValue
public boolean hasComplete() {
return subscribers.get() == TERMINATED && error == null;
}
/**
* Wraps the actual subscriber, tracks its requests and makes cancellation
* to remove itself from the current subscribers array.
*
* @param <T> the value type
*/
static final
|
PublishProcessor
|
java
|
quarkusio__quarkus
|
extensions/hibernate-orm/runtime/src/main/java/io/quarkus/hibernate/orm/runtime/migration/MultiTenancyStrategy.java
|
{
"start": 563,
"end": 675
}
|
class ____ be removed after we're done migrating to Jakarta APIs and Hibernate ORM v6.
*/
@Deprecated
public
|
should
|
java
|
apache__camel
|
core/camel-core-model/src/test/java/org/apache/camel/model/rest/RestDefinitionTest.java
|
{
"start": 2649,
"end": 2688
}
|
class ____ testing nested types
}
}
|
for
|
java
|
spring-projects__spring-framework
|
spring-beans/src/testFixtures/java/org/springframework/beans/testfixture/beans/factory/generator/BeanFactoryInitializer.java
|
{
"start": 804,
"end": 912
}
|
interface ____ {
void initializeBeanFactory(DefaultListableBeanFactory beanFactory);
}
|
BeanFactoryInitializer
|
java
|
apache__camel
|
dsl/camel-yaml-dsl/camel-yaml-dsl-deserializers/src/generated/java/org/apache/camel/dsl/yaml/deserializers/ModelDeserializers.java
|
{
"start": 420562,
"end": 425742
}
|
class ____ extends YamlDeserializerBase<HL7DataFormat> {
public HL7DataFormatDeserializer() {
super(HL7DataFormat.class);
}
@Override
protected HL7DataFormat newInstance() {
return new HL7DataFormat();
}
@Override
protected boolean setProperty(HL7DataFormat target, String propertyKey, String propertyName,
Node node) {
propertyKey = org.apache.camel.util.StringHelper.dashToCamelCase(propertyKey);
switch(propertyKey) {
case "id": {
String val = asText(node);
target.setId(val);
break;
}
case "parser": {
String val = asText(node);
target.setParser(val);
break;
}
case "validate": {
String val = asText(node);
target.setValidate(val);
break;
}
default: {
return false;
}
}
return true;
}
}
@YamlType(
nodes = "head",
types = org.apache.camel.model.rest.HeadDefinition.class,
order = org.apache.camel.dsl.yaml.common.YamlDeserializerResolver.ORDER_LOWEST - 1,
displayName = "Head",
description = "Rest HEAD command",
deprecated = false,
properties = {
@YamlProperty(name = "apiDocs", type = "boolean", defaultValue = "true", description = "Whether to include or exclude this rest operation in API documentation. The default value is true.", displayName = "Api Docs"),
@YamlProperty(name = "bindingMode", type = "enum:off,auto,json,xml,json_xml", defaultValue = "off", description = "Sets the binding mode to use. This option will override what may be configured on a parent level The default value is off", displayName = "Binding Mode"),
@YamlProperty(name = "clientRequestValidation", type = "boolean", defaultValue = "false", description = "Whether to enable validation of the client request to check: 1) Content-Type header matches what the Rest DSL consumes; returns HTTP Status 415 if validation error. 2) Accept header matches what the Rest DSL produces; returns HTTP Status 406 if validation error. 3) Missing required data (query parameters, HTTP headers, body); returns HTTP Status 400 if validation error. 4) Parsing error of the message body (JSon, XML or Auto binding mode must be enabled); returns HTTP Status 400 if validation error.", displayName = "Client Request Validation"),
@YamlProperty(name = "clientResponseValidation", type = "boolean", defaultValue = "false", description = "Whether to check what Camel is returning as response to the client: 1) Status-code and Content-Type matches Rest DSL response messages. 2) Check whether expected headers is included according to the Rest DSL repose message headers. 3) If the response body is JSon then check whether its valid JSon. Returns 500 if validation error detected.", displayName = "Client Response Validation"),
@YamlProperty(name = "consumes", type = "string", description = "To define the content type what the REST service consumes (accept as input), such as application/xml or application/json. This option will override what may be configured on a parent level", displayName = "Consumes"),
@YamlProperty(name = "deprecated", type = "boolean", defaultValue = "false", description = "Marks this rest operation as deprecated in OpenApi documentation.", displayName = "Deprecated"),
@YamlProperty(name = "description", type = "string", description = "Sets the description of this node", displayName = "Description"),
@YamlProperty(name = "disabled", type = "boolean", defaultValue = "false", description = "Whether to disable this REST service from the route during build time. Once an REST service has been disabled then it cannot be enabled later at runtime.", displayName = "Disabled"),
@YamlProperty(name = "enableCORS", type = "boolean", defaultValue = "false", description = "Whether to enable CORS headers in the HTTP response. This option will override what may be configured on a parent level The default value is false.", displayName = "Enable CORS"),
@YamlProperty(name = "enableNoContentResponse", type = "boolean", defaultValue = "false", description = "Whether to return HTTP 204 with an empty body when a response contains an empty JSON object or XML root object. The default value is false.", displayName = "Enable No Content Response"),
@YamlProperty(name = "id", type = "string", description = "Sets the id of this node", displayName = "Id"),
@YamlProperty(name = "note", type = "string", description = "Sets the note of this node", displayName = "Note"),
@YamlProperty(name = "outType", type = "string", description = "Sets the
|
HL7DataFormatDeserializer
|
java
|
quarkusio__quarkus
|
integration-tests/opentelemetry-scheduler/src/main/java/io/quarkus/it/opentelemetry/scheduler/FailedJobDefinitionScheduler.java
|
{
"start": 283,
"end": 765
}
|
class ____ {
@Inject
Scheduler scheduler;
@PostConstruct
void init() {
scheduler.newJob("myFailedJobDefinition").setCron("*/1 * * * * ?").setTask(ex -> {
try {
Thread.sleep(100l);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
throw new RuntimeException("error occurred in myFailedJobDefinition.");
}).schedule();
}
}
|
FailedJobDefinitionScheduler
|
java
|
apache__camel
|
components/camel-undertow/src/test/java/org/apache/camel/component/undertow/rest/RestUndertowProducerGetUriParameterTest.java
|
{
"start": 1066,
"end": 2325
}
|
class ____ extends BaseUndertowTest {
@Test
public void testUndertowProducerGet() {
String out = fluentTemplate.withHeader("id", "123").to("direct:start").request(String.class);
assertEquals("123;Donald Duck", out);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
// configure to use undertow on localhost with the given port
restConfiguration().component("undertow").host("localhost").port(getPort());
from("direct:start")
.to("rest:get:users/basic?id={id}");
// use the rest DSL to define the rest services
rest("/users/")
.get("basic/?id={id}")
.to("direct:basic");
from("direct:basic")
.to("mock:input")
.process(exchange -> {
String id = exchange.getIn().getHeader("id", String.class);
exchange.getMessage().setBody(id + ";Donald Duck");
});
}
};
}
}
|
RestUndertowProducerGetUriParameterTest
|
java
|
spring-projects__spring-security
|
oauth2/oauth2-jose/src/test/java/org/springframework/security/oauth2/jwt/JwtTypeValidatorTests.java
|
{
"start": 771,
"end": 1975
}
|
class ____ {
@Test
void constructorWhenJwtThenRequiresJwtOrEmpty() {
Jwt.Builder jwt = TestJwts.jwt();
JwtTypeValidator validator = JwtTypeValidator.jwt();
assertThat(validator.validate(jwt.build()).hasErrors()).isFalse();
jwt.header(JoseHeaderNames.TYP, "JWT");
assertThat(validator.validate(jwt.build()).hasErrors()).isFalse();
jwt.header(JoseHeaderNames.TYP, "at+jwt");
assertThat(validator.validate(jwt.build()).hasErrors()).isTrue();
}
@Test
void constructorWhenCustomThenEnforces() {
Jwt.Builder jwt = TestJwts.jwt();
JwtTypeValidator validator = new JwtTypeValidator("JOSE");
assertThat(validator.validate(jwt.build()).hasErrors()).isTrue();
jwt.header(JoseHeaderNames.TYP, "JWT");
assertThat(validator.validate(jwt.build()).hasErrors()).isTrue();
jwt.header(JoseHeaderNames.TYP, "JOSE");
assertThat(validator.validate(jwt.build()).hasErrors()).isFalse();
}
@Test
void validateWhenTypHeaderHasDifferentCaseThenSuccess() {
Jwt.Builder jwt = TestJwts.jwt();
JwtTypeValidator validator = new JwtTypeValidator("at+jwt");
jwt.header(JoseHeaderNames.TYP, "AT+JWT");
assertThat(validator.validate(jwt.build()).hasErrors()).isFalse();
}
}
|
JwtTypeValidatorTests
|
java
|
alibaba__nacos
|
client/src/test/java/com/alibaba/nacos/client/config/http/MetricsHttpAgentTest.java
|
{
"start": 1049,
"end": 3349
}
|
class ____ {
@Test
void testGetter() {
String name = "name";
String encode = "UTF-8";
String tenant = "aaa";
String namespace = "aaa";
final HttpAgent mockHttpAgent = new MockHttpAgent(name, encode, tenant, namespace);
final MetricsHttpAgent metricsHttpAgent = new MetricsHttpAgent(mockHttpAgent);
assertEquals(name, metricsHttpAgent.getName());
assertEquals(encode, metricsHttpAgent.getEncode());
assertEquals(tenant, metricsHttpAgent.getTenant());
assertEquals(namespace, metricsHttpAgent.getNamespace());
}
@Test
void testLifeCycle() throws NacosException {
String name = "name";
String encode = "UTF-8";
String tenant = "aaa";
String namespace = "aaa";
final MockHttpAgent mockHttpAgent = new MockHttpAgent(name, encode, tenant, namespace);
final MetricsHttpAgent metricsHttpAgent = new MetricsHttpAgent(mockHttpAgent);
metricsHttpAgent.start();
assertTrue(mockHttpAgent.isStart());
metricsHttpAgent.shutdown();
assertTrue(mockHttpAgent.isShutdown());
}
@Test
void testHttpMethod() throws Exception {
String name = "name";
String encode = "UTF-8";
String tenant = "aaa";
String namespace = "aaa";
final MockHttpAgent mockHttpAgent = new MockHttpAgent(name, encode, tenant, namespace);
final MetricsHttpAgent metricsHttpAgent = new MetricsHttpAgent(mockHttpAgent);
final HttpRestResult<String> result1 = metricsHttpAgent.httpGet("/aa", new HashMap<String, String>(),
new HashMap<String, String>(), "UTF-8", 1L);
assertEquals("get /aa", result1.getMessage());
final HttpRestResult<String> result2 = metricsHttpAgent.httpPost("/aa", new HashMap<String, String>(),
new HashMap<String, String>(), "UTF-8", 1L);
assertEquals("post /aa", result2.getMessage());
final HttpRestResult<String> result3 = metricsHttpAgent.httpDelete("/aa", new HashMap<String, String>(),
new HashMap<String, String>(), "UTF-8", 1L);
assertEquals("delete /aa", result3.getMessage());
}
private static
|
MetricsHttpAgentTest
|
java
|
apache__camel
|
components/camel-reactive-streams/src/main/java/org/apache/camel/component/reactive/streams/ReactiveStreamsConstants.java
|
{
"start": 983,
"end": 1957
}
|
class ____ {
public static final String SCHEME = "reactive-streams";
public static final String SERVICE_PATH = "META-INF/services/org/apache/camel/reactive-streams/";
public static final String DEFAULT_SERVICE_NAME = "default-service";
/**
* Every exchange consumed by Camel has this header set to indicate if the exchange contains an item
* (value="onNext"), an error (value="onError") or a completion event (value="onComplete"). Errors and completion
* notification are not forwarded by default.
*/
@Metadata(label = "consumer", javaType = "String")
public static final String REACTIVE_STREAMS_EVENT_TYPE = "CamelReactiveStreamsEventType";
@Metadata(description = "The callback.", javaType = "org.apache.camel.component.reactive.streams.api.DispatchCallback")
public static final String REACTIVE_STREAMS_CALLBACK = "CamelReactiveStreamsCallback";
private ReactiveStreamsConstants() {
}
}
|
ReactiveStreamsConstants
|
java
|
spring-projects__spring-boot
|
documentation/spring-boot-docs/src/main/java/org/springframework/boot/docs/features/logging/structured/otherformats/MyCustomFormat.java
|
{
"start": 833,
"end": 1080
}
|
class ____ implements StructuredLogFormatter<ILoggingEvent> {
@Override
public String format(ILoggingEvent event) {
return "time=" + event.getInstant() + " level=" + event.getLevel() + " message=" + event.getMessage() + "\n";
}
}
|
MyCustomFormat
|
java
|
google__gson
|
gson/src/test/java/com/google/gson/common/TestTypes.java
|
{
"start": 5474,
"end": 6191
}
|
class ____ {
private final Long longValue;
private final Integer intValue;
private final Boolean booleanValue;
public BagOfPrimitiveWrappers(Long longValue, Integer intValue, Boolean booleanValue) {
this.longValue = longValue;
this.intValue = intValue;
this.booleanValue = booleanValue;
}
public String getExpectedJson() {
StringBuilder sb = new StringBuilder();
sb.append("{");
sb.append("\"longValue\":").append(longValue).append(",");
sb.append("\"intValue\":").append(intValue).append(",");
sb.append("\"booleanValue\":").append(booleanValue);
sb.append("}");
return sb.toString();
}
}
public static
|
BagOfPrimitiveWrappers
|
java
|
spring-projects__spring-framework
|
spring-aop/src/main/java/org/springframework/aop/aspectj/annotation/AbstractAspectJAdvisorFactory.java
|
{
"start": 1707,
"end": 1857
}
|
class ____ factories that can create Spring AOP Advisors
* given AspectJ classes from classes honoring the AspectJ 5 annotation syntax.
*
* <p>This
|
for
|
java
|
micronaut-projects__micronaut-core
|
test-suite/src/test/java/io/micronaut/docs/ioc/validation/PersonServiceSpec.java
|
{
"start": 1026,
"end": 1442
}
|
class ____ {
@Inject PersonService personService;
@Test
void testThatNameIsValidated() {
final ConstraintViolationException exception =
assertThrows(ConstraintViolationException.class, () ->
personService.sayHello("") // <1>
);
assertEquals("sayHello.name: must not be blank", exception.getMessage()); // <2>
}
}
// end::test[]
|
PersonServiceSpec
|
java
|
elastic__elasticsearch
|
x-pack/plugin/eql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/eql/SecurityUtils.java
|
{
"start": 774,
"end": 1399
}
|
class ____ {
static Settings secureClientSettings() {
String token = basicAuthHeaderValue("test-admin", new SecureString("x-pack-test-password".toCharArray()));
return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build();
}
static void setRunAsHeader(Request request, String user) {
final RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder();
builder.addHeader(RUN_AS_USER_HEADER, user);
request.setOptions(builder);
}
static void setUserRole(Request request) {
setRunAsHeader(request, "user1");
}
}
|
SecurityUtils
|
java
|
quarkusio__quarkus
|
extensions/hibernate-orm/runtime/src/main/java/io/quarkus/hibernate/orm/runtime/tenant/HibernateCurrentTenantIdentifierResolver.java
|
{
"start": 2736,
"end": 2929
}
|
interface ____ allow resolving the current tenant identifier.",
TenantResolver.class.getSimpleName(), persistenceUnitName));
}
return instance.get();
}
}
|
to
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/parser/RedundantTest.java
|
{
"start": 351,
"end": 969
}
|
class ____ extends TestCase {
public void test_extra() throws Exception {
ExtraProcessor processor = new ExtraProcessor() {
public void processExtra(Object object, String key, Object value) {
VO vo = (VO) object;
vo.getAttributes().put(key, value);
}
};
VO vo = JSON.parseObject("{\"id\":123,\"name\":\"abc\"}", VO.class, processor);
Assert.assertEquals(123, vo.getId());
Assert.assertEquals("abc", vo.getAttributes().get("name"));
}
public void test_extraWithType() throws Exception {
|
RedundantTest
|
java
|
redisson__redisson
|
redisson/src/main/java/org/redisson/api/stream/StreamReadGroupArgs.java
|
{
"start": 815,
"end": 1993
}
|
interface ____ {
/**
* Defines avoid of adding messages to Pending Entries List.
*
* @return arguments object
*/
StreamReadGroupArgs noAck();
/**
* Defines stream data size limit.
*
* @param count stream data size limit
* @return arguments object
*/
StreamReadGroupArgs count(int count);
/**
* Defines time interval to wait for stream data availability.
* <code>0</code> is used to wait infinitely.
*
* @param timeout timeout duration
* @return arguments object
*/
StreamReadGroupArgs timeout(Duration timeout);
/**
* Defines to return messages of current Stream
* never delivered to any other consumer.
*
* @return arguments object
*/
static StreamReadGroupArgs neverDelivered() {
return greaterThan(null);
}
/**
* Defines to return messages of current Stream
* with ids greater than defined message id.
*
* @param id message id
* @return arguments object
*/
static StreamReadGroupArgs greaterThan(StreamMessageId id) {
return new StreamReadGroupParams(id);
}
}
|
StreamReadGroupArgs
|
java
|
spring-projects__spring-framework
|
spring-test/src/main/java/org/springframework/test/context/TestContextManager.java
|
{
"start": 5347,
"end": 8629
}
|
class ____ be managed
* @see #TestContextManager(TestContextBootstrapper)
*/
public TestContextManager(Class<?> testClass) {
this(BootstrapUtils.resolveTestContextBootstrapper(testClass));
}
/**
* Construct a new {@code TestContextManager} using the supplied {@link TestContextBootstrapper}
* and {@linkplain #registerTestExecutionListeners register} the necessary
* {@link TestExecutionListener TestExecutionListeners}.
* <p>Delegates to the supplied {@code TestContextBootstrapper} for building
* the {@code TestContext} and retrieving the {@code TestExecutionListeners}.
* @param testContextBootstrapper the bootstrapper to use
* @since 4.2
* @see TestContextBootstrapper#buildTestContext
* @see TestContextBootstrapper#getTestExecutionListeners
* @see #registerTestExecutionListeners
*/
public TestContextManager(TestContextBootstrapper testContextBootstrapper) {
this.testContext = testContextBootstrapper.buildTestContext();
this.testContextHolder = ThreadLocal.withInitial(() -> copyTestContext(this.testContext));
registerTestExecutionListeners(testContextBootstrapper.getTestExecutionListeners());
}
/**
* Get the {@link TestContext} managed by this {@code TestContextManager}.
*/
public final TestContext getTestContext() {
return this.testContextHolder.get();
}
/**
* Register the supplied list of {@link TestExecutionListener TestExecutionListeners}
* by appending them to the list of listeners used by this {@code TestContextManager}.
* @see #registerTestExecutionListeners(TestExecutionListener...)
*/
public void registerTestExecutionListeners(List<TestExecutionListener> testExecutionListeners) {
registerTestExecutionListeners(testExecutionListeners.toArray(new TestExecutionListener[0]));
}
/**
* Register the supplied array of {@link TestExecutionListener TestExecutionListeners}
* by appending them to the list of listeners used by this {@code TestContextManager}.
*/
public void registerTestExecutionListeners(TestExecutionListener... testExecutionListeners) {
for (TestExecutionListener listener : testExecutionListeners) {
if (logger.isTraceEnabled()) {
logger.trace("Registering TestExecutionListener: " + typeName(listener));
}
this.testExecutionListeners.add(listener);
}
}
/**
* Get the current {@link TestExecutionListener TestExecutionListeners}
* registered for this {@code TestContextManager}.
* <p>Allows for modifications, for example, adding a listener to the beginning of the list.
* However, make sure to keep the list stable while actually executing tests.
*/
public final List<TestExecutionListener> getTestExecutionListeners() {
return this.testExecutionListeners;
}
/**
* Get a copy of the {@link TestExecutionListener TestExecutionListeners}
* registered for this {@code TestContextManager} in reverse order.
*/
private List<TestExecutionListener> getReversedTestExecutionListeners() {
List<TestExecutionListener> listenersReversed = new ArrayList<>(getTestExecutionListeners());
Collections.reverse(listenersReversed);
return listenersReversed;
}
/**
* Hook for pre-processing a test class <em>before</em> execution of any
* tests within the class. Should be called prior to any framework-specific
* <em>before
|
to
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AllLastTests.java
|
{
"start": 717,
"end": 1202
}
|
class ____ extends AbstractFirstLastTestCase {
public AllLastTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
this.testCase = testCaseSupplier.get();
}
@ParametersFactory
public static Iterable<Object[]> parameters() {
return parameters(false, true);
}
@Override
protected Expression build(Source source, List<Expression> args) {
return new AllLast(source, args.get(0), args.get(1));
}
}
|
AllLastTests
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.