language stringclasses 1
value | repo stringclasses 60
values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/aggregations/support/MissingValues.java | {
"start": 1069,
"end": 1198
} | class ____ allows to return views of {@link ValuesSource}s that
* replace the missing value with a configured value.
*/
public | that |
java | apache__flink | flink-metrics/flink-metrics-core/src/test/java/org/apache/flink/metrics/util/MetricReporterTestUtils.java | {
"start": 1246,
"end": 1498
} | class ____ be loaded by the {@link
* ServiceLoader}.
*
* <p>Essentially, this verifies that the {@code
* META-INF/services/org.apache.flink.metrics.reporter.MetricReporterFactory} file exists and
* contains the expected factory | can |
java | elastic__elasticsearch | libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java | {
"start": 848,
"end": 15575
} | class ____ extends PosixNativeAccess {
private static final int STATX_BLOCKS = 0x400; /* Want/got stx_blocks */
/** the preferred method is seccomp(2), since we can apply to all threads of the process */
static final int SECCOMP_SET_MODE_FILTER = 1; // since Linux 3.17
static final int SECCOMP_FILTER_FLAG_TSYNC = 1; // since Linux 3.17
/** otherwise, we can use prctl(2), which will at least protect ES application threads */
static final int PR_GET_NO_NEW_PRIVS = 39; // since Linux 3.5
static final int PR_SET_NO_NEW_PRIVS = 38; // since Linux 3.5
static final int PR_GET_SECCOMP = 21; // since Linux 2.6.23
static final int PR_SET_SECCOMP = 22; // since Linux 2.6.23
static final long SECCOMP_MODE_FILTER = 2; // since Linux Linux 3.5
// BPF "macros" and constants
static final int BPF_LD = 0x00;
static final int BPF_W = 0x00;
static final int BPF_ABS = 0x20;
static final int BPF_JMP = 0x05;
static final int BPF_JEQ = 0x10;
static final int BPF_JGE = 0x30;
static final int BPF_JGT = 0x20;
static final int BPF_RET = 0x06;
static final int BPF_K = 0x00;
static SockFilter BPF_STMT(int code, int k) {
return new SockFilter((short) code, (byte) 0, (byte) 0, k);
}
static SockFilter BPF_JUMP(int code, int k, int jt, int jf) {
return new SockFilter((short) code, (byte) jt, (byte) jf, k);
}
static final int SECCOMP_RET_ERRNO = 0x00050000;
static final int SECCOMP_RET_DATA = 0x0000FFFF;
static final int SECCOMP_RET_ALLOW = 0x7FFF0000;
// some errno constants for error checking/handling
static final int EACCES = 0x0D;
static final int EFAULT = 0x0E;
static final int EINVAL = 0x16;
static final int ENOSYS = 0x26;
// offsets that our BPF checks
// check with offsetof() when adding a new arch, move to Arch if different.
static final int SECCOMP_DATA_NR_OFFSET = 0x00;
static final int SECCOMP_DATA_ARCH_OFFSET = 0x04;
record Arch(
int audit, // AUDIT_ARCH_XXX constant from linux/audit.h
int limit, // syscall limit (necessary for blacklisting on amd64, to ban 32-bit syscalls)
int fork, // __NR_fork
int vfork, // __NR_vfork
int execve, // __NR_execve
int execveat, // __NR_execveat
int seccomp // __NR_seccomp
) {}
/** supported architectures for seccomp keyed by os.arch */
private static final Map<String, Arch> ARCHITECTURES;
static {
ARCHITECTURES = Map.of(
"amd64",
new Arch(0xC000003E, 0x3FFFFFFF, 57, 58, 59, 322, 317),
"aarch64",
new Arch(0xC00000B7, 0xFFFFFFFF, 1079, 1071, 221, 281, 277)
);
}
private final LinuxCLibrary linuxLibc;
private final Systemd systemd;
LinuxNativeAccess(NativeLibraryProvider libraryProvider) {
super("Linux", libraryProvider, new PosixConstants(-1L, 9, 1, 8, 64, 144, 48, 64));
this.linuxLibc = libraryProvider.getLibrary(LinuxCLibrary.class);
String socketPath = System.getenv("NOTIFY_SOCKET");
if (socketPath == null) {
this.systemd = null; // not running under systemd
} else {
logger.debug("Systemd socket path: {}", socketPath);
var buffer = newSharedBuffer(64);
this.systemd = new Systemd(libraryProvider.getLibrary(PosixCLibrary.class), socketPath, buffer);
}
}
@Override
protected long getMaxThreads() {
// this is only valid on Linux and the value *is* different on OS X
// see /usr/include/sys/resource.h on OS X
// on Linux the resource RLIMIT_NPROC means *the number of threads*
// this is in opposition to BSD-derived OSes
final int rlimit_nproc = 6;
return getRLimit(rlimit_nproc, "max number of threads");
}
@Override
public Systemd systemd() {
return systemd;
}
@Override
protected void logMemoryLimitInstructions() {
// give specific instructions for the linux case to make it easy
String user = System.getProperty("user.name");
logger.warn("""
These can be adjusted by modifying /etc/security/limits.conf, for example:
\t# allow user '{}' mlockall
\t{} soft memlock unlimited
\t{} hard memlock unlimited""", user, user, user);
logger.warn("If you are logged in interactively, you will have to re-login for the new limits to take effect.");
}
@Override
protected boolean nativePreallocate(int fd, long currentSize, long newSize) {
final int rc = linuxLibc.fallocate(fd, 0, currentSize, newSize - currentSize);
if (rc != 0) {
logger.warn("fallocate failed: " + libc.strerror(libc.errno()));
return false;
}
return true;
}
/**
* Installs exec system call filtering for Linux.
* <p>
* On Linux exec system call filtering currently supports amd64 and aarch64 architectures.
* It requires Linux kernel 3.5 or above, and {@code CONFIG_SECCOMP} and {@code CONFIG_SECCOMP_FILTER}
* compiled into the kernel.
* <p>
* On Linux BPF Filters are installed using either {@code seccomp(2)} (3.17+) or {@code prctl(2)} (3.5+). {@code seccomp(2)}
* is preferred, as it allows filters to be applied to any existing threads in the process, and one motivation
* here is to protect against bugs in the JVM. Otherwise, code will fall back to the {@code prctl(2)} method
* which will at least protect elasticsearch application threads.
* <p>
* Linux BPF filters will return {@code EACCES} (Access Denied) for the following system calls:
* <ul>
* <li>{@code execve}</li>
* <li>{@code fork}</li>
* <li>{@code vfork}</li>
* <li>{@code execveat}</li>
* </ul>
* @see <a href="http://www.kernel.org/doc/Documentation/prctl/seccomp_filter.txt">
* * http://www.kernel.org/doc/Documentation/prctl/seccomp_filter.txt</a>
*/
@Override
public void tryInstallExecSandbox() {
// first be defensive: we can give nice errors this way, at the very least.
// also, some of these security features get backported to old versions, checking kernel version here is a big no-no!
String archId = System.getProperty("os.arch");
final Arch arch = ARCHITECTURES.get(archId);
if (arch == null) {
throw new UnsupportedOperationException("seccomp unavailable: '" + archId + "' architecture unsupported");
}
// try to check system calls really are who they claim
// you never know (e.g. https://chromium.googlesource.com/chromium/src.git/+/master/sandbox/linux/seccomp-bpf/sandbox_bpf.cc#57)
final int bogusArg = 0xf7a46a5c;
// test seccomp(BOGUS)
long ret = linuxLibc.syscall(arch.seccomp, bogusArg, 0, 0);
if (ret != -1) {
throw new UnsupportedOperationException("seccomp unavailable: seccomp(BOGUS_OPERATION) returned " + ret);
} else {
int errno = libc.errno();
switch (errno) {
case ENOSYS:
break; // ok
case EINVAL:
break; // ok
default:
throw new UnsupportedOperationException("seccomp(BOGUS_OPERATION): " + libc.strerror(errno));
}
}
// test seccomp(VALID, BOGUS)
ret = linuxLibc.syscall(arch.seccomp, SECCOMP_SET_MODE_FILTER, bogusArg, 0);
if (ret != -1) {
throw new UnsupportedOperationException("seccomp unavailable: seccomp(SECCOMP_SET_MODE_FILTER, BOGUS_FLAG) returned " + ret);
} else {
int errno = libc.errno();
switch (errno) {
case ENOSYS:
break; // ok
case EINVAL:
break; // ok
default:
throw new UnsupportedOperationException("seccomp(SECCOMP_SET_MODE_FILTER, BOGUS_FLAG): " + libc.strerror(errno));
}
}
// test prctl(BOGUS)
ret = linuxLibc.prctl(bogusArg, 0, 0, 0, 0);
if (ret != -1) {
throw new UnsupportedOperationException("seccomp unavailable: prctl(BOGUS_OPTION) returned " + ret);
} else {
int errno = libc.errno();
switch (errno) {
case ENOSYS:
break; // ok
case EINVAL:
break; // ok
default:
throw new UnsupportedOperationException("prctl(BOGUS_OPTION): " + libc.strerror(errno));
}
}
// now just normal defensive checks
// check for GET_NO_NEW_PRIVS
switch (linuxLibc.prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0)) {
case 0:
break; // not yet set
case 1:
break; // already set by caller
default:
int errno = libc.errno();
if (errno == EINVAL) {
// friendly error, this will be the typical case for an old kernel
throw new UnsupportedOperationException(
"seccomp unavailable: requires kernel 3.5+ with" + " CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER compiled in"
);
} else {
throw new UnsupportedOperationException("prctl(PR_GET_NO_NEW_PRIVS): " + libc.strerror(errno));
}
}
// check for SECCOMP
switch (linuxLibc.prctl(PR_GET_SECCOMP, 0, 0, 0, 0)) {
case 0:
break; // not yet set
case 2:
break; // already in filter mode by caller
default:
int errno = libc.errno();
if (errno == EINVAL) {
throw new UnsupportedOperationException(
"seccomp unavailable: CONFIG_SECCOMP not compiled into kernel,"
+ " CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER are needed"
);
} else {
throw new UnsupportedOperationException("prctl(PR_GET_SECCOMP): " + libc.strerror(errno));
}
}
// check for SECCOMP_MODE_FILTER
if (linuxLibc.prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, 0, 0, 0) != 0) {
int errno = libc.errno();
switch (errno) {
case EFAULT:
break; // available
case EINVAL:
throw new UnsupportedOperationException(
"seccomp unavailable: CONFIG_SECCOMP_FILTER not"
+ " compiled into kernel, CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER are needed"
);
default:
throw new UnsupportedOperationException("prctl(PR_SET_SECCOMP): " + libc.strerror(errno));
}
}
// ok, now set PR_SET_NO_NEW_PRIVS, needed to be able to set a seccomp filter as ordinary user
if (linuxLibc.prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) != 0) {
throw new UnsupportedOperationException("prctl(PR_SET_NO_NEW_PRIVS): " + libc.strerror(libc.errno()));
}
// check it worked
if (linuxLibc.prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0) != 1) {
throw new UnsupportedOperationException(
"seccomp filter did not really succeed: prctl(PR_GET_NO_NEW_PRIVS): " + libc.strerror(libc.errno())
);
}
// BPF installed to check arch, limit, then syscall.
// See https://www.kernel.org/doc/Documentation/prctl/seccomp_filter.txt for details.
SockFilter insns[] = {
/* 1 */ BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_ARCH_OFFSET), //
/* 2 */ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, arch.audit, 0, 7), // if (arch != audit) goto fail;
/* 3 */ BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_NR_OFFSET), //
/* 4 */ BPF_JUMP(BPF_JMP + BPF_JGT + BPF_K, arch.limit, 5, 0), // if (syscall > LIMIT) goto fail;
/* 5 */ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, arch.fork, 4, 0), // if (syscall == FORK) goto fail;
/* 6 */ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, arch.vfork, 3, 0), // if (syscall == VFORK) goto fail;
/* 7 */ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, arch.execve, 2, 0), // if (syscall == EXECVE) goto fail;
/* 8 */ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, arch.execveat, 1, 0), // if (syscall == EXECVEAT) goto fail;
/* 9 */ BPF_STMT(BPF_RET + BPF_K, SECCOMP_RET_ALLOW), // pass: return OK;
/* 10 */ BPF_STMT(BPF_RET + BPF_K, SECCOMP_RET_ERRNO | (EACCES & SECCOMP_RET_DATA)), // fail: return EACCES;
};
// seccomp takes a long, so we pass it one explicitly to keep the JNA simple
SockFProg prog = linuxLibc.newSockFProg(insns);
int method = 1;
// install filter, if this works, after this there is no going back!
// first try it with seccomp(SECCOMP_SET_MODE_FILTER), falling back to prctl()
if (linuxLibc.syscall(arch.seccomp, SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, prog.address()) != 0) {
method = 0;
int errno1 = libc.errno();
if (logger.isDebugEnabled()) {
logger.debug("seccomp(SECCOMP_SET_MODE_FILTER): {}, falling back to prctl(PR_SET_SECCOMP)...", libc.strerror(errno1));
}
if (linuxLibc.prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, prog.address(), 0, 0) != 0) {
int errno2 = libc.errno();
throw new UnsupportedOperationException(
"seccomp(SECCOMP_SET_MODE_FILTER): " + libc.strerror(errno1) + ", prctl(PR_SET_SECCOMP): " + libc.strerror(errno2)
);
}
}
// now check that the filter was really installed, we should be in filter mode.
if (linuxLibc.prctl(PR_GET_SECCOMP, 0, 0, 0, 0) != 2) {
throw new UnsupportedOperationException(
"seccomp filter installation did not really succeed. seccomp(PR_GET_SECCOMP): " + libc.strerror(libc.errno())
);
}
logger.debug("Linux seccomp filter installation successful, threads: [{}]", method == 1 ? "all" : "app");
execSandboxState = method == 1 ? ExecSandboxState.ALL_THREADS : ExecSandboxState.EXISTING_THREADS;
}
}
| LinuxNativeAccess |
java | apache__hadoop | hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStreamStatistics.java | {
"start": 1185,
"end": 6547
} | class ____ extends AbstractAbfsIntegrationTest {
public ITestAbfsStreamStatistics() throws Exception {
}
private static final Logger LOG =
LoggerFactory.getLogger(ITestAbfsStreamStatistics.class);
private static final int LARGE_NUMBER_OF_OPS = 99;
/***
* Testing {@code incrementReadOps()} in class {@code AbfsInputStream} and
* {@code incrementWriteOps()} in class {@code AbfsOutputStream}.
*
*/
@Test
public void testAbfsStreamOps() throws Exception {
describe("Test to see correct population of read and write operations in "
+ "Abfs");
final AzureBlobFileSystem fs = getFileSystem();
Path smallOperationsFile = path("testOneReadWriteOps");
Path largeOperationsFile = path("testLargeReadWriteOps");
FileSystem.Statistics statistics = fs.getFsStatistics();
String testReadWriteOps = "test this";
statistics.reset();
//Test for zero write operation
assertReadWriteOps("write", 0, statistics.getWriteOps());
//Test for zero read operation
assertReadWriteOps("read", 0, statistics.getReadOps());
FSDataOutputStream outForOneOperation = null;
FSDataInputStream inForOneOperation = null;
try {
outForOneOperation = fs.create(smallOperationsFile);
statistics.reset();
outForOneOperation.write(testReadWriteOps.getBytes());
//Test for a single write operation
assertReadWriteOps("write", 1, statistics.getWriteOps());
//Flushing output stream to see content to read
outForOneOperation.hflush();
inForOneOperation = fs.open(smallOperationsFile);
statistics.reset();
int result = inForOneOperation.read(testReadWriteOps.getBytes(), 0,
testReadWriteOps.getBytes().length);
LOG.info("Result of Read operation : {}", result);
/*
* Testing if 2 read_ops value is coming after reading full content
* from a file (3 if anything to read from Buffer too). Reason: read()
* call gives read_ops=1, reading from AbfsClient(http GET) gives
* read_ops=2.
*
* In some cases ABFS-prefetch thread runs in the background which
* returns some bytes from buffer and gives an extra readOp.
* Thus, making readOps values arbitrary and giving intermittent
* failures in some cases. Hence, readOps values of 2 or 3 is seen in
* different setups.
*
*/
assertTrue(
statistics.getReadOps() == 2 || statistics.getReadOps() == 3, String.format("The actual value of %d was not equal to the "
+ "expected value of 2 or 3", statistics.getReadOps()));
} finally {
IOUtils.cleanupWithLogger(LOG, inForOneOperation,
outForOneOperation);
}
//Validating if content is being written in the smallOperationsFile
assertTrue(
validateContent(fs, smallOperationsFile,
testReadWriteOps.getBytes()), "Mismatch in content validation");
FSDataOutputStream outForLargeOperations = null;
FSDataInputStream inForLargeOperations = null;
StringBuilder largeOperationsValidationString = new StringBuilder();
try {
outForLargeOperations = fs.create(largeOperationsFile);
statistics.reset();
int largeValue = LARGE_NUMBER_OF_OPS;
for (int i = 0; i < largeValue; i++) {
outForLargeOperations.write(testReadWriteOps.getBytes());
//Creating the String for content Validation
largeOperationsValidationString.append(testReadWriteOps);
}
LOG.info("Number of bytes of Large data written: {}",
largeOperationsValidationString.toString().getBytes().length);
//Test for 1000000 write operations
assertReadWriteOps("write", largeValue, statistics.getWriteOps());
inForLargeOperations = fs.open(largeOperationsFile);
for (int i = 0; i < largeValue; i++) {
inForLargeOperations
.read(testReadWriteOps.getBytes(), 0,
testReadWriteOps.getBytes().length);
}
if (fs.getAbfsStore().isAppendBlobKey(fs.makeQualified(largeOperationsFile).toString())) {
// for appendblob data is already flushed, so there might be more data to read.
assertTrue(
statistics.getReadOps() >= largeValue || statistics.getReadOps() <= (largeValue + 4), String.format("The actual value of %d was not equal to the "
+ "expected value", statistics.getReadOps()));
} else {
//Test for 1000000 read operations
assertReadWriteOps("read", largeValue, statistics.getReadOps());
}
} finally {
IOUtils.cleanupWithLogger(LOG, inForLargeOperations,
outForLargeOperations);
}
//Validating if content is being written in largeOperationsFile
assertTrue(
validateContent(fs, largeOperationsFile,
largeOperationsValidationString.toString().getBytes()), "Mismatch in content validation");
}
/**
* Generic method to assert both Read an write operations.
*
* @param operation what operation is being asserted
* @param expectedValue value which is expected
* @param actualValue value which is actual
*/
private void assertReadWriteOps(String operation, long expectedValue,
long actualValue) {
assertEquals(expectedValue, actualValue, "Mismatch in " + operation + " operations");
}
}
| ITestAbfsStreamStatistics |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/channel/ChannelStateSerializer.java | {
"start": 4953,
"end": 7872
} | class ____ implements ChannelStateSerializer {
private static final int SERIALIZATION_VERSION = 0;
@Override
public void writeHeader(DataOutputStream dataStream) throws IOException {
dataStream.writeInt(SERIALIZATION_VERSION);
}
@Override
public void writeData(DataOutputStream stream, Buffer... flinkBuffers) throws IOException {
stream.writeInt(getSize(flinkBuffers));
for (Buffer buffer : flinkBuffers) {
ByteBuf nettyByteBuf = buffer.asByteBuf();
nettyByteBuf.getBytes(nettyByteBuf.readerIndex(), stream, nettyByteBuf.readableBytes());
}
}
private int getSize(Buffer[] buffers) {
int len = 0;
for (Buffer buffer : buffers) {
len = addExact(len, buffer.readableBytes());
}
return len;
}
@Override
public void readHeader(InputStream stream) throws IOException {
int version = readInt(stream);
Preconditions.checkArgument(
version == SERIALIZATION_VERSION, "unsupported version: " + version);
}
@Override
public int readLength(InputStream stream) throws IOException {
int len = readInt(stream);
Preconditions.checkArgument(len >= 0, "negative state size");
return len;
}
@Override
public int readData(InputStream stream, ChannelStateByteBuffer buffer, int bytes)
throws IOException {
return buffer.writeBytes(stream, bytes);
}
private static int readInt(InputStream stream) throws IOException {
return new DataInputStream(stream).readInt();
}
@Override
public byte[] extractAndMerge(byte[] bytes, List<Long> offsets) throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
DataOutputStream dataOutputStream = new DataOutputStream(out);
byte[] merged = extractByOffsets(bytes, offsets);
writeHeader(dataOutputStream);
dataOutputStream.writeInt(merged.length);
dataOutputStream.write(merged, 0, merged.length);
dataOutputStream.close();
return out.toByteArray();
}
private byte[] extractByOffsets(byte[] data, List<Long> offsets) throws IOException {
DataInputStream lengthReadingStream =
new DataInputStream(new ByteArrayInputStream(data, 0, data.length));
ByteArrayOutputStream out = new ByteArrayOutputStream();
long prevOffset = 0;
for (long offset : offsets) {
lengthReadingStream.skipBytes((int) (offset - prevOffset));
int dataWithLengthOffset = (int) offset + Integer.BYTES;
out.write(data, dataWithLengthOffset, lengthReadingStream.readInt());
prevOffset = dataWithLengthOffset;
}
return out.toByteArray();
}
@Override
public long getHeaderLength() {
return Integer.BYTES;
}
}
| ChannelStateSerializerImpl |
java | apache__hadoop | hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ABFSContractTestBinding.java | {
"start": 1393,
"end": 2721
} | class ____ extends AbstractAbfsIntegrationTest {
private final URI testUri;
public ABFSContractTestBinding() throws Exception {
this(true);
}
public ABFSContractTestBinding(
final boolean useExistingFileSystem) throws Exception{
if (useExistingFileSystem) {
AbfsConfiguration configuration = getConfiguration();
String testUrl = configuration.get(TestConfigurationKeys.FS_AZURE_CONTRACT_TEST_URI);
assumeThat(testUrl)
.as("Contract tests are skipped because of missing config property :"
+ TestConfigurationKeys.FS_AZURE_CONTRACT_TEST_URI)
.isNotNull();
if (getAuthType() != AuthType.SharedKey) {
testUrl = testUrl.replaceFirst(FileSystemUriSchemes.ABFS_SCHEME, FileSystemUriSchemes.ABFS_SECURE_SCHEME);
}
setTestUrl(testUrl);
this.testUri = new URI(testUrl);
//Get container for contract tests
configuration.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, this.testUri.toString());
String[] splitAuthority = this.testUri.getAuthority().split("\\@");
setFileSystemName(splitAuthority[0]);
} else {
this.testUri = new URI(super.getTestUrl());
}
}
public boolean isSecureMode() {
return this.getAuthType() == AuthType.SharedKey ? false : true;
}
}
| ABFSContractTestBinding |
java | elastic__elasticsearch | x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/CustomDateFormatIT.java | {
"start": 466,
"end": 735
} | class ____ extends CustomDateFormatTestCase {
@ClassRule
public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster();
@Override
protected String getTestRestCluster() {
return cluster.getHttpAddresses();
}
}
| CustomDateFormatIT |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/query/criteria/CriteriaMutationQueryFkValuesTest.java | {
"start": 4279,
"end": 4425
} | class ____ {
@Id
private Integer id;
public A() {
}
public A(Integer id) {
this.id = id;
}
}
@Entity( name = "BEntity" )
static | A |
java | google__guice | extensions/assistedinject/test/com/google/inject/assistedinject/ExtensionSpiTest.java | {
"start": 6541,
"end": 6791
} | class ____ implements Animal {
@Inject
public ExplodingCat(
@Named("catName1") String name,
@Assisted String owner,
@Named("age") Integer age,
@Named("petName") String petName) {}
}
private static | ExplodingCat |
java | apache__camel | components/camel-netty-http/src/test/java/org/apache/camel/component/netty/http/rest/RestNettyHttpBindingModeJsonTest.java | {
"start": 1287,
"end": 3128
} | class ____ extends BaseNettyTest {
@Test
public void testBindingMode() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:input");
mock.expectedMessageCount(1);
mock.message(0).body().isInstanceOf(UserJaxbPojo.class);
String body = "{\"id\": 123, \"name\": \"Donald Duck\"}";
template.sendBody("netty-http:http://localhost:" + getPort() + "/users/new", body);
MockEndpoint.assertIsSatisfied(context);
UserJaxbPojo user = mock.getReceivedExchanges().get(0).getIn().getBody(UserJaxbPojo.class);
assertNotNull(user);
assertEquals(123, user.getId());
assertEquals("Donald Duck", user.getName());
}
@Test
public void testBindingModeWrong() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:input");
mock.expectedMessageCount(0);
// we bind to json, but send in xml, which is not possible
String body = "<user name=\"Donald Duck\" id=\"123\"></user>";
try {
template.sendBody("netty-http:http://localhost:" + getPort() + "/users/new", body);
fail("Should have thrown exception");
} catch (Exception e) {
// expected
}
MockEndpoint.assertIsSatisfied(context);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
restConfiguration().component("netty-http").host("localhost").port(getPort()).bindingMode(RestBindingMode.json);
// use the rest DSL to define the rest services
rest("/users/")
.post("new").type(UserJaxbPojo.class)
.to("mock:input");
}
};
}
}
| RestNettyHttpBindingModeJsonTest |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/format/number/money/MoneyFormattingTests.java | {
"start": 8268,
"end": 8531
} | class ____ {
@NumberFormat(pattern = "#000.000#")
private MonetaryAmount amount;
public MonetaryAmount getAmount() {
return amount;
}
public void setAmount(MonetaryAmount amount) {
this.amount = amount;
}
}
public static | FormattedMoneyHolder4 |
java | hibernate__hibernate-orm | hibernate-vector/src/test/java/org/hibernate/vector/SparseFloatVectorTest.java | {
"start": 10317,
"end": 10917
} | class ____ {
@Id
private Long id;
@Column( name = "the_vector" )
@JdbcTypeCode(SqlTypes.SPARSE_VECTOR_FLOAT32)
@Array(length = 3)
private SparseFloatVector theVector;
public VectorEntity() {
}
public VectorEntity(Long id, SparseFloatVector theVector) {
this.id = id;
this.theVector = theVector;
}
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public SparseFloatVector getTheVector() {
return theVector;
}
public void setTheVector(SparseFloatVector theVector) {
this.theVector = theVector;
}
}
}
| VectorEntity |
java | apache__camel | components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/spring/processor/aggregator/SpringAggregationStrategyBeanAdapterRefTest.java | {
"start": 1080,
"end": 1427
} | class ____ extends AggregationStrategyBeanAdapterRefTest {
@Override
protected CamelContext createCamelContext() throws Exception {
return createSpringCamelContext(this,
"org/apache/camel/spring/processor/aggregator/SpringAggregationStrategyBeanAdapterRefTest.xml");
}
}
| SpringAggregationStrategyBeanAdapterRefTest |
java | apache__camel | core/camel-api/src/main/java/org/apache/camel/spi/BeanProxyFactory.java | {
"start": 1018,
"end": 1854
} | interface ____ {
/**
* Service factory key.
*/
String FACTORY = "bean-proxy-factory";
/**
* Creates a proxy bean facade with the interfaces that when invoked will send the data as a message to a Camel
* endpoint.
*
* @param endpoint the endpoint to send to when the proxy is invoked
* @param binding whether to use bean parameter binding which would be needed if invoking a bean method
* with multiple parameters
* @param interfaceClasses the interface(s) to use as bean facade
* @throws Exception is thrown if error creating the proxy
* @return the created bean proxy
*/
<T> T createProxy(Endpoint endpoint, boolean binding, Class<T>... interfaceClasses) throws Exception;
}
| BeanProxyFactory |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/tests/sample/beans/BeanWithObjectProperty.java | {
"start": 738,
"end": 915
} | class ____ {
private Object object;
public Object getObject() {
return object;
}
public void setObject(Object object) {
this.object = object;
}
}
| BeanWithObjectProperty |
java | elastic__elasticsearch | modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentProcessor.java | {
"start": 1401,
"end": 6919
} | class ____ extends AbstractProcessor {
public static final String TYPE = "user_agent";
private final String field;
private final String targetField;
private final Set<Property> properties;
private final UserAgentParser parser;
private final boolean extractDeviceType;
private final boolean ignoreMissing;
public UserAgentProcessor(
String tag,
String description,
String field,
String targetField,
UserAgentParser parser,
Set<Property> properties,
boolean extractDeviceType,
boolean ignoreMissing
) {
super(tag, description);
this.field = field;
this.targetField = targetField;
this.parser = parser;
this.properties = properties;
this.extractDeviceType = extractDeviceType;
this.ignoreMissing = ignoreMissing;
}
boolean isExtractDeviceType() {
return extractDeviceType;
}
boolean isIgnoreMissing() {
return ignoreMissing;
}
@Override
public IngestDocument execute(IngestDocument ingestDocument) {
String userAgent = ingestDocument.getFieldValue(field, String.class, ignoreMissing);
if (userAgent == null && ignoreMissing) {
return ingestDocument;
} else if (userAgent == null) {
throw new IllegalArgumentException("field [" + field + "] is null, cannot parse user-agent.");
}
Details uaClient = parser.parse(userAgent, extractDeviceType);
Map<String, Object> uaDetails = new HashMap<>();
// Parse the user agent in the ECS (Elastic Common Schema) format
for (Property property : this.properties) {
switch (property) {
case ORIGINAL:
uaDetails.put("original", userAgent);
break;
case NAME:
if (uaClient.userAgent() != null && uaClient.userAgent().name() != null) {
uaDetails.put("name", uaClient.userAgent().name());
} else {
uaDetails.put("name", "Other");
}
break;
case VERSION:
if (uaClient.userAgent() != null && uaClient.userAgent().major() != null) {
uaDetails.put("version", versionToString(uaClient.userAgent()));
}
break;
case OS:
if (uaClient.operatingSystem() != null) {
Map<String, String> osDetails = Maps.newMapWithExpectedSize(3);
if (uaClient.operatingSystem().name() != null) {
osDetails.put("name", uaClient.operatingSystem().name());
if (uaClient.operatingSystem().major() != null) {
String version = versionToString(uaClient.operatingSystem());
osDetails.put("version", version);
osDetails.put("full", uaClient.operatingSystem().name() + " " + version);
}
uaDetails.put("os", osDetails);
}
}
break;
case DEVICE:
Map<String, String> deviceDetails = Maps.newMapWithExpectedSize(1);
if (uaClient.device() != null && uaClient.device().name() != null) {
deviceDetails.put("name", uaClient.device().name());
if (extractDeviceType) {
deviceDetails.put("type", uaClient.deviceType());
}
} else {
deviceDetails.put("name", "Other");
if (extractDeviceType) {
if (uaClient.deviceType() != null) {
deviceDetails.put("type", uaClient.deviceType());
} else {
deviceDetails.put("type", "Other");
}
}
}
uaDetails.put("device", deviceDetails);
break;
}
}
ingestDocument.setFieldValue(targetField, uaDetails);
return ingestDocument;
}
private static String versionToString(final UserAgentParser.VersionedName version) {
final StringBuilder versionString = new StringBuilder();
if (Strings.hasLength(version.major())) {
versionString.append(version.major());
if (Strings.hasLength(version.minor())) {
versionString.append(".").append(version.minor());
if (Strings.hasLength(version.patch())) {
versionString.append(".").append(version.patch());
if (Strings.hasLength(version.build())) {
versionString.append(".").append(version.build());
}
}
}
}
return versionString.toString();
}
@Override
public String getType() {
return TYPE;
}
String getField() {
return field;
}
String getTargetField() {
return targetField;
}
Set<Property> getProperties() {
return properties;
}
UserAgentParser getUaParser() {
return parser;
}
public static final | UserAgentProcessor |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/GuavaEventBusEndpointBuilderFactory.java | {
"start": 5463,
"end": 11343
} | interface ____
extends
EndpointConsumerBuilder {
default GuavaEventBusEndpointConsumerBuilder basic() {
return (GuavaEventBusEndpointConsumerBuilder) this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedGuavaEventBusEndpointConsumerBuilder bridgeErrorHandler(boolean bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedGuavaEventBusEndpointConsumerBuilder bridgeErrorHandler(String bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option is a: <code>org.apache.camel.spi.ExceptionHandler</code>
* type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedGuavaEventBusEndpointConsumerBuilder exceptionHandler(org.apache.camel.spi.ExceptionHandler exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option will be converted to a
* <code>org.apache.camel.spi.ExceptionHandler</code> type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedGuavaEventBusEndpointConsumerBuilder exceptionHandler(String exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option is a: <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedGuavaEventBusEndpointConsumerBuilder exchangePattern(org.apache.camel.ExchangePattern exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option will be converted to a
* <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedGuavaEventBusEndpointConsumerBuilder exchangePattern(String exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
}
/**
* Builder for endpoint producers for the Guava EventBus component.
*/
public | AdvancedGuavaEventBusEndpointConsumerBuilder |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/pattern/AbstractStyleNameConverter.java | {
"start": 3572,
"end": 4455
} | class ____ extends AbstractStyleNameConverter {
/** Blue */
protected static final String NAME = "blue";
/**
* Constructs the converter. This constructor must be public.
*
* @param formatters The PatternFormatters to generate the text to manipulate.
* @param styling The styling that should encapsulate the pattern.
*/
public Blue(final List<PatternFormatter> formatters, final String styling) {
super(NAME, formatters, styling);
}
/**
* Gets an instance of the class (called via reflection).
*
* @param config The current Configuration.
* @param options The pattern options, may be null. If the first element is "short", only the first line of the
* throwable will be formatted.
* @return new instance of | Blue |
java | apache__kafka | server/src/test/java/org/apache/kafka/server/EligibleLeaderReplicasIntegrationTest.java | {
"start": 3462,
"end": 22104
} | class ____ {
private final ClusterInstance clusterInstance;
EligibleLeaderReplicasIntegrationTest(ClusterInstance clusterInstance) {
this.clusterInstance = clusterInstance;
}
@ClusterTest(types = {Type.KRAFT}, metadataVersion = MetadataVersion.IBP_4_0_IV1)
public void testHighWatermarkShouldNotAdvanceIfUnderMinIsr() throws ExecutionException, InterruptedException {
try (var admin = clusterInstance.admin();
var producer = clusterInstance.producer(Map.of(
ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName(),
ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName(),
ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers(),
ProducerConfig.ACKS_CONFIG, "1"));
var consumer = clusterInstance.consumer(Map.of(
ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers(),
ConsumerConfig.GROUP_ID_CONFIG, "test",
ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "10",
ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest",
ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName(),
ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName()))) {
String testTopicName = String.format("%s-%s", "testHighWatermarkShouldNotAdvanceIfUnderMinIsr", "ELR-test");
admin.updateFeatures(
Map.of(EligibleLeaderReplicasVersion.FEATURE_NAME,
new FeatureUpdate(EligibleLeaderReplicasVersion.ELRV_1.featureLevel(), FeatureUpdate.UpgradeType.UPGRADE)),
new UpdateFeaturesOptions()).all().get();
admin.createTopics(List.of(new NewTopic(testTopicName, 1, (short) 4))).all().get();
clusterInstance.waitTopicCreation(testTopicName, 1);
ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, testTopicName);
Collection<AlterConfigOp> ops = new ArrayList<>();
ops.add(new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "3"), AlterConfigOp.OpType.SET));
Map<ConfigResource, Collection<AlterConfigOp>> configOps = Map.of(configResource, ops);
// alter configs on target cluster
admin.incrementalAlterConfigs(configOps).all().get();
TopicDescription testTopicDescription = admin.describeTopics(List.of(testTopicName))
.allTopicNames().get().get(testTopicName);
TopicPartitionInfo topicPartitionInfo = testTopicDescription.partitions().get(0);
List<Node> initialReplicas = topicPartitionInfo.replicas();
assertEquals(4, topicPartitionInfo.isr().size());
assertEquals(0, topicPartitionInfo.elr().size());
assertEquals(0, topicPartitionInfo.lastKnownElr().size());
consumer.subscribe(Set.of(testTopicName));
producer.send(new ProducerRecord<>(testTopicName, "0", "0")).get();
waitUntilOneMessageIsConsumed(consumer);
clusterInstance.shutdownBroker(initialReplicas.get(0).id());
clusterInstance.shutdownBroker(initialReplicas.get(1).id());
waitForIsrAndElr((isrSize, elrSize) -> isrSize == 2 && elrSize == 1, admin, testTopicName);
TopicPartition partition = new TopicPartition(testTopicName, 0);
long leoBeforeSend = admin.listOffsets(Map.of(partition, OffsetSpec.latest())).partitionResult(partition).get().offset();
// Now the partition is under min ISR. HWM should not advance.
producer.send(new ProducerRecord<>(testTopicName, "1", "1")).get();
long leoAfterSend = admin.listOffsets(Map.of(partition, OffsetSpec.latest())).partitionResult(partition).get().offset();
assertEquals(leoBeforeSend, leoAfterSend);
// Restore the min ISR and the previous log should be visible.
clusterInstance.startBroker(initialReplicas.get(1).id());
clusterInstance.startBroker(initialReplicas.get(0).id());
waitForIsrAndElr((isrSize, elrSize) -> isrSize == 4 && elrSize == 0, admin, testTopicName);
waitUntilOneMessageIsConsumed(consumer);
}
}
void waitUntilOneMessageIsConsumed(Consumer<?, ?> consumer) throws InterruptedException {
TestUtils.waitForCondition(
() -> {
try {
return consumer.poll(Duration.ofMillis(100L)).count() >= 1;
} catch (Exception e) {
return false;
}
},
DEFAULT_MAX_WAIT_MS,
() -> "fail to consume messages"
);
}
@ClusterTest(types = {Type.KRAFT}, metadataVersion = MetadataVersion.IBP_4_0_IV1)
public void testElrMemberCanBeElected() throws ExecutionException, InterruptedException {
try (var admin = clusterInstance.admin()) {
String testTopicName = String.format("%s-%s", "testElrMemberCanBeElected", "ELR-test");
admin.updateFeatures(
Map.of(EligibleLeaderReplicasVersion.FEATURE_NAME,
new FeatureUpdate(EligibleLeaderReplicasVersion.ELRV_1.featureLevel(), FeatureUpdate.UpgradeType.UPGRADE)),
new UpdateFeaturesOptions()).all().get();
admin.createTopics(List.of(new NewTopic(testTopicName, 1, (short) 4))).all().get();
clusterInstance.waitTopicCreation(testTopicName, 1);
ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, testTopicName);
Collection<AlterConfigOp> ops = new ArrayList<>();
ops.add(new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "3"), AlterConfigOp.OpType.SET));
Map<ConfigResource, Collection<AlterConfigOp>> configOps = Map.of(configResource, ops);
// alter configs on target cluster
admin.incrementalAlterConfigs(configOps).all().get();
TopicDescription testTopicDescription = admin.describeTopics(List.of(testTopicName))
.allTopicNames().get().get(testTopicName);
TopicPartitionInfo topicPartitionInfo = testTopicDescription.partitions().get(0);
List<Node> initialReplicas = topicPartitionInfo.replicas();
assertEquals(4, topicPartitionInfo.isr().size());
assertEquals(0, topicPartitionInfo.elr().size());
assertEquals(0, topicPartitionInfo.lastKnownElr().size());
clusterInstance.shutdownBroker(initialReplicas.get(0).id());
clusterInstance.shutdownBroker(initialReplicas.get(1).id());
clusterInstance.shutdownBroker(initialReplicas.get(2).id());
waitForIsrAndElr((isrSize, elrSize) -> isrSize == 1 && elrSize == 2, admin, testTopicName);
clusterInstance.shutdownBroker(initialReplicas.get(3).id());
waitForIsrAndElr((isrSize, elrSize) -> isrSize == 0 && elrSize == 3, admin, testTopicName);
topicPartitionInfo = admin.describeTopics(List.of(testTopicName))
.allTopicNames().get().get(testTopicName).partitions().get(0);
assertEquals(1, topicPartitionInfo.lastKnownElr().size(), topicPartitionInfo.toString());
int expectLastKnownLeader = initialReplicas.get(3).id();
assertEquals(expectLastKnownLeader, topicPartitionInfo.lastKnownElr().get(0).id(), topicPartitionInfo.toString());
// At this point, all the replicas are failed and the last know leader is No.3 and 3 members in the ELR.
// Restart one broker of the ELR and it should be the leader.
int expectLeader = topicPartitionInfo.elr().stream()
.filter(node -> node.id() != expectLastKnownLeader).toList().get(0).id();
clusterInstance.startBroker(expectLeader);
waitForIsrAndElr((isrSize, elrSize) -> isrSize == 1 && elrSize == 2, admin, testTopicName);
topicPartitionInfo = admin.describeTopics(List.of(testTopicName))
.allTopicNames().get().get(testTopicName).partitions().get(0);
assertEquals(0, topicPartitionInfo.lastKnownElr().size(), topicPartitionInfo.toString());
assertEquals(expectLeader, topicPartitionInfo.leader().id(), topicPartitionInfo.toString());
// Start another 2 brokers and the ELR fields should be cleaned.
topicPartitionInfo.replicas().stream().filter(node -> node.id() != expectLeader).limit(2)
.forEach(node -> clusterInstance.startBroker(node.id()));
waitForIsrAndElr((isrSize, elrSize) -> isrSize == 3 && elrSize == 0, admin, testTopicName);
topicPartitionInfo = admin.describeTopics(List.of(testTopicName))
.allTopicNames().get().get(testTopicName).partitions().get(0);
assertEquals(0, topicPartitionInfo.lastKnownElr().size(), topicPartitionInfo.toString());
assertEquals(expectLeader, topicPartitionInfo.leader().id(), topicPartitionInfo.toString());
}
}
@ClusterTest(types = {Type.KRAFT}, metadataVersion = MetadataVersion.IBP_4_0_IV1)
public void testElrMemberShouldBeKickOutWhenUncleanShutdown() throws ExecutionException, InterruptedException {
try (var admin = clusterInstance.admin()) {
String testTopicName = String.format("%s-%s", "testElrMemberShouldBeKickOutWhenUncleanShutdown", "ELR-test");
admin.updateFeatures(
Map.of(EligibleLeaderReplicasVersion.FEATURE_NAME,
new FeatureUpdate(EligibleLeaderReplicasVersion.ELRV_1.featureLevel(), FeatureUpdate.UpgradeType.UPGRADE)),
new UpdateFeaturesOptions()).all().get();
admin.createTopics(List.of(new NewTopic(testTopicName, 1, (short) 4))).all().get();
clusterInstance.waitTopicCreation(testTopicName, 1);
ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, testTopicName);
Collection<AlterConfigOp> ops = new ArrayList<>();
ops.add(new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "3"), AlterConfigOp.OpType.SET));
Map<ConfigResource, Collection<AlterConfigOp>> configOps = Map.of(configResource, ops);
// alter configs on target cluster
admin.incrementalAlterConfigs(configOps).all().get();
TopicDescription testTopicDescription = admin.describeTopics(List.of(testTopicName))
.allTopicNames().get().get(testTopicName);
TopicPartitionInfo topicPartitionInfo = testTopicDescription.partitions().get(0);
List<Node> initialReplicas = topicPartitionInfo.replicas();
assertEquals(4, topicPartitionInfo.isr().size());
assertEquals(0, topicPartitionInfo.elr().size());
assertEquals(0, topicPartitionInfo.lastKnownElr().size());
clusterInstance.shutdownBroker(initialReplicas.get(0).id());
clusterInstance.shutdownBroker(initialReplicas.get(1).id());
clusterInstance.shutdownBroker(initialReplicas.get(2).id());
clusterInstance.shutdownBroker(initialReplicas.get(3).id());
waitForIsrAndElr((isrSize, elrSize) -> isrSize == 0 && elrSize == 3, admin, testTopicName);
topicPartitionInfo = admin.describeTopics(List.of(testTopicName))
.allTopicNames().get().get(testTopicName).partitions().get(0);
int brokerToBeUncleanShutdown = topicPartitionInfo.elr().get(0).id();
var broker = clusterInstance.brokers().values().stream().filter(b -> b.config().brokerId() == brokerToBeUncleanShutdown)
.findFirst().get();
List<File> dirs = new ArrayList<>();
broker.logManager().liveLogDirs().foreach(dirs::add);
assertEquals(1, dirs.size());
CleanShutdownFileHandler handler = new CleanShutdownFileHandler(dirs.get(0).toString());
assertTrue(handler.exists());
assertDoesNotThrow(handler::delete);
// After remove the clean shutdown file, the broker should report unclean shutdown during restart.
clusterInstance.startBroker(brokerToBeUncleanShutdown);
waitForIsrAndElr((isrSize, elrSize) -> isrSize == 0 && elrSize == 2, admin, testTopicName);
topicPartitionInfo = admin.describeTopics(List.of(testTopicName))
.allTopicNames().get().get(testTopicName).partitions().get(0);
assertNull(topicPartitionInfo.leader());
assertEquals(1, topicPartitionInfo.lastKnownElr().size());
}
}
/*
This test is only valid for KIP-966 part 1. When the unclean recovery is implemented, it should be removed.
*/
@ClusterTest(types = {Type.KRAFT}, metadataVersion = MetadataVersion.IBP_4_0_IV1)
public void testLastKnownLeaderShouldBeElectedIfEmptyElr() throws ExecutionException, InterruptedException {
try (var admin = clusterInstance.admin()) {
String testTopicName = String.format("%s-%s", "testLastKnownLeaderShouldBeElectedIfEmptyElr", "ELR-test");
admin.updateFeatures(
Map.of(EligibleLeaderReplicasVersion.FEATURE_NAME,
new FeatureUpdate(EligibleLeaderReplicasVersion.ELRV_1.featureLevel(), FeatureUpdate.UpgradeType.UPGRADE)),
new UpdateFeaturesOptions()).all().get();
admin.createTopics(List.of(new NewTopic(testTopicName, 1, (short) 4))).all().get();
clusterInstance.waitTopicCreation(testTopicName, 1);
ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, testTopicName);
Collection<AlterConfigOp> ops = new ArrayList<>();
ops.add(new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "3"), AlterConfigOp.OpType.SET));
Map<ConfigResource, Collection<AlterConfigOp>> configOps = Map.of(configResource, ops);
// alter configs on target cluster
admin.incrementalAlterConfigs(configOps).all().get();
TopicDescription testTopicDescription = admin.describeTopics(List.of(testTopicName))
.allTopicNames().get().get(testTopicName);
TopicPartitionInfo topicPartitionInfo = testTopicDescription.partitions().get(0);
List<Node> initialReplicas = topicPartitionInfo.replicas();
assertEquals(4, topicPartitionInfo.isr().size());
assertEquals(0, topicPartitionInfo.elr().size());
assertEquals(0, topicPartitionInfo.lastKnownElr().size());
clusterInstance.shutdownBroker(initialReplicas.get(0).id());
clusterInstance.shutdownBroker(initialReplicas.get(1).id());
clusterInstance.shutdownBroker(initialReplicas.get(2).id());
clusterInstance.shutdownBroker(initialReplicas.get(3).id());
waitForIsrAndElr((isrSize, elrSize) -> isrSize == 0 && elrSize == 3, admin, testTopicName);
topicPartitionInfo = admin.describeTopics(List.of(testTopicName))
.allTopicNames().get().get(testTopicName).partitions().get(0);
int lastKnownLeader = topicPartitionInfo.lastKnownElr().get(0).id();
Set<Integer> initialReplicaSet = initialReplicas.stream().map(node -> node.id()).collect(Collectors.toSet());
clusterInstance.brokers().forEach((id, broker) -> {
if (initialReplicaSet.contains(id)) {
List<File> dirs = new ArrayList<>();
broker.logManager().liveLogDirs().foreach(dirs::add);
assertEquals(1, dirs.size());
CleanShutdownFileHandler handler = new CleanShutdownFileHandler(dirs.get(0).toString());
assertDoesNotThrow(handler::delete);
}
});
// After remove the clean shutdown file, the broker should report unclean shutdown during restart.
topicPartitionInfo.replicas().forEach(replica -> {
if (replica.id() != lastKnownLeader) clusterInstance.startBroker(replica.id());
});
waitForIsrAndElr((isrSize, elrSize) -> isrSize == 0 && elrSize == 1, admin, testTopicName);
topicPartitionInfo = admin.describeTopics(List.of(testTopicName))
.allTopicNames().get().get(testTopicName).partitions().get(0);
assertNull(topicPartitionInfo.leader());
assertEquals(1, topicPartitionInfo.lastKnownElr().size());
// Now if the last known leader goes through unclean shutdown, it will still be elected.
clusterInstance.startBroker(lastKnownLeader);
waitForIsrAndElr((isrSize, elrSize) -> isrSize > 0 && elrSize == 0, admin, testTopicName);
TestUtils.waitForCondition(
() -> {
try {
TopicPartitionInfo partition = admin.describeTopics(List.of(testTopicName))
.allTopicNames().get().get(testTopicName).partitions().get(0);
if (partition.leader() == null) return false;
return partition.lastKnownElr().isEmpty() && partition.elr().isEmpty() && partition.leader().id() == lastKnownLeader;
} catch (Exception e) {
return false;
}
},
DEFAULT_MAX_WAIT_MS,
() -> String.format("Partition metadata for %s is not correct", testTopicName)
);
}
}
void waitForIsrAndElr(BiFunction<Integer, Integer, Boolean> isIsrAndElrSizeSatisfied, Admin admin, String testTopicName) throws InterruptedException {
TestUtils.waitForCondition(
() -> {
try {
TopicDescription topicDescription = admin.describeTopics(List.of(testTopicName))
.allTopicNames().get().get(testTopicName);
TopicPartitionInfo partition = topicDescription.partitions().get(0);
return isIsrAndElrSizeSatisfied.apply(partition.isr().size(), partition.elr().size());
} catch (Exception e) {
return false;
}
},
DEFAULT_MAX_WAIT_MS,
() -> String.format("Partition metadata for %s is not propagated", testTopicName)
);
}
}
| EligibleLeaderReplicasIntegrationTest |
java | apache__maven | its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng1144MultipleDefaultGoalsTest.java | {
"start": 1040,
"end": 1759
} | class ____ extends AbstractMavenIntegrationTestCase {
/**
* Test that multiple goals can be specified as default goal using whitespace as delimiter.
*
* @throws Exception in case of failure
*/
@Test
public void testit() throws Exception {
File testDir = extractResources("/mng-1144");
Verifier verifier = newVerifier(testDir.getAbsolutePath());
verifier.setAutoclean(false);
verifier.deleteDirectory("target");
verifier.execute();
verifier.verifyErrorFreeLog();
verifier.verifyFilePresent("target/clean-clean.txt");
verifier.verifyFilePresent("target/resources-resources.txt");
}
}
| MavenITmng1144MultipleDefaultGoalsTest |
java | google__auto | factory/src/test/resources/bad/AnnotationsToApplyNotAnnotations.java | {
"start": 736,
"end": 1093
} | interface ____ {
Immutable immutable() default @Immutable;
SuppressWarnings suppressWarnings() default @SuppressWarnings("Immutable");
int whatIsThis() default 23;
Immutable[] andWhatIsThis() default {};
}
@ImmutableAndSuppressWarnings(suppressWarnings = @SuppressWarnings({"unchecked", "Immutable"}))
@AutoFactory
final | ImmutableAndSuppressWarnings |
java | elastic__elasticsearch | plugins/examples/custom-processor/src/main/java/org/elasticsearch/example/customprocessor/ExampleRepeatProcessor.java | {
"start": 394,
"end": 1163
} | class ____ extends AbstractProcessor {
public static final String TYPE = "repeat";
public static final String FIELD_KEY_NAME = "field";
private final String field;
ExampleRepeatProcessor(String tag, String description, String field) {
super(tag, description);
this.field = field;
}
@Override
public IngestDocument execute(IngestDocument document) {
Object val = document.getFieldValue(field, Object.class, true);
if (val instanceof String string) {
String repeated = string.concat(string);
document.setFieldValue(field, repeated);
}
return document;
}
@Override
public String getType() {
return TYPE;
}
public static | ExampleRepeatProcessor |
java | resilience4j__resilience4j | resilience4j-rxjava2/src/main/java/io/github/resilience4j/AbstractDisposable.java | {
"start": 807,
"end": 1831
} | class ____ implements Disposable {
private final AtomicReference<Disposable> subscription = new AtomicReference<>();
public void onSubscribe(Disposable disposable) {
if (DisposableHelper.setOnce(this.subscription, disposable)) {
hookOnSubscribe();
}
}
protected abstract void hookOnSubscribe();
@Override
public void dispose() {
if (DisposableHelper.dispose(subscription)) {
hookOnCancel();
}
}
void whenNotDisposed(Runnable runnable) {
if (!isDisposed()) {
runnable.run();
}
}
void whenNotCompleted(Runnable runnable) {
if (DisposableHelper.dispose(subscription)) {
runnable.run();
}
}
protected abstract void hookOnCancel();
@Override
public boolean isDisposed() {
return DisposableHelper.isDisposed(subscription.get());
}
@Override
public String toString() {
return getClass().getSimpleName();
}
}
| AbstractDisposable |
java | apache__camel | components/camel-joor/src/test/java/org/apache/camel/language/joor/JoorScriptingLanguageTest.java | {
"start": 1122,
"end": 1929
} | class ____ extends CamelTestSupport {
@Test
public void testScripting() {
Language lan = context.resolveLanguage("joor");
Assertions.assertTrue(lan instanceof ScriptingLanguage);
ScriptingLanguage slan = (ScriptingLanguage) lan;
int num = slan.evaluate("2 * 3", null, int.class);
Assertions.assertEquals(6, num);
MyUser user = new MyUser();
user.setAge(33);
user.setName("Scott");
Map<String, Object> bindings = new LinkedHashMap<>();
bindings.put("user", user);
String out = slan.evaluate("'Hello ' + user.getName() + ' you are ' + user.getAge() + ' years old'", bindings,
String.class);
Assertions.assertEquals("Hello Scott you are 33 years old", out);
}
}
| JoorScriptingLanguageTest |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/annotation/AnnotationUtilsTests.java | {
"start": 3366,
"end": 18357
} | class ____ {
@BeforeEach
void clearCacheBeforeTests() {
AnnotationUtils.clearCache();
}
@Test
void findMethodAnnotationOnLeaf() throws Exception {
Method m = Leaf.class.getMethod("annotatedOnLeaf");
assertThat(m.getAnnotation(Order.class)).isNotNull();
assertThat(getAnnotation(m, Order.class)).isNotNull();
assertThat(findAnnotation(m, Order.class)).isNotNull();
}
// @since 4.2
@Test
void findMethodAnnotationWithAnnotationOnMethodInInterface() throws Exception {
Method m = Leaf.class.getMethod("fromInterfaceImplementedByRoot");
// @Order is not @Inherited
assertThat(m.getAnnotation(Order.class)).isNull();
// getAnnotation() does not search on interfaces
assertThat(getAnnotation(m, Order.class)).isNull();
// findAnnotation() does search on interfaces
assertThat(findAnnotation(m, Order.class)).isNotNull();
}
// @since 4.2
@Test
void findMethodAnnotationWithMetaAnnotationOnLeaf() throws Exception {
Method m = Leaf.class.getMethod("metaAnnotatedOnLeaf");
assertThat(m.getAnnotation(Order.class)).isNull();
assertThat(getAnnotation(m, Order.class)).isNotNull();
assertThat(findAnnotation(m, Order.class)).isNotNull();
}
// @since 4.2
@Test
void findMethodAnnotationWithMetaMetaAnnotationOnLeaf() throws Exception {
Method m = Leaf.class.getMethod("metaMetaAnnotatedOnLeaf");
assertThat(m.getAnnotation(Component.class)).isNull();
assertThat(getAnnotation(m, Component.class)).isNull();
assertThat(findAnnotation(m, Component.class)).isNotNull();
}
@Test
void findMethodAnnotationOnRoot() throws Exception {
Method m = Leaf.class.getMethod("annotatedOnRoot");
assertThat(m.getAnnotation(Order.class)).isNotNull();
assertThat(getAnnotation(m, Order.class)).isNotNull();
assertThat(findAnnotation(m, Order.class)).isNotNull();
}
// @since 4.2
@Test
void findMethodAnnotationWithMetaAnnotationOnRoot() throws Exception {
Method m = Leaf.class.getMethod("metaAnnotatedOnRoot");
assertThat(m.getAnnotation(Order.class)).isNull();
assertThat(getAnnotation(m, Order.class)).isNotNull();
assertThat(findAnnotation(m, Order.class)).isNotNull();
}
@Test
void findMethodAnnotationOnRootButOverridden() throws Exception {
Method m = Leaf.class.getMethod("overrideWithoutNewAnnotation");
assertThat(m.getAnnotation(Order.class)).isNull();
assertThat(getAnnotation(m, Order.class)).isNull();
assertThat(findAnnotation(m, Order.class)).isNotNull();
}
@Test
void findMethodAnnotationNotAnnotated() throws Exception {
Method m = Leaf.class.getMethod("notAnnotated");
assertThat(findAnnotation(m, Order.class)).isNull();
}
@Test
void findMethodAnnotationOnBridgeMethod() throws Exception {
Method bridgeMethod = SimpleFoo.class.getMethod("something", Object.class);
assertThat(bridgeMethod.isBridge()).isTrue();
assertThat(bridgeMethod.getAnnotation(Order.class)).isNull();
assertThat(getAnnotation(bridgeMethod, Order.class)).isNull();
assertThat(findAnnotation(bridgeMethod, Order.class)).isNotNull();
// As of JDK 8, invoking getAnnotation() on a bridge method actually finds an
// annotation on its 'bridged' method [1]; however, the Eclipse compiler does
// not support this [2]. Thus, we effectively ignore the following
// assertion if the test is currently executing within the Eclipse IDE.
//
// [1] https://bugs.openjdk.java.net/browse/JDK-6695379
// [2] https://bugs.eclipse.org/bugs/show_bug.cgi?id=495396
//
if (!IdeUtils.runningInEclipse()) {
assertThat(bridgeMethod.getAnnotation(Transactional.class)).isNotNull();
}
assertThat(getAnnotation(bridgeMethod, Transactional.class)).isNotNull();
assertThat(findAnnotation(bridgeMethod, Transactional.class)).isNotNull();
}
@Test
void findMethodAnnotationOnBridgedMethod() throws Exception {
Method bridgedMethod = SimpleFoo.class.getMethod("something", String.class);
assertThat(bridgedMethod.isBridge()).isFalse();
assertThat(bridgedMethod.getAnnotation(Order.class)).isNull();
assertThat(getAnnotation(bridgedMethod, Order.class)).isNull();
assertThat(findAnnotation(bridgedMethod, Order.class)).isNotNull();
assertThat(bridgedMethod.getAnnotation(Transactional.class)).isNotNull();
assertThat(getAnnotation(bridgedMethod, Transactional.class)).isNotNull();
assertThat(findAnnotation(bridgedMethod, Transactional.class)).isNotNull();
}
@Test
void findMethodAnnotationFromInterface() throws Exception {
Method method = ImplementsInterfaceWithAnnotatedMethod.class.getMethod("foo");
Order order = findAnnotation(method, Order.class);
assertThat(order).isNotNull();
}
@Test // SPR-16060
void findMethodAnnotationFromGenericInterface() throws Exception {
Method method = ImplementsInterfaceWithGenericAnnotatedMethod.class.getMethod("foo", String.class);
Order order = findAnnotation(method, Order.class);
assertThat(order).isNotNull();
}
@Test // SPR-17146
void findMethodAnnotationFromGenericSuperclass() throws Exception {
Method method = ExtendsBaseClassWithGenericAnnotatedMethod.class.getMethod("foo", String.class);
Order order = findAnnotation(method, Order.class);
assertThat(order).isNotNull();
}
@Test
void findMethodAnnotationFromInterfaceOnSuper() throws Exception {
Method method = SubOfImplementsInterfaceWithAnnotatedMethod.class.getMethod("foo");
Order order = findAnnotation(method, Order.class);
assertThat(order).isNotNull();
}
@Test
void findMethodAnnotationFromInterfaceWhenSuperDoesNotImplementMethod() throws Exception {
Method method = SubOfAbstractImplementsInterfaceWithAnnotatedMethod.class.getMethod("foo");
Order order = findAnnotation(method, Order.class);
assertThat(order).isNotNull();
}
// @since 4.1.2
@Test
void findClassAnnotationFavorsMoreLocallyDeclaredComposedAnnotationsOverAnnotationsOnInterfaces() {
Component component = findAnnotation(ClassWithLocalMetaAnnotationAndMetaAnnotatedInterface.class, Component.class);
assertThat(component).isNotNull();
assertThat(component.value()).isEqualTo("meta2");
}
// @since 4.0.3
@Test
void findClassAnnotationFavorsMoreLocallyDeclaredComposedAnnotationsOverInheritedAnnotations() {
Transactional transactional = findAnnotation(SubSubClassWithInheritedAnnotation.class, Transactional.class);
assertThat(transactional).isNotNull();
assertThat(transactional.readOnly()).as("readOnly flag for SubSubClassWithInheritedAnnotation").isTrue();
}
// @since 4.0.3
@Test
void findClassAnnotationFavorsMoreLocallyDeclaredComposedAnnotationsOverInheritedComposedAnnotations() {
Component component = findAnnotation(SubSubClassWithInheritedMetaAnnotation.class, Component.class);
assertThat(component).isNotNull();
assertThat(component.value()).isEqualTo("meta2");
}
@Test
void findClassAnnotationOnMetaMetaAnnotatedClass() {
Component component = findAnnotation(MetaMetaAnnotatedClass.class, Component.class);
assertThat(component).as("Should find meta-annotation on composed annotation on class").isNotNull();
assertThat(component.value()).isEqualTo("meta2");
}
@Test
void findClassAnnotationOnMetaMetaMetaAnnotatedClass() {
Component component = findAnnotation(MetaMetaMetaAnnotatedClass.class, Component.class);
assertThat(component).as("Should find meta-annotation on meta-annotation on composed annotation on class").isNotNull();
assertThat(component.value()).isEqualTo("meta2");
}
@Test
void findClassAnnotationOnAnnotatedClassWithMissingTargetMetaAnnotation() {
// TransactionalClass is NOT annotated or meta-annotated with @Component
Component component = findAnnotation(TransactionalClass.class, Component.class);
assertThat(component).as("Should not find @Component on TransactionalClass").isNull();
}
@Test
void findClassAnnotationOnMetaCycleAnnotatedClassWithMissingTargetMetaAnnotation() {
Component component = findAnnotation(MetaCycleAnnotatedClass.class, Component.class);
assertThat(component).as("Should not find @Component on MetaCycleAnnotatedClass").isNull();
}
// @since 4.2
@Test
void findClassAnnotationOnInheritedAnnotationInterface() {
Transactional tx = findAnnotation(InheritedAnnotationInterface.class, Transactional.class);
assertThat(tx).as("Should find @Transactional on InheritedAnnotationInterface").isNotNull();
}
// @since 4.2
@Test
void findClassAnnotationOnSubInheritedAnnotationInterface() {
Transactional tx = findAnnotation(SubInheritedAnnotationInterface.class, Transactional.class);
assertThat(tx).as("Should find @Transactional on SubInheritedAnnotationInterface").isNotNull();
}
// @since 4.2
@Test
void findClassAnnotationOnSubSubInheritedAnnotationInterface() {
Transactional tx = findAnnotation(SubSubInheritedAnnotationInterface.class, Transactional.class);
assertThat(tx).as("Should find @Transactional on SubSubInheritedAnnotationInterface").isNotNull();
}
// @since 4.2
@Test
void findClassAnnotationOnNonInheritedAnnotationInterface() {
Order order = findAnnotation(NonInheritedAnnotationInterface.class, Order.class);
assertThat(order).as("Should find @Order on NonInheritedAnnotationInterface").isNotNull();
}
// @since 4.2
@Test
void findClassAnnotationOnSubNonInheritedAnnotationInterface() {
Order order = findAnnotation(SubNonInheritedAnnotationInterface.class, Order.class);
assertThat(order).as("Should find @Order on SubNonInheritedAnnotationInterface").isNotNull();
}
// @since 4.2
@Test
void findClassAnnotationOnSubSubNonInheritedAnnotationInterface() {
Order order = findAnnotation(SubSubNonInheritedAnnotationInterface.class, Order.class);
assertThat(order).as("Should find @Order on SubSubNonInheritedAnnotationInterface").isNotNull();
}
@Test
void findAnnotationDeclaringClassForAllScenarios() {
// no class-level annotation
assertThat(findAnnotationDeclaringClass(Transactional.class, NonAnnotatedInterface.class)).isNull();
assertThat(findAnnotationDeclaringClass(Transactional.class, NonAnnotatedClass.class)).isNull();
// inherited class-level annotation; note: @Transactional is inherited
assertThat(findAnnotationDeclaringClass(Transactional.class, InheritedAnnotationInterface.class)).isEqualTo(InheritedAnnotationInterface.class);
assertThat(findAnnotationDeclaringClass(Transactional.class, SubInheritedAnnotationInterface.class)).isNull();
assertThat(findAnnotationDeclaringClass(Transactional.class, InheritedAnnotationClass.class)).isEqualTo(InheritedAnnotationClass.class);
assertThat(findAnnotationDeclaringClass(Transactional.class, SubInheritedAnnotationClass.class)).isEqualTo(InheritedAnnotationClass.class);
// non-inherited class-level annotation; note: @Order is not inherited,
// but findAnnotationDeclaringClass() should still find it on classes.
assertThat(findAnnotationDeclaringClass(Order.class, NonInheritedAnnotationInterface.class)).isEqualTo(NonInheritedAnnotationInterface.class);
assertThat(findAnnotationDeclaringClass(Order.class, SubNonInheritedAnnotationInterface.class)).isNull();
assertThat(findAnnotationDeclaringClass(Order.class, NonInheritedAnnotationClass.class)).isEqualTo(NonInheritedAnnotationClass.class);
assertThat(findAnnotationDeclaringClass(Order.class, SubNonInheritedAnnotationClass.class)).isEqualTo(NonInheritedAnnotationClass.class);
}
@Test
void findAnnotationDeclaringClassForTypesWithSingleCandidateType() {
// no class-level annotation
List<Class<? extends Annotation>> transactionalCandidateList = Collections.singletonList(Transactional.class);
assertThat(findAnnotationDeclaringClassForTypes(transactionalCandidateList, NonAnnotatedInterface.class)).isNull();
assertThat(findAnnotationDeclaringClassForTypes(transactionalCandidateList, NonAnnotatedClass.class)).isNull();
// inherited class-level annotation; note: @Transactional is inherited
assertThat(findAnnotationDeclaringClassForTypes(transactionalCandidateList, InheritedAnnotationInterface.class)).isEqualTo(InheritedAnnotationInterface.class);
assertThat(findAnnotationDeclaringClassForTypes(transactionalCandidateList, SubInheritedAnnotationInterface.class)).isNull();
assertThat(findAnnotationDeclaringClassForTypes(transactionalCandidateList, InheritedAnnotationClass.class)).isEqualTo(InheritedAnnotationClass.class);
assertThat(findAnnotationDeclaringClassForTypes(transactionalCandidateList, SubInheritedAnnotationClass.class)).isEqualTo(InheritedAnnotationClass.class);
// non-inherited class-level annotation; note: @Order is not inherited,
// but findAnnotationDeclaringClassForTypes() should still find it on classes.
List<Class<? extends Annotation>> orderCandidateList = Collections.singletonList(Order.class);
assertThat(findAnnotationDeclaringClassForTypes(orderCandidateList, NonInheritedAnnotationInterface.class)).isEqualTo(NonInheritedAnnotationInterface.class);
assertThat(findAnnotationDeclaringClassForTypes(orderCandidateList, SubNonInheritedAnnotationInterface.class)).isNull();
assertThat(findAnnotationDeclaringClassForTypes(orderCandidateList, NonInheritedAnnotationClass.class)).isEqualTo(NonInheritedAnnotationClass.class);
assertThat(findAnnotationDeclaringClassForTypes(orderCandidateList, SubNonInheritedAnnotationClass.class)).isEqualTo(NonInheritedAnnotationClass.class);
}
@Test
void findAnnotationDeclaringClassForTypesWithMultipleCandidateTypes() {
List<Class<? extends Annotation>> candidates = asList(Transactional.class, Order.class);
// no class-level annotation
assertThat(findAnnotationDeclaringClassForTypes(candidates, NonAnnotatedInterface.class)).isNull();
assertThat(findAnnotationDeclaringClassForTypes(candidates, NonAnnotatedClass.class)).isNull();
// inherited class-level annotation; note: @Transactional is inherited
assertThat(findAnnotationDeclaringClassForTypes(candidates, InheritedAnnotationInterface.class)).isEqualTo(InheritedAnnotationInterface.class);
assertThat(findAnnotationDeclaringClassForTypes(candidates, SubInheritedAnnotationInterface.class)).isNull();
assertThat(findAnnotationDeclaringClassForTypes(candidates, InheritedAnnotationClass.class)).isEqualTo(InheritedAnnotationClass.class);
assertThat(findAnnotationDeclaringClassForTypes(candidates, SubInheritedAnnotationClass.class)).isEqualTo(InheritedAnnotationClass.class);
// non-inherited class-level annotation; note: @Order is not inherited,
// but findAnnotationDeclaringClassForTypes() should still find it on classes.
assertThat(findAnnotationDeclaringClassForTypes(candidates, NonInheritedAnnotationInterface.class)).isEqualTo(NonInheritedAnnotationInterface.class);
assertThat(findAnnotationDeclaringClassForTypes(candidates, SubNonInheritedAnnotationInterface.class)).isNull();
assertThat(findAnnotationDeclaringClassForTypes(candidates, NonInheritedAnnotationClass.class)).isEqualTo(NonInheritedAnnotationClass.class);
assertThat(findAnnotationDeclaringClassForTypes(candidates, SubNonInheritedAnnotationClass.class)).isEqualTo(NonInheritedAnnotationClass.class);
// | AnnotationUtilsTests |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/jmx/support/MetricType.java | {
"start": 818,
"end": 983
} | enum ____ {
/**
* The measurement values may go up or down over time.
*/
GAUGE,
/**
* The measurement values will always increase.
*/
COUNTER
}
| MetricType |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/ext/jdk8/StreamSerializerTest.java | {
"start": 421,
"end": 484
} | class ____ extends StreamTestBase
{
static | StreamSerializerTest |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianIntEvaluator.java | {
"start": 4817,
"end": 5307
} | class ____ implements EvalOperator.ExpressionEvaluator.Factory {
private final EvalOperator.ExpressionEvaluator.Factory field;
public Factory(EvalOperator.ExpressionEvaluator.Factory field) {
this.field = field;
}
@Override
public MvMedianIntEvaluator get(DriverContext context) {
return new MvMedianIntEvaluator(field.get(context), context);
}
@Override
public String toString() {
return "MvMedian[field=" + field + "]";
}
}
}
| Factory |
java | quarkusio__quarkus | independent-projects/tools/devtools-common/src/main/java/io/quarkus/devtools/project/BuildTool.java | {
"start": 499,
"end": 4184
} | enum ____ {
/** Maven build tool */
MAVEN("\n# Maven\ntarget/\npom.xml.tag\npom.xml.releaseBackup\npom.xml.versionsBackup\nrelease.properties",
"target",
new String[] { "pom.xml" }),
/** Gradle build tool */
GRADLE("\n# Gradle\n.gradle/\nbuild/",
"build",
new String[] { "build.gradle", "settings.gradle", "gradle.properties" }),
/** Gradle build tool with Kotlin DSL */
GRADLE_KOTLIN_DSL("\n# Gradle\n.gradle/\nbuild/",
"build",
new String[] { "build.gradle.kts", "settings.gradle.kts", "gradle.properties" }),
/** JBang build tool */
JBANG("\n# JBang\n.target/\nbuild/",
"build",
new String[0]);
private final String gitIgnoreEntries;
private final String buildDirectory;
private final String[] buildFiles;
private BuildTool(String gitIgnoreEntries, String buildDirectory, String[] buildFiles) {
this.gitIgnoreEntries = gitIgnoreEntries;
this.buildDirectory = buildDirectory;
this.buildFiles = buildFiles;
}
public boolean isAnyGradle() {
return GRADLE.equals(this) || GRADLE_KOTLIN_DSL.equals(this);
}
/**
* @return {@code \n}-separated lines to add to a {@code .gitignore} file
*/
public String getGitIgnoreEntries() {
return gitIgnoreEntries;
}
public String[] getBuildFiles() {
return buildFiles;
}
public String getDependenciesFile() {
return buildFiles[0];
}
public String getBuildDirectory() {
return buildDirectory;
}
public ExtensionManager createExtensionManager(final Path projectDirPath,
ExtensionCatalog catalog) {
switch (this) {
case GRADLE:
return new GroovyGradleBuildFile();
case GRADLE_KOTLIN_DSL:
return new KotlinGradleBuildFile();
case MAVEN:
default:
// TODO it should never get here, this needs a proper refactoring
return new MavenBuildFile(projectDirPath, catalog);
}
}
public String getKey() {
return toString().toLowerCase(Locale.ROOT).replace('_', '-');
}
/**
* Determine the build tool from the contents of an existing project
* (pom.xml, build.gradle.kts, build.gradle, etc.)
*
* @param projectDirPath The Path to an existing project
* @return the BuildTool enumeration matched from filesystem content or null;
*/
public static BuildTool fromProject(Path projectDirPath) {
if (projectDirPath.resolve("pom.xml").toFile().exists()) {
return BuildTool.MAVEN;
} else if (projectDirPath.resolve("build.gradle").toFile().exists()) {
return BuildTool.GRADLE;
} else if (projectDirPath.resolve("build.gradle.kts").toFile().exists()) {
return BuildTool.GRADLE_KOTLIN_DSL;
} else if (projectDirPath.resolve("jbang").toFile().exists()) {
return BuildTool.JBANG;
} else if (projectDirPath.resolve("src").toFile().isDirectory()) {
String[] files = projectDirPath.resolve("src").toFile().list();
if (files != null && Arrays.asList(files).stream().anyMatch(x -> x.contains(".java"))) {
return BuildTool.JBANG;
}
}
return null;
}
public static BuildTool findTool(String tool) {
for (BuildTool value : BuildTool.values()) {
if (value.toString().equalsIgnoreCase(tool) || value.getKey().equalsIgnoreCase(tool)) {
return value;
}
}
return null;
}
}
| BuildTool |
java | alibaba__nacos | naming/src/main/java/com/alibaba/nacos/naming/healthcheck/v2/processor/TcpHealthCheckProcessor.java | {
"start": 11116,
"end": 12103
} | class ____ implements Runnable {
SelectionKey key;
public TimeOutTask(SelectionKey key) {
this.key = key;
}
@Override
public void run() {
if (key != null && key.isValid()) {
SocketChannel channel = (SocketChannel) key.channel();
Beat beat = (Beat) key.attachment();
if (channel.isConnected()) {
return;
}
try {
channel.finishConnect();
} catch (Exception ignore) {
}
try {
beat.finishCheck(false, false, beat.getTask().getCheckRtNormalized() * 2, "tcp:timeout");
key.cancel();
key.channel().close();
} catch (Exception ignore) {
}
}
}
}
private | TimeOutTask |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/util/CorruptConfigurationException.java | {
"start": 996,
"end": 1466
} | class ____ extends RuntimeException {
private static final long serialVersionUID = 854450995262666207L;
/**
* Creates a new exception with the given error message.
*
* @param message The exception's message.
*/
public CorruptConfigurationException(String message) {
super(message);
}
public CorruptConfigurationException(String message, Throwable cause) {
super(message, cause);
}
}
| CorruptConfigurationException |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/android/FragmentNotInstantiableTest.java | {
"start": 16366,
"end": 16509
} | class ____ extends CustomFragment {
public AbstractFragment(int x) {}
}
public static | AbstractFragment |
java | elastic__elasticsearch | x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elastic/authorization/AuthorizationPollerTests.java | {
"start": 2577,
"end": 26321
} | class ____ extends ESTestCase {
private DeterministicTaskQueue taskQueue;
@Before
public void init() throws Exception {
taskQueue = new DeterministicTaskQueue();
}
public void testDoesNotSendAuthorizationRequest_WhenModelRegistryIsNotReady() {
var mockRegistry = mock(ModelRegistry.class);
when(mockRegistry.isReady()).thenReturn(false);
var authorizationRequestHandler = mock(ElasticInferenceServiceAuthorizationRequestHandler.class);
var poller = new AuthorizationPoller(
new AuthorizationPoller.TaskFields(0, "abc", "abc", "abc", new TaskId("abc", 0), Map.of()),
createWithEmptySettings(taskQueue.getThreadPool()),
authorizationRequestHandler,
mock(Sender.class),
ElasticInferenceServiceSettingsTests.create("", TimeValue.timeValueMillis(1), TimeValue.timeValueMillis(1), true),
mockRegistry,
mock(Client.class),
createMockCCMFeature(false),
createMockCCMService(false),
null
);
var persistentTaskId = "id";
var allocationId = 0L;
var mockPersistentTasksService = mock(PersistentTasksService.class);
poller.init(mockPersistentTasksService, mock(TaskManager.class), persistentTaskId, allocationId);
poller.sendAuthorizationRequest();
verify(authorizationRequestHandler, never()).getAuthorization(any(), any());
verify(mockPersistentTasksService, never()).sendCompletionRequest(
eq(persistentTaskId),
eq(allocationId),
isNull(),
isNull(),
any(),
any()
);
}
public void testDoesNotSendAuthorizationRequest_WhenCCMIsDisabled() {
var mockRegistry = mock(ModelRegistry.class);
when(mockRegistry.isReady()).thenReturn(true);
var authorizationRequestHandler = mock(ElasticInferenceServiceAuthorizationRequestHandler.class);
var poller = new AuthorizationPoller(
new AuthorizationPoller.TaskFields(0, "abc", "abc", "abc", new TaskId("abc", 0), Map.of()),
createWithEmptySettings(taskQueue.getThreadPool()),
authorizationRequestHandler,
mock(Sender.class),
ElasticInferenceServiceSettingsTests.create("", TimeValue.timeValueMillis(1), TimeValue.timeValueMillis(1), true),
mockRegistry,
mock(Client.class),
createMockCCMFeature(true),
createMockCCMService(false),
null
);
var persistentTaskId = "id";
var allocationId = 0L;
var mockPersistentTasksService = mock(PersistentTasksService.class);
poller.init(mockPersistentTasksService, mock(TaskManager.class), persistentTaskId, allocationId);
poller.sendAuthorizationRequest();
verify(authorizationRequestHandler, never()).getAuthorization(any(), any());
verify(mockPersistentTasksService, times(1)).sendCompletionRequest(
eq(persistentTaskId),
eq(allocationId),
isNull(),
isNull(),
any(),
any()
);
}
public void testOnlyMarksCompletedOnce() {
var mockRegistry = mock(ModelRegistry.class);
when(mockRegistry.isReady()).thenReturn(true);
var authorizationRequestHandler = mock(ElasticInferenceServiceAuthorizationRequestHandler.class);
var poller = new AuthorizationPoller(
new AuthorizationPoller.TaskFields(0, "abc", "abc", "abc", new TaskId("abc", 0), Map.of()),
createWithEmptySettings(taskQueue.getThreadPool()),
authorizationRequestHandler,
mock(Sender.class),
ElasticInferenceServiceSettingsTests.create("", TimeValue.timeValueMillis(1), TimeValue.timeValueMillis(1), true),
mockRegistry,
mock(Client.class),
createMockCCMFeature(true),
createMockCCMService(false),
null
);
var persistentTaskId = "id";
var allocationId = 0L;
var mockPersistentTasksService = mock(PersistentTasksService.class);
poller.init(mockPersistentTasksService, mock(TaskManager.class), persistentTaskId, allocationId);
poller.sendAuthorizationRequest();
poller.sendAuthorizationRequest();
verify(authorizationRequestHandler, never()).getAuthorization(any(), any());
verify(mockPersistentTasksService, times(1)).sendCompletionRequest(
eq(persistentTaskId),
eq(allocationId),
isNull(),
isNull(),
any(),
any()
);
}
public void testSendsAuthorizationRequest_WhenModelRegistryIsReady() {
var mockRegistry = mock(ModelRegistry.class);
when(mockRegistry.isReady()).thenReturn(true);
when(mockRegistry.getInferenceIds()).thenReturn(Set.of("id1", "id2"));
var mockAuthHandler = mock(ElasticInferenceServiceAuthorizationRequestHandler.class);
doAnswer(invocation -> {
ActionListener<ElasticInferenceServiceAuthorizationModel> listener = invocation.getArgument(0);
listener.onResponse(
ElasticInferenceServiceAuthorizationModel.of(
new ElasticInferenceServiceAuthorizationResponseEntity(
List.of(
new ElasticInferenceServiceAuthorizationResponseEntity.AuthorizedModel(
InternalPreconfiguredEndpoints.DEFAULT_ELSER_2_MODEL_ID,
EnumSet.of(TaskType.SPARSE_EMBEDDING)
)
)
)
)
);
return Void.TYPE;
}).when(mockAuthHandler).getAuthorization(any(), any());
var mockClient = mock(Client.class);
when(mockClient.threadPool()).thenReturn(taskQueue.getThreadPool());
var poller = new AuthorizationPoller(
new AuthorizationPoller.TaskFields(0, "abc", "abc", "abc", new TaskId("abc", 0), Map.of()),
createWithEmptySettings(taskQueue.getThreadPool()),
mockAuthHandler,
mock(Sender.class),
ElasticInferenceServiceSettingsTests.create("", TimeValue.timeValueMillis(1), TimeValue.timeValueMillis(1), true),
mockRegistry,
mockClient,
createMockCCMFeature(true),
createMockCCMService(true),
null
);
var persistentTaskId = "id";
var allocationId = 0L;
var mockPersistentTasksService = mock(PersistentTasksService.class);
poller.init(mockPersistentTasksService, mock(TaskManager.class), persistentTaskId, allocationId);
var requestArgCaptor = ArgumentCaptor.forClass(StoreInferenceEndpointsAction.Request.class);
poller.sendAuthorizationRequest();
verify(mockClient).execute(eq(StoreInferenceEndpointsAction.INSTANCE), requestArgCaptor.capture(), any());
var capturedRequest = requestArgCaptor.getValue();
assertThat(
capturedRequest.getModels(),
is(
List.of(
PreconfiguredEndpointModelAdapter.createModel(
InternalPreconfiguredEndpoints.getWithInferenceId(InternalPreconfiguredEndpoints.DEFAULT_ELSER_ENDPOINT_ID_V2),
new ElasticInferenceServiceComponents("")
)
)
)
);
verify(mockPersistentTasksService, never()).sendCompletionRequest(
eq(persistentTaskId),
eq(allocationId),
any(),
any(),
any(),
any()
);
}
public void testSendsAuthorizationRequest_WhenCCMIsNotConfigurable() {
var mockRegistry = mock(ModelRegistry.class);
when(mockRegistry.isReady()).thenReturn(true);
when(mockRegistry.getInferenceIds()).thenReturn(Set.of("id1", "id2"));
var mockAuthHandler = mock(ElasticInferenceServiceAuthorizationRequestHandler.class);
doAnswer(invocation -> {
ActionListener<ElasticInferenceServiceAuthorizationModel> listener = invocation.getArgument(0);
listener.onResponse(
ElasticInferenceServiceAuthorizationModel.of(
new ElasticInferenceServiceAuthorizationResponseEntity(
List.of(
new ElasticInferenceServiceAuthorizationResponseEntity.AuthorizedModel(
InternalPreconfiguredEndpoints.DEFAULT_ELSER_2_MODEL_ID,
EnumSet.of(TaskType.SPARSE_EMBEDDING)
)
)
)
)
);
return Void.TYPE;
}).when(mockAuthHandler).getAuthorization(any(), any());
var mockClient = mock(Client.class);
when(mockClient.threadPool()).thenReturn(taskQueue.getThreadPool());
var poller = new AuthorizationPoller(
new AuthorizationPoller.TaskFields(0, "abc", "abc", "abc", new TaskId("abc", 0), Map.of()),
createWithEmptySettings(taskQueue.getThreadPool()),
mockAuthHandler,
mock(Sender.class),
ElasticInferenceServiceSettingsTests.create("", TimeValue.timeValueMillis(1), TimeValue.timeValueMillis(1), true),
mockRegistry,
mockClient,
// CCM is not configurable so we should send the request because it doesn't depend on an api key
createMockCCMFeature(false),
createMockCCMService(false),
null
);
var persistentTaskId = "id";
var allocationId = 0L;
var mockPersistentTasksService = mock(PersistentTasksService.class);
poller.init(mockPersistentTasksService, mock(TaskManager.class), persistentTaskId, allocationId);
var requestArgCaptor = ArgumentCaptor.forClass(StoreInferenceEndpointsAction.Request.class);
poller.sendAuthorizationRequest();
verify(mockClient).execute(eq(StoreInferenceEndpointsAction.INSTANCE), requestArgCaptor.capture(), any());
var capturedRequest = requestArgCaptor.getValue();
assertThat(
capturedRequest.getModels(),
is(
List.of(
PreconfiguredEndpointModelAdapter.createModel(
InternalPreconfiguredEndpoints.getWithInferenceId(InternalPreconfiguredEndpoints.DEFAULT_ELSER_ENDPOINT_ID_V2),
new ElasticInferenceServiceComponents("")
)
)
)
);
verify(mockPersistentTasksService, never()).sendCompletionRequest(
eq(persistentTaskId),
eq(allocationId),
any(),
any(),
any(),
any()
);
}
public void testSendsAuthorizationRequest_ButDoesNotStoreAnyModels_WhenTheirInferenceIdAlreadyExists() {
var mockRegistry = mock(ModelRegistry.class);
when(mockRegistry.isReady()).thenReturn(true);
when(mockRegistry.getInferenceIds()).thenReturn(Set.of(InternalPreconfiguredEndpoints.DEFAULT_ELSER_ENDPOINT_ID_V2, "id2"));
var mockAuthHandler = mock(ElasticInferenceServiceAuthorizationRequestHandler.class);
doAnswer(invocation -> {
ActionListener<ElasticInferenceServiceAuthorizationModel> listener = invocation.getArgument(0);
listener.onResponse(
ElasticInferenceServiceAuthorizationModel.of(
new ElasticInferenceServiceAuthorizationResponseEntity(
List.of(
new ElasticInferenceServiceAuthorizationResponseEntity.AuthorizedModel(
InternalPreconfiguredEndpoints.DEFAULT_ELSER_2_MODEL_ID,
EnumSet.of(TaskType.SPARSE_EMBEDDING)
)
)
)
)
);
return Void.TYPE;
}).when(mockAuthHandler).getAuthorization(any(), any());
var mockClient = mock(Client.class);
when(mockClient.threadPool()).thenReturn(taskQueue.getThreadPool());
var poller = new AuthorizationPoller(
new AuthorizationPoller.TaskFields(0, "abc", "abc", "abc", new TaskId("abc", 0), Map.of()),
createWithEmptySettings(taskQueue.getThreadPool()),
mockAuthHandler,
mock(Sender.class),
ElasticInferenceServiceSettingsTests.create("", TimeValue.timeValueMillis(1), TimeValue.timeValueMillis(1), true),
mockRegistry,
mockClient,
createMockCCMFeature(true),
createMockCCMService(true),
null
);
poller.sendAuthorizationRequest();
verify(mockClient, never()).execute(eq(StoreInferenceEndpointsAction.INSTANCE), any(), any());
}
public void testDoesNotAttemptToStoreModelIds_ThatDoNotExistInThePreconfiguredMapping() {
var mockRegistry = mock(ModelRegistry.class);
when(mockRegistry.isReady()).thenReturn(true);
when(mockRegistry.getInferenceIds()).thenReturn(Set.of("id1", "id2"));
var mockAuthHandler = mock(ElasticInferenceServiceAuthorizationRequestHandler.class);
doAnswer(invocation -> {
ActionListener<ElasticInferenceServiceAuthorizationModel> listener = invocation.getArgument(0);
listener.onResponse(
ElasticInferenceServiceAuthorizationModel.of(
new ElasticInferenceServiceAuthorizationResponseEntity(
List.of(
new ElasticInferenceServiceAuthorizationResponseEntity.AuthorizedModel(
// This is a model id that does not exist in the preconfigured endpoints map so it will not be stored
"abc",
EnumSet.of(TaskType.SPARSE_EMBEDDING)
)
)
)
)
);
return Void.TYPE;
}).when(mockAuthHandler).getAuthorization(any(), any());
var mockClient = mock(Client.class);
when(mockClient.threadPool()).thenReturn(taskQueue.getThreadPool());
var poller = new AuthorizationPoller(
new AuthorizationPoller.TaskFields(0, "abc", "abc", "abc", new TaskId("abc", 0), Map.of()),
createWithEmptySettings(taskQueue.getThreadPool()),
mockAuthHandler,
mock(Sender.class),
ElasticInferenceServiceSettingsTests.create("", TimeValue.timeValueMillis(1), TimeValue.timeValueMillis(1), true),
mockRegistry,
mockClient,
createMockCCMFeature(true),
createMockCCMService(true),
null
);
poller.sendAuthorizationRequest();
verify(mockClient, never()).execute(eq(StoreInferenceEndpointsAction.INSTANCE), any(), any());
}
public void testDoesNotAttemptToStoreModelIds_ThatHaveATaskTypeThatTheEISIntegration_DoesNotSupport() {
var mockRegistry = mock(ModelRegistry.class);
when(mockRegistry.isReady()).thenReturn(true);
when(mockRegistry.getInferenceIds()).thenReturn(Set.of("id1", "id2"));
var mockAuthHandler = mock(ElasticInferenceServiceAuthorizationRequestHandler.class);
doAnswer(invocation -> {
ActionListener<ElasticInferenceServiceAuthorizationModel> listener = invocation.getArgument(0);
listener.onResponse(
ElasticInferenceServiceAuthorizationModel.of(
new ElasticInferenceServiceAuthorizationResponseEntity(
List.of(
new ElasticInferenceServiceAuthorizationResponseEntity.AuthorizedModel(
InternalPreconfiguredEndpoints.DEFAULT_ELSER_2_MODEL_ID,
EnumSet.noneOf(TaskType.class)
)
)
)
)
);
return Void.TYPE;
}).when(mockAuthHandler).getAuthorization(any(), any());
var mockClient = mock(Client.class);
when(mockClient.threadPool()).thenReturn(taskQueue.getThreadPool());
var poller = new AuthorizationPoller(
new AuthorizationPoller.TaskFields(0, "abc", "abc", "abc", new TaskId("abc", 0), Map.of()),
createWithEmptySettings(taskQueue.getThreadPool()),
mockAuthHandler,
mock(Sender.class),
ElasticInferenceServiceSettingsTests.create("", TimeValue.timeValueMillis(1), TimeValue.timeValueMillis(1), true),
mockRegistry,
mockClient,
createMockCCMFeature(true),
createMockCCMService(true),
null
);
poller.sendAuthorizationRequest();
verify(mockClient, never()).execute(eq(StoreInferenceEndpointsAction.INSTANCE), any(), any());
}
public void testSendsTwoAuthorizationRequests() throws InterruptedException {
var mockRegistry = mock(ModelRegistry.class);
when(mockRegistry.isReady()).thenReturn(true);
when(mockRegistry.getInferenceIds()).thenReturn(Set.of("id1", "id2"));
var mockAuthHandler = mock(ElasticInferenceServiceAuthorizationRequestHandler.class);
doAnswer(invocation -> {
ActionListener<ElasticInferenceServiceAuthorizationModel> listener = invocation.getArgument(0);
listener.onResponse(
ElasticInferenceServiceAuthorizationModel.of(
new ElasticInferenceServiceAuthorizationResponseEntity(
List.of(
new ElasticInferenceServiceAuthorizationResponseEntity.AuthorizedModel(
// this is an unknown model id so it won't trigger storing an inference endpoint because
// it doesn't map to a known one
"abc",
EnumSet.of(TaskType.SPARSE_EMBEDDING)
)
)
)
)
);
return Void.TYPE;
}).when(mockAuthHandler).getAuthorization(any(), any());
var mockClient = mock(Client.class);
var callbackCount = new AtomicInteger(0);
var latch = new CountDownLatch(2);
final var pollerRef = new AtomicReference<AuthorizationPoller>();
Runnable callback = () -> {
var count = callbackCount.incrementAndGet();
latch.countDown();
// we only want to run the tasks twice, so advance the time on the queue
// which flags the scheduled authorization request to be ready to run
if (count == 1) {
taskQueue.advanceTime();
} else {
pollerRef.get().shutdown();
}
};
var poller = new AuthorizationPoller(
new AuthorizationPoller.TaskFields(0, "abc", "abc", "abc", new TaskId("abc", 0), Map.of()),
createWithEmptySettings(taskQueue.getThreadPool()),
mockAuthHandler,
mock(Sender.class),
ElasticInferenceServiceSettingsTests.create("", TimeValue.timeValueMillis(1), TimeValue.timeValueMillis(1), true),
mockRegistry,
mockClient,
createMockCCMFeature(true),
createMockCCMService(true),
callback
);
pollerRef.set(poller);
poller.start();
taskQueue.runAllRunnableTasks();
latch.await(TimeValue.THIRTY_SECONDS.getSeconds(), TimeUnit.SECONDS);
assertThat(callbackCount.get(), is(2));
verify(mockClient, never()).execute(eq(StoreInferenceEndpointsAction.INSTANCE), any(), any());
}
public void testCallsShutdownAndMarksTaskAsCompleted_WhenSchedulingFails() throws InterruptedException {
var mockRegistry = mock(ModelRegistry.class);
when(mockRegistry.isReady()).thenReturn(true);
when(mockRegistry.getInferenceIds()).thenReturn(Set.of("id1", "id2"));
var mockAuthHandler = mock(ElasticInferenceServiceAuthorizationRequestHandler.class);
doAnswer(invocation -> {
ActionListener<ElasticInferenceServiceAuthorizationModel> listener = invocation.getArgument(0);
listener.onResponse(
ElasticInferenceServiceAuthorizationModel.of(
new ElasticInferenceServiceAuthorizationResponseEntity(
List.of(
new ElasticInferenceServiceAuthorizationResponseEntity.AuthorizedModel(
// this is an unknown model id so it won't trigger storing an inference endpoint because
// it doesn't map to a known one
"abc",
EnumSet.of(TaskType.SPARSE_EMBEDDING)
)
)
)
)
);
return Void.TYPE;
}).when(mockAuthHandler).getAuthorization(any(), any());
var mockClient = mock(Client.class);
var callbackCount = new AtomicInteger(0);
var latch = new CountDownLatch(1);
Runnable callback = () -> {
callbackCount.incrementAndGet();
latch.countDown();
};
var exception = new IllegalStateException("failing");
// Simulate scheduling failure by having the settings throw an exception when queried
// Throwing an exception should cause the poller to shutdown and mark itself as completed
var settingsMock = mock(ElasticInferenceServiceSettings.class);
when(settingsMock.isPeriodicAuthorizationEnabled()).thenThrow(exception);
var poller = new AuthorizationPoller(
new AuthorizationPoller.TaskFields(0, "abc", "abc", "abc", new TaskId("abc", 0), Map.of()),
createWithEmptySettings(taskQueue.getThreadPool()),
mockAuthHandler,
mock(Sender.class),
settingsMock,
mockRegistry,
mockClient,
createMockCCMFeature(true),
createMockCCMService(true),
callback
);
var persistentTaskId = "id";
var allocationId = 0L;
var mockPersistentTasksService = mock(PersistentTasksService.class);
poller.init(mockPersistentTasksService, mock(TaskManager.class), persistentTaskId, allocationId);
poller.start();
taskQueue.runAllRunnableTasks();
latch.await(TimeValue.THIRTY_SECONDS.getSeconds(), TimeUnit.SECONDS);
assertThat(callbackCount.get(), is(1));
assertTrue(poller.isShutdown());
verify(mockPersistentTasksService, times(1)).sendCompletionRequest(
eq(persistentTaskId),
eq(allocationId),
eq(exception),
eq(null),
any(),
any()
);
verify(mockClient, never()).execute(eq(StoreInferenceEndpointsAction.INSTANCE), any(), any());
poller.waitForAuthorizationToComplete(TimeValue.THIRTY_SECONDS);
verify(mockPersistentTasksService, never()).sendCompletionRequest(
eq(persistentTaskId),
eq(allocationId),
isNull(),
isNull(),
any(),
any()
);
}
}
| AuthorizationPollerTests |
java | grpc__grpc-java | core/src/main/java/io/grpc/internal/ServerCallInfoImpl.java | {
"start": 874,
"end": 2101
} | class ____<ReqT, RespT> extends ServerCallInfo<ReqT, RespT> {
private final MethodDescriptor<ReqT, RespT> methodDescriptor;
private final Attributes attributes;
private final String authority;
ServerCallInfoImpl(
MethodDescriptor<ReqT, RespT> methodDescriptor,
Attributes attributes,
@Nullable String authority) {
this.methodDescriptor = methodDescriptor;
this.attributes = attributes;
this.authority = authority;
}
@Override
public MethodDescriptor<ReqT, RespT> getMethodDescriptor() {
return methodDescriptor;
}
@Override
public Attributes getAttributes() {
return attributes;
}
@Override
@Nullable
public String getAuthority() {
return authority;
}
@Override
public boolean equals(Object other) {
if (!(other instanceof ServerCallInfoImpl)) {
return false;
}
ServerCallInfoImpl<?, ?> that = (ServerCallInfoImpl) other;
return Objects.equal(methodDescriptor, that.methodDescriptor)
&& Objects.equal(attributes, that.attributes)
&& Objects.equal(authority, that.authority);
}
@Override
public int hashCode() {
return Objects.hashCode(methodDescriptor, attributes, authority);
}
}
| ServerCallInfoImpl |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/annotation/AnnotationUtilsTests.java | {
"start": 67347,
"end": 67468
} | interface ____ {
}
@Retention(RetentionPolicy.RUNTIME)
@Repeatable(TestRepeatableContainer.class)
@ | ContextConfigMismatch |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/observers/inheritance/typevariable/ObserverInheritanceTypeVariableTest.java | {
"start": 1488,
"end": 1747
} | class ____ extends AbstractService<MyAEvent> {
static volatile MyAEvent event;
@Override
protected void doSomething(MyAEvent myEvent) {
MyAService.event = myEvent;
}
}
@ApplicationScoped
static | MyAService |
java | alibaba__nacos | config/src/main/java/com/alibaba/nacos/config/server/service/dump/task/DumpAllTagTask.java | {
"start": 818,
"end": 1012
} | class ____ extends AbstractDelayTask {
@Override
public void merge(AbstractDelayTask task) {
}
public static final String TASK_ID = "dumpAllTagConfigTask";
}
| DumpAllTagTask |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/jsontype/SubtypeResolver.java | {
"start": 439,
"end": 1332
} | class ____
implements Snapshottable<SubtypeResolver>
{
/*
/**********************************************************************
/* Snapshottable
/**********************************************************************
*/
/**
* Method that has to create a new instance that contains
* same registration information as this instance, but is not
* linked to this instance.
*
* @since 3.0
*/
@Override
public abstract SubtypeResolver snapshot();
/*
/**********************************************************************
/* Methods for registering external subtype definitions
/**********************************************************************
*/
/**
* Method for registering specified subtypes (possibly including type
* names); for type entries without name, non-qualified | SubtypeResolver |
java | google__guava | android/guava-tests/test/com/google/common/hash/Crc32cHashFunctionTest.java | {
"start": 990,
"end": 6753
} | class ____ extends TestCase {
public void testEmpty() {
assertCrc(0, new byte[0]);
}
public void testZeros() {
// Test 32 byte array of 0x00.
byte[] zeros = new byte[32];
assertCrc(0x8a9136aa, zeros);
}
public void testZeros100() {
// Test 100 byte array of 0x00.
byte[] zeros = new byte[100];
assertCrc(0x07cb9ff6, zeros);
}
public void testFull() {
// Test 32 byte array of 0xFF.
byte[] fulls = new byte[32];
Arrays.fill(fulls, (byte) 0xFF);
assertCrc(0x62a8ab43, fulls);
}
public void testFull100() {
// Test 100 byte array of 0xFF.
byte[] fulls = new byte[100];
Arrays.fill(fulls, (byte) 0xFF);
assertCrc(0xbc753add, fulls);
}
public void testAscending() {
// Test 32 byte arrays of ascending.
byte[] ascending = new byte[32];
for (int i = 0; i < 32; i++) {
ascending[i] = (byte) i;
}
assertCrc(0x46dd794e, ascending);
}
public void testDescending() {
// Test 32 byte arrays of descending.
byte[] descending = new byte[32];
for (int i = 0; i < 32; i++) {
descending[i] = (byte) (31 - i);
}
assertCrc(0x113fdb5c, descending);
}
public void testDescending100() {
// Test 100 byte arrays of descending.
byte[] descending = new byte[100];
for (int i = 0; i < 100; i++) {
descending[i] = (byte) (99 - i);
}
assertCrc(0xd022db97, descending);
}
public void testScsiReadCommand() {
// Test SCSI read command.
byte[] scsiReadCommand =
new byte[] {
0x01, (byte) 0xc0, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x14, 0x00, 0x00, 0x00,
0x00, 0x00, 0x04, 0x00,
0x00, 0x00, 0x00, 0x14,
0x00, 0x00, 0x00, 0x18,
0x28, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x02, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00
};
assertCrc(0xd9963a56, scsiReadCommand);
}
// Known values from http://www.evanjones.ca/crc32c.html
public void testSomeOtherKnownValues() {
assertCrc(0x22620404, "The quick brown fox jumps over the lazy dog".getBytes(UTF_8));
assertCrc(0xE3069283, "123456789".getBytes(UTF_8));
assertCrc(0xf3dbd4fe, "1234567890".getBytes(UTF_8));
assertCrc(0xBFE92A83, "23456789".getBytes(UTF_8));
}
public void testAgainstSimplerImplementation() {
Random r = new Random(1234567);
for (int length = 0; length < 1000; length++) {
byte[] bytes = new byte[length];
r.nextBytes(bytes);
assertCrc(referenceCrc(bytes), bytes);
}
}
private static int referenceCrc(byte[] bytes) {
int crc = ~0;
for (byte b : bytes) {
crc = (crc >>> 8) ^ Crc32cHashFunction.Crc32cHasher.byteTable[(crc ^ b) & 0xFF];
}
return ~crc;
}
/**
* Verifies that the crc of an array of byte data matches the expected value.
*
* @param expectedCrc the expected crc value.
* @param data the data to run the checksum on.
*/
private static void assertCrc(int expectedCrc, byte[] data) {
int actualCrc = Hashing.crc32c().hashBytes(data).asInt();
assertEquals(
String.format("expected: %08x, actual: %08x", expectedCrc, actualCrc),
expectedCrc,
actualCrc);
int actualCrcHasher = Hashing.crc32c().newHasher().putBytes(data).hash().asInt();
assertEquals(
String.format("expected: %08x, actual: %08x", expectedCrc, actualCrc),
expectedCrc,
actualCrcHasher);
}
// From RFC 3720, Section 12.1, the polynomial generator is 0x11EDC6F41.
// We calculate the constant below by:
// 1. Omitting the most significant bit (because it's always 1). => 0x1EDC6F41
// 2. Flipping the bits of the constant so we can process a byte at a time. => 0x82F63B78
private static final int CRC32C_GENERATOR = 0x1EDC6F41; // 0x11EDC6F41
private static final int CRC32C_GENERATOR_FLIPPED = Integer.reverse(CRC32C_GENERATOR);
public void testCrc32cByteTable() {
// See Hacker's Delight 2nd Edition, Figure 14-7.
int[] expected = new int[256];
for (int i = 0; i < expected.length; i++) {
int crc = i;
for (int j = 7; j >= 0; j--) {
int mask = -(crc & 1);
crc = (crc >>> 1) ^ (CRC32C_GENERATOR_FLIPPED & mask);
}
expected[i] = crc;
}
int[] actual = Crc32cHashFunction.Crc32cHasher.byteTable;
assertTrue(
"Expected: \n" + Arrays.toString(expected) + "\nActual:\n" + Arrays.toString(actual),
Arrays.equals(expected, actual));
}
static int advanceOneBit(int next) {
if ((next & 1) != 0) {
return (next >>> 1) ^ CRC32C_GENERATOR_FLIPPED;
} else {
return next >>> 1;
}
}
public void testCrc32cStrideTable() {
int next = CRC32C_GENERATOR_FLIPPED;
for (int i = 0; i < 12; i++) { // for 3 ints = 12 bytes in between each stride window
next = (next >>> 8) ^ Crc32cHashFunction.Crc32cHasher.byteTable[next & 0xFF];
}
int[][] expected = new int[4][256];
for (int b = 0; b < 4; ++b) {
for (int bit = 128; bit != 0; bit >>= 1) {
expected[b][bit] = next;
next = advanceOneBit(next);
}
}
for (int b = 0; b < 4; ++b) {
expected[b][0] = 0;
for (int bit = 2; bit < 256; bit <<= 1) {
for (int i = bit + 1; i < (bit << 1); i++) {
expected[b][i] = expected[b][bit] ^ expected[b][i ^ bit];
}
}
}
int[][] actual = Crc32cHashFunction.Crc32cHasher.strideTable;
assertTrue(
"Expected: \n"
+ Arrays.deepToString(expected)
+ "\nActual:\n"
+ Arrays.deepToString(actual),
Arrays.deepEquals(expected, actual));
}
}
| Crc32cHashFunctionTest |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-client/runtime/src/main/java/io/quarkus/rest/client/reactive/ClientFormParams.java | {
"start": 796,
"end": 1017
} | class ____ to act as the {@link java.lang.annotation.Repeatable} implementation for {@link ClientFormParam}.
*/
@Target({ ElementType.TYPE, ElementType.METHOD })
@Retention(RetentionPolicy.RUNTIME)
@Documented
public @ | serves |
java | quarkusio__quarkus | extensions/panache/panache-common/runtime/src/main/java/io/quarkus/panache/common/impl/GenerateBridge.java | {
"start": 327,
"end": 946
} | interface ____ {
/**
* Set to true if the corresponding JpaOperations method returns Object
* but the bridge should return a more specific type.
*/
boolean targetReturnTypeErased() default false;
/**
* Set to true to delegate to the super method instead of JpaOperations. This is useful to
* still inject interceptor calls and mock stubs.
*/
boolean callSuperMethod() default false;
/**
* Set to false when the implemented method should not receive the entity type as one of its parameters
*/
boolean ignoreEntityTypeParam() default false;
}
| GenerateBridge |
java | spring-projects__spring-security | config/src/integration-test/java/org/springframework/security/config/annotation/authentication/ldap/LdapAuthenticationProviderConfigurerTests.java | {
"start": 6242,
"end": 6626
} | class ____ extends BaseLdapProviderConfig {
@Autowired
void configure(AuthenticationManagerBuilder auth) throws Exception {
// @formatter:off
auth
.ldapAuthentication()
.groupSearchBase("ou=groups")
.groupSearchFilter("(member={0})")
.groupSearchSubtree(true)
.userDnPatterns("uid={0},ou=people");
// @formatter:on
}
}
}
| GroupSubtreeSearchConfig |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/convert/support/GenericConversionServiceTests.java | {
"start": 32663,
"end": 33150
} | class ____<T extends Enum<?> & MyEnumBaseInterface> implements Converter<String, T> {
private final Class<T> enumType;
public StringToMyEnumBaseInterfaceConverter(Class<T> enumType) {
this.enumType = enumType;
}
@Override
public T convert(String source) {
for (T value : enumType.getEnumConstants()) {
if (value.getBaseCode().equals(source)) {
return value;
}
}
return null;
}
}
}
private static | StringToMyEnumBaseInterfaceConverter |
java | google__gson | test-graal-native-image/src/test/java/com/google/gson/native_test/Java17RecordReflectionTest.java | {
"start": 1043,
"end": 3229
} | class ____ {
public record PublicRecord(int i) {}
@Test
void testPublicRecord() {
Gson gson = new Gson();
PublicRecord r = gson.fromJson("{\"i\":1}", PublicRecord.class);
assertThat(r.i).isEqualTo(1);
}
// Private record has implicit private canonical constructor
private record PrivateRecord(int i) {}
@Test
void testPrivateRecord() {
Gson gson = new Gson();
PrivateRecord r = gson.fromJson("{\"i\":1}", PrivateRecord.class);
assertThat(r.i).isEqualTo(1);
}
@Test
void testLocalRecord() {
record LocalRecordDeserialization(int i) {}
Gson gson = new Gson();
LocalRecordDeserialization r = gson.fromJson("{\"i\":1}", LocalRecordDeserialization.class);
assertThat(r.i).isEqualTo(1);
}
@Test
void testLocalRecordSerialization() {
record LocalRecordSerialization(int i) {}
Gson gson = new Gson();
assertThat(gson.toJson(new LocalRecordSerialization(1))).isEqualTo("{\"i\":1}");
}
private record RecordWithSerializedName(@SerializedName("custom-name") int i) {}
@Test
void testSerializedName() {
Gson gson = new Gson();
RecordWithSerializedName r =
gson.fromJson("{\"custom-name\":1}", RecordWithSerializedName.class);
assertThat(r.i).isEqualTo(1);
assertThat(gson.toJson(new RecordWithSerializedName(2))).isEqualTo("{\"custom-name\":2}");
}
private record RecordWithCustomConstructor(int i) {
@SuppressWarnings("unused")
RecordWithCustomConstructor {
i += 5;
}
}
@Test
void testCustomConstructor() {
Gson gson = new Gson();
RecordWithCustomConstructor r = gson.fromJson("{\"i\":1}", RecordWithCustomConstructor.class);
assertThat(r.i).isEqualTo(6);
}
private record RecordWithCustomAccessor(int i) {
@SuppressWarnings("UnusedMethod")
@Override
public int i() {
return i + 5;
}
}
@Test
void testCustomAccessor() {
Gson gson = new Gson();
assertThat(gson.toJson(new RecordWithCustomAccessor(2))).isEqualTo("{\"i\":7}");
}
@JsonAdapter(RecordWithCustomClassAdapter.CustomAdapter.class)
private record RecordWithCustomClassAdapter(int i) {
private static | Java17RecordReflectionTest |
java | apache__hadoop | hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/solver/impl/LpSolver.java | {
"start": 2412,
"end": 14915
} | class ____ extends BaseSolver implements Solver {
private static final Logger LOGGER = LoggerFactory.getLogger(LpSolver.class);
private final SolverPreprocessor preprocessor = new SolverPreprocessor();
/**
* Controls the balance between over-allocation and under-allocation.
*/
private double alpha;
/**
* Controls the generalization of the solver.
*/
private double beta;
/**
* The minimum number of job runs required to run the solver.
*/
private int minJobRuns;
/**
* The time interval which is used to discretize job execution.
*/
private int timeInterval;
/**
* The PredictionSkylineStore to store the predicted ResourceSkyline for new
* run.
*/
private PredictionSkylineStore predictionSkylineStore;
@Override public final void init(final Configuration config,
PredictionSkylineStore skylineStore) {
this.alpha =
config.getDouble(ResourceEstimatorConfiguration.SOLVER_ALPHA_KEY, 0.1);
this.beta =
config.getDouble(ResourceEstimatorConfiguration.SOLVER_BETA_KEY, 0.1);
this.minJobRuns =
config.getInt(ResourceEstimatorConfiguration.SOLVER_MIN_JOB_RUN_KEY, 1);
this.timeInterval =
config.getInt(ResourceEstimatorConfiguration.TIME_INTERVAL_KEY, 5);
this.predictionSkylineStore = skylineStore;
}
/**
* Generate over-allocation constraints.
*
* @param lpModel the LP model.
* @param cJobITimeK actual container allocation for job i in time
* interval k.
* @param oa container over-allocation.
* @param x predicted container allocation.
* @param indexJobITimeK index for job i at time interval k.
* @param timeK index for time interval k.
*/
private void generateOverAllocationConstraints(
final ExpressionsBasedModel lpModel, final double cJobITimeK,
final Variable[] oa, final Variable[] x, final int indexJobITimeK,
final int timeK) {
// oa_job_i_timeK >= x_timeK - cJobITimeK
Expression overAllocExpression =
lpModel.addExpression("over_alloc_" + indexJobITimeK);
overAllocExpression.set(oa[indexJobITimeK], 1);
overAllocExpression.set(x[timeK], -1);
overAllocExpression.lower(-cJobITimeK); // >=
}
/**
* Generate under-allocation constraints.
*
* @param lpModel the LP model.
* @param cJobITimeK actual container allocation for job i in time
* interval k.
* @param uaPredict absolute container under-allocation.
* @param ua recursive container under-allocation.
* @param x predicted container allocation.
* @param indexJobITimeK index for job i at time interval k.
* @param timeK index for time interval k.
*/
private void generateUnderAllocationConstraints(
final ExpressionsBasedModel lpModel, final double cJobITimeK,
final Variable[] uaPredict, final Variable[] ua, final Variable[] x,
final int indexJobITimeK, final int timeK) {
// uaPredict_job_i_timeK + x_timeK >= cJobITimeK
Expression underAllocPredictExpression =
lpModel.addExpression("under_alloc_predict_" + indexJobITimeK);
underAllocPredictExpression.set(uaPredict[indexJobITimeK], 1);
underAllocPredictExpression.set(x[timeK], 1);
underAllocPredictExpression.lower(cJobITimeK); // >=
if (timeK >= 1) {
/** Recursively calculate container under-allocation. */
// ua_job_i_timeK >= ua_job_i_time_(k-1) + cJobITimeK - x_timeK
Expression underAllocExpression =
lpModel.addExpression("under_alloc_" + indexJobITimeK);
underAllocExpression.set(ua[indexJobITimeK], 1);
underAllocExpression.set(ua[indexJobITimeK - 1], -1);
underAllocExpression.set(x[timeK], 1);
underAllocExpression.lower(cJobITimeK); // >=
} else {
/** Initial value for container under-allocation. */
// ua_job_i_time_0 >= cJobI_time_0 - x_time_0
Expression underAllocExpression =
lpModel.addExpression("under_alloc_" + indexJobITimeK);
underAllocExpression.set(ua[indexJobITimeK], 1);
underAllocExpression.set(x[timeK], 1);
underAllocExpression.lower(cJobITimeK); // >=
}
}
/**
* Generate solver objective.
*
* @param objective LP solver objective.
* @param numJobs number of history runs of the recurring pipeline.
* @param jobLen (maximum) job lenght of the recurring pipeline.
* @param oa container over-allocation.
* @param ua recursive container under-allocation.
* @param eps regularization parameter.
*/
private void generateObjective(final Expression objective, final int numJobs,
final int jobLen, final Variable[] oa, final Variable[] ua,
final Variable eps) {
int indexJobITimeK;
// sum Over_Allocation
for (int indexJobI = 0; indexJobI < numJobs; indexJobI++) {
for (int timeK = 0; timeK < jobLen; timeK++) {
indexJobITimeK = indexJobI * jobLen + timeK;
objective.set(oa[indexJobITimeK], alpha / numJobs);
}
}
// sum Under_Allocation
int indexJobITimeN;
for (int indexJobI = 0; indexJobI < numJobs; indexJobI++) {
indexJobITimeN = indexJobI * jobLen + jobLen - 1;
objective.set(ua[indexJobITimeN], (1 - alpha) / numJobs);
}
objective.set(eps, beta);
objective.weight(BigDecimal.valueOf(1));
}
/**
* Get the job length of recurring pipeline.
*
* @param resourceSkylines the history ResourceSkylines allocated to the
* recurring pipeline.
* @param numJobs number of history runs of the recurring pipeline.
* @return length of (discretized time intervals of) the recurring pipeline.
*/
private int getJobLen(final List<ResourceSkyline> resourceSkylines,
final int numJobs) {
int curLen = 0;
int jobLen = 0;
for (int indexJobI = 0; indexJobI < numJobs; indexJobI++) {
curLen = (int) (resourceSkylines.get(indexJobI).getSkylineList()
.getLatestNonNullTime() - resourceSkylines.get(indexJobI)
.getSkylineList().getEarliestStartTime() + timeInterval - 1)
/ timeInterval; // for round up
if (jobLen < curLen) {
jobLen = curLen;
}
}
return jobLen;
}
@Override public final RLESparseResourceAllocation solve(
final Map<RecurrenceId, List<ResourceSkyline>> jobHistory)
throws SolverException, SkylineStoreException {
// TODO: addHistory timeout support for this function, and ideally we should
// return the confidence
// level associated with the predicted resource.
preprocessor.validate(jobHistory, timeInterval);
final List<ResourceSkyline> resourceSkylines =
preprocessor.aggregateSkylines(jobHistory, minJobRuns);
final int numJobs = resourceSkylines.size();
final int jobLen = getJobLen(resourceSkylines, numJobs);
/** Create variables. */
final ExpressionsBasedModel lpModel = new ExpressionsBasedModel();
Variable[] oa = new Variable[jobLen * numJobs];
Variable[] ua = new Variable[jobLen * numJobs];
Variable[] uaPredict = new Variable[jobLen * numJobs];
Variable[] x = new Variable[jobLen];
for (int i = 0; i < jobLen * numJobs; i++) {
oa[i] = new Variable("oa" + i).lower(BigDecimal.valueOf(0));
ua[i] = new Variable("ua" + i).lower(BigDecimal.valueOf(0));
uaPredict[i] = new Variable("uaPredict" + i).lower(BigDecimal.valueOf(0));
}
for (int i = 0; i < jobLen; i++) {
x[i] = new Variable("x").lower(BigDecimal.valueOf(0));
}
lpModel.addVariables(x);
lpModel.addVariables(oa);
lpModel.addVariables(ua);
lpModel.addVariables(uaPredict);
Variable eps = new Variable("epsilon").lower(BigDecimal.valueOf(0));
lpModel.addVariable(eps);
/** Set constraints. */
int indexJobITimeK = 0;
double cJobI = 0;
double cJobITimeK = 0;
ResourceSkyline resourceSkyline;
int[] containerNums;
// 1. sum(job_i){sum(timeK){1/cJobI * uaPredict_job_i_timeK}} <= numJobs
// * eps
Expression regularizationConstraint =
lpModel.addExpression("regularization");
regularizationConstraint.set(eps, -numJobs);
regularizationConstraint.upper(BigDecimal.valueOf(0)); // <= 0
for (int indexJobI = 0;
indexJobI < resourceSkylines.size(); indexJobI++) {
resourceSkyline = resourceSkylines.get(indexJobI);
// the # of containers consumed by job i in discretized time intervals
containerNums = preprocessor
.getDiscreteSkyline(resourceSkyline.getSkylineList(), timeInterval,
resourceSkyline.getContainerSpec().getMemorySize(), jobLen);
// the aggregated # of containers consumed by job i during its lifespan
cJobI = 0;
for (int i = 0; i < containerNums.length; i++) {
cJobI = cJobI + containerNums[i];
}
for (int timeK = 0; timeK < jobLen; timeK++) {
indexJobITimeK = indexJobI * jobLen + timeK;
// the # of containers consumed by job i in the k-th time interval
cJobITimeK = containerNums[timeK];
regularizationConstraint
.set(uaPredict[indexJobITimeK], 1 / cJobI);
generateOverAllocationConstraints(lpModel, cJobITimeK, oa, x,
indexJobITimeK, timeK);
generateUnderAllocationConstraints(lpModel, cJobITimeK, uaPredict,
ua, x, indexJobITimeK, timeK);
}
}
/** Set objective. */
Expression objective = lpModel.addExpression("objective");
generateObjective(objective, numJobs, jobLen, oa, ua, eps);
/** Solve the model. */
final Result lpResult = lpModel.minimise();
final TreeMap<Long, Resource> treeMap = new TreeMap<>();
RLESparseResourceAllocation result =
new RLESparseResourceAllocation(treeMap,
new DefaultResourceCalculator());
ReservationInterval riAdd;
Resource containerSpec = resourceSkylines.get(0).getContainerSpec();
String pipelineId =
((RecurrenceId) jobHistory.keySet().toArray()[0]).getPipelineId();
Resource resource;
for (int indexTimeK = 0; indexTimeK < jobLen; indexTimeK++) {
riAdd = new ReservationInterval(indexTimeK * timeInterval,
(indexTimeK + 1) * timeInterval);
resource = Resource.newInstance(
containerSpec.getMemorySize() * (int) lpResult
.doubleValue(indexTimeK),
containerSpec.getVirtualCores() * (int) lpResult
.doubleValue(indexTimeK));
result.addInterval(riAdd, resource);
LOGGER.debug("time interval: {}, container: {}.", indexTimeK,
lpResult.doubleValue(indexTimeK));
}
predictionSkylineStore.addEstimation(pipelineId, result);
/**
* TODO: 1. We can calculate the estimated error (over-allocation,
* under-allocation) of our prediction which could be used to generate
* confidence level for our prediction; 2. Also, we can modify our model to
* take job input data size (and maybe stage info) into consideration; 3. We
* can also try to generate such conclusion: our prediction under-allocates
* X amount of resources from time 0 to time 100 compared with 95% of
* history runs; 4. We can build framework-specific versions of estimator
* (such as scope/spark/hive, etc.) and provides more specific suggestions.
* For example, we may say: for spark job i, its task size is X GB while the
* container memory allocation is Y GB; as a result, its shuffling stage is
* 20% slower than ideal case due to the disk spilling operations, etc. 5.
* If we have more information of jobs (other than ResourceSkyline), we may
* have such conclusion: job i is 20% slower than 90% of history runs, and
* it is because part of its tasks are running together with job j's tasks.
* In this case, we not only predict the amount of resource needed for job
* i, but also how to place the resource requirements to clusters; 6. We may
* monitor job progress, and dynamically increase/decrease container
* allocations to satisfy job deadline while minimizing the cost; 7. We may
* allow users to specify a budget (say $100 per job run), and optimize the
* resource allocation under the budget constraints. 8. ...
*/
return result;
}
@Override public final void close() {
// TODO: currently place holder
}
}
| LpSolver |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java | {
"start": 54303,
"end": 54361
} | class ____ stores ACL info of mount table.
*/
static | that |
java | google__error-prone | core/src/test/java/com/google/errorprone/fixes/SuggestedFixesTest.java | {
"start": 65842,
"end": 65970
} | class ____<T extends Test<T>> {}
""")
.addOutputLines(
"Test.java",
"""
| Test |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/OverrideRecordReader.java | {
"start": 2290,
"end": 4265
} | class ____
public V createValue() {
if (null == valueclass) {
Class<?> cls = kids[kids.length -1].createValue().getClass();
for (int i = kids.length -1; cls.equals(NullWritable.class); i--) {
cls = kids[i].createValue().getClass();
}
valueclass = cls.asSubclass(Writable.class);
}
if (valueclass.equals(NullWritable.class)) {
return (V) NullWritable.get();
}
return (V) ReflectionUtils.newInstance(valueclass, null);
}
/**
* Instead of filling the JoinCollector with iterators from all
* data sources, fill only the rightmost for this key.
* This not only saves space by discarding the other sources, but
* it also emits the number of key-value pairs in the preferred
* RecordReader instead of repeating that stream n times, where
* n is the cardinality of the cross product of the discarded
* streams for the given key.
*/
protected void fillJoinCollector(K iterkey)
throws IOException, InterruptedException {
final PriorityQueue<ComposableRecordReader<K,?>> q =
getRecordReaderQueue();
if (q != null && !q.isEmpty()) {
int highpos = -1;
ArrayList<ComposableRecordReader<K,?>> list =
new ArrayList<ComposableRecordReader<K,?>>(kids.length);
q.peek().key(iterkey);
final WritableComparator cmp = getComparator();
while (0 == cmp.compare(q.peek().key(), iterkey)) {
ComposableRecordReader<K,?> t = q.poll();
if (-1 == highpos || list.get(highpos).id() < t.id()) {
highpos = list.size();
}
list.add(t);
if (q.isEmpty())
break;
}
ComposableRecordReader<K,?> t = list.remove(highpos);
t.accept(jc, iterkey);
for (ComposableRecordReader<K,?> rr : list) {
rr.skip(iterkey);
}
list.add(t);
for (ComposableRecordReader<K,?> rr : list) {
if (rr.hasNext()) {
q.add(rr);
}
}
}
}
}
| agreement |
java | apache__flink | flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamTaskTest.java | {
"start": 113326,
"end": 113539
} | class ____ has been changed during invocation.");
}
}
/**
* A {@link ClassLoader} that delegates everything to {@link
* ClassLoader#getSystemClassLoader()}.
*/
private static | loader |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/schemaupdate/SchemaUpdateJoinColumnNoConstraintTest.java | {
"start": 2386,
"end": 2592
} | class ____ {
@Id
private Long id;
@ManyToOne
@JoinColumn(
name = "some_fk",
foreignKey = @ForeignKey(name = "none", value = ConstraintMode.NO_CONSTRAINT)
)
private Parent parent;
}
}
| Child |
java | apache__flink | flink-clients/src/test/java/org/apache/flink/client/program/ClientTest.java | {
"start": 20577,
"end": 23064
} | class ____ implements PipelineExecutorServiceLoader {
private final ClusterClient<?> clusterClient;
private final StreamGraph streamGraph;
TestExecutorServiceLoader(
final ClusterClient<?> clusterClient, final StreamGraph streamGraph) {
this.clusterClient = checkNotNull(clusterClient);
this.streamGraph = checkNotNull(streamGraph);
}
@Override
public PipelineExecutorFactory getExecutorFactory(@Nonnull Configuration configuration) {
return new PipelineExecutorFactory() {
@Override
public String getName() {
return "my-name";
}
@Override
public boolean isCompatibleWith(@Nonnull Configuration configuration) {
return TEST_EXECUTOR_NAME.equalsIgnoreCase(
configuration.get(DeploymentOptions.TARGET));
}
@Override
public PipelineExecutor getExecutor(@Nonnull Configuration configuration) {
return (pipeline, config, classLoader) -> {
final int parallelism = config.get(CoreOptions.DEFAULT_PARALLELISM);
final JobGraph jobGraph = streamGraph.getJobGraph();
// The job graphs from different cases are generated from the same stream
// graph, resulting in the same job ID, which can lead to exceptions.
// Therefore, we manually set a unique job ID here.
jobGraph.setJobID(new JobID());
final ExecutionConfigAccessor accessor =
ExecutionConfigAccessor.fromConfiguration(config);
jobGraph.addJars(accessor.getJars());
jobGraph.setClasspaths(accessor.getClasspaths());
final JobID jobID = clusterClient.submitJob(jobGraph).get();
return CompletableFuture.completedFuture(
new ClusterClientJobClientAdapter<>(
() -> clusterClient, jobID, classLoader));
};
}
};
}
@Override
public Stream<String> getExecutorNames() {
throw new UnsupportedOperationException("not implemented");
}
}
}
| TestExecutorServiceLoader |
java | apache__maven | impl/maven-cli/src/main/java/org/apache/maven/cling/invoker/mvnenc/goals/Decrypt.java | {
"start": 1391,
"end": 2126
} | class ____ extends ConfiguredGoalSupport {
@Inject
public Decrypt(MessageBuilderFactory messageBuilderFactory, SecDispatcher secDispatcher) {
super(messageBuilderFactory, secDispatcher);
}
@Override
protected int doExecute(EncryptContext context) throws Exception {
String encrypted = context.reader.readLine("Enter the password to decrypt: ");
if (secDispatcher.isAnyEncryptedString(encrypted)) {
context.terminal.writer().println(secDispatcher.decrypt(encrypted));
return OK;
} else {
context.terminal.writer().println(messageBuilderFactory.builder().error("Malformed encrypted string"));
return BAD_OPERATION;
}
}
}
| Decrypt |
java | greenrobot__EventBus | EventBus/src/org/greenrobot/eventbus/meta/SubscriberMethodInfo.java | {
"start": 736,
"end": 1559
} | class ____ {
final String methodName;
final ThreadMode threadMode;
final Class<?> eventType;
final int priority;
final boolean sticky;
public SubscriberMethodInfo(String methodName, Class<?> eventType, ThreadMode threadMode,
int priority, boolean sticky) {
this.methodName = methodName;
this.threadMode = threadMode;
this.eventType = eventType;
this.priority = priority;
this.sticky = sticky;
}
public SubscriberMethodInfo(String methodName, Class<?> eventType) {
this(methodName, eventType, ThreadMode.POSTING, 0, false);
}
public SubscriberMethodInfo(String methodName, Class<?> eventType, ThreadMode threadMode) {
this(methodName, eventType, threadMode, 0, false);
}
} | SubscriberMethodInfo |
java | apache__maven | compat/maven-compat/src/main/java/org/apache/maven/repository/legacy/resolver/transform/SnapshotTransformation.java | {
"start": 1897,
"end": 6644
} | class ____ extends AbstractVersionTransformation {
private static final String DEFAULT_SNAPSHOT_TIMESTAMP_FORMAT = "yyyyMMdd.HHmmss";
private static final TimeZone DEFAULT_SNAPSHOT_TIME_ZONE = TimeZone.getTimeZone("Etc/UTC");
private String deploymentTimestamp;
@Override
public void transformForResolve(Artifact artifact, RepositoryRequest request) throws ArtifactResolutionException {
// Only select snapshots that are unresolved (eg 1.0-SNAPSHOT, not 1.0-20050607.123456)
if (artifact.isSnapshot() && artifact.getBaseVersion().equals(artifact.getVersion())) {
try {
String version = resolveVersion(artifact, request);
artifact.updateVersion(version, request.getLocalRepository());
} catch (RepositoryMetadataResolutionException e) {
throw new ArtifactResolutionException(e.getMessage(), artifact, e);
}
}
}
@Override
public void transformForInstall(Artifact artifact, ArtifactRepository localRepository) {
if (artifact.isSnapshot()) {
Snapshot snapshot = new Snapshot();
snapshot.setLocalCopy(true);
RepositoryMetadata metadata = new SnapshotArtifactRepositoryMetadata(artifact, snapshot);
artifact.addMetadata(metadata);
}
}
@Override
public void transformForDeployment(
Artifact artifact, ArtifactRepository remoteRepository, ArtifactRepository localRepository)
throws ArtifactDeploymentException {
if (artifact.isSnapshot()) {
Snapshot snapshot = new Snapshot();
// TODO Should this be changed for MNG-6754 too?
snapshot.setTimestamp(getDeploymentTimestamp());
// we update the build number anyway so that it doesn't get lost. It requires the timestamp to take effect
try {
int buildNumber = resolveLatestSnapshotBuildNumber(artifact, localRepository, remoteRepository);
snapshot.setBuildNumber(buildNumber + 1);
} catch (RepositoryMetadataResolutionException e) {
throw new ArtifactDeploymentException(
"Error retrieving previous build number for artifact '" + artifact.getDependencyConflictId()
+ "': " + e.getMessage(),
e);
}
RepositoryMetadata metadata = new SnapshotArtifactRepositoryMetadata(artifact, snapshot);
artifact.setResolvedVersion(
constructVersion(metadata.getMetadata().getVersioning(), artifact.getBaseVersion()));
artifact.addMetadata(metadata);
}
}
public String getDeploymentTimestamp() {
if (deploymentTimestamp == null) {
deploymentTimestamp = getUtcDateFormatter().format(new Date());
}
return deploymentTimestamp;
}
@Override
protected String constructVersion(Versioning versioning, String baseVersion) {
String version = null;
Snapshot snapshot = versioning.getSnapshot();
if (snapshot != null) {
if (snapshot.getTimestamp() != null && snapshot.getBuildNumber() > 0) {
String newVersion = snapshot.getTimestamp() + "-" + snapshot.getBuildNumber();
version = baseVersion.replace(Artifact.SNAPSHOT_VERSION, newVersion);
} else {
version = baseVersion;
}
}
return version;
}
private int resolveLatestSnapshotBuildNumber(
Artifact artifact, ArtifactRepository localRepository, ArtifactRepository remoteRepository)
throws RepositoryMetadataResolutionException {
RepositoryMetadata metadata = new SnapshotArtifactRepositoryMetadata(artifact);
getLogger().info("Retrieving previous build number from " + remoteRepository.getId());
repositoryMetadataManager.resolveAlways(metadata, localRepository, remoteRepository);
int buildNumber = 0;
Metadata repoMetadata = metadata.getMetadata();
if ((repoMetadata != null)
&& (repoMetadata.getVersioning() != null
&& repoMetadata.getVersioning().getSnapshot() != null)) {
buildNumber = repoMetadata.getVersioning().getSnapshot().getBuildNumber();
}
return buildNumber;
}
public static DateFormat getUtcDateFormatter() {
DateFormat utcDateFormatter = new SimpleDateFormat(DEFAULT_SNAPSHOT_TIMESTAMP_FORMAT);
utcDateFormatter.setCalendar(new GregorianCalendar());
utcDateFormatter.setTimeZone(DEFAULT_SNAPSHOT_TIME_ZONE);
return utcDateFormatter;
}
}
| SnapshotTransformation |
java | alibaba__nacos | plugin/datasource/src/main/java/com/alibaba/nacos/plugin/datasource/impl/derby/ConfigInfoGrayMapperByDerby.java | {
"start": 1190,
"end": 2563
} | class ____ extends AbstractMapperByDerby implements ConfigInfoGrayMapper {
@Override
public MapperResult findAllConfigInfoGrayForDumpAllFetchRows(MapperContext context) {
String sql = "SELECT t.id,data_id,group_id,tenant_id,gray_name,gray_rule,app_name,content,md5,gmt_modified "
+ " FROM ( SELECT id FROM config_info_gray ORDER BY id OFFSET " + context.getStartRow()
+ " ROWS FETCH NEXT " + context.getPageSize() + " ROWS ONLY ) "
+ " g, config_info_gray t WHERE g.id = t.id";
return new MapperResult(sql, Collections.emptyList());
}
@Override
public MapperResult findChangeConfig(MapperContext context) {
String sql = "SELECT id, data_id, group_id, tenant_id, app_name, content,gray_name,gray_rule, "
+ "gmt_modified, encrypted_data_key FROM config_info_gray WHERE "
+ "gmt_modified >= ? and id > ? order by id OFFSET 0 ROWS FETCH NEXT ? ROWS ONLY";
return new MapperResult(sql, CollectionUtils.list(context.getWhereParameter(FieldConstant.START_TIME),
context.getWhereParameter(FieldConstant.LAST_MAX_ID),
context.getWhereParameter(FieldConstant.PAGE_SIZE)));
}
@Override
public String getDataSource() {
return DataSourceConstant.DERBY;
}
}
| ConfigInfoGrayMapperByDerby |
java | apache__flink | flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/functions/CleanupState.java | {
"start": 1111,
"end": 1219
} | interface ____ clean up state, both for {@link ProcessFunction} and {@link
* CoProcessFunction}.
*/
public | for |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/util/Arrays_asObjectArray_Test.java | {
"start": 1047,
"end": 2477
} | class ____ {
@ParameterizedTest
@MethodSource("dataProvider")
void should_return_an_Object_array_corresponding_to_the_given_object(Object arrayAsObject, Object[] expected) {
assertThat(asObjectArray(arrayAsObject)).isEqualTo(expected);
}
public static Object[][] dataProvider() {
return new Object[][] {
{ new String[0], array() },
{ new String[] { "a", "b", "c" }, array("a", "b", "c") },
{ new int[] { 1, 2, 3 }, array(1, 2, 3) }
};
}
@ParameterizedTest
@MethodSource("notArrays")
void should_throw_IllegalArgumentException_if_given_object_is_not_an_array(final Object notArray,
final String error) {
// WHEN
Throwable throwable = arrayValuesCall(notArray);
// THEN
assertThat(throwable).isInstanceOf(IllegalArgumentException.class)
.hasMessage(error);
}
private static Throwable arrayValuesCall(final Object actual) {
return catchThrowable(new ThrowingCallable() {
@Override
public void call() throws Exception {
asObjectArray(actual);
}
});
}
public static Object[][] notArrays() {
return new Object[][] {
{ null, "Given object null is not an array" },
{ "abc", "Given object abc is not an array" },
{ 123, "Given object 123 is not an array" }
};
}
}
| Arrays_asObjectArray_Test |
java | apache__camel | components/camel-telemetry/src/main/java/org/apache/camel/telemetry/decorators/RestOpenapiSpanDecorator.java | {
"start": 858,
"end": 1159
} | class ____ extends AbstractHttpSpanDecorator {
@Override
public String getComponent() {
return "rest-openapi";
}
@Override
public String getComponentClassName() {
return "org.apache.camel.component.rest.openapi.RestOpenApiComponent";
}
}
| RestOpenapiSpanDecorator |
java | resilience4j__resilience4j | resilience4j-bulkhead/src/test/java/io/github/resilience4j/bulkhead/ThreadPoolBulkheadConfigTest.java | {
"start": 1004,
"end": 8423
} | class ____ {
@Test
public void testBuildCustom() {
int maxThreadPoolSize = 20;
int coreThreadPoolSize = 2;
long maxWait = 555;
int queueCapacity = 50;
ThreadPoolBulkheadConfig config = ThreadPoolBulkheadConfig.custom()
.maxThreadPoolSize(maxThreadPoolSize)
.coreThreadPoolSize(coreThreadPoolSize)
.queueCapacity(queueCapacity)
.keepAliveDuration(Duration.ofMillis(maxWait))
.build();
assertThat(config).isNotNull();
assertThat(config.getMaxThreadPoolSize()).isEqualTo(maxThreadPoolSize);
assertThat(config.getCoreThreadPoolSize()).isEqualTo(coreThreadPoolSize);
assertThat(config.getKeepAliveDuration().toMillis()).isEqualTo(maxWait);
assertThat(config.getQueueCapacity()).isEqualTo(queueCapacity);
assertThat(config.getContextPropagator()).isEmpty();
}
@Test
public void testCreateFromBaseConfig() {
int maxThreadPoolSize = 20;
int coreThreadPoolSize = 2;
long maxWait = 555;
int queueCapacity = 50;
ThreadPoolBulkheadConfig config = ThreadPoolBulkheadConfig
.from(ThreadPoolBulkheadConfig.custom().build())
.maxThreadPoolSize(maxThreadPoolSize)
.coreThreadPoolSize(coreThreadPoolSize)
.queueCapacity(queueCapacity)
.keepAliveDuration(Duration.ofMillis(maxWait))
.build();
assertThat(config).isNotNull();
assertThat(config.getMaxThreadPoolSize()).isEqualTo(maxThreadPoolSize);
assertThat(config.getCoreThreadPoolSize()).isEqualTo(coreThreadPoolSize);
assertThat(config.getKeepAliveDuration().toMillis()).isEqualTo(maxWait);
assertThat(config.getQueueCapacity()).isEqualTo(queueCapacity);
assertThat(config.getContextPropagator()).isEmpty();
}
@Test(expected = IllegalArgumentException.class)
public void testBuildWithIllegalMaxThreadPoolSize() {
ThreadPoolBulkheadConfig.custom()
.maxThreadPoolSize(-1)
.build();
}
@Test(expected = IllegalArgumentException.class)
public void testBuildWithIllegalCoreThreadPoolSize() {
ThreadPoolBulkheadConfig.custom()
.coreThreadPoolSize(-1)
.build();
}
@Test(expected = IllegalArgumentException.class)
public void testBuildWithIllegalMaxWait() {
ThreadPoolBulkheadConfig.custom()
.keepAliveDuration(Duration.ofMillis(-1))
.build();
}
@Test(expected = IllegalArgumentException.class)
public void testBuildWithIllegalQueueCapacity() {
ThreadPoolBulkheadConfig.custom()
.queueCapacity(-1)
.build();
}
@Test(expected = IllegalArgumentException.class)
public void testBuildWithIllegalMaxCoreThreads() {
ThreadPoolBulkheadConfig.custom()
.maxThreadPoolSize(1)
.coreThreadPoolSize(2)
.build();
}
@Test
public void testContextPropagatorConfig() {
ThreadPoolBulkheadConfig config = ThreadPoolBulkheadConfig
.custom()
.contextPropagator(TestCtxPropagator.class)
.build();
assertThat(config).isNotNull();
assertThat(config.getContextPropagator()).isNotNull();
assertThat(config.getContextPropagator().size()).isEqualTo(1);
assertThat(config.getContextPropagator().get(0).getClass()).isEqualTo(TestCtxPropagator.class);
}
@Test
public void testContextPropagatorConfigDefault() {
int maxThreadPoolSize = 20;
int coreThreadPoolSize = 2;
long maxWait = 555;
int queueCapacity = 50;
ThreadPoolBulkheadConfig config = ThreadPoolBulkheadConfig.custom()
.maxThreadPoolSize(maxThreadPoolSize)
.coreThreadPoolSize(coreThreadPoolSize)
.queueCapacity(queueCapacity)
.keepAliveDuration(Duration.ofMillis(maxWait))
.build();
assertThat(config).isNotNull();
assertThat(config.getContextPropagator()).isNotNull();
assertThat(config.getContextPropagator()).isEmpty();
}
@Test
public void testContextPropagatorSetAsBean() {
int maxThreadPoolSize = 20;
int coreThreadPoolSize = 2;
long maxWait = 555;
int queueCapacity = 50;
ThreadPoolBulkheadConfig config = ThreadPoolBulkheadConfig.custom()
.maxThreadPoolSize(maxThreadPoolSize)
.coreThreadPoolSize(coreThreadPoolSize)
.queueCapacity(queueCapacity)
.keepAliveDuration(Duration.ofMillis(maxWait))
.contextPropagator(new TestCtxPropagator())
.build();
assertThat(config).isNotNull();
assertThat(config.getContextPropagator()).isNotNull();
assertThat(config.getContextPropagator()).hasSize(1);
assertThat(config.getContextPropagator().get(0).getClass()).isEqualTo(TestCtxPropagator.class);
}
@Test
public void testContextPropagatorSetAsBeanOverrideSetAsClass() {
int maxThreadPoolSize = 20;
int coreThreadPoolSize = 2;
long maxWait = 555;
int queueCapacity = 50;
ThreadPoolBulkheadConfig config = ThreadPoolBulkheadConfig.custom()
.maxThreadPoolSize(maxThreadPoolSize)
.coreThreadPoolSize(coreThreadPoolSize)
.queueCapacity(queueCapacity)
.keepAliveDuration(Duration.ofMillis(maxWait))
.contextPropagator(TestCtxPropagator2.class)
//this should override TestCtxPropagator2 context propagator
.contextPropagator(new TestCtxPropagator())
.build();
assertThat(config).isNotNull();
assertThat(config.getContextPropagator()).isNotNull();
assertThat(config.getContextPropagator()).hasSize(2);
List<Class<? extends ContextPropagator>> ctxPropagators = config.getContextPropagator()
.stream().map(ct -> ct.getClass()).collect(Collectors.toList());
assertThat(ctxPropagators).containsExactlyInAnyOrder(TestCtxPropagator.class, TestCtxPropagator2.class);
}
@Test
public void testToString() {
int maxThreadPoolSize = 20;
int coreThreadPoolSize = 2;
long maxWait = 555;
int queueCapacity = 50;
ThreadPoolBulkheadConfig config = ThreadPoolBulkheadConfig.custom()
.maxThreadPoolSize(maxThreadPoolSize)
.coreThreadPoolSize(coreThreadPoolSize)
.queueCapacity(queueCapacity)
.keepAliveDuration(Duration.ofMillis(maxWait))
.writableStackTraceEnabled(false)
.contextPropagator(TestCtxPropagator2.class)
.build();
String result = config.toString();
assertThat(result).startsWith("ThreadPoolBulkheadConfig{");
assertThat(result).contains("maxThreadPoolSize=20");
assertThat(result).contains("coreThreadPoolSize=2");
assertThat(result).contains("queueCapacity=50");
assertThat(result).contains("keepAliveDuration=PT0.555S");
assertThat(result).contains("writableStackTraceEnabled=false");
assertThat(result).contains("contextPropagators=[io.github.resilience4j.bulkhead.ThreadPoolBulkheadConfigTest$TestCtxPropagator2");
assertThat(result).endsWith("}");
}
public static | ThreadPoolBulkheadConfigTest |
java | apache__hadoop | hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractDelete.java | {
"start": 1055,
"end": 1267
} | class ____ extends AbstractContractDeleteTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new NativeAzureFileSystemContract(conf);
}
}
| ITestAzureNativeContractDelete |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/search/TextExpansionQueryBuilder.java | {
"start": 2192,
"end": 14852
} | class ____ extends AbstractQueryBuilder<TextExpansionQueryBuilder> {
public static final String NAME = "text_expansion";
public static final ParseField MODEL_TEXT = new ParseField("model_text");
public static final ParseField MODEL_ID = new ParseField("model_id");
private final String fieldName;
private final String modelText;
private final String modelId;
private SetOnce<TextExpansionResults> weightedTokensSupplier;
private final TokenPruningConfig tokenPruningConfig;
private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(ParseField.class);
public static final String TEXT_EXPANSION_DEPRECATION_MESSAGE = NAME + " is deprecated. Use sparse_vector instead.";
public TextExpansionQueryBuilder(String fieldName, String modelText, String modelId) {
this(fieldName, modelText, modelId, null);
}
public TextExpansionQueryBuilder(String fieldName, String modelText, String modelId, @Nullable TokenPruningConfig tokenPruningConfig) {
if (fieldName == null) {
throw new IllegalArgumentException("[" + NAME + "] requires a fieldName");
}
if (modelText == null) {
throw new IllegalArgumentException("[" + NAME + "] requires a " + MODEL_TEXT.getPreferredName() + " value");
}
if (modelId == null) {
throw new IllegalArgumentException("[" + NAME + "] requires a " + MODEL_ID.getPreferredName() + " value");
}
this.fieldName = fieldName;
this.modelText = modelText;
this.modelId = modelId;
this.tokenPruningConfig = tokenPruningConfig;
}
public TextExpansionQueryBuilder(StreamInput in) throws IOException {
super(in);
this.fieldName = in.readString();
this.modelText = in.readString();
this.modelId = in.readString();
if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) {
this.tokenPruningConfig = in.readOptionalWriteable(TokenPruningConfig::new);
} else {
this.tokenPruningConfig = null;
}
}
private TextExpansionQueryBuilder(TextExpansionQueryBuilder other, SetOnce<TextExpansionResults> weightedTokensSupplier) {
this.fieldName = other.fieldName;
this.modelText = other.modelText;
this.modelId = other.modelId;
this.tokenPruningConfig = other.tokenPruningConfig;
this.boost = other.boost;
this.queryName = other.queryName;
this.weightedTokensSupplier = weightedTokensSupplier;
}
String getFieldName() {
return fieldName;
}
public TokenPruningConfig getTokenPruningConfig() {
return tokenPruningConfig;
}
@Override
public String getWriteableName() {
return NAME;
}
@Override
public TransportVersion getMinimalSupportedVersion() {
return TransportVersions.V_8_8_0;
}
@Override
protected void doWriteTo(StreamOutput out) throws IOException {
if (weightedTokensSupplier != null) {
throw new IllegalStateException("token supplier must be null, can't serialize suppliers, missing a rewriteAndFetch?");
}
out.writeString(fieldName);
out.writeString(modelText);
out.writeString(modelId);
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) {
out.writeOptionalWriteable(tokenPruningConfig);
}
}
@Override
protected void doXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(NAME);
builder.startObject(fieldName);
builder.field(MODEL_TEXT.getPreferredName(), modelText);
builder.field(MODEL_ID.getPreferredName(), modelId);
if (tokenPruningConfig != null) {
builder.field(PRUNING_CONFIG.getPreferredName(), tokenPruningConfig);
}
boostAndQueryNameToXContent(builder);
builder.endObject();
builder.endObject();
}
@Override
protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) {
if (weightedTokensSupplier != null) {
if (weightedTokensSupplier.get() == null) {
return this;
}
return weightedTokensToQuery(fieldName, weightedTokensSupplier.get());
}
CoordinatedInferenceAction.Request inferRequest = CoordinatedInferenceAction.Request.forTextInput(
modelId,
List.of(modelText),
TextExpansionConfigUpdate.EMPTY_UPDATE,
false,
InferModelAction.Request.DEFAULT_TIMEOUT_FOR_API
);
inferRequest.setHighPriority(true);
inferRequest.setPrefixType(TrainedModelPrefixStrings.PrefixType.SEARCH);
SetOnce<TextExpansionResults> textExpansionResultsSupplier = new SetOnce<>();
queryRewriteContext.registerAsyncAction(
(client, listener) -> executeAsyncWithOrigin(
client,
ML_ORIGIN,
CoordinatedInferenceAction.INSTANCE,
inferRequest,
ActionListener.wrap(inferenceResponse -> {
if (inferenceResponse.getInferenceResults().isEmpty()) {
listener.onFailure(new IllegalStateException("inference response contain no results"));
return;
}
if (inferenceResponse.getInferenceResults().get(0) instanceof TextExpansionResults textExpansionResults) {
textExpansionResultsSupplier.set(textExpansionResults);
listener.onResponse(null);
} else if (inferenceResponse.getInferenceResults().get(0) instanceof WarningInferenceResults warning) {
listener.onFailure(new IllegalStateException(warning.getWarning()));
} else {
listener.onFailure(
new IllegalArgumentException(
"expected a result of type ["
+ TextExpansionResults.NAME
+ "] received ["
+ inferenceResponse.getInferenceResults().get(0).getWriteableName()
+ "]. Is ["
+ modelId
+ "] a compatible model?"
)
);
}
}, listener::onFailure)
)
);
return new TextExpansionQueryBuilder(this, textExpansionResultsSupplier);
}
private QueryBuilder weightedTokensToQuery(String fieldName, TextExpansionResults textExpansionResults) {
if (tokenPruningConfig != null) {
WeightedTokensQueryBuilder weightedTokensQueryBuilder = new WeightedTokensQueryBuilder(
fieldName,
textExpansionResults.getWeightedTokens(),
tokenPruningConfig
);
weightedTokensQueryBuilder.queryName(queryName);
weightedTokensQueryBuilder.boost(boost);
return weightedTokensQueryBuilder;
}
// Note: Weighted tokens queries were introduced in 8.13.0. To support mixed version clusters prior to 8.13.0,
// if no token pruning configuration is specified we fall back to a boolean query.
// TODO this should be updated to always use a WeightedTokensQueryBuilder once it's in all supported versions.
var boolQuery = QueryBuilders.boolQuery();
for (var weightedToken : textExpansionResults.getWeightedTokens()) {
boolQuery.should(QueryBuilders.termQuery(fieldName, weightedToken.token()).boost(weightedToken.weight()));
}
boolQuery.minimumShouldMatch(1);
boolQuery.boost(boost);
boolQuery.queryName(queryName);
return boolQuery;
}
@Override
protected Query doToQuery(SearchExecutionContext context) {
throw new IllegalStateException("text_expansion should have been rewritten to another query type");
}
@Override
protected boolean doEquals(TextExpansionQueryBuilder other) {
return Objects.equals(fieldName, other.fieldName)
&& Objects.equals(modelText, other.modelText)
&& Objects.equals(modelId, other.modelId)
&& Objects.equals(tokenPruningConfig, other.tokenPruningConfig)
&& Objects.equals(weightedTokensSupplier, other.weightedTokensSupplier);
}
@Override
protected int doHashCode() {
return Objects.hash(fieldName, modelText, modelId, tokenPruningConfig, weightedTokensSupplier);
}
public static TextExpansionQueryBuilder fromXContent(XContentParser parser) throws IOException {
deprecationLogger.warn(DeprecationCategory.API, NAME, TEXT_EXPANSION_DEPRECATION_MESSAGE);
String fieldName = null;
String modelText = null;
String modelId = null;
TokenPruningConfig tokenPruningConfig = null;
float boost = AbstractQueryBuilder.DEFAULT_BOOST;
String queryName = null;
String currentFieldName = null;
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, currentFieldName);
fieldName = currentFieldName;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if (PRUNING_CONFIG.match(currentFieldName, parser.getDeprecationHandler())) {
tokenPruningConfig = TokenPruningConfig.fromXContent(parser);
} else {
throw new ParsingException(
parser.getTokenLocation(),
"[" + NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]"
);
}
} else if (token.isValue()) {
if (MODEL_TEXT.match(currentFieldName, parser.getDeprecationHandler())) {
modelText = parser.text();
} else if (MODEL_ID.match(currentFieldName, parser.getDeprecationHandler())) {
modelId = parser.text();
} else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
boost = parser.floatValue();
} else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
queryName = parser.text();
} else {
throw new ParsingException(
parser.getTokenLocation(),
"[" + NAME + "] query does not support [" + currentFieldName + "]"
);
}
} else {
throw new ParsingException(
parser.getTokenLocation(),
"[" + NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]"
);
}
}
} else {
throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, parser.currentName());
fieldName = parser.currentName();
modelText = parser.text();
}
}
if (modelText == null) {
throw new ParsingException(parser.getTokenLocation(), "No text specified for text query");
}
if (fieldName == null) {
throw new ParsingException(parser.getTokenLocation(), "No fieldname specified for query");
}
TextExpansionQueryBuilder queryBuilder = new TextExpansionQueryBuilder(fieldName, modelText, modelId, tokenPruningConfig);
queryBuilder.queryName(queryName);
queryBuilder.boost(boost);
return queryBuilder;
}
}
| TextExpansionQueryBuilder |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java | {
"start": 36754,
"end": 37046
} | class ____ extends TimerTask {
@Override
public void run() {
try {
reloadManifest();
} catch (Throwable t) {
// Prevent uncaught exceptions from killing this thread
LOG.warn("Error while reloading manifest: ", t);
}
}
}
}
| ManifestReloadTask |
java | redisson__redisson | redisson/src/main/java/org/redisson/redisnode/RedissonBaseNodes.java | {
"start": 1354,
"end": 5791
} | class ____ implements BaseRedisNodes {
ConnectionManager connectionManager;
CommandAsyncExecutor commandExecutor;
public RedissonBaseNodes(ConnectionManager connectionManager, CommandAsyncExecutor commandExecutor) {
this.connectionManager = connectionManager;
this.commandExecutor = commandExecutor;
}
protected <T extends org.redisson.api.redisnode.RedisNode> Collection<T> getNodes(NodeType type) {
Collection<MasterSlaveEntry> entries = connectionManager.getEntrySet();
List<T> result = new ArrayList<>();
for (MasterSlaveEntry masterSlaveEntry : entries) {
if (type == NodeType.MASTER) {
RedisNode entry = new RedisNode(masterSlaveEntry.getClient(), commandExecutor, NodeType.MASTER);
result.add((T) entry);
continue;
}
for (ClientConnectionsEntry slaveEntry : masterSlaveEntry.getAllEntries()) {
if (slaveEntry.getFreezeReason() != ClientConnectionsEntry.FreezeReason.MANAGER
&& slaveEntry.getNodeType() == type) {
RedisNode entry = new RedisNode(slaveEntry.getClient(), commandExecutor, slaveEntry.getNodeType());
result.add((T) entry);
}
}
}
return result;
}
protected RedisNode getNode(String address, NodeType nodeType) {
Collection<MasterSlaveEntry> entries = connectionManager.getEntrySet();
RedisURI addr = new RedisURI(address);
for (MasterSlaveEntry masterSlaveEntry : entries) {
if (nodeType == NodeType.MASTER
&& addr.equals(masterSlaveEntry.getClient().getAddr())) {
return new RedisNode(masterSlaveEntry.getClient(), commandExecutor, NodeType.MASTER);
}
for (ClientConnectionsEntry entry : masterSlaveEntry.getAllEntries()) {
if (addr.equals(entry.getClient().getAddr())
&& entry.getFreezeReason() != ClientConnectionsEntry.FreezeReason.MANAGER) {
return new RedisNode(entry.getClient(), commandExecutor, entry.getNodeType());
}
}
}
return null;
}
protected List<RedisNode> getNodes() {
Collection<MasterSlaveEntry> entries = connectionManager.getEntrySet();
List<RedisNode> result = new ArrayList<>();
for (MasterSlaveEntry masterSlaveEntry : entries) {
if (masterSlaveEntry.getAllEntries().isEmpty()) {
RedisNode masterEntry = new RedisNode(masterSlaveEntry.getClient(), commandExecutor, NodeType.MASTER);
result.add(masterEntry);
}
for (ClientConnectionsEntry slaveEntry : masterSlaveEntry.getAllEntries()) {
if (slaveEntry.getFreezeReason() != ClientConnectionsEntry.FreezeReason.MANAGER) {
RedisNode entry = new RedisNode(slaveEntry.getClient(), commandExecutor, slaveEntry.getNodeType());
result.add(entry);
}
}
}
return result;
}
@Override
public boolean pingAll(long timeout, TimeUnit timeUnit) {
List<RedisNode> clients = getNodes();
List<CompletableFuture<Boolean>> futures = new ArrayList<>();
for (RedisNode entry : clients) {
CompletionStage<RedisConnection> f = entry.getClient().connectAsync();
CompletionStage<Boolean> ff = f.thenCompose(c -> {
RFuture<String> r = c.async(timeUnit.toMillis(timeout), RedisCommands.PING);
return r.whenComplete((rr, ex) -> {
c.closeAsync();
});
}).thenApply("PONG"::equals);
futures.add(ff.toCompletableFuture());
}
CompletableFuture<Void> f = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
try {
f.get(timeout, timeUnit);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
} catch (Exception e) {
return false;
}
return futures.stream()
.map(r -> r.getNow(false))
.filter(r -> !r).findAny()
.orElse(true);
}
@Override
public boolean pingAll() {
return pingAll(1, TimeUnit.SECONDS);
}
}
| RedissonBaseNodes |
java | apache__camel | components/camel-ai/camel-langchain4j-embeddings/src/test/java/org/apache/camel/component/langchain4j/embeddings/LangChain4jEmbeddingsComponentPineconeTargetIT.java | {
"start": 2066,
"end": 6005
} | class ____ extends CamelTestSupport {
public static final long POINT_ID = 8;
public static final String PINECONE_URI = "pinecone:embeddings?token={{pinecone.token}}";
@Override
protected CamelContext createCamelContext() throws Exception {
CamelContext context = super.createCamelContext();
context.getRegistry().bind("embedding-model", new AllMiniLmL6V2EmbeddingModel());
return context;
}
@Test
@Order(1)
public void createServerlessIndex() {
Exchange result = fluentTemplate.to(PINECONE_URI)
.withHeader(PineconeVectorDbHeaders.ACTION, PineconeVectorDbAction.CREATE_SERVERLESS_INDEX)
.withBody(
"hello")
.withHeader(PineconeVectorDbHeaders.INDEX_NAME, "embeddings")
.withHeader(PineconeVectorDbHeaders.COLLECTION_SIMILARITY_METRIC, "cosine")
.withHeader(PineconeVectorDbHeaders.COLLECTION_DIMENSION, 384)
.withHeader(PineconeVectorDbHeaders.COLLECTION_CLOUD, "aws")
.withHeader(PineconeVectorDbHeaders.COLLECTION_CLOUD_REGION, "us-east-1")
.request(Exchange.class);
assertThat(result).isNotNull();
assertThat(result.getException()).isNull();
}
@Test
@Order(2)
public void upsert() {
Exchange result = fluentTemplate.to("direct:in")
.withHeader(PineconeVectorDbHeaders.ACTION, PineconeVectorDbAction.UPSERT)
.withBody("hi")
.withHeader(PineconeVectorDbHeaders.INDEX_NAME, "embeddings")
.withHeader(PineconeVectorDbHeaders.INDEX_ID, "elements")
.request(Exchange.class);
assertThat(result).isNotNull();
assertThat(result.getException()).isNull();
}
@Test
@Order(3)
public void queryByVector() {
List<Float> elements = generateFloatVector();
Exchange result = fluentTemplate.to(PINECONE_URI)
.withHeader(PineconeVectorDbHeaders.ACTION, PineconeVectorDbAction.QUERY)
.withBody(
elements)
.withHeader(PineconeVectorDbHeaders.INDEX_NAME, "embeddings")
.withHeader(PineconeVectorDbHeaders.QUERY_TOP_K, 384)
.request(Exchange.class);
assertThat(result).isNotNull();
assertThat(result.getException()).isNull();
assertThat(((QueryResponseWithUnsignedIndices) result.getMessage().getBody()).getMatchesList()).isNotNull();
}
@Test
@Order(4)
public void deleteIndex() {
Exchange result = fluentTemplate.to(PINECONE_URI)
.withHeader(PineconeVectorDbHeaders.ACTION, PineconeVectorDbAction.DELETE_INDEX)
.withBody(
"test")
.withHeader(PineconeVectorDbHeaders.INDEX_NAME, "embeddings")
.request(Exchange.class);
assertThat(result).isNotNull();
assertThat(result.getException()).isNull();
}
@Override
protected RoutesBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from("direct:in")
.to("langchain4j-embeddings:test")
.setHeader(PineconeVectorDbHeaders.ACTION).constant(PineconeVectorDbAction.UPSERT)
.setHeader(PineconeVectorDbHeaders.INDEX_ID).constant(POINT_ID)
.transform(
new DataType("pinecone:embeddings"))
.to(PINECONE_URI);
}
};
}
private List<Float> generateFloatVector() {
Random ran = new Random();
List<Float> vector = new ArrayList<>();
for (int i = 0; i < 384; ++i) {
vector.add(ran.nextFloat());
}
return vector;
}
}
| LangChain4jEmbeddingsComponentPineconeTargetIT |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/JdkObsoleteTest.java | {
"start": 11052,
"end": 11312
} | class ____ {
void f() {
SortedSetMultimap<String, String> myMultimap = TreeMultimap.create();
String myValue = myMultimap.get("foo").first();
}
}
""")
.doTest();
}
}
| Test |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/operators/testutils/types/IntPair.java | {
"start": 875,
"end": 1418
} | class ____ {
private int key;
private int value;
public IntPair() {}
public IntPair(int key, int value) {
this.key = key;
this.value = value;
}
public int getKey() {
return key;
}
public void setKey(int key) {
this.key = key;
}
public int getValue() {
return value;
}
public void setValue(int value) {
this.value = value;
}
@Override
public String toString() {
return "(" + this.key + "/" + this.value + ")";
}
}
| IntPair |
java | apache__kafka | metadata/src/main/java/org/apache/kafka/image/TopicDelta.java | {
"start": 1447,
"end": 10610
} | class ____ {
private final TopicImage image;
private final Map<Integer, PartitionRegistration> partitionChanges = new HashMap<>();
private final Map<Integer, Integer> partitionToUncleanLeaderElectionCount = new HashMap<>();
private final Map<Integer, Integer> partitionToElrElectionCount = new HashMap<>();
public TopicDelta(TopicImage image) {
this.image = image;
}
public TopicImage image() {
return image;
}
public Map<Integer, PartitionRegistration> partitionChanges() {
return partitionChanges;
}
public Map<Integer, PartitionRegistration> newPartitions() {
return partitionChanges
.entrySet()
.stream()
.filter(entry -> !image.partitions().containsKey(entry.getKey()))
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
}
public String name() {
return image.name();
}
public Uuid id() {
return image.id();
}
public Map<Integer, Integer> partitionToElrElectionCount() {
return partitionToElrElectionCount;
}
public Map<Integer, Integer> partitionToUncleanLeaderElectionCount() {
return partitionToUncleanLeaderElectionCount;
}
public void replay(PartitionRecord record) {
int partitionId = record.partitionId();
PartitionRegistration prevPartition = partitionChanges.get(partitionId);
if (prevPartition == null) {
prevPartition = image.partitions().get(partitionId);
}
if (prevPartition != null) {
updateElectionStats(partitionId, prevPartition, record.leader(), record.leaderRecoveryState());
}
partitionChanges.put(record.partitionId(), new PartitionRegistration(record));
}
public void replay(PartitionChangeRecord record) {
int partitionId = record.partitionId();
PartitionRegistration prevPartition = partitionChanges.get(partitionId);
if (prevPartition == null) {
prevPartition = image.partitions().get(partitionId);
if (prevPartition == null) {
throw new RuntimeException("Unable to find partition " +
record.topicId() + ":" + partitionId);
}
}
updateElectionStats(partitionId, prevPartition, record.leader(), record.leaderRecoveryState());
partitionChanges.put(record.partitionId(), prevPartition.merge(record));
}
private void updateElectionStats(int partitionId, PartitionRegistration prevPartition, int newLeader, byte newLeaderRecoveryState) {
if (PartitionRegistration.electionWasUnclean(newLeaderRecoveryState)) {
partitionToUncleanLeaderElectionCount.put(partitionId, partitionToUncleanLeaderElectionCount.getOrDefault(partitionId, 0) + 1);
}
if (Replicas.contains(prevPartition.elr, newLeader)) {
partitionToElrElectionCount.put(partitionId, partitionToElrElectionCount.getOrDefault(partitionId, 0) + 1);
}
}
public void replay() {
// Some partitions are not added to the image yet, let's check the partitionChanges first.
partitionChanges.forEach(this::maybeClearElr);
image.partitions().forEach((partitionId, partition) -> {
if (!partitionChanges.containsKey(partitionId)) {
maybeClearElr(partitionId, partition);
}
});
}
void maybeClearElr(int partitionId, PartitionRegistration partition) {
if (partition.elr.length != 0 || partition.lastKnownElr.length != 0) {
partitionChanges.put(partitionId, partition.merge(
new PartitionChangeRecord().
setPartitionId(partitionId).
setTopicId(image.id()).
setEligibleLeaderReplicas(List.of()).
setLastKnownElr(List.of())
));
}
}
public TopicImage apply() {
Map<Integer, PartitionRegistration> newPartitions = new HashMap<>();
for (Entry<Integer, PartitionRegistration> entry : image.partitions().entrySet()) {
int partitionId = entry.getKey();
PartitionRegistration changedPartition = partitionChanges.get(partitionId);
if (changedPartition == null) {
newPartitions.put(partitionId, entry.getValue());
} else {
newPartitions.put(partitionId, changedPartition);
}
}
for (Entry<Integer, PartitionRegistration> entry : partitionChanges.entrySet()) {
if (!newPartitions.containsKey(entry.getKey())) {
newPartitions.put(entry.getKey(), entry.getValue());
}
}
return new TopicImage(image.name(), image.id(), newPartitions);
}
/**
* Find the partitions that have change based on the replica given.
* <p>
* The changes identified are:
* <ul>
* <li>deletes: partitions for which the broker is not a replica anymore</li>
* <li>electedLeaders: partitions for which the broker is now a leader (leader epoch bump on the leader)</li>
* <li>leaders: partitions for which the isr or replicas change if the broker is a leader (partition epoch bump on the leader)</li>
* <li>followers: partitions for which the broker is now a follower or follower with isr or replica updates (partition epoch bump on follower)</li>
* <li>topicIds: a map of topic names to topic IDs in leaders and followers changes</li>
* <li>directoryIds: partitions for which directory id changes or newly added to the broker</li>
* </ul>
* <p>
* Leader epoch bumps are a strict subset of all partition epoch bumps, so all partitions in electedLeaders will be in leaders.
*
* @param brokerId the broker id
* @return the LocalReplicaChanges that cover changes in the broker
*/
@SuppressWarnings("checkstyle:cyclomaticComplexity")
public LocalReplicaChanges localChanges(int brokerId) {
Set<TopicPartition> deletes = new HashSet<>();
Map<TopicPartition, LocalReplicaChanges.PartitionInfo> electedLeaders = new HashMap<>();
Map<TopicPartition, LocalReplicaChanges.PartitionInfo> leaders = new HashMap<>();
Map<TopicPartition, LocalReplicaChanges.PartitionInfo> followers = new HashMap<>();
Map<String, Uuid> topicIds = new HashMap<>();
Map<TopicIdPartition, Uuid> directoryIds = new HashMap<>();
for (Entry<Integer, PartitionRegistration> entry : partitionChanges.entrySet()) {
if (!Replicas.contains(entry.getValue().replicas, brokerId)) {
PartitionRegistration prevPartition = image.partitions().get(entry.getKey());
if (prevPartition != null && Replicas.contains(prevPartition.replicas, brokerId)) {
deletes.add(new TopicPartition(name(), entry.getKey()));
}
} else if (entry.getValue().leader == brokerId) {
PartitionRegistration prevPartition = image.partitions().get(entry.getKey());
if (prevPartition == null || prevPartition.partitionEpoch != entry.getValue().partitionEpoch) {
TopicPartition tp = new TopicPartition(name(), entry.getKey());
LocalReplicaChanges.PartitionInfo partitionInfo = new LocalReplicaChanges.PartitionInfo(id(), entry.getValue());
leaders.put(tp, partitionInfo);
if (prevPartition == null || prevPartition.leaderEpoch != entry.getValue().leaderEpoch) {
electedLeaders.put(tp, partitionInfo);
}
topicIds.putIfAbsent(name(), id());
}
} else {
PartitionRegistration prevPartition = image.partitions().get(entry.getKey());
if (prevPartition == null || prevPartition.partitionEpoch != entry.getValue().partitionEpoch) {
followers.put(
new TopicPartition(name(), entry.getKey()),
new LocalReplicaChanges.PartitionInfo(id(), entry.getValue())
);
topicIds.putIfAbsent(name(), id());
}
}
try {
PartitionRegistration prevPartition = image.partitions().get(entry.getKey());
if (prevPartition == null || prevPartition.directory(brokerId) != entry.getValue().directory(brokerId)) {
directoryIds.put(
new TopicIdPartition(id(), new TopicPartition(name(), entry.getKey())),
entry.getValue().directory(brokerId)
);
}
} catch (IllegalArgumentException e) {
// Do nothing if broker isn't part of the replica set.
}
}
return new LocalReplicaChanges(deletes, electedLeaders, leaders, followers, topicIds, directoryIds);
}
@Override
public String toString() {
return "TopicDelta(" +
"partitionChanges=" + partitionChanges +
')';
}
}
| TopicDelta |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/onetomany/C.java | {
"start": 396,
"end": 786
} | class ____ {
@GeneratedValue(strategy = GenerationType.AUTO)
@Id
Long id;
@NotNull
String name;
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
@Override
public String toString() {
return "C [id=" + id + ", name=" + name + "]";
}
}
| C |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/EventHubsEndpointBuilderFactory.java | {
"start": 37463,
"end": 40056
} | interface ____ extends EndpointProducerBuilder {
default EventHubsEndpointProducerBuilder basic() {
return (EventHubsEndpointProducerBuilder) this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedEventHubsEndpointProducerBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedEventHubsEndpointProducerBuilder lazyStartProducer(String lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
}
/**
* Builder for endpoint for the Azure Event Hubs component.
*/
public | AdvancedEventHubsEndpointProducerBuilder |
java | apache__dubbo | dubbo-common/src/main/java/org/apache/dubbo/common/utils/IOUtils.java | {
"start": 7951,
"end": 9114
} | class ____ resource [" + path + "]";
throw new FileNotFoundException(description + " cannot be resolved to URL because it does not exist");
}
return url;
}
try {
// try URL
return new URL(resourceLocation);
} catch (MalformedURLException ex) {
// no URL -> treat as file path
try {
return new File(resourceLocation).toURI().toURL();
} catch (MalformedURLException ex2) {
throw new FileNotFoundException(
"Resource location [" + resourceLocation + "] is neither a URL not a well-formed file path");
}
}
}
public static byte[] toByteArray(final InputStream inputStream) throws IOException {
try (final ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream()) {
byte[] buffer = new byte[1024];
int n;
while (-1 != (n = inputStream.read(buffer))) {
byteArrayOutputStream.write(buffer, 0, n);
}
return byteArrayOutputStream.toByteArray();
}
}
}
| path |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterNodeLabelsResponse.java | {
"start": 1202,
"end": 2453
} | class ____ {
/**
* Creates a new instance.
*
* @param labels Node labels
* @return response
* @deprecated Use {@link #newInstance(List)} instead.
*/
@Deprecated
public static GetClusterNodeLabelsResponse newInstance(Set<String> labels) {
List<NodeLabel> list = new ArrayList<>();
for (String label : labels) {
list.add(NodeLabel.newInstance(label));
}
return newInstance(list);
}
public static GetClusterNodeLabelsResponse newInstance(List<NodeLabel> labels) {
GetClusterNodeLabelsResponse response =
Records.newRecord(GetClusterNodeLabelsResponse.class);
response.setNodeLabelList(labels);
return response;
}
public abstract void setNodeLabelList(List<NodeLabel> labels);
public abstract List<NodeLabel> getNodeLabelList();
/**
* Set node labels to the response.
*
* @param labels Node labels
* @deprecated Use {@link #setNodeLabelList(List)} instead.
*/
@Deprecated
public abstract void setNodeLabels(Set<String> labels);
/**
* Get node labels of the response.
*
* @return Node labels
* @deprecated Use {@link #getNodeLabelList()} instead.
*/
@Deprecated
public abstract Set<String> getNodeLabels();
}
| GetClusterNodeLabelsResponse |
java | apache__hadoop | hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ThrottledInputStream.java | {
"start": 1490,
"end": 4907
} | class ____ extends InputStream implements Seekable {
private final InputStream rawStream;
private final float maxBytesPerSec;
private final long startTime = System.currentTimeMillis();
private long bytesRead = 0;
private long totalSleepTime = 0;
private static final long SLEEP_DURATION_MS = 50;
public ThrottledInputStream(InputStream rawStream) {
this(rawStream, Long.MAX_VALUE);
}
public ThrottledInputStream(InputStream rawStream, float maxBytesPerSec) {
assert maxBytesPerSec > 0 : "Bandwidth " + maxBytesPerSec + " is invalid";
this.rawStream = rawStream;
this.maxBytesPerSec = maxBytesPerSec;
}
@Override
public void close() throws IOException {
rawStream.close();
}
/** {@inheritDoc} */
@Override
public int read() throws IOException {
throttle();
int data = rawStream.read();
if (data != -1) {
bytesRead++;
}
return data;
}
/** {@inheritDoc} */
@Override
public int read(byte[] b) throws IOException {
throttle();
int readLen = rawStream.read(b);
if (readLen != -1) {
bytesRead += readLen;
}
return readLen;
}
/** {@inheritDoc} */
@Override
public int read(byte[] b, int off, int len) throws IOException {
if (len == 0) {
return 0;
}
throttle();
int readLen = rawStream.read(b, off, len);
if (readLen != -1) {
bytesRead += readLen;
}
return readLen;
}
private void throttle() throws IOException {
while (getBytesPerSec() > maxBytesPerSec) {
try {
Thread.sleep(SLEEP_DURATION_MS);
totalSleepTime += SLEEP_DURATION_MS;
} catch (InterruptedException e) {
throw new IOException("Thread aborted", e);
}
}
}
/**
* Getter for the number of bytes read from this stream, since creation.
* @return The number of bytes.
*/
public long getTotalBytesRead() {
return bytesRead;
}
/**
* Getter for the read-rate from this stream, since creation.
* Calculated as bytesRead/elapsedTimeSinceStart.
* @return Read rate, in bytes/sec.
*/
public long getBytesPerSec() {
if (bytesRead == 0){
return 0;
}
float elapsed = (System.currentTimeMillis() - startTime) / 1000.0f;
return (long) (bytesRead / elapsed);
}
/**
* Getter the total time spent in sleep.
* @return Number of milliseconds spent in sleep.
*/
public long getTotalSleepTime() {
return totalSleepTime;
}
/** {@inheritDoc} */
@Override
public String toString() {
return "ThrottledInputStream{" +
"bytesRead=" + bytesRead +
", maxBytesPerSec=" + maxBytesPerSec +
", bytesPerSec=" + getBytesPerSec() +
", totalSleepTime=" + totalSleepTime +
'}';
}
private void checkSeekable() throws IOException {
if (!(rawStream instanceof Seekable)) {
throw new UnsupportedOperationException(
"seek operations are unsupported by the internal stream");
}
}
@Override
public void seek(long pos) throws IOException {
checkSeekable();
((Seekable) rawStream).seek(pos);
}
@Override
public long getPos() throws IOException {
checkSeekable();
return ((Seekable) rawStream).getPos();
}
@Override
public boolean seekToNewSource(long targetPos) throws IOException {
checkSeekable();
return ((Seekable) rawStream).seekToNewSource(targetPos);
}
}
| ThrottledInputStream |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/support/AbstractContextConfigurationUtilsTests.java | {
"start": 5933,
"end": 6099
} | class ____ {
}
@ContextConfiguration("/foo.xml")
@ActiveProfiles(profiles = "foo")
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @ | QuuxConfig |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/appender/nosql/NoSqlAppender.java | {
"start": 2414,
"end": 7133
} | class ____<B extends Builder<B>> extends AbstractAppender.Builder<B>
implements org.apache.logging.log4j.core.util.Builder<NoSqlAppender> {
@PluginBuilderAttribute("bufferSize")
private int bufferSize;
@PluginElement("NoSqlProvider")
private NoSqlProvider<?> provider;
@PluginElement("AdditionalField")
private KeyValuePair[] additionalFields;
@SuppressWarnings("resource")
@Override
public NoSqlAppender build() {
final String name = getName();
if (provider == null) {
LOGGER.error("NoSQL provider not specified for appender [{}].", name);
return null;
}
final String managerName = "noSqlManager{ description=" + name + ", bufferSize=" + bufferSize
+ ", provider=" + provider + " }";
final NoSqlDatabaseManager<?> manager = NoSqlDatabaseManager.getNoSqlDatabaseManager(
managerName, bufferSize, provider, additionalFields, getConfiguration());
if (manager == null) {
return null;
}
return new NoSqlAppender(name, getFilter(), getLayout(), isIgnoreExceptions(), getPropertyArray(), manager);
}
/**
* Sets the buffer size.
*
* @param bufferSize
* If an integer greater than 0, this causes the appender to buffer log events and flush whenever the
* buffer reaches this size.
* @return this
*/
public B setBufferSize(final int bufferSize) {
this.bufferSize = bufferSize;
return asBuilder();
}
/**
* Sets the provider.
*
* @param provider
* The NoSQL provider that provides connections to the chosen NoSQL database.
* @return this
*/
public B setProvider(final NoSqlProvider<?> provider) {
this.provider = provider;
return asBuilder();
}
}
/**
* Factory method for creating a NoSQL appender within the plugin manager.
*
* @param name
* The name of the appender.
* @param ignore
* If {@code "true"} (default) exceptions encountered when appending events are logged; otherwise they
* are propagated to the caller.
* @param filter
* The filter, if any, to use.
* @param bufferSize
* If an integer greater than 0, this causes the appender to buffer log events and flush whenever the
* buffer reaches this size.
* @param provider
* The NoSQL provider that provides connections to the chosen NoSQL database.
* @return a new NoSQL appender.
* @deprecated since 2.11.0; use {@link Builder}.
*/
@SuppressWarnings("resource")
@Deprecated
public static NoSqlAppender createAppender(
// @formatter:off
final String name,
final String ignore,
final Filter filter,
final String bufferSize,
final NoSqlProvider<?> provider) {
// @formatter:on
if (provider == null) {
LOGGER.error("NoSQL provider not specified for appender [{}].", name);
return null;
}
final int bufferSizeInt = AbstractAppender.parseInt(bufferSize, 0);
final boolean ignoreExceptions = Booleans.parseBoolean(ignore, true);
final String managerName =
"noSqlManager{ description=" + name + ", bufferSize=" + bufferSizeInt + ", provider=" + provider + " }";
final NoSqlDatabaseManager<?> manager =
NoSqlDatabaseManager.getNoSqlDatabaseManager(managerName, bufferSizeInt, provider, null, null);
if (manager == null) {
return null;
}
return new NoSqlAppender(name, filter, null, ignoreExceptions, null, manager);
}
@PluginBuilderFactory
public static <B extends Builder<B>> B newBuilder() {
return new Builder<B>().asBuilder();
}
private final String description;
private NoSqlAppender(
final String name,
final Filter filter,
final Layout<? extends Serializable> layout,
final boolean ignoreExceptions,
final Property[] properties,
final NoSqlDatabaseManager<?> manager) {
super(name, filter, layout, ignoreExceptions, properties, manager);
this.description = this.getName() + "{ manager=" + this.getManager() + " }";
}
@Override
public String toString() {
return this.description;
}
}
| Builder |
java | apache__flink | flink-table/flink-table-common/src/test/java/org/apache/flink/table/types/extraction/DataTypeExtractorTest.java | {
"start": 38667,
"end": 38909
} | class ____ {
public Map<String, Integer> mapField;
public SimplePojo simplePojoField;
public Object someObject;
}
/** Simple POJO with no RAW types. */
@SuppressWarnings("unused")
public static | ComplexPojo |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateIntAggregator.java | {
"start": 2629,
"end": 3191
} | class ____ {
static final long BASE_RAM_USAGE = RamUsageEstimator.sizeOfObject(IntIrateState.class);
long lastTimestamp;
long secondLastTimestamp = -1;
int lastValue;
int secondLastValue;
boolean hasSecond;
IntIrateState(long lastTimestamp, int lastValue) {
this.lastTimestamp = lastTimestamp;
this.lastValue = lastValue;
this.hasSecond = false;
}
long bytesUsed() {
return BASE_RAM_USAGE;
}
}
public static final | IntIrateState |
java | quarkusio__quarkus | integration-tests/reactive-messaging-hibernate-orm/src/test/java/io/quarkus/it/kafka/KafkaConnectorIT.java | {
"start": 115,
"end": 170
} | class ____ extends KafkaConnectorTest {
}
| KafkaConnectorIT |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/operations/converters/SqlDescribeModelConverter.java | {
"start": 1257,
"end": 1826
} | class ____ implements SqlNodeConverter<SqlRichDescribeModel> {
@Override
public Operation convertSqlNode(
SqlRichDescribeModel sqlRichDescribeModel, ConvertContext context) {
UnresolvedIdentifier unresolvedIdentifier =
UnresolvedIdentifier.of(sqlRichDescribeModel.fullModelName());
ObjectIdentifier identifier =
context.getCatalogManager().qualifyIdentifier(unresolvedIdentifier);
return new DescribeModelOperation(identifier, sqlRichDescribeModel.isExtended());
}
}
| SqlDescribeModelConverter |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/mapping/Any.java | {
"start": 11005,
"end": 12571
} | class ____ extends SimpleValue {
private String typeName;
private final Consumer<Selectable> selectableConsumer;
public KeyValue(
Consumer<Selectable> selectableConsumer,
MetadataBuildingContext buildingContext) {
super( buildingContext );
this.selectableConsumer = selectableConsumer;
}
public KeyValue(
Consumer<Selectable> selectableConsumer,
MetadataBuildingContext buildingContext,
Table table) {
super( buildingContext, table );
this.selectableConsumer = selectableConsumer;
}
private KeyValue(KeyValue original) {
super( original );
this.typeName = original.typeName;
this.selectableConsumer = original.selectableConsumer;
}
@Override
public KeyValue copy() {
return new KeyValue( this );
}
@Override
public Type getType() throws MappingException {
return getMetadata().getTypeConfiguration().getBasicTypeRegistry().getRegisteredType( typeName );
}
@Override
public String getTypeName() {
return typeName;
}
@Override
public void setTypeName(String typeName) {
this.typeName = typeName;
}
@Override
public void addColumn(Column column) {
super.addColumn( column );
selectableConsumer.accept( column );
}
@Override
public void addColumn(Column column, boolean isInsertable, boolean isUpdatable) {
super.addColumn( column, isInsertable, isUpdatable );
selectableConsumer.accept( column );
}
@Override
public void addFormula(Formula formula) {
super.addFormula( formula );
selectableConsumer.accept( formula );
}
}
}
| KeyValue |
java | apache__camel | components/camel-bindy/src/test/java/org/apache/camel/dataformat/bindy/model/fix/complex/onetomany/Order.java | {
"start": 1279,
"end": 3610
} | class ____ {
@Link
Header header;
@Link
Trailer trailer;
@KeyValuePairField(tag = 1)
// Client reference
private String account;
@KeyValuePairField(tag = 11)
// Order reference
private String clOrdId;
@KeyValuePairField(tag = 58)
// Free text
private String text;
@KeyValuePairField(tag = 777, pattern = "dd-MM-yyyy HH:mm:ss", timezone = "GMT-3")
// created
private Date created;
@OneToMany(mappedTo = "org.apache.camel.dataformat.bindy.model.fix.complex.onetomany.Security")
private List<Security> securities;
public List<Security> getSecurities() {
return securities;
}
public void setSecurities(List<Security> securities) {
this.securities = securities;
}
public Header getHeader() {
return header;
}
public void setHeader(Header header) {
this.header = header;
}
public Trailer getTrailer() {
return trailer;
}
public void setTrailer(Trailer trailer) {
this.trailer = trailer;
}
public String getAccount() {
return account;
}
public void setAccount(String account) {
this.account = account;
}
public String getClOrdId() {
return clOrdId;
}
public void setClOrdId(String clOrdId) {
this.clOrdId = clOrdId;
}
public String getText() {
return this.text;
}
public void setText(String text) {
this.text = text;
}
public Date getCreated() {
return created;
}
public void setCreated(Date created) {
this.created = created;
}
@Override
public String toString() {
StringBuilder temp = new StringBuilder();
temp.append(Order.class.getName())
.append(" --> 1: ")
.append(this.account)
.append(", 11: ")
.append(this.clOrdId)
.append(", 58: ")
.append(this.text)
.append(", 777: ")
.append(this.created);
temp.append("\r");
if (this.securities != null) {
for (Security sec : this.securities) {
temp.append(sec.toString());
temp.append("\r");
}
}
return temp.toString();
}
}
| Order |
java | apache__hadoop | hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/CleanupTestContainers.java | {
"start": 1502,
"end": 3070
} | class ____ extends AbstractAbfsIntegrationTest {
private static final Logger LOG = LoggerFactory.getLogger(CleanupTestContainers.class);
private static final String CONTAINER_PREFIX = "abfs-testcontainer-";
public CleanupTestContainers() throws Exception {
}
@Test
public void testDeleteContainers() throws Throwable {
int count = 0;
AbfsConfiguration abfsConfig = getAbfsStore(getFileSystem()).getAbfsConfiguration();
String accountName = abfsConfig.getAccountName().split("\\.")[0];
LOG.debug("Deleting test containers in account - {}", abfsConfig.getAccountName());
String accountKey = abfsConfig.getStorageAccountKey();
if ((accountKey == null) || (accountKey.isEmpty())) {
LOG.debug("Clean up not possible. Account ket not present in config");
}
final StorageCredentials credentials;
credentials = new StorageCredentialsAccountAndKey(
accountName, accountKey);
CloudStorageAccount storageAccount = new CloudStorageAccount(credentials, true);
CloudBlobClient blobClient = storageAccount.createCloudBlobClient();
Iterable<CloudBlobContainer> containers
= blobClient.listContainers(CONTAINER_PREFIX);
for (CloudBlobContainer container : containers) {
LOG.info("Container {} URI {}",
container.getName(),
container.getUri());
if (container.deleteIfExists()) {
count++;
LOG.info("Current deleted test containers count - #{}", count);
}
}
LOG.info("Summary: Deleted {} test containers", count);
}
}
| CleanupTestContainers |
java | spring-projects__spring-boot | core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/condition/AllNestedConditions.java | {
"start": 1169,
"end": 1255
} | class ____ {
* }
*
* @ConditionalOnProperty("something")
* static | OnJndi |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/DefaultTyping.java | {
"start": 364,
"end": 786
} | enum ____ {
/**
* This value means that only properties that have
* {@link java.lang.Object} as declared type (including
* generic types without explicit type) will use default
* typing.
*/
JAVA_LANG_OBJECT,
/**
* Value that means that default typing will be used for
* properties with declared type of {@link java.lang.Object}
* or an abstract type (abstract | DefaultTyping |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/dialect/Dialect.java | {
"start": 203707,
"end": 216205
} | enum ____ { day, hour, minute }
final Unit unit;
if ( daysPart != 0 ) {
unit = hoursPart == 0 && minutesPart == 0 && secondsPart == 0 && nano == 0
? Unit.day
: null;
}
else if ( hoursPart != 0 ) {
unit = minutesPart == 0 && secondsPart == 0 && nano == 0
? Unit.hour
: null;
}
else if ( minutesPart != 0 ) {
unit = secondsPart == 0 && nano == 0
? Unit.minute
: null;
}
else {
unit = null;
}
appender.appendSql( "interval '" );
if ( unit != null ) {
appender.appendSql( switch( unit ) {
case day -> daysPart;
case hour -> hoursPart;
case minute -> minutesPart;
});
appender.appendSql( "' " );
appender.appendSql( unit.toString() );
}
else {
appender.appendSql( "interval '" );
appender.appendSql( literal.getSeconds() );
if ( nano > 0 ) {
appender.appendSql( '.' );
appender.appendSql( nano );
}
appender.appendSql( "' second" );
}
}
/**
* Append a literal SQL {@code interval} representing the given Java
* {@link TemporalAmount}.
*/
public void appendIntervalLiteral(SqlAppender appender, TemporalAmount literal) {
if ( literal instanceof Duration duration ) {
appendIntervalLiteral( appender, duration );
}
else if ( literal instanceof Period period ) {
final int years = period.getYears();
final int months = period.getMonths();
final int days = period.getDays();
final boolean parenthesis = years != 0 && months != 0
|| years != 0 && days != 0
|| months != 0 && days != 0;
if ( parenthesis ) {
appender.appendSql( '(' );
}
boolean first = true;
for ( var unit : literal.getUnits() ) {
final long value = literal.get( unit );
if ( value != 0 ) {
if ( first ) {
first = false;
}
else {
appender.appendSql( "+" );
}
appender.appendSql( "interval '" );
appender.appendSql( value );
appender.appendSql( "' " );
if ( unit == ChronoUnit.YEARS ) {
appender.appendSql( "year" );
}
else if ( unit == ChronoUnit.MONTHS ) {
appender.appendSql( "month" );
}
else {
assert unit == ChronoUnit.DAYS;
appender.appendSql( "day" );
}
}
}
if ( parenthesis ) {
appender.appendSql( ')' );
}
}
else {
throw new IllegalArgumentException( "Unsupported temporal amount type: " + literal );
}
}
/**
* Append a literal SQL {@code uuid} representing the given Java
* {@link UUID}.
* <p>
* This is usually a {@code cast()} expression, but it might be
* a function call.
*/
public void appendUUIDLiteral(SqlAppender appender, UUID literal) {
appender.appendSql( "cast('" );
appender.appendSql( literal.toString() );
appender.appendSql( "' as uuid)" );
}
/**
* Does this dialect supports timezone offsets in temporal literals.
*/
public boolean supportsTemporalLiteralOffset() {
return false;
}
/**
* How the dialect supports time zone types like {@link Types#TIMESTAMP_WITH_TIMEZONE}.
*/
public TimeZoneSupport getTimeZoneSupport() {
return TimeZoneSupport.NONE;
}
/**
* The name of a {@code rowid}-like pseudo-column which
* acts as a high-performance row locator, or null if
* this dialect has no such pseudo-column.
* <p>
* If the {@code rowid}-like value is an explicitly-declared
* named column instead of an implicit pseudo-column, and if
* the given name is nonempty, return the given name.
*
* @param rowId the name specified by
* {@link org.hibernate.annotations.RowId#value()},
* which is ignored if {@link #getRowIdColumnString}
* is not overridden
*/
public String rowId(String rowId) {
return null;
}
/**
* The JDBC type code of the {@code rowid}-like pseudo-column
* which acts as a high-performance row locator.
*
* @return {@link Types#ROWID} by default
*/
public int rowIdSqlType() {
return ROWID;
}
/**
* If this dialect requires that the {@code rowid} column be
* declared explicitly, return the DDL column definition.
*
* @return the DDL column definition, or {@code null} if
* the {@code rowid} is an implicit pseudo-column
*/
public String getRowIdColumnString(String rowId) {
return null;
}
/**
* Get the minimum {@link DmlTargetColumnQualifierSupport} required by this dialect.
*
* @return the column qualifier support required by this dialect
*/
public DmlTargetColumnQualifierSupport getDmlTargetColumnQualifierSupport() {
return DmlTargetColumnQualifierSupport.NONE;
}
/**
* Get this dialect's level of support for primary key functional dependency analysis
* within {@code GROUP BY} and {@code ORDER BY} clauses.
*/
public FunctionalDependencyAnalysisSupport getFunctionalDependencyAnalysisSupport() {
return FunctionalDependencyAnalysisSupportImpl.NONE;
}
/**
* Render a SQL check condition for {@link CheckConstraint}
*
* @return a SQL expression representing the {@link CheckConstraint}
*/
public String getCheckConstraintString(CheckConstraint checkConstraint) {
final String constraintName = checkConstraint.getName();
final String checkWithName =
isBlank( constraintName )
? " check"
: " constraint " + constraintName + " check";
final String constraint = checkWithName + " (" + checkConstraint.getConstraint() + ")";
return appendCheckConstraintOptions( checkConstraint, constraint );
}
/**
* Append the {@linkplain CheckConstraint#getOptions() options} to the given DDL
* string declaring a SQL {@code check} constraint.
*
* @param checkConstraint an instance of {@link CheckConstraint}
* @param sqlCheckConstraint the SQL to append the {@link CheckConstraint} options
*
* @return a SQL expression
*
* @since 7.0
*/
@Internal @Incubating
public String appendCheckConstraintOptions(CheckConstraint checkConstraint, String sqlCheckConstraint) {
return sqlCheckConstraint;
}
/**
* Does this dialect support appending table options SQL fragment at the end of the SQL table creation statement?
*
* @return {@code true} indicates it does; {@code false} indicates it does not;
*
* @since 7.0
*/
@Deprecated(since = "7.1", forRemoval = true)
public boolean supportsTableOptions() {
return false;
}
/**
* Does this dialect support binding {@link Types#NULL} for {@link PreparedStatement#setNull(int, int)}?
* If it does, then the call to {@link PreparedStatement#getParameterMetaData()} may be skipped for
* better performance.
*
* @return {@code true} indicates it does; {@code false} indicates it does not;
* @see org.hibernate.type.descriptor.jdbc.ObjectNullResolvingJdbcType
*/
public boolean supportsBindingNullSqlTypeForSetNull() {
return false;
}
/**
* Does this dialect support binding {@code null} for {@link PreparedStatement#setObject(int, Object)}?
* if it does, then call of {@link PreparedStatement#getParameterMetaData()} could be eliminated for better performance.
*
* @return {@code true} indicates it does; {@code false} indicates it does not;
* @see org.hibernate.type.descriptor.jdbc.ObjectNullResolvingJdbcType
*/
public boolean supportsBindingNullForSetObject() {
return false;
}
/**
* Whether the FILTER clause for aggregate functions is supported.
*/
public boolean supportsFilterClause() {
// By default, we report false because not many dialects support this
return false;
}
/**
* Whether the SQL row constructor is supported.
*/
public boolean supportsRowConstructor() {
return false;
}
/**
* Whether the SQL array constructor is supported.
*/
public boolean supportsArrayConstructor() {
return false;
}
public boolean supportsDuplicateSelectItemsInQueryGroup() {
return true;
}
public boolean supportsIntersect() {
return true;
}
/**
* If the dialect supports using joins in mutation statement subquery
* that could also use columns from the mutation target table
*/
public boolean supportsJoinInMutationStatementSubquery() {
return true;
}
public boolean supportsJoinsInDelete() {
return false;
}
public boolean supportsNestedSubqueryCorrelation() {
return true;
}
/**
* Whether the SQL cycle clause is supported, which can be used for recursive CTEs.
*/
public boolean supportsRecursiveCycleClause() {
return false;
}
/**
* Whether the SQL cycle clause supports the using sub-clause.
*/
public boolean supportsRecursiveCycleUsingClause() {
return false;
}
/**
* Whether the SQL search clause is supported, which can be used for recursive CTEs.
*/
public boolean supportsRecursiveSearchClause() {
return false;
}
public boolean supportsSimpleQueryGrouping() {
return true;
}
/**
* Is the {@code cross join} syntax supported?
*/
public boolean supportsCrossJoin() {
return true;
}
/**
* Is this dialect known to support what ANSI-SQL terms "row value
* constructor" syntax; sometimes called tuple syntax.
* <p>
* Basically, does it support syntax like
* {@code ... where (FIRST_NAME, LAST_NAME) = ('Steve', 'Ebersole') ...}
*
* @return True if this SQL dialect is known to support "row value
* constructor" syntax; false otherwise.
*/
public boolean supportsRowValueConstructorSyntax() {
return true;
}
/**
* Is this dialect known to support what ANSI-SQL terms "row value
* constructor" syntax; sometimes called tuple syntax with <code><</code>, <code>></code>, <code>≤</code>
* and <code>≥</code> operators.
* <p>
* Basically, does it support syntax like
* {@code ... where (FIRST_NAME, LAST_NAME) < ('Steve', 'Ebersole') ...}
*
* @return True if this SQL dialect is known to support "row value
* constructor" syntax with relational comparison operators; false otherwise.
*/
public boolean supportsRowValueConstructorGtLtSyntax() {
return supportsRowValueConstructorSyntax();
}
/**
* Is this dialect known to support what ANSI-SQL terms "row value
* constructor" syntax; sometimes called tuple syntax with <code>is distinct from</code>
* and <code>is not distinct from</code> operators.
* <p>
* Basically, does it support syntax like
* {@code ... where (FIRST_NAME, LAST_NAME) is distinct from ('Steve', 'Ebersole') ...}
*
* @return True if this SQL dialect is known to support "row value
* constructor" syntax with distinct from comparison operators; false otherwise.
*/
public boolean supportsRowValueConstructorDistinctFromSyntax() {
return supportsRowValueConstructorSyntax() && supportsDistinctFromPredicate();
}
/**
* Whether the SQL with clause is supported.
*/
public boolean supportsWithClause() {
return true;
}
/**
* Whether the SQL with clause is supported within a subquery.
*/
public boolean supportsWithClauseInSubquery() {
return supportsWithClause();
}
/**
* Whether the SQL with clause is supported within a CTE.
*/
public boolean supportsNestedWithClause() {
return supportsWithClauseInSubquery();
}
/**
* Is this dialect known to support what ANSI-SQL terms "row value
* constructor" syntax; sometimes called tuple syntax with quantified predicates.
* <p>
* Basically, does it support syntax like
* {@code ... where (FIRST_NAME, LAST_NAME) = ALL (select ...) ...}
*
* @return True if this SQL dialect is known to support "row value
* constructor" syntax with quantified predicates; false otherwise.
*/
public boolean supportsRowValueConstructorSyntaxInQuantifiedPredicates() {
return true;
}
/**
* If the dialect supports {@link org.hibernate.dialect.Dialect#supportsRowValueConstructorSyntax() row values},
* does it offer such support in IN lists as well?
* <p>
* For example, {@code ... where (FIRST_NAME, LAST_NAME) IN ( (?, ?), (?, ?) ) ...}
*
* @return True if this SQL dialect is known to support "row value
* constructor" syntax in the IN list; false otherwise.
*/
public boolean supportsRowValueConstructorSyntaxInInList() {
return true;
}
/**
* If the dialect supports {@link org.hibernate.dialect.Dialect#supportsRowValueConstructorSyntax() row values},
* does it offer such support in IN subqueries as well?
* <p>
* For example, {@code ... where (FIRST_NAME, LAST_NAME) IN ( select ... ) ...}
*
* @return True if this SQL dialect is known to support "row value
* constructor" syntax in the IN subqueries; false otherwise.
*/
public boolean supportsRowValueConstructorSyntaxInInSubQuery() {
return supportsRowValueConstructorSyntaxInInList();
}
}
| Unit |
java | quarkusio__quarkus | extensions/opentelemetry/deployment/src/test/java/io/quarkus/opentelemetry/deployment/metrics/MetricsDisabledTest.java | {
"start": 369,
"end": 1035
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withEmptyApplication()
.overrideConfigKey("quarkus.otel.metrics.enabled", "false")
.assertException(t -> Assertions.assertEquals(DeploymentException.class, t.getClass()));
@Inject
Meter openTelemetryMeter;
@Test
void testNoOpenTelemetry() {
//Should not be reached: dump what was injected if it somehow passed
Assertions.assertNull(openTelemetryMeter,
"A OpenTelemetry Meter instance should not be found/injected when OpenTelemetry metrics is disabled");
}
}
| MetricsDisabledTest |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/jsontype/ExistingPropertyTest.java | {
"start": 4361,
"end": 4848
} | class ____ {
public Car car;
public CarWrapper() {}
public CarWrapper(Car c) { car = c; }
}
// for [databind#1635]
@JsonTypeInfo(use = JsonTypeInfo.Id.NAME,
include = JsonTypeInfo.As.EXISTING_PROPERTY,
// IMPORTANT! Must be defined as `visible`
visible=true,
property = "type",
defaultImpl=Bean1635Default.class)
@JsonSubTypes({ @JsonSubTypes.Type(Bean1635A.class) })
static | CarWrapper |
java | google__gson | gson/src/main/java/com/google/gson/JsonArray.java | {
"start": 1682,
"end": 14918
} | class ____ extends JsonElement implements Iterable<JsonElement> {
private final ArrayList<JsonElement> elements;
/** Creates an empty JsonArray. */
@SuppressWarnings("deprecation") // superclass constructor
public JsonArray() {
elements = new ArrayList<>();
}
/**
* Creates an empty JsonArray with the desired initial capacity.
*
* @param capacity initial capacity.
* @throws IllegalArgumentException if the {@code capacity} is negative
* @since 2.8.1
*/
@SuppressWarnings("deprecation") // superclass constructor
public JsonArray(int capacity) {
elements = new ArrayList<>(capacity);
}
/**
* Creates a deep copy of this element and all its children.
*
* @since 2.8.2
*/
@Override
public JsonArray deepCopy() {
if (!elements.isEmpty()) {
JsonArray result = new JsonArray(elements.size());
for (JsonElement element : elements) {
result.add(element.deepCopy());
}
return result;
}
return new JsonArray();
}
/**
* Adds the specified boolean to self.
*
* @param bool the boolean that needs to be added to the array.
* @since 2.4
*/
public void add(Boolean bool) {
elements.add(bool == null ? JsonNull.INSTANCE : new JsonPrimitive(bool));
}
/**
* Adds the specified character to self.
*
* @param character the character that needs to be added to the array.
* @since 2.4
*/
public void add(Character character) {
elements.add(character == null ? JsonNull.INSTANCE : new JsonPrimitive(character));
}
/**
* Adds the specified number to self.
*
* @param number the number that needs to be added to the array.
* @since 2.4
*/
public void add(Number number) {
elements.add(number == null ? JsonNull.INSTANCE : new JsonPrimitive(number));
}
/**
* Adds the specified string to self.
*
* @param string the string that needs to be added to the array.
* @since 2.4
*/
public void add(String string) {
elements.add(string == null ? JsonNull.INSTANCE : new JsonPrimitive(string));
}
/**
* Adds the specified element to self.
*
* @param element the element that needs to be added to the array.
*/
public void add(JsonElement element) {
if (element == null) {
element = JsonNull.INSTANCE;
}
elements.add(element);
}
/**
* Adds all the elements of the specified array to self.
*
* @param array the array whose elements need to be added to the array.
*/
public void addAll(JsonArray array) {
elements.addAll(array.elements);
}
/**
* Replaces the element at the specified position in this array with the specified element.
*
* @param index index of the element to replace
* @param element element to be stored at the specified position
* @return the element previously at the specified position
* @throws IndexOutOfBoundsException if the specified index is outside the array bounds
*/
@CanIgnoreReturnValue
public JsonElement set(int index, JsonElement element) {
return elements.set(index, element == null ? JsonNull.INSTANCE : element);
}
/**
* Removes the first occurrence of the specified element from this array, if it is present. If the
* array does not contain the element, it is unchanged.
*
* @param element element to be removed from this array, if present
* @return true if this array contained the specified element, false otherwise
* @since 2.3
*/
@CanIgnoreReturnValue
public boolean remove(JsonElement element) {
return elements.remove(element);
}
/**
* Removes the element at the specified position in this array. Shifts any subsequent elements to
* the left (subtracts one from their indices). Returns the element that was removed from the
* array.
*
* @param index index the index of the element to be removed
* @return the element previously at the specified position
* @throws IndexOutOfBoundsException if the specified index is outside the array bounds
* @since 2.3
*/
@CanIgnoreReturnValue
public JsonElement remove(int index) {
return elements.remove(index);
}
/**
* Returns true if this array contains the specified element.
*
* @return true if this array contains the specified element.
* @param element whose presence in this array is to be tested
* @since 2.3
*/
public boolean contains(JsonElement element) {
return elements.contains(element);
}
/**
* Returns the number of elements in the array.
*
* @return the number of elements in the array.
*/
public int size() {
return elements.size();
}
/**
* Returns true if the array is empty.
*
* @return true if the array is empty.
* @since 2.8.7
*/
public boolean isEmpty() {
return elements.isEmpty();
}
/**
* Returns an iterator to navigate the elements of the array. Since the array is an ordered list,
* the iterator navigates the elements in the order they were inserted.
*
* @return an iterator to navigate the elements of the array.
*/
@Override
public Iterator<JsonElement> iterator() {
return elements.iterator();
}
/**
* Returns the i-th element of the array.
*
* @param i the index of the element that is being sought.
* @return the element present at the i-th index.
* @throws IndexOutOfBoundsException if {@code i} is negative or greater than or equal to the
* {@link #size()} of the array.
*/
public JsonElement get(int i) {
return elements.get(i);
}
private JsonElement getAsSingleElement() {
int size = elements.size();
if (size == 1) {
return elements.get(0);
}
throw new IllegalStateException("Array must have size 1, but has size " + size);
}
/**
* Convenience method to get this array as a {@link Number} if it contains a single element. This
* method calls {@link JsonElement#getAsNumber()} on the element, therefore any of the exceptions
* declared by that method can occur.
*
* @return this element as a number if it is single element array.
* @throws IllegalStateException if the array is empty or has more than one element.
*/
@Override
public Number getAsNumber() {
return getAsSingleElement().getAsNumber();
}
/**
* Convenience method to get this array as a {@link String} if it contains a single element. This
* method calls {@link JsonElement#getAsString()} on the element, therefore any of the exceptions
* declared by that method can occur.
*
* @return this element as a String if it is single element array.
* @throws IllegalStateException if the array is empty or has more than one element.
*/
@Override
public String getAsString() {
return getAsSingleElement().getAsString();
}
/**
* Convenience method to get this array as a double if it contains a single element. This method
* calls {@link JsonElement#getAsDouble()} on the element, therefore any of the exceptions
* declared by that method can occur.
*
* @return this element as a double if it is single element array.
* @throws IllegalStateException if the array is empty or has more than one element.
*/
@Override
public double getAsDouble() {
return getAsSingleElement().getAsDouble();
}
/**
* Convenience method to get this array as a {@link BigDecimal} if it contains a single element.
* This method calls {@link JsonElement#getAsBigDecimal()} on the element, therefore any of the
* exceptions declared by that method can occur.
*
* @return this element as a {@link BigDecimal} if it is single element array.
* @throws IllegalStateException if the array is empty or has more than one element.
* @since 1.2
*/
@Override
public BigDecimal getAsBigDecimal() {
return getAsSingleElement().getAsBigDecimal();
}
/**
* Convenience method to get this array as a {@link BigInteger} if it contains a single element.
* This method calls {@link JsonElement#getAsBigInteger()} on the element, therefore any of the
* exceptions declared by that method can occur.
*
* @return this element as a {@link BigInteger} if it is single element array.
* @throws IllegalStateException if the array is empty or has more than one element.
* @since 1.2
*/
@Override
public BigInteger getAsBigInteger() {
return getAsSingleElement().getAsBigInteger();
}
/**
* Convenience method to get this array as a float if it contains a single element. This method
* calls {@link JsonElement#getAsFloat()} on the element, therefore any of the exceptions declared
* by that method can occur.
*
* @return this element as a float if it is single element array.
* @throws IllegalStateException if the array is empty or has more than one element.
*/
@Override
public float getAsFloat() {
return getAsSingleElement().getAsFloat();
}
/**
* Convenience method to get this array as a long if it contains a single element. This method
* calls {@link JsonElement#getAsLong()} on the element, therefore any of the exceptions declared
* by that method can occur.
*
* @return this element as a long if it is single element array.
* @throws IllegalStateException if the array is empty or has more than one element.
*/
@Override
public long getAsLong() {
return getAsSingleElement().getAsLong();
}
/**
* Convenience method to get this array as an integer if it contains a single element. This method
* calls {@link JsonElement#getAsInt()} on the element, therefore any of the exceptions declared
* by that method can occur.
*
* @return this element as an integer if it is single element array.
* @throws IllegalStateException if the array is empty or has more than one element.
*/
@Override
public int getAsInt() {
return getAsSingleElement().getAsInt();
}
/**
* Convenience method to get this array as a primitive byte if it contains a single element. This
* method calls {@link JsonElement#getAsByte()} on the element, therefore any of the exceptions
* declared by that method can occur.
*
* @return this element as a primitive byte if it is single element array.
* @throws IllegalStateException if the array is empty or has more than one element.
*/
@Override
public byte getAsByte() {
return getAsSingleElement().getAsByte();
}
/**
* Convenience method to get this array as a character if it contains a single element. This
* method calls {@link JsonElement#getAsCharacter()} on the element, therefore any of the
* exceptions declared by that method can occur.
*
* @return this element as a primitive short if it is single element array.
* @throws IllegalStateException if the array is empty or has more than one element.
* @deprecated This method is misleading, as it does not get this element as a char but rather as
* a string's first character.
*/
@Deprecated
@Override
public char getAsCharacter() {
return getAsSingleElement().getAsCharacter();
}
/**
* Convenience method to get this array as a primitive short if it contains a single element. This
* method calls {@link JsonElement#getAsShort()} on the element, therefore any of the exceptions
* declared by that method can occur.
*
* @return this element as a primitive short if it is single element array.
* @throws IllegalStateException if the array is empty or has more than one element.
*/
@Override
public short getAsShort() {
return getAsSingleElement().getAsShort();
}
/**
* Convenience method to get this array as a boolean if it contains a single element. This method
* calls {@link JsonElement#getAsBoolean()} on the element, therefore any of the exceptions
* declared by that method can occur.
*
* @return this element as a boolean if it is single element array.
* @throws IllegalStateException if the array is empty or has more than one element.
*/
@Override
public boolean getAsBoolean() {
return getAsSingleElement().getAsBoolean();
}
/**
* Returns a mutable {@link List} view of this {@code JsonArray}. Changes to the {@code List} are
* visible in this {@code JsonArray} and the other way around.
*
* <p>The {@code List} does not permit {@code null} elements. Unlike {@code JsonArray}'s {@code
* null} handling, a {@link NullPointerException} is thrown when trying to add {@code null}. Use
* {@link JsonNull} for JSON null values.
*
* @return mutable {@code List} view
* @since 2.10
*/
public List<JsonElement> asList() {
return new NonNullElementWrapperList<>(elements);
}
/**
* Returns whether the other object is equal to this. This method only considers the other object
* to be equal if it is an instance of {@code JsonArray} and has equal elements in the same order.
*/
@Override
public boolean equals(Object o) {
return (o == this) || (o instanceof JsonArray && ((JsonArray) o).elements.equals(elements));
}
/**
* Returns the hash code of this array. This method calculates the hash code based on the elements
* of this array.
*/
@Override
public int hashCode() {
return elements.hashCode();
}
}
| JsonArray |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/classes/ClassAssert_hasPublicFields_Test.java | {
"start": 925,
"end": 1266
} | class ____ extends ClassAssertBaseTest {
@Override
protected ClassAssert invoke_api_method() {
return assertions.hasPublicFields("field");
}
@Override
protected void verify_internal_effects() {
verify(classes).assertHasPublicFields(getInfo(assertions), getActual(assertions), "field");
}
}
| ClassAssert_hasPublicFields_Test |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/cdi/converters/legacy/ConversionAutoApplyTest.java | {
"start": 1874,
"end": 2300
} | class ____ implements AttributeConverter<Money, BigDecimal> {
@Override
public BigDecimal convertToDatabaseColumn(Money attribute) {
return attribute == null ? null : new BigDecimal(attribute.toString());
}
@Override
public Money convertToEntityAttribute(BigDecimal dbData) {
return dbData == null ? null : new Money( dbData.toString() );
}
}
@Entity
@Table( name = "Widget" )
public static | MoneyConverter |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/reflection/ReflectorTest.java | {
"start": 6335,
"end": 6440
} | class ____ extends Parent<String> {
}
@Test
void shouldResolveReadonlySetterWithOverload() {
| Child |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/engine/config/spi/ConfigurationService.java | {
"start": 972,
"end": 3075
} | interface ____ extends Service {
/**
* Access to the complete map of config settings. The returned map is immutable
*
* @return The immutable map of config settings.
*/
Map<String,Object> getSettings();
/**
* Get the named setting, using the specified converter.
*
* @param name The name of the setting to get.
* @param converter The converter to apply
* @param <T> The Java type of the conversion
*
* @return The converted (typed) setting.
* May return {@code null}
* (see {@link #getSetting(String, Class, Object)})
*/
<T> @Nullable T getSetting(String name, Converter<T> converter);
/**
* Get the named setting, using the specified converter and default value.
*
* @param name The name of the setting to get.
* @param converter The converter to apply
* @param defaultValue If no setting with that name is found, return this default value as the result.
* @param <T> The Java type of the conversion
*
* @return The converted (typed) setting. Will be the defaultValue if no such setting was defined.
*/
<T> @PolyNull T getSetting(String name, Converter<T> converter, @PolyNull T defaultValue);
/**
* Get the named setting. Differs from the form taking a Converter in that here we expect to have a simple
* cast rather than any involved conversion.
*
* @param name The name of the setting to get.
* @param expected The expected Java type.
* @param defaultValue If no setting with that name is found, return this default value as the result.
* @param <T> The Java type of the conversion
*
* @return The converted (typed) setting. Will be the defaultValue if no such setting was defined.
*
* @deprecated Use {@link #getSetting(String, Converter, Object)}.
* This method does not report errors correctly.
*/
@Deprecated(since = "7.2")
<T> @PolyNull T getSetting(String name, Class<T> expected, @PolyNull T defaultValue);
/**
* Simple conversion contract for converting an untyped object to a specified type.
*
* @param <T> The Java type of the converted value
*/
| ConfigurationService |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeWebScheme.java | {
"start": 2371,
"end": 7477
} | class ____ {
private static final Logger LOG =
LoggerFactory.getLogger(TestRouterNamenodeWebScheme.class);
/** Router for the test. */
private Router router;
/** Namenodes in the cluster. */
private Map<String, Map<String, MockNamenode>> nns = new HashMap<>();
/** Nameservices in the federated cluster. */
private List<String> nsIds = asList("ns0", "ns1");
@BeforeEach
public void setup() throws Exception {
LOG.info("Initialize the Mock Namenodes to monitor");
for (String nsId : nsIds) {
nns.put(nsId, new HashMap<>());
for (String nnId : asList("nn0", "nn1")) {
nns.get(nsId).put(nnId, new MockNamenode(nsId));
}
}
LOG.info("Set nn0 to active for all nameservices");
for (Map<String, MockNamenode> nnNS : nns.values()) {
nnNS.get("nn0").transitionToActive();
nnNS.get("nn1").transitionToStandby();
}
}
@AfterEach
public void cleanup() throws Exception {
for (Map<String, MockNamenode> nnNS : nns.values()) {
for (MockNamenode nn : nnNS.values()) {
nn.stop();
}
}
nns.clear();
if (router != null) {
router.stop();
}
}
/**
* Get the configuration of the cluster which contains all the Namenodes and
* their addresses.
* @return Configuration containing all the Namenodes.
*/
private Configuration getNamenodesConfig() {
final Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMESERVICES,
StringUtils.join(",", nns.keySet()));
for (String nsId : nns.keySet()) {
Set<String> nnIds = nns.get(nsId).keySet();
StringBuilder sb = new StringBuilder();
sb.append(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX);
sb.append(".").append(nsId);
conf.set(sb.toString(), StringUtils.join(",", nnIds));
for (String nnId : nnIds) {
final MockNamenode nn = nns.get(nsId).get(nnId);
sb = new StringBuilder();
sb.append(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
sb.append(".").append(nsId);
sb.append(".").append(nnId);
conf.set(sb.toString(), "localhost:" + nn.getRPCPort());
sb = new StringBuilder();
sb.append(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
sb.append(".").append(nsId);
sb.append(".").append(nnId);
conf.set(sb.toString(), "localhost:" + nn.getHTTPPort());
}
}
return conf;
}
@Test
public void testWebSchemeHttp() throws IOException {
testWebScheme(HttpConfig.Policy.HTTP_ONLY, "http");
}
@Test
public void testWebSchemeHttps() throws IOException {
testWebScheme(HttpConfig.Policy.HTTPS_ONLY, "https");
}
private void testWebScheme(HttpConfig.Policy httpPolicy,
String expectedScheme) throws IOException {
Configuration nsConf = getNamenodesConfig();
// Setup the State Store for the Router to use
Configuration stateStoreConfig = getStateStoreConfiguration();
stateStoreConfig.setClass(
RBFConfigKeys.FEDERATION_NAMENODE_RESOLVER_CLIENT_CLASS,
MembershipNamenodeResolver.class, ActiveNamenodeResolver.class);
stateStoreConfig.setClass(
RBFConfigKeys.FEDERATION_FILE_RESOLVER_CLIENT_CLASS,
MountTableResolver.class, FileSubclusterResolver.class);
Configuration routerConf = new RouterConfigBuilder(nsConf)
.enableLocalHeartbeat(true)
.heartbeat()
.stateStore()
.rpc()
.build();
// set "dfs.http.policy" to "HTTPS_ONLY"
routerConf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, httpPolicy.name());
// Specify namenodes (ns1.nn0,ns1.nn1) to monitor
routerConf.set(RBFConfigKeys.DFS_ROUTER_RPC_ADDRESS_KEY, "0.0.0.0:0");
routerConf.set(RBFConfigKeys.DFS_ROUTER_MONITOR_NAMENODE,
"ns1.nn0,ns1.nn1");
routerConf.addResource(stateStoreConfig);
// Specify local node (ns0.nn1) to monitor
routerConf.set(DFSConfigKeys.DFS_NAMESERVICE_ID, "ns0");
routerConf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");
// Start the Router with the namenodes to monitor
router = new Router();
router.init(routerConf);
router.start();
// Manually trigger the heartbeat and update the values
Collection<NamenodeHeartbeatService> heartbeatServices =
router.getNamenodeHeartbeatServices();
for (NamenodeHeartbeatService service : heartbeatServices) {
service.periodicInvoke();
}
MembershipNamenodeResolver resolver =
(MembershipNamenodeResolver) router.getNamenodeResolver();
resolver.loadCache(true);
// Check that the webSchemes are "https"
final List<FederationNamenodeContext> namespaceInfo = new ArrayList<>();
for (String nsId : nns.keySet()) {
List<? extends FederationNamenodeContext> nnReports =
resolver.getNamenodesForNameserviceId(nsId, false);
namespaceInfo.addAll(nnReports);
}
for (FederationNamenodeContext nnInfo : namespaceInfo) {
assertEquals(nnInfo.getWebScheme(), expectedScheme, "Unexpected scheme for Policy: "+
httpPolicy.name());
}
}
} | TestRouterNamenodeWebScheme |
java | dropwizard__dropwizard | dropwizard-hibernate/src/test/java/io/dropwizard/hibernate/SubResourcesTest.java | {
"start": 1334,
"end": 4380
} | class ____ {
private static final DropwizardAppExtension<TestConfiguration> appExtension = new DropwizardAppExtension<>(
TestApplication.class,
"hibernate-sub-resource-test.yaml",
new ResourceConfigurationSourceProvider(),
ConfigOverride.config("dataSource.url", "jdbc:h2:mem:sub-resources-" + System.nanoTime()));
private String baseUri() {
return "http://localhost:" + appExtension.getLocalPort();
}
@Test
void canReadFromTopResource() {
final Person person = appExtension.client()
.property(ClientProperties.CONNECT_TIMEOUT, 0)
.target(baseUri())
.path("/people/Greg")
.request()
.get(Person.class);
assertThat(person.getName()).isEqualTo("Greg");
}
@Test
void canWriteTopResource() {
final Person person = appExtension.client()
.property(ClientProperties.CONNECT_TIMEOUT, 0)
.target(baseUri())
.path("/people")
.request()
.post(Entity.entity("{\"name\": \"Jason\", \"email\": \"jason@gmail.com\", \"birthday\":637317407000}",
MediaType.APPLICATION_JSON_TYPE), Person.class);
assertThat(person.getName()).isEqualTo("Jason");
}
@Test
void canReadFromSubResources() {
final Dog dog = appExtension.client()
.property(ClientProperties.CONNECT_TIMEOUT, 0)
.target(baseUri())
.path("/people/Greg/dogs/Bello")
.request()
.get(Dog.class);
assertThat(dog.getName()).isEqualTo("Bello");
assertThat(dog.getOwner()).isNotNull();
assertThat(requireNonNull(dog.getOwner()).getName()).isEqualTo("Greg");
}
@Test
void canWriteSubResource() {
final Dog dog = appExtension.client()
.property(ClientProperties.CONNECT_TIMEOUT, 0)
.target(baseUri())
.path("/people/Greg/dogs")
.request()
.post(Entity.entity("{\"name\": \"Bandit\"}", MediaType.APPLICATION_JSON_TYPE), Dog.class);
assertThat(dog.getName()).isEqualTo("Bandit");
assertThat(dog.getOwner()).isNotNull();
assertThat(requireNonNull(dog.getOwner()).getName()).isEqualTo("Greg");
}
@Test
void errorsAreHandled() {
Response response = appExtension.client()
.property(ClientProperties.CONNECT_TIMEOUT, 0)
.target(baseUri())
.path("/people/Jim/dogs")
.request()
.post(Entity.entity("{\"name\": \"Bullet\"}", MediaType.APPLICATION_JSON_TYPE));
assertThat(response.getStatus()).isEqualTo(404);
}
@Test
void noSessionErrorIsRaised() {
Response response = appExtension.client()
.property(ClientProperties.CONNECT_TIMEOUT, 0)
.target(baseUri())
.path("/people/Greg/dogs")
.request()
.get();
assertThat(response.getStatus()).isEqualTo(500);
}
public static | SubResourcesTest |
java | apache__camel | components/camel-jgroups-raft/src/test/java/org/apache/camel/component/jgroups/raft/cluster/JGroupsRaftClusterAbstractTest.java | {
"start": 1000,
"end": 1561
} | class ____ {
protected void waitForLeader(int attempts, RaftHandle rh, RaftHandle rh2, RaftHandle rh3) throws InterruptedException {
boolean thereIsLeader = rh.isLeader() || rh2.isLeader() || rh3.isLeader();
while (!thereIsLeader && attempts > 0) {
thereIsLeader = rh.isLeader() || rh2.isLeader() || rh3.isLeader();
TimeUnit.SECONDS.sleep(1);
attempts--;
}
if (attempts <= 0) {
throw new RuntimeCamelException("No leader in time!");
}
}
}
| JGroupsRaftClusterAbstractTest |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/cache/interceptor/CacheSyncFailureTests.java | {
"start": 3299,
"end": 4395
} | class ____ {
private final AtomicLong counter = new AtomicLong();
@Cacheable(cacheNames = "testCache", sync = true, unless = "#result > 10")
public Object unlessSync(Object arg1) {
return this.counter.getAndIncrement();
}
@Cacheable(cacheNames = {"testCache", "anotherTestCache"}, sync = true)
public Object severalCachesSync(Object arg1) {
return this.counter.getAndIncrement();
}
@Cacheable(cacheResolver = "testCacheResolver", sync = true)
public Object severalCachesWithResolvedSync(Object arg1) {
return this.counter.getAndIncrement();
}
@Cacheable(cacheNames = "testCache", sync = true)
@CacheEvict(cacheNames = "anotherTestCache", key = "#arg1")
public Object syncWithAnotherOperation(Object arg1) {
return this.counter.getAndIncrement();
}
@Caching(cacheable = {
@Cacheable(cacheNames = "testCache", sync = true),
@Cacheable(cacheNames = "anotherTestCache", sync = true)
})
public Object syncWithTwoGetOperations(Object arg1) {
return this.counter.getAndIncrement();
}
}
@Configuration
@EnableCaching
static | SimpleService |
java | google__truth | core/src/test/java/com/google/common/truth/IterableSubjectTest.java | {
"start": 5287,
"end": 21652
} | class ____.
*/
assertFailureValue(e, "though it did contain", "[2] (Integer)");
assertFailureValue(e, "full contents", "[1, 2]");
}
@Test
public void containsFailsWithSameToStringAndNull() {
AssertionError e =
expectFailure(whenTesting -> whenTesting.that(asList(1, "null")).contains(null));
assertFailureValue(e, "an instance of", "null type");
}
@Test
public void containsFailure() {
AssertionError e = expectFailure(whenTesting -> whenTesting.that(asList(1, 2, 3)).contains(5));
assertFailureKeys(e, "expected to contain", "but was");
assertFailureValue(e, "expected to contain", "5");
}
@Test
public void containsOnNullIterable() {
AssertionError e =
expectFailure(whenTesting -> whenTesting.that((Iterable<?>) null).contains(5));
assertFailureKeys(e, "expected an iterable that contains", "but was");
}
@Test
public void doesNotContain() {
assertThat(asList(1, null, 3)).doesNotContain(5);
}
@Test
public void doesNotContainNull() {
assertThat(asList(1, 2, 3)).doesNotContain(null);
}
@Test
public void doesNotContainFailure() {
AssertionError e =
expectFailure(whenTesting -> whenTesting.that(asList(1, 2, 3)).doesNotContain(2));
assertFailureKeys(e, "expected not to contain", "but was");
assertFailureValue(e, "expected not to contain", "2");
}
@Test
public void doesNotContainOnNullIterable() {
AssertionError e =
expectFailure(whenTesting -> whenTesting.that((Iterable<?>) null).doesNotContain(2));
assertFailureKeys(e, "expected an iterable that does not contain", "but was");
}
@Test
public void containsNoDuplicates() {
assertThat(asList(1, 2, 3)).containsNoDuplicates();
}
@Test
public void containsNoDuplicatesMixedTypes() {
assertThat(asList(1, 2, 2L, 3)).containsNoDuplicates();
}
@Test
public void containsNoDuplicatesFailure() {
AssertionError e =
expectFailure(whenTesting -> whenTesting.that(asList(1, 2, 2, 3)).containsNoDuplicates());
assertFailureKeys(e, "expected not to contain duplicates", "but contained", "full contents");
assertFailureValue(e, "but contained", "[2 x 2]");
assertFailureValue(e, "full contents", "[1, 2, 2, 3]");
}
@Test
public void containsNoDuplicatesOnNullIterable() {
AssertionError e =
expectFailure(whenTesting -> whenTesting.that((Iterable<?>) null).containsNoDuplicates());
assertFailureKeys(e, "expected an iterable that does not contain duplicates", "but was");
}
@Test
public void containsAnyOf() {
assertThat(asList(1, 2, 3)).containsAnyOf(1, 5);
}
@Test
public void containsAnyOfWithNull() {
assertThat(asList(1, null, 3)).containsAnyOf(null, 5);
}
@Test
public void containsAnyOfWithNullInThirdAndFinalPosition() {
assertThat(asList(1, null, 3)).containsAnyOf(4, 5, (Integer) null);
}
@Test
public void containsAnyOfFailure() {
AssertionError e =
expectFailure(whenTesting -> whenTesting.that(asList(1, 2, 3)).containsAnyOf(5, 6, 0));
assertFailureKeys(e, "expected to contain any of", "but was");
assertFailureValue(e, "expected to contain any of", "[5, 6, 0]");
}
@Test
public void containsAnyOfFailsWithSameToStringAndHomogeneousList() {
AssertionError e =
expectFailure(whenTesting -> whenTesting.that(asList(1L, 2L, 3L)).containsAnyOf(2, 3));
assertFailureKeys(
e, "expected to contain any of", "but did not", "though it did contain", "full contents");
assertFailureValue(e, "expected to contain any of", "[2, 3] (Integer)");
assertFailureValue(e, "though it did contain", "[2, 3] (Long)");
assertFailureValue(e, "full contents", "[1, 2, 3]");
}
@Test
public void containsAnyOfFailsWithSameToStringAndHomogeneousListWithDuplicates() {
AssertionError e =
expectFailure(whenTesting -> whenTesting.that(asList(3L, 3L)).containsAnyOf(2, 3, 3));
assertFailureKeys(
e, "expected to contain any of", "but did not", "though it did contain", "full contents");
assertFailureValue(e, "expected to contain any of", "[2, 3 [2 copies]] (Integer)");
assertFailureValue(e, "though it did contain", "[3 [2 copies]] (Long)");
assertFailureValue(e, "full contents", "[3, 3]");
}
@Test
public void containsAnyOfFailsWithSameToStringAndNullInSubject() {
AssertionError e =
expectFailure(
whenTesting -> whenTesting.that(asList(null, "abc")).containsAnyOf("def", "null"));
assertFailureKeys(
e, "expected to contain any of", "but did not", "though it did contain", "full contents");
assertFailureValue(e, "expected to contain any of", "[def, null] (String)");
assertFailureValue(e, "though it did contain", "[null (null type)]");
assertFailureValue(e, "full contents", "[null, abc]");
}
@Test
public void containsAnyOfFailsWithSameToStringAndNullInExpectation() {
AssertionError e =
expectFailure(
whenTesting -> whenTesting.that(asList("null", "abc")).containsAnyOf("def", null));
assertFailureKeys(
e, "expected to contain any of", "but did not", "though it did contain", "full contents");
assertFailureValue(e, "expected to contain any of", "[def (String), null (null type)]");
assertFailureValue(e, "though it did contain", "[null] (String)");
assertFailureValue(e, "full contents", "[null, abc]");
}
@Test
public void containsAnyOfWithOneShotIterable() {
List<Object> contents = asList(2, 1, "b");
Iterable<Object> oneShot = new OneShotIterable<>(contents.iterator(), "OneShotIterable");
assertThat(oneShot).containsAnyOf(3, "a", 7, "b", 0);
}
@Test
public void containsAnyOfOnNullIterable() {
AssertionError e =
expectFailure(whenTesting -> whenTesting.that((Iterable<?>) null).containsAnyOf(5, 6, 0));
assertFailureKeys(e, "expected an iterable that contains any of", "but was");
}
@Test
public void containsAnyInIterable() {
assertThat(asList(1, 2, 3)).containsAnyIn(asList(1, 10, 100));
AssertionError e =
expectFailure(
whenTesting -> whenTesting.that(asList(1, 2, 3)).containsAnyIn(asList(5, 6, 0)));
assertFailureKeys(e, "expected to contain any of", "but was");
assertFailureValue(e, "expected to contain any of", "[5, 6, 0]");
}
@Test
public void containsAnyInArray() {
assertThat(asList(1, 2, 3)).containsAnyIn(new Integer[] {1, 10, 100});
AssertionError e =
expectFailure(
whenTesting ->
whenTesting.that(asList(1, 2, 3)).containsAnyIn(new Integer[] {5, 6, 0}));
assertFailureKeys(e, "expected to contain any of", "but was");
assertFailureValue(e, "expected to contain any of", "[5, 6, 0]");
}
@Test
public void containsAnyInWithNullExpectedIterable() {
AssertionError e =
expectFailure(
whenTesting -> whenTesting.that(asList(1, 2, 3)).containsAnyIn((Iterable<?>) null));
assertFailureKeys(
e,
"could not perform containment check because expected iterable was null",
"actual contents");
}
@Test
public void containsAnyInWithNullExpectedArray() {
AssertionError e =
expectFailure(
whenTesting -> whenTesting.that(asList(1, 2, 3)).containsAnyIn((Object[]) null));
assertFailureKeys(
e,
"could not perform containment check because expected array was null",
"actual contents");
}
@Test
public void containsAtLeast() {
assertThat(asList(1, 2, 3)).containsAtLeast(1, 2);
}
@Test
public void containsAtLeastWithMany() {
assertThat(asList(1, 2, 3)).containsAtLeast(1, 2);
}
@Test
public void containsAtLeastWithDuplicates() {
assertThat(asList(1, 2, 2, 2, 3)).containsAtLeast(2, 2);
}
@Test
public void containsAtLeastWithNull() {
assertThat(asList(1, null, 3)).containsAtLeast(3, null);
}
@Test
public void containsAtLeastWithNullAtThirdAndFinalPosition() {
assertThat(asList(1, null, 3)).containsAtLeast(1, 3, (Object) null);
}
/*
* Test that we only call toString() if the assertion fails -- that is, not just if the elements
* are out of order, but only if someone actually calls inOrder(). There are 2 reasons for this:
*
* 1. Calling toString() uses extra time and space. (To be fair, Iterable assertions often use a
* lot of those already.)
*
* 2. Some toString() methods are buggy. Arguably we shouldn't accommodate these, especially since
* those users are in for a nasty surprise if their tests actually fail someday, but I don't want
* to bite that off now. (Maybe Fact should catch exceptions from toString()?)
*/
@Test
public void containsAtLeastElementsInOutOfOrderDoesNotStringify() {
CountsToStringCalls o = new CountsToStringCalls();
List<Object> actual = asList(o, 1);
List<Object> expected = asList(1, o);
assertThat(actual).containsAtLeastElementsIn(expected);
assertThat(o.calls).isEqualTo(0);
expectFailure(
whenTesting -> whenTesting.that(actual).containsAtLeastElementsIn(expected).inOrder());
assertThat(o.calls).isGreaterThan(0);
}
@Test
public void containsAtLeastFailure() {
AssertionError e =
expectFailure(whenTesting -> whenTesting.that(asList(1, 2, 3)).containsAtLeast(1, 2, 4));
assertFailureKeys(e, "missing (1)", "---", "expected to contain at least", "but was");
assertFailureValue(e, "missing (1)", "4");
assertFailureValue(e, "expected to contain at least", "[1, 2, 4]");
}
@Test
public void containsAtLeastWithExtras() {
AssertionError e =
expectFailure(
whenTesting -> whenTesting.that(asList("y", "x")).containsAtLeast("x", "y", "z"));
assertFailureValue(e, "missing (1)", "z");
}
@Test
public void containsAtLeastWithExtraCopiesOfOutOfOrder() {
AssertionError e =
expectFailure(
whenTesting -> whenTesting.that(asList("y", "x")).containsAtLeast("x", "y", "y"));
assertFailureValue(e, "missing (1)", "y");
}
@Test
public void containsAtLeastWithDuplicatesFailure() {
AssertionError e =
expectFailure(
whenTesting -> whenTesting.that(asList(1, 2, 3)).containsAtLeast(1, 2, 2, 2, 3, 4));
assertFailureValue(e, "missing (3)", "2 [2 copies], 4");
}
/*
* Slightly subtle test to ensure that if multiple equal elements are found
* to be missing we only reference it once in the output message.
*/
@Test
public void containsAtLeastWithDuplicateMissingElements() {
AssertionError e =
expectFailure(whenTesting -> whenTesting.that(asList(1, 2)).containsAtLeast(4, 4, 4));
assertFailureValue(e, "missing (3)", "4 [3 copies]");
}
@Test
public void containsAtLeastWithNullFailure() {
AssertionError e =
expectFailure(
whenTesting -> whenTesting.that(asList(1, null, 3)).containsAtLeast(1, null, null, 3));
assertFailureValue(e, "missing (1)", "null");
}
@Test
public void containsAtLeastFailsWithSameToStringAndHomogeneousList() {
AssertionError e =
expectFailure(whenTesting -> whenTesting.that(asList(1L, 2L)).containsAtLeast(1, 2));
assertFailureValue(e, "missing (2)", "1, 2 (Integer)");
assertFailureValue(e, "though it did contain (2)", "1, 2 (Long)");
}
@Test
public void containsAtLeastFailsWithSameToStringAndHomogeneousListWithDuplicates() {
AssertionError e =
expectFailure(whenTesting -> whenTesting.that(asList(1L, 2L, 2L)).containsAtLeast(1, 1, 2));
assertFailureValue(e, "missing (3)", "1 [2 copies], 2 (Integer)");
assertFailureValue(e, "though it did contain (3)", "1, 2 [2 copies] (Long)");
}
@Test
public void containsAtLeastFailsWithSameToStringAndHomogeneousListWithNull() {
AssertionError e =
expectFailure(
whenTesting -> whenTesting.that(asList("null", "abc")).containsAtLeast("abc", null));
assertFailureValue(e, "missing (1)", "null (null type)");
assertFailureValue(e, "though it did contain (1)", "null (String)");
}
@Test
public void containsAtLeastFailsWithSameToStringAndHeterogeneousListWithDuplicates() {
AssertionError e =
expectFailure(
whenTesting ->
whenTesting.that(asList(1, 2, 2L, 3L, 3L)).containsAtLeast(2L, 2L, 3, 3));
assertFailureValue(e, "missing (3)", "2 (Long), 3 (Integer) [2 copies]");
assertFailureValue(e, "though it did contain (3)", "2 (Integer), 3 (Long) [2 copies]");
}
@Test
public void containsAtLeastFailsWithEmptyString() {
AssertionError e =
expectFailure(whenTesting -> whenTesting.that(asList("a", null)).containsAtLeast("", null));
assertFailureKeys(e, "missing (1)", "---", "expected to contain at least", "but was");
assertFailureValue(e, "missing (1)", "");
}
@Test
public void containsAtLeastInOrder() {
assertThat(asList(3, 2, 5)).containsAtLeast(3, 2, 5).inOrder();
}
@Test
public void containsAtLeastInOrderWithGaps() {
assertThat(asList(3, 2, 5)).containsAtLeast(3, 5).inOrder();
assertThat(asList(3, 2, 2, 4, 5)).containsAtLeast(3, 2, 2, 5).inOrder();
assertThat(asList(3, 1, 4, 1, 5)).containsAtLeast(3, 1, 5).inOrder();
assertThat(asList("x", "y", "y", "z")).containsAtLeast("x", "y", "z").inOrder();
assertThat(asList("x", "x", "y", "z")).containsAtLeast("x", "y", "z").inOrder();
assertThat(asList("z", "x", "y", "z")).containsAtLeast("x", "y", "z").inOrder();
assertThat(asList("x", "x", "y", "z", "x")).containsAtLeast("x", "y", "z", "x").inOrder();
}
@Test
public void containsAtLeastInOrderWithNull() {
assertThat(asList(3, null, 5)).containsAtLeast(3, null, 5).inOrder();
assertThat(asList(3, null, 7, 5)).containsAtLeast(3, null, 5).inOrder();
}
@Test
public void containsAtLeastInOrderWithFailure() {
AssertionError e =
expectFailure(
whenTesting ->
whenTesting.that(asList(1, null, 3)).containsAtLeast(null, 1, 3).inOrder());
assertFailureKeys(
e,
"required elements were all found, but order was wrong",
"expected order for required elements",
"but was");
assertFailureValue(e, "expected order for required elements", "[null, 1, 3]");
assertFailureValue(e, "but was", "[1, null, 3]");
}
@Test
public void containsAtLeastInOrderWithFailureWithActualOrder() {
AssertionError e =
expectFailure(
whenTesting ->
whenTesting.that(asList(1, 2, null, 3, 4)).containsAtLeast(null, 1, 3).inOrder());
assertFailureKeys(
e,
"required elements were all found, but order was wrong",
"expected order for required elements",
"but order was",
"full contents");
assertFailureValue(e, "expected order for required elements", "[null, 1, 3]");
assertFailureValue(e, "but order was", "[1, null, 3]");
assertFailureValue(e, "full contents", "[1, 2, null, 3, 4]");
}
@Test
public void containsAtLeastInOrderWithOneShotIterable() {
List<Object> contents = asList(2, 1, null, 4, "a", 3, "b");
Iterable<Object> oneShot = new OneShotIterable<>(contents.iterator(), contents.toString());
assertThat(oneShot).containsAtLeast(1, null, 3).inOrder();
}
@Test
public void containsAtLeastInOrderWithOneShotIterableWrongOrder() {
List<Object> contents = asList(2, 1, null, 4, "a", 3, "b");
Iterable<Object> oneShot = new OneShotIterable<>(contents.iterator(), "BadIterable");
AssertionError e =
expectFailure(
whenTesting ->
whenTesting.that(oneShot).containsAtLeast(1, 3, (Object) null).inOrder());
assertFailureKeys(
e,
"required elements were all found, but order was wrong",
"expected order for required elements",
"but was");
assertFailureValue(e, "expected order for required elements", "[1, 3, null]");
assertFailureValue(e, "but was", "BadIterable"); // TODO(b/231966021): Output its elements.
}
@Test
public void containsAtLeastOnNullIterable() {
AssertionError e =
expectFailure(whenTesting -> whenTesting.that((Iterable<?>) null).containsAtLeast(1, 2, 4));
assertFailureKeys(e, "expected an iterable that contains at least", "but was");
}
private static final | name |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AMCommand.java | {
"start": 1515,
"end": 2556
} | enum ____ {
/**
* @deprecated Sent by Resource Manager when it is out of sync with the AM and
* wants the AM get back in sync.
*
* Note: Instead of sending this command,
* {@link ApplicationMasterNotRegisteredException} will be thrown
* when ApplicationMaster is out of sync with ResourceManager and
* ApplicationMaster is expected to re-register with RM by calling
* {@link ApplicationMasterProtocol#registerApplicationMaster(RegisterApplicationMasterRequest)}
*/
AM_RESYNC,
/**
* @deprecated Sent by Resource Manager when it wants the AM to shutdown.
* Note: This command was earlier sent by ResourceManager to
* instruct AM to shutdown if RM had restarted. Now
* {@link ApplicationAttemptNotFoundException} will be thrown in case
* that RM has restarted and AM is supposed to handle this
* exception by shutting down itself.
*/
AM_SHUTDOWN
}
| AMCommand |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/entity/Bid.java | {
"start": 339,
"end": 1198
} | class ____ {
private Integer id;
private String description;
private Starred note;
private Starred editorsNote;
private Boolean approved;
@Enumerated(EnumType.STRING)
//@Column(columnDefinition = "VARCHAR(10)")
public Starred getEditorsNote() {
return editorsNote;
}
public void setEditorsNote(Starred editorsNote) {
this.editorsNote = editorsNote;
}
@Id
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public Starred getNote() {
return note;
}
public void setNote(Starred note) {
this.note = note;
}
public Boolean getApproved() {
return approved;
}
public void setApproved(Boolean approved) {
this.approved = approved;
}
}
| Bid |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/Planner.java | {
"start": 1248,
"end": 1954
} | interface ____ {
/**
* Update the existing {@link Plan}, by adding/removing/updating existing
* reservations, and adding a subset of the reservation requests in the
* contracts parameter.
*
* @param plan the {@link Plan} to replan
* @param contracts the list of reservation requests
* @throws PlanningException if operation is unsuccessful
*/
public void plan(Plan plan, List<ReservationDefinition> contracts)
throws PlanningException;
/**
* Initialize the replanner
*
* @param planQueueName the name of the queue for this plan
* @param conf the scheduler configuration
*/
void init(String planQueueName, ReservationSchedulerConfiguration conf);
}
| Planner |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/resourcemanager/active/ActiveResourceManagerTest.java | {
"start": 3956,
"end": 60196
} | class ____ {
@RegisterExtension
public static AllCallbackWrapper<TestingRpcServiceExtension> rpcServiceExtensionWrapper =
new AllCallbackWrapper<>(new TestingRpcServiceExtension());
private static final long TIMEOUT_SEC = 5L;
private static final Duration TIMEOUT_TIME = Duration.ofSeconds(TIMEOUT_SEC);
private static final Duration TESTING_START_WORKER_INTERVAL = Duration.ofMillis(50);
private static final long TESTING_START_WORKER_TIMEOUT_MS = 50;
private static final WorkerResourceSpec WORKER_RESOURCE_SPEC = WorkerResourceSpec.ZERO;
private static final TaskExecutorMemoryConfiguration TESTING_CONFIG =
new TaskExecutorMemoryConfiguration(1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 21L, 36L);
/** Tests worker successfully requested, started and registered. */
@Test
void testStartNewWorker() throws Exception {
new Context() {
{
final ResourceID tmResourceId = ResourceID.generate();
final CompletableFuture<TaskExecutorProcessSpec> requestWorkerFromDriverFuture =
new CompletableFuture<>();
driverBuilder.setRequestResourceFunction(
taskExecutorProcessSpec -> {
requestWorkerFromDriverFuture.complete(taskExecutorProcessSpec);
return CompletableFuture.completedFuture(tmResourceId);
});
runTest(
() -> {
// received worker request, verify requesting from driver
CompletableFuture<Void> startNewWorkerFuture =
runInMainThread(
() ->
getResourceManager()
.requestNewWorker(
WORKER_RESOURCE_SPEC));
TaskExecutorProcessSpec taskExecutorProcessSpec =
requestWorkerFromDriverFuture.get(
TIMEOUT_SEC, TimeUnit.SECONDS);
startNewWorkerFuture.get(TIMEOUT_SEC, TimeUnit.SECONDS);
assertThat(taskExecutorProcessSpec)
.isEqualTo(
TaskExecutorProcessUtils
.processSpecFromWorkerResourceSpec(
flinkConfig, WORKER_RESOURCE_SPEC));
// worker registered, verify registration succeeded
CompletableFuture<RegistrationResponse> registerTaskExecutorFuture =
registerTaskExecutor(tmResourceId);
assertThatFuture(registerTaskExecutorFuture)
.succeedsWithin(TIMEOUT_SEC, TimeUnit.SECONDS)
.isInstanceOf(RegistrationResponse.Success.class);
});
}
};
}
/** Tests request new workers when resources less than declared. */
@Test
void testLessThanDeclareResource() throws Exception {
new Context() {
{
final AtomicInteger requestCount = new AtomicInteger(0);
final List<CompletableFuture<ResourceID>> resourceIdFutures = new ArrayList<>();
resourceIdFutures.add(CompletableFuture.completedFuture(ResourceID.generate()));
resourceIdFutures.add(new CompletableFuture<>());
resourceIdFutures.add(new CompletableFuture<>());
driverBuilder.setRequestResourceFunction(
taskExecutorProcessSpec ->
resourceIdFutures.get(requestCount.getAndIncrement()));
runTest(
() -> {
// request two new worker
runInMainThread(
() ->
getResourceManager()
.requestNewWorker(WORKER_RESOURCE_SPEC))
.get(TIMEOUT_SEC, TimeUnit.SECONDS);
runInMainThread(
() ->
getResourceManager()
.requestNewWorker(WORKER_RESOURCE_SPEC))
.get(TIMEOUT_SEC, TimeUnit.SECONDS);
assertThat(requestCount).hasValue(2);
// release registered worker.
CompletableFuture<Void> declareResourceFuture =
runInMainThread(
() ->
getResourceManager()
.declareResourceNeeded(
Collections.singleton(
new ResourceDeclaration(
WORKER_RESOURCE_SPEC,
3,
Collections
.emptySet()))));
declareResourceFuture.get(TIMEOUT_SEC, TimeUnit.SECONDS);
// request new worker.
assertThat(requestCount).hasValue(3);
});
}
};
}
/** Test release workers if more than resources declared. */
@Test
void testMoreThanDeclaredResource() throws Exception {
new Context() {
{
final AtomicInteger requestCount = new AtomicInteger(0);
final List<CompletableFuture<ResourceID>> resourceIdFutures =
Arrays.asList(
CompletableFuture.completedFuture(ResourceID.generate()),
CompletableFuture.completedFuture(ResourceID.generate()),
CompletableFuture.completedFuture(ResourceID.generate()),
new CompletableFuture<>());
final AtomicInteger releaseCount = new AtomicInteger(0);
final List<CompletableFuture<ResourceID>> releaseResourceFutures =
Arrays.asList(
new CompletableFuture<>(),
new CompletableFuture<>(),
new CompletableFuture<>());
driverBuilder
.setRequestResourceFunction(
taskExecutorProcessSpec ->
resourceIdFutures.get(requestCount.getAndIncrement()))
.setReleaseResourceConsumer(
resourceID ->
releaseResourceFutures
.get(releaseCount.getAndIncrement())
.complete(resourceID));
runTest(
() -> {
runInMainThread(
() -> {
for (int i = 0; i < 4; i++) {
getResourceManager()
.requestNewWorker(WORKER_RESOURCE_SPEC);
}
});
ResourceID unWantedResource = resourceIdFutures.get(0).get();
ResourceID normalResource = resourceIdFutures.get(1).get();
ResourceID startingResource = resourceIdFutures.get(2).get();
CompletableFuture<ResourceID> pendingRequestFuture =
resourceIdFutures.get(3);
registerTaskExecutorAndSendSlotReport(unWantedResource, 1)
.get(TIMEOUT_SEC, TimeUnit.SECONDS);
registerTaskExecutorAndSendSlotReport(normalResource, 1)
.get(TIMEOUT_SEC, TimeUnit.SECONDS);
assertThat(requestCount).hasValue(4);
assertThat(releaseCount).hasValue(0);
Set<InstanceID> unWantedWorkers =
Collections.singleton(
getResourceManager()
.getInstanceIdByResourceId(unWantedResource)
.get());
// release unwanted workers.
runInMainThread(
() ->
getResourceManager()
.declareResourceNeeded(
Collections.singleton(
new ResourceDeclaration(
WORKER_RESOURCE_SPEC,
3,
unWantedWorkers))))
.get(TIMEOUT_SEC, TimeUnit.SECONDS);
assertThat(releaseCount).hasValue(1);
assertThat(releaseResourceFutures.get(0))
.isCompletedWithValue(unWantedResource);
// release pending workers.
runInMainThread(
() ->
getResourceManager()
.declareResourceNeeded(
Collections.singleton(
new ResourceDeclaration(
WORKER_RESOURCE_SPEC,
2,
Collections
.emptySet()))))
.get(TIMEOUT_SEC, TimeUnit.SECONDS);
assertThat(releaseCount).hasValue(1);
assertThat(pendingRequestFuture).isCancelled();
// release starting workers.
runInMainThread(
() ->
getResourceManager()
.declareResourceNeeded(
Collections.singleton(
new ResourceDeclaration(
WORKER_RESOURCE_SPEC,
1,
Collections
.emptySet()))))
.get(TIMEOUT_SEC, TimeUnit.SECONDS);
assertThat(releaseCount).hasValue(2);
assertThat(releaseResourceFutures.get(1))
.isCompletedWithValue(startingResource);
// release last workers.
runInMainThread(
() ->
getResourceManager()
.declareResourceNeeded(
Collections.singleton(
new ResourceDeclaration(
WORKER_RESOURCE_SPEC,
0,
Collections
.emptySet()))))
.get(TIMEOUT_SEC, TimeUnit.SECONDS);
assertThat(releaseCount).hasValue(3);
assertThat(releaseResourceFutures.get(2))
.isCompletedWithValue(normalResource);
});
}
};
}
/** Tests worker failed while requesting. */
@Test
void testStartNewWorkerFailedRequesting() throws Exception {
new Context() {
{
final ResourceID tmResourceId = ResourceID.generate();
final AtomicInteger requestCount = new AtomicInteger(0);
final List<CompletableFuture<ResourceID>> resourceIdFutures = new ArrayList<>();
resourceIdFutures.add(new CompletableFuture<>());
resourceIdFutures.add(new CompletableFuture<>());
final List<CompletableFuture<TaskExecutorProcessSpec>>
requestWorkerFromDriverFutures = new ArrayList<>();
requestWorkerFromDriverFutures.add(new CompletableFuture<>());
requestWorkerFromDriverFutures.add(new CompletableFuture<>());
driverBuilder.setRequestResourceFunction(
taskExecutorProcessSpec -> {
int idx = requestCount.getAndIncrement();
assertThat(idx).isLessThan(2);
requestWorkerFromDriverFutures
.get(idx)
.complete(taskExecutorProcessSpec);
return resourceIdFutures.get(idx);
});
runTest(
() -> {
// received worker request, verify requesting from driver
CompletableFuture<Void> startNewWorkerFuture =
runInMainThread(
() ->
getResourceManager()
.declareResourceNeeded(
Collections.singleton(
new ResourceDeclaration(
WORKER_RESOURCE_SPEC,
1,
Collections
.emptySet()))));
TaskExecutorProcessSpec taskExecutorProcessSpec1 =
requestWorkerFromDriverFutures
.get(0)
.get(TIMEOUT_SEC, TimeUnit.SECONDS);
startNewWorkerFuture.get(TIMEOUT_SEC, TimeUnit.SECONDS);
assertThat(taskExecutorProcessSpec1)
.isEqualTo(
TaskExecutorProcessUtils
.processSpecFromWorkerResourceSpec(
flinkConfig, WORKER_RESOURCE_SPEC));
// first request failed, verify requesting another worker from driver
runInMainThread(
() ->
resourceIdFutures
.get(0)
.completeExceptionally(
new Throwable("testing error")));
TaskExecutorProcessSpec taskExecutorProcessSpec2 =
requestWorkerFromDriverFutures
.get(1)
.get(TIMEOUT_SEC, TimeUnit.SECONDS);
assertThat(taskExecutorProcessSpec2)
.isEqualTo(taskExecutorProcessSpec1);
// second request allocated, verify registration succeed
runInMainThread(() -> resourceIdFutures.get(1).complete(tmResourceId));
CompletableFuture<RegistrationResponse> registerTaskExecutorFuture =
registerTaskExecutor(tmResourceId);
assertThatFuture(registerTaskExecutorFuture)
.succeedsWithin(TIMEOUT_SEC, TimeUnit.SECONDS)
.isInstanceOf(RegistrationResponse.Success.class);
});
}
};
}
/** Tests worker terminated after requested before registered. */
@Test
void testWorkerTerminatedBeforeRegister() throws Exception {
new Context() {
{
final AtomicInteger requestCount = new AtomicInteger(0);
final List<ResourceID> tmResourceIds = new ArrayList<>();
tmResourceIds.add(ResourceID.generate());
tmResourceIds.add(ResourceID.generate());
final List<CompletableFuture<TaskExecutorProcessSpec>>
requestWorkerFromDriverFutures = new ArrayList<>();
requestWorkerFromDriverFutures.add(new CompletableFuture<>());
requestWorkerFromDriverFutures.add(new CompletableFuture<>());
driverBuilder.setRequestResourceFunction(
taskExecutorProcessSpec -> {
int idx = requestCount.getAndIncrement();
assertThat(idx).isLessThan(2);
requestWorkerFromDriverFutures
.get(idx)
.complete(taskExecutorProcessSpec);
return CompletableFuture.completedFuture(tmResourceIds.get(idx));
});
runTest(
() -> {
// received worker request, verify requesting from driver
CompletableFuture<Void> startNewWorkerFuture =
runInMainThread(
() ->
getResourceManager()
.declareResourceNeeded(
Collections.singleton(
new ResourceDeclaration(
WORKER_RESOURCE_SPEC,
1,
Collections
.emptySet()))));
TaskExecutorProcessSpec taskExecutorProcessSpec1 =
requestWorkerFromDriverFutures
.get(0)
.get(TIMEOUT_SEC, TimeUnit.SECONDS);
startNewWorkerFuture.get(TIMEOUT_SEC, TimeUnit.SECONDS);
assertThat(taskExecutorProcessSpec1)
.isEqualTo(
TaskExecutorProcessUtils
.processSpecFromWorkerResourceSpec(
flinkConfig, WORKER_RESOURCE_SPEC));
// first worker failed before register, verify requesting another worker
// from driver
runInMainThread(
() ->
getResourceManager()
.onWorkerTerminated(
tmResourceIds.get(0),
"terminate for testing"));
TaskExecutorProcessSpec taskExecutorProcessSpec2 =
requestWorkerFromDriverFutures
.get(1)
.get(TIMEOUT_SEC, TimeUnit.SECONDS);
assertThat(taskExecutorProcessSpec2)
.isEqualTo(taskExecutorProcessSpec1);
// second worker registered, verify registration succeed
CompletableFuture<RegistrationResponse> registerTaskExecutorFuture =
registerTaskExecutor(tmResourceIds.get(1));
assertThatFuture(registerTaskExecutorFuture)
.succeedsWithin(TIMEOUT_SEC, TimeUnit.SECONDS)
.isInstanceOf(RegistrationResponse.Success.class);
});
}
};
}
/** Tests worker terminated after registered. */
@Test
void testWorkerTerminatedAfterRegister() throws Exception {
new Context() {
{
final AtomicInteger requestCount = new AtomicInteger(0);
final List<ResourceID> tmResourceIds = new ArrayList<>();
tmResourceIds.add(ResourceID.generate());
tmResourceIds.add(ResourceID.generate());
final List<CompletableFuture<TaskExecutorProcessSpec>>
requestWorkerFromDriverFutures = new ArrayList<>();
requestWorkerFromDriverFutures.add(new CompletableFuture<>());
requestWorkerFromDriverFutures.add(new CompletableFuture<>());
driverBuilder.setRequestResourceFunction(
taskExecutorProcessSpec -> {
int idx = requestCount.getAndIncrement();
assertThat(idx).isLessThan(2);
requestWorkerFromDriverFutures
.get(idx)
.complete(taskExecutorProcessSpec);
return CompletableFuture.completedFuture(tmResourceIds.get(idx));
});
runTest(
() -> {
// received worker request, verify requesting from driver
CompletableFuture<Void> startNewWorkerFuture =
runInMainThread(
() ->
getResourceManager()
.declareResourceNeeded(
Collections.singleton(
new ResourceDeclaration(
WORKER_RESOURCE_SPEC,
1,
Collections
.emptySet()))));
TaskExecutorProcessSpec taskExecutorProcessSpec1 =
requestWorkerFromDriverFutures
.get(0)
.get(TIMEOUT_SEC, TimeUnit.SECONDS);
startNewWorkerFuture.get(TIMEOUT_SEC, TimeUnit.SECONDS);
assertThat(taskExecutorProcessSpec1)
.isEqualTo(
TaskExecutorProcessUtils
.processSpecFromWorkerResourceSpec(
flinkConfig, WORKER_RESOURCE_SPEC));
// first worker registered, verify registration succeed
CompletableFuture<RegistrationResponse> registerTaskExecutorFuture1 =
registerTaskExecutor(tmResourceIds.get(0));
assertThatFuture(registerTaskExecutorFuture1)
.succeedsWithin(TIMEOUT_SEC, TimeUnit.SECONDS)
.isInstanceOf(RegistrationResponse.Success.class);
// first worker terminated, verify requesting another worker from driver
runInMainThread(
() ->
getResourceManager()
.onWorkerTerminated(
tmResourceIds.get(0),
"terminate for testing"));
TaskExecutorProcessSpec taskExecutorProcessSpec2 =
requestWorkerFromDriverFutures
.get(1)
.get(TIMEOUT_SEC, TimeUnit.SECONDS);
assertThat(taskExecutorProcessSpec2)
.isEqualTo(taskExecutorProcessSpec1);
// second worker registered, verify registration succeed
CompletableFuture<RegistrationResponse> registerTaskExecutorFuture2 =
registerTaskExecutor(tmResourceIds.get(1));
assertThatFuture(registerTaskExecutorFuture2)
.succeedsWithin(TIMEOUT_SEC, TimeUnit.SECONDS)
.isInstanceOf(RegistrationResponse.Success.class);
});
}
};
}
    /**
     * Tests that a worker which terminates after its resource requirement has already been
     * satisfied is not replaced: since the worker is no longer required, its termination must not
     * trigger another resource request towards the driver.
     */
    @Test
    void testWorkerTerminatedNoLongerRequired() throws Exception {
        new Context() {
            {
                final ResourceID tmResourceId = ResourceID.generate();
                // counts driver-side resource requests; more than 2 fails fast inside the callback
                final AtomicInteger requestCount = new AtomicInteger(0);
                // one future per potential driver request; completion signals the request arrived
                final List<CompletableFuture<TaskExecutorProcessSpec>>
                        requestWorkerFromDriverFutures = new ArrayList<>();
                requestWorkerFromDriverFutures.add(new CompletableFuture<>());
                requestWorkerFromDriverFutures.add(new CompletableFuture<>());
                driverBuilder.setRequestResourceFunction(
                        taskExecutorProcessSpec -> {
                            int idx = requestCount.getAndIncrement();
                            assertThat(idx).isLessThan(2);
                            requestWorkerFromDriverFutures
                                    .get(idx)
                                    .complete(taskExecutorProcessSpec);
                            return CompletableFuture.completedFuture(tmResourceId);
                        });
                runTest(
                        () -> {
                            // received worker request, verify requesting from driver
                            CompletableFuture<Void> startNewWorkerFuture =
                                    runInMainThread(
                                            () ->
                                                    getResourceManager()
                                                            .requestNewWorker(
                                                                    WORKER_RESOURCE_SPEC));
                            TaskExecutorProcessSpec taskExecutorProcessSpec =
                                    requestWorkerFromDriverFutures
                                            .get(0)
                                            .get(TIMEOUT_SEC, TimeUnit.SECONDS);
                            startNewWorkerFuture.get(TIMEOUT_SEC, TimeUnit.SECONDS);
                            // the requested process spec must be derived from the declared
                            // worker resource spec and the configuration
                            assertThat(taskExecutorProcessSpec)
                                    .isEqualTo(
                                            TaskExecutorProcessUtils
                                                    .processSpecFromWorkerResourceSpec(
                                                            flinkConfig, WORKER_RESOURCE_SPEC));
                            // worker registered, verify registration succeed
                            CompletableFuture<RegistrationResponse> registerTaskExecutorFuture =
                                    registerTaskExecutor(tmResourceId);
                            assertThatFuture(registerTaskExecutorFuture)
                                    .succeedsWithin(TIMEOUT_SEC, TimeUnit.SECONDS)
                                    .isInstanceOf(RegistrationResponse.Success.class);
                            // worker terminated, verify not requesting new worker
                            runInMainThread(
                                            () -> {
                                                getResourceManager()
                                                        .onWorkerTerminated(
                                                                tmResourceId,
                                                                "terminate for testing");
                                                // needs to return something, so that we can use
                                                // `get()` to make sure the main thread processing
                                                // finishes before the assertions
                                                return null;
                                            })
                                    .get(TIMEOUT_SEC, TimeUnit.SECONDS);
                            // no second request may have reached the driver
                            assertThat(requestWorkerFromDriverFutures.get(1)).isNotCompleted();
                        });
            }
        };
    }
    /**
     * Tests that the task manager connection is closed (the task executor gateway receives a
     * disconnect call) when the resource manager is notified that the corresponding worker has
     * terminated.
     */
    @Test
    void testCloseTaskManagerConnectionOnWorkerTerminated() throws Exception {
        new Context() {
            {
                final ResourceID tmResourceId = ResourceID.generate();
                final CompletableFuture<TaskExecutorProcessSpec> requestWorkerFromDriverFuture =
                        new CompletableFuture<>();
                // completed by the testing gateway when the RM disconnects the task executor
                final CompletableFuture<Void> disconnectResourceManagerFuture =
                        new CompletableFuture<>();
                final TestingTaskExecutorGateway taskExecutorGateway =
                        new TestingTaskExecutorGatewayBuilder()
                                .setDisconnectResourceManagerConsumer(
                                        (ignore) -> disconnectResourceManagerFuture.complete(null))
                                .createTestingTaskExecutorGateway();
                driverBuilder.setRequestResourceFunction(
                        taskExecutorProcessSpec -> {
                            requestWorkerFromDriverFuture.complete(taskExecutorProcessSpec);
                            return CompletableFuture.completedFuture(tmResourceId);
                        });
                runTest(
                        () -> {
                            // request a new worker, terminate it after registered;
                            // the stages are chained so termination happens strictly after
                            // the registration completed
                            runInMainThread(
                                            () ->
                                                    getResourceManager()
                                                            .requestNewWorker(WORKER_RESOURCE_SPEC))
                                    .thenCompose(
                                            (ignore) ->
                                                    registerTaskExecutor(
                                                            tmResourceId, taskExecutorGateway))
                                    .thenRun(
                                            () ->
                                                    runInMainThread(
                                                            () ->
                                                                    getResourceManager()
                                                                            .onWorkerTerminated(
                                                                                    tmResourceId,
                                                                                    "terminate for testing")));
                            // verify task manager connection is closed
                            disconnectResourceManagerFuture.get(TIMEOUT_SEC, TimeUnit.SECONDS);
                        });
            }
        };
    }
    /**
     * Tests that once the start-worker failure rate is exceeded (a worker terminated before it
     * could register), the retry request towards the driver is delayed by at least the configured
     * start-worker retry interval.
     */
    @Test
    void testStartWorkerIntervalOnWorkerTerminationExceedFailureRate() throws Exception {
        new Context() {
            {
                // max failure rate of 1 — presumably per rate-measurement window, so a single
                // early failure already activates the retry interval (TODO confirm unit)
                flinkConfig.set(ResourceManagerOptions.START_WORKER_MAX_FAILURE_RATE, 1d);
                flinkConfig.set(
                        ResourceManagerOptions.START_WORKER_RETRY_INTERVAL,
                        TESTING_START_WORKER_INTERVAL);
                final AtomicInteger requestCount = new AtomicInteger(0);
                final List<ResourceID> tmResourceIds = new ArrayList<>();
                tmResourceIds.add(ResourceID.generate());
                tmResourceIds.add(ResourceID.generate());
                // each future records the wall-clock time at which the driver saw the request,
                // so the delay between the two requests can be asserted below
                final List<CompletableFuture<Long>> requestWorkerFromDriverFutures =
                        new ArrayList<>();
                requestWorkerFromDriverFutures.add(new CompletableFuture<>());
                requestWorkerFromDriverFutures.add(new CompletableFuture<>());
                driverBuilder.setRequestResourceFunction(
                        taskExecutorProcessSpec -> {
                            int idx = requestCount.getAndIncrement();
                            assertThat(idx).isLessThan(2);
                            requestWorkerFromDriverFutures
                                    .get(idx)
                                    .complete(System.currentTimeMillis());
                            return CompletableFuture.completedFuture(tmResourceIds.get(idx));
                        });
                runTest(
                        () -> {
                            // received worker request, verify requesting from driver
                            CompletableFuture<Void> startNewWorkerFuture =
                                    runInMainThread(
                                            () ->
                                                    getResourceManager()
                                                            .declareResourceNeeded(
                                                                    Collections.singleton(
                                                                            new ResourceDeclaration(
                                                                                    WORKER_RESOURCE_SPEC,
                                                                                    1,
                                                                                    Collections
                                                                                            .emptySet()))));
                            long t1 =
                                    requestWorkerFromDriverFutures
                                            .get(0)
                                            .get(TIMEOUT_SEC, TimeUnit.SECONDS);
                            startNewWorkerFuture.get(TIMEOUT_SEC, TimeUnit.SECONDS);
                            // first worker failed before register, verify requesting another worker
                            // from driver
                            runInMainThread(
                                    () ->
                                            getResourceManager()
                                                    .onWorkerTerminated(
                                                            tmResourceIds.get(0),
                                                            "terminate for testing"));
                            long t2 =
                                    requestWorkerFromDriverFutures
                                            .get(1)
                                            .get(TIMEOUT_SEC, TimeUnit.SECONDS);
                            // validate trying creating worker twice, with proper interval
                            assertThat((t2 - t1))
                                    .isGreaterThanOrEqualTo(
                                            TESTING_START_WORKER_INTERVAL.toMillis());
                            // second worker registered, verify registration succeed
                            CompletableFuture<RegistrationResponse> registerTaskExecutorFuture =
                                    registerTaskExecutor(tmResourceIds.get(1));
                            assertThatFuture(registerTaskExecutorFuture)
                                    .succeedsWithin(TIMEOUT_SEC, TimeUnit.SECONDS)
                                    .isInstanceOf(RegistrationResponse.Success.class);
                        });
            }
        };
    }
    /**
     * Tests that once the start-worker failure rate is exceeded (the driver's resource request
     * itself failed), the retry request towards the driver is delayed by at least the configured
     * start-worker retry interval.
     */
    @Test
    void testStartWorkerIntervalOnRequestWorkerFailure() throws Exception {
        new Context() {
            {
                // max failure rate of 1 — presumably per rate-measurement window, so a single
                // early failure already activates the retry interval (TODO confirm unit)
                flinkConfig.set(ResourceManagerOptions.START_WORKER_MAX_FAILURE_RATE, 1d);
                flinkConfig.set(
                        ResourceManagerOptions.START_WORKER_RETRY_INTERVAL,
                        TESTING_START_WORKER_INTERVAL);
                final AtomicInteger requestCount = new AtomicInteger(0);
                final ResourceID tmResourceId = ResourceID.generate();
                // futures returned to the RM from the driver; the test completes them manually
                // (exceptionally for the first, successfully for the second)
                final List<CompletableFuture<ResourceID>> resourceIdFutures = new ArrayList<>();
                resourceIdFutures.add(new CompletableFuture<>());
                resourceIdFutures.add(new CompletableFuture<>());
                // each future records the wall-clock time at which the driver saw the request
                final List<CompletableFuture<Long>> requestWorkerFromDriverFutures =
                        new ArrayList<>();
                requestWorkerFromDriverFutures.add(new CompletableFuture<>());
                requestWorkerFromDriverFutures.add(new CompletableFuture<>());
                driverBuilder.setRequestResourceFunction(
                        taskExecutorProcessSpec -> {
                            int idx = requestCount.getAndIncrement();
                            assertThat(idx).isLessThan(2);
                            requestWorkerFromDriverFutures
                                    .get(idx)
                                    .complete(System.currentTimeMillis());
                            return resourceIdFutures.get(idx);
                        });
                runTest(
                        () -> {
                            // received worker request, verify requesting from driver
                            CompletableFuture<Void> startNewWorkerFuture =
                                    runInMainThread(
                                            () ->
                                                    getResourceManager()
                                                            .declareResourceNeeded(
                                                                    Collections.singleton(
                                                                            new ResourceDeclaration(
                                                                                    WORKER_RESOURCE_SPEC,
                                                                                    1,
                                                                                    Collections
                                                                                            .emptySet()))));
                            startNewWorkerFuture.get(TIMEOUT_SEC, TimeUnit.SECONDS);
                            long t1 =
                                    requestWorkerFromDriverFutures
                                            .get(0)
                                            .get(TIMEOUT_SEC, TimeUnit.SECONDS);
                            // first request failed, verify requesting another worker from driver
                            runInMainThread(
                                    () ->
                                            resourceIdFutures
                                                    .get(0)
                                                    .completeExceptionally(
                                                            new Throwable("testing error")));
                            long t2 =
                                    requestWorkerFromDriverFutures
                                            .get(1)
                                            .get(TIMEOUT_SEC, TimeUnit.SECONDS);
                            // validate trying creating worker twice, with proper interval
                            assertThat((t2 - t1))
                                    .isGreaterThanOrEqualTo(
                                            TESTING_START_WORKER_INTERVAL.toMillis());
                            // second worker registered, verify registration succeed
                            resourceIdFutures.get(1).complete(tmResourceId);
                            CompletableFuture<RegistrationResponse> registerTaskExecutorFuture =
                                    registerTaskExecutor(tmResourceId);
                            assertThatFuture(registerTaskExecutorFuture)
                                    .succeedsWithin(TIMEOUT_SEC, TimeUnit.SECONDS)
                                    .isInstanceOf(RegistrationResponse.Success.class);
                        });
            }
        };
    }
/** Tests workers from previous attempt successfully recovered and registered. */
@Test
void testRecoverWorkerFromPreviousAttempt() throws Exception {
new Context() {
{
final ResourceID tmResourceId = ResourceID.generate();
runTest(
() -> {
runInMainThread(
() ->
getResourceManager()
.onPreviousAttemptWorkersRecovered(
Collections.singleton(tmResourceId)));
CompletableFuture<RegistrationResponse> registerTaskExecutorFuture =
registerTaskExecutor(tmResourceId);
assertThatFuture(registerTaskExecutorFuture)
.succeedsWithin(TIMEOUT_SEC, TimeUnit.SECONDS)
.isInstanceOf(RegistrationResponse.Success.class);
});
}
};
}
/** Tests decline unknown worker registration. */
@Test
void testRegisterUnknownWorker() throws Exception {
new Context() {
{
runTest(
() -> {
CompletableFuture<RegistrationResponse> registerTaskExecutorFuture =
registerTaskExecutor(ResourceID.generate());
assertThatFuture(registerTaskExecutorFuture)
.succeedsWithin(TIMEOUT_SEC, TimeUnit.SECONDS)
.isInstanceOf(RegistrationResponse.Rejection.class);
});
}
};
}
@Test
void testOnError() throws Exception {
new Context() {
{
final Throwable fatalError = new Throwable("Testing fatal error");
runTest(
() -> {
runInMainThread(() -> getResourceManager().onError(fatalError));
final Throwable reportedError =
getFatalErrorHandler()
.getErrorFuture()
.get(TIMEOUT_SEC, TimeUnit.SECONDS);
assertThat(reportedError).isSameAs(fatalError);
});
}
};
}
@Test
void testWorkerRegistrationTimeout() throws Exception {
new Context() {
{
final ResourceID tmResourceId = ResourceID.generate();
final CompletableFuture<ResourceID> releaseResourceFuture =
new CompletableFuture<>();
flinkConfig.set(
ResourceManagerOptions.TASK_MANAGER_REGISTRATION_TIMEOUT,
Duration.ofMillis(TESTING_START_WORKER_TIMEOUT_MS));
driverBuilder
.setRequestResourceFunction(
taskExecutorProcessSpec ->
CompletableFuture.completedFuture(tmResourceId))
.setReleaseResourceConsumer(releaseResourceFuture::complete);
runTest(
() -> {
// request new worker
runInMainThread(
() ->
getResourceManager()
.requestNewWorker(WORKER_RESOURCE_SPEC));
// verify worker is released due to not registered in time
assertThatFuture(releaseResourceFuture)
.succeedsWithin(TIMEOUT_SEC, TimeUnit.SECONDS)
.isSameAs(tmResourceId);
});
}
};
}
    /**
     * Tests that the worker registration timeout is measured from completion of the resource
     * allocation rather than from the initial request: a slow allocation must not cause the
     * worker to be released before it had a fair chance to register.
     *
     * <p>NOTE: this test is timing sensitive (it sleeps for twice the registration timeout and
     * measures the subsequent registration duration); {@code assumeThat} skips the final check on
     * machines too slow to register within the timeout.
     */
    @Test
    void testWorkerRegistrationTimeoutNotCountingAllocationTime() throws Exception {
        new Context() {
            {
                final ResourceID tmResourceId = ResourceID.generate();
                // completed manually below, simulating an allocation slower than the timeout
                final CompletableFuture<ResourceID> requestResourceFuture =
                        new CompletableFuture<>();
                final CompletableFuture<ResourceID> releaseResourceFuture =
                        new CompletableFuture<>();
                flinkConfig.set(
                        ResourceManagerOptions.TASK_MANAGER_REGISTRATION_TIMEOUT,
                        Duration.ofMillis(TESTING_START_WORKER_TIMEOUT_MS));
                driverBuilder
                        .setRequestResourceFunction(
                                taskExecutorProcessSpec -> requestResourceFuture)
                        .setReleaseResourceConsumer(releaseResourceFuture::complete);
                runTest(
                        () -> {
                            // request new worker
                            runInMainThread(
                                    () ->
                                            getResourceManager()
                                                    .requestNewWorker(WORKER_RESOURCE_SPEC));
                            // resource allocation takes longer than worker registration timeout
                            Thread.sleep(TESTING_START_WORKER_TIMEOUT_MS * 2);
                            final long start = System.nanoTime();
                            runInMainThread(() -> requestResourceFuture.complete(tmResourceId));
                            // worker registered, verify not released due to timeout
                            RegistrationResponse registrationResponse =
                                    registerTaskExecutor(tmResourceId).join();
                            assertThatFuture(releaseResourceFuture).isNotDone();
                            // elapsed registration time in milliseconds
                            final long registrationTime = (System.nanoTime() - start) / 1_000_000;
                            assumeThat(registrationTime)
                                    .as(
                                            "The registration must not take longer than the start worker timeout. If it does, then this indicates a very slow machine.")
                                    .isLessThan(TESTING_START_WORKER_TIMEOUT_MS);
                            assertThat(registrationResponse)
                                    .isInstanceOf(RegistrationResponse.Success.class);
                        });
            }
        };
    }
@Test
void testWorkerRegistrationTimeoutRecoveredFromPreviousAttempt() throws Exception {
new Context() {
{
final ResourceID tmResourceId = ResourceID.generate();
final CompletableFuture<ResourceID> releaseResourceFuture =
new CompletableFuture<>();
flinkConfig.set(
ResourceManagerOptions.TASK_MANAGER_REGISTRATION_TIMEOUT,
Duration.ofMillis(TESTING_START_WORKER_TIMEOUT_MS));
driverBuilder.setReleaseResourceConsumer(releaseResourceFuture::complete);
runTest(
() -> {
// workers recovered
runInMainThread(
() ->
getResourceManager()
.onPreviousAttemptWorkersRecovered(
Collections.singleton(tmResourceId)));
// verify worker is released due to not registered in time
assertThatFuture(releaseResourceFuture)
.succeedsWithin(TIMEOUT_SEC, TimeUnit.SECONDS)
.isSameAs(tmResourceId);
});
}
};
}
@Test
void testResourceManagerRecoveredAfterAllTMRegistered() throws Exception {
new Context() {
{
final ResourceID tmResourceId1 = ResourceID.generate();
final ResourceID tmResourceId2 = ResourceID.generate();
runTest(
() -> {
// workers recovered
runInMainThread(
() ->
getResourceManager()
.onPreviousAttemptWorkersRecovered(
ImmutableSet.of(
tmResourceId1, tmResourceId2)));
runInMainThread(
() ->
getResourceManager()
.onWorkerRegistered(
tmResourceId1,
WorkerResourceSpec.ZERO));
runInMainThread(
() ->
getResourceManager()
.onWorkerRegistered(
tmResourceId2,
WorkerResourceSpec.ZERO));
runInMainThread(
() ->
assertThat(
getResourceManager()
.getReadyToServeFuture())
.isCompleted())
.get(TIMEOUT_SEC, TimeUnit.SECONDS);
});
}
};
}
    /**
     * Tests that the resource manager eventually becomes ready to serve even if not all workers
     * recovered from the previous attempt register: after the configured previous-worker recovery
     * timeout, the ready-to-serve future completes despite the missing registration.
     */
    @Test
    void testResourceManagerRecoveredAfterReconcileTimeout() throws Exception {
        new Context() {
            {
                final ResourceID tmResourceId1 = ResourceID.generate();
                final ResourceID tmResourceId2 = ResourceID.generate();
                // keep the recovery timeout short so the test does not wait long
                flinkConfig.set(
                        ResourceManagerOptions.RESOURCE_MANAGER_PREVIOUS_WORKER_RECOVERY_TIMEOUT,
                        Duration.ofMillis(TESTING_START_WORKER_TIMEOUT_MS));
                runTest(
                        () -> {
                            // workers recovered
                            runInMainThread(
                                    () -> {
                                        getResourceManager()
                                                .onPreviousAttemptWorkersRecovered(
                                                        ImmutableSet.of(
                                                                tmResourceId1, tmResourceId2));
                                    });
                            // only one of the two recovered workers registers
                            runInMainThread(
                                    () ->
                                            getResourceManager()
                                                    .onWorkerRegistered(
                                                            tmResourceId1,
                                                            WorkerResourceSpec.ZERO));
                            // readiness must still be reached via the recovery timeout
                            getResourceManager()
                                    .getReadyToServeFuture()
                                    .get(TIMEOUT_SEC, TimeUnit.SECONDS);
                        });
            }
        };
    }
private static | ActiveResourceManagerTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.