language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | quarkusio__quarkus | extensions/reactive-routes/deployment/src/main/java/io/quarkus/vertx/web/deployment/HandlerDescriptor.java | {
"start": 339,
"end": 3571
} | class ____ {
private final MethodInfo method;
private final BeanValidationAnnotationsBuildItem validationAnnotations;
private final boolean failureHandler;
private final Type payloadType;
private final String[] contentTypes;
HandlerDescriptor(MethodInfo method, BeanValidationAnnotationsBuildItem bvAnnotations, boolean failureHandler,
String[] producedTypes) {
this.method = method;
this.validationAnnotations = bvAnnotations;
this.failureHandler = failureHandler;
Type returnType = method.returnType();
if (returnType.kind() == Kind.VOID) {
payloadType = null;
} else {
if (returnType.name().equals(DotNames.UNI) || returnType.name().equals(DotNames.MULTI)
|| returnType.name().equals(DotNames.COMPLETION_STAGE)) {
payloadType = returnType.asParameterizedType().arguments().get(0);
} else {
payloadType = returnType;
}
}
this.contentTypes = producedTypes;
}
Type getReturnType() {
return method.returnType();
}
boolean isReturningUni() {
return method.returnType().name().equals(DotNames.UNI);
}
boolean isReturningMulti() {
return method.returnType().name().equals(DotNames.MULTI);
}
boolean isReturningCompletionStage() {
return method.returnType().name().equals(DotNames.COMPLETION_STAGE);
}
public String getFirstContentType() {
if (contentTypes == null || contentTypes.length == 0) {
return null;
}
return contentTypes[0];
}
/**
* @return {@code true} if the method is annotated with a constraint or {@code @Valid} or any parameter has such kind of
* annotation.
*/
boolean requireValidation() {
if (validationAnnotations == null) {
return false;
}
for (AnnotationInstance annotation : method.annotations()) {
if (validationAnnotations.getAllAnnotations().contains(annotation.name())) {
return true;
}
}
return false;
}
/**
* @return {@code true} if the method is annotated with {@code @Valid}.
*/
boolean isProducedResponseValidated() {
if (validationAnnotations == null) {
return false;
}
for (AnnotationInstance annotation : method.annotations()) {
if (validationAnnotations.getValidAnnotation().equals(annotation.name())) {
return true;
}
}
return false;
}
Type getPayloadType() {
return payloadType;
}
boolean isPayloadString() {
Type type = getPayloadType();
return type != null && type.name().equals(DotName.STRING_NAME);
}
boolean isPayloadBuffer() {
Type type = getPayloadType();
return type != null && type.name().equals(DotNames.BUFFER);
}
boolean isPayloadMutinyBuffer() {
Type type = getPayloadType();
return type != null && type.name().equals(DotNames.MUTINY_BUFFER);
}
boolean isFailureHandler() {
return failureHandler;
}
}
| HandlerDescriptor |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/associations/FieldWithUnderscoreTest.java | {
"start": 608,
"end": 913
} | class ____ {
@Test void test(SessionFactoryScope scope) {
scope.inSession(s -> s.createSelectionQuery("from B join _a", B.class).getResultList());
scope.inSession(s -> s.createSelectionQuery("from B left join fetch _a", B.class).getResultList());
}
@Entity(name = "A")
static | FieldWithUnderscoreTest |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/dialect/temptable/HANAGlobalTemporaryTableStrategy.java | {
"start": 201,
"end": 494
} | class ____ extends StandardGlobalTemporaryTableStrategy {
@Override
public String getTemporaryTableCreateCommand() {
return "create global temporary row table";
}
@Override
public String getTemporaryTableTruncateCommand() {
return "truncate table";
}
}
| HANAGlobalTemporaryTableStrategy |
java | apache__camel | components/camel-mail/src/main/java/org/apache/camel/component/mail/MailSorter.java | {
"start": 1174,
"end": 6768
} | class ____ {
/**
* No instances
*/
private MailSorter() {
}
/**
* Sort the messages. This emulates sorting the messages on the server if the server doesn't have the sorting
* capability. See RFC 5256 Does not support complex sorting like in the RFC (with Base Subject or other similar
* stuff), just simple comparisons.
*
* @param messages Messages to sort. Are sorted in place
* @param sortTerm Sort term
*/
public static void sortMessages(Message[] messages, final SortTerm[] sortTerm) {
final List<SortTermWithDescending> sortTermsWithDescending = getSortTermsWithDescending(sortTerm);
sortMessages(messages, sortTermsWithDescending);
}
/**
* Compute the potentially descending sort terms from the input list
*
* @param sortTerm Input list
* @return Sort terms list including if the respective sort should be sorted in descending order
*/
private static List<SortTermWithDescending> getSortTermsWithDescending(SortTerm[] sortTerm) {
// List of reversable sort terms. If the boolean is true the respective sort term is descending
final List<SortTermWithDescending> sortTermsWithDescending = new ArrayList<>(sortTerm.length);
// Descending next item in input because the last item was a "descending"
boolean descendingNext = false;
for (SortTerm term : sortTerm) {
if (term.equals(SortTerm.REVERSE)) {
if (descendingNext) {
throw new IllegalArgumentException("Double reverse in sort term is not allowed");
}
descendingNext = true;
} else {
sortTermsWithDescending.add(new SortTermWithDescending(term, descendingNext));
descendingNext = false;
}
}
return sortTermsWithDescending;
}
/**
* Sort messages using the list of properties
*
* @param messages Messages to sort. Are sorted in place
* @param sortTermsWithDescending Sort terms list including if the respective sort should be sorted in descending
* order
*/
private static void sortMessages(Message[] messages, final List<SortTermWithDescending> sortTermsWithDescending) {
Arrays.sort(messages, (Message m1, Message m2) -> {
try {
for (SortTermWithDescending reversableTerm : sortTermsWithDescending) {
int comparison = compareMessageProperty(m1, m2, reversableTerm.getTerm());
// Descending
if (reversableTerm.isDescending()) {
comparison = -comparison;
}
// Abort on first non-equal
if (comparison != 0) {
return comparison;
}
}
// Equal
return 0;
} catch (MessagingException e) {
throw new IllegalArgumentException(e);
}
});
}
/**
* Compare the value of the property of the two messages.
*
* @param msg1 Message 1
* @param msg2 Message 2
* @param property Property to compare
* @return msg1.property.compareTo(msg2.property)
* @throws jakarta.mail.MessagingException If message data could not be read.
*/
private static int compareMessageProperty(Message msg1, Message msg2, SortTerm property) throws MessagingException {
if (property.equals(SortTerm.TO)) {
InternetAddress addr1 = (InternetAddress) msg1.getRecipients(Message.RecipientType.TO)[0];
InternetAddress addr2 = (InternetAddress) msg2.getRecipients(Message.RecipientType.TO)[0];
return addr1.getAddress().compareTo(addr2.getAddress());
} else if (property.equals(SortTerm.CC)) {
InternetAddress addr1 = (InternetAddress) msg1.getRecipients(Message.RecipientType.CC)[0];
InternetAddress addr2 = (InternetAddress) msg2.getRecipients(Message.RecipientType.CC)[0];
return addr1.getAddress().compareTo(addr2.getAddress());
} else if (property.equals(SortTerm.FROM)) {
InternetAddress addr1 = (InternetAddress) msg1.getFrom()[0];
InternetAddress addr2 = (InternetAddress) msg2.getFrom()[0];
return addr1.getAddress().compareTo(addr2.getAddress());
} else if (property.equals(SortTerm.ARRIVAL)) {
Date arr1 = msg1.getReceivedDate();
Date arr2 = msg2.getReceivedDate();
return arr1.compareTo(arr2);
} else if (property.equals(SortTerm.DATE)) {
Date sent1 = msg1.getSentDate();
Date sent2 = msg2.getSentDate();
return sent1.compareTo(sent2);
} else if (property.equals(SortTerm.SIZE)) {
int size1 = msg1.getSize();
int size2 = msg2.getSize();
return Integer.compare(size1, size2);
} else if (property.equals(SortTerm.SUBJECT)) {
String sub1 = msg1.getSubject();
String sub2 = msg2.getSubject();
return sub1.compareTo(sub2);
}
throw new IllegalArgumentException(String.format("Unknown sort term: %s", property.toString()));
}
/**
* A sort term with a bit indicating if sorting should be descending for this term
*/
private static final | MailSorter |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/db2/DB2TruncateTest.java | {
"start": 1037,
"end": 2711
} | class ____ extends DB2Test {
public void test_0() throws Exception {
String sql = "TRUNCATE TASK_AICSFSM_QUERY_BATCH_TEMP IMMEDIATE";
DB2StatementParser parser = new DB2StatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement stmt = statementList.get(0);
print(statementList);
assertEquals(1, statementList.size());
DB2SchemaStatVisitor visitor = new DB2SchemaStatVisitor();
stmt.accept(visitor);
// System.out.println("Tables : " + visitor.getTables());
// System.out.println("fields : " + visitor.getColumns());
// System.out.println("coditions : " + visitor.getConditions());
// System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(1, visitor.getTables().size());
assertEquals(0, visitor.getColumns().size());
assertEquals(0, visitor.getConditions().size());
assertTrue(visitor.getTables().containsKey(new TableStat.Name("TASK_AICSFSM_QUERY_BATCH_TEMP")));
// assertTrue(visitor.getColumns().contains(new Column("A", "F_0201")));
// assertTrue(visitor.getColumns().contains(new Column("mytable", "first_name")));
// assertTrue(visitor.getColumns().contains(new Column("mytable", "full_name")));
assertEquals("TRUNCATE TABLE TASK_AICSFSM_QUERY_BATCH_TEMP IMMEDIATE", //
SQLUtils.toSQLString(stmt, JdbcConstants.DB2));
assertEquals("truncate table TASK_AICSFSM_QUERY_BATCH_TEMP immediate", //
SQLUtils.toSQLString(stmt, JdbcConstants.DB2, SQLUtils.DEFAULT_LCASE_FORMAT_OPTION));
}
}
| DB2TruncateTest |
java | google__jimfs | jimfs/src/test/java/com/google/common/jimfs/JimfsFileChannelTest.java | {
"start": 2969,
"end": 28927
} | class ____ {
private static FileChannel channel(RegularFile file, OpenOption... options) throws IOException {
return new JimfsFileChannel(
file,
Options.getOptionsForChannel(ImmutableSet.copyOf(options)),
new FileSystemState(new FakeFileTimeSource(), Runnables.doNothing()));
}
@Test
public void testPosition() throws IOException {
FileChannel channel = channel(regularFile(10), READ);
assertEquals(0, channel.position());
assertSame(channel, channel.position(100));
assertEquals(100, channel.position());
}
@Test
public void testSize() throws IOException {
RegularFile file = regularFile(10);
FileChannel channel = channel(file, READ);
assertEquals(10, channel.size());
file.write(10, new byte[90], 0, 90);
assertEquals(100, channel.size());
}
@Test
public void testRead() throws IOException {
RegularFile file = regularFile(20);
FileChannel channel = channel(file, READ);
assertEquals(0, channel.position());
ByteBuffer buf = buffer("1234567890");
ByteBuffer buf2 = buffer("123457890");
assertEquals(10, channel.read(buf));
assertEquals(10, channel.position());
buf.flip();
assertEquals(10, channel.read(new ByteBuffer[] {buf, buf2}));
assertEquals(20, channel.position());
buf.flip();
buf2.flip();
file.write(20, new byte[10], 0, 10);
assertEquals(10, channel.read(new ByteBuffer[] {buf, buf2}, 0, 2));
assertEquals(30, channel.position());
buf.flip();
assertEquals(10, channel.read(buf, 5));
assertEquals(30, channel.position());
buf.flip();
assertEquals(-1, channel.read(buf));
assertEquals(30, channel.position());
}
@Test
public void testWrite() throws IOException {
RegularFile file = regularFile(0);
FileChannel channel = channel(file, WRITE);
assertEquals(0, channel.position());
ByteBuffer buf = buffer("1234567890");
ByteBuffer buf2 = buffer("1234567890");
assertEquals(10, channel.write(buf));
assertEquals(10, channel.position());
buf.flip();
assertEquals(20, channel.write(new ByteBuffer[] {buf, buf2}));
assertEquals(30, channel.position());
buf.flip();
buf2.flip();
assertEquals(20, channel.write(new ByteBuffer[] {buf, buf2}, 0, 2));
assertEquals(50, channel.position());
buf.flip();
assertEquals(10, channel.write(buf, 5));
assertEquals(50, channel.position());
}
@Test
public void testAppend() throws IOException {
RegularFile file = regularFile(0);
FileChannel channel = channel(file, WRITE, APPEND);
assertEquals(0, channel.position());
ByteBuffer buf = buffer("1234567890");
ByteBuffer buf2 = buffer("1234567890");
assertEquals(10, channel.write(buf));
assertEquals(10, channel.position());
buf.flip();
channel.position(0);
assertEquals(20, channel.write(new ByteBuffer[] {buf, buf2}));
assertEquals(30, channel.position());
buf.flip();
buf2.flip();
channel.position(0);
assertEquals(20, channel.write(new ByteBuffer[] {buf, buf2}, 0, 2));
assertEquals(50, channel.position());
buf.flip();
channel.position(0);
assertEquals(10, channel.write(buf, 5));
assertEquals(60, channel.position());
buf.flip();
channel.position(0);
assertEquals(10, channel.transferFrom(new ByteBufferChannel(buf), 0, 10));
assertEquals(70, channel.position());
}
@Test
public void testTransferTo() throws IOException {
RegularFile file = regularFile(10);
FileChannel channel = channel(file, READ);
ByteBufferChannel writeChannel = new ByteBufferChannel(buffer("1234567890"));
assertEquals(10, channel.transferTo(0, 100, writeChannel));
assertEquals(0, channel.position());
}
@Test
public void testTransferFrom() throws IOException {
RegularFile file = regularFile(0);
FileChannel channel = channel(file, WRITE);
ByteBufferChannel readChannel = new ByteBufferChannel(buffer("1234567890"));
assertEquals(10, channel.transferFrom(readChannel, 0, 100));
assertEquals(0, channel.position());
}
@Test
public void testTruncate() throws IOException {
RegularFile file = regularFile(10);
FileChannel channel = channel(file, WRITE);
channel.truncate(10); // no resize, >= size
assertEquals(10, file.size());
channel.truncate(11); // no resize, > size
assertEquals(10, file.size());
channel.truncate(5); // resize down to 5
assertEquals(5, file.size());
channel.position(20);
channel.truncate(10);
assertEquals(10, channel.position());
channel.truncate(2);
assertEquals(2, channel.position());
}
@Test
public void testFileTimeUpdates() throws IOException {
RegularFile file = regularFile(10);
FakeFileTimeSource fileTimeSource = new FakeFileTimeSource();
FileChannel channel =
new JimfsFileChannel(
file,
ImmutableSet.<OpenOption>of(READ, WRITE),
new FileSystemState(fileTimeSource, Runnables.doNothing()));
// accessedTime
FileTime accessTime = file.getLastAccessTime();
fileTimeSource.advance(Duration.ofMillis(2));
channel.read(ByteBuffer.allocate(10));
assertNotEquals(accessTime, file.getLastAccessTime());
accessTime = file.getLastAccessTime();
fileTimeSource.advance(Duration.ofMillis(2));
channel.read(ByteBuffer.allocate(10), 0);
assertNotEquals(accessTime, file.getLastAccessTime());
accessTime = file.getLastAccessTime();
fileTimeSource.advance(Duration.ofMillis(2));
channel.read(new ByteBuffer[] {ByteBuffer.allocate(10)});
assertNotEquals(accessTime, file.getLastAccessTime());
accessTime = file.getLastAccessTime();
fileTimeSource.advance(Duration.ofMillis(2));
channel.read(new ByteBuffer[] {ByteBuffer.allocate(10)}, 0, 1);
assertNotEquals(accessTime, file.getLastAccessTime());
accessTime = file.getLastAccessTime();
fileTimeSource.advance(Duration.ofMillis(2));
channel.transferTo(0, 10, new ByteBufferChannel(10));
assertNotEquals(accessTime, file.getLastAccessTime());
// modified
FileTime modifiedTime = file.getLastModifiedTime();
fileTimeSource.advance(Duration.ofMillis(2));
channel.write(ByteBuffer.allocate(10));
assertNotEquals(modifiedTime, file.getLastModifiedTime());
modifiedTime = file.getLastModifiedTime();
fileTimeSource.advance(Duration.ofMillis(2));
channel.write(ByteBuffer.allocate(10), 0);
assertNotEquals(modifiedTime, file.getLastModifiedTime());
modifiedTime = file.getLastModifiedTime();
fileTimeSource.advance(Duration.ofMillis(2));
channel.write(new ByteBuffer[] {ByteBuffer.allocate(10)});
assertNotEquals(modifiedTime, file.getLastModifiedTime());
modifiedTime = file.getLastModifiedTime();
fileTimeSource.advance(Duration.ofMillis(2));
channel.write(new ByteBuffer[] {ByteBuffer.allocate(10)}, 0, 1);
assertNotEquals(modifiedTime, file.getLastModifiedTime());
modifiedTime = file.getLastModifiedTime();
fileTimeSource.advance(Duration.ofMillis(2));
channel.truncate(0);
assertNotEquals(modifiedTime, file.getLastModifiedTime());
modifiedTime = file.getLastModifiedTime();
fileTimeSource.advance(Duration.ofMillis(2));
channel.transferFrom(new ByteBufferChannel(10), 0, 10);
assertNotEquals(modifiedTime, file.getLastModifiedTime());
}
@Test
public void testClose() throws IOException {
FileChannel channel = channel(regularFile(0), READ, WRITE);
ExecutorService executor = Executors.newSingleThreadExecutor();
assertTrue(channel.isOpen());
channel.close();
assertFalse(channel.isOpen());
assertThrows(ClosedChannelException.class, () -> channel.position());
assertThrows(ClosedChannelException.class, () -> channel.position(0));
assertThrows(ClosedChannelException.class, () -> channel.lock());
assertThrows(ClosedChannelException.class, () -> channel.lock(0, 10, true));
assertThrows(ClosedChannelException.class, () -> channel.tryLock());
assertThrows(ClosedChannelException.class, () -> channel.tryLock(0, 10, true));
assertThrows(ClosedChannelException.class, () -> channel.force(true));
assertThrows(ClosedChannelException.class, () -> channel.write(buffer("111")));
assertThrows(ClosedChannelException.class, () -> channel.write(buffer("111"), 10));
assertThrows(
ClosedChannelException.class,
() -> channel.write(new ByteBuffer[] {buffer("111"), buffer("111")}));
assertThrows(
ClosedChannelException.class,
() -> channel.write(new ByteBuffer[] {buffer("111"), buffer("111")}, 0, 2));
assertThrows(
ClosedChannelException.class,
() -> channel.transferFrom(new ByteBufferChannel(bytes("1111")), 0, 4));
assertThrows(ClosedChannelException.class, () -> channel.truncate(0));
assertThrows(ClosedChannelException.class, () -> channel.read(buffer("111")));
assertThrows(ClosedChannelException.class, () -> channel.read(buffer("111"), 10));
assertThrows(
ClosedChannelException.class,
() -> channel.read(new ByteBuffer[] {buffer("111"), buffer("111")}));
assertThrows(
ClosedChannelException.class,
() -> channel.read(new ByteBuffer[] {buffer("111"), buffer("111")}, 0, 2));
assertThrows(
ClosedChannelException.class,
() -> channel.transferTo(0, 10, new ByteBufferChannel(buffer("111"))));
executor.shutdown();
}
@Test
public void testWritesInReadOnlyMode() throws IOException {
FileChannel channel = channel(regularFile(0), READ);
assertThrows(NonWritableChannelException.class, () -> channel.write(buffer("111")));
assertThrows(NonWritableChannelException.class, () -> channel.write(buffer("111"), 10));
assertThrows(
NonWritableChannelException.class,
() -> channel.write(new ByteBuffer[] {buffer("111"), buffer("111")}));
assertThrows(
NonWritableChannelException.class,
() -> channel.write(new ByteBuffer[] {buffer("111"), buffer("111")}, 0, 2));
assertThrows(
NonWritableChannelException.class,
() -> channel.transferFrom(new ByteBufferChannel(bytes("1111")), 0, 4));
assertThrows(NonWritableChannelException.class, () -> channel.truncate(0));
assertThrows(NonWritableChannelException.class, () -> channel.lock(0, 10, false));
}
@Test
public void testReadsInWriteOnlyMode() throws IOException {
FileChannel channel = channel(regularFile(0), WRITE);
assertThrows(NonReadableChannelException.class, () -> channel.read(buffer("111")));
assertThrows(NonReadableChannelException.class, () -> channel.read(buffer("111"), 10));
assertThrows(
NonReadableChannelException.class,
() -> channel.read(new ByteBuffer[] {buffer("111"), buffer("111")}));
assertThrows(
NonReadableChannelException.class,
() -> channel.read(new ByteBuffer[] {buffer("111"), buffer("111")}, 0, 2));
assertThrows(
NonReadableChannelException.class,
() -> channel.transferTo(0, 10, new ByteBufferChannel(buffer("111"))));
assertThrows(NonReadableChannelException.class, () -> channel.lock(0, 10, true));
}
@Test
public void testPositionNegative() throws IOException {
FileChannel channel = channel(regularFile(0), READ, WRITE);
assertThrows(IllegalArgumentException.class, () -> channel.position(-1));
}
@Test
public void testTruncateNegative() throws IOException {
FileChannel channel = channel(regularFile(0), READ, WRITE);
assertThrows(IllegalArgumentException.class, () -> channel.truncate(-1));
}
@Test
public void testWriteNegative() throws IOException {
FileChannel channel = channel(regularFile(0), READ, WRITE);
assertThrows(IllegalArgumentException.class, () -> channel.write(buffer("111"), -1));
ByteBuffer[] bufs = {buffer("111"), buffer("111")};
assertThrows(IndexOutOfBoundsException.class, () -> channel.write(bufs, -1, 10));
assertThrows(IndexOutOfBoundsException.class, () -> channel.write(bufs, 0, -1));
}
@Test
public void testReadNegative() throws IOException {
FileChannel channel = channel(regularFile(0), READ, WRITE);
assertThrows(IllegalArgumentException.class, () -> channel.read(buffer("111"), -1));
ByteBuffer[] bufs = {buffer("111"), buffer("111")};
assertThrows(IndexOutOfBoundsException.class, () -> channel.read(bufs, -1, 10));
assertThrows(IndexOutOfBoundsException.class, () -> channel.read(bufs, 0, -1));
}
@Test
public void testTransferToNegative() throws IOException {
FileChannel channel = channel(regularFile(0), READ, WRITE);
assertThrows(
IllegalArgumentException.class, () -> channel.transferTo(-1, 0, new ByteBufferChannel(10)));
assertThrows(
IllegalArgumentException.class, () -> channel.transferTo(0, -1, new ByteBufferChannel(10)));
}
@Test
public void testTransferFromNegative() throws IOException {
FileChannel channel = channel(regularFile(0), READ, WRITE);
assertThrows(
IllegalArgumentException.class,
() -> channel.transferFrom(new ByteBufferChannel(10), -1, 0));
assertThrows(
IllegalArgumentException.class,
() -> channel.transferFrom(new ByteBufferChannel(10), 0, -1));
}
@Test
public void testLockNegative() throws IOException {
FileChannel channel = channel(regularFile(0), READ, WRITE);
assertThrows(IllegalArgumentException.class, () -> channel.lock(-1, 10, true));
assertThrows(IllegalArgumentException.class, () -> channel.lock(0, -1, true));
assertThrows(IllegalArgumentException.class, () -> channel.tryLock(-1, 10, true));
assertThrows(IllegalArgumentException.class, () -> channel.tryLock(0, -1, true));
}
@Test
public void testNullPointerExceptions() throws IOException {
FileChannel channel = channel(regularFile(100), READ, WRITE);
NullPointerTester tester = new NullPointerTester();
tester.testAllPublicInstanceMethods(channel);
}
@Test
public void testLock() throws IOException {
FileChannel channel = channel(regularFile(10), READ, WRITE);
assertNotNull(channel.lock());
assertNotNull(channel.lock(0, 10, false));
assertNotNull(channel.lock(0, 10, true));
assertNotNull(channel.tryLock());
assertNotNull(channel.tryLock(0, 10, false));
assertNotNull(channel.tryLock(0, 10, true));
FileLock lock = channel.lock();
assertTrue(lock.isValid());
lock.release();
assertFalse(lock.isValid());
}
@Test
public void testAsynchronousClose() throws Exception {
RegularFile file = regularFile(10);
final FileChannel channel = channel(file, READ, WRITE);
file.writeLock().lock(); // ensure all operations on the channel will block
ExecutorService executor = Executors.newCachedThreadPool();
CountDownLatch latch = new CountDownLatch(BLOCKING_OP_COUNT);
List<Future<?>> futures = queueAllBlockingOperations(channel, executor, latch);
// wait for all the threads to have started running
latch.await();
// then ensure time for operations to start blocking
Uninterruptibles.sleepUninterruptibly(20, MILLISECONDS);
// close channel on this thread
channel.close();
// the blocking operations are running on different threads, so they all get
// AsynchronousCloseException
for (Future<?> future : futures) {
ExecutionException expected = assertThrows(ExecutionException.class, () -> future.get());
assertWithMessage("blocking thread exception")
.that(expected.getCause())
.isInstanceOf(AsynchronousCloseException.class);
}
}
@Test
@org.junit.Ignore // flaky
public void testCloseByInterrupt() throws Exception {
RegularFile file = regularFile(10);
final FileChannel channel = channel(file, READ, WRITE);
file.writeLock().lock(); // ensure all operations on the channel will block
ExecutorService executor = Executors.newCachedThreadPool();
final CountDownLatch threadStartLatch = new CountDownLatch(1);
final SettableFuture<Throwable> interruptException = SettableFuture.create();
// This thread, being the first to run, will be blocking on the interruptible lock (the byte
// file's write lock) and as such will be interrupted properly... the other threads will be
// blocked on the lock that guards the position field and the specification that only one method
// on the channel will be in progress at a time. That lock is not interruptible, so we must
// interrupt this thread.
Thread thread =
new Thread(
new Runnable() {
@Override
public void run() {
threadStartLatch.countDown();
try {
channel.write(ByteBuffer.allocate(20));
interruptException.set(null);
} catch (Throwable e) {
interruptException.set(e);
}
}
});
thread.start();
// let the thread start running
threadStartLatch.await();
// then ensure time for thread to start blocking on the write lock
Uninterruptibles.sleepUninterruptibly(10, MILLISECONDS);
CountDownLatch blockingStartLatch = new CountDownLatch(BLOCKING_OP_COUNT);
List<Future<?>> futures = queueAllBlockingOperations(channel, executor, blockingStartLatch);
// wait for all blocking threads to start
blockingStartLatch.await();
// then ensure time for the operations to start blocking
Uninterruptibles.sleepUninterruptibly(20, MILLISECONDS);
// interrupting this blocking thread closes the channel and makes all the other threads
// throw AsynchronousCloseException... the operation on this thread should throw
// ClosedByInterruptException
thread.interrupt();
// get the exception that caused the interrupted operation to terminate
assertWithMessage("interrupted thread exception")
.that(interruptException.get(200, MILLISECONDS))
.isInstanceOf(ClosedByInterruptException.class);
// check that each other thread got AsynchronousCloseException (since the interrupt, on a
// different thread, closed the channel)
for (Future<?> future : futures) {
ExecutionException expected = assertThrows(ExecutionException.class, () -> future.get());
assertWithMessage("blocking thread exception")
.that(expected.getCause())
.isInstanceOf(AsynchronousCloseException.class);
}
}
private static final int BLOCKING_OP_COUNT = 10;
/**
* Queues blocking operations on the channel in separate threads using the given executor. The
* given latch should have a count of BLOCKING_OP_COUNT to allow the caller wants to wait for all
* threads to start executing.
*/
private List<Future<?>> queueAllBlockingOperations(
final FileChannel channel, ExecutorService executor, final CountDownLatch startLatch) {
List<Future<?>> futures = new ArrayList<>();
final ByteBuffer buffer = ByteBuffer.allocate(10);
futures.add(
executor.submit(
new Callable<Object>() {
@Override
public Object call() throws Exception {
startLatch.countDown();
channel.write(buffer);
return null;
}
}));
futures.add(
executor.submit(
new Callable<Object>() {
@Override
public Object call() throws Exception {
startLatch.countDown();
channel.write(buffer, 0);
return null;
}
}));
futures.add(
executor.submit(
new Callable<Object>() {
@Override
public Object call() throws Exception {
startLatch.countDown();
channel.write(new ByteBuffer[] {buffer, buffer});
return null;
}
}));
futures.add(
executor.submit(
new Callable<Object>() {
@Override
public Object call() throws Exception {
startLatch.countDown();
channel.write(new ByteBuffer[] {buffer, buffer, buffer}, 0, 2);
return null;
}
}));
futures.add(
executor.submit(
new Callable<Object>() {
@Override
public Object call() throws Exception {
startLatch.countDown();
channel.read(buffer);
return null;
}
}));
futures.add(
executor.submit(
new Callable<Object>() {
@Override
public Object call() throws Exception {
startLatch.countDown();
channel.read(buffer, 0);
return null;
}
}));
futures.add(
executor.submit(
new Callable<Object>() {
@Override
public Object call() throws Exception {
startLatch.countDown();
channel.read(new ByteBuffer[] {buffer, buffer});
return null;
}
}));
futures.add(
executor.submit(
new Callable<Object>() {
@Override
public Object call() throws Exception {
startLatch.countDown();
channel.read(new ByteBuffer[] {buffer, buffer, buffer}, 0, 2);
return null;
}
}));
futures.add(
executor.submit(
new Callable<Object>() {
@Override
public Object call() throws Exception {
startLatch.countDown();
channel.transferTo(0, 10, new ByteBufferChannel(buffer));
return null;
}
}));
futures.add(
executor.submit(
new Callable<Object>() {
@Override
public Object call() throws Exception {
startLatch.countDown();
channel.transferFrom(new ByteBufferChannel(buffer), 0, 10);
return null;
}
}));
return futures;
}
/**
* Tests that the methods on the default FileChannel that support InterruptibleChannel behavior
* also support it on JimfsFileChannel, by just interrupting the thread before calling the method.
*/
@Test
public void testInterruptedThreads() throws IOException {
final ByteBuffer buf = ByteBuffer.allocate(10);
final ByteBuffer[] bufArray = {buf};
assertClosedByInterrupt(
new FileChannelMethod() {
@Override
public void call(FileChannel channel) throws IOException {
channel.size();
}
});
assertClosedByInterrupt(
new FileChannelMethod() {
@Override
public void call(FileChannel channel) throws IOException {
channel.position();
}
});
assertClosedByInterrupt(
new FileChannelMethod() {
@Override
public void call(FileChannel channel) throws IOException {
channel.position(0);
}
});
assertClosedByInterrupt(
new FileChannelMethod() {
@Override
public void call(FileChannel channel) throws IOException {
channel.write(buf);
}
});
assertClosedByInterrupt(
new FileChannelMethod() {
@Override
public void call(FileChannel channel) throws IOException {
channel.write(bufArray, 0, 1);
}
});
assertClosedByInterrupt(
new FileChannelMethod() {
@Override
public void call(FileChannel channel) throws IOException {
channel.read(buf);
}
});
assertClosedByInterrupt(
new FileChannelMethod() {
@Override
public void call(FileChannel channel) throws IOException {
channel.read(bufArray, 0, 1);
}
});
assertClosedByInterrupt(
new FileChannelMethod() {
@Override
public void call(FileChannel channel) throws IOException {
channel.write(buf, 0);
}
});
assertClosedByInterrupt(
new FileChannelMethod() {
@Override
public void call(FileChannel channel) throws IOException {
channel.read(buf, 0);
}
});
assertClosedByInterrupt(
new FileChannelMethod() {
@Override
public void call(FileChannel channel) throws IOException {
channel.transferTo(0, 1, channel(regularFile(10), READ, WRITE));
}
});
assertClosedByInterrupt(
new FileChannelMethod() {
@Override
public void call(FileChannel channel) throws IOException {
channel.transferFrom(channel(regularFile(10), READ, WRITE), 0, 1);
}
});
assertClosedByInterrupt(
new FileChannelMethod() {
@Override
public void call(FileChannel channel) throws IOException {
channel.force(true);
}
});
assertClosedByInterrupt(
new FileChannelMethod() {
@Override
public void call(FileChannel channel) throws IOException {
channel.truncate(0);
}
});
assertClosedByInterrupt(
new FileChannelMethod() {
@Override
public void call(FileChannel channel) throws IOException {
channel.lock(0, 1, true);
}
});
// tryLock() does not handle interruption
// map() always throws UOE; it doesn't make sense for it to try to handle interruption
}
private | JimfsFileChannelTest |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java | {
"start": 7033,
"end": 19682
} | interface ____ {
void run() throws AccessControlException;
}
static String runCheckPermission(CheckPermission checker,
LongFunction<String> checkElapsedMs) throws AccessControlException {
final String message;
final long start = Time.monotonicNow();
try {
checker.run();
} finally {
final long end = Time.monotonicNow();
message = checkElapsedMs.apply(end - start);
}
return message;
}
private AccessControlEnforcer initAccessControlEnforcer() {
final AccessControlEnforcer e = Optional.ofNullable(attributeProvider)
.map(p -> p.getExternalAccessControlEnforcer(this))
.orElse(this);
if (e == this) {
return this;
}
// For an external AccessControlEnforcer, check for slowness.
return new AccessControlEnforcer() {
@Override
public void checkPermission(
String filesystemOwner, String superGroup, UserGroupInformation ugi,
INodeAttributes[] inodeAttrs, INode[] inodes, byte[][] pathByNameArr,
int snapshotId, String path, int ancestorIndex, boolean doCheckOwner,
FsAction ancestorAccess, FsAction parentAccess, FsAction access,
FsAction subAccess, boolean ignoreEmptyDir)
throws AccessControlException {
runCheckPermission(
() -> e.checkPermission(filesystemOwner, superGroup, ugi,
inodeAttrs, inodes, pathByNameArr, snapshotId, path,
ancestorIndex, doCheckOwner, ancestorAccess, parentAccess,
access, subAccess, ignoreEmptyDir),
elapsedMs -> checkAccessControlEnforcerSlowness(elapsedMs,
accessControlEnforcerReportingThresholdMs,
e.getClass(), false, path, operationType.get(),
CallerContext.getCurrent()));
}
@Override
public void checkPermissionWithContext(AuthorizationContext context)
throws AccessControlException {
runCheckPermission(
() -> e.checkPermissionWithContext(context),
elapsedMs -> checkAccessControlEnforcerSlowness(elapsedMs,
e, false, context));
}
@Override
public void checkSuperUserPermissionWithContext(
AuthorizationContext context) throws AccessControlException {
runCheckPermission(
() -> e.checkSuperUserPermissionWithContext(context),
elapsedMs -> checkAccessControlEnforcerSlowness(elapsedMs,
e, true, context));
}
};
}
private AuthorizationContext getAuthorizationContextForSuperUser(
String path) {
String opType = operationType.get();
AuthorizationContext.Builder builder =
new INodeAttributeProvider.AuthorizationContext.Builder();
builder.fsOwner(fsOwner).
supergroup(supergroup).
callerUgi(callerUgi).
operationName(opType).
callerContext(CallerContext.getCurrent());
// Add path to the context builder only if it is not null.
if (path != null && !path.isEmpty()) {
builder.path(path);
}
return builder.build();
}
/**
* This method is retained to maintain backward compatibility.
* Please use the new method {@link #checkSuperuserPrivilege(String)} to make
* sure that the external enforcers have the correct context to audit.
*
* @throws AccessControlException if the caller is not a super user.
*/
public void checkSuperuserPrivilege() throws AccessControlException {
checkSuperuserPrivilege(null);
}
/**
* Checks if the caller has super user privileges.
* Throws {@link AccessControlException} for non super users.
*
* @param path The resource path for which permission is being requested.
* @throws AccessControlException if the caller is not a super user.
*/
public void checkSuperuserPrivilege(String path)
throws AccessControlException {
if (LOG.isDebugEnabled()) {
LOG.debug("SUPERUSER ACCESS CHECK: " + this
+ ", operationName=" + FSPermissionChecker.operationType.get()
+ ", path=" + path);
}
accessControlEnforcer.checkSuperUserPermissionWithContext(
getAuthorizationContextForSuperUser(path));
}
/**
* Calls the external enforcer to notify denial of access to the user with
* the given error message. Always throws an ACE with the given message.
*
* @param path The resource path for which permission is being requested.
* @param errorMessage message for the exception.
* @throws AccessControlException with the error message.
*/
public void denyUserAccess(String path, String errorMessage)
throws AccessControlException {
if (LOG.isDebugEnabled()) {
LOG.debug("DENY USER ACCESS: " + this
+ ", operationName=" + FSPermissionChecker.operationType.get()
+ ", path=" + path);
}
accessControlEnforcer.denyUserAccess(
getAuthorizationContextForSuperUser(path), errorMessage);
}
/**
* Check whether current user have permissions to access the path.
* Traverse is always checked.
*
* Parent path means the parent directory for the path.
* Ancestor path means the last (the closest) existing ancestor directory
* of the path.
* Note that if the parent path exists,
* then the parent path and the ancestor path are the same.
*
* For example, suppose the path is "/foo/bar/baz".
* No matter baz is a file or a directory,
* the parent path is "/foo/bar".
* If bar exists, then the ancestor path is also "/foo/bar".
* If bar does not exist and foo exists,
* then the ancestor path is "/foo".
* Further, if both foo and bar do not exist,
* then the ancestor path is "/".
*
* @param doCheckOwner Require user to be the owner of the path?
* @param ancestorAccess The access required by the ancestor of the path.
* @param parentAccess The access required by the parent of the path.
* @param access The access required by the path.
* @param subAccess If path is a directory,
* it is the access required of the path and all the sub-directories.
* If path is not a directory, there is no effect.
* @param ignoreEmptyDir Ignore permission checking for empty directory?
* @throws AccessControlException
*
* Guarded by {@link FSNamesystem#readLock(RwLockMode)}
* Caller of this method must hold that lock.
*/
void checkPermission(INodesInPath inodesInPath, boolean doCheckOwner,
FsAction ancestorAccess, FsAction parentAccess, FsAction access,
FsAction subAccess, boolean ignoreEmptyDir)
throws AccessControlException {
if (LOG.isDebugEnabled()) {
LOG.debug("ACCESS CHECK: " + this
+ ", doCheckOwner=" + doCheckOwner
+ ", ancestorAccess=" + ancestorAccess
+ ", parentAccess=" + parentAccess
+ ", access=" + access
+ ", subAccess=" + subAccess
+ ", ignoreEmptyDir=" + ignoreEmptyDir);
}
// check if (parentAccess != null) && file exists, then check sb
// If resolveLink, the check is performed on the link target.
final int snapshotId = inodesInPath.getPathSnapshotId();
final INode[] inodes = inodesInPath.getINodesArray();
final INodeAttributes[] inodeAttrs = new INodeAttributes[inodes.length];
final byte[][] components = inodesInPath.getPathComponents();
for (int i = 0; i < inodes.length && inodes[i] != null; i++) {
inodeAttrs[i] = getINodeAttrs(components, i, inodes[i], snapshotId);
}
String path = inodesInPath.getPath();
int ancestorIndex = inodes.length - 2;
String opType = operationType.get();
try {
if (this.authorizeWithContext && opType != null) {
INodeAttributeProvider.AuthorizationContext.Builder builder =
new INodeAttributeProvider.AuthorizationContext.Builder();
builder.fsOwner(fsOwner).
supergroup(supergroup).
callerUgi(callerUgi).
inodeAttrs(inodeAttrs).
inodes(inodes).
pathByNameArr(components).
snapshotId(snapshotId).
path(path).
ancestorIndex(ancestorIndex).
doCheckOwner(doCheckOwner).
ancestorAccess(ancestorAccess).
parentAccess(parentAccess).
access(access).
subAccess(subAccess).
ignoreEmptyDir(ignoreEmptyDir).
operationName(opType).
callerContext(CallerContext.getCurrent());
accessControlEnforcer.checkPermissionWithContext(builder.build());
} else {
accessControlEnforcer.checkPermission(fsOwner, supergroup, callerUgi, inodeAttrs,
inodes, components, snapshotId, path, ancestorIndex, doCheckOwner,
ancestorAccess, parentAccess, access, subAccess, ignoreEmptyDir);
}
} catch (AccessControlException ace) {
Class<?> exceptionClass = ace.getClass();
if (exceptionClass.equals(AccessControlException.class)
|| exceptionClass.equals(TraverseAccessControlException.class)) {
throw ace;
}
// Only form a new ACE for subclasses which come from external enforcers
throw new AccessControlException(ace);
}
}
/**
* Check permission only for the given inode (not checking the children's
* access).
*
* @param inode the inode to check.
* @param snapshotId the snapshot id.
* @param access the target access.
* @throws AccessControlException
*/
void checkPermission(INode inode, int snapshotId, FsAction access)
throws AccessControlException {
byte[][] pathComponents = inode.getPathComponents();
INodeAttributes nodeAttributes = getINodeAttrs(pathComponents,
pathComponents.length - 1, inode, snapshotId);
try {
INodeAttributes[] iNodeAttr = {nodeAttributes};
String opType = operationType.get();
if (this.authorizeWithContext && opType != null) {
INodeAttributeProvider.AuthorizationContext.Builder builder =
new INodeAttributeProvider.AuthorizationContext.Builder();
builder.fsOwner(fsOwner)
.supergroup(supergroup)
.callerUgi(callerUgi)
.inodeAttrs(iNodeAttr) // single inode attr in the array
.inodes(new INode[] { inode }) // single inode attr in the array
.pathByNameArr(pathComponents)
.snapshotId(snapshotId)
.path(null)
.ancestorIndex(-1) // this will skip checkTraverse()
// because not checking ancestor here
.doCheckOwner(false)
.ancestorAccess(null)
.parentAccess(null)
.access(access) // the target access to be checked against
// the inode
.subAccess(null) // passing null sub access avoids checking
// children
.ignoreEmptyDir(false)
.operationName(opType)
.callerContext(CallerContext.getCurrent());
accessControlEnforcer.checkPermissionWithContext(builder.build());
} else {
accessControlEnforcer.checkPermission(
fsOwner, supergroup, callerUgi,
iNodeAttr, // single inode attr in the array
new INode[]{inode}, // single inode in the array
pathComponents, snapshotId,
null, -1, // this will skip checkTraverse() because
// not checking ancestor here
false, null, null,
access, // the target access to be checked against the inode
null, // passing null sub access avoids checking children
false);
}
} catch (AccessControlException ace) {
LOG.debug("Error while checking permission: ", ace);
throw new AccessControlException(
toAccessControlString(nodeAttributes, inode.getFullPathName(),
access));
}
}
@Override
public void checkPermission(String fsOwner, String supergroup,
UserGroupInformation callerUgi, INodeAttributes[] inodeAttrs,
INode[] inodes, byte[][] components, int snapshotId, String path,
int ancestorIndex, boolean doCheckOwner, FsAction ancestorAccess,
FsAction parentAccess, FsAction access, FsAction subAccess,
boolean ignoreEmptyDir)
throws AccessControlException {
for(; ancestorIndex >= 0 && inodes[ancestorIndex] == null;
ancestorIndex--);
try {
checkTraverse(inodeAttrs, inodes, components, ancestorIndex);
} catch (UnresolvedPathException | ParentNotDirectoryException ex) {
// must tunnel these exceptions out to avoid breaking | CheckPermission |
java | apache__camel | components/camel-as2/camel-as2-api/src/main/java/org/apache/camel/component/as2/api/AS2ServerConnection.java | {
"start": 12211,
"end": 12571
} | class ____ implements HttpRequestInterceptor {
@Override
public void process(HttpRequest request, EntityDetails entityDetails, HttpContext context)
throws HttpException, IOException {
if (request instanceof ClassicHttpRequest) {
// Now safely calling the method on the outer | AS2ConsumerConfigInterceptor |
java | apache__maven | impl/maven-core/src/main/java/org/apache/maven/plugin/PluginDescriptorCache.java | {
"start": 1769,
"end": 2537
} | interface ____ {
PluginDescriptor load()
throws PluginResolutionException, PluginDescriptorParsingException, InvalidPluginDescriptorException;
}
Key createKey(Plugin plugin, List<RemoteRepository> repositories, RepositorySystemSession session);
void put(Key key, PluginDescriptor pluginDescriptor);
PluginDescriptor get(Key key);
default PluginDescriptor get(Key key, PluginDescriptorSupplier supplier)
throws PluginResolutionException, PluginDescriptorParsingException, InvalidPluginDescriptorException {
PluginDescriptor pd = get(key);
if (pd == null) {
pd = supplier.load();
put(key, pd);
}
return pd;
}
void flush();
}
| PluginDescriptorSupplier |
java | elastic__elasticsearch | benchmarks/src/main/java/org/elasticsearch/benchmark/vector/quantization/PackAsBinaryBenchmark.java | {
"start": 1683,
"end": 3606
} | class ____ {
static {
LogConfigurator.configureESLogging(); // native access requires logging to be initialized
}
@Param({ "384", "782", "1024" })
int dims;
int length;
int numVectors = 1000;
int[][] qVectors;
byte[] packed;
@Setup
public void setup() throws IOException {
Random random = new Random(123);
this.length = BQVectorUtils.discretize(dims, 64) / 8;
this.packed = new byte[length];
qVectors = new int[numVectors][dims];
for (int[] qVector : qVectors) {
for (int i = 0; i < dims; i++) {
qVector[i] = random.nextInt(2);
}
}
}
@Benchmark
public void packAsBinary(Blackhole bh) {
for (int i = 0; i < numVectors; i++) {
ESVectorUtil.packAsBinary(qVectors[i], packed);
bh.consume(packed);
}
}
@Benchmark
public void packAsBinaryLegacy(Blackhole bh) {
for (int i = 0; i < numVectors; i++) {
packAsBinaryLegacy(qVectors[i], packed);
bh.consume(packed);
}
}
@Benchmark
@Fork(jvmArgsPrepend = { "--add-modules=jdk.incubator.vector" })
public void packAsBinaryPanama(Blackhole bh) {
for (int i = 0; i < numVectors; i++) {
ESVectorUtil.packAsBinary(qVectors[i], packed);
bh.consume(packed);
}
}
private static void packAsBinaryLegacy(int[] vector, byte[] packed) {
for (int i = 0; i < vector.length;) {
byte result = 0;
for (int j = 7; j >= 0 && i < vector.length; j--) {
assert vector[i] == 0 || vector[i] == 1;
result |= (byte) ((vector[i] & 1) << j);
++i;
}
int index = ((i + 7) / 8) - 1;
assert index < packed.length;
packed[index] = result;
}
}
}
| PackAsBinaryBenchmark |
java | apache__flink | flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/statemachine/kafka/EventDeSerializationSchema.java | {
"start": 1374,
"end": 2372
} | class ____
implements DeserializationSchema<Event>, SerializationSchema<Event> {
private static final long serialVersionUID = 1L;
@Override
public byte[] serialize(Event evt) {
ByteBuffer byteBuffer = ByteBuffer.allocate(8).order(ByteOrder.LITTLE_ENDIAN);
byteBuffer.putInt(0, evt.sourceAddress());
byteBuffer.putInt(4, evt.type().ordinal());
return byteBuffer.array();
}
@Override
public Event deserialize(byte[] message) throws IOException {
ByteBuffer buffer = ByteBuffer.wrap(message).order(ByteOrder.LITTLE_ENDIAN);
int address = buffer.getInt(0);
int typeOrdinal = buffer.getInt(4);
return new Event(EventType.values()[typeOrdinal], address);
}
@Override
public boolean isEndOfStream(Event nextElement) {
return false;
}
@Override
public TypeInformation<Event> getProducedType() {
return TypeInformation.of(Event.class);
}
}
| EventDeSerializationSchema |
java | google__guice | core/test/com/google/inject/BinderTestSuite.java | {
"start": 13810,
"end": 15931
} | class ____ extends TestCase {
final String name;
final Key<?> key;
final Class<? extends Injectable> injectsKey;
final ImmutableList<Module> modules;
final ImmutableList<Object> expectedValues;
public SuccessTest(Builder builder) {
super("test");
name = builder.name;
key = builder.key;
injectsKey = builder.injectsKey;
modules = ImmutableList.copyOf(builder.modules);
expectedValues = ImmutableList.copyOf(builder.expectedValues);
}
@Override
public String getName() {
return name;
}
Injector newInjector() {
nextId.set(101);
return Guice.createInjector(modules);
}
public void test() throws IllegalAccessException, InstantiationException {
Injector injector = newInjector();
nextId.set(201);
for (Object value : expectedValues) {
assertEquals(value, injector.getInstance(key));
}
Provider<?> provider = newInjector().getProvider(key);
nextId.set(201);
for (Object value : expectedValues) {
assertEquals(value, provider.get());
}
Provider<?> bindingProvider = newInjector().getBinding(key).getProvider();
nextId.set(201);
for (Object value : expectedValues) {
assertEquals(value, bindingProvider.get());
}
injector = newInjector();
nextId.set(201);
for (Object value : expectedValues) {
Injectable instance = injector.getInstance(injectsKey);
assertEquals(value, instance.value);
}
injector = newInjector();
nextId.set(201);
for (Object value : expectedValues) {
Injectable injectable = injectsKey.newInstance();
injector.injectMembers(injectable);
assertEquals(value, injectable.value);
}
Injector injector1 = newInjector();
nextId.set(201);
Injectable hasProvider = injector1.getInstance(injectsKey);
hasProvider.provider.get();
nextId.set(201);
for (Object value : expectedValues) {
assertEquals(value, hasProvider.provider.get());
}
}
}
public static | SuccessTest |
java | spring-cloud__spring-cloud-gateway | spring-cloud-gateway-server-webflux/src/test/java/org/springframework/cloud/gateway/test/RouteConstructionIntegrationTests.java | {
"start": 2020,
"end": 2190
} | class ____ {
private String arg1;
public String getArg1() {
return arg1;
}
public void setArg1(String arg1) {
this.arg1 = arg1;
}
}
}
}
| Config |
java | grpc__grpc-java | binder/src/main/java/io/grpc/binder/BinderServerBuilder.java | {
"start": 1337,
"end": 6135
} | class ____ extends ForwardingServerBuilder<BinderServerBuilder> {
/**
* Creates a server builder that will listen for bindings to the specified address.
*
* <p>The listening {@link IBinder} associated with new {@link Server}s will be stored in {@code
* binderReceiver} upon {@link #build()}. Callers should return it from {@link
* Service#onBind(Intent)} when the binding intent matches {@code listenAddress}.
*
* @param listenAddress an Android Service and binding Intent associated with this server.
* @param receiver an "out param" for the new {@link Server}'s listening {@link IBinder}
* @return a new builder
*/
public static BinderServerBuilder forAddress(
AndroidComponentAddress listenAddress, IBinderReceiver receiver) {
return new BinderServerBuilder(listenAddress, receiver);
}
/** Always fails. Call {@link #forAddress(AndroidComponentAddress, IBinderReceiver)} instead. */
@DoNotCall("Unsupported. Use forAddress() instead")
public static BinderServerBuilder forPort(int port) {
throw new UnsupportedOperationException("call forAddress() instead");
}
private final ServerImplBuilder serverImplBuilder;
private final BinderServer.Builder internalBuilder = new BinderServer.Builder();
private boolean isBuilt;
private BinderServerBuilder(
AndroidComponentAddress listenAddress, IBinderReceiver binderReceiver) {
internalBuilder.setListenAddress(listenAddress);
serverImplBuilder =
new ServerImplBuilder(
streamTracerFactories -> {
internalBuilder.setStreamTracerFactories(streamTracerFactories);
BinderServer server = internalBuilder.build();
BinderInternal.setIBinder(binderReceiver, server.getHostBinder());
return server;
});
// Disable stats and tracing by default.
serverImplBuilder.setStatsEnabled(false);
serverImplBuilder.setTracingEnabled(false);
}
@Override
protected ServerBuilder<?> delegate() {
return serverImplBuilder;
}
/** Enable stats collection using census. */
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/8022")
public BinderServerBuilder enableStats() {
serverImplBuilder.setStatsEnabled(true);
return this;
}
/** Enable tracing using census. */
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/8022")
public BinderServerBuilder enableTracing() {
serverImplBuilder.setTracingEnabled(true);
return this;
}
/**
* Provides a custom scheduled executor service.
*
* <p>It's an optional parameter. If the user has not provided a scheduled executor service when
* the channel is built, the builder will use a static cached thread pool.
*
* @return this
*/
public BinderServerBuilder scheduledExecutorService(
ScheduledExecutorService scheduledExecutorService) {
internalBuilder.setExecutorServicePool(
new FixedObjectPool<>(checkNotNull(scheduledExecutorService, "scheduledExecutorService")));
return this;
}
/**
* Provides a custom security policy.
*
* <p>This is optional. If the user has not provided a security policy, the server will default to
* only accepting calls from the same application UID.
*
* @return this
*/
public BinderServerBuilder securityPolicy(ServerSecurityPolicy securityPolicy) {
internalBuilder.setServerSecurityPolicy(securityPolicy);
return this;
}
/** Sets the policy for inbound parcelable objects. */
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/8022")
public BinderServerBuilder inboundParcelablePolicy(
InboundParcelablePolicy inboundParcelablePolicy) {
internalBuilder.setInboundParcelablePolicy(inboundParcelablePolicy);
return this;
}
/** Always fails. TLS is not supported in BinderServer. */
@Override
public BinderServerBuilder useTransportSecurity(File certChain, File privateKey) {
throw new UnsupportedOperationException("TLS not supported in BinderServer");
}
/**
* Builds a {@link Server} according to this builder's parameters and stores its listening {@link
* IBinder} in the {@link IBinderReceiver} passed to {@link #forAddress(AndroidComponentAddress,
* IBinderReceiver)}.
*
* @return the new Server
*/
@Override
public Server build() {
// Since we install a final interceptor here, we need to ensure we're only built once.
checkState(!isBuilt, "BinderServerBuilder can only be used to build one server instance.");
isBuilt = true;
// We install the security interceptor last, so it's closest to the transport.
BinderTransportSecurity.installAuthInterceptor(this);
internalBuilder.setExecutorPool(serverImplBuilder.getExecutorPool());
return super.build();
}
}
| BinderServerBuilder |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/Customer.java | {
"start": 752,
"end": 1899
} | class ____ implements Serializable {
Long id;
String name;
SortedSet<Ticket> tickets;
Collection<Discount> discountTickets;
Passport passport;
public Customer() {
}
@Id
@GeneratedValue
public Long getId() {
return id;
}
public String getName() {
return name;
}
public void setId(Long long1) {
id = long1;
}
public void setName(String string) {
name = string;
}
@OneToMany(cascade = CascadeType.ALL, fetch = FetchType.EAGER)
@JoinColumn(name = "CUST_ID")
@SortComparator(TicketComparator.class)
public SortedSet<Ticket> getTickets() {
return tickets;
}
public void setTickets(SortedSet<Ticket> tickets) {
this.tickets = tickets;
}
@OneToMany(targetEntity = Discount.class,
cascade = CascadeType.ALL, mappedBy = "owner")
@Cascade({ALL})
public Collection<Discount> getDiscountTickets() {
return discountTickets;
}
public void setDiscountTickets(Collection<Discount> collection) {
discountTickets = collection;
}
@OneToOne(cascade = CascadeType.ALL)
public Passport getPassport() {
return passport;
}
public void setPassport(Passport passport) {
this.passport = passport;
}
}
| Customer |
java | google__dagger | dagger-android/main/java/dagger/android/internal/AndroidInjectionKeys.java | {
"start": 835,
"end": 913
} | class ____ {
/**
* Accepts the fully qualified name of a | AndroidInjectionKeys |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/XJEndpointBuilderFactory.java | {
"start": 34672,
"end": 34969
} | class ____ extends AbstractEndpointBuilder implements XJEndpointBuilder, AdvancedXJEndpointBuilder {
public XJEndpointBuilderImpl(String path) {
super(componentName, path);
}
}
return new XJEndpointBuilderImpl(path);
}
} | XJEndpointBuilderImpl |
java | apache__maven | impl/maven-di/src/main/java/org/apache/maven/di/impl/Types.java | {
"start": 24709,
"end": 27278
} | class ____ implements GenericArrayType {
private final Type componentType;
GenericArrayTypeImpl(Type componentType) {
this.componentType = componentType;
}
@Override
public Type getGenericComponentType() {
return componentType;
}
@Override
public int hashCode() {
return componentType.hashCode();
}
@Override
public boolean equals(Object other) {
if (!(other instanceof GenericArrayType that)) {
return false;
}
return this.getGenericComponentType().equals(that.getGenericComponentType());
}
@Override
public String toString() {
return Types.toString(componentType) + "[]";
}
}
private static String toString(Type type) {
return type instanceof Class<?> clazz ? clazz.getName() : type.toString();
}
/**
* Returns a simple name for a given {@link Type}
*
* @see Class#getSimpleName()
*/
public static String getSimpleName(Type type) {
if (type instanceof Class<?> clazz) {
return clazz.getSimpleName();
} else if (type instanceof ParameterizedType parameterizedType) {
return Arrays.stream(parameterizedType.getActualTypeArguments())
.map(Types::getSimpleName)
.collect(joining(",", "<", ">"));
} else if (type instanceof WildcardType wildcardType) {
Type[] upperBounds = wildcardType.getUpperBounds();
Type[] lowerBounds = wildcardType.getLowerBounds();
return "?"
+ (upperBounds.length == 0
? ""
: " extends "
+ Arrays.stream(upperBounds)
.map(Types::getSimpleName)
.collect(joining(" & ")))
+ (lowerBounds.length == 0
? ""
: " super "
+ Arrays.stream(lowerBounds)
.map(Types::getSimpleName)
.collect(joining(" & ")));
} else if (type instanceof GenericArrayType genericArrayType) {
return Types.getSimpleName(genericArrayType.getGenericComponentType()) + "[]";
}
return type.getTypeName();
}
public static | GenericArrayTypeImpl |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/server/processor/src/main/java/org/jboss/resteasy/reactive/server/processor/generation/injection/TransformedFieldInjectionIndexerExtension.java | {
"start": 656,
"end": 2280
} | class ____ implements ServerEndpointIndexer.FieldInjectionIndexerExtension {
final BiConsumer<String, BiFunction<String, ClassVisitor, ClassVisitor>> transformations;
final boolean requireCreateBeanParams;
private final Consumer<InjectedClassConverterField> injectedClassConverterFieldConsumer;
public TransformedFieldInjectionIndexerExtension(
BiConsumer<String, BiFunction<String, ClassVisitor, ClassVisitor>> transformations, boolean requireCreateBeanParams,
Consumer<InjectedClassConverterField> injectedClassConverterFieldConsumer) {
this.transformations = transformations;
this.requireCreateBeanParams = requireCreateBeanParams;
this.injectedClassConverterFieldConsumer = injectedClassConverterFieldConsumer;
}
@Override
public void handleFieldInjection(String currentTypeName, Map<FieldInfo, ServerIndexedParameter> fieldExtractors,
boolean superTypeIsInjectable, IndexView indexView) {
for (Map.Entry<FieldInfo, ServerIndexedParameter> entry : fieldExtractors.entrySet()) {
if (entry.getValue().getConverter() != null) {
injectedClassConverterFieldConsumer
.accept(new InjectedClassConverterField(
ClassInjectorTransformer.INIT_CONVERTER_METHOD_NAME + entry.getKey().name(), currentTypeName));
}
}
transformations.accept(currentTypeName, new ClassInjectorTransformer(fieldExtractors, superTypeIsInjectable,
requireCreateBeanParams, indexView));
}
}
| TransformedFieldInjectionIndexerExtension |
java | resilience4j__resilience4j | resilience4j-metrics/src/test/java/io/github/resilience4j/metrics/StateTransitionMetricsTest.java | {
"start": 1283,
"end": 8359
} | class ____ {
@SuppressWarnings("rawtypes")
private static void circuitBreakerMetricsUsesFirstStateObjectInstance(
CircuitBreaker circuitBreaker, MetricRegistry metricRegistry) throws Exception {
SortedMap<String, Gauge> gauges = metricRegistry.getGauges();
assertThat(circuitBreaker.getState(), equalTo(CircuitBreaker.State.CLOSED));
assertThat(circuitBreaker.getMetrics().getNumberOfBufferedCalls(), equalTo(0));
assertThat(circuitBreaker.getMetrics().getNumberOfFailedCalls(), equalTo(0));
assertThat(circuitBreaker.getMetrics().getNumberOfSuccessfulCalls(), equalTo(0));
assertThat(gauges.get("resilience4j.circuitbreaker.test.state").getValue(), equalTo(0));
assertThat(gauges.get("resilience4j.circuitbreaker.test.buffered").getValue(), equalTo(0));
assertThat(gauges.get("resilience4j.circuitbreaker.test.failed").getValue(), equalTo(0));
assertThat(gauges.get("resilience4j.circuitbreaker.test.successful").getValue(),
equalTo(0));
circuitBreaker.onError(0, TimeUnit.NANOSECONDS, new RuntimeException());
assertThat(circuitBreaker.getState(), equalTo(CircuitBreaker.State.CLOSED));
assertThat(circuitBreaker.getMetrics().getNumberOfBufferedCalls(), equalTo(1));
assertThat(circuitBreaker.getMetrics().getNumberOfFailedCalls(), equalTo(1));
assertThat(circuitBreaker.getMetrics().getNumberOfSuccessfulCalls(), equalTo(0));
assertThat(gauges.get("resilience4j.circuitbreaker.test.state").getValue(), equalTo(0));
assertThat(gauges.get("resilience4j.circuitbreaker.test.buffered").getValue(), equalTo(1));
assertThat(gauges.get("resilience4j.circuitbreaker.test.failed").getValue(), equalTo(1));
assertThat(gauges.get("resilience4j.circuitbreaker.test.successful").getValue(),
equalTo(0));
for (int i = 0; i < 9; i++) {
circuitBreaker.onError(0, TimeUnit.NANOSECONDS, new RuntimeException());
}
assertThat(circuitBreaker.getState(), equalTo(CircuitBreaker.State.OPEN));
assertThat(circuitBreaker.getMetrics().getNumberOfBufferedCalls(), equalTo(10));
assertThat(circuitBreaker.getMetrics().getNumberOfFailedCalls(), equalTo(10));
assertThat(circuitBreaker.getMetrics().getNumberOfSuccessfulCalls(), equalTo(0));
assertThat(gauges.get("resilience4j.circuitbreaker.test.state").getValue(), equalTo(1));
assertThat(gauges.get("resilience4j.circuitbreaker.test.buffered").getValue(), equalTo(10));
assertThat(gauges.get("resilience4j.circuitbreaker.test.failed").getValue(), equalTo(10));
assertThat(gauges.get("resilience4j.circuitbreaker.test.successful").getValue(),
equalTo(0));
await().atMost(1500, TimeUnit.MILLISECONDS)
.until(() -> {
circuitBreaker.tryAcquirePermission();
return circuitBreaker.getState().equals(CircuitBreaker.State.HALF_OPEN);
});
circuitBreaker.onSuccess(0, TimeUnit.NANOSECONDS);
assertThat(circuitBreaker.getState(), equalTo(CircuitBreaker.State.HALF_OPEN));
assertThat(circuitBreaker.getMetrics().getNumberOfBufferedCalls(), equalTo(1));
assertThat(circuitBreaker.getMetrics().getNumberOfFailedCalls(), equalTo(0));
assertThat(circuitBreaker.getMetrics().getNumberOfSuccessfulCalls(), equalTo(1));
assertThat(gauges.get("resilience4j.circuitbreaker.test.state").getValue(), equalTo(2));
assertThat(gauges.get("resilience4j.circuitbreaker.test.buffered").getValue(), equalTo(1));
assertThat(gauges.get("resilience4j.circuitbreaker.test.failed").getValue(), equalTo(0));
assertThat(gauges.get("resilience4j.circuitbreaker.test.successful").getValue(),
equalTo(1));
circuitBreaker.onSuccess(0, TimeUnit.NANOSECONDS);
assertThat(circuitBreaker.getState(), equalTo(CircuitBreaker.State.HALF_OPEN));
assertThat(circuitBreaker.getMetrics().getNumberOfBufferedCalls(), equalTo(2));
assertThat(circuitBreaker.getMetrics().getNumberOfFailedCalls(), equalTo(0));
assertThat(circuitBreaker.getMetrics().getNumberOfSuccessfulCalls(), equalTo(2));
assertThat(gauges.get("resilience4j.circuitbreaker.test.state").getValue(), equalTo(2));
assertThat(gauges.get("resilience4j.circuitbreaker.test.buffered").getValue(), equalTo(2));
assertThat(gauges.get("resilience4j.circuitbreaker.test.failed").getValue(), equalTo(0));
assertThat(gauges.get("resilience4j.circuitbreaker.test.successful").getValue(),
equalTo(2));
circuitBreaker.onSuccess(0, TimeUnit.NANOSECONDS);
assertThat(circuitBreaker.getState(), equalTo(CircuitBreaker.State.CLOSED));
assertThat(circuitBreaker.getMetrics().getNumberOfBufferedCalls(), equalTo(0));
assertThat(circuitBreaker.getMetrics().getNumberOfFailedCalls(), equalTo(0));
assertThat(circuitBreaker.getMetrics().getNumberOfSuccessfulCalls(), equalTo(0));
assertThat(gauges.get("resilience4j.circuitbreaker.test.state").getValue(), equalTo(0));
assertThat(gauges.get("resilience4j.circuitbreaker.test.buffered").getValue(), equalTo(0));
assertThat(gauges.get("resilience4j.circuitbreaker.test.failed").getValue(), equalTo(0));
assertThat(gauges.get("resilience4j.circuitbreaker.test.successful").getValue(),
equalTo(0));
}
@Test
public void testWithCircuitBreakerMetrics() throws Exception {
CircuitBreakerConfig config =
CircuitBreakerConfig.custom()
.waitDurationInOpenState(Duration.ofMillis(150))
.failureRateThreshold(50)
.permittedNumberOfCallsInHalfOpenState(3)
.slidingWindowSize(10)
.build();
CircuitBreaker circuitBreaker = CircuitBreakerRegistry.ofDefaults()
.circuitBreaker("test", config);
MetricRegistry metricRegistry = new MetricRegistry();
metricRegistry.registerAll(CircuitBreakerMetrics.ofCircuitBreaker(circuitBreaker));
circuitBreakerMetricsUsesFirstStateObjectInstance(circuitBreaker, metricRegistry);
}
@Test
public void testWithCircuitBreakerMetricsPublisher() throws Exception {
CircuitBreakerConfig config =
CircuitBreakerConfig.custom()
.waitDurationInOpenState(Duration.ofSeconds(1))
.failureRateThreshold(50)
.permittedNumberOfCallsInHalfOpenState(3)
.slidingWindowSize(10)
.build();
MetricRegistry metricRegistry = new MetricRegistry();
CircuitBreakerRegistry circuitBreakerRegistry = CircuitBreakerRegistry
.of(config, new CircuitBreakerMetricsPublisher(metricRegistry));
CircuitBreaker circuitBreaker = circuitBreakerRegistry.circuitBreaker("test", config);
circuitBreakerMetricsUsesFirstStateObjectInstance(circuitBreaker, metricRegistry);
}
}
| StateTransitionMetricsTest |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/ContextDataInjector.java | {
"start": 1410,
"end": 2646
} | interface ____ provided as it is
* difficult to do. Instead, provide a custom ContextDataProvider.</b></p>
* <p>
* <p>
* The source of the context data is implementation-specific. The default source for context data is the ThreadContext.
* </p><p>
* In some asynchronous models, work may be delegated to several threads, while conceptually this work shares the same
* context. In such models, storing context data in {@code ThreadLocal} variables is not convenient or desirable.
* Users can configure the {@code ContextDataInjectorFactory} to provide custom {@code ContextDataInjector} objects,
* in order to initialize log events with context data from any arbitrary context.
* </p><p>
* When providing a custom {@code ContextDataInjector}, be aware that the {@code ContextDataInjectorFactory} may be
* invoked multiple times and the various components in Log4j that need access to context data may each have their own
* instance of {@code ContextDataInjector}.
* This includes the object(s) that populate log events, but also various lookups and filters that look at
* context data to determine whether an event should be logged.
* </p><p>
* Implementors should take particular note of how the different methods in the | be |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/test/checkpointing/UnalignedCheckpointRescaleITCase.java | {
"start": 23833,
"end": 24501
} | class ____
extends KeyedCoProcessFunction<Long, Long, Long, Long> {
private static final long serialVersionUID = 1L;
TestKeyedCoProcessFunction() {}
@Override
public void processElement1(Long value, Context ctx, Collector<Long> out)
throws Exception {
out.collect(checkHeader(value));
}
@Override
public void processElement2(Long value, Context ctx, Collector<Long> out)
throws Exception {
out.collect(checkHeader(value));
}
}
private static | TestKeyedCoProcessFunction |
java | spring-projects__spring-boot | module/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/endpoint/web/annotation/WebEndpointDiscoverer.java | {
"start": 5334,
"end": 5624
} | class ____ implements RuntimeHintsRegistrar {
@Override
public void registerHints(RuntimeHints hints, @Nullable ClassLoader classLoader) {
hints.reflection().registerType(WebEndpointFilter.class, MemberCategory.INVOKE_DECLARED_CONSTRUCTORS);
}
}
}
| WebEndpointDiscovererRuntimeHints |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java | {
"start": 22201,
"end": 23334
} | class ____
extends CompositeConstraint<TimedPlacementConstraint> {
private List<TimedPlacementConstraint> children = new ArrayList<>();
public DelayedOr(List<TimedPlacementConstraint> children) {
this.children = children;
}
public DelayedOr(TimedPlacementConstraint... children) {
this(Arrays.asList(children));
}
@Override
public List<TimedPlacementConstraint> getChildren() {
return children;
}
@Override
public <T> T accept(Visitor<T> visitor) {
return visitor.visit(this);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("DelayedOr(");
Iterator<TimedPlacementConstraint> it = getChildren().iterator();
while (it.hasNext()) {
TimedPlacementConstraint child = it.next();
sb.append(child.toString());
if (it.hasNext()) {
sb.append(",");
}
}
sb.append(")");
return sb.toString();
}
}
/**
* Represents a timed placement constraint that has to be satisfied within a
* time window.
*/
public static | DelayedOr |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/http/server/reactive/ServletServerHttpResponse.java | {
"start": 9137,
"end": 9661
} | class ____ implements WriteListener {
@Override
public void onWritePossible() {
ResponseBodyProcessor processor = bodyProcessor;
if (processor != null) {
processor.onWritePossible();
}
else {
ResponseBodyFlushProcessor flushProcessor = bodyFlushProcessor;
if (flushProcessor != null) {
flushProcessor.onFlushPossible();
}
}
}
@Override
public void onError(Throwable ex) {
ServletServerHttpResponse.this.asyncListener.handleError(ex);
}
}
private | ResponseBodyWriteListener |
java | quarkusio__quarkus | extensions/qute/deployment/src/test/java/io/quarkus/qute/deployment/TemplateDataTest.java | {
"start": 2052,
"end": 2671
} | class ____ {
public final BigDecimal val;
public Foo(BigDecimal val) {
this.val = val;
}
public boolean hasBar() {
return true;
}
public boolean isBaz() {
return false;
}
public static List<String> alphas() {
return List.of("1", "2");
}
public static List<Integer> alphas(int n) {
return IntStream.range(0, n).mapToObj(Integer::valueOf).toList();
}
}
// namespace is TransactionType
@TemplateData(namespace = TemplateData.SIMPLENAME)
public static | Foo |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/functions/aggfunctions/FirstValueAggFunctionWithoutOrderTest.java | {
"start": 3884,
"end": 4364
} | class ____
extends NumberFirstValueAggFunctionWithoutOrderTest<Long> {
@Override
protected Long getValue(String v) {
return Long.valueOf(v);
}
@Override
protected AggregateFunction<Long, RowData> getAggregator() {
return new FirstValueAggFunction<>(DataTypes.BIGINT().getLogicalType());
}
}
/** Test for {@link FloatType}. */
@Nested
final | LongFirstValueAggFunctionWithoutOrderTest |
java | elastic__elasticsearch | x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregator.java | {
"start": 8873,
"end": 10357
} | class ____ extends MetricValues {
protected final BigArrays bigArrays;
protected final ValuesSourceConfig config;
CollectingMetricValues(BigArrays bigArrays, String name, ValuesSourceConfig config) {
super(name);
this.bigArrays = bigArrays;
this.config = config;
}
@Override
public final boolean needsScores() {
return config.getValuesSource().needsScores();
}
}
static MetricValues buildMetricValues(
ValuesSourceRegistry registry,
BigArrays bigArrays,
int size,
String name,
ValuesSourceConfig config
) {
if (false == config.hasValues()) {
// `config` doesn't have the name if the
return new AlwaysNullMetricValues(name);
}
MetricValuesSupplier supplier = registry.getAggregator(REGISTRY_KEY, config);
return supplier.build(size, bigArrays, name, config);
}
static MetricValues buildNumericMetricValues(int size, BigArrays bigArrays, String name, ValuesSourceConfig config) {
ValuesSource.Numeric numeric = (ValuesSource.Numeric) config.getValuesSource();
if (numeric.isFloatingPoint()) {
return new DoubleMetricValues(size, bigArrays, name, config);
}
return new LongMetricValues(size, bigArrays, name, config);
}
/**
* Loads metrics for floating point numbers.
*/
static | CollectingMetricValues |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/interop/ImmutablesTypeSerializationTest.java | {
"start": 7009,
"end": 7818
} | class ____<T>
implements ImmutablesTypeSerializationTest.Key<T> {
T id;
@JsonProperty("id")
public void setId(T id) {
this.id = id;
}
@Override
public T getId() { throw new UnsupportedOperationException(); }
}
@JsonCreator(mode = JsonCreator.Mode.DELEGATING)
static <T> ImmutableKey<T> fromJson(ImmutableKey.Json<T> json) {
ImmutableKey.Builder<T> builder = ImmutableKey.<T>builder();
if (json.id != null) {
builder.id(json.id);
}
return builder.build();
}
public static <T> ImmutableKey.Builder<T> builder() {
return new ImmutableKey.Builder<>();
}
public static final | Json |
java | apache__hadoop | hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TrileanTests.java | {
"start": 1194,
"end": 3588
} | class ____ {
private static final String TRUE_STR = "true";
private static final String FALSE_STR = "false";
@Test
public void testGetTrileanForBoolean() {
assertThat(Trilean.getTrilean(true)).describedAs(
"getTrilean should return Trilean.TRUE when true is passed")
.isEqualTo(Trilean.TRUE);
assertThat(Trilean.getTrilean(false)).describedAs(
"getTrilean should return Trilean.FALSE when false is passed")
.isEqualTo(Trilean.FALSE);
}
@Test
public void testGetTrileanForString() {
assertThat(Trilean.getTrilean(TRUE_STR.toLowerCase())).describedAs(
"getTrilean should return Trilean.TRUE when true is passed")
.isEqualTo(Trilean.TRUE);
assertThat(Trilean.getTrilean(TRUE_STR.toUpperCase())).describedAs(
"getTrilean should return Trilean.TRUE when TRUE is passed")
.isEqualTo(Trilean.TRUE);
assertThat(Trilean.getTrilean(FALSE_STR.toLowerCase())).describedAs(
"getTrilean should return Trilean.FALSE when false is passed")
.isEqualTo(Trilean.FALSE);
assertThat(Trilean.getTrilean(FALSE_STR.toUpperCase())).describedAs(
"getTrilean should return Trilean.FALSE when FALSE is passed")
.isEqualTo(Trilean.FALSE);
testInvalidString(null);
testInvalidString(" ");
testInvalidString("invalid");
testInvalidString("truee");
testInvalidString("falsee");
}
private void testInvalidString(String invalidString) {
assertThat(Trilean.getTrilean(invalidString)).describedAs(
"getTrilean should return Trilean.UNKNOWN for anything not true/false")
.isEqualTo(Trilean.UNKNOWN);
}
@Test
public void testToBoolean() throws TrileanConversionException {
assertThat(Trilean.TRUE.toBoolean())
.describedAs("toBoolean should return true for Trilean.TRUE").isTrue();
assertThat(Trilean.FALSE.toBoolean())
.describedAs("toBoolean should return false for Trilean.FALSE")
.isFalse();
assertThat(catchThrowable(() -> Trilean.UNKNOWN.toBoolean())).describedAs(
"toBoolean on Trilean.UNKNOWN results in TrileanConversionException")
.isInstanceOf(TrileanConversionException.class).describedAs(
"Exception message should be: catchThrowable(()->Trilean.UNKNOWN"
+ ".toBoolean())")
.hasMessage("Cannot convert Trilean.UNKNOWN to boolean");
}
}
| TrileanTests |
java | grpc__grpc-java | xds/src/main/java/io/grpc/xds/Filter.java | {
"start": 1564,
"end": 4453
} | interface ____ {
/**
* The proto message types supported by this filter. A filter will be registered by each of its
* supported message types.
*/
String[] typeUrls();
/**
* Whether the filter can be installed on the client side.
*
* <p>Returns true if the filter implements {@link Filter#buildClientInterceptor}.
*/
default boolean isClientFilter() {
return false;
}
/**
* Whether the filter can be installed into xDS-enabled servers.
*
* <p>Returns true if the filter implements {@link Filter#buildServerInterceptor}.
*/
default boolean isServerFilter() {
return false;
}
/**
* Creates a new instance of the filter.
*
* <p>Returns a filter instance registered with the same typeUrls as the provider,
* capable of working with the same FilterConfig type returned by provider's parse functions.
*
* <p>For xDS gRPC clients, new filter instances are created per combination of:
* <ol>
* <li><code>XdsNameResolver</code> instance,</li>
* <li>Filter name+typeUrl in HttpConnectionManager (HCM) http_filters.</li>
* </ol>
*
* <p>For xDS-enabled gRPC servers, new filter instances are created per combination of:
* <ol>
* <li>Server instance,</li>
* <li>FilterChain name,</li>
* <li>Filter name+typeUrl in FilterChain's HCM.http_filters.</li>
* </ol>
*/
Filter newInstance(String name);
/**
* Parses the top-level filter config from raw proto message. The message may be either a {@link
* com.google.protobuf.Any} or a {@link com.google.protobuf.Struct}.
*/
ConfigOrError<? extends FilterConfig> parseFilterConfig(Message rawProtoMessage);
/**
* Parses the per-filter override filter config from raw proto message. The message may be
* either a {@link com.google.protobuf.Any} or a {@link com.google.protobuf.Struct}.
*/
ConfigOrError<? extends FilterConfig> parseFilterConfigOverride(Message rawProtoMessage);
}
/** Uses the FilterConfigs produced above to produce an HTTP filter interceptor for clients. */
@Nullable
default ClientInterceptor buildClientInterceptor(
FilterConfig config, @Nullable FilterConfig overrideConfig,
ScheduledExecutorService scheduler) {
return null;
}
/** Uses the FilterConfigs produced above to produce an HTTP filter interceptor for the server. */
@Nullable
default ServerInterceptor buildServerInterceptor(
FilterConfig config, @Nullable FilterConfig overrideConfig) {
return null;
}
/**
* Releases filter resources like shared resources and remote connections.
*
* <p>See {@link Provider#newInstance()} for details on filter instance creation.
*/
@Override
default void close() {}
/** Filter config with instance name. */
final | Provider |
java | redisson__redisson | redisson/src/test/java/org/redisson/RedissonLiveObjectServiceTest.java | {
"start": 57738,
"end": 60447
} | class ____ implements Serializable {
@RId(generator = LongGenerator.class)
private Long id;
private Long myId;
private String name;
public MyObject() {
}
public MyObject(Long myId) {
super();
this.myId = myId;
}
public Long getMyId() {
return myId;
}
public Long getId() {
return id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
@Test
public void test() {
RLiveObjectService service = redisson.getLiveObjectService();
MyObject object = new MyObject(20L);
try {
service.attach(object);
} catch (Exception e) {
assertEquals("Non-null value is required for the field with RId annotation.", e.getMessage());
}
}
@Test
public void testExpirable() {
testWithParams(redisson -> {
RLiveObjectService service = redisson.getLiveObjectService();
TestIndexed myObject = new TestIndexed("123");
myObject = service.persist(myObject);
myObject.setName1("123345");
myObject.setNum1(455);
myObject.setColl(Arrays.asList(1L, 2L));
assertThat(redisson.getKeys().count()).isEqualTo(6);
assertTrue(service.asLiveObject(myObject).isExists());
service.asRMap(myObject).expire(Duration.ofSeconds(1));
try {
Thread.sleep(2000);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
assertThat(redisson.getKeys().count()).isZero();
}, NOTIFY_KEYSPACE_EVENTS, "KEA");
}
@Test
public void testMap() {
RLiveObjectService service = redisson.getLiveObjectService();
TestClass myObject = new TestClass();
myObject = service.persist(myObject);
myObject.setValue("123345");
assertEquals("123345", service.asRMap(myObject).get("value"));
service.asRMap(myObject).put("value", "9999");
assertEquals("9999", myObject.getValue());
}
@Test
public void testRObject() {
RLiveObjectService service = redisson.getLiveObjectService();
TestClass myObject = new TestClass();
myObject = service.persist(myObject);
try {
((RObject) myObject).isExists();
} catch (Exception e) {
assertEquals("Please use RLiveObjectService instance for this type of functions", e.getMessage());
}
}
@REntity
public static | MyObject |
java | apache__maven | impl/maven-testing/src/main/java/org/apache/maven/api/plugin/testing/stubs/PluginStub.java | {
"start": 1786,
"end": 3571
} | class ____ implements Plugin {
org.apache.maven.api.model.Plugin model;
PluginDescriptor descriptor;
List<Lifecycle> lifecycles = Collections.emptyList();
ClassLoader classLoader;
Artifact artifact;
List<Dependency> dependencies = Collections.emptyList();
Map<String, Dependency> dependenciesMap = Collections.emptyMap();
@Override
public org.apache.maven.api.model.Plugin getModel() {
return model;
}
public void setModel(org.apache.maven.api.model.Plugin model) {
this.model = model;
}
@Override
public PluginDescriptor getDescriptor() {
return descriptor;
}
public void setDescriptor(PluginDescriptor descriptor) {
this.descriptor = descriptor;
}
@Override
public List<Lifecycle> getLifecycles() {
return lifecycles;
}
public void setLifecycles(List<Lifecycle> lifecycles) {
this.lifecycles = lifecycles;
}
@Override
public ClassLoader getClassLoader() {
return classLoader;
}
public void setClassLoader(ClassLoader classLoader) {
this.classLoader = classLoader;
}
@Override
public Artifact getArtifact() {
return artifact;
}
public void setArtifact(Artifact artifact) {
this.artifact = artifact;
}
@Override
public List<Dependency> getDependencies() {
return dependencies;
}
public void setDependencies(List<Dependency> dependencies) {
this.dependencies = dependencies;
}
@Override
public Map<String, Dependency> getDependenciesMap() {
return dependenciesMap;
}
public void setDependenciesMap(Map<String, Dependency> dependenciesMap) {
this.dependenciesMap = dependenciesMap;
}
}
| PluginStub |
java | apache__spark | common/utils-java/src/main/java/org/apache/spark/api/java/function/VoidFunction2.java | {
"start": 1001,
"end": 1102
} | interface ____<T1, T2> extends Serializable {
void call(T1 v1, T2 v2) throws Exception;
}
| VoidFunction2 |
java | netty__netty | jfr-stub/src/main/java/jdk/jfr/EventSettings.java | {
"start": 661,
"end": 813
} | class ____ {
public EventSettings() {
throw new UnsupportedOperationException("Stub should only be used at compile time");
}
}
| EventSettings |
java | apache__camel | dsl/camel-kamelet-main/src/main/java/org/apache/camel/main/download/BasePackageScanDownloadListener.java | {
"start": 6549,
"end": 7240
} | class ____ previous downloads
String fqn = clazz.getName();
if (scanned.contains(fqn)) {
continue;
} else {
scanned.add(fqn);
}
LOG.debug("Discovered Quarkus @ApplicationScoped/@Singleton class: {}", clazz);
// @Named can dictate the name of the bean
String name = null;
var ann = AnnotationHelper.getAnnotationValue(clazz, "javax.inject.Named");
if (ann != null) {
name = ann;
}
if (name == null || name.isBlank()) {
name = clazz.getSimpleName();
// lower case first if using | from |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/VolumeCheckResult.java | {
"start": 1138,
"end": 1339
} | enum ____ {
HEALTHY(1),
DEGRADED(2),
FAILED(3);
private final int value;
VolumeCheckResult(int value) {
this.value = value;
}
int getValue() {
return value;
}
}
| VolumeCheckResult |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/cluster/coordination/PendingClusterStateStats.java | {
"start": 2164,
"end": 2563
} | class ____ {
static final String QUEUE = "cluster_state_queue";
static final String TOTAL = "total";
static final String PENDING = "pending";
static final String COMMITTED = "committed";
}
@Override
public String toString() {
return "PendingClusterStateStats(total=" + total + ", pending=" + pending + ", committed=" + committed + ")";
}
}
| Fields |
java | spring-projects__spring-security | oauth2/oauth2-authorization-server/src/test/java/org/springframework/security/oauth2/server/authorization/JdbcOAuth2AuthorizationConsentServiceTests.java | {
"start": 2518,
"end": 12268
} | class ____ {
private static final String OAUTH2_AUTHORIZATION_CONSENT_SCHEMA_SQL_RESOURCE = "org/springframework/security/oauth2/server/authorization/oauth2-authorization-consent-schema.sql";
private static final String CUSTOM_OAUTH2_AUTHORIZATION_CONSENT_SCHEMA_SQL_RESOURCE = "org/springframework/security/oauth2/server/authorization/custom-oauth2-authorization-consent-schema.sql";
private static final String PRINCIPAL_NAME = "principal-name";
private static final RegisteredClient REGISTERED_CLIENT = TestRegisteredClients.registeredClient().build();
private static final OAuth2AuthorizationConsent AUTHORIZATION_CONSENT = OAuth2AuthorizationConsent
.withId(REGISTERED_CLIENT.getId(), PRINCIPAL_NAME)
.authority(new SimpleGrantedAuthority("SCOPE_scope1"))
.authority(new SimpleGrantedAuthority("SCOPE_scope2"))
.authority(new SimpleGrantedAuthority("SCOPE_scope3"))
.authority(new SimpleGrantedAuthority("authority-a"))
.authority(new SimpleGrantedAuthority("authority-b"))
.build();
private EmbeddedDatabase db;
private JdbcOperations jdbcOperations;
private RegisteredClientRepository registeredClientRepository;
private JdbcOAuth2AuthorizationConsentService authorizationConsentService;
@BeforeEach
public void setUp() {
this.db = createDb();
this.jdbcOperations = new JdbcTemplate(this.db);
this.registeredClientRepository = mock(RegisteredClientRepository.class);
this.authorizationConsentService = new JdbcOAuth2AuthorizationConsentService(this.jdbcOperations,
this.registeredClientRepository);
}
@AfterEach
public void tearDown() {
this.db.shutdown();
}
@Test
public void constructorWhenJdbcOperationsIsNullThenThrowIllegalArgumentException() {
// @formatter:off
assertThatExceptionOfType(IllegalArgumentException.class).isThrownBy(() -> new JdbcOAuth2AuthorizationConsentService(null, this.registeredClientRepository))
.withMessage("jdbcOperations cannot be null");
// @formatter:on
}
@Test
public void constructorWhenRegisteredClientRepositoryIsNullThenThrowIllegalArgumentException() {
// @formatter:off
assertThatExceptionOfType(IllegalArgumentException.class).isThrownBy(() -> new JdbcOAuth2AuthorizationConsentService(this.jdbcOperations, null))
.withMessage("registeredClientRepository cannot be null");
// @formatter:on
}
@Test
public void setAuthorizationConsentRowMapperWhenNullThenThrowIllegalArgumentException() {
// @formatter:off
assertThatExceptionOfType(IllegalArgumentException.class).isThrownBy(() -> this.authorizationConsentService.setAuthorizationConsentRowMapper(null))
.withMessage("authorizationConsentRowMapper cannot be null");
// @formatter:on
}
@Test
public void setAuthorizationConsentParametersMapperWhenNullThenThrowIllegalArgumentException() {
// @formatter:off
assertThatExceptionOfType(IllegalArgumentException.class).isThrownBy(() -> this.authorizationConsentService.setAuthorizationConsentParametersMapper(null))
.withMessage("authorizationConsentParametersMapper cannot be null");
// @formatter:on
}
@Test
public void saveWhenAuthorizationConsentNullThenThrowIllegalArgumentException() {
// @formatter:off
assertThatIllegalArgumentException()
.isThrownBy(() -> this.authorizationConsentService.save(null))
.withMessage("authorizationConsent cannot be null");
// @formatter:on
}
@Test
public void saveWhenAuthorizationConsentNewThenSaved() {
OAuth2AuthorizationConsent expectedAuthorizationConsent = OAuth2AuthorizationConsent
.withId("new-client", "new-principal")
.authority(new SimpleGrantedAuthority("new.authority"))
.build();
RegisteredClient newRegisteredClient = TestRegisteredClients.registeredClient().id("new-client").build();
given(this.registeredClientRepository.findById(eq(newRegisteredClient.getId())))
.willReturn(newRegisteredClient);
this.authorizationConsentService.save(expectedAuthorizationConsent);
OAuth2AuthorizationConsent authorizationConsent = this.authorizationConsentService.findById("new-client",
"new-principal");
assertThat(authorizationConsent).isEqualTo(expectedAuthorizationConsent);
}
@Test
public void saveWhenAuthorizationConsentExistsThenUpdated() {
OAuth2AuthorizationConsent expectedAuthorizationConsent = OAuth2AuthorizationConsent.from(AUTHORIZATION_CONSENT)
.authority(new SimpleGrantedAuthority("new.authority"))
.build();
given(this.registeredClientRepository.findById(eq(REGISTERED_CLIENT.getId()))).willReturn(REGISTERED_CLIENT);
this.authorizationConsentService.save(expectedAuthorizationConsent);
OAuth2AuthorizationConsent authorizationConsent = this.authorizationConsentService
.findById(AUTHORIZATION_CONSENT.getRegisteredClientId(), AUTHORIZATION_CONSENT.getPrincipalName());
assertThat(authorizationConsent).isEqualTo(expectedAuthorizationConsent);
assertThat(authorizationConsent).isNotEqualTo(AUTHORIZATION_CONSENT);
}
@Test
public void saveLoadAuthorizationConsentWhenCustomStrategiesSetThenCalled() throws Exception {
given(this.registeredClientRepository.findById(eq(REGISTERED_CLIENT.getId()))).willReturn(REGISTERED_CLIENT);
JdbcOAuth2AuthorizationConsentService.OAuth2AuthorizationConsentRowMapper authorizationConsentRowMapper = spy(
new JdbcOAuth2AuthorizationConsentService.OAuth2AuthorizationConsentRowMapper(
this.registeredClientRepository));
this.authorizationConsentService.setAuthorizationConsentRowMapper(authorizationConsentRowMapper);
JdbcOAuth2AuthorizationConsentService.OAuth2AuthorizationConsentParametersMapper authorizationConsentParametersMapper = spy(
new JdbcOAuth2AuthorizationConsentService.OAuth2AuthorizationConsentParametersMapper());
this.authorizationConsentService.setAuthorizationConsentParametersMapper(authorizationConsentParametersMapper);
this.authorizationConsentService.save(AUTHORIZATION_CONSENT);
OAuth2AuthorizationConsent authorizationConsent = this.authorizationConsentService
.findById(AUTHORIZATION_CONSENT.getRegisteredClientId(), AUTHORIZATION_CONSENT.getPrincipalName());
assertThat(authorizationConsent).isEqualTo(AUTHORIZATION_CONSENT);
verify(authorizationConsentRowMapper).mapRow(any(), anyInt());
verify(authorizationConsentParametersMapper).apply(any());
}
@Test
public void removeWhenAuthorizationConsentNullThenThrowIllegalArgumentException() {
assertThatIllegalArgumentException().isThrownBy(() -> this.authorizationConsentService.remove(null))
.withMessage("authorizationConsent cannot be null");
}
@Test
public void removeWhenAuthorizationConsentProvidedThenRemoved() {
this.authorizationConsentService.remove(AUTHORIZATION_CONSENT);
assertThat(this.authorizationConsentService.findById(AUTHORIZATION_CONSENT.getRegisteredClientId(),
AUTHORIZATION_CONSENT.getPrincipalName()))
.isNull();
}
@Test
public void findByIdWhenRegisteredClientIdNullThenThrowIllegalArgumentException() {
assertThatIllegalArgumentException()
.isThrownBy(() -> this.authorizationConsentService.findById(null, "some-user"))
.withMessage("registeredClientId cannot be empty");
}
@Test
public void findByIdWhenPrincipalNameNullThenThrowIllegalArgumentException() {
assertThatIllegalArgumentException()
.isThrownBy(() -> this.authorizationConsentService.findById("some-client", null))
.withMessage("principalName cannot be empty");
}
@Test
public void findByIdWhenAuthorizationConsentExistsThenFound() {
given(this.registeredClientRepository.findById(eq(REGISTERED_CLIENT.getId()))).willReturn(REGISTERED_CLIENT);
this.authorizationConsentService.save(AUTHORIZATION_CONSENT);
OAuth2AuthorizationConsent authorizationConsent = this.authorizationConsentService
.findById(AUTHORIZATION_CONSENT.getRegisteredClientId(), AUTHORIZATION_CONSENT.getPrincipalName());
assertThat(authorizationConsent).isNotNull();
}
@Test
public void findByIdWhenAuthorizationConsentDoesNotExistThenNull() {
this.authorizationConsentService.save(AUTHORIZATION_CONSENT);
assertThat(this.authorizationConsentService.findById("unknown-client", PRINCIPAL_NAME)).isNull();
assertThat(this.authorizationConsentService.findById(REGISTERED_CLIENT.getId(), "unknown-user")).isNull();
}
@Test
public void tableDefinitionWhenCustomThenAbleToOverride() {
given(this.registeredClientRepository.findById(eq(REGISTERED_CLIENT.getId()))).willReturn(REGISTERED_CLIENT);
EmbeddedDatabase db = createDb(CUSTOM_OAUTH2_AUTHORIZATION_CONSENT_SCHEMA_SQL_RESOURCE);
OAuth2AuthorizationConsentService authorizationConsentService = new CustomJdbcOAuth2AuthorizationConsentService(
new JdbcTemplate(db), this.registeredClientRepository);
authorizationConsentService.save(AUTHORIZATION_CONSENT);
OAuth2AuthorizationConsent foundAuthorizationConsent1 = authorizationConsentService
.findById(AUTHORIZATION_CONSENT.getRegisteredClientId(), AUTHORIZATION_CONSENT.getPrincipalName());
assertThat(foundAuthorizationConsent1).isEqualTo(AUTHORIZATION_CONSENT);
authorizationConsentService.remove(AUTHORIZATION_CONSENT);
OAuth2AuthorizationConsent foundAuthorizationConsent2 = authorizationConsentService
.findById(AUTHORIZATION_CONSENT.getRegisteredClientId(), AUTHORIZATION_CONSENT.getPrincipalName());
assertThat(foundAuthorizationConsent2).isNull();
db.shutdown();
}
private static EmbeddedDatabase createDb() {
return createDb(OAUTH2_AUTHORIZATION_CONSENT_SCHEMA_SQL_RESOURCE);
}
private static EmbeddedDatabase createDb(String schema) {
// @formatter:off
return new EmbeddedDatabaseBuilder()
.generateUniqueName(true)
.setType(EmbeddedDatabaseType.HSQL)
.setScriptEncoding("UTF-8")
.addScript(schema)
.build();
// @formatter:on
}
private static final | JdbcOAuth2AuthorizationConsentServiceTests |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/derivedidentities/e3/b3/EmployeeId.java | {
"start": 286,
"end": 447
} | class ____ implements Serializable {
@Column(name="firstname", length = 32)
String firstName;
@Column(name="lastname", length = 32)
String lastName;
}
| EmployeeId |
java | elastic__elasticsearch | x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/TemplateDeprecationCheckerTests.java | {
"start": 1010,
"end": 10176
} | class ____ extends ESTestCase {
private final TemplateDeprecationChecker checker = new TemplateDeprecationChecker();
public void testCheckSourceModeInComponentTemplates() throws IOException {
Template template = Template.builder().mappings(CompressedXContent.fromJSON("""
{ "_doc": { "_source": { "mode": "stored"} } }""")).build();
ComponentTemplate componentTemplate = new ComponentTemplate(template, 1L, new HashMap<>());
Template template2 = Template.builder().mappings(CompressedXContent.fromJSON("""
{ "_doc": { "_source": { "enabled": false} } }""")).build();
ComponentTemplate componentTemplate2 = new ComponentTemplate(template2, 1L, new HashMap<>());
ProjectMetadata project = ProjectMetadata.builder(randomProjectIdOrDefault())
.componentTemplates(
Map.of("my-template-1", componentTemplate, "my-template-2", componentTemplate, "my-template-3", componentTemplate2)
)
.build();
Map<String, List<DeprecationIssue>> issuesByComponentTemplate = checker.check(project);
final DeprecationIssue expected = new DeprecationIssue(
DeprecationIssue.Level.CRITICAL,
SourceFieldMapper.DEPRECATION_WARNING_TITLE,
"https://ela.st/migrate-source-mode",
SourceFieldMapper.DEPRECATION_WARNING,
false,
null
);
assertThat(issuesByComponentTemplate.get("my-template-1"), hasItem(expected));
assertThat(issuesByComponentTemplate.get("my-template-2"), hasItem(expected));
assertThat(issuesByComponentTemplate.containsKey("my-template-3"), is(false));
}
public void testCheckLegacyTiersInComponentTemplates() {
String setting = "index.routing.allocation." + randomFrom("include", "require", "exclude") + ".data";
Template template = Template.builder().settings(Settings.builder().put(setting, "hot").build()).build();
ComponentTemplate componentTemplate = new ComponentTemplate(template, 1L, new HashMap<>());
Template template2 = Template.builder()
.settings(Settings.builder().put("index.routing.allocation.require.data", randomAlphaOfLength(10)).build())
.build();
ComponentTemplate componentTemplate2 = new ComponentTemplate(template2, 1L, new HashMap<>());
ProjectMetadata project = ProjectMetadata.builder(randomProjectIdOrDefault())
.componentTemplates(
Map.of("my-template-1", componentTemplate, "my-template-2", componentTemplate, "my-template-3", componentTemplate2)
)
.build();
Map<String, List<DeprecationIssue>> issuesByComponentTemplate = checker.check(project);
final DeprecationIssue expected = new DeprecationIssue(
DeprecationIssue.Level.WARNING,
"Configuring tiers via filtered allocation is not recommended.",
"https://ela.st/migrate-to-tiers",
"One or more of your component templates is configured with 'index.routing.allocation.*.data' settings."
+ " This is typically used to create a hot/warm or tiered architecture, based on legacy guidelines."
+ " Data tiers are a recommended replacement for tiered architecture clusters.",
false,
DeprecationIssue.createMetaMapForRemovableSettings(List.of(setting))
);
assertThat(issuesByComponentTemplate.get("my-template-1"), hasItem(expected));
assertThat(issuesByComponentTemplate.get("my-template-2"), hasItem(expected));
assertThat(issuesByComponentTemplate.containsKey("my-template-3"), is(false));
}
public void testCheckLegacyTierSettings() {
String setting = "index.routing.allocation." + randomFrom("include", "require", "exclude") + ".data";
Template template = Template.builder().settings(Settings.builder().put(setting, "hot").build()).build();
Template template2 = Template.builder()
.settings(Settings.builder().put("index.routing.allocation.require.data", randomAlphaOfLength(10)).build())
.build();
ProjectMetadata project = ProjectMetadata.builder(randomProjectIdOrDefault())
.indexTemplates(
Map.of(
"my-template-1",
ComposableIndexTemplate.builder().template(template).indexPatterns(List.of(randomAlphaOfLength(10))).build(),
"my-template-2",
ComposableIndexTemplate.builder().template(template).indexPatterns(List.of(randomAlphaOfLength(10))).build(),
"my-template-3",
ComposableIndexTemplate.builder().template(template2).indexPatterns(List.of(randomAlphaOfLength(10))).build()
)
)
.build();
Map<String, List<DeprecationIssue>> issuesByComponentTemplate = checker.check(project);
final DeprecationIssue expected = new DeprecationIssue(
DeprecationIssue.Level.WARNING,
"Configuring tiers via filtered allocation is not recommended.",
"https://ela.st/migrate-to-tiers",
"One or more of your index templates is configured with 'index.routing.allocation.*.data' settings."
+ " This is typically used to create a hot/warm or tiered architecture, based on legacy guidelines."
+ " Data tiers are a recommended replacement for tiered architecture clusters.",
false,
DeprecationIssue.createMetaMapForRemovableSettings(List.of(setting))
);
assertThat(issuesByComponentTemplate.get("my-template-1"), hasItem(expected));
assertThat(issuesByComponentTemplate.get("my-template-2"), hasItem(expected));
assertThat(issuesByComponentTemplate.containsKey("my-template-3"), is(false));
}
public void testComponentAndComposableTemplateWithSameName() {
String setting = "index.routing.allocation." + randomFrom("include", "require", "exclude") + ".data";
Template template = Template.builder().settings(Settings.builder().put(setting, "hot").build()).build();
Template template2 = Template.builder()
.settings(Settings.builder().put("index.routing.allocation.require.data", randomAlphaOfLength(10)).build())
.build();
ComponentTemplate componentTemplate = new ComponentTemplate(template, 1L, new HashMap<>());
ProjectMetadata project = ProjectMetadata.builder(randomProjectIdOrDefault())
.componentTemplates(Map.of("my-template-1", componentTemplate))
.indexTemplates(
Map.of(
"my-template-1",
ComposableIndexTemplate.builder().template(template).indexPatterns(List.of(randomAlphaOfLength(10))).build(),
"my-template-2",
ComposableIndexTemplate.builder().template(template).indexPatterns(List.of(randomAlphaOfLength(10))).build(),
"my-template-3",
ComposableIndexTemplate.builder().template(template2).indexPatterns(List.of(randomAlphaOfLength(10))).build()
)
)
.build();
Map<String, List<DeprecationIssue>> issuesByComponentTemplate = checker.check(project);
final DeprecationIssue expectedIndexTemplateIssue = new DeprecationIssue(
DeprecationIssue.Level.WARNING,
"Configuring tiers via filtered allocation is not recommended.",
"https://ela.st/migrate-to-tiers",
"One or more of your index templates is configured with 'index.routing.allocation.*.data' settings."
+ " This is typically used to create a hot/warm or tiered architecture, based on legacy guidelines."
+ " Data tiers are a recommended replacement for tiered architecture clusters.",
false,
DeprecationIssue.createMetaMapForRemovableSettings(List.of(setting))
);
final DeprecationIssue expectedComponentTemplateIssue = new DeprecationIssue(
DeprecationIssue.Level.WARNING,
"Configuring tiers via filtered allocation is not recommended.",
"https://ela.st/migrate-to-tiers",
"One or more of your component templates is configured with 'index.routing.allocation.*.data' settings."
+ " This is typically used to create a hot/warm or tiered architecture, based on legacy guidelines."
+ " Data tiers are a recommended replacement for tiered architecture clusters.",
false,
DeprecationIssue.createMetaMapForRemovableSettings(List.of(setting))
);
assertThat(issuesByComponentTemplate.get("my-template-1"), hasItem(expectedIndexTemplateIssue));
assertThat(issuesByComponentTemplate.get("my-template-1"), hasItem(expectedComponentTemplateIssue));
assertThat(issuesByComponentTemplate.get("my-template-2"), hasItem(expectedIndexTemplateIssue));
assertThat(issuesByComponentTemplate.containsKey("my-template-3"), is(false));
}
}
| TemplateDeprecationCheckerTests |
java | apache__flink | flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/hadoop/mapred/wrapper/HadoopInputSplitTest.java | {
"start": 4156,
"end": 6042
} | class ____ implements InputSplit {
private Path file;
private long start;
private long length;
private String[] hosts;
public FileSplit() {}
private FileSplit(Path file, long start, long length, String[] hosts) {
this.file = file;
this.start = start;
this.length = length;
this.hosts = hosts;
}
@Override
public long getLength() throws IOException {
return length;
}
@Override
public String[] getLocations() throws IOException {
return hosts;
}
@Override
public void write(DataOutput out) throws IOException {
out.writeUTF(file.toString());
out.writeLong(start);
out.writeLong(length);
out.writeInt(hosts.length);
for (String host : hosts) {
out.writeUTF(host);
}
}
@Override
public void readFields(DataInput in) throws IOException {
file = new Path(in.readUTF());
start = in.readLong();
length = in.readLong();
int size = in.readInt();
hosts = new String[size];
for (int i = 0; i < size; i++) {
hosts[i] = in.readUTF();
}
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
FileSplit fileSplit = (FileSplit) o;
return start == fileSplit.start
&& length == fileSplit.length
&& Objects.equals(file, fileSplit.file)
&& Arrays.equals(hosts, fileSplit.hosts);
}
}
private static | FileSplit |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerImpl.java | {
"start": 1570,
"end": 4000
} | class ____ extends
ApplicationHistoryStoreTestUtils {
private ApplicationHistoryManagerImpl applicationHistoryManagerImpl = null;
@BeforeEach
public void setup() throws Exception {
Configuration config = new Configuration();
config.setClass(YarnConfiguration.APPLICATION_HISTORY_STORE,
MemoryApplicationHistoryStore.class, ApplicationHistoryStore.class);
applicationHistoryManagerImpl = new ApplicationHistoryManagerImpl();
applicationHistoryManagerImpl.init(config);
applicationHistoryManagerImpl.start();
store = applicationHistoryManagerImpl.getHistoryStore();
}
@AfterEach
public void tearDown() throws Exception {
applicationHistoryManagerImpl.stop();
}
@Test
void testApplicationReport() throws IOException, YarnException {
ApplicationId appId = null;
appId = ApplicationId.newInstance(0, 1);
writeApplicationStartData(appId);
writeApplicationFinishData(appId);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
writeApplicationAttemptStartData(appAttemptId);
writeApplicationAttemptFinishData(appAttemptId);
ApplicationReport appReport =
applicationHistoryManagerImpl.getApplication(appId);
assertNotNull(appReport);
assertEquals(appId, appReport.getApplicationId());
assertEquals(appAttemptId,
appReport.getCurrentApplicationAttemptId());
assertEquals(appAttemptId.toString(), appReport.getHost());
assertEquals("test type", appReport.getApplicationType().toString());
assertEquals("test queue", appReport.getQueue().toString());
}
@Test
void testApplications() throws IOException {
ApplicationId appId1 = ApplicationId.newInstance(0, 1);
ApplicationId appId2 = ApplicationId.newInstance(0, 2);
ApplicationId appId3 = ApplicationId.newInstance(0, 3);
writeApplicationStartData(appId1, 1000);
writeApplicationFinishData(appId1);
writeApplicationStartData(appId2, 3000);
writeApplicationFinishData(appId2);
writeApplicationStartData(appId3, 4000);
writeApplicationFinishData(appId3);
Map<ApplicationId, ApplicationReport> reports =
applicationHistoryManagerImpl.getApplications(2, 2000L, 5000L);
assertNotNull(reports);
assertEquals(2, reports.size());
assertNull(reports.get("1"));
assertNull(reports.get("2"));
assertNull(reports.get("3"));
}
}
| TestApplicationHistoryManagerImpl |
java | mockito__mockito | mockito-core/src/main/java/org/mockito/internal/creation/bytebuddy/ByteBuddyCrossClassLoaderSerializationSupport.java | {
"start": 12550,
"end": 12715
} | class ____ it is marked as such.
* <p/>
* <p>Uses the fields {@link #typeToMock} and {@link #extraInterfaces} to
* create the Mockito proxy | if |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/MockIllegalThrowsTest.java | {
"start": 2231,
"end": 2828
} | class ____ extends Exception {}
abstract Object foo() throws SpecificException;
void test(Test t) throws Exception {
// BUG: Diagnostic contains: are SpecificException, or any unchecked
when(t.foo()).thenThrow(new Exception());
}
}
""")
.doTest();
}
@Test
public void negative_exceptionTypeViaParameter() {
compilationHelper
.addSourceLines(
"Test.java",
"""
import static org.mockito.Mockito.when;
abstract | SpecificException |
java | quarkusio__quarkus | independent-projects/bootstrap/app-model/src/test/java/io/quarkus/bootstrap/model/PlatformInfoTest.java | {
"start": 299,
"end": 2141
} | class ____ {
@Test
public void emptyIsAligned() throws Exception {
assertTrue(new PlatformInfo("p")
.isAligned(Collections.singletonList(GACTV.fromString("io.playground:playground-bom::pom:1.1.1"))));
}
@Test
public void singleStreamIsAligned() throws Exception {
final PlatformInfo platform = new PlatformInfo("p");
final PlatformStreamInfo stream = platform.getOrCreateStream("1.1");
stream.addIfNotPresent("1", () -> new PlatformReleaseInfo("io.playground", "playground-bom", "1.1",
"io.playground:playground-bom::pom:1.1.1,org.acme:acme-bom::pom:2.2.2,com.foo:foo-bom::pom:3.3.3"));
stream.addIfNotPresent("2", () -> new PlatformReleaseInfo("io.playground", "playground-bom", "1.1",
"io.playground:playground-bom::pom:1.1.2,org.acme:acme-bom::pom:2.2.3,com.foo:foo-bom::pom:3.3.3"));
assertTrue(platform.isAligned(Arrays.asList(GACTV.fromString("io.playground:playground-bom::pom:1.1.1"),
GACTV.fromString("org.acme:acme-bom::pom:2.2.2"))));
assertTrue(platform.isAligned(Arrays.asList(GACTV.fromString("io.playground:playground-bom::pom:1.1.2"),
GACTV.fromString("org.acme:acme-bom::pom:2.2.3"))));
assertFalse(platform.isAligned(Arrays.asList(GACTV.fromString("io.playground:playground-bom::pom:1.1.2"),
GACTV.fromString("org.acme:acme-bom::pom:2.2.2"))));
}
@Test
public void multipleStreamsAreNotAligned() throws Exception {
final PlatformInfo platform = new PlatformInfo("p");
platform.getOrCreateStream("1.1");
platform.getOrCreateStream("1.2");
assertFalse(platform
.isAligned(Collections.singletonList(GACTV.fromString("io.playground:playground-bom::pom:1.1.1"))));
}
}
| PlatformInfoTest |
java | spring-projects__spring-boot | module/spring-boot-resttestclient/src/test/java/org/springframework/boot/resttestclient/autoconfigure/TestRestTemplateAutoConfigurationTests.java | {
"start": 1379,
"end": 2625
} | class ____ {
private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner()
.withConfiguration(AutoConfigurations.of(TestRestTemplateAutoConfiguration.class));
@Test
void shouldFailTotRegisterTestRestTemplateWithoutWebServer() {
this.contextRunner.run((context) -> assertThat(context).hasFailed()
.getFailure()
.hasMessageContaining(" No local test web server available"));
}
@Test
@WithResource(name = "META-INF/spring.factories",
content = """
org.springframework.boot.test.http.server.LocalTestWebServer$Provider=\
org.springframework.boot.resttestclient.autoconfigure.TestRestTemplateAutoConfigurationTests$TestLocalTestWebServerProvider
""")
void shouldDefineTestRestTemplateBoundToWebServer() {
this.contextRunner.run((context) -> {
assertThat(context).hasSingleBean(TestRestTemplate.class)
.hasBean("org.springframework.boot.resttestclient.TestRestTemplate");
TestRestTemplate testRestTemplate = context.getBean(TestRestTemplate.class);
assertThat(testRestTemplate.getRestTemplate().getUriTemplateHandler().expand("/"))
.isEqualTo(URI.create("https://localhost:8182/"));
});
}
@SuppressWarnings("unused")
static | TestRestTemplateAutoConfigurationTests |
java | quarkusio__quarkus | extensions/panache/rest-data-panache/deployment/src/main/java/io/quarkus/rest/data/panache/deployment/methods/hal/ListHalMethodImplementor.java | {
"start": 1005,
"end": 4073
} | class ____ extends ListMethodImplementor {
private static final String METHOD_NAME = "listHal";
public ListHalMethodImplementor(Capabilities capabilities) {
super(capabilities);
}
/**
* Implement a method if it is exposed and hal is enabled.
*/
@Override
public void implement(ClassCreator classCreator, ResourceMetadata resourceMetadata,
ResourceProperties resourceProperties, FieldDescriptor resourceField) {
if (resourceProperties.isHal() && resourceProperties.isExposed(getResourceMethodName())) {
implementInternal(classCreator, resourceMetadata, resourceProperties, resourceField);
}
}
@Override
public String getMethodName() {
return METHOD_NAME;
}
@Override
protected void addProducesJsonAnnotation(AnnotatedElement element, ResourceProperties properties) {
super.addProducesAnnotation(element, APPLICATION_HAL_JSON);
}
@Override
protected void returnValueWithLinks(BytecodeCreator creator, ResourceMetadata resourceMetadata,
ResourceProperties resourceProperties, ResultHandle value, ResultHandle links) {
ResultHandle wrapper = wrapHalEntities(creator, value, resourceMetadata.getEntityType(),
resourceProperties.getHalCollectionName());
creator.invokeVirtualMethod(
ofMethod(HalCollectionWrapper.class, "addLinks", void.class, Link[].class), wrapper, links);
creator.returnValue(responseImplementor.ok(creator, wrapper, links));
}
@Override
protected void returnValue(BytecodeCreator creator, ResourceMetadata resourceMetadata,
ResourceProperties resourceProperties, ResultHandle value) {
ResultHandle wrapper = wrapHalEntities(creator, value, resourceMetadata.getEntityType(),
resourceProperties.getHalCollectionName());
creator.returnValue(responseImplementor.ok(creator, wrapper));
}
private ResultHandle wrapHalEntities(BytecodeCreator creator, ResultHandle entities, String entityType,
String collectionName) {
ResultHandle arcContainer = creator.invokeStaticMethod(ofMethod(Arc.class, "container", ArcContainer.class));
ResultHandle instanceHandle = creator.invokeInterfaceMethod(
ofMethod(ArcContainer.class, "instance", InstanceHandle.class, Class.class, Annotation[].class),
arcContainer,
creator.loadClassFromTCCL(ResteasyReactiveHalService.class),
creator.newArray(Annotation.class, 0));
ResultHandle halService = creator.invokeInterfaceMethod(
ofMethod(InstanceHandle.class, "get", Object.class), instanceHandle);
return creator.invokeVirtualMethod(MethodDescriptor.ofMethod(HalService.class, "toHalCollectionWrapper",
HalCollectionWrapper.class, Collection.class, String.class, Class.class),
halService, entities, creator.load(collectionName), creator.loadClassFromTCCL(entityType));
}
}
| ListHalMethodImplementor |
java | google__dagger | javatests/artifacts/dagger-ksp/java-app/src/main/java/app/SimpleComponentClasses.java | {
"start": 1103,
"end": 1186
} | class ____ {
ScopedProvidedFoo() {}
}
@Module
static final | ScopedProvidedFoo |
java | junit-team__junit5 | junit-platform-console/src/main/java/org/junit/platform/console/command/ExecuteTestsCommand.java | {
"start": 3271,
"end": 3895
} | class ____ {
@Option(names = "--fail-if-no-tests", description = "Fail and return exit status code 2 if no tests are found.")
private boolean failIfNoTests;
/**
* @since 6.0
*/
@Option(names = "--fail-fast", description = "Stops test execution after the first failed test.")
private boolean failFast;
@Option(names = "--reports-dir", paramLabel = "DIR", description = "Enable report output into a specified local directory (will be created if it does not exist).")
private @Nullable Path reportsDir;
Optional<Path> getReportsDir() {
return Optional.ofNullable(reportsDir);
}
}
}
| ReportingOptions |
java | apache__flink | flink-table/flink-sql-parser/src/main/java/org/apache/flink/sql/parser/ddl/SqlAlterViewProperties.java | {
"start": 1377,
"end": 2154
} | class ____ extends SqlAlterView {
private final SqlNodeList propertyList;
public SqlAlterViewProperties(
SqlParserPos pos, SqlIdentifier viewName, SqlNodeList propertyList) {
super(pos, viewName);
this.propertyList = requireNonNull(propertyList, "propertyList should not be null");
}
@Override
public List<SqlNode> getOperandList() {
return ImmutableNullableList.of(name, propertyList);
}
public Map<String, String> getProperties() {
return SqlParseUtils.extractMap(propertyList);
}
@Override
public void unparseAlterOperation(SqlWriter writer, int leftPrec, int rightPrec) {
SqlUnparseUtils.unparseSetOptions(propertyList, writer, leftPrec, rightPrec);
}
}
| SqlAlterViewProperties |
java | spring-projects__spring-framework | framework-docs/src/main/java/org/springframework/docs/testing/mockmvc/assertj/mockmvctestersetup/AccountControllerStandaloneTests.java | {
"start": 797,
"end": 951
} | class ____ {
private final MockMvcTester mockMvc = MockMvcTester.of(new AccountController());
// ...
}
// end::snippet[]
| AccountControllerStandaloneTests |
java | spring-cloud__spring-cloud-gateway | spring-cloud-gateway-server-webmvc/src/test/java/org/springframework/cloud/gateway/server/mvc/handler/DefaultRouteFunctionHandlerTests.java | {
"start": 2930,
"end": 3320
} | class ____ {
static boolean consumerInvoked;
@Bean
Function<String, String> upper() {
return s -> s.toUpperCase(Locale.ROOT);
}
@Bean
Consumer<String> consume() {
return s -> {
consumerInvoked = false;
assertThat(s).isEqualTo("hello");
consumerInvoked = true;
};
}
@Bean
Supplier<String> hello() {
return () -> "hello";
}
}
}
| TestConfiguration |
java | elastic__elasticsearch | modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/ShouldRetryDecorator.java | {
"start": 1607,
"end": 3390
} | interface ____<R> {
boolean shouldRetry(Throwable prevThrowable, R prevResponse, ResultRetryAlgorithm<R> delegate);
}
/**
* @param delegate The delegate {@link StorageRetryStrategy}
* @param shouldRetryDecorator The function to call for shouldRetry for idempotent and non-idempotent requests
*/
@SuppressWarnings("unchecked")
private ShouldRetryDecorator(StorageRetryStrategy delegate, Decorator<T> shouldRetryDecorator) {
this.idempotentRetryAlgorithm = new DelegatingResultRetryAlgorithm<>(
(ResultRetryAlgorithm<T>) delegate.getIdempotentHandler(),
shouldRetryDecorator
);
this.nonIdempotentRetryAlgorithm = new DelegatingResultRetryAlgorithm<>(
(ResultRetryAlgorithm<T>) delegate.getNonidempotentHandler(),
shouldRetryDecorator
);
}
@Override
public ResultRetryAlgorithm<?> getIdempotentHandler() {
return idempotentRetryAlgorithm;
}
@Override
public ResultRetryAlgorithm<?> getNonidempotentHandler() {
return nonIdempotentRetryAlgorithm;
}
private record DelegatingResultRetryAlgorithm<R>(ResultRetryAlgorithm<R> delegate, Decorator<R> shouldRetryDecorator)
implements
ResultRetryAlgorithm<R> {
@Override
public TimedAttemptSettings createNextAttempt(Throwable prevThrowable, R prevResponse, TimedAttemptSettings prevSettings) {
return delegate.createNextAttempt(prevThrowable, prevResponse, prevSettings);
}
@Override
public boolean shouldRetry(Throwable prevThrowable, R prevResponse) throws CancellationException {
return shouldRetryDecorator.shouldRetry(prevThrowable, prevResponse, delegate);
}
}
}
| Decorator |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/api/parallel/ResourceLockAnnotationTests.java | {
"start": 17031,
"end": 17247
} | class ____ implements ResourceLocksProvider {
@Override
public Set<Lock> provideForClass(Class<?> testClass) {
return Set.of(new Lock("a3", ResourceAccessMode.READ));
}
}
static | FirstClassLevelProvider |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/ComposedMessageProcessorTest.java | {
"start": 5424,
"end": 5874
} | class ____ {
public void checkInventory(@Body OrderItem orderItem) {
assertEquals("gadget", orderItem.type);
if (orderItem.quantity < 20) {
orderItem.valid = true;
}
}
}
// END SNIPPET: e6
/**
* Aggregation strategy that re-assembles the validated OrderItems into an order, which is just a List.
*/
// START SNIPPET: e7
public static final | GadgetInventory |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/api/HamcrestCondition.java | {
"start": 1419,
"end": 2773
} | class ____<T> extends Condition<T> {
private Matcher<? extends T> matcher;
/**
* Constructs a {@link Condition} using the matcher given as a parameter.
*
* @param matcher the Hamcrest matcher to use as a condition
*/
public HamcrestCondition(Matcher<? extends T> matcher) {
this.matcher = matcher;
as(describeMatcher());
}
/**
* Constructs a {@link Condition} using the matcher given as a parameter.
* <p>
* Example:
* <pre><code class='java'> import static org.assertj.core.api.Assertions.assertThat;
* import static org.assertj.core.api.HamcrestCondition.matching;
* import static org.hamcrest.core.StringContains.containsString;
*
* assertThat("abc").is(matching(containsString("a")));</code></pre>
*
* @param <T> the type the condition is about
* @param matcher the Hamcrest matcher to use as a condition
* @return the built {@link HamcrestCondition}
*/
public static <T> HamcrestCondition<T> matching(Matcher<? extends T> matcher) {
return new HamcrestCondition<>(matcher);
}
/**
* {@inheritDoc}
*/
@Override
public boolean matches(T value) {
return matcher.matches(value);
}
private String describeMatcher() {
Description d = new StringDescription();
matcher.describeTo(d);
return d.toString();
}
}
| HamcrestCondition |
java | apache__camel | components/camel-opentelemetry-metrics/src/test/java/org/apache/camel/opentelemetry/metrics/eventnotifier/OpenTelemetryRouteEventNotifierTest.java | {
"start": 1535,
"end": 3138
} | class ____ extends AbstractOpenTelemetryTest {
@Override
protected CamelContext createCamelContext() throws Exception {
CamelContext context = super.createCamelContext();
OpenTelemetryRouteEventNotifier ren = new OpenTelemetryRouteEventNotifier();
ren.setMeter(otelExtension.getOpenTelemetry().getMeter("meterTest"));
context.getManagementStrategy().addEventNotifier(ren);
ren.init();
return context;
}
@Test
public void testCamelRouteEvents() throws Exception {
verifyMetric(DEFAULT_CAMEL_ROUTES_ADDED, 1L);
verifyMetric(DEFAULT_CAMEL_ROUTES_RUNNING, 1L);
context.getRouteController().stopRoute("test");
verifyMetric(DEFAULT_CAMEL_ROUTES_ADDED, 1L);
verifyMetric(DEFAULT_CAMEL_ROUTES_RUNNING, 0L);
context.removeRoute("test");
verifyMetric(DEFAULT_CAMEL_ROUTES_ADDED, 0L);
verifyMetric(DEFAULT_CAMEL_ROUTES_RUNNING, 0L);
}
private void verifyMetric(String metricName, long expected) {
List<PointData> ls = getAllPointData(metricName);
assertEquals(1, ls.size(), "Expected one point data");
PointData pd = ls.get(0);
assertInstanceOf(LongPointData.class, pd);
assertEquals(expected, ((LongPointData) pd).getValue());
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:in").routeId("test").to("mock:out");
}
};
}
}
| OpenTelemetryRouteEventNotifierTest |
java | spring-projects__spring-framework | spring-webflux/src/test/java/org/springframework/web/reactive/result/method/annotation/RequestMappingVersionIntegrationTests.java | {
"start": 3926,
"end": 4322
} | class ____ {
@GetMapping
String noVersion() {
return getBody("none");
}
@GetMapping(version = "1.2+")
String version1_2(Version version) {
assertThat(version).isNotNull();
return getBody("1.2");
}
@GetMapping(version = "1.5")
String version1_5() {
return getBody("1.5");
}
private static String getBody(String version) {
return version;
}
}
}
| TestController |
java | grpc__grpc-java | grpclb/src/main/java/io/grpc/grpclb/SubchannelPool.java | {
"start": 1911,
"end": 2340
} | interface ____ {
/**
* Handles a state change on a Subchannel. The behavior is similar to {@link
* io.grpc.LoadBalancer.SubchannelStateListener}.
*
* <p>When a subchannel is reused, subchannel state change event will be triggered even if the
* underlying status remains same.
*/
void onSubchannelState(Subchannel subchannel, ConnectivityStateInfo newState);
}
}
| PooledSubchannelStateListener |
java | spring-projects__spring-security | config/src/main/java/org/springframework/security/config/http/OAuth2LoginBeanDefinitionParser.java | {
"start": 20716,
"end": 22000
} | class ____ implements ApplicationContextAware {
private ApplicationContext context;
@Override
public void setApplicationContext(ApplicationContext context) throws BeansException {
this.context = context;
}
@SuppressWarnings({ "unchecked", "unused" })
Map<String, String> getLoginLinks() {
Iterable<ClientRegistration> clientRegistrations = null;
ClientRegistrationRepository clientRegistrationRepository = this.context
.getBean(ClientRegistrationRepository.class);
ResolvableType type = ResolvableType.forInstance(clientRegistrationRepository).as(Iterable.class);
if (type != ResolvableType.NONE && ClientRegistration.class.isAssignableFrom(type.resolveGenerics()[0])) {
clientRegistrations = (Iterable<ClientRegistration>) clientRegistrationRepository;
}
if (clientRegistrations == null) {
return Collections.emptyMap();
}
String authorizationRequestBaseUri = DEFAULT_AUTHORIZATION_REQUEST_BASE_URI;
Map<String, String> loginUrlToClientName = new HashMap<>();
clientRegistrations.forEach((registration) -> loginUrlToClientName.put(
authorizationRequestBaseUri + "/" + registration.getRegistrationId(),
registration.getClientName()));
return loginUrlToClientName;
}
}
@Deprecated
static | OAuth2LoginBeanConfig |
java | quarkusio__quarkus | extensions/arc/deployment/src/test/java/io/quarkus/arc/test/profile/IfBuildProfileStereotypeTest.java | {
"start": 3255,
"end": 3352
} | class ____ implements MyService {
}
@InheritableDevOnly
static abstract | DevOnlyMyService |
java | apache__commons-lang | src/test/java/org/apache/commons/lang3/FunctionsTest.java | {
"start": 35343,
"end": 35811
} | interface ____ properly defined to throw any exception. using the top level generic types
* Object and Throwable.
*/
@Test
void testThrows_FailableConsumer_Object_Throwable() {
new Functions.FailableConsumer<Object, Throwable>() {
@Override
public void accept(final Object object) throws Throwable {
throw new IOException("test");
}
};
}
/**
* Tests that our failable | is |
java | redisson__redisson | redisson/src/main/java/org/redisson/api/RMultimapCacheRx.java | {
"start": 858,
"end": 1384
} | interface ____<K, V> {
/**
* Set a timeout for key. After the timeout has expired, the key and its values will automatically be deleted.
*
* @param key - map key
* @param timeToLive - timeout before key will be deleted
* @param timeUnit - timeout time unit
* @return A Single that will emit <code>true</code> if key exists and the timeout was set and <code>false</code>
* if key not exists
*/
Single<Boolean> expireKey(K key, long timeToLive, TimeUnit timeUnit);
}
| RMultimapCacheRx |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/eval/EvalMethodTest_substring_3.java | {
"start": 185,
"end": 420
} | class ____ extends TestCase {
public void test_method() throws Exception {
assertEquals("aki",
SQLEvalVisitorUtils.evalExpr(JdbcConstants.MYSQL, "SUBSTRING('Sakila', -5, 3)"));
}
}
| EvalMethodTest_substring_3 |
java | alibaba__nacos | common/src/test/java/com/alibaba/nacos/common/tls/TlsFileWatcherTest.java | {
"start": 1891,
"end": 6543
} | class ____ {
static Field watchFilesMapField;
static Field fileMd5MapField;
static Field serviceField;
static Field startedField;
File tempFile;
@Mock
ScheduledExecutorService executorService;
@BeforeAll
static void setUpBeforeClass()
throws NoSuchFieldException, IllegalAccessException, NoSuchMethodException, InvocationTargetException {
watchFilesMapField = TlsFileWatcher.getInstance().getClass().getDeclaredField("watchFilesMap");
watchFilesMapField.setAccessible(true);
fileMd5MapField = TlsFileWatcher.getInstance().getClass().getDeclaredField("fileMd5Map");
fileMd5MapField.setAccessible(true);
serviceField = TlsFileWatcher.getInstance().getClass().getDeclaredField("service");
serviceField.setAccessible(true);
startedField = TlsFileWatcher.getInstance().getClass().getDeclaredField("started");
startedField.setAccessible(true);
}
@BeforeEach
void setUp() throws IOException, IllegalAccessException {
tempFile = new File("test.txt");
tempFile.createNewFile();
serviceField.set(TlsFileWatcher.getInstance(), executorService);
startedField.set(TlsFileWatcher.getInstance(), new AtomicBoolean(false));
Answer<?> answer = invocationOnMock -> {
Runnable runnable = (Runnable) invocationOnMock.getArguments()[0];
runnable.run();
return null;
};
doAnswer(answer).when(executorService).scheduleAtFixedRate(any(), anyLong(), anyLong(), any());
}
@AfterEach
void tearDown() throws IllegalAccessException {
((Map<?, ?>) watchFilesMapField.get(TlsFileWatcher.getInstance())).clear();
((Map<?, ?>) fileMd5MapField.get(TlsFileWatcher.getInstance())).clear();
tempFile.deleteOnExit();
}
@Test
void testAddFileChangeListener1() throws IOException, IllegalAccessException {
TlsFileWatcher.getInstance().addFileChangeListener(filePath -> {
}, "not/exist/path");
assertTrue(((Map<?, ?>) watchFilesMapField.get(TlsFileWatcher.getInstance())).isEmpty());
assertTrue(((Map<?, ?>) fileMd5MapField.get(TlsFileWatcher.getInstance())).isEmpty());
}
@Test
void testAddFileChangeListener2() throws IOException, IllegalAccessException {
TlsFileWatcher.getInstance().addFileChangeListener(filePath -> {
}, (String) null);
assertTrue(((Map<?, ?>) watchFilesMapField.get(TlsFileWatcher.getInstance())).isEmpty());
assertTrue(((Map<?, ?>) fileMd5MapField.get(TlsFileWatcher.getInstance())).isEmpty());
}
@Test
void testAddFileChangeListener3() throws IOException, IllegalAccessException {
TlsFileWatcher.getInstance().addFileChangeListener(filePath -> {
}, tempFile.getPath());
assertEquals(1, ((Map<?, ?>) watchFilesMapField.get(TlsFileWatcher.getInstance())).size());
assertEquals(1, ((Map<?, ?>) fileMd5MapField.get(TlsFileWatcher.getInstance())).size());
}
@Test
void testStartGivenTlsFileNotChangeThenNoNotify() throws IllegalAccessException, InterruptedException, IOException {
// given
AtomicBoolean notified = new AtomicBoolean(false);
TlsFileWatcher.getInstance().addFileChangeListener(filePath -> notified.set(true), tempFile.getPath());
// when
TlsFileWatcher.getInstance().start();
// then
assertFalse(notified.get());
}
@Test
void testStartGivenTlsFileChangeThenNotifyTheChangeFilePath() throws IllegalAccessException, IOException {
// given
AtomicBoolean notified = new AtomicBoolean(false);
AtomicReference<String> changedFilePath = new AtomicReference<>();
TlsFileWatcher.getInstance().addFileChangeListener(filePath -> {
notified.set(true);
changedFilePath.set(filePath);
}, tempFile.getPath());
((Map<String, String>) fileMd5MapField.get(TlsFileWatcher.getInstance())).put("test.txt", "");
// when
TlsFileWatcher.getInstance().start();
// then
assertTrue(notified.get());
assertEquals("test.txt", changedFilePath.get());
}
@Test
void testStartGivenTaskIsAlreadyRunThenNotRunAgain() {
TlsFileWatcher.getInstance().start();
TlsFileWatcher.getInstance().start();
verify(executorService, times(1)).scheduleAtFixedRate(any(), anyLong(), anyLong(), any());
}
} | TlsFileWatcherTest |
java | quarkusio__quarkus | extensions/hibernate-envers/deployment/src/test/java/io/quarkus/hibernate/orm/envers/config/EnversRevisionListenerTestCase.java | {
"start": 526,
"end": 1250
} | class ____ {
@RegisterExtension
static QuarkusUnitTest runner = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(MyAuditedEntity.class, MyListenerlessRevisionEntity.class,
MyListenerlessRevisionListener.class, EnversTestRevisionListenerResource.class,
AbstractEnversResource.class)
.addAsResource("application-with-revision-listener.properties",
"application.properties"));
@Test
public void testRevisionListener() {
RestAssured.when().get("/envers-revision-listener").then()
.body(is("OK"));
}
}
| EnversRevisionListenerTestCase |
java | spring-projects__spring-boot | module/spring-boot-security/src/test/java/org/springframework/boot/security/web/reactive/ApplicationContextServerWebExchangeMatcherTests.java | {
"start": 4442,
"end": 4766
} | class ____ extends HttpWebHandlerAdapter {
TestHttpWebHandlerAdapter(WebHandler delegate) {
super(delegate);
}
@Override
protected ServerWebExchange createExchange(ServerHttpRequest request, ServerHttpResponse response) {
return super.createExchange(request, response);
}
}
static | TestHttpWebHandlerAdapter |
java | apache__kafka | clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java | {
"start": 2748,
"end": 16313
} | class ____ {
private final Time time;
private final BlockingQueue<ApplicationEvent> applicationEventQueue;
private final ApplicationEventProcessor applicationEventProcessor;
private final OffsetsRequestManager offsetsRequestManager;
private final ConsumerHeartbeatRequestManager heartbeatRequestManager;
private final CoordinatorRequestManager coordinatorRequestManager;
private final ConsumerNetworkThread consumerNetworkThread;
private final NetworkClientDelegate networkClientDelegate;
private final RequestManagers requestManagers;
private final CompletableEventReaper applicationEventReaper;
private final AsyncConsumerMetrics asyncConsumerMetrics;
ConsumerNetworkThreadTest() {
this.networkClientDelegate = mock(NetworkClientDelegate.class);
this.requestManagers = mock(RequestManagers.class);
this.offsetsRequestManager = mock(OffsetsRequestManager.class);
this.heartbeatRequestManager = mock(ConsumerHeartbeatRequestManager.class);
this.coordinatorRequestManager = mock(CoordinatorRequestManager.class);
this.applicationEventProcessor = mock(ApplicationEventProcessor.class);
this.applicationEventReaper = mock(CompletableEventReaper.class);
this.time = new MockTime();
this.applicationEventQueue = new LinkedBlockingQueue<>();
this.asyncConsumerMetrics = mock(AsyncConsumerMetrics.class);
LogContext logContext = new LogContext();
this.consumerNetworkThread = new ConsumerNetworkThread(
logContext,
time,
applicationEventQueue,
applicationEventReaper,
() -> applicationEventProcessor,
() -> networkClientDelegate,
() -> requestManagers,
asyncConsumerMetrics
);
}
@BeforeEach
public void setup() {
consumerNetworkThread.initializeResources();
}
@AfterEach
public void tearDown() {
if (consumerNetworkThread != null)
consumerNetworkThread.close();
}
@Test
public void testEnsureCloseStopsRunningThread() {
assertTrue(consumerNetworkThread.isRunning(),
"ConsumerNetworkThread should start running when created");
consumerNetworkThread.close();
assertFalse(consumerNetworkThread.isRunning(),
"close() should make consumerNetworkThread.running false by calling closeInternal(Duration timeout)");
}
@ParameterizedTest
@ValueSource(longs = {ConsumerNetworkThread.MAX_POLL_TIMEOUT_MS - 1, ConsumerNetworkThread.MAX_POLL_TIMEOUT_MS, ConsumerNetworkThread.MAX_POLL_TIMEOUT_MS + 1})
public void testConsumerNetworkThreadPollTimeComputations(long exampleTime) {
List<RequestManager> list = List.of(coordinatorRequestManager, heartbeatRequestManager);
when(requestManagers.entries()).thenReturn(list);
NetworkClientDelegate.PollResult pollResult = new NetworkClientDelegate.PollResult(exampleTime);
NetworkClientDelegate.PollResult pollResult1 = new NetworkClientDelegate.PollResult(exampleTime + 100);
long t = time.milliseconds();
when(coordinatorRequestManager.poll(t)).thenReturn(pollResult);
when(coordinatorRequestManager.maximumTimeToWait(t)).thenReturn(exampleTime);
when(heartbeatRequestManager.poll(t)).thenReturn(pollResult1);
when(heartbeatRequestManager.maximumTimeToWait(t)).thenReturn(exampleTime + 100);
when(networkClientDelegate.addAll(pollResult)).thenReturn(pollResult.timeUntilNextPollMs);
when(networkClientDelegate.addAll(pollResult1)).thenReturn(pollResult1.timeUntilNextPollMs);
consumerNetworkThread.runOnce();
verify(networkClientDelegate).poll(Math.min(exampleTime, ConsumerNetworkThread.MAX_POLL_TIMEOUT_MS), time.milliseconds());
assertEquals(consumerNetworkThread.maximumTimeToWait(), exampleTime);
}
@Test
public void testStartupAndTearDown() throws InterruptedException {
consumerNetworkThread.start();
TestCondition isStarted = consumerNetworkThread::isRunning;
TestCondition isClosed = () -> !(consumerNetworkThread.isRunning() || consumerNetworkThread.isAlive());
// There's a nonzero amount of time between starting the thread and having it
// begin to execute our code. Wait for a bit before checking...
TestUtils.waitForCondition(isStarted,
"The consumer network thread did not start within " + DEFAULT_MAX_WAIT_MS + " ms");
consumerNetworkThread.close(Duration.ofMillis(DEFAULT_MAX_WAIT_MS));
TestUtils.waitForCondition(isClosed,
"The consumer network thread did not stop within " + DEFAULT_MAX_WAIT_MS + " ms");
}
@Test
public void testRequestsTransferFromManagersToClientOnThreadRun() {
List<RequestManager> list = List.of(coordinatorRequestManager, heartbeatRequestManager, offsetsRequestManager);
when(requestManagers.entries()).thenReturn(list);
when(coordinatorRequestManager.poll(anyLong())).thenReturn(mock(NetworkClientDelegate.PollResult.class));
consumerNetworkThread.runOnce();
requestManagers.entries().forEach(rm -> verify(rm).poll(anyLong()));
requestManagers.entries().forEach(rm -> verify(rm).maximumTimeToWait(anyLong()));
verify(networkClientDelegate).addAll(any(NetworkClientDelegate.PollResult.class));
verify(networkClientDelegate).poll(anyLong(), anyLong());
}
@Test
public void testMaximumTimeToWait() {
final int defaultHeartbeatIntervalMs = 1000;
// Initial value before runOnce has been called
assertEquals(ConsumerNetworkThread.MAX_POLL_TIMEOUT_MS, consumerNetworkThread.maximumTimeToWait());
when(requestManagers.entries()).thenReturn(List.of(heartbeatRequestManager));
when(heartbeatRequestManager.maximumTimeToWait(time.milliseconds())).thenReturn((long) defaultHeartbeatIntervalMs);
consumerNetworkThread.runOnce();
// After runOnce has been called, it takes the default heartbeat interval from the heartbeat request manager
assertEquals(defaultHeartbeatIntervalMs, consumerNetworkThread.maximumTimeToWait());
}
@Test
public void testCleanupInvokesReaper() {
LinkedList<NetworkClientDelegate.UnsentRequest> queue = new LinkedList<>();
when(networkClientDelegate.unsentRequests()).thenReturn(queue);
when(applicationEventReaper.reap(applicationEventQueue)).thenReturn(1L);
consumerNetworkThread.cleanup();
verify(applicationEventReaper).reap(applicationEventQueue);
verify(asyncConsumerMetrics).recordApplicationEventExpiredSize(1L);
}
@Test
public void testRunOnceInvokesReaper() {
when(applicationEventReaper.reap(any(Long.class))).thenReturn(1L);
consumerNetworkThread.runOnce();
verify(applicationEventReaper).reap(any(Long.class));
verify(asyncConsumerMetrics).recordApplicationEventExpiredSize(1L);
}
@Test
public void testSendUnsentRequests() {
when(networkClientDelegate.hasAnyPendingRequests()).thenReturn(true).thenReturn(true).thenReturn(false);
consumerNetworkThread.cleanup();
verify(networkClientDelegate, times(2)).poll(anyLong(), anyLong(), eq(true));
}
@ParameterizedTest
@MethodSource("org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetricsTest#groupNameProvider")
public void testRunOnceRecordTimeBetweenNetworkThreadPoll(String groupName) {
try (Metrics metrics = new Metrics();
AsyncConsumerMetrics asyncConsumerMetrics = new AsyncConsumerMetrics(metrics, groupName);
ConsumerNetworkThread consumerNetworkThread = new ConsumerNetworkThread(
new LogContext(),
time,
applicationEventQueue,
applicationEventReaper,
() -> applicationEventProcessor,
() -> networkClientDelegate,
() -> requestManagers,
asyncConsumerMetrics
)) {
consumerNetworkThread.initializeResources();
consumerNetworkThread.runOnce();
time.sleep(10);
consumerNetworkThread.runOnce();
assertEquals(
10,
(double) metrics.metric(
metrics.metricName("time-between-network-thread-poll-avg", groupName)
).metricValue()
);
assertEquals(
10,
(double) metrics.metric(
metrics.metricName("time-between-network-thread-poll-max", groupName)
).metricValue()
);
}
}
@ParameterizedTest
@MethodSource("org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetricsTest#groupNameProvider")
public void testRunOnceRecordApplicationEventQueueSizeAndApplicationEventQueueTime(String groupName) {
try (Metrics metrics = new Metrics();
AsyncConsumerMetrics asyncConsumerMetrics = new AsyncConsumerMetrics(metrics, groupName);
ConsumerNetworkThread consumerNetworkThread = new ConsumerNetworkThread(
new LogContext(),
time,
applicationEventQueue,
applicationEventReaper,
() -> applicationEventProcessor,
() -> networkClientDelegate,
() -> requestManagers,
asyncConsumerMetrics
)) {
consumerNetworkThread.initializeResources();
AsyncPollEvent event = new AsyncPollEvent(10, 0);
event.setEnqueuedMs(time.milliseconds());
applicationEventQueue.add(event);
asyncConsumerMetrics.recordApplicationEventQueueSize(1);
time.sleep(10);
consumerNetworkThread.runOnce();
assertEquals(
0,
(double) metrics.metric(
metrics.metricName("application-event-queue-size", groupName)
).metricValue()
);
assertEquals(
10,
(double) metrics.metric(
metrics.metricName("application-event-queue-time-avg", groupName)
).metricValue()
);
assertEquals(
10,
(double) metrics.metric(
metrics.metricName("application-event-queue-time-max", groupName)
).metricValue()
);
}
}
@Test
public void testNetworkClientDelegateInitializeResourcesError() {
Supplier<NetworkClientDelegate> networkClientDelegateSupplier = () -> {
throw new KafkaException("Injecting NetworkClientDelegate initialization failure");
};
Supplier<RequestManagers> requestManagersSupplier = () -> requestManagers;
testInitializeResourcesError(networkClientDelegateSupplier, requestManagersSupplier);
}
@Test
public void testRequestManagersInitializeResourcesError() {
Supplier<NetworkClientDelegate> networkClientDelegateSupplier = () -> networkClientDelegate;
Supplier<RequestManagers> requestManagersSupplier = () -> {
throw new KafkaException("Injecting RequestManagers initialization failure");
};
testInitializeResourcesError(networkClientDelegateSupplier, requestManagersSupplier);
}
@Test
public void testNetworkClientDelegateAndRequestManagersInitializeResourcesError() {
Supplier<NetworkClientDelegate> networkClientDelegateSupplier = () -> {
throw new KafkaException("Injecting NetworkClientDelegate initialization failure");
};
Supplier<RequestManagers> requestManagersSupplier = () -> {
throw new KafkaException("Injecting RequestManagers initialization failure");
};
testInitializeResourcesError(networkClientDelegateSupplier, requestManagersSupplier);
}
/**
* Tests that when an error occurs during {@link ConsumerNetworkThread#initializeResources()} that the
* logic in {@link ConsumerNetworkThread#cleanup()} will not throw errors when closing.
*/
private void testInitializeResourcesError(Supplier<NetworkClientDelegate> networkClientDelegateSupplier,
Supplier<RequestManagers> requestManagersSupplier) {
// A new ConsumerNetworkThread is created because the shared one doesn't have any issues initializing its
// resources. However, most of the mocks can be reused, so this is mostly boilerplate except for the error
// when a supplier is invoked.
try (ConsumerNetworkThread thread = new ConsumerNetworkThread(
new LogContext(),
time,
applicationEventQueue,
applicationEventReaper,
() -> applicationEventProcessor,
networkClientDelegateSupplier,
requestManagersSupplier,
asyncConsumerMetrics
)) {
assertThrows(KafkaException.class, thread::initializeResources, "initializeResources should fail because one or more Supplier throws an error on get()");
assertDoesNotThrow(thread::cleanup, "cleanup() should not cause an error because all references are checked before use");
}
}
}
| ConsumerNetworkThreadTest |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-osgi/src/test/java/org/assertj/tests/core/osgi/soft/CustomSoftAssertionTest.java | {
"start": 1721,
"end": 2654
} | class ____")
.isSameAs(Assertions.class.getClassLoader());
}
@Test
void custom_soft_assertions_success() {
// GIVEN
TestSoftAssertions softly = new TestSoftAssertions();
Map<String, String> map = new HashMap<>();
map.put("key1", "value1");
map.put("key2", "value2");
// WHEN
softly.assertThat(map).containsKeys("key1", "key2").containsValues("value1", "value2");
// THEN
softly.assertAll();
}
@Test
void custom_soft_assertions_failure() {
// GIVEN
TestSoftAssertions softly = new TestSoftAssertions();
Map<String, String> map = new HashMap<>();
map.put("key1", "value1");
map.put("key2", "value2");
// WHEN
softly.assertThat(map).containsKeys("key1", "key3").containsValues("value3", "value2");
// THEN
then(softly.wasSuccess()).isFalse();
then(softly.errorsCollected()).hasSize(2);
}
public static | loader |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/util/xml/XMLEventStreamWriter.java | {
"start": 1398,
"end": 8997
} | class ____ implements XMLStreamWriter {
private static final String DEFAULT_ENCODING = "UTF-8";
private final XMLEventWriter eventWriter;
private final XMLEventFactory eventFactory;
private final List<EndElement> endElements = new ArrayList<>();
private boolean emptyElement = false;
public XMLEventStreamWriter(XMLEventWriter eventWriter, XMLEventFactory eventFactory) {
this.eventWriter = eventWriter;
this.eventFactory = eventFactory;
}
@Override
public void setNamespaceContext(NamespaceContext context) throws XMLStreamException {
this.eventWriter.setNamespaceContext(context);
}
@Override
public NamespaceContext getNamespaceContext() {
return this.eventWriter.getNamespaceContext();
}
@Override
public void setPrefix(String prefix, String uri) throws XMLStreamException {
this.eventWriter.setPrefix(prefix, uri);
}
@Override
public String getPrefix(String uri) throws XMLStreamException {
return this.eventWriter.getPrefix(uri);
}
@Override
public void setDefaultNamespace(String uri) throws XMLStreamException {
this.eventWriter.setDefaultNamespace(uri);
}
@Override
public Object getProperty(String name) throws IllegalArgumentException {
throw new IllegalArgumentException();
}
@Override
public void writeStartDocument() throws XMLStreamException {
closeEmptyElementIfNecessary();
this.eventWriter.add(this.eventFactory.createStartDocument());
}
@Override
public void writeStartDocument(String version) throws XMLStreamException {
closeEmptyElementIfNecessary();
this.eventWriter.add(this.eventFactory.createStartDocument(DEFAULT_ENCODING, version));
}
@Override
public void writeStartDocument(String encoding, String version) throws XMLStreamException {
closeEmptyElementIfNecessary();
this.eventWriter.add(this.eventFactory.createStartDocument(encoding, version));
}
@Override
public void writeStartElement(String localName) throws XMLStreamException {
closeEmptyElementIfNecessary();
doWriteStartElement(this.eventFactory.createStartElement(new QName(localName), null, null));
}
@Override
public void writeStartElement(String namespaceURI, String localName) throws XMLStreamException {
closeEmptyElementIfNecessary();
doWriteStartElement(this.eventFactory.createStartElement(new QName(namespaceURI, localName), null, null));
}
@Override
public void writeStartElement(String prefix, String localName, String namespaceURI) throws XMLStreamException {
closeEmptyElementIfNecessary();
doWriteStartElement(this.eventFactory.createStartElement(new QName(namespaceURI, localName, prefix), null, null));
}
private void doWriteStartElement(StartElement startElement) throws XMLStreamException {
this.eventWriter.add(startElement);
this.endElements.add(this.eventFactory.createEndElement(startElement.getName(), startElement.getNamespaces()));
}
@Override
public void writeEmptyElement(String localName) throws XMLStreamException {
closeEmptyElementIfNecessary();
writeStartElement(localName);
this.emptyElement = true;
}
@Override
public void writeEmptyElement(String namespaceURI, String localName) throws XMLStreamException {
closeEmptyElementIfNecessary();
writeStartElement(namespaceURI, localName);
this.emptyElement = true;
}
@Override
public void writeEmptyElement(String prefix, String localName, String namespaceURI) throws XMLStreamException {
closeEmptyElementIfNecessary();
writeStartElement(prefix, localName, namespaceURI);
this.emptyElement = true;
}
private void closeEmptyElementIfNecessary() throws XMLStreamException {
if (this.emptyElement) {
this.emptyElement = false;
writeEndElement();
}
}
@Override
public void writeEndElement() throws XMLStreamException {
closeEmptyElementIfNecessary();
int last = this.endElements.size() - 1;
EndElement lastEndElement = this.endElements.remove(last);
this.eventWriter.add(lastEndElement);
}
@Override
public void writeAttribute(String localName, String value) throws XMLStreamException {
this.eventWriter.add(this.eventFactory.createAttribute(localName, value));
}
@Override
public void writeAttribute(String namespaceURI, String localName, String value) throws XMLStreamException {
this.eventWriter.add(this.eventFactory.createAttribute(new QName(namespaceURI, localName), value));
}
@Override
public void writeAttribute(String prefix, String namespaceURI, String localName, String value)
throws XMLStreamException {
this.eventWriter.add(this.eventFactory.createAttribute(prefix, namespaceURI, localName, value));
}
@Override
public void writeNamespace(String prefix, String namespaceURI) throws XMLStreamException {
doWriteNamespace(this.eventFactory.createNamespace(prefix, namespaceURI));
}
@Override
public void writeDefaultNamespace(String namespaceURI) throws XMLStreamException {
doWriteNamespace(this.eventFactory.createNamespace(namespaceURI));
}
@SuppressWarnings("rawtypes")
private void doWriteNamespace(Namespace namespace) throws XMLStreamException {
int last = this.endElements.size() - 1;
EndElement oldEndElement = this.endElements.get(last);
Iterator oldNamespaces = oldEndElement.getNamespaces();
List<Namespace> newNamespaces = new ArrayList<>();
while (oldNamespaces.hasNext()) {
Namespace oldNamespace = (Namespace) oldNamespaces.next();
newNamespaces.add(oldNamespace);
}
newNamespaces.add(namespace);
EndElement newEndElement = this.eventFactory.createEndElement(oldEndElement.getName(), newNamespaces.iterator());
this.eventWriter.add(namespace);
this.endElements.set(last, newEndElement);
}
@Override
public void writeCharacters(String text) throws XMLStreamException {
closeEmptyElementIfNecessary();
this.eventWriter.add(this.eventFactory.createCharacters(text));
}
@Override
public void writeCharacters(char[] text, int start, int len) throws XMLStreamException {
closeEmptyElementIfNecessary();
this.eventWriter.add(this.eventFactory.createCharacters(new String(text, start, len)));
}
@Override
public void writeCData(String data) throws XMLStreamException {
closeEmptyElementIfNecessary();
this.eventWriter.add(this.eventFactory.createCData(data));
}
@Override
public void writeComment(String data) throws XMLStreamException {
closeEmptyElementIfNecessary();
this.eventWriter.add(this.eventFactory.createComment(data));
}
@Override
public void writeProcessingInstruction(String target) throws XMLStreamException {
closeEmptyElementIfNecessary();
this.eventWriter.add(this.eventFactory.createProcessingInstruction(target, ""));
}
@Override
public void writeProcessingInstruction(String target, String data) throws XMLStreamException {
closeEmptyElementIfNecessary();
this.eventWriter.add(this.eventFactory.createProcessingInstruction(target, data));
}
@Override
public void writeDTD(String dtd) throws XMLStreamException {
closeEmptyElementIfNecessary();
this.eventWriter.add(this.eventFactory.createDTD(dtd));
}
@Override
public void writeEntityRef(String name) throws XMLStreamException {
closeEmptyElementIfNecessary();
this.eventWriter.add(this.eventFactory.createEntityReference(name, null));
}
@Override
public void writeEndDocument() throws XMLStreamException {
closeEmptyElementIfNecessary();
this.eventWriter.add(this.eventFactory.createEndDocument());
}
@Override
public void flush() throws XMLStreamException {
this.eventWriter.flush();
}
@Override
public void close() throws XMLStreamException {
closeEmptyElementIfNecessary();
this.eventWriter.close();
}
}
| XMLEventStreamWriter |
java | apache__camel | components/camel-ibm/camel-ibm-watson-language/src/main/java/org/apache/camel/component/ibm/watson/language/WatsonLanguageProducer.java | {
"start": 1832,
"end": 8602
} | class ____ extends DefaultProducer {
private static final Logger LOG = LoggerFactory.getLogger(WatsonLanguageProducer.class);
public WatsonLanguageProducer(WatsonLanguageEndpoint endpoint) {
super(endpoint);
}
@Override
public void process(Exchange exchange) throws Exception {
WatsonLanguageOperations operation = determineOperation(exchange);
switch (operation) {
case analyzeText:
analyzeText(exchange);
break;
case analyzeUrl:
analyzeUrl(exchange);
break;
default:
throw new IllegalArgumentException("Unsupported operation: " + operation);
}
}
@Override
public WatsonLanguageEndpoint getEndpoint() {
return (WatsonLanguageEndpoint) super.getEndpoint();
}
private WatsonLanguageOperations determineOperation(Exchange exchange) {
WatsonLanguageOperations operation
= exchange.getIn().getHeader(WatsonLanguageConstants.OPERATION, WatsonLanguageOperations.class);
if (operation == null) {
operation = getEndpoint().getConfiguration().getOperation();
}
if (operation == null) {
throw new IllegalArgumentException("Operation must be specified");
}
return operation;
}
private void analyzeText(Exchange exchange) {
NaturalLanguageUnderstanding nlu = getEndpoint().getNluClient();
if (nlu == null) {
throw new IllegalStateException("NLU client not initialized. Use service=nlu");
}
String text = exchange.getIn().getHeader(WatsonLanguageConstants.TEXT, String.class);
if (text == null) {
text = exchange.getIn().getBody(String.class);
}
if (text == null || text.isBlank()) {
throw new IllegalArgumentException("Text to analyze must be specified");
}
LOG.trace("Analyzing text with NLU");
Features features = buildFeatures(exchange);
AnalyzeOptions options = new AnalyzeOptions.Builder()
.text(text)
.features(features)
.build();
AnalysisResults result = nlu.analyze(options).execute().getResult();
Message message = getMessageForResponse(exchange);
// Set the full analysis results as body (contains sentiment, entities, keywords, etc.)
message.setBody(result);
// Set convenience headers for commonly used values
if (result.getSentiment() != null && result.getSentiment().getDocument() != null) {
message.setHeader(WatsonLanguageConstants.SENTIMENT_SCORE, result.getSentiment().getDocument().getScore());
message.setHeader(WatsonLanguageConstants.SENTIMENT_LABEL, result.getSentiment().getDocument().getLabel());
}
if (result.getLanguage() != null) {
message.setHeader(WatsonLanguageConstants.LANGUAGE, result.getLanguage());
}
}
private void analyzeUrl(Exchange exchange) {
NaturalLanguageUnderstanding nlu = getEndpoint().getNluClient();
if (nlu == null) {
throw new IllegalStateException("NLU client not initialized. Use service=nlu");
}
String url = exchange.getIn().getHeader(WatsonLanguageConstants.URL, String.class);
if (url == null) {
url = exchange.getIn().getBody(String.class);
}
if (url == null || url.isBlank()) {
throw new IllegalArgumentException("URL to analyze must be specified");
}
LOG.trace("Analyzing URL with NLU: {}", url);
Features features = buildFeatures(exchange);
AnalyzeOptions options = new AnalyzeOptions.Builder()
.url(url)
.features(features)
.build();
AnalysisResults result = nlu.analyze(options).execute().getResult();
Message message = getMessageForResponse(exchange);
// Set the full analysis results as body (contains sentiment, entities, keywords, etc.)
message.setBody(result);
// Set convenience headers for commonly used values
if (result.getSentiment() != null && result.getSentiment().getDocument() != null) {
message.setHeader(WatsonLanguageConstants.SENTIMENT_SCORE, result.getSentiment().getDocument().getScore());
message.setHeader(WatsonLanguageConstants.SENTIMENT_LABEL, result.getSentiment().getDocument().getLabel());
}
if (result.getLanguage() != null) {
message.setHeader(WatsonLanguageConstants.LANGUAGE, result.getLanguage());
}
}
private Features buildFeatures(Exchange exchange) {
Features.Builder builder = new Features.Builder();
boolean analyzeSentiment = exchange.getIn().getHeader(WatsonLanguageConstants.ANALYZE_SENTIMENT,
getEndpoint().getConfiguration().isAnalyzeSentiment(), Boolean.class);
boolean analyzeEmotion = exchange.getIn().getHeader(WatsonLanguageConstants.ANALYZE_EMOTION,
getEndpoint().getConfiguration().isAnalyzeEmotion(), Boolean.class);
boolean analyzeEntities = exchange.getIn().getHeader(WatsonLanguageConstants.ANALYZE_ENTITIES,
getEndpoint().getConfiguration().isAnalyzeEntities(), Boolean.class);
boolean analyzeKeywords = exchange.getIn().getHeader(WatsonLanguageConstants.ANALYZE_KEYWORDS,
getEndpoint().getConfiguration().isAnalyzeKeywords(), Boolean.class);
boolean analyzeConcepts = exchange.getIn().getHeader(WatsonLanguageConstants.ANALYZE_CONCEPTS,
getEndpoint().getConfiguration().isAnalyzeConcepts(), Boolean.class);
boolean analyzeCategories = exchange.getIn().getHeader(WatsonLanguageConstants.ANALYZE_CATEGORIES,
getEndpoint().getConfiguration().isAnalyzeCategories(), Boolean.class);
if (analyzeSentiment) {
builder.sentiment(new SentimentOptions.Builder().build());
}
if (analyzeEmotion) {
builder.emotion(new EmotionOptions.Builder().build());
}
if (analyzeEntities) {
builder.entities(new EntitiesOptions.Builder().build());
}
if (analyzeKeywords) {
builder.keywords(new KeywordsOptions.Builder().build());
}
if (analyzeConcepts) {
builder.concepts(new ConceptsOptions.Builder().build());
}
if (analyzeCategories) {
builder.categories(new CategoriesOptions.Builder().build());
}
return builder.build();
}
private Message getMessageForResponse(Exchange exchange) {
return exchange.getMessage();
}
}
| WatsonLanguageProducer |
java | spring-projects__spring-framework | spring-webmvc/src/main/java/org/springframework/web/servlet/mvc/condition/RequestConditionHolder.java | {
"start": 1511,
"end": 4688
} | class ____ extends AbstractRequestCondition<RequestConditionHolder> {
private final @Nullable RequestCondition<Object> condition;
/**
* Create a new holder to wrap the given request condition.
* @param requestCondition the condition to hold, may be {@code null}
*/
@SuppressWarnings("unchecked")
public RequestConditionHolder(@Nullable RequestCondition<?> requestCondition) {
this.condition = (RequestCondition<Object>) requestCondition;
}
/**
* Return the held request condition, or {@code null} if not holding one.
*/
public @Nullable RequestCondition<?> getCondition() {
return this.condition;
}
@Override
protected Collection<?> getContent() {
return (this.condition != null ? Collections.singleton(this.condition) : Collections.emptyList());
}
@Override
protected String getToStringInfix() {
return " ";
}
/**
* Combine the request conditions held by the two RequestConditionHolder
* instances after making sure the conditions are of the same type.
* Or if one holder is empty, the other holder is returned.
*/
@Override
public RequestConditionHolder combine(RequestConditionHolder other) {
if (this.condition == null && other.condition == null) {
return this;
}
else if (this.condition == null) {
return other;
}
else if (other.condition == null) {
return this;
}
else {
assertEqualConditionTypes(this.condition, other.condition);
RequestCondition<?> combined = (RequestCondition<?>) this.condition.combine(other.condition);
return new RequestConditionHolder(combined);
}
}
/**
* Ensure the held request conditions are of the same type.
*/
private void assertEqualConditionTypes(RequestCondition<?> thisCondition, RequestCondition<?> otherCondition) {
Class<?> clazz = thisCondition.getClass();
Class<?> otherClazz = otherCondition.getClass();
if (!clazz.equals(otherClazz)) {
throw new ClassCastException("Incompatible request conditions: " + clazz + " and " + otherClazz);
}
}
/**
* Get the matching condition for the held request condition wrap it in a
* new RequestConditionHolder instance. Or otherwise if this is an empty
* holder, return the same holder instance.
*/
@Override
public @Nullable RequestConditionHolder getMatchingCondition(HttpServletRequest request) {
if (this.condition == null) {
return this;
}
RequestCondition<?> match = (RequestCondition<?>) this.condition.getMatchingCondition(request);
return (match != null ? new RequestConditionHolder(match) : null);
}
/**
* Compare the request conditions held by the two RequestConditionHolder
* instances after making sure the conditions are of the same type.
* Or if one holder is empty, the other holder is preferred.
*/
@Override
public int compareTo(RequestConditionHolder other, HttpServletRequest request) {
if (this.condition == null && other.condition == null) {
return 0;
}
else if (this.condition == null) {
return 1;
}
else if (other.condition == null) {
return -1;
}
else {
assertEqualConditionTypes(this.condition, other.condition);
return this.condition.compareTo(other.condition, request);
}
}
}
| RequestConditionHolder |
java | quarkusio__quarkus | extensions/scheduler/deployment/src/test/java/io/quarkus/scheduler/test/ConditionalExecutionTest.java | {
"start": 2366,
"end": 2891
} | class ____ implements Scheduled.SkipPredicate {
static final CountDownLatch SKIPPED_LATCH = new CountDownLatch(1);
static final AtomicBoolean DISABLED = new AtomicBoolean(true);
@Override
public boolean test(ScheduledExecution execution) {
return DISABLED.get();
}
void onSkip(@Observes SkippedExecution event) {
if (event.triggerId.equals("foo")) {
SKIPPED_LATCH.countDown();
}
}
}
public static | IsDisabled |
java | spring-projects__spring-security | core/src/test/java/org/springframework/security/authentication/jaas/Sec760Tests.java | {
"start": 1122,
"end": 2740
} | class ____ {
public String resolveConfigFile(String filename) {
String resName = "/" + getClass().getPackage().getName().replace('.', '/') + filename;
return resName;
}
private void testConfigureJaasCase(JaasAuthenticationProvider p1, JaasAuthenticationProvider p2) throws Exception {
p1.setLoginConfig(new ClassPathResource(resolveConfigFile("/test1.conf")));
p1.setLoginContextName("test1");
p1.setCallbackHandlers(new JaasAuthenticationCallbackHandler[] { new TestCallbackHandler(),
new JaasNameCallbackHandler(), new JaasPasswordCallbackHandler() });
p1.setAuthorityGranters(new AuthorityGranter[] { new TestAuthorityGranter() });
p1.afterPropertiesSet();
testAuthenticate(p1);
p2.setLoginConfig(new ClassPathResource(resolveConfigFile("/test2.conf")));
p2.setLoginContextName("test2");
p2.setCallbackHandlers(new JaasAuthenticationCallbackHandler[] { new TestCallbackHandler(),
new JaasNameCallbackHandler(), new JaasPasswordCallbackHandler() });
p2.setAuthorityGranters(new AuthorityGranter[] { new TestAuthorityGranter() });
p2.afterPropertiesSet();
testAuthenticate(p2);
}
private void testAuthenticate(JaasAuthenticationProvider p1) {
UsernamePasswordAuthenticationToken token = UsernamePasswordAuthenticationToken.authenticated("user",
"password", AuthorityUtils.createAuthorityList("ROLE_ONE", "ROLE_TWO"));
Authentication auth = p1.authenticate(token);
assertThat(auth).isNotNull();
}
@Test
public void testConfigureJaas() throws Exception {
testConfigureJaasCase(new JaasAuthenticationProvider(), new JaasAuthenticationProvider());
}
}
| Sec760Tests |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/injectionstrategy/shared/GenderDto.java | {
"start": 247,
"end": 277
} | enum ____ {
M, F
}
| GenderDto |
java | spring-projects__spring-security | acl/src/main/java/org/springframework/security/acls/domain/ObjectIdentityImpl.java | {
"start": 3279,
"end": 4931
} | class ____ if it has the same
* <code>classname</code> and <code>id</code> properties.
* <p>
* Numeric identities (Integer and Long values) are considered equal if they are
* numerically equal. Other serializable types are evaluated using a simple equality.
* @param obj object to compare
* @return <code>true</code> if the presented object matches this object
*/
@Override
public boolean equals(Object obj) {
if (obj == null || !(obj instanceof ObjectIdentityImpl)) {
return false;
}
ObjectIdentityImpl other = (ObjectIdentityImpl) obj;
if (this.identifier instanceof Number && other.identifier instanceof Number) {
// Integers and Longs with same value should be considered equal
if (((Number) this.identifier).longValue() != ((Number) other.identifier).longValue()) {
return false;
}
}
else {
// Use plain equality for other serializable types
if (!this.identifier.equals(other.identifier)) {
return false;
}
}
return this.type.equals(other.type);
}
@Override
public Serializable getIdentifier() {
return this.identifier;
}
@Override
public String getType() {
return this.type;
}
/**
* Important so caching operates properly.
* @return the hash
*/
@Override
public int hashCode() {
int result = this.type.hashCode();
result = 31 * result + this.identifier.hashCode();
return result;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(this.getClass().getName()).append("[");
sb.append("Type: ").append(this.type);
sb.append("; Identifier: ").append(this.identifier).append("]");
return sb.toString();
}
}
| equal |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/query/sqm/tree/expression/SqmEnumLiteral.java | {
"start": 1210,
"end": 3079
} | class ____<E extends Enum<E>> extends SqmLiteral<E> implements SqmBindableType<E>, SemanticPathPart {
private final EnumJavaType<E> referencedEnumTypeDescriptor;
private final String enumValueName;
public SqmEnumLiteral(
E enumValue,
EnumJavaType<E> referencedEnumTypeDescriptor,
String enumValueName,
NodeBuilder nodeBuilder) {
super( null, enumValue, nodeBuilder );
this.referencedEnumTypeDescriptor = referencedEnumTypeDescriptor;
this.enumValueName = enumValueName;
}
@Override
public SqmEnumLiteral<E> copy(SqmCopyContext context) {
final SqmEnumLiteral<E> existing = context.getCopy( this );
if ( existing != null ) {
return existing;
}
final SqmEnumLiteral<E> expression = context.registerCopy(
this,
new SqmEnumLiteral<>(
getEnumValue(),
referencedEnumTypeDescriptor,
enumValueName,
nodeBuilder()
)
);
copyTo( expression, context );
return expression;
}
@Override
public SqmBindableType<E> getExpressible() {
return this;
}
@Override
public @NonNull SqmBindableType<E> getNodeType() {
return this;
}
@Override
public PersistenceType getPersistenceType() {
return BASIC;
}
@Override
public @Nullable SqmDomainType<E> getSqmType() {
return null;
}
public E getEnumValue() {
return castNonNull( getLiteralValue() );
}
@Override
public EnumJavaType<E> getExpressibleJavaType() {
return referencedEnumTypeDescriptor;
}
@Override
public Class<E> getJavaType() {
return referencedEnumTypeDescriptor.getJavaTypeClass();
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// SemanticPathPart
@Override
public SemanticPathPart resolvePathPart(
String name,
boolean isTerminal,
SqmCreationState creationState) {
throw new UnknownPathException(
String.format(
Locale.ROOT,
"Static | SqmEnumLiteral |
java | processing__processing4 | app/src/processing/app/contrib/ModeContribution.java | {
"start": 4685,
"end": 7074
} | class ____ " + mainJar.getAbsolutePath());
}
}
List<URL> extraUrls = new ArrayList<>();
if (imports != null && imports.size() > 0) {
// if the mode has any dependencies (defined as imports in
// mode.properties), add the dependencies to the classloader
Map<String, Mode> installedModes = new HashMap<>();
for (Mode m : base.getModeList()) {
// Base.log("Mode contrib: " + m.getClass().getName() + " : "+ m.getFolder());
installedModes.put(m.getClass().getName(), m);
}
for (String modeImport : imports) {
if (installedModes.containsKey(modeImport)) {
Messages.log("Found mode dependency " + modeImport);
File modeFolder = installedModes.get(modeImport).getFolder();
File[] archives = Util.listJarFiles(new File(modeFolder, "mode"));
if (archives != null) {
for (File archive : archives) {
// Base.log("Adding jar dependency: " + archives[i].getAbsolutePath());
extraUrls.add(archive.toURI().toURL());
}
}
} else {
throw new IgnorableException("Can't load " + className +
" because the import " + modeImport +
" could not be found. ");
}
}
}
// Add .jar and .zip files from the "mode" folder into the classpath
File[] archives = Util.listJarFiles(modeDirectory);
if (archives != null && archives.length > 0) {
int arrLen = archives.length + extraUrls.size();
URL[] urlList = new URL[arrLen];
int j = 0;
for (; j < extraUrls.size(); j++) {
//Base.log("Found archive " + archives[j] + " for " + getName());
urlList[j] = extraUrls.get(j);
}
for (int k = 0; k < archives.length; k++,j++) {
Messages.log("Found archive " + archives[k] + " for " + getName());
urlList[j] = archives[k].toURI().toURL();
}
loader = new URLClassLoader(urlList);
Messages.log("loading above JARs with loader " + loader);
}
}
// If no archives were found, just use the regular ClassLoader
if (loader == null) {
loader = Thread.currentThread().getContextClassLoader();
}
return className;
}
}
| inside |
java | quarkusio__quarkus | integration-tests/micrometer-prometheus/src/test/java/io/quarkus/it/micrometer/prometheus/PrometheusMetricsRegistryIT.java | {
"start": 124,
"end": 200
} | class ____ extends PrometheusMetricsRegistryTest {
}
| PrometheusMetricsRegistryIT |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/security/contexts/AnotherCompatibleTestSecurityContextFactory.java | {
"start": 1650,
"end": 1830
} | class ____ implements SecurityContext {
@Override
public <T> T runSecured(Callable<T> securedCallable) {
return null;
}
}
}
| TestSecurityContext |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/cascade/Troop.java | {
"start": 537,
"end": 1363
} | class ____ implements Serializable {
private Integer id;
private String name;
private Set<Soldier> soldiers;
@OneToMany(mappedBy = "troop", cascade = {CascadeType.ALL}, fetch = FetchType.LAZY)
@SQLOrder("name desc")
@org.hibernate.annotations.Cascade({org.hibernate.annotations.CascadeType.DELETE_ORPHAN})
public Set<Soldier> getSoldiers() {
return soldiers;
}
public void setSoldiers(Set<Soldier> soldiers) {
this.soldiers = soldiers;
}
@Id
@GeneratedValue
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public void addSoldier(Soldier s) {
if ( soldiers == null ) soldiers = new HashSet<Soldier>();
soldiers.add( s );
s.setTroop( this );
}
}
| Troop |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/jndi/JndiObjectFactoryBean.java | {
"start": 11081,
"end": 11870
} | class ____ {
private static Object createJndiObjectProxy(JndiObjectFactoryBean jof) throws NamingException {
// Create a JndiObjectTargetSource that mirrors the JndiObjectFactoryBean's configuration.
JndiObjectTargetSource targetSource = new JndiObjectTargetSource();
targetSource.setJndiTemplate(jof.getJndiTemplate());
String jndiName = jof.getJndiName();
Assert.state(jndiName != null, "No JNDI name specified");
targetSource.setJndiName(jndiName);
targetSource.setExpectedType(jof.getExpectedType());
targetSource.setResourceRef(jof.isResourceRef());
targetSource.setLookupOnStartup(jof.lookupOnStartup);
targetSource.setCache(jof.cache);
targetSource.afterPropertiesSet();
// Create a proxy with JndiObjectFactoryBean's proxy | JndiObjectProxyFactory |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/ActiveMQ6EndpointBuilderFactory.java | {
"start": 156338,
"end": 235060
} | interface ____ extends EndpointProducerBuilder {
default ActiveMQ6EndpointProducerBuilder basic() {
return (ActiveMQ6EndpointProducerBuilder) this;
}
/**
* This option is used to allow additional headers which may have values
* that are invalid according to JMS specification. For example, some
* message systems, such as WMQ, do this with header names using prefix
* JMS_IBM_MQMD_ containing values with byte array or other invalid
* types. You can specify multiple header names separated by comma, and
* use as suffix for wildcard matching.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer (advanced)
*
* @param allowAdditionalHeaders the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder allowAdditionalHeaders(String allowAdditionalHeaders) {
doSetProperty("allowAdditionalHeaders", allowAdditionalHeaders);
return this;
}
/**
* Whether to allow sending messages with no body. If this option is
* false and the message body is null, then an JMSException is thrown.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: producer (advanced)
*
* @param allowNullBody the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder allowNullBody(boolean allowNullBody) {
doSetProperty("allowNullBody", allowNullBody);
return this;
}
/**
* Whether to allow sending messages with no body. If this option is
* false and the message body is null, then an JMSException is thrown.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: producer (advanced)
*
* @param allowNullBody the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder allowNullBody(String allowNullBody) {
doSetProperty("allowNullBody", allowNullBody);
return this;
}
/**
* If true, Camel will always make a JMS message copy of the message
* when it is passed to the producer for sending. Copying the message is
* needed in some situations, such as when a
* replyToDestinationSelectorName is set (incidentally, Camel will set
* the alwaysCopyMessage option to true, if a
* replyToDestinationSelectorName is set).
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param alwaysCopyMessage the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder alwaysCopyMessage(boolean alwaysCopyMessage) {
doSetProperty("alwaysCopyMessage", alwaysCopyMessage);
return this;
}
/**
* If true, Camel will always make a JMS message copy of the message
* when it is passed to the producer for sending. Copying the message is
* needed in some situations, such as when a
* replyToDestinationSelectorName is set (incidentally, Camel will set
* the alwaysCopyMessage option to true, if a
* replyToDestinationSelectorName is set).
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param alwaysCopyMessage the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder alwaysCopyMessage(String alwaysCopyMessage) {
doSetProperty("alwaysCopyMessage", alwaysCopyMessage);
return this;
}
/**
* When using InOut exchange pattern use this JMS property instead of
* JMSCorrelationID JMS property to correlate messages. If set messages
* will be correlated solely on the value of this property
* JMSCorrelationID property will be ignored and not set by Camel.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer (advanced)
*
* @param correlationProperty the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder correlationProperty(String correlationProperty) {
doSetProperty("correlationProperty", correlationProperty);
return this;
}
/**
* Use this option to force disabling time to live. For example when you
* do request/reply over JMS, then Camel will by default use the
* requestTimeout value as time to live on the message being sent. The
* problem is that the sender and receiver systems have to have their
* clocks synchronized, so they are in sync. This is not always so easy
* to archive. So you can use disableTimeToLive=true to not set a time
* to live value on the sent message. Then the message will not expire
* on the receiver system. See below in section About time to live for
* more details.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param disableTimeToLive the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder disableTimeToLive(boolean disableTimeToLive) {
doSetProperty("disableTimeToLive", disableTimeToLive);
return this;
}
/**
* Use this option to force disabling time to live. For example when you
* do request/reply over JMS, then Camel will by default use the
* requestTimeout value as time to live on the message being sent. The
* problem is that the sender and receiver systems have to have their
* clocks synchronized, so they are in sync. This is not always so easy
* to archive. So you can use disableTimeToLive=true to not set a time
* to live value on the sent message. Then the message will not expire
* on the receiver system. See below in section About time to live for
* more details.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param disableTimeToLive the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder disableTimeToLive(String disableTimeToLive) {
doSetProperty("disableTimeToLive", disableTimeToLive);
return this;
}
/**
* When using mapJmsMessage=false Camel will create a new JMS message to
* send to a new JMS destination if you touch the headers (get or set)
* during the route. Set this option to true to force Camel to send the
* original JMS message that was received.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param forceSendOriginalMessage the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder forceSendOriginalMessage(boolean forceSendOriginalMessage) {
doSetProperty("forceSendOriginalMessage", forceSendOriginalMessage);
return this;
}
/**
* When using mapJmsMessage=false Camel will create a new JMS message to
* send to a new JMS destination if you touch the headers (get or set)
* during the route. Set this option to true to force Camel to send the
* original JMS message that was received.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param forceSendOriginalMessage the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder forceSendOriginalMessage(String forceSendOriginalMessage) {
doSetProperty("forceSendOriginalMessage", forceSendOriginalMessage);
return this;
}
/**
* Only applicable when sending to JMS destination using InOnly (eg fire
* and forget). Enabling this option will enrich the Camel Exchange with
* the actual JMSMessageID that was used by the JMS client when the
* message was sent to the JMS destination.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param includeSentJMSMessageID the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder includeSentJMSMessageID(boolean includeSentJMSMessageID) {
doSetProperty("includeSentJMSMessageID", includeSentJMSMessageID);
return this;
}
/**
* Only applicable when sending to JMS destination using InOnly (eg fire
* and forget). Enabling this option will enrich the Camel Exchange with
* the actual JMSMessageID that was used by the JMS client when the
* message was sent to the JMS destination.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param includeSentJMSMessageID the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder includeSentJMSMessageID(String includeSentJMSMessageID) {
doSetProperty("includeSentJMSMessageID", includeSentJMSMessageID);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder lazyStartProducer(String lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* When using InOut exchange pattern use this JMS property instead of
* JMSCorrelationID JMS property to correlate reply message. Difference
* between this and 'correlationProperty' is that 'correlationProperty'
* tells which request property holds the correlation id value and it
* does not affect the selector for the reply
* (JMSCorrelationID=&lt;correlation id&gt;), while
* 'replyCorrelationProperty' tells which reply property will hold the
* correlation id value and it does affect the selector for the reply
* (&lt;replyCorrelationProperty&gt;=&lt;correlation
* id&gt;).
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer (advanced)
*
* @param replyCorrelationProperty the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder replyCorrelationProperty(String replyCorrelationProperty) {
doSetProperty("replyCorrelationProperty", replyCorrelationProperty);
return this;
}
/**
* Sets the cache level by name for the reply consumer when doing
* request/reply over JMS. This option only applies when using fixed
* reply queues (not temporary). Camel will by default use:
* CACHE_CONSUMER for exclusive or shared w/ replyToSelectorName. And
* CACHE_SESSION for shared without replyToSelectorName. Some JMS
* brokers such as IBM WebSphere may require to set the
* replyToCacheLevelName=CACHE_NONE to work. Note: If using temporary
* queues then CACHE_NONE is not allowed, and you must use a higher
* value such as CACHE_CONSUMER or CACHE_SESSION.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer (advanced)
*
* @param replyToCacheLevelName the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder replyToCacheLevelName(String replyToCacheLevelName) {
doSetProperty("replyToCacheLevelName", replyToCacheLevelName);
return this;
}
/**
* Sets the JMS Selector using the fixed name to be used so you can
* filter out your own replies from the others when using a shared queue
* (that is, if you are not using a temporary reply queue).
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer (advanced)
*
* @param replyToDestinationSelectorName the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder replyToDestinationSelectorName(String replyToDestinationSelectorName) {
doSetProperty("replyToDestinationSelectorName", replyToDestinationSelectorName);
return this;
}
/**
* Sets whether StreamMessage type is enabled or not. Message payloads
* of streaming kind such as files, InputStream, etc will either by sent
* as BytesMessage or StreamMessage. This option controls which kind
* will be used. By default BytesMessage is used which enforces the
* entire message payload to be read into memory. By enabling this
* option the message payload is read into memory in chunks and each
* chunk is then written to the StreamMessage until no more data.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param streamMessageTypeEnabled the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder streamMessageTypeEnabled(boolean streamMessageTypeEnabled) {
doSetProperty("streamMessageTypeEnabled", streamMessageTypeEnabled);
return this;
}
/**
* Sets whether StreamMessage type is enabled or not. Message payloads
* of streaming kind such as files, InputStream, etc will either by sent
* as BytesMessage or StreamMessage. This option controls which kind
* will be used. By default BytesMessage is used which enforces the
* entire message payload to be read into memory. By enabling this
* option the message payload is read into memory in chunks and each
* chunk is then written to the StreamMessage until no more data.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param streamMessageTypeEnabled the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder streamMessageTypeEnabled(String streamMessageTypeEnabled) {
doSetProperty("streamMessageTypeEnabled", streamMessageTypeEnabled);
return this;
}
/**
* Controls whether or not to include serialized headers. Applies only
* when transferExchange is true. This requires that the objects are
* serializable. Camel will exclude any non-serializable objects and log
* it at WARN level.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param allowSerializedHeaders the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder allowSerializedHeaders(boolean allowSerializedHeaders) {
doSetProperty("allowSerializedHeaders", allowSerializedHeaders);
return this;
}
/**
* Controls whether or not to include serialized headers. Applies only
* when transferExchange is true. This requires that the objects are
* serializable. Camel will exclude any non-serializable objects and log
* it at WARN level.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param allowSerializedHeaders the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder allowSerializedHeaders(String allowSerializedHeaders) {
doSetProperty("allowSerializedHeaders", allowSerializedHeaders);
return this;
}
/**
* Whether optimizing for Apache Artemis streaming mode. This can reduce
* memory overhead when using Artemis with JMS StreamMessage types. This
* option must only be enabled if Apache Artemis is being used.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param artemisStreamingEnabled the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder artemisStreamingEnabled(boolean artemisStreamingEnabled) {
doSetProperty("artemisStreamingEnabled", artemisStreamingEnabled);
return this;
}
/**
* Whether optimizing for Apache Artemis streaming mode. This can reduce
* memory overhead when using Artemis with JMS StreamMessage types. This
* option must only be enabled if Apache Artemis is being used.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param artemisStreamingEnabled the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder artemisStreamingEnabled(String artemisStreamingEnabled) {
doSetProperty("artemisStreamingEnabled", artemisStreamingEnabled);
return this;
}
/**
* Whether to startup the JmsConsumer message listener asynchronously,
* when starting a route. For example if a JmsConsumer cannot get a
* connection to a remote JMS broker, then it may block while retrying
* and/or fail-over. This will cause Camel to block while starting
* routes. By setting this option to true, you will let routes startup,
* while the JmsConsumer connects to the JMS broker using a dedicated
* thread in asynchronous mode. If this option is used, then beware that
* if the connection could not be established, then an exception is
* logged at WARN level, and the consumer will not be able to receive
* messages; You can then restart the route to retry.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param asyncStartListener the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder asyncStartListener(boolean asyncStartListener) {
doSetProperty("asyncStartListener", asyncStartListener);
return this;
}
/**
* Whether to startup the JmsConsumer message listener asynchronously,
* when starting a route. For example if a JmsConsumer cannot get a
* connection to a remote JMS broker, then it may block while retrying
* and/or fail-over. This will cause Camel to block while starting
* routes. By setting this option to true, you will let routes startup,
* while the JmsConsumer connects to the JMS broker using a dedicated
* thread in asynchronous mode. If this option is used, then beware that
* if the connection could not be established, then an exception is
* logged at WARN level, and the consumer will not be able to receive
* messages; You can then restart the route to retry.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param asyncStartListener the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder asyncStartListener(String asyncStartListener) {
doSetProperty("asyncStartListener", asyncStartListener);
return this;
}
/**
* Whether to stop the JmsConsumer message listener asynchronously, when
* stopping a route.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param asyncStopListener the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder asyncStopListener(boolean asyncStopListener) {
doSetProperty("asyncStopListener", asyncStopListener);
return this;
}
/**
* Whether to stop the JmsConsumer message listener asynchronously, when
* stopping a route.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param asyncStopListener the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder asyncStopListener(String asyncStopListener) {
doSetProperty("asyncStopListener", asyncStopListener);
return this;
}
/**
* Maximum number of messages to keep in memory available for browsing.
* Use 0 for unlimited.
*
* The option is a: <code>int</code> type.
*
* Default: 100
* Group: advanced
*
* @param browseLimit the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder browseLimit(int browseLimit) {
doSetProperty("browseLimit", browseLimit);
return this;
}
/**
* Maximum number of messages to keep in memory available for browsing.
* Use 0 for unlimited.
*
* The option will be converted to a <code>int</code> type.
*
* Default: 100
* Group: advanced
*
* @param browseLimit the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder browseLimit(String browseLimit) {
doSetProperty("browseLimit", browseLimit);
return this;
}
/**
* A pluggable
* org.springframework.jms.support.destination.DestinationResolver that
* allows you to use your own resolver (for example, to lookup the real
* destination in a JNDI registry).
*
* The option is a:
* <code>org.springframework.jms.support.destination.DestinationResolver</code> type.
*
* Group: advanced
*
* @param destinationResolver the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder destinationResolver(org.springframework.jms.support.destination.DestinationResolver destinationResolver) {
doSetProperty("destinationResolver", destinationResolver);
return this;
}
/**
* A pluggable
* org.springframework.jms.support.destination.DestinationResolver that
* allows you to use your own resolver (for example, to lookup the real
* destination in a JNDI registry).
*
* The option will be converted to a
* <code>org.springframework.jms.support.destination.DestinationResolver</code> type.
*
* Group: advanced
*
* @param destinationResolver the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder destinationResolver(String destinationResolver) {
doSetProperty("destinationResolver", destinationResolver);
return this;
}
/**
* Specifies a org.springframework.util.ErrorHandler to be invoked in
* case of any uncaught exceptions thrown while processing a Message. By
* default these exceptions will be logged at the WARN level, if no
* errorHandler has been configured. You can configure logging level and
* whether stack traces should be logged using errorHandlerLoggingLevel
* and errorHandlerLogStackTrace options. This makes it much easier to
* configure, than having to code a custom errorHandler.
*
* The option is a: <code>org.springframework.util.ErrorHandler</code>
* type.
*
* Group: advanced
*
* @param errorHandler the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder errorHandler(org.springframework.util.ErrorHandler errorHandler) {
doSetProperty("errorHandler", errorHandler);
return this;
}
/**
* Specifies a org.springframework.util.ErrorHandler to be invoked in
* case of any uncaught exceptions thrown while processing a Message. By
* default these exceptions will be logged at the WARN level, if no
* errorHandler has been configured. You can configure logging level and
* whether stack traces should be logged using errorHandlerLoggingLevel
* and errorHandlerLogStackTrace options. This makes it much easier to
* configure, than having to code a custom errorHandler.
*
* The option will be converted to a
* <code>org.springframework.util.ErrorHandler</code> type.
*
* Group: advanced
*
* @param errorHandler the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder errorHandler(String errorHandler) {
doSetProperty("errorHandler", errorHandler);
return this;
}
/**
* Specifies the JMS Exception Listener that is to be notified of any
* underlying JMS exceptions.
*
* The option is a: <code>jakarta.jms.ExceptionListener</code> type.
*
* Group: advanced
*
* @param exceptionListener the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder exceptionListener(jakarta.jms.ExceptionListener exceptionListener) {
doSetProperty("exceptionListener", exceptionListener);
return this;
}
/**
* Specifies the JMS Exception Listener that is to be notified of any
* underlying JMS exceptions.
*
* The option will be converted to a
* <code>jakarta.jms.ExceptionListener</code> type.
*
* Group: advanced
*
* @param exceptionListener the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder exceptionListener(String exceptionListener) {
doSetProperty("exceptionListener", exceptionListener);
return this;
}
/**
* To use a custom HeaderFilterStrategy to filter header to and from
* Camel message.
*
* The option is a:
* <code>org.apache.camel.spi.HeaderFilterStrategy</code> type.
*
* Group: advanced
*
* @param headerFilterStrategy the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder headerFilterStrategy(org.apache.camel.spi.HeaderFilterStrategy headerFilterStrategy) {
doSetProperty("headerFilterStrategy", headerFilterStrategy);
return this;
}
/**
* To use a custom HeaderFilterStrategy to filter header to and from
* Camel message.
*
* The option will be converted to a
* <code>org.apache.camel.spi.HeaderFilterStrategy</code> type.
*
* Group: advanced
*
* @param headerFilterStrategy the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder headerFilterStrategy(String headerFilterStrategy) {
doSetProperty("headerFilterStrategy", headerFilterStrategy);
return this;
}
/**
* Specify the limit for the number of consumers that are allowed to be
* idle at any given time.
*
* The option is a: <code>int</code> type.
*
* Default: 1
* Group: advanced
*
* @param idleConsumerLimit the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder idleConsumerLimit(int idleConsumerLimit) {
doSetProperty("idleConsumerLimit", idleConsumerLimit);
return this;
}
/**
* Specify the limit for the number of consumers that are allowed to be
* idle at any given time.
*
* The option will be converted to a <code>int</code> type.
*
* Default: 1
* Group: advanced
*
* @param idleConsumerLimit the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder idleConsumerLimit(String idleConsumerLimit) {
doSetProperty("idleConsumerLimit", idleConsumerLimit);
return this;
}
/**
* Marks the consumer as idle after the specified number of idle
* receives have been reached. An idle receive is counted from the
* moment a null message is returned by the receiver after the potential
* setReceiveTimeout elapsed. This gives the opportunity to check if the
* idle task count exceeds setIdleTaskExecutionLimit and based on that
* decide if the task needs to be re-scheduled or not, saving resources
* that would otherwise be held. This setting differs from
* setMaxMessagesPerTask where the task is released and re-scheduled
* after this limit is reached, no matter if the received messages were
* null or non-null messages. This setting alone can be inflexible if
* one desires to have a large enough batch for each task but requires a
* quick(er) release from the moment there are no more messages to
* process. This setting differs from setIdleTaskExecutionLimit where
* this limit decides after how many iterations of being marked as idle,
* a task is released. For example: If setMaxMessagesPerTask is set to
* '500' and #setIdleReceivesPerTaskLimit is set to '60' and
* setReceiveTimeout is set to '1000' and setIdleTaskExecutionLimit is
* set to '1', then 500 messages per task would be processed unless
* there is a subsequent number of 60 idle messages received, the task
* would be marked as idle and released. This also means that after the
* last message was processed, the task would be released after 60
* seconds as long as no new messages appear.
*
* The option is a: <code>int</code> type.
*
* Group: advanced
*
* @param idleReceivesPerTaskLimit the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder idleReceivesPerTaskLimit(int idleReceivesPerTaskLimit) {
doSetProperty("idleReceivesPerTaskLimit", idleReceivesPerTaskLimit);
return this;
}
/**
* Marks the consumer as idle after the specified number of idle
* receives have been reached. An idle receive is counted from the
* moment a null message is returned by the receiver after the potential
* setReceiveTimeout elapsed. This gives the opportunity to check if the
* idle task count exceeds setIdleTaskExecutionLimit and based on that
* decide if the task needs to be re-scheduled or not, saving resources
* that would otherwise be held. This setting differs from
* setMaxMessagesPerTask where the task is released and re-scheduled
* after this limit is reached, no matter if the received messages were
* null or non-null messages. This setting alone can be inflexible if
* one desires to have a large enough batch for each task but requires a
* quick(er) release from the moment there are no more messages to
* process. This setting differs from setIdleTaskExecutionLimit where
* this limit decides after how many iterations of being marked as idle,
* a task is released. For example: If setMaxMessagesPerTask is set to
* '500' and #setIdleReceivesPerTaskLimit is set to '60' and
* setReceiveTimeout is set to '1000' and setIdleTaskExecutionLimit is
* set to '1', then 500 messages per task would be processed unless
* there is a subsequent number of 60 idle messages received, the task
* would be marked as idle and released. This also means that after the
* last message was processed, the task would be released after 60
* seconds as long as no new messages appear.
*
* The option will be converted to a <code>int</code> type.
*
* Group: advanced
*
* @param idleReceivesPerTaskLimit the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder idleReceivesPerTaskLimit(String idleReceivesPerTaskLimit) {
doSetProperty("idleReceivesPerTaskLimit", idleReceivesPerTaskLimit);
return this;
}
/**
* Specifies the limit for idle executions of a receive task, not having
* received any message within its execution. If this limit is reached,
* the task will shut down and leave receiving to other executing tasks
* (in the case of dynamic scheduling; see the maxConcurrentConsumers
* setting). There is additional doc available from Spring.
*
* The option is a: <code>int</code> type.
*
* Default: 1
* Group: advanced
*
* @param idleTaskExecutionLimit the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder idleTaskExecutionLimit(int idleTaskExecutionLimit) {
doSetProperty("idleTaskExecutionLimit", idleTaskExecutionLimit);
return this;
}
/**
* Specifies the limit for idle executions of a receive task, not having
* received any message within its execution. If this limit is reached,
* the task will shut down and leave receiving to other executing tasks
* (in the case of dynamic scheduling; see the maxConcurrentConsumers
* setting). There is additional doc available from Spring.
*
* The option will be converted to a <code>int</code> type.
*
* Default: 1
* Group: advanced
*
* @param idleTaskExecutionLimit the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder idleTaskExecutionLimit(String idleTaskExecutionLimit) {
doSetProperty("idleTaskExecutionLimit", idleTaskExecutionLimit);
return this;
}
/**
* Whether to include all JMSX prefixed properties when mapping from JMS
* to Camel Message. Setting this to true will include properties such
* as JMSXAppID, and JMSXUserID etc. Note: If you are using a custom
* headerFilterStrategy then this option does not apply.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param includeAllJMSXProperties the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder includeAllJMSXProperties(boolean includeAllJMSXProperties) {
doSetProperty("includeAllJMSXProperties", includeAllJMSXProperties);
return this;
}
/**
* Whether to include all JMSX prefixed properties when mapping from JMS
* to Camel Message. Setting this to true will include properties such
* as JMSXAppID, and JMSXUserID etc. Note: If you are using a custom
* headerFilterStrategy then this option does not apply.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param includeAllJMSXProperties the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder includeAllJMSXProperties(String includeAllJMSXProperties) {
doSetProperty("includeAllJMSXProperties", includeAllJMSXProperties);
return this;
}
/**
* Pluggable strategy for encoding and decoding JMS keys so they can be
* compliant with the JMS specification. Camel provides two
* implementations out of the box: default and passthrough. The default
* strategy will safely marshal dots and hyphens (. and -). The
* passthrough strategy leaves the key as is. Can be used for JMS
* brokers which do not care whether JMS header keys contain illegal
* characters. You can provide your own implementation of the
* org.apache.camel.component.jms.JmsKeyFormatStrategy and refer to it
* using the # notation.
*
* The option is a:
* <code>org.apache.camel.component.jms.JmsKeyFormatStrategy</code>
* type.
*
* Group: advanced
*
* @param jmsKeyFormatStrategy the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder jmsKeyFormatStrategy(org.apache.camel.component.jms.JmsKeyFormatStrategy jmsKeyFormatStrategy) {
doSetProperty("jmsKeyFormatStrategy", jmsKeyFormatStrategy);
return this;
}
/**
* Pluggable strategy for encoding and decoding JMS keys so they can be
* compliant with the JMS specification. Camel provides two
* implementations out of the box: default and passthrough. The default
* strategy will safely marshal dots and hyphens (. and -). The
* passthrough strategy leaves the key as is. Can be used for JMS
* brokers which do not care whether JMS header keys contain illegal
* characters. You can provide your own implementation of the
* org.apache.camel.component.jms.JmsKeyFormatStrategy and refer to it
* using the # notation.
*
* The option will be converted to a
* <code>org.apache.camel.component.jms.JmsKeyFormatStrategy</code>
* type.
*
* Group: advanced
*
* @param jmsKeyFormatStrategy the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder jmsKeyFormatStrategy(String jmsKeyFormatStrategy) {
doSetProperty("jmsKeyFormatStrategy", jmsKeyFormatStrategy);
return this;
}
/**
* Specifies whether Camel should auto map the received JMS message to a
* suited payload type, such as jakarta.jms.TextMessage to a String etc.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param mapJmsMessage the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder mapJmsMessage(boolean mapJmsMessage) {
doSetProperty("mapJmsMessage", mapJmsMessage);
return this;
}
/**
* Specifies whether Camel should auto map the received JMS message to a
* suited payload type, such as jakarta.jms.TextMessage to a String etc.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param mapJmsMessage the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder mapJmsMessage(String mapJmsMessage) {
doSetProperty("mapJmsMessage", mapJmsMessage);
return this;
}
/**
* The number of messages per task. -1 is unlimited. If you use a range
* for concurrent consumers (eg min max), then this option can be used
* to set a value to eg 100 to control how fast the consumers will
* shrink when less work is required.
*
* The option is a: <code>int</code> type.
*
* Default: -1
* Group: advanced
*
* @param maxMessagesPerTask the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder maxMessagesPerTask(int maxMessagesPerTask) {
doSetProperty("maxMessagesPerTask", maxMessagesPerTask);
return this;
}
/**
* The number of messages per task. -1 is unlimited. If you use a range
* for concurrent consumers (eg min max), then this option can be used
* to set a value to eg 100 to control how fast the consumers will
* shrink when less work is required.
*
* The option will be converted to a <code>int</code> type.
*
* Default: -1
* Group: advanced
*
* @param maxMessagesPerTask the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder maxMessagesPerTask(String maxMessagesPerTask) {
doSetProperty("maxMessagesPerTask", maxMessagesPerTask);
return this;
}
/**
* To use a custom Spring
* org.springframework.jms.support.converter.MessageConverter so you can
* be in control how to map to/from a jakarta.jms.Message.
*
* The option is a:
* <code>org.springframework.jms.support.converter.MessageConverter</code> type.
*
* Group: advanced
*
* @param messageConverter the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder messageConverter(org.springframework.jms.support.converter.MessageConverter messageConverter) {
doSetProperty("messageConverter", messageConverter);
return this;
}
/**
* To use a custom Spring
* org.springframework.jms.support.converter.MessageConverter so you can
* be in control how to map to/from a jakarta.jms.Message.
*
* The option will be converted to a
* <code>org.springframework.jms.support.converter.MessageConverter</code> type.
*
* Group: advanced
*
* @param messageConverter the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder messageConverter(String messageConverter) {
doSetProperty("messageConverter", messageConverter);
return this;
}
/**
* To use the given MessageCreatedStrategy which are invoked when Camel
* creates new instances of jakarta.jms.Message objects when Camel is
* sending a JMS message.
*
* The option is a:
* <code>org.apache.camel.component.jms.MessageCreatedStrategy</code>
* type.
*
* Group: advanced
*
* @param messageCreatedStrategy the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder messageCreatedStrategy(org.apache.camel.component.jms.MessageCreatedStrategy messageCreatedStrategy) {
doSetProperty("messageCreatedStrategy", messageCreatedStrategy);
return this;
}
/**
* To use the given MessageCreatedStrategy which are invoked when Camel
* creates new instances of jakarta.jms.Message objects when Camel is
* sending a JMS message.
*
* The option will be converted to a
* <code>org.apache.camel.component.jms.MessageCreatedStrategy</code>
* type.
*
* Group: advanced
*
* @param messageCreatedStrategy the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder messageCreatedStrategy(String messageCreatedStrategy) {
doSetProperty("messageCreatedStrategy", messageCreatedStrategy);
return this;
}
/**
* When sending, specifies whether message IDs should be added. This is
* just an hint to the JMS broker. If the JMS provider accepts this
* hint, these messages must have the message ID set to null; if the
* provider ignores the hint, the message ID must be set to its normal
* unique value.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param messageIdEnabled the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder messageIdEnabled(boolean messageIdEnabled) {
doSetProperty("messageIdEnabled", messageIdEnabled);
return this;
}
/**
* When sending, specifies whether message IDs should be added. This is
* just an hint to the JMS broker. If the JMS provider accepts this
* hint, these messages must have the message ID set to null; if the
* provider ignores the hint, the message ID must be set to its normal
* unique value.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param messageIdEnabled the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder messageIdEnabled(String messageIdEnabled) {
doSetProperty("messageIdEnabled", messageIdEnabled);
return this;
}
/**
* Registry ID of the MessageListenerContainerFactory used to determine
* what
* org.springframework.jms.listener.AbstractMessageListenerContainer to
* use to consume messages. Setting this will automatically set
* consumerType to Custom.
*
* The option is a:
* <code>org.apache.camel.component.jms.MessageListenerContainerFactory</code> type.
*
* Group: advanced
*
* @param messageListenerContainerFactory the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder messageListenerContainerFactory(org.apache.camel.component.jms.MessageListenerContainerFactory messageListenerContainerFactory) {
doSetProperty("messageListenerContainerFactory", messageListenerContainerFactory);
return this;
}
/**
* Registry ID of the MessageListenerContainerFactory used to determine
* what
* org.springframework.jms.listener.AbstractMessageListenerContainer to
* use to consume messages. Setting this will automatically set
* consumerType to Custom.
*
* The option will be converted to a
* <code>org.apache.camel.component.jms.MessageListenerContainerFactory</code> type.
*
* Group: advanced
*
* @param messageListenerContainerFactory the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder messageListenerContainerFactory(String messageListenerContainerFactory) {
doSetProperty("messageListenerContainerFactory", messageListenerContainerFactory);
return this;
}
/**
* Specifies whether timestamps should be enabled by default on sending
* messages. This is just an hint to the JMS broker. If the JMS provider
* accepts this hint, these messages must have the timestamp set to
* zero; if the provider ignores the hint the timestamp must be set to
* its normal value.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param messageTimestampEnabled the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder messageTimestampEnabled(boolean messageTimestampEnabled) {
doSetProperty("messageTimestampEnabled", messageTimestampEnabled);
return this;
}
/**
* Specifies whether timestamps should be enabled by default on sending
* messages. This is just an hint to the JMS broker. If the JMS provider
* accepts this hint, these messages must have the timestamp set to
* zero; if the provider ignores the hint the timestamp must be set to
* its normal value.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param messageTimestampEnabled the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder messageTimestampEnabled(String messageTimestampEnabled) {
doSetProperty("messageTimestampEnabled", messageTimestampEnabled);
return this;
}
/**
* Specifies whether to inhibit the delivery of messages published by
* its own connection.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param pubSubNoLocal the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder pubSubNoLocal(boolean pubSubNoLocal) {
doSetProperty("pubSubNoLocal", pubSubNoLocal);
return this;
}
/**
* Specifies whether to inhibit the delivery of messages published by
* its own connection.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param pubSubNoLocal the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder pubSubNoLocal(String pubSubNoLocal) {
doSetProperty("pubSubNoLocal", pubSubNoLocal);
return this;
}
/**
* The timeout for receiving messages (in milliseconds).
*
* The option is a: <code>long</code> type.
*
* Default: 1000
* Group: advanced
*
* @param receiveTimeout the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder receiveTimeout(long receiveTimeout) {
doSetProperty("receiveTimeout", receiveTimeout);
return this;
}
/**
* The timeout for receiving messages (in milliseconds).
*
* The option will be converted to a <code>long</code> type.
*
* Default: 1000
* Group: advanced
*
* @param receiveTimeout the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder receiveTimeout(String receiveTimeout) {
doSetProperty("receiveTimeout", receiveTimeout);
return this;
}
/**
* Specifies the interval between recovery attempts, i.e. when a
* connection is being refreshed, in milliseconds. The default is 5000
* ms, that is, 5 seconds.
*
* The option is a: <code>long</code> type.
*
* Default: 5000
* Group: advanced
*
* @param recoveryInterval the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder recoveryInterval(long recoveryInterval) {
doSetProperty("recoveryInterval", recoveryInterval);
return this;
}
/**
* Specifies the interval between recovery attempts, i.e. when a
* connection is being refreshed, in milliseconds. The default is 5000
* ms, that is, 5 seconds.
*
* The option will be converted to a <code>long</code> type.
*
* Default: 5000
* Group: advanced
*
* @param recoveryInterval the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder recoveryInterval(String recoveryInterval) {
doSetProperty("recoveryInterval", recoveryInterval);
return this;
}
/**
* Configures how often Camel should check for timed out Exchanges when
* doing request/reply over JMS. By default Camel checks once per
* second. But if you must react faster when a timeout occurs, then you
* can lower this interval, to check more frequently. The timeout is
* determined by the option requestTimeout.
*
* The option is a: <code>long</code> type.
*
* Default: 1000
* Group: advanced
*
* @param requestTimeoutCheckerInterval the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder requestTimeoutCheckerInterval(long requestTimeoutCheckerInterval) {
doSetProperty("requestTimeoutCheckerInterval", requestTimeoutCheckerInterval);
return this;
}
/**
* Configures how often Camel should check for timed out Exchanges when
* doing request/reply over JMS. By default Camel checks once per
* second. But if you must react faster when a timeout occurs, then you
* can lower this interval, to check more frequently. The timeout is
* determined by the option requestTimeout.
*
* The option will be converted to a <code>long</code> type.
*
* Default: 1000
* Group: advanced
*
* @param requestTimeoutCheckerInterval the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder requestTimeoutCheckerInterval(String requestTimeoutCheckerInterval) {
doSetProperty("requestTimeoutCheckerInterval", requestTimeoutCheckerInterval);
return this;
}
/**
* Sets whether synchronous processing should be strictly used.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param synchronous the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder synchronous(boolean synchronous) {
doSetProperty("synchronous", synchronous);
return this;
}
/**
* Sets whether synchronous processing should be strictly used.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param synchronous the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder synchronous(String synchronous) {
doSetProperty("synchronous", synchronous);
return this;
}
/**
* A pluggable TemporaryQueueResolver that allows you to use your own
* resolver for creating temporary queues (some messaging systems has
* special requirements for creating temporary queues).
*
* The option is a:
* <code>org.apache.camel.component.jms.TemporaryQueueResolver</code>
* type.
*
* Group: advanced
*
* @param temporaryQueueResolver the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder temporaryQueueResolver(org.apache.camel.component.jms.TemporaryQueueResolver temporaryQueueResolver) {
doSetProperty("temporaryQueueResolver", temporaryQueueResolver);
return this;
}
/**
* A pluggable TemporaryQueueResolver that allows you to use your own
* resolver for creating temporary queues (some messaging systems has
* special requirements for creating temporary queues).
*
* The option will be converted to a
* <code>org.apache.camel.component.jms.TemporaryQueueResolver</code>
* type.
*
* Group: advanced
*
* @param temporaryQueueResolver the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder temporaryQueueResolver(String temporaryQueueResolver) {
doSetProperty("temporaryQueueResolver", temporaryQueueResolver);
return this;
}
/**
* If enabled and you are using Request Reply messaging (InOut) and an
* Exchange failed on the consumer side, then the caused Exception will
* be send back in response as a jakarta.jms.ObjectMessage. If the
* client is Camel, the returned Exception is rethrown. This allows you
* to use Camel JMS as a bridge in your routing - for example, using
* persistent queues to enable robust routing. Notice that if you also
* have transferExchange enabled, this option takes precedence. The
* caught exception is required to be serializable. The original
* Exception on the consumer side can be wrapped in an outer exception
* such as org.apache.camel.RuntimeCamelException when returned to the
* producer. Use this with caution as the data is using Java Object
* serialization and requires the received to be able to deserialize the
* data at Class level, which forces a strong coupling between the
* producers and consumer!.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param transferException the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder transferException(boolean transferException) {
doSetProperty("transferException", transferException);
return this;
}
/**
* If enabled and you are using Request Reply messaging (InOut) and an
* Exchange failed on the consumer side, then the caused Exception will
* be send back in response as a jakarta.jms.ObjectMessage. If the
* client is Camel, the returned Exception is rethrown. This allows you
* to use Camel JMS as a bridge in your routing - for example, using
* persistent queues to enable robust routing. Notice that if you also
* have transferExchange enabled, this option takes precedence. The
* caught exception is required to be serializable. The original
* Exception on the consumer side can be wrapped in an outer exception
* such as org.apache.camel.RuntimeCamelException when returned to the
* producer. Use this with caution as the data is using Java Object
* serialization and requires the received to be able to deserialize the
* data at Class level, which forces a strong coupling between the
* producers and consumer!.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param transferException the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder transferException(String transferException) {
doSetProperty("transferException", transferException);
return this;
}
/**
* You can transfer the exchange over the wire instead of just the body
* and headers. The following fields are transferred: In body, Out body,
* Fault body, In headers, Out headers, Fault headers, exchange
* properties, exchange exception. This requires that the objects are
* serializable. Camel will exclude any non-serializable objects and log
* it at WARN level. You must enable this option on both the producer
* and consumer side, so Camel knows the payloads is an Exchange and not
* a regular payload. Use this with caution as the data is using Java
* Object serialization and requires the receiver to be able to
* deserialize the data at Class level, which forces a strong coupling
* between the producers and consumers having to use compatible Camel
* versions!.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param transferExchange the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder transferExchange(boolean transferExchange) {
doSetProperty("transferExchange", transferExchange);
return this;
}
/**
* You can transfer the exchange over the wire instead of just the body
* and headers. The following fields are transferred: In body, Out body,
* Fault body, In headers, Out headers, Fault headers, exchange
* properties, exchange exception. This requires that the objects are
* serializable. Camel will exclude any non-serializable objects and log
* it at WARN level. You must enable this option on both the producer
* and consumer side, so Camel knows the payloads is an Exchange and not
* a regular payload. Use this with caution as the data is using Java
* Object serialization and requires the receiver to be able to
* deserialize the data at Class level, which forces a strong coupling
* between the producers and consumers having to use compatible Camel
* versions!.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param transferExchange the value to set
* @return the dsl builder
*/
default AdvancedActiveMQ6EndpointProducerBuilder transferExchange(String transferExchange) {
doSetProperty("transferExchange", transferExchange);
return this;
}
        /**
         * Specifies whether JMSMessageID should always be used as
         * JMSCorrelationID for InOut messages.
         *
         * The option is a: <code>boolean</code> type.
         *
         * Default: false
         * Group: advanced
         *
         * @param useMessageIDAsCorrelationID the value to set
         * @return the dsl builder
         */
        default AdvancedActiveMQ6EndpointProducerBuilder useMessageIDAsCorrelationID(boolean useMessageIDAsCorrelationID) {
            doSetProperty("useMessageIDAsCorrelationID", useMessageIDAsCorrelationID);
            return this;
        }
        /**
         * Specifies whether JMSMessageID should always be used as
         * JMSCorrelationID for InOut messages.
         *
         * The option will be converted to a <code>boolean</code> type.
         *
         * Default: false
         * Group: advanced
         *
         * @param useMessageIDAsCorrelationID the value to set, as a String
         * that will be converted to boolean
         * @return the dsl builder
         */
        default AdvancedActiveMQ6EndpointProducerBuilder useMessageIDAsCorrelationID(String useMessageIDAsCorrelationID) {
            doSetProperty("useMessageIDAsCorrelationID", useMessageIDAsCorrelationID);
            return this;
        }
        /**
         * Number of times to wait for provisional correlation id to be updated
         * to the actual correlation id when doing request/reply over JMS and
         * when the option useMessageIDAsCorrelationID is enabled. The sleep
         * interval between the individual attempts is configured via
         * waitForProvisionCorrelationToBeUpdatedThreadSleepingTime.
         *
         * The option is a: <code>int</code> type.
         *
         * Default: 50
         * Group: advanced
         *
         * @param waitForProvisionCorrelationToBeUpdatedCounter the value to set
         * @return the dsl builder
         */
        default AdvancedActiveMQ6EndpointProducerBuilder waitForProvisionCorrelationToBeUpdatedCounter(int waitForProvisionCorrelationToBeUpdatedCounter) {
            doSetProperty("waitForProvisionCorrelationToBeUpdatedCounter", waitForProvisionCorrelationToBeUpdatedCounter);
            return this;
        }
        /**
         * Number of times to wait for provisional correlation id to be updated
         * to the actual correlation id when doing request/reply over JMS and
         * when the option useMessageIDAsCorrelationID is enabled. The sleep
         * interval between the individual attempts is configured via
         * waitForProvisionCorrelationToBeUpdatedThreadSleepingTime.
         *
         * The option will be converted to a <code>int</code> type.
         *
         * Default: 50
         * Group: advanced
         *
         * @param waitForProvisionCorrelationToBeUpdatedCounter the value to
         * set, as a String that will be converted to int
         * @return the dsl builder
         */
        default AdvancedActiveMQ6EndpointProducerBuilder waitForProvisionCorrelationToBeUpdatedCounter(String waitForProvisionCorrelationToBeUpdatedCounter) {
            doSetProperty("waitForProvisionCorrelationToBeUpdatedCounter", waitForProvisionCorrelationToBeUpdatedCounter);
            return this;
        }
        /**
         * Interval in millis to sleep each time while waiting for provisional
         * correlation id to be updated. The number of attempts is bounded by
         * waitForProvisionCorrelationToBeUpdatedCounter.
         *
         * The option is a: <code>long</code> type.
         *
         * Default: 100
         * Group: advanced
         *
         * @param waitForProvisionCorrelationToBeUpdatedThreadSleepingTime the
         * value to set
         * @return the dsl builder
         */
        default AdvancedActiveMQ6EndpointProducerBuilder waitForProvisionCorrelationToBeUpdatedThreadSleepingTime(long waitForProvisionCorrelationToBeUpdatedThreadSleepingTime) {
            doSetProperty("waitForProvisionCorrelationToBeUpdatedThreadSleepingTime", waitForProvisionCorrelationToBeUpdatedThreadSleepingTime);
            return this;
        }
        /**
         * Interval in millis to sleep each time while waiting for provisional
         * correlation id to be updated. The number of attempts is bounded by
         * waitForProvisionCorrelationToBeUpdatedCounter.
         *
         * The option will be converted to a <code>long</code> type.
         *
         * Default: 100
         * Group: advanced
         *
         * @param waitForProvisionCorrelationToBeUpdatedThreadSleepingTime the
         * value to set, as a String that will be converted to long
         * @return the dsl builder
         */
        default AdvancedActiveMQ6EndpointProducerBuilder waitForProvisionCorrelationToBeUpdatedThreadSleepingTime(String waitForProvisionCorrelationToBeUpdatedThreadSleepingTime) {
            doSetProperty("waitForProvisionCorrelationToBeUpdatedThreadSleepingTime", waitForProvisionCorrelationToBeUpdatedThreadSleepingTime);
            return this;
        }
        /**
         * Number of times to wait for temporary replyTo queue to be created and
         * ready when doing request/reply over JMS. The sleep interval between
         * the individual attempts is configured via
         * waitForTemporaryReplyToToBeUpdatedThreadSleepingTime.
         *
         * The option is a: <code>int</code> type.
         *
         * Default: 200
         * Group: advanced
         *
         * @param waitForTemporaryReplyToToBeUpdatedCounter the value to set
         * @return the dsl builder
         */
        default AdvancedActiveMQ6EndpointProducerBuilder waitForTemporaryReplyToToBeUpdatedCounter(int waitForTemporaryReplyToToBeUpdatedCounter) {
            doSetProperty("waitForTemporaryReplyToToBeUpdatedCounter", waitForTemporaryReplyToToBeUpdatedCounter);
            return this;
        }
        /**
         * Number of times to wait for temporary replyTo queue to be created and
         * ready when doing request/reply over JMS. The sleep interval between
         * the individual attempts is configured via
         * waitForTemporaryReplyToToBeUpdatedThreadSleepingTime.
         *
         * The option will be converted to a <code>int</code> type.
         *
         * Default: 200
         * Group: advanced
         *
         * @param waitForTemporaryReplyToToBeUpdatedCounter the value to set,
         * as a String that will be converted to int
         * @return the dsl builder
         */
        default AdvancedActiveMQ6EndpointProducerBuilder waitForTemporaryReplyToToBeUpdatedCounter(String waitForTemporaryReplyToToBeUpdatedCounter) {
            doSetProperty("waitForTemporaryReplyToToBeUpdatedCounter", waitForTemporaryReplyToToBeUpdatedCounter);
            return this;
        }
        /**
         * Interval in millis to sleep each time while waiting for temporary
         * replyTo queue to be ready. The number of attempts is bounded by
         * waitForTemporaryReplyToToBeUpdatedCounter.
         *
         * The option is a: <code>long</code> type.
         *
         * Default: 100
         * Group: advanced
         *
         * @param waitForTemporaryReplyToToBeUpdatedThreadSleepingTime the value
         * to set
         * @return the dsl builder
         */
        default AdvancedActiveMQ6EndpointProducerBuilder waitForTemporaryReplyToToBeUpdatedThreadSleepingTime(long waitForTemporaryReplyToToBeUpdatedThreadSleepingTime) {
            doSetProperty("waitForTemporaryReplyToToBeUpdatedThreadSleepingTime", waitForTemporaryReplyToToBeUpdatedThreadSleepingTime);
            return this;
        }
        /**
         * Interval in millis to sleep each time while waiting for temporary
         * replyTo queue to be ready. The number of attempts is bounded by
         * waitForTemporaryReplyToToBeUpdatedCounter.
         *
         * The option will be converted to a <code>long</code> type.
         *
         * Default: 100
         * Group: advanced
         *
         * @param waitForTemporaryReplyToToBeUpdatedThreadSleepingTime the value
         * to set, as a String that will be converted to long
         * @return the dsl builder
         */
        default AdvancedActiveMQ6EndpointProducerBuilder waitForTemporaryReplyToToBeUpdatedThreadSleepingTime(String waitForTemporaryReplyToToBeUpdatedThreadSleepingTime) {
            doSetProperty("waitForTemporaryReplyToToBeUpdatedThreadSleepingTime", waitForTemporaryReplyToToBeUpdatedThreadSleepingTime);
            return this;
        }
        /**
         * If true, Camel will create a JmsTransactionManager, if there is no
         * transactionManager injected when option transacted=true.
         *
         * The option is a: <code>boolean</code> type.
         *
         * Default: true
         * Group: transaction (advanced)
         *
         * @param lazyCreateTransactionManager the value to set
         * @return the dsl builder
         */
        default AdvancedActiveMQ6EndpointProducerBuilder lazyCreateTransactionManager(boolean lazyCreateTransactionManager) {
            doSetProperty("lazyCreateTransactionManager", lazyCreateTransactionManager);
            return this;
        }
        /**
         * If true, Camel will create a JmsTransactionManager, if there is no
         * transactionManager injected when option transacted=true.
         *
         * The option will be converted to a <code>boolean</code> type.
         *
         * Default: true
         * Group: transaction (advanced)
         *
         * @param lazyCreateTransactionManager the value to set, as a String
         * that will be converted to boolean
         * @return the dsl builder
         */
        default AdvancedActiveMQ6EndpointProducerBuilder lazyCreateTransactionManager(String lazyCreateTransactionManager) {
            doSetProperty("lazyCreateTransactionManager", lazyCreateTransactionManager);
            return this;
        }
        /**
         * The Spring transaction manager to use.
         *
         * The option is a:
         * <code>org.springframework.transaction.PlatformTransactionManager</code> type.
         *
         * Group: transaction (advanced)
         *
         * @param transactionManager the value to set
         * @return the dsl builder
         */
        default AdvancedActiveMQ6EndpointProducerBuilder transactionManager(org.springframework.transaction.PlatformTransactionManager transactionManager) {
            doSetProperty("transactionManager", transactionManager);
            return this;
        }
        /**
         * The Spring transaction manager to use.
         *
         * The option will be converted to a
         * <code>org.springframework.transaction.PlatformTransactionManager</code> type.
         *
         * NOTE(review): when supplied as a String this is presumably a
         * reference to a bean in the Camel registry — confirm against the
         * Camel JMS component documentation.
         *
         * Group: transaction (advanced)
         *
         * @param transactionManager the value to set
         * @return the dsl builder
         */
        default AdvancedActiveMQ6EndpointProducerBuilder transactionManager(String transactionManager) {
            doSetProperty("transactionManager", transactionManager);
            return this;
        }
        /**
         * The name of the transaction to use.
         *
         * The option is a: <code>java.lang.String</code> type.
         *
         * NOTE(review): presumably only relevant when transacted mode is
         * enabled — confirm against the Camel JMS component documentation.
         *
         * Group: transaction (advanced)
         *
         * @param transactionName the value to set
         * @return the dsl builder
         */
        default AdvancedActiveMQ6EndpointProducerBuilder transactionName(String transactionName) {
            doSetProperty("transactionName", transactionName);
            return this;
        }
        /**
         * The timeout value of the transaction (in seconds), if using
         * transacted mode.
         *
         * The option is a: <code>int</code> type.
         *
         * Default: -1
         * Group: transaction (advanced)
         *
         * @param transactionTimeout the value to set, in seconds (the default
         * of -1 presumably means no explicit timeout — confirm against the
         * Camel JMS component documentation)
         * @return the dsl builder
         */
        default AdvancedActiveMQ6EndpointProducerBuilder transactionTimeout(int transactionTimeout) {
            doSetProperty("transactionTimeout", transactionTimeout);
            return this;
        }
        /**
         * The timeout value of the transaction (in seconds), if using
         * transacted mode.
         *
         * The option will be converted to a <code>int</code> type.
         *
         * Default: -1
         * Group: transaction (advanced)
         *
         * @param transactionTimeout the value to set, as a String that will be
         * converted to int
         * @return the dsl builder
         */
        default AdvancedActiveMQ6EndpointProducerBuilder transactionTimeout(String transactionTimeout) {
            doSetProperty("transactionTimeout", transactionTimeout);
            return this;
        }
}
/**
* Builder for endpoint for the ActiveMQ 6.x component.
*/
public | AdvancedActiveMQ6EndpointProducerBuilder |
java | google__guava | guava-tests/test/com/google/common/collect/ImmutableMapTest.java | {
"start": 32519,
"end": 32928
} | class ____ {}
Map<String, NonSerializableClass> map =
RegularImmutableMap.fromEntries(ImmutableMap.entryOf("one", new NonSerializableClass()));
Set<String> set = map.keySet();
LenientSerializableTester.reserializeAndAssertLenient(set);
}
@J2ktIncompatible
@GwtIncompatible // SerializableTester
public void testKeySetIsSerializable_jdkBackedImmutableMap() {
| NonSerializableClass |
java | apache__rocketmq | common/src/test/java/org/apache/rocketmq/common/help/FAQUrlTest.java | {
"start": 922,
"end": 1539
} | class ____ {
@Test
public void testSuggestTodo() {
String expected = "\nSee " + FAQUrl.DEFAULT_FAQ_URL + " for further details.";
String actual = FAQUrl.suggestTodo(FAQUrl.DEFAULT_FAQ_URL);
assertEquals(expected, actual);
}
@Test
public void testAttachDefaultURL() {
String errorMsg = "errorMsg";
String expected = errorMsg
+ "\nFor more information, please visit the url, "
+ FAQUrl.UNEXPECTED_EXCEPTION_URL;
String actual = FAQUrl.attachDefaultURL(errorMsg);
assertEquals(expected, actual);
}
}
| FAQUrlTest |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java | {
"start": 57194,
"end": 57316
} | class ____ periodically attempts creating a znode to ensure that
* this RM continues to be the Active.
*/
private | that |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/component/file/FileConsumerPreMoveTest.java | {
"start": 2791,
"end": 3212
} | class ____ implements Processor {
@Override
public void process(Exchange exchange) {
Path testDirectory = (Path) exchange.getContext().getRegistry()
.lookupByName("testDirectory");
Path file = testDirectory.resolve("work/work-" + TEST_FILE_NAME);
assertTrue(Files.exists(file), "Pre move file should exist");
}
}
}
| MyPreMoveCheckerProcessor |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/IgnoreWithDeserTest.java | {
"start": 746,
"end": 1213
} | class ____
{
int _x = 0;
int _y = 0;
public void setX(int value) { _x = value; }
@JsonIgnore public void setY(int value) { _y = value; }
// Just igoring won't help a lot here; let's define a replacement
// so that we won't get an exception for "unknown field"
@JsonProperty("y") void foobar(int value) {
; // nop
}
}
@JsonIgnoreProperties({ "z" })
final static | SizeClassIgnore |
java | quarkusio__quarkus | extensions/hibernate-search-orm-elasticsearch/runtime/src/main/java/io/quarkus/hibernate/search/orm/elasticsearch/runtime/HibernateSearchElasticsearchRuntimeConfigPersistenceUnit.java | {
"start": 2824,
"end": 3064
} | class ____ a bit unnecessary
// because we could just use @WithName("plan.synchronization.strategy")
// but that leads to bugs
// see https://github.com/quarkusio/quarkus/pull/34251#issuecomment-1611273375
@ConfigGroup
| feels |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/engine/internal/AfterTransactionCompletionProcessQueue.java | {
"start": 688,
"end": 3008
} | class ____
extends AbstractTransactionCompletionProcessQueue<AfterCompletionCallback> {
private final Set<String> querySpacesToInvalidate = new HashSet<>();
AfterTransactionCompletionProcessQueue(SharedSessionContractImplementor session) {
super( session );
}
void addSpaceToInvalidate(String space) {
querySpacesToInvalidate.add( space );
}
@Override
boolean hasActions() {
return super.hasActions() || !querySpacesToInvalidate.isEmpty();
}
void afterTransactionCompletion(boolean success) {
AfterCompletionCallback process;
while ( (process = processes.poll()) != null ) {
try {
process.doAfterTransactionCompletion( success, session );
}
catch (CacheException ce) {
CORE_LOGGER.unableToReleaseCacheLock( ce );
// continue loop
}
catch (Exception e) {
throw new HibernateException(
"Unable to perform afterTransactionCompletion callback: " + e.getMessage(), e );
}
}
final SessionFactoryImplementor factory = session.getFactory();
if ( factory.getSessionFactoryOptions().isQueryCacheEnabled() ) {
factory.getCache().getTimestampsCache()
.invalidate( querySpacesToInvalidate.toArray( new String[0] ), session );
}
querySpacesToInvalidate.clear();
}
void executePendingBulkOperationCleanUpActions() {
AfterCompletionCallback process;
boolean hasPendingBulkOperationCleanUpActions = false;
while ( ( process = processes.poll() ) != null ) {
if ( process instanceof BulkOperationCleanupAction.BulkOperationCleanUpAfterTransactionCompletionProcess ) {
try {
hasPendingBulkOperationCleanUpActions = true;
process.doAfterTransactionCompletion( true, session );
}
catch (CacheException ce) {
CORE_LOGGER.unableToReleaseCacheLock( ce );
// continue loop
}
catch (Exception e) {
throw new HibernateException(
"Unable to perform afterTransactionCompletion callback: " + e.getMessage(),
e
);
}
}
}
if ( hasPendingBulkOperationCleanUpActions ) {
if ( session.getFactory().getSessionFactoryOptions().isQueryCacheEnabled() ) {
session.getFactory().getCache().getTimestampsCache().invalidate(
querySpacesToInvalidate.toArray( new String[0] ),
session
);
}
querySpacesToInvalidate.clear();
}
}
}
| AfterTransactionCompletionProcessQueue |
java | quarkusio__quarkus | extensions/liquibase/liquibase/deployment/src/test/java/io/quarkus/liquibase/test/LiquibaseExtensionMigrateAtStartYamlChangeLogTest.java | {
"start": 444,
"end": 1593
} | class ____ {
// Quarkus built object
@Inject
LiquibaseFactory liquibaseFactory;
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addAsResource("db/yaml/changeLog.yaml")
.addAsResource("db/yaml/create-tables.yaml")
.addAsResource("db/yaml/test/test.yaml")
.addAsResource("migrate-at-start-yaml-config.properties", "application.properties"));
@Test
@DisplayName("Migrates at start with change log config correctly")
public void testLiquibaseYamlChangeLog() throws Exception {
try (Liquibase liquibase = liquibaseFactory.createLiquibase()) {
List<ChangeSetStatus> status = liquibase.getChangeSetStatuses(liquibaseFactory.createContexts(),
liquibaseFactory.createLabels());
assertNotNull(status);
assertEquals(2, status.size());
assertFalse(status.get(0).getWillRun());
assertFalse(status.get(1).getWillRun());
}
}
}
| LiquibaseExtensionMigrateAtStartYamlChangeLogTest |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/LauncherExitCodes.java | {
"start": 1687,
"end": 5069
} | interface ____ {
/**
* Success: {@value}.
*/
int EXIT_SUCCESS = 0;
/**
* Generic "false/fail" response: {@value}.
* The operation worked but the result was not "true" from the viewpoint
* of the executed code.
*/
int EXIT_FAIL = -1;
/**
* Exit code when a client requested service termination: {@value}.
*/
int EXIT_CLIENT_INITIATED_SHUTDOWN = 1;
/**
* Exit code when targets could not be launched: {@value}.
*/
int EXIT_TASK_LAUNCH_FAILURE = 2;
/**
* Exit code when a control-C, kill -3, signal was picked up: {@value}.
*/
int EXIT_INTERRUPTED = 3;
/**
* Exit code when something happened but we can't be specific: {@value}.
*/
int EXIT_OTHER_FAILURE = 5;
/**
* Exit code when the command line doesn't parse: {@value}, or
* when it is otherwise invalid.
* <p>
* Approximate HTTP equivalent: {@code 400 Bad Request}
*/
int EXIT_COMMAND_ARGUMENT_ERROR = 40;
/**
* The request requires user authentication: {@value}.
* <p>
* approximate HTTP equivalent: Approximate HTTP equivalent: {@code 401 Unauthorized}
*/
int EXIT_UNAUTHORIZED = 41;
/**
* Exit code when a usage message was printed: {@value}.
*/
int EXIT_USAGE = 42;
/**
* Forbidden action: {@value}.
* <p>
* Approximate HTTP equivalent: Approximate HTTP equivalent: {@code 403: Forbidden}
*/
int EXIT_FORBIDDEN = 43;
/**
* Something was not found: {@value}.
* <p>
* Approximate HTTP equivalent: {@code 404: Not Found}
*/
int EXIT_NOT_FOUND = 44;
/**
* The operation is not allowed: {@value}.
* <p>
* Approximate HTTP equivalent: {@code 405: Not allowed}
*/
int EXIT_OPERATION_NOT_ALLOWED = 45;
/**
* The command is somehow not acceptable: {@value}.
* <p>
* Approximate HTTP equivalent: {@code 406: Not Acceptable}
*/
int EXIT_NOT_ACCEPTABLE = 46;
/**
* Exit code on connectivity problems: {@value}.
* <p>
* Approximate HTTP equivalent: {@code 408: Request Timeout}
*/
int EXIT_CONNECTIVITY_PROBLEM = 48;
/**
* Exit code when the configurations in valid/incomplete: {@value}.
* <p>
* Approximate HTTP equivalent: {@code 409: Conflict}
*/
int EXIT_BAD_CONFIGURATION = 49;
/**
* Exit code when an exception was thrown from the service: {@value}.
* <p>
* Approximate HTTP equivalent: {@code 500 Internal Server Error}
*/
int EXIT_EXCEPTION_THROWN = 50;
/**
* Unimplemented feature: {@value}.
* <p>
* Approximate HTTP equivalent: {@code 501: Not Implemented}
*/
int EXIT_UNIMPLEMENTED = 51;
/**
* Service Unavailable; it may be available later: {@value}.
* <p>
* Approximate HTTP equivalent: {@code 503 Service Unavailable}
*/
int EXIT_SERVICE_UNAVAILABLE = 53;
/**
* The application does not support, or refuses to support this
* version: {@value}.
* <p>
* If raised, this is expected to be raised server-side and likely due
* to client/server version incompatibilities.
* <p>
* Approximate HTTP equivalent: {@code 505: Version Not Supported}
*/
int EXIT_UNSUPPORTED_VERSION = 55;
/**
* The service instance could not be created: {@value}.
*/
int EXIT_SERVICE_CREATION_FAILURE = 56;
/**
* The service instance could not be created: {@value}.
*/
int EXIT_SERVICE_LIFECYCLE_EXCEPTION = 57;
}
| LauncherExitCodes |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/serialization/ProxySerializationNoSessionFactoryTest.java | {
"start": 1149,
"end": 3202
} | class ____ {
@Test
public void testUninitializedProxy() {
executeTest( false );
}
@Test
public void testInitializedProxy() {
executeTest( true );
}
private void executeTest(boolean initializeProxy) {
SessionFactoryRegistry.INSTANCE.clearRegistrations();
final Configuration cfg = new Configuration()
.setProperty( AvailableSettings.HBM2DDL_AUTO, Action.ACTION_CREATE_THEN_DROP )
.addAnnotatedClass( SimpleEntity.class )
.addAnnotatedClass( ChildEntity.class );
ServiceRegistryUtil.applySettings( cfg.getStandardServiceRegistryBuilder() );
final SimpleEntity parent;
try (final SessionFactoryImplementor factory = (SessionFactoryImplementor) cfg.buildSessionFactory()) {
inTransaction( factory, session -> {
final SimpleEntity entity = new SimpleEntity();
entity.setId( 1L );
entity.setName( "TheParent" );
session.persist( entity );
final ChildEntity child = new ChildEntity();
child.setId( 1L );
child.setParent( entity );
session.persist( child );
}
);
parent = fromTransaction( factory, session -> {
final ChildEntity childEntity = session.find( ChildEntity.class, 1L );
final SimpleEntity entity = childEntity.getParent();
if ( initializeProxy ) {
assertThat( entity.getName() ).isEqualTo( "TheParent" );
}
return entity;
}
);
}
// The session factory is not available anymore
assertThat( SessionFactoryRegistry.INSTANCE.hasRegistrations() ).isFalse();
assertThat( parent instanceof HibernateProxy ).isTrue();
assertThat( Hibernate.isInitialized( parent ) ).isEqualTo( initializeProxy );
// Serialization and deserialization should still work
final SimpleEntity clone = (SimpleEntity) SerializationHelper.clone( parent );
assertThat( clone ).isNotNull();
assertThat( clone.getId() ).isEqualTo( parent.getId() );
if ( initializeProxy ) {
assertThat( clone.getName() ).isEqualTo( parent.getName() );
}
}
@Entity(name = "SimpleEntity")
static | ProxySerializationNoSessionFactoryTest |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/script/field/AbstractLongDocValuesField.java | {
"start": 1122,
"end": 4181
} | class
____ ScriptDocValues<?> scriptDocValues = null;
protected final SortedNumericLongValues input;
protected long[] values = new long[0];
protected int count;
public AbstractLongDocValuesField(SortedNumericLongValues input, String name) {
this.input = input;
this.name = name;
}
/**
* Override if not using {@link ScriptDocValues.Longs}
*/
protected ScriptDocValues<?> newScriptDocValues() {
return new ScriptDocValues.Longs(this);
}
/**
* Override if long has special formatting.
*/
protected long formatLong(long raw) {
return raw;
}
@Override
public void setNextDocId(int docId) throws IOException {
if (input.advanceExact(docId)) {
resize(input.docValueCount());
for (int i = 0; i < count; i++) {
values[i] = formatLong(input.nextValue());
}
} else {
resize(0);
}
}
@Override
public ScriptDocValues<?> toScriptDocValues() {
if (scriptDocValues == null) {
scriptDocValues = newScriptDocValues();
}
return scriptDocValues;
}
/**
* Set the {@link #size()} and ensure that the {@link #values} array can
* store at least that many entries.
*/
private void resize(int newSize) {
count = newSize;
values = ArrayUtil.grow(values, count);
}
// this method is required to support the Long return values
// for the old-style "doc" access in ScriptDocValues
@Override
public Long getInternal(int index) {
return getLong(index);
}
protected long getLong(int index) {
return values[index];
}
@Override
public int size() {
return count;
}
@Override
public boolean isEmpty() {
return count == 0;
}
@Override
public String getName() {
return name;
}
@Override
public PrimitiveIterator.OfLong iterator() {
return new PrimitiveIterator.OfLong() {
private int index = 0;
@Override
public boolean hasNext() {
return index < count;
}
@Override
public Long next() {
return nextLong();
}
@Override
public long nextLong() {
if (hasNext() == false) {
throw new NoSuchElementException();
}
return getLong(index++);
}
};
}
/** Returns the 0th index value as an {@code long} if it exists, otherwise {@code defaultValue}. */
public long get(long defaultValue) {
return get(0, defaultValue);
}
/** Returns the value at {@code index} as an {@code long} if it exists, otherwise {@code defaultValue}. */
public long get(int index, long defaultValue) {
if (isEmpty() || index < 0 || index >= count) {
return defaultValue;
}
return getLong(index);
}
}
| protected |
java | apache__camel | components/camel-metrics/src/test/java/org/apache/camel/component/metrics/spi/InstrumentedThreadPoolFactoryTest.java | {
"start": 1950,
"end": 5659
} | class ____ {
private static final String METRICS_NAME = "metrics.name";
@Mock
private MetricRegistry registry;
@Mock
private ThreadPoolFactory threadPoolFactory;
@Mock
private ThreadFactory threadFactory;
private ThreadPoolProfile profile;
private InstrumentedThreadPoolFactory instrumentedThreadPoolFactory;
private InOrder inOrder;
@BeforeEach
public void setUp() {
profile = new ThreadPoolProfile(METRICS_NAME);
profile.setDefaultProfile(false);
profile.setMaxPoolSize(10);
profile.setMaxQueueSize(1000);
profile.setPoolSize(5);
profile.setKeepAliveTime(5L);
profile.setTimeUnit(TimeUnit.SECONDS);
profile.setRejectedPolicy(ThreadPoolRejectedPolicy.CallerRuns);
instrumentedThreadPoolFactory = new InstrumentedThreadPoolFactory(registry, threadPoolFactory);
inOrder = Mockito.inOrder(registry);
}
@Test
public void testNewCacheThreadPool() {
final ExecutorService executorService = instrumentedThreadPoolFactory.newCachedThreadPool(threadFactory);
assertThat(executorService, is(notNullValue()));
assertThat(executorService, is(instanceOf(InstrumentedExecutorService.class)));
inOrder.verify(registry, times(1)).meter(anyString());
inOrder.verify(registry, times(1)).counter(anyString());
inOrder.verify(registry, times(1)).meter(anyString());
inOrder.verify(registry, times(2)).timer(anyString());
}
@Test
public void testNewThreadPool() {
final ExecutorService executorService = instrumentedThreadPoolFactory.newThreadPool(profile, threadFactory);
assertThat(executorService, is(notNullValue()));
assertThat(executorService, is(instanceOf(InstrumentedExecutorService.class)));
inOrder.verify(registry, times(1)).meter(MetricRegistry.name(METRICS_NAME, new String[] { "submitted" }));
inOrder.verify(registry, times(1)).counter(MetricRegistry.name(METRICS_NAME, new String[] { "running" }));
inOrder.verify(registry, times(1)).meter(MetricRegistry.name(METRICS_NAME, new String[] { "completed" }));
inOrder.verify(registry, times(1)).timer(MetricRegistry.name(METRICS_NAME, new String[] { "duration" }));
}
@Test
public void testNewScheduledThreadPool() {
final ScheduledExecutorService scheduledExecutorService
= instrumentedThreadPoolFactory.newScheduledThreadPool(profile, threadFactory);
assertThat(scheduledExecutorService, is(notNullValue()));
assertThat(scheduledExecutorService, is(instanceOf(InstrumentedScheduledExecutorService.class)));
inOrder.verify(registry, times(1)).meter(MetricRegistry.name(METRICS_NAME, new String[] { "submitted" }));
inOrder.verify(registry, times(1)).counter(MetricRegistry.name(METRICS_NAME, new String[] { "running" }));
inOrder.verify(registry, times(1)).meter(MetricRegistry.name(METRICS_NAME, new String[] { "completed" }));
inOrder.verify(registry, times(1)).timer(MetricRegistry.name(METRICS_NAME, new String[] { "duration" }));
inOrder.verify(registry, times(1)).meter(MetricRegistry.name(METRICS_NAME, new String[] { "scheduled.once" }));
inOrder.verify(registry, times(1)).meter(MetricRegistry.name(METRICS_NAME, new String[] { "scheduled.repetitively" }));
inOrder.verify(registry, times(1)).counter(MetricRegistry.name(METRICS_NAME, new String[] { "scheduled.overrun" }));
inOrder.verify(registry, times(1))
.histogram(MetricRegistry.name(METRICS_NAME, new String[] { "scheduled.percent-of-period" }));
}
}
| InstrumentedThreadPoolFactoryTest |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/api/Assertions_assertThat_with_AssertProvider_Test.java | {
"start": 1455,
"end": 1740
} | class ____ implements AssertProvider<TestedObjectAssert> {
private final String text;
public TestedObject(String text) {
this.text = text;
}
public TestedObjectAssert assertThat() {
return new TestedObjectAssert(this);
}
}
private static | TestedObject |
java | apache__camel | components/camel-sql/src/test/java/org/apache/camel/component/sql/SqlConsumerMaxMessagesPerPollTest.java | {
"start": 1403,
"end": 4224
} | class ____ extends CamelTestSupport {
private EmbeddedDatabase db;
@Override
public void doPreSetup() throws Exception {
db = new EmbeddedDatabaseBuilder()
.setName(getClass().getSimpleName())
.setType(EmbeddedDatabaseType.DERBY)
.addScript("sql/createAndPopulateDatabase4.sql")
.build();
}
@Override
public void doPostTearDown() throws Exception {
if (db != null) {
db.shutdown();
}
}
@Test
public void maxMessagesPerPoll() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMessageCount(3);
MockEndpoint.assertIsSatisfied(context);
List<Exchange> exchanges = mock.getReceivedExchanges();
assertBodyMapValue(1, "ID", exchanges.get(0));
assertBodyMapValue("Camel", "PROJECT", exchanges.get(0));
assertProperty(0, "CamelBatchIndex", exchanges.get(0));
assertProperty(2, "CamelBatchSize", exchanges.get(0));
assertProperty(Boolean.FALSE, "CamelBatchComplete", exchanges.get(0));
assertBodyMapValue(2, "ID", exchanges.get(1));
assertBodyMapValue("AMQ", "PROJECT", exchanges.get(1));
assertProperty(1, "CamelBatchIndex", exchanges.get(1));
assertProperty(2, "CamelBatchSize", exchanges.get(1));
assertProperty(Boolean.TRUE, "CamelBatchComplete", exchanges.get(1)); // end of the first batch
assertBodyMapValue(3, "ID", exchanges.get(2));
assertBodyMapValue("Linux", "PROJECT", exchanges.get(2));
assertProperty(0, "CamelBatchIndex", exchanges.get(2)); // the second batch
assertProperty(1, "CamelBatchSize", exchanges.get(2)); // only one entry in this batch
assertProperty(Boolean.TRUE, "CamelBatchComplete", exchanges.get(2)); // there are no more entries yet
}
private void assertProperty(Object value, String propertyName, Exchange exchange) {
assertEquals(value, exchange.getProperty(propertyName));
}
private void assertBodyMapValue(Object value, String key, Exchange exchange) {
assertEquals(value, exchange.getIn().getBody(Map.class).get(key));
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
getContext().setTracing(true);
getContext().getComponent("sql", SqlComponent.class).setDataSource(db);
from("sql:select * from projects where processed = false order by id?maxMessagesPerPoll=2&initialDelay=0&delay=50")
.to("mock:result")
.to("sql:update projects set processed = true where id = :#id");
}
};
}
}
| SqlConsumerMaxMessagesPerPollTest |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/Montgomery.java | {
"start": 1139,
"end": 1980
} | class ____ {
protected final Product product = new Product();
protected long N;
protected long N_I; // N'
protected long R;
protected long R_1; // R - 1
protected int s;
/** Set the modular and initialize this object. */
Montgomery set(long n) {
if (n % 2 != 1)
throw new IllegalArgumentException("n % 2 != 1, n=" + n);
N = n;
R = Long.highestOneBit(n) << 1;
N_I = R - Modular.modInverse(N, R);
R_1 = R - 1;
s = Long.numberOfTrailingZeros(R);
return this;
}
/** Compute 2^y mod N for N odd. */
long mod(final long y) {
long p = R - N;
long x = p << 1;
if (x >= N) x -= N;
for(long mask = Long.highestOneBit(y); mask > 0; mask >>>= 1) {
p = product.m(p, p);
if ((mask & y) != 0) p = product.m(p, x);
}
return product.m(p, 1);
}
| Montgomery |
java | apache__dubbo | dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/call/UnaryClientCallListener.java | {
"start": 1066,
"end": 2370
} | class ____ implements ClientCall.Listener {
private final DeadlineFuture future;
private Object appResponse;
private int actualContentLength;
public UnaryClientCallListener(DeadlineFuture deadlineFuture) {
this.future = deadlineFuture;
}
@Override
public void onMessage(Object message, int actualContentLength) {
this.appResponse = message;
this.actualContentLength = actualContentLength;
}
@Override
public void onClose(TriRpcStatus status, Map<String, Object> trailers, boolean isReturnTriException) {
AppResponse result = new AppResponse();
result.setObjectAttachments(trailers);
if (status.isOk()) {
if (isReturnTriException) {
result.setException((Exception) appResponse);
} else {
result.setValue(appResponse);
}
} else {
result.setException(status.asException());
}
result.setAttribute(Constants.CONTENT_LENGTH_KEY, actualContentLength);
future.received(status, result);
}
@Override
public void onStart(ClientCall call) {
future.addTimeoutListener(() -> call.cancelByLocal(new IllegalStateException("client timeout")));
call.request(2);
}
}
| UnaryClientCallListener |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.