language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | micronaut-projects__micronaut-core | http-server-netty/src/main/java/io/micronaut/http/server/netty/handler/PipeliningServerHandler.java | {
"start": 30233,
"end": 30848
} | class ____ extends InboundHandler {
@Override
void read(Object message) {
if (message instanceof LastHttpContent lhc) {
lhc.release();
inboundHandler = baseInboundHandler;
} else {
((HttpContent) message).release();
}
}
@Override
void handleUpstreamError(Throwable cause) {
requestHandler.handleUnboundError(cause);
}
}
/**
* Class that allows writing the response for the request this object is associated with.
*/
public final | DroppingInboundHandler |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java | {
"start": 16025,
"end": 18049
} | class ____ extends TwoPhaseIterator {
private final Visitor visitor;
private final BitSet parentDocs;
private int nextParent = -1;
private boolean nextParentMatches;
NestedRoutingPartitionedDocIdSetIterator(Visitor visitor, BitSet parentDocs) {
super(DocIdSetIterator.all(visitor.leafReader.maxDoc())); // we iterate all live-docs
this.parentDocs = parentDocs;
this.visitor = visitor;
}
@Override
public boolean matches() throws IOException {
// the educated reader might ask why this works, it does because all live doc ids (root docs and nested docs) are evaluated in
// order and that way we don't need to seek backwards as we do in other nested docs cases.
int doc = approximation.docID();
if (doc > nextParent) {
// we only check once per nested/parent set
nextParent = parentDocs.nextSetBit(doc);
// never check a child document against the visitor, they neihter have _id nor _routing as stored fields
nextParentMatches = visitor.matches(nextParent);
}
return nextParentMatches;
}
@Override
public float matchCost() {
return 42; // that's obvious, right?
}
}
/*
* this is used internally to obtain a bitset for parent documents. We don't cache this since we never access the same reader more
* than once. There is no point in using BitsetFilterCache#BitSetProducerWarmer since we use this only as a delete by query which is
* executed on a recovery-private index writer. There is no point in caching it and it won't have a cache hit either.
*/
private static BitSetProducer newParentDocBitSetProducer(IndexVersion indexCreationVersion) {
return context -> BitsetFilterCache.bitsetFromQuery(Queries.newNonNestedFilter(indexCreationVersion), context);
}
}
| NestedRoutingPartitionedDocIdSetIterator |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/runtime/partitioner/ForwardForConsecutiveHashPartitioner.java | {
"start": 2924,
"end": 4798
} | class ____<T> extends ForwardPartitioner<T> {
private final StreamPartitioner<T> hashPartitioner;
/**
* Create a new ForwardForConsecutiveHashPartitioner.
*
* @param hashPartitioner the HashPartitioner
*/
public ForwardForConsecutiveHashPartitioner(StreamPartitioner<T> hashPartitioner) {
this.hashPartitioner = hashPartitioner;
}
@Override
public StreamPartitioner<T> copy() {
throw new RuntimeException(
"ForwardForConsecutiveHashPartitioner is a intermediate partitioner in optimization phase, "
+ "should be converted to a ForwardPartitioner and its underlying hashPartitioner at runtime.");
}
@Override
public SubtaskStateMapper getDownstreamSubtaskStateMapper() {
throw new RuntimeException(
"ForwardForConsecutiveHashPartitioner is a intermediate partitioner in optimization phase, "
+ "should be converted to a ForwardPartitioner and its underlying hashPartitioner at runtime.");
}
@Override
public boolean isPointwise() {
// will be used in StreamGraphGenerator#shouldDisableUnalignedCheckpointing, so can't throw
// exception.
return true;
}
@Override
public void disableUnalignedCheckpoints() {
hashPartitioner.disableUnalignedCheckpoints();
}
@Override
public int selectChannel(SerializationDelegate<StreamRecord<T>> record) {
throw new RuntimeException(
"ForwardForConsecutiveHashPartitioner is a intermediate partitioner in optimization phase, "
+ "should be converted to a ForwardPartitioner and its underlying hashPartitioner at runtime.");
}
public StreamPartitioner<T> getHashPartitioner() {
return hashPartitioner;
}
}
| ForwardForConsecutiveHashPartitioner |
java | apache__maven | api/maven-api-core/src/main/java/org/apache/maven/api/services/xml/XmlWriterException.java | {
"start": 1058,
"end": 1469
} | class ____ extends MavenException {
private final Location location;
/**
* @param message the message for the exception
* @param e the exception itself
*/
public XmlWriterException(String message, Location location, Exception e) {
super(message, e);
this.location = location;
}
public Location getLocation() {
return location;
}
}
| XmlWriterException |
java | spring-projects__spring-boot | core/spring-boot-docker-compose/src/test/java/org/springframework/boot/docker/compose/lifecycle/ReadinessTimeoutExceptionTests.java | {
"start": 1123,
"end": 2015
} | class ____ {
@Test
void createCreatesException() {
Duration timeout = Duration.ofSeconds(10);
RunningService s1 = mock(RunningService.class);
given(s1.name()).willReturn("s1");
RunningService s2 = mock(RunningService.class);
given(s2.name()).willReturn("s2");
ServiceNotReadyException cause1 = new ServiceNotReadyException(s1, "1 not ready");
ServiceNotReadyException cause2 = new ServiceNotReadyException(s2, "2 not ready");
List<ServiceNotReadyException> exceptions = List.of(cause1, cause2);
ReadinessTimeoutException exception = new ReadinessTimeoutException(timeout, exceptions);
assertThat(exception).hasMessage("Readiness timeout of PT10S reached while waiting for services [s1, s2]");
assertThat(exception).hasSuppressedException(cause1).hasSuppressedException(cause2);
assertThat(exception.getTimeout()).isEqualTo(timeout);
}
}
| ReadinessTimeoutExceptionTests |
java | elastic__elasticsearch | benchmarks/src/main/java/org/elasticsearch/benchmark/indices/resolution/IndexNameExpressionResolverBenchmark.java | {
"start": 1992,
"end": 5569
} | class ____ {
private static final String DATA_STREAM_PREFIX = "my-ds-";
private static final String INDEX_PREFIX = "my-index-";
@Param(
{
// # data streams | # indices
" 1000| 100",
" 5000| 500",
" 10000| 1000" }
)
public String resourceMix = "100|10";
@Setup
public void setUp() {
final String[] params = resourceMix.split("\\|");
int numDataStreams = toInt(params[0]);
int numIndices = toInt(params[1]);
ProjectMetadata.Builder pmb = ProjectMetadata.builder(ProjectId.DEFAULT);
String[] indices = new String[numIndices + numDataStreams * (numIndices + 1)];
int position = 0;
for (int i = 1; i <= numIndices; i++) {
String indexName = INDEX_PREFIX + i;
createIndexMetadata(indexName, pmb);
indices[position++] = indexName;
}
for (int i = 1; i <= numDataStreams; i++) {
String dataStreamName = DATA_STREAM_PREFIX + i;
List<Index> backingIndices = new ArrayList<>();
for (int j = 1; j <= numIndices; j++) {
String backingIndexName = DataStream.getDefaultBackingIndexName(dataStreamName, j);
backingIndices.add(createIndexMetadata(backingIndexName, pmb).getIndex());
indices[position++] = backingIndexName;
}
indices[position++] = dataStreamName;
pmb.put(DataStream.builder(dataStreamName, backingIndices).build());
}
int mid = indices.length / 2;
clusterState = ClusterState.builder(ClusterName.DEFAULT).putProjectMetadata(pmb).build();
resolver = new IndexNameExpressionResolver(
new ThreadContext(Settings.EMPTY),
new SystemIndices(List.of()),
DefaultProjectResolver.INSTANCE
);
indexListRequest = new Request(IndicesOptions.lenientExpandOpenHidden(), indices);
starRequest = new Request(IndicesOptions.lenientExpandOpenHidden(), "*");
String[] mixed = indices.clone();
mixed[mid] = "my-*";
mixedRequest = new Request(IndicesOptions.lenientExpandOpenHidden(), mixed);
}
private IndexMetadata createIndexMetadata(String indexName, ProjectMetadata.Builder pmb) {
IndexMetadata indexMetadata = IndexMetadata.builder(indexName)
.settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()))
.numberOfShards(1)
.numberOfReplicas(0)
.build();
pmb.put(indexMetadata, false);
return indexMetadata;
}
private IndexNameExpressionResolver resolver;
private ClusterState clusterState;
private Request starRequest;
private Request indexListRequest;
private Request mixedRequest;
@Benchmark
public String[] resolveResourcesListToConcreteIndices() {
return resolver.concreteIndexNames(clusterState, indexListRequest);
}
@Benchmark
public String[] resolveAllStarToConcreteIndices() {
return resolver.concreteIndexNames(clusterState, starRequest);
}
@Benchmark
public String[] resolveMixedConcreteIndices() {
return resolver.concreteIndexNames(clusterState, mixedRequest);
}
private int toInt(String v) {
return Integer.parseInt(v.trim());
}
record Request(IndicesOptions indicesOptions, String... indices) implements IndicesRequest {
}
}
| IndexNameExpressionResolverBenchmark |
java | apache__camel | components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/spring/xml/handler/MyErrorProcessor.java | {
"start": 926,
"end": 1172
} | class ____ implements Processor {
@Override
public void process(Exchange exchange) throws Exception {
// do nothing here
}
@Override
public String toString() {
return "MyErrorProcessor";
}
}
| MyErrorProcessor |
java | hibernate__hibernate-orm | hibernate-envers/src/main/java/org/hibernate/envers/Audited.java | {
"start": 2966,
"end": 3406
} | class ____ for the annotated
* property. The flag stores information if a property has been changed at a given revision.
* This can be used for example in queries.
*/
boolean withModifiedFlag() default false;
/**
* The column name of the modified field. Analogous to the name attribute of the @{@link jakarta.persistence.Column}
* annotation. Ignored if withModifiedFlag is false.
*/
String modifiedColumnName() default "";
}
| or |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java | {
"start": 2222,
"end": 10288
} | class ____ {
static final Logger LOG =
LoggerFactory.getLogger(TestSocketIOWithTimeout.class);
private static int TIMEOUT = 1*1000;
private static String TEST_STRING = "1234567890";
private MultithreadedTestUtil.TestContext ctx = new TestContext();
private static final int PAGE_SIZE = (int) NativeIO.POSIX.getCacheManipulator().getOperatingSystemPageSize();
private void doIO(InputStream in, OutputStream out,
int expectedTimeout) throws IOException {
/* Keep on writing or reading until we get SocketTimeoutException.
* It expects this exception to occur within 100 millis of TIMEOUT.
*/
byte buf[] = new byte[PAGE_SIZE + 19];
while (true) {
long start = Time.now();
try {
if (in != null) {
in.read(buf);
} else {
out.write(buf);
}
} catch (SocketTimeoutException e) {
long diff = Time.now() - start;
LOG.info("Got SocketTimeoutException as expected after " +
diff + " millis : " + e.getMessage());
assertTrue(Math.abs(expectedTimeout - diff) <=
TestNetUtils.TIME_FUDGE_MILLIS);
break;
}
}
}
@Test
public void testSocketIOWithTimeout() throws Exception {
// first open pipe:
Pipe pipe = Pipe.open();
Pipe.SourceChannel source = pipe.source();
Pipe.SinkChannel sink = pipe.sink();
try {
final InputStream in = new SocketInputStream(source, TIMEOUT);
OutputStream out = new SocketOutputStream(sink, TIMEOUT);
byte[] writeBytes = TEST_STRING.getBytes();
byte[] readBytes = new byte[writeBytes.length];
byte byteWithHighBit = (byte)0x80;
out.write(writeBytes);
out.write(byteWithHighBit);
doIO(null, out, TIMEOUT);
in.read(readBytes);
assertTrue(Arrays.equals(writeBytes, readBytes));
assertEquals(byteWithHighBit & 0xff, in.read());
doIO(in, null, TIMEOUT);
// Change timeout on the read side.
((SocketInputStream)in).setTimeout(TIMEOUT * 2);
doIO(in, null, TIMEOUT * 2);
/*
* Verify that it handles interrupted threads properly.
* Use a large timeout and expect the thread to return quickly
* upon interruption.
*/
((SocketInputStream)in).setTimeout(0);
TestingThread thread = new TestingThread(ctx) {
@Override
public void doWork() throws Exception {
try {
in.read();
fail("Did not fail with interrupt");
} catch (InterruptedIOException ste) {
LOG.info("Got expection while reading as expected : " +
ste.getMessage());
}
}
};
ctx.addThread(thread);
ctx.startThreads();
// If the thread is interrupted before it calls read()
// then it throws ClosedByInterruptException due to
// some Java quirk. Waiting for it to call read()
// gets it into select(), so we get the expected
// InterruptedIOException.
Thread.sleep(1000);
thread.interrupt();
ctx.stop();
//make sure the channels are still open
assertTrue(source.isOpen());
assertTrue(sink.isOpen());
// Nevertheless, the output stream is closed, because
// a partial write may have succeeded (see comment in
// SocketOutputStream#write(byte[]), int, int)
// This portion of the test cannot pass on Windows due to differences in
// behavior of partial writes. Windows appears to buffer large amounts of
// written data and send it all atomically, thus making it impossible to
// simulate a partial write scenario. Attempts were made to switch the
// test from using a pipe to a network socket and also to use larger and
// larger buffers in doIO. Nothing helped the situation though.
if (!Shell.WINDOWS) {
try {
out.write(1);
fail("Did not throw");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains(
"stream is closed", ioe);
}
}
out.close();
assertFalse(sink.isOpen());
// close sink and expect -1 from source.read()
assertEquals(-1, in.read());
// make sure close() closes the underlying channel.
in.close();
assertFalse(source.isOpen());
} finally {
if (source != null) {
source.close();
}
if (sink != null) {
sink.close();
}
}
}
@Test
public void testSocketIOWithTimeoutByMultiThread() throws Exception {
CountDownLatch latch = new CountDownLatch(1);
Runnable ioTask = () -> {
try {
Pipe pipe = Pipe.open();
try (Pipe.SourceChannel source = pipe.source();
InputStream in = new SocketInputStream(source, TIMEOUT);
Pipe.SinkChannel sink = pipe.sink();
OutputStream out = new SocketOutputStream(sink, TIMEOUT)) {
byte[] writeBytes = TEST_STRING.getBytes();
byte[] readBytes = new byte[writeBytes.length];
latch.await();
out.write(writeBytes);
doIO(null, out, TIMEOUT);
in.read(readBytes);
assertArrayEquals(writeBytes, readBytes);
doIO(in, null, TIMEOUT);
}
} catch (Exception e) {
fail(e.getMessage());
}
};
int threadCnt = 64;
ExecutorService threadPool = Executors.newFixedThreadPool(threadCnt);
for (int i = 0; i < threadCnt; ++i) {
threadPool.submit(ioTask);
}
Thread.sleep(1000);
latch.countDown();
threadPool.shutdown();
assertTrue(threadPool.awaitTermination(3, TimeUnit.SECONDS));
}
@Test
public void testSocketIOWithTimeoutInterrupted() throws Exception {
Pipe pipe = Pipe.open();
final int timeout = TIMEOUT * 10;
try (Pipe.SourceChannel source = pipe.source();
InputStream in = new SocketInputStream(source, timeout)) {
TestingThread thread = new TestingThread(ctx) {
@Override
public void doWork() throws Exception {
try {
in.read();
fail("Did not fail with interrupt");
} catch (InterruptedIOException ste) {
String detail = ste.getMessage();
String totalString = "Total timeout mills is " + timeout;
String leftString = "millis timeout left";
assertTrue(detail.contains(totalString));
assertTrue(detail.contains(leftString));
}
}
};
ctx.addThread(thread);
ctx.startThreads();
// If the thread is interrupted before it calls read()
// then it throws ClosedByInterruptException due to
// some Java quirk. Waiting for it to call read()
// gets it into select(), so we get the expected
// InterruptedIOException.
Thread.sleep(1000);
thread.interrupt();
ctx.stop();
}
}
@Test
public void testSocketIOWithTimeoutInterruptedByMultiThread()
throws Exception {
final int timeout = TIMEOUT * 10;
AtomicLong readCount = new AtomicLong();
AtomicLong exceptionCount = new AtomicLong();
Runnable ioTask = () -> {
try {
Pipe pipe = Pipe.open();
try (Pipe.SourceChannel source = pipe.source();
InputStream in = new SocketInputStream(source, timeout)) {
in.read();
readCount.incrementAndGet();
} catch (InterruptedIOException ste) {
exceptionCount.incrementAndGet();
}
} catch (Exception e) {
fail(e.getMessage());
}
};
int threadCnt = 64;
ExecutorService threadPool = Executors.newFixedThreadPool(threadCnt);
for (int i = 0; i < threadCnt; ++i) {
threadPool.submit(ioTask);
}
Thread.sleep(1000);
threadPool.shutdownNow();
threadPool.awaitTermination(1, TimeUnit.SECONDS);
assertEquals(0, readCount.get());
assertEquals(threadCnt, exceptionCount.get());
}
}
| TestSocketIOWithTimeout |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/error/ShouldHaveSameTime_create_Test.java | {
"start": 1257,
"end": 2232
} | class ____ {
@Test
void should_create_error_message() {
// GIVEN
Date actual = DateUtil.parseDatetime("2011-01-01T05:01:00");
Date expected = DateUtil.parseDatetime("2011-01-01T05:01:01");
// WHEN
String message = shouldHaveSameTime(actual, expected).create(new TextDescription("Test"), STANDARD_REPRESENTATION);
// THEN
then(message).isEqualTo(format("[Test] %n" +
"Expecting%n" +
" 2011-01-01T05:01:00.000 (java.util.Date)%n" +
"to have the same time as:%n" +
" 2011-01-01T05:01:01.000 (java.util.Date)%n" +
"but actual time is%n" +
" " + actual.getTime() + "L%n" +
"and expected was:%n" +
" " + expected.getTime() + "L"));
}
}
| ShouldHaveSameTime_create_Test |
java | apache__flink | flink-clients/src/main/java/org/apache/flink/client/program/PackagedProgram.java | {
"start": 2273,
"end": 2471
} | class ____ represents a program, packaged in a jar file. It supplies functionality
* to extract nested libraries, search for the program entry point, and extract a program plan.
*/
public | encapsulates |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/modifiedflags/HasChangedBidirectionalTest.java | {
"start": 1285,
"end": 5152
} | class ____ extends AbstractModifiedFlagsEntityTest {
@BeforeClassTemplate
public void initData(EntityManagerFactoryScope scope) {
// Revision 1 | Create ticket with comments
scope.inEntityManager( entityManager -> {
entityManager.getTransaction().begin();
final Ticket ticket = new Ticket( 1, "data-t1" );
final Comment comment = new Comment( 1, "Initial comment-t1" );
ticket.addComment( comment );
entityManager.persist( comment );
entityManager.persist( ticket );
entityManager.getTransaction().commit();
} );
// Revision 2 | Create ticket without comments
scope.inEntityManager( entityManager -> {
entityManager.getTransaction().begin();
final Ticket ticket = new Ticket( 2, "data-t2" );
entityManager.persist( ticket );
entityManager.getTransaction().commit();
} );
// Revision 3 | Update ticket with comments
scope.inEntityManager( entityManager -> {
entityManager.getTransaction().begin();
final Ticket ticket = entityManager.find( Ticket.class, 1 );
ticket.setData( "data-changed-t1" );
entityManager.merge( ticket );
entityManager.getTransaction().commit();
} );
// Revision 4 | Update ticket without comments
scope.inEntityManager( entityManager -> {
entityManager.getTransaction().begin();
final Ticket ticket = entityManager.find( Ticket.class, 2 );
ticket.setData( "data-changed-t2" );
entityManager.merge( ticket );
entityManager.getTransaction().commit();
} );
// Revision 5 | Update ticket and comment
scope.inEntityManager( entityManager -> {
entityManager.getTransaction().begin();
final Ticket ticket = entityManager.find( Ticket.class, 1 );
ticket.setData( "data-changed-twice" );
ticket.getComments().get( 0 ).setText( "comment-modified" );
ticket.getComments().forEach( entityManager::merge );
entityManager.merge( ticket );
entityManager.getTransaction().commit();
} );
// Revision 6 | Update ticket and comment collection
scope.inEntityManager( entityManager -> {
entityManager.getTransaction().begin();
final Ticket ticket = entityManager.find( Ticket.class, 1 );
final Comment comment = new Comment( 2, "Comment2" );
ticket.addComment( comment );
entityManager.merge( comment );
entityManager.merge( ticket );
entityManager.getTransaction().commit();
} );
}
@Test
public void testRevisionCounts(EntityManagerFactoryScope scope) {
scope.inEntityManager( em -> {
final var auditReader = AuditReaderFactory.get( em );
assertEquals( Arrays.asList( 1, 3, 5, 6 ), auditReader.getRevisions( Ticket.class, 1 ) );
assertEquals( Arrays.asList( 2, 4 ), auditReader.getRevisions( Ticket.class, 2 ) );
assertEquals( Arrays.asList( 1, 5 ), auditReader.getRevisions( Comment.class, 1 ) );
assertEquals( Arrays.asList( 6 ), auditReader.getRevisions( Comment.class, 2 ) );
} );
}
@Test
public void testHasChanged(EntityManagerFactoryScope scope) {
scope.inEntityManager( em -> {
final var auditReader = AuditReaderFactory.get( em );
assertEquals( Arrays.asList( 1, 6 ), extractRevisionNumbers( queryForPropertyHasChanged( auditReader, Ticket.class, 1, "comments" ) ) );
assertEquals( Arrays.asList( 2 ), extractRevisionNumbers( queryForPropertyHasChanged( auditReader, Ticket.class, 2, "comments" ) ) );
} );
}
@Test
public void testHasNotChanged(EntityManagerFactoryScope scope) {
scope.inEntityManager( em -> {
final var auditReader = AuditReaderFactory.get( em );
assertEquals( Arrays.asList( 3, 5 ), extractRevisionNumbers( queryForPropertyHasNotChanged( auditReader, Ticket.class, 1, "comments" ) ) );
assertEquals( Arrays.asList( 4 ), extractRevisionNumbers( queryForPropertyHasNotChanged( auditReader, Ticket.class, 2, "comments" ) ) );
} );
}
@Entity(name = "Ticket")
@Audited(withModifiedFlag = true)
public static | HasChangedBidirectionalTest |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/interceptor/AdviceWithRecipientListMockEndpointsTest.java | {
"start": 1085,
"end": 2505
} | class ____ extends ContextTestSupport {
@Test
public void testAdvisedMockEndpoints() throws Exception {
// advice the second route using the inlined AdviceWith route builder
// which has extended capabilities than the regular route builder
AdviceWith.adviceWith(context.getRouteDefinitions().get(1), context, new AdviceWithRouteBuilder() {
@Override
public void configure() throws Exception {
// mock all endpoints
mockEndpoints("log*");
}
});
// log:bar is a dynamic endpoint created on-the-fly (eg not in the
// route)
getMockEndpoint("mock:log:bar").expectedMessageCount(1);
// log:foo is in the route
getMockEndpoint("mock:log:foo").expectedMessageCount(1);
getMockEndpoint("mock:result").expectedMessageCount(1);
template.sendBodyAndHeader("direct:start", "Hello World", "foo", "log:bar,direct:foo");
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() throws Exception {
return new RouteBuilder() {
@Override
public void configure() throws Exception {
from("direct:start").recipientList(header("foo"));
from("direct:foo").to("log:foo").to("mock:result");
}
};
}
}
| AdviceWithRecipientListMockEndpointsTest |
java | apache__rocketmq | client/src/test/java/org/apache/rocketmq/client/impl/consumer/ProcessQueueTest.java | {
"start": 1984,
"end": 7490
} | class ____ {
@Test
public void testCachedMessageCount() {
ProcessQueue pq = new ProcessQueue();
pq.putMessage(createMessageList());
assertThat(pq.getMsgCount().get()).isEqualTo(100);
pq.takeMessages(10);
pq.commit();
assertThat(pq.getMsgCount().get()).isEqualTo(90);
pq.removeMessage(Collections.singletonList(pq.getMsgTreeMap().lastEntry().getValue()));
assertThat(pq.getMsgCount().get()).isEqualTo(89);
}
@Test
public void testCachedMessageSize() {
ProcessQueue pq = new ProcessQueue();
pq.putMessage(createMessageList());
assertThat(pq.getMsgSize().get()).isEqualTo(100 * 123);
pq.takeMessages(10);
pq.commit();
assertThat(pq.getMsgSize().get()).isEqualTo(90 * 123);
pq.removeMessage(Collections.singletonList(pq.getMsgTreeMap().lastEntry().getValue()));
assertThat(pq.getMsgSize().get()).isEqualTo(89 * 123);
}
@Test
public void testContainsMessage() {
ProcessQueue pq = new ProcessQueue();
final List<MessageExt> messageList = createMessageList(2);
final MessageExt message0 = messageList.get(0);
final MessageExt message1 = messageList.get(1);
pq.putMessage(Lists.list(message0));
assertThat(pq.containsMessage(message0)).isTrue();
assertThat(pq.containsMessage(message1)).isFalse();
}
@Test
public void testFillProcessQueueInfo() throws IllegalAccessException {
ProcessQueue pq = new ProcessQueue();
pq.putMessage(createMessageList(102400));
ProcessQueueInfo processQueueInfo = new ProcessQueueInfo();
pq.fillProcessQueueInfo(processQueueInfo);
assertThat(processQueueInfo.getCachedMsgSizeInMiB()).isEqualTo(12);
pq.takeMessages(10000);
pq.commit();
pq.fillProcessQueueInfo(processQueueInfo);
assertThat(processQueueInfo.getCachedMsgSizeInMiB()).isEqualTo(10);
pq.takeMessages(10000);
pq.commit();
pq.fillProcessQueueInfo(processQueueInfo);
assertThat(processQueueInfo.getCachedMsgSizeInMiB()).isEqualTo(9);
pq.takeMessages(80000);
pq.commit();
pq.fillProcessQueueInfo(processQueueInfo);
assertThat(processQueueInfo.getCachedMsgSizeInMiB()).isEqualTo(0);
TreeMap<Long, MessageExt> consumingMsgOrderlyTreeMap = new TreeMap<>();
consumingMsgOrderlyTreeMap.put(0L, createMessageList(1).get(0));
FieldUtils.writeDeclaredField(pq, "consumingMsgOrderlyTreeMap", consumingMsgOrderlyTreeMap, true);
pq.fillProcessQueueInfo(processQueueInfo);
assertEquals(0, processQueueInfo.getTransactionMsgMinOffset());
assertEquals(0, processQueueInfo.getTransactionMsgMaxOffset());
assertEquals(1, processQueueInfo.getTransactionMsgCount());
}
@Test
public void testPopRequest() throws MQBrokerException, RemotingException, InterruptedException, MQClientException {
ProcessQueue processQueue = createProcessQueue();
MessageExt messageExt = createMessageList(1).get(0);
messageExt.getProperties().put(MessageConst.PROPERTY_CONSUME_START_TIMESTAMP, System.currentTimeMillis() - 20 * 60 * 1000L + "");
processQueue.getMsgTreeMap().put(0L, messageExt);
DefaultMQPushConsumer pushConsumer = mock(DefaultMQPushConsumer.class);
processQueue.cleanExpiredMsg(pushConsumer);
verify(pushConsumer).sendMessageBack(any(MessageExt.class), eq(3));
}
@Test
public void testRollback() throws IllegalAccessException {
ProcessQueue processQueue = createProcessQueue();
processQueue.rollback();
Field consumingMsgOrderlyTreeMapField = FieldUtils.getDeclaredField(processQueue.getClass(), "consumingMsgOrderlyTreeMap", true);
TreeMap<Long, MessageExt> consumingMsgOrderlyTreeMap = (TreeMap<Long, MessageExt>) consumingMsgOrderlyTreeMapField.get(processQueue);
assertEquals(0, consumingMsgOrderlyTreeMap.size());
}
@Test
public void testHasTempMessage() {
ProcessQueue processQueue = createProcessQueue();
assertFalse(processQueue.hasTempMessage());
}
@Test
public void testProcessQueue() {
ProcessQueue processQueue1 = createProcessQueue();
ProcessQueue processQueue2 = createProcessQueue();
assertEquals(processQueue1.getMsgAccCnt(), processQueue2.getMsgAccCnt());
assertEquals(processQueue1.getTryUnlockTimes(), processQueue2.getTryUnlockTimes());
assertEquals(processQueue1.getLastPullTimestamp(), processQueue2.getLastPullTimestamp());
}
private ProcessQueue createProcessQueue() {
ProcessQueue result = new ProcessQueue();
result.setMsgAccCnt(1);
result.incTryUnlockTimes();
return result;
}
private List<MessageExt> createMessageList() {
return createMessageList(100);
}
private List<MessageExt> createMessageList(int count) {
List<MessageExt> result = new ArrayList<>();
for (int i = 0; i < count; i++) {
MessageExt messageExt = new MessageExt();
messageExt.setQueueOffset(i);
messageExt.setBody(new byte[123]);
messageExt.setKeys("keys" + i);
messageExt.getProperties().put(MessageConst.PROPERTY_CONSUME_START_TIMESTAMP, System.currentTimeMillis() + "");
result.add(messageExt);
}
return result;
}
}
| ProcessQueueTest |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/index/mapper/FloatRangeFieldMapperTests.java | {
"start": 644,
"end": 2175
} | class ____ extends RangeFieldMapperTests {
@Override
protected XContentBuilder rangeSource(XContentBuilder in) throws IOException {
return rangeSource(in, "0.5", "2.7");
}
@Override
protected String storedValue() {
return "0.5 : 2.7";
}
@Override
protected Object rangeValue() {
return 2.7;
}
@Override
protected void minimalMapping(XContentBuilder b) throws IOException {
b.field("type", "float_range");
}
@Override
protected boolean supportsDecimalCoerce() {
return false;
}
@Override
protected TestRange<Float> randomRangeForSyntheticSourceTest() {
Float from = (float) randomDoubleBetween(-Float.MAX_VALUE, Float.MAX_VALUE - Math.ulp(Float.MAX_VALUE), true);
Float to = (float) randomDoubleBetween(from + Math.ulp(from), Float.MAX_VALUE, true);
boolean valuesAdjacent = Math.nextUp(from) > Math.nextDown(to);
var includeFrom = valuesAdjacent || randomBoolean();
var includeTo = valuesAdjacent || randomBoolean();
if (rarely()) {
from = null;
}
if (rarely()) {
to = null;
}
return new TestRange<>(rangeType(), from, to, includeFrom, includeTo);
}
@Override
protected RangeType rangeType() {
return RangeType.FLOAT;
}
@Override
protected IngestScriptSupport ingestScriptSupport() {
throw new AssumptionViolatedException("not supported");
}
}
| FloatRangeFieldMapperTests |
java | redisson__redisson | redisson/src/main/java/org/redisson/codec/CompositeCodec.java | {
"start": 890,
"end": 3087
} | class ____ implements Codec {
private final Codec mapKeyCodec;
private final Codec mapValueCodec;
private final Codec valueCodec;
public CompositeCodec(Codec mapKeyCodec, Codec mapValueCodec) {
this(mapKeyCodec, mapValueCodec, null);
}
public CompositeCodec(Codec mapKeyCodec, Codec mapValueCodec, Codec valueCodec) {
super();
this.mapKeyCodec = mapKeyCodec;
this.mapValueCodec = mapValueCodec;
this.valueCodec = valueCodec;
}
public CompositeCodec(ClassLoader classLoader, CompositeCodec codec) throws ReflectiveOperationException {
super();
this.mapKeyCodec = BaseCodec.copy(classLoader, codec.mapKeyCodec);
this.mapValueCodec = BaseCodec.copy(classLoader, codec.mapValueCodec);
this.valueCodec = BaseCodec.copy(classLoader, codec.valueCodec);
}
@Override
public Decoder<Object> getMapValueDecoder() {
return mapValueCodec.getMapValueDecoder();
}
@Override
public Encoder getMapValueEncoder() {
return mapValueCodec.getMapValueEncoder();
}
@Override
public Decoder<Object> getMapKeyDecoder() {
return mapKeyCodec.getMapKeyDecoder();
}
@Override
public Encoder getMapKeyEncoder() {
return mapKeyCodec.getMapKeyEncoder();
}
@Override
public Decoder<Object> getValueDecoder() {
return valueCodec.getValueDecoder();
}
@Override
public Encoder getValueEncoder() {
return valueCodec.getValueEncoder();
}
@Override
public ClassLoader getClassLoader() {
return getClass().getClassLoader();
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
CompositeCodec that = (CompositeCodec) o;
return Objects.equals(mapKeyCodec, that.mapKeyCodec)
&& Objects.equals(mapValueCodec, that.mapValueCodec)
&& Objects.equals(valueCodec, that.valueCodec);
}
@Override
public int hashCode() {
return Objects.hash(mapKeyCodec, mapValueCodec, valueCodec);
}
}
| CompositeCodec |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableImpl.java | {
"start": 5836,
"end": 71451
} | class ____<K, S, V> extends AbstractStream<K, V> implements KTable<K, V> {
    private static final Logger LOG = LoggerFactory.getLogger(KTableImpl.class);
    // Prefixes used when auto-generating internal processor/store names.
    static final String SOURCE_NAME = "KTABLE-SOURCE-";
    static final String STATE_STORE_NAME = "STATE-STORE-";
    private static final String FILTER_NAME = "KTABLE-FILTER-";
    private static final String JOINTHIS_NAME = "KTABLE-JOINTHIS-";
    private static final String JOINOTHER_NAME = "KTABLE-JOINOTHER-";
    private static final String MAPVALUES_NAME = "KTABLE-MAPVALUES-";
    private static final String MERGE_NAME = "KTABLE-MERGE-";
    private static final String SELECT_NAME = "KTABLE-SELECT-";
    private static final String SUPPRESS_NAME = "KTABLE-SUPPRESS-";
    private static final String TOSTREAM_NAME = "KTABLE-TOSTREAM-";
    private static final String TRANSFORMVALUES_NAME = "KTABLE-TRANSFORMVALUES-";
    // Name prefixes for the internal nodes/stores/topics of foreign-key joins.
    private static final String FK_JOIN = "KTABLE-FK-JOIN-";
    private static final String FK_JOIN_STATE_STORE_NAME = FK_JOIN + "SUBSCRIPTION-STATE-STORE-";
    private static final String SUBSCRIPTION_REGISTRATION = FK_JOIN + "SUBSCRIPTION-REGISTRATION-";
    private static final String SUBSCRIPTION_RESPONSE = FK_JOIN + "SUBSCRIPTION-RESPONSE-";
    private static final String SUBSCRIPTION_PROCESSOR = FK_JOIN + "SUBSCRIPTION-PROCESSOR-";
    private static final String SUBSCRIPTION_RESPONSE_RESOLVER_PROCESSOR = FK_JOIN + "SUBSCRIPTION-RESPONSE-RESOLVER-PROCESSOR-";
    private static final String FK_JOIN_OUTPUT_NAME = FK_JOIN + "OUTPUT-";
    private static final String TOPIC_SUFFIX = "-topic";
    private static final String SINK_NAME = "KTABLE-SINK-";
    // The processor supplier backing this table's node in the topology graph.
    private final org.apache.kafka.streams.processor.api.ProcessorSupplier<?, ?, ?, ?> processorSupplier;
    // Name of the queryable state store, or null if this table is not materialized.
    private final String queryableStoreName;
    // Whether downstream processors receive old values alongside new ones.
    private boolean sendOldValues = false;
public KTableImpl(final String name,
final Serde<K> keySerde,
final Serde<V> valueSerde,
final Set<String> subTopologySourceNodes,
final String queryableStoreName,
final org.apache.kafka.streams.processor.api.ProcessorSupplier<?, ?, ?, ?> newProcessorSupplier,
final GraphNode graphNode,
final InternalStreamsBuilder builder) {
super(name, keySerde, valueSerde, subTopologySourceNodes, graphNode, builder);
this.processorSupplier = newProcessorSupplier;
this.queryableStoreName = queryableStoreName;
}
@Override
public String queryableStoreName() {
return queryableStoreName;
}
    /**
     * Shared implementation of {@code filter} and {@code filterNot}.
     * <p>
     * Serdes are inherited from the {@link Materialized} spec when overridden there,
     * otherwise from this table. The result is only materialized when a queryable
     * name was supplied; otherwise only a store-name index is burned for
     * topology-name compatibility.
     *
     * @param predicate            predicate evaluated per record
     * @param named                user-supplied processor name (may be empty)
     * @param materializedInternal materialization spec, or {@code null} if none was given
     * @param filterNot            {@code true} to keep records for which the predicate is false
     * @return the filtered table
     */
    @SuppressWarnings("resource")
    private KTable<K, V> doFilter(final Predicate<? super K, ? super V> predicate,
                                  final Named named,
                                  final MaterializedInternal<K, V, KeyValueStore<Bytes, byte[]>> materializedInternal,
                                  final boolean filterNot) {
        final Serde<K> keySerde;
        final Serde<V> valueSerde;
        final String queryableStoreName;
        final StoreFactory storeFactory;
        if (materializedInternal != null) {
            // we actually do not need to generate store names at all since if it is not specified, we will not
            // materialize the store; but we still need to burn one index BEFORE generating the processor to keep compatibility.
            if (materializedInternal.storeName() == null) {
                builder.newStoreName(FILTER_NAME);
            }
            // we can inherit parent key and value serde if user do not provide specific overrides, more specifically:
            // we preserve the key following the order of 1) materialized, 2) parent
            keySerde = materializedInternal.keySerde() != null ? materializedInternal.keySerde() : this.keySerde;
            // we preserve the value following the order of 1) materialized, 2) parent
            valueSerde = materializedInternal.valueSerde() != null ? materializedInternal.valueSerde() : this.valueSerde;
            queryableStoreName = materializedInternal.queryableStoreName();
            // only materialize if materialized is specified and it has queryable name
            storeFactory = queryableStoreName != null ? (new KeyValueStoreMaterializer<>(materializedInternal)) : null;
        } else {
            keySerde = this.keySerde;
            valueSerde = this.valueSerde;
            queryableStoreName = null;
            storeFactory = null;
        }
        final String name = new NamedInternal(named).orElseGenerateWithPrefix(builder, FILTER_NAME);
        final KTableProcessorSupplier<K, V, K, V> processorSupplier =
            new KTableFilter<>(this, predicate, filterNot, queryableStoreName, storeFactory);
        final ProcessorParameters<K, V, ?, ?> processorParameters = unsafeCastProcessorParametersToCompletelyDifferentType(
            new ProcessorParameters<>(processorSupplier, name)
        );
        final GraphNode tableNode = new TableFilterNode<>(name, processorParameters);
        maybeSetOutputVersioned(tableNode, materializedInternal);
        builder.addGraphNode(this.graphNode, tableNode);
        return new KTableImpl<K, V, V>(
            name,
            keySerde,
            valueSerde,
            subTopologySourceNodes,
            queryableStoreName,
            processorSupplier,
            tableNode,
            builder);
    }
@Override
public KTable<K, V> filter(final Predicate<? super K, ? super V> predicate) {
Objects.requireNonNull(predicate, "predicate can't be null");
return doFilter(predicate, NamedInternal.empty(), null, false);
}
@Override
public KTable<K, V> filter(final Predicate<? super K, ? super V> predicate, final Named named) {
Objects.requireNonNull(predicate, "predicate can't be null");
return doFilter(predicate, named, null, false);
}
@Override
public KTable<K, V> filter(final Predicate<? super K, ? super V> predicate,
final Named named,
final Materialized<K, V, KeyValueStore<Bytes, byte[]>> materialized) {
Objects.requireNonNull(predicate, "predicate can't be null");
Objects.requireNonNull(materialized, "materialized can't be null");
final MaterializedInternal<K, V, KeyValueStore<Bytes, byte[]>> materializedInternal = new MaterializedInternal<>(materialized);
return doFilter(predicate, named, materializedInternal, false);
}
@Override
public KTable<K, V> filter(final Predicate<? super K, ? super V> predicate,
final Materialized<K, V, KeyValueStore<Bytes, byte[]>> materialized) {
return filter(predicate, NamedInternal.empty(), materialized);
}
@Override
public KTable<K, V> filterNot(final Predicate<? super K, ? super V> predicate) {
Objects.requireNonNull(predicate, "predicate can't be null");
return doFilter(predicate, NamedInternal.empty(), null, true);
}
@Override
public KTable<K, V> filterNot(final Predicate<? super K, ? super V> predicate,
final Named named) {
Objects.requireNonNull(predicate, "predicate can't be null");
return doFilter(predicate, named, null, true);
}
@Override
public KTable<K, V> filterNot(final Predicate<? super K, ? super V> predicate,
final Materialized<K, V, KeyValueStore<Bytes, byte[]>> materialized) {
return filterNot(predicate, NamedInternal.empty(), materialized);
}
@Override
public KTable<K, V> filterNot(final Predicate<? super K, ? super V> predicate,
final Named named,
final Materialized<K, V, KeyValueStore<Bytes, byte[]>> materialized) {
Objects.requireNonNull(predicate, "predicate can't be null");
Objects.requireNonNull(materialized, "materialized can't be null");
final MaterializedInternal<K, V, KeyValueStore<Bytes, byte[]>> materializedInternal = new MaterializedInternal<>(materialized);
final NamedInternal renamed = new NamedInternal(named);
return doFilter(predicate, renamed, materializedInternal, true);
}
    /**
     * Shared implementation of the {@code mapValues} overloads.
     * <p>
     * The value type may change, so the parent value serde is never inherited:
     * the key serde comes from materialized → parent, the value serde from
     * materialized → {@code null}. The result is only materialized when the
     * {@link Materialized} spec carries a queryable name.
     *
     * @param mapper               mapper applied to each (key, value)
     * @param named                user-supplied processor name (may be empty)
     * @param materializedInternal materialization spec, or {@code null} if none was given
     * @return the mapped table
     */
    @SuppressWarnings("resource")
    private <VR> KTable<K, VR> doMapValues(final ValueMapperWithKey<? super K, ? super V, ? extends VR> mapper,
                                           final Named named,
                                           final MaterializedInternal<K, VR, KeyValueStore<Bytes, byte[]>> materializedInternal) {
        final Serde<K> keySerde;
        final Serde<VR> valueSerde;
        final String queryableStoreName;
        final StoreFactory storeFactory;
        if (materializedInternal != null) {
            // we actually do not need to generate store names at all since if it is not specified, we will not
            // materialize the store; but we still need to burn one index BEFORE generating the processor to keep compatibility.
            if (materializedInternal.storeName() == null) {
                builder.newStoreName(MAPVALUES_NAME);
            }
            keySerde = materializedInternal.keySerde() != null ? materializedInternal.keySerde() : this.keySerde;
            valueSerde = materializedInternal.valueSerde();
            queryableStoreName = materializedInternal.queryableStoreName();
            // only materialize if materialized is specified and it has queryable name
            storeFactory = queryableStoreName != null ? (new KeyValueStoreMaterializer<>(materializedInternal)) : null;
        } else {
            keySerde = this.keySerde;
            valueSerde = null;
            queryableStoreName = null;
            storeFactory = null;
        }
        final String name = new NamedInternal(named).orElseGenerateWithPrefix(builder, MAPVALUES_NAME);
        final KTableProcessorSupplier<K, V, K, VR> processorSupplier = new KTableMapValues<>(this, mapper, queryableStoreName, storeFactory);
        // leaving in calls to ITB until building topology with graph
        final ProcessorParameters<K, VR, ?, ?> processorParameters = unsafeCastProcessorParametersToCompletelyDifferentType(
            new ProcessorParameters<>(processorSupplier, name)
        );
        final GraphNode tableNode = new ProcessorGraphNode<>(
            name,
            processorParameters
        );
        maybeSetOutputVersioned(tableNode, materializedInternal);
        builder.addGraphNode(this.graphNode, tableNode);
        // don't inherit parent value serde, since this operation may change the value type, more specifically:
        // we preserve the key following the order of 1) materialized, 2) parent, 3) null
        // we preserve the value following the order of 1) materialized, 2) null
        return new KTableImpl<>(
            name,
            keySerde,
            valueSerde,
            subTopologySourceNodes,
            queryableStoreName,
            processorSupplier,
            tableNode,
            builder
        );
    }
@Override
public <VR> KTable<K, VR> mapValues(final ValueMapper<? super V, ? extends VR> mapper) {
Objects.requireNonNull(mapper, "mapper can't be null");
return doMapValues(withKey(mapper), NamedInternal.empty(), null);
}
@Override
public <VR> KTable<K, VR> mapValues(final ValueMapper<? super V, ? extends VR> mapper,
final Named named) {
Objects.requireNonNull(mapper, "mapper can't be null");
return doMapValues(withKey(mapper), named, null);
}
@Override
public <VR> KTable<K, VR> mapValues(final ValueMapperWithKey<? super K, ? super V, ? extends VR> mapper) {
Objects.requireNonNull(mapper, "mapper can't be null");
return doMapValues(mapper, NamedInternal.empty(), null);
}
@Override
public <VR> KTable<K, VR> mapValues(final ValueMapperWithKey<? super K, ? super V, ? extends VR> mapper,
final Named named) {
Objects.requireNonNull(mapper, "mapper can't be null");
return doMapValues(mapper, named, null);
}
@Override
public <VR> KTable<K, VR> mapValues(final ValueMapper<? super V, ? extends VR> mapper,
final Materialized<K, VR, KeyValueStore<Bytes, byte[]>> materialized) {
return mapValues(mapper, NamedInternal.empty(), materialized);
}
@Override
public <VR> KTable<K, VR> mapValues(final ValueMapper<? super V, ? extends VR> mapper,
final Named named,
final Materialized<K, VR, KeyValueStore<Bytes, byte[]>> materialized) {
Objects.requireNonNull(mapper, "mapper can't be null");
Objects.requireNonNull(materialized, "materialized can't be null");
final MaterializedInternal<K, VR, KeyValueStore<Bytes, byte[]>> materializedInternal = new MaterializedInternal<>(materialized);
return doMapValues(withKey(mapper), named, materializedInternal);
}
@Override
public <VR> KTable<K, VR> mapValues(final ValueMapperWithKey<? super K, ? super V, ? extends VR> mapper,
final Materialized<K, VR, KeyValueStore<Bytes, byte[]>> materialized) {
return mapValues(mapper, NamedInternal.empty(), materialized);
}
@Override
public <VR> KTable<K, VR> mapValues(final ValueMapperWithKey<? super K, ? super V, ? extends VR> mapper,
final Named named,
final Materialized<K, VR, KeyValueStore<Bytes, byte[]>> materialized) {
Objects.requireNonNull(mapper, "mapper can't be null");
Objects.requireNonNull(materialized, "materialized can't be null");
final MaterializedInternal<K, VR, KeyValueStore<Bytes, byte[]>> materializedInternal = new MaterializedInternal<>(materialized);
return doMapValues(mapper, named, materializedInternal);
}
@Override
public <VR> KTable<K, VR> transformValues(final ValueTransformerWithKeySupplier<? super K, ? super V, ? extends VR> transformerSupplier,
final String... stateStoreNames) {
return doTransformValues(transformerSupplier, null, NamedInternal.empty(), stateStoreNames);
}
@Override
public <VR> KTable<K, VR> transformValues(final ValueTransformerWithKeySupplier<? super K, ? super V, ? extends VR> transformerSupplier,
final Named named,
final String... stateStoreNames) {
Objects.requireNonNull(named, "processorName can't be null");
return doTransformValues(transformerSupplier, null, new NamedInternal(named), stateStoreNames);
}
@Override
public <VR> KTable<K, VR> transformValues(final ValueTransformerWithKeySupplier<? super K, ? super V, ? extends VR> transformerSupplier,
final Materialized<K, VR, KeyValueStore<Bytes, byte[]>> materialized,
final String... stateStoreNames) {
return transformValues(transformerSupplier, materialized, NamedInternal.empty(), stateStoreNames);
}
@Override
public <VR> KTable<K, VR> transformValues(final ValueTransformerWithKeySupplier<? super K, ? super V, ? extends VR> transformerSupplier,
final Materialized<K, VR, KeyValueStore<Bytes, byte[]>> materialized,
final Named named,
final String... stateStoreNames) {
Objects.requireNonNull(materialized, "materialized can't be null");
Objects.requireNonNull(named, "named can't be null");
final MaterializedInternal<K, VR, KeyValueStore<Bytes, byte[]>> materializedInternal = new MaterializedInternal<>(materialized);
return doTransformValues(transformerSupplier, materializedInternal, new NamedInternal(named), stateStoreNames);
}
    /**
     * Shared implementation of the {@code transformValues} overloads.
     * <p>
     * The value type may change, so the parent value serde is never inherited.
     * The result is only materialized when the {@link Materialized} spec carries
     * a queryable name; in that case the store is wired to the processor via a
     * {@code StoreDelegatingProcessorSupplier}, and the node is additionally
     * connected to the caller-provided {@code stateStoreNames}.
     *
     * @param transformerSupplier  supplies the per-task transformer
     * @param materializedInternal materialization spec, or {@code null} if none was given
     * @param namedInternal        user-supplied processor name (may be empty)
     * @param stateStoreNames      external state stores to connect to the processor
     * @return the transformed table
     */
    @SuppressWarnings("resource")
    private <VR> KTable<K, VR> doTransformValues(final ValueTransformerWithKeySupplier<? super K, ? super V, ? extends VR> transformerSupplier,
                                                 final MaterializedInternal<K, VR, KeyValueStore<Bytes, byte[]>> materializedInternal,
                                                 final NamedInternal namedInternal,
                                                 final String... stateStoreNames) {
        Objects.requireNonNull(stateStoreNames, "stateStoreNames");
        final Serde<K> keySerde;
        final Serde<VR> valueSerde;
        final String queryableStoreName;
        final Set<StoreBuilder<?>> storeBuilder;
        if (materializedInternal != null) {
            // don't inherit parent value serde, since this operation may change the value type, more specifically:
            // we preserve the key following the order of 1) materialized, 2) parent, 3) null
            keySerde = materializedInternal.keySerde() != null ? materializedInternal.keySerde() : this.keySerde;
            // we preserve the value following the order of 1) materialized, 2) null
            valueSerde = materializedInternal.valueSerde();
            queryableStoreName = materializedInternal.queryableStoreName();
            // only materialize if materialized is specified and it has queryable name
            if (queryableStoreName != null) {
                final StoreFactory storeFactory = new KeyValueStoreMaterializer<>(materializedInternal);
                storeBuilder = Collections.singleton(new FactoryWrappingStoreBuilder<>(storeFactory));
            } else {
                storeBuilder = null;
            }
        } else {
            keySerde = this.keySerde;
            valueSerde = null;
            queryableStoreName = null;
            storeBuilder = null;
        }
        final String name = namedInternal.orElseGenerateWithPrefix(builder, TRANSFORMVALUES_NAME);
        final KTableProcessorSupplier<K, V, K, VR> processorSupplier = new KTableTransformValues<>(
            this,
            transformerSupplier,
            queryableStoreName);
        final ProcessorParameters<K, VR, ?, ?> processorParameters =
            unsafeCastProcessorParametersToCompletelyDifferentType(
                new ProcessorParameters<>(
                    new StoreDelegatingProcessorSupplier<>(
                        processorSupplier,
                        storeBuilder),
                    name
                ));
        final GraphNode tableNode = new ProcessorToStateConnectorNode<>(
            name,
            processorParameters,
            stateStoreNames
        );
        maybeSetOutputVersioned(tableNode, materializedInternal);
        builder.addGraphNode(this.graphNode, tableNode);
        return new KTableImpl<>(
            name,
            keySerde,
            valueSerde,
            subTopologySourceNodes,
            queryableStoreName,
            processorSupplier,
            tableNode,
            builder);
    }
@Override
public KStream<K, V> toStream() {
return toStream(NamedInternal.empty());
}
    /**
     * Converts this changelog table into a record stream by unwrapping each
     * {@code Change} into its new value. Key and value serdes are inherited
     * from this table.
     *
     * @param named processor name; must not be {@code null}
     * @return the resulting stream (not marked for repartitioning)
     */
    @Override
    public KStream<K, V> toStream(final Named named) {
        Objects.requireNonNull(named, "named can't be null");
        final String name = new NamedInternal(named).orElseGenerateWithPrefix(builder, TOSTREAM_NAME);
        // drop the old value of each Change and forward only the new one
        final KStreamMapValues<K, Change<V>, V> kStreamMapValues = new KStreamMapValues<>((key, change) -> change.newValue);
        final ProcessorParameters<K, V, ?, ?> processorParameters = unsafeCastProcessorParametersToCompletelyDifferentType(
            new ProcessorParameters<>(kStreamMapValues, name)
        );
        final ProcessorGraphNode<K, V> toStreamNode = new ProcessorGraphNode<>(
            name,
            processorParameters
        );
        builder.addGraphNode(this.graphNode, toStreamNode);
        // we can inherit parent key and value serde
        return new KStreamImpl<>(name, keySerde, valueSerde, subTopologySourceNodes, false, toStreamNode, builder);
    }
@Override
public <K1> KStream<K1, V> toStream(final KeyValueMapper<? super K, ? super V, ? extends K1> mapper) {
return toStream().selectKey(mapper);
}
@Override
public <K1> KStream<K1, V> toStream(final KeyValueMapper<? super K, ? super V, ? extends K1> mapper,
final Named named) {
return toStream(named).selectKey(mapper);
}
    /**
     * Suppresses intermediate updates of this table according to the given
     * {@link Suppressed} configuration, buffering records in an in-memory,
     * time-ordered buffer (optionally changelogged).
     *
     * @param suppressed the suppression configuration; must be a {@code NamedSuppressed}
     * @return the suppressed table (never versioned)
     * @throws TopologyException        if the direct parent is known to be versioned
     * @throws IllegalArgumentException if {@code suppressed} is a custom subclass
     */
    @Override
    public KTable<K, V> suppress(final Suppressed<? super K> suppressed) {
        // this is an eager, but insufficient check
        // the check only works if the direct parent is materialized
        // the actual check for "version inheritance" can only be done in the build-phase later
        // we keep this check to get a better stack trace if possible
        if (graphNode.isOutputVersioned().isPresent() && graphNode.isOutputVersioned().get()) {
            throw new TopologyException("suppress() is only supported for non-versioned KTables");
        }
        final String name;
        if (suppressed instanceof NamedSuppressed) {
            final String givenName = ((NamedSuppressed<?>) suppressed).name();
            name = givenName != null ? givenName : builder.newProcessorName(SUPPRESS_NAME);
        } else {
            throw new IllegalArgumentException("Custom subclasses of Suppressed are not supported.");
        }
        final SuppressedInternal<K> suppressedInternal = buildSuppress(suppressed, name);
        final String storeName;
        if (suppressedInternal.name() != null) {
            storeName = suppressedInternal.name() + "-store";
        } else {
            storeName = builder.newStoreName(SUPPRESS_NAME);
            // record the implicit changelog topic name so it can be reported/validated later
            if (suppressedInternal.bufferConfig().isLoggingEnabled()) {
                internalTopologyBuilder().addImplicitInternalNames(InternalResourcesNaming.builder().withChangelogTopic(storeName + "-changelog").build());
            }
        }
        // the buffer is changelogged only if the buffer config enables logging
        final StoreBuilder<InMemoryTimeOrderedKeyValueChangeBuffer<K, V, Change<V>>> storeBuilder;
        if (suppressedInternal.bufferConfig().isLoggingEnabled()) {
            final Map<String, String> topicConfig = suppressedInternal.bufferConfig().logConfig();
            storeBuilder = new InMemoryTimeOrderedKeyValueChangeBuffer.Builder<>(
                storeName,
                keySerde,
                valueSerde)
                .withLoggingEnabled(topicConfig);
        } else {
            storeBuilder = new InMemoryTimeOrderedKeyValueChangeBuffer.Builder<>(
                storeName,
                keySerde,
                valueSerde)
                .withLoggingDisabled();
        }
        final ProcessorSupplier<K, Change<V>, K, Change<V>> suppressionSupplier = new KTableSuppressProcessorSupplier<>(
            suppressedInternal,
            storeBuilder,
            this
        );
        final ProcessorGraphNode<K, Change<V>> node = new TableSuppressNode<>(
            name,
            new ProcessorParameters<>(suppressionSupplier, name)
        );
        node.setOutputVersioned(false);
        builder.addGraphNode(graphNode, node);
        return new KTableImpl<K, S, V>(
            name,
            keySerde,
            valueSerde,
            Collections.singleton(this.name),
            null,
            suppressionSupplier,
            node,
            builder
        );
    }
    /**
     * Resolves the user-facing {@link Suppressed} config into a {@code SuppressedInternal}.
     * For {@code untilWindowCloses} (final results), the suppress duration is taken
     * from the upstream window's grace period.
     *
     * @param suppress the user-supplied config
     * @param name     the resolved processor name (used only for logging)
     * @throws IllegalArgumentException if {@code suppress} is a custom subclass
     */
    @SuppressWarnings("unchecked")
    private SuppressedInternal<K> buildSuppress(final Suppressed<? super K> suppress, final String name) {
        if (suppress instanceof FinalResultsSuppressionBuilder) {
            final long grace = findAndVerifyWindowGrace(graphNode);
            LOG.info("Using grace period of [{}] as the suppress duration for node [{}].",
                     Duration.ofMillis(grace), name);
            final FinalResultsSuppressionBuilder<?> builder = (FinalResultsSuppressionBuilder<?>) suppress;
            final SuppressedInternal<?> finalResultsSuppression =
                builder.buildFinalResultsSuppression(Duration.ofMillis(grace));
            return (SuppressedInternal<K>) finalResultsSuppression;
        } else if (suppress instanceof SuppressedInternal) {
            return (SuppressedInternal<K>) suppress;
        } else {
            throw new IllegalArgumentException("Custom subclasses of Suppressed are not allowed.");
        }
    }
@Override
public <V1, R> KTable<K, R> join(final KTable<K, V1> other,
final ValueJoiner<? super V, ? super V1, ? extends R> joiner) {
return doJoin(other, joiner, NamedInternal.empty(), null, false, false);
}
@Override
public <V1, R> KTable<K, R> join(final KTable<K, V1> other,
final ValueJoiner<? super V, ? super V1, ? extends R> joiner,
final Named named) {
return doJoin(other, joiner, named, null, false, false);
}
@Override
public <VO, VR> KTable<K, VR> join(final KTable<K, VO> other,
final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
final Materialized<K, VR, KeyValueStore<Bytes, byte[]>> materialized) {
return join(other, joiner, NamedInternal.empty(), materialized);
}
@Override
public <VO, VR> KTable<K, VR> join(final KTable<K, VO> other,
final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
final Named named,
final Materialized<K, VR, KeyValueStore<Bytes, byte[]>> materialized) {
Objects.requireNonNull(materialized, "materialized can't be null");
final MaterializedInternal<K, VR, KeyValueStore<Bytes, byte[]>> materializedInternal =
new MaterializedInternal<>(materialized, builder, MERGE_NAME);
return doJoin(other, joiner, named, materializedInternal, false, false);
}
@Override
public <V1, R> KTable<K, R> outerJoin(final KTable<K, V1> other,
final ValueJoiner<? super V, ? super V1, ? extends R> joiner) {
return outerJoin(other, joiner, NamedInternal.empty());
}
@Override
public <V1, R> KTable<K, R> outerJoin(final KTable<K, V1> other,
final ValueJoiner<? super V, ? super V1, ? extends R> joiner,
final Named named) {
return doJoin(other, joiner, named, null, true, true);
}
@Override
public <VO, VR> KTable<K, VR> outerJoin(final KTable<K, VO> other,
final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
final Materialized<K, VR, KeyValueStore<Bytes, byte[]>> materialized) {
return outerJoin(other, joiner, NamedInternal.empty(), materialized);
}
@Override
public <VO, VR> KTable<K, VR> outerJoin(final KTable<K, VO> other,
final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
final Named named,
final Materialized<K, VR, KeyValueStore<Bytes, byte[]>> materialized) {
Objects.requireNonNull(materialized, "materialized can't be null");
final MaterializedInternal<K, VR, KeyValueStore<Bytes, byte[]>> materializedInternal =
new MaterializedInternal<>(materialized, builder, MERGE_NAME);
return doJoin(other, joiner, named, materializedInternal, true, true);
}
@Override
public <V1, R> KTable<K, R> leftJoin(final KTable<K, V1> other,
final ValueJoiner<? super V, ? super V1, ? extends R> joiner) {
return leftJoin(other, joiner, NamedInternal.empty());
}
@Override
public <V1, R> KTable<K, R> leftJoin(final KTable<K, V1> other,
final ValueJoiner<? super V, ? super V1, ? extends R> joiner,
final Named named) {
return doJoin(other, joiner, named, null, true, false);
}
@Override
public <VO, VR> KTable<K, VR> leftJoin(final KTable<K, VO> other,
final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
final Materialized<K, VR, KeyValueStore<Bytes, byte[]>> materialized) {
return leftJoin(other, joiner, NamedInternal.empty(), materialized);
}
@Override
public <VO, VR> KTable<K, VR> leftJoin(final KTable<K, VO> other,
final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
final Named named,
final Materialized<K, VR, KeyValueStore<Bytes, byte[]>> materialized) {
Objects.requireNonNull(materialized, "materialized can't be null");
final MaterializedInternal<K, VR, KeyValueStore<Bytes, byte[]>> materializedInternal =
new MaterializedInternal<>(materialized, builder, MERGE_NAME);
return doJoin(other, joiner, named, materializedInternal, true, false);
}
    /**
     * Shared implementation of the primary-key {@code join}/{@code leftJoin}/{@code outerJoin}
     * overloads. Builds the two per-side join processors plus a merge node, verifies
     * co-partitioning with {@code other}, and enables old-value forwarding on whichever
     * side is "outer" (its deletes must reach the joiner).
     *
     * @param other                the table to join with (must be co-partitioned)
     * @param joiner               combines the two values
     * @param joinName             user-supplied name (may be empty)
     * @param materializedInternal materialization spec, or {@code null} if none was given
     * @param leftOuter            whether this (left) side is outer
     * @param rightOuter           whether the other (right) side is outer
     * @return the joined table
     */
    @SuppressWarnings({"unchecked", "resource"})
    private <VO, VR> KTable<K, VR> doJoin(final KTable<K, VO> other,
                                          final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
                                          final Named joinName,
                                          final MaterializedInternal<K, VR, KeyValueStore<Bytes, byte[]>> materializedInternal,
                                          final boolean leftOuter,
                                          final boolean rightOuter) {
        Objects.requireNonNull(other, "other can't be null");
        Objects.requireNonNull(joiner, "joiner can't be null");
        Objects.requireNonNull(joinName, "joinName can't be null");
        final NamedInternal renamed = new NamedInternal(joinName);
        final String joinMergeName = renamed.orElseGenerateWithPrefix(builder, MERGE_NAME);
        final Set<String> allSourceNodes = ensureCopartitionWith(Collections.singleton((AbstractStream<K, VO>) other));
        if (leftOuter) {
            enableSendingOldValues(true);
        }
        if (rightOuter) {
            ((KTableImpl<?, ?, ?>) other).enableSendingOldValues(true);
        }
        final Serde<K> keySerde;
        final Serde<VR> valueSerde;
        final String queryableStoreName;
        final StoreFactory storeFactory;
        if (materializedInternal != null) {
            if (materializedInternal.keySerde() == null) {
                materializedInternal.withKeySerde(this.keySerde);
            }
            keySerde = materializedInternal.keySerde();
            valueSerde = materializedInternal.valueSerde();
            queryableStoreName = materializedInternal.storeName();
            storeFactory = new KeyValueStoreMaterializer<>(materializedInternal);
        } else {
            keySerde = this.keySerde;
            valueSerde = null;
            queryableStoreName = null;
            storeFactory = null;
        }
        // one join processor per side; the "other" side uses the reversed joiner
        final KTableKTableAbstractJoin<K, V, VO, VR> joinThis;
        final KTableKTableAbstractJoin<K, VO, V, VR> joinOther;
        if (!leftOuter) { // inner
            joinThis = new KTableKTableInnerJoin<>(this, (KTableImpl<K, ?, VO>) other, joiner);
            joinOther = new KTableKTableInnerJoin<>((KTableImpl<K, ?, VO>) other, this, reverseJoiner(joiner));
        } else if (!rightOuter) { // left
            joinThis = new KTableKTableLeftJoin<>(this, (KTableImpl<K, ?, VO>) other, joiner);
            joinOther = new KTableKTableRightJoin<>((KTableImpl<K, ?, VO>) other, this, reverseJoiner(joiner));
        } else { // outer
            joinThis = new KTableKTableOuterJoin<>(this, (KTableImpl<K, ?, VO>) other, joiner);
            joinOther = new KTableKTableOuterJoin<>((KTableImpl<K, ?, VO>) other, this, reverseJoiner(joiner));
        }
        final String joinThisName = renamed.suffixWithOrElseGet("-join-this", builder, JOINTHIS_NAME);
        final String joinOtherName = renamed.suffixWithOrElseGet("-join-other", builder, JOINOTHER_NAME);
        final ProcessorParameters<K, Change<V>, ?, ?> joinThisProcessorParameters = new ProcessorParameters<>(joinThis, joinThisName);
        final ProcessorParameters<K, Change<VO>, ?, ?> joinOtherProcessorParameters = new ProcessorParameters<>(joinOther, joinOtherName);
        final ProcessorParameters<K, Change<VR>, ?, ?> joinMergeProcessorParameters = new ProcessorParameters<>(
            KTableKTableJoinMerger.of(
                (KTableProcessorSupplier<K, V, K, VR>) joinThisProcessorParameters.processorSupplier(),
                (KTableProcessorSupplier<K, VO, K, VR>) joinOtherProcessorParameters.processorSupplier(),
                queryableStoreName,
                storeFactory),
            joinMergeName);
        final KTableKTableJoinNode<K, V, VO, VR> kTableKTableJoinNode =
            KTableKTableJoinNode.<K, V, VO, VR>kTableKTableJoinNodeBuilder()
                .withNodeName(joinMergeName)
                .withJoinThisProcessorParameters(joinThisProcessorParameters)
                .withJoinOtherProcessorParameters(joinOtherProcessorParameters)
                .withMergeProcessorParameters(joinMergeProcessorParameters)
                .withThisJoinSideNodeName(name)
                .withOtherJoinSideNodeName(((KTableImpl<?, ?, ?>) other).name)
                .withJoinThisStoreNames(valueGetterSupplier().storeNames())
                .withJoinOtherStoreNames(((KTableImpl<?, ?, ?>) other).valueGetterSupplier().storeNames())
                .withKeySerde(keySerde)
                .withValueSerde(valueSerde)
                .build();
        // the result is versioned only when explicitly materialized with a versioned store
        final boolean isOutputVersioned = materializedInternal != null
            && materializedInternal.storeSupplier() instanceof VersionedBytesStoreSupplier;
        kTableKTableJoinNode.setOutputVersioned(isOutputVersioned);
        builder.addGraphNode(this.graphNode, kTableKTableJoinNode);
        builder.addGraphNode(((KTableImpl<?, ?, ?>) other).graphNode, kTableKTableJoinNode);
        // we can inherit parent key serde if user do not provide specific overrides
        return new KTableImpl<K, Change<VR>, VR>(
            kTableKTableJoinNode.nodeName(),
            kTableKTableJoinNode.keySerde(),
            kTableKTableJoinNode.valueSerde(),
            allSourceNodes,
            kTableKTableJoinNode.queryableStoreName(),
            kTableKTableJoinNode.joinMerger(),
            kTableKTableJoinNode,
            builder
        );
    }
@Override
public <K1, V1> KGroupedTable<K1, V1> groupBy(final KeyValueMapper<? super K, ? super V, ? extends KeyValue<? extends K1, ? extends V1>> selector) {
return groupBy(selector, Grouped.with(null, null));
}
    /**
     * Re-groups this table on a new key (and value) chosen by {@code selector}.
     * Since re-grouping must see retractions, old-value forwarding is forcibly
     * enabled on this table.
     *
     * @param selector maps each record to its new (key, value)
     * @param grouped  serde overrides and naming for the grouping
     * @return the grouped table
     */
    @Override
    public <K1, V1> KGroupedTable<K1, V1> groupBy(final KeyValueMapper<? super K, ? super V, ? extends KeyValue<? extends K1, ? extends V1>> selector,
                                                  final Grouped<K1, V1> grouped) {
        Objects.requireNonNull(selector, "selector can't be null");
        Objects.requireNonNull(grouped, "grouped can't be null");
        final GroupedInternal<K1, V1> groupedInternal = new GroupedInternal<>(grouped);
        final String selectName = new NamedInternal(groupedInternal.name()).orElseGenerateWithPrefix(builder, SELECT_NAME);
        final KTableRepartitionMapSupplier<K, V, KeyValue<? extends K1, ? extends V1>, K1, V1> selectSupplier = new KTableRepartitionMap<>(this, selector);
        final ProcessorParameters<K, Change<V>, ?, ?> processorParameters = new ProcessorParameters<>(selectSupplier, selectName);
        // select the aggregate key and values (old and new), it would require parent to send old values
        final TableRepartitionMapNode<K, Change<V>> groupByMapNode = new TableRepartitionMapNode<>(selectName, processorParameters);
        builder.addGraphNode(this.graphNode, groupByMapNode);
        this.enableSendingOldValues(true);
        return new KGroupedTableImpl<>(
            builder,
            selectName,
            subTopologySourceNodes,
            groupedInternal,
            groupByMapNode
        );
    }
@SuppressWarnings("unchecked")
public KTableValueGetterSupplier<K, V> valueGetterSupplier() {
if (processorSupplier instanceof KTableSource) {
final KTableSource<K, V> source = (KTableSource<K, V>) processorSupplier;
// whenever a source ktable is required for getter, it should be materialized
source.materialize();
return new KTableSourceValueGetterSupplier<>(source.queryableName());
} else if (processorSupplier instanceof KStreamAggProcessorSupplier) {
return ((KStreamAggProcessorSupplier<?, ?, K, V>) processorSupplier).view();
} else {
return ((KTableProcessorSupplier<?, ?, K, V>) processorSupplier).view();
}
}
public boolean enableSendingOldValues(final boolean forceMaterialization) {
if (!sendOldValues) {
if (processorSupplier instanceof KTableSource) {
final KTableSource<?, ?> source = (KTableSource<?, ?>) processorSupplier;
if (!forceMaterialization && !source.materialized()) {
return false;
}
source.enableSendingOldValues();
} else if (processorSupplier instanceof KStreamAggProcessorSupplier) {
((KStreamAggProcessorSupplier<?, ?, ?, ?>) processorSupplier).enableSendingOldValues();
} else if (processorSupplier instanceof KTableProcessorSupplier) {
final KTableProcessorSupplier<?, ?, ?, ?> tableProcessorSupplier =
(KTableProcessorSupplier<?, ?, ?, ?>) processorSupplier;
if (!tableProcessorSupplier.enableSendingOldValues(forceMaterialization)) {
return false;
}
}
sendOldValues = true;
}
return true;
}
boolean sendingOldValueEnabled() {
return sendOldValues;
}
/**
* We conflate V with Change<V> in many places. This will get fixed in the implementation of KIP-478.
* For now, I'm just explicitly lying about the parameterized type.
*/
@SuppressWarnings("unchecked")
private <VR> ProcessorParameters<K, VR, ?, ?> unsafeCastProcessorParametersToCompletelyDifferentType(final ProcessorParameters<K, Change<V>, ?, ?> kObjectProcessorParameters) {
return (ProcessorParameters<K, VR, ?, ?>) kObjectProcessorParameters;
}
@Override
public <VR, KO, VO> KTable<K, VR> join(final KTable<KO, VO> other,
final Function<? super V, ? extends KO> foreignKeyExtractor,
final ValueJoiner<? super V, ? super VO, ? extends VR> joiner) {
final ForeignKeyExtractor<? super K, ? super V, ? extends KO> adaptedExtractor
= ForeignKeyExtractor.fromFunction(foreignKeyExtractor);
return doJoinOnForeignKey(
other,
adaptedExtractor,
joiner,
TableJoined.with(null, null),
Materialized.with(null, null),
false
);
}
@Override
public <VR, KO, VO> KTable<K, VR> join(final KTable<KO, VO> other,
final BiFunction<? super K, ? super V, ? extends KO> foreignKeyExtractor,
final ValueJoiner<? super V, ? super VO, ? extends VR> joiner) {
final ForeignKeyExtractor<? super K, ? super V, ? extends KO> adaptedExtractor
= ForeignKeyExtractor.fromBiFunction(foreignKeyExtractor);
return doJoinOnForeignKey(
other,
adaptedExtractor,
joiner,
TableJoined.with(null, null),
Materialized.with(null, null),
false
);
}
@Override
public <VR, KO, VO> KTable<K, VR> join(final KTable<KO, VO> other,
final Function<? super V, ? extends KO> foreignKeyExtractor,
final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
final TableJoined<K, KO> tableJoined) {
final ForeignKeyExtractor<? super K, ? super V, ? extends KO> adaptedExtractor
= ForeignKeyExtractor.fromFunction(foreignKeyExtractor);
return doJoinOnForeignKey(
other,
adaptedExtractor,
joiner,
tableJoined,
Materialized.with(null, null),
false
);
}
@Override
public <VR, KO, VO> KTable<K, VR> join(final KTable<KO, VO> other,
final BiFunction<? super K, ? super V, ? extends KO> foreignKeyExtractor,
final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
final TableJoined<K, KO> tableJoined) {
final ForeignKeyExtractor<? super K, ? super V, ? extends KO> adaptedExtractor
= ForeignKeyExtractor.fromBiFunction(foreignKeyExtractor);
return doJoinOnForeignKey(
other,
adaptedExtractor,
joiner,
tableJoined,
Materialized.with(null, null),
false
);
}
@Override
public <VR, KO, VO> KTable<K, VR> join(final KTable<KO, VO> other,
final Function<? super V, ? extends KO> foreignKeyExtractor,
final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
final Materialized<K, VR, KeyValueStore<Bytes, byte[]>> materialized) {
final ForeignKeyExtractor<? super K, ? super V, ? extends KO> adaptedExtractor
= ForeignKeyExtractor.fromFunction(foreignKeyExtractor);
return doJoinOnForeignKey(
other,
adaptedExtractor,
joiner,
TableJoined.with(null, null),
materialized,
false
);
}
@Override
public <VR, KO, VO> KTable<K, VR> join(final KTable<KO, VO> other,
final BiFunction<? super K, ? super V, ? extends KO> foreignKeyExtractor,
final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
final Materialized<K, VR, KeyValueStore<Bytes, byte[]>> materialized) {
final ForeignKeyExtractor<? super K, ? super V, ? extends KO> adaptedExtractor
= ForeignKeyExtractor.fromBiFunction(foreignKeyExtractor);
return doJoinOnForeignKey(
other,
adaptedExtractor,
joiner,
TableJoined.with(null, null),
materialized,
false
);
}
@Override
public <VR, KO, VO> KTable<K, VR> join(final KTable<KO, VO> other,
final Function<? super V, ? extends KO> foreignKeyExtractor,
final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
final TableJoined<K, KO> tableJoined,
final Materialized<K, VR, KeyValueStore<Bytes, byte[]>> materialized) {
final ForeignKeyExtractor<? super K, ? super V, ? extends KO> adaptedExtractor
= ForeignKeyExtractor.fromFunction(foreignKeyExtractor);
return doJoinOnForeignKey(
other,
adaptedExtractor,
joiner,
tableJoined,
materialized,
false
);
}
@Override
public <VR, KO, VO> KTable<K, VR> join(final KTable<KO, VO> other,
final BiFunction<? super K, ? super V, ? extends KO> foreignKeyExtractor,
final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
final TableJoined<K, KO> tableJoined,
final Materialized<K, VR, KeyValueStore<Bytes, byte[]>> materialized) {
final ForeignKeyExtractor<? super K, ? super V, ? extends KO> adaptedExtractor
= ForeignKeyExtractor.fromBiFunction(foreignKeyExtractor);
return doJoinOnForeignKey(
other,
adaptedExtractor,
joiner,
tableJoined,
materialized,
false
);
}
@Override
public <VR, KO, VO> KTable<K, VR> leftJoin(final KTable<KO, VO> other,
final Function<? super V, ? extends KO> foreignKeyExtractor,
final ValueJoiner<? super V, ? super VO, ? extends VR> joiner) {
final ForeignKeyExtractor<? super K, ? super V, ? extends KO> adaptedExtractor
= ForeignKeyExtractor.fromFunction(foreignKeyExtractor);
return doJoinOnForeignKey(
other,
adaptedExtractor,
joiner,
TableJoined.with(null, null),
Materialized.with(null, null),
true
);
}
@Override
public <VR, KO, VO> KTable<K, VR> leftJoin(final KTable<KO, VO> other,
final BiFunction<? super K, ? super V, ? extends KO> foreignKeyExtractor,
final ValueJoiner<? super V, ? super VO, ? extends VR> joiner) {
final ForeignKeyExtractor<? super K, ? super V, ? extends KO> adaptedExtractor
= ForeignKeyExtractor.fromBiFunction(foreignKeyExtractor);
return doJoinOnForeignKey(
other,
adaptedExtractor,
joiner,
TableJoined.with(null, null),
Materialized.with(null, null),
true
);
}
@Override
public <VR, KO, VO> KTable<K, VR> leftJoin(final KTable<KO, VO> other,
final Function<? super V, ? extends KO> foreignKeyExtractor,
final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
final TableJoined<K, KO> tableJoined) {
final ForeignKeyExtractor<? super K, ? super V, ? extends KO> adaptedExtractor
= ForeignKeyExtractor.fromFunction(foreignKeyExtractor);
return doJoinOnForeignKey(
other,
adaptedExtractor,
joiner,
tableJoined,
Materialized.with(null, null),
true
);
}
@Override
public <VR, KO, VO> KTable<K, VR> leftJoin(final KTable<KO, VO> other,
final BiFunction<? super K, ? super V, ? extends KO> foreignKeyExtractor,
final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
final TableJoined<K, KO> tableJoined) {
final ForeignKeyExtractor<? super K, ? super V, ? extends KO> adaptedExtractor
= ForeignKeyExtractor.fromBiFunction(foreignKeyExtractor);
return doJoinOnForeignKey(
other,
adaptedExtractor,
joiner,
tableJoined,
Materialized.with(null, null),
true
);
}
@Override
public <VR, KO, VO> KTable<K, VR> leftJoin(final KTable<KO, VO> other,
final Function<? super V, ? extends KO> foreignKeyExtractor,
final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
final TableJoined<K, KO> tableJoined,
final Materialized<K, VR, KeyValueStore<Bytes, byte[]>> materialized) {
final ForeignKeyExtractor<? super K, ? super V, ? extends KO> adaptedExtractor
= ForeignKeyExtractor.fromFunction(foreignKeyExtractor);
return doJoinOnForeignKey(
other,
adaptedExtractor,
joiner,
tableJoined,
materialized,
true);
}
@Override
public <VR, KO, VO> KTable<K, VR> leftJoin(final KTable<KO, VO> other,
final BiFunction<? super K, ? super V, ? extends KO> foreignKeyExtractor,
final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
final TableJoined<K, KO> tableJoined,
final Materialized<K, VR, KeyValueStore<Bytes, byte[]>> materialized) {
final ForeignKeyExtractor<? super K, ? super V, ? extends KO> adaptedExtractor
= ForeignKeyExtractor.fromBiFunction(foreignKeyExtractor);
return doJoinOnForeignKey(
other,
adaptedExtractor,
joiner,
tableJoined,
materialized,
true);
}
@Override
public <VR, KO, VO> KTable<K, VR> leftJoin(final KTable<KO, VO> other,
final Function<? super V, ? extends KO> foreignKeyExtractor,
final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
final Materialized<K, VR, KeyValueStore<Bytes, byte[]>> materialized) {
final ForeignKeyExtractor<? super K, ? super V, ? extends KO> adaptedExtractor
= ForeignKeyExtractor.fromFunction(foreignKeyExtractor);
return doJoinOnForeignKey(
other,
adaptedExtractor,
joiner,
TableJoined.with(null, null),
materialized,
true
);
}
@Override
public <VR, KO, VO> KTable<K, VR> leftJoin(final KTable<KO, VO> other,
final BiFunction<? super K, ? super V, ? extends KO> foreignKeyExtractor,
final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
final Materialized<K, VR, KeyValueStore<Bytes, byte[]>> materialized) {
final ForeignKeyExtractor<? super K, ? super V, ? extends KO> adaptedExtractor
= ForeignKeyExtractor.fromBiFunction(foreignKeyExtractor);
return doJoinOnForeignKey(
other,
adaptedExtractor,
joiner,
TableJoined.with(null, null),
materialized,
true
);
}
private final Function<Optional<Set<Integer>>, Optional<Set<Integer>>> getPartition = maybeMulticastPartitions -> {
if (maybeMulticastPartitions.isEmpty()) {
return Optional.empty();
}
if (maybeMulticastPartitions.get().size() != 1) {
throw new IllegalArgumentException("The partitions returned by StreamPartitioner#partitions method when used for FK join should be a singleton set");
}
return maybeMulticastPartitions;
};
    /**
     * Shared implementation behind every foreign-key {@code join}/{@code leftJoin} overload.
     * Wires up the subscribe/respond topology: this (primary) table emits subscription
     * records keyed by the extracted foreign key onto an internal "subscription registration"
     * topic; the foreign-key table's side resolves those subscriptions against its state and
     * reacts to its own updates; responses are repartitioned back to the primary side, where
     * they are joined with the primary value to produce the result table.
     *
     * @param foreignKeyTable     the right-hand table, keyed by the extracted foreign key
     * @param foreignKeyExtractor derives the foreign key from this table's key and/or value
     * @param joiner              combines the left and right values into the result value
     * @param tableJoined         naming and custom stream-partitioner options
     * @param materialized        materialization spec for the result table
     * @param leftJoin            true for left-join semantics, false for inner join
     */
    @SuppressWarnings("resource")
    private <VR, KO, VO> KTable<K, VR> doJoinOnForeignKey(final KTable<KO, VO> foreignKeyTable,
                                                          final ForeignKeyExtractor<? super K, ? super V, ? extends KO> foreignKeyExtractor,
                                                          final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
                                                          final TableJoined<K, KO> tableJoined,
                                                          final Materialized<K, VR, KeyValueStore<Bytes, byte[]>> materialized,
                                                          final boolean leftJoin) {
        Objects.requireNonNull(foreignKeyTable, "foreignKeyTable can't be null");
        Objects.requireNonNull(foreignKeyExtractor, "foreignKeyExtractor can't be null");
        Objects.requireNonNull(joiner, "joiner can't be null");
        Objects.requireNonNull(tableJoined, "tableJoined can't be null");
        Objects.requireNonNull(materialized, "materialized can't be null");
        //Old values are a useful optimization. The old values from the foreignKeyTable table are compared to the new values,
        //such that identical values do not cause a prefixScan. PrefixScan and propagation can be expensive and should
        //not be done needlessly.
        ((KTableImpl<?, ?, ?>) foreignKeyTable).enableSendingOldValues(true);
        //Old values must be sent such that the SubscriptionSendProcessorSupplier can propagate deletions to the correct node.
        //This occurs whenever the extracted foreignKey changes values.
        enableSendingOldValues(true);
        final TableJoinedInternal<K, KO> tableJoinedInternal = new TableJoinedInternal<>(tableJoined);
        final NamedInternal renamed = new NamedInternal(tableJoinedInternal.name());
        // Internal repartition topic carrying subscription registrations, keyed by foreign key.
        final String subscriptionTopicName = renamed.suffixWithOrElseGet(
            "-subscription-registration",
            builder,
            SUBSCRIPTION_REGISTRATION
        ) + TOPIC_SUFFIX;
        // the decoration can't be performed until we have the configuration available when the app runs,
        // so we pass Suppliers into the components, which they can call at run time
        final Supplier<String> subscriptionPrimaryKeySerdePseudoTopic =
            () -> internalTopologyBuilder().decoratePseudoTopic(subscriptionTopicName + "-pk");
        final Supplier<String> subscriptionForeignKeySerdePseudoTopic =
            () -> internalTopologyBuilder().decoratePseudoTopic(subscriptionTopicName + "-fk");
        final Supplier<String> valueHashSerdePseudoTopic =
            () -> internalTopologyBuilder().decoratePseudoTopic(subscriptionTopicName + "-vh");
        builder.internalTopologyBuilder.addInternalTopic(subscriptionTopicName, InternalTopicProperties.empty());
        // Serdes for the subscription wire format: FK-keyed wrapper going out, response wrapper coming back.
        final Serde<KO> foreignKeySerde = ((KTableImpl<KO, ?, ?>) foreignKeyTable).keySerde;
        final Serde<SubscriptionWrapper<K>> subscriptionWrapperSerde = new SubscriptionWrapperSerde<>(subscriptionPrimaryKeySerdePseudoTopic, keySerde);
        final SubscriptionResponseWrapperSerde<VO> responseWrapperSerde =
            new SubscriptionResponseWrapperSerde<>(((KTableImpl<?, ?, VO>) foreignKeyTable).valueSerde);
        // Combined (FK, PK) key schema used by the subscription store for prefix scans by foreign key.
        final CombinedKeySchema<KO, K> combinedKeySchema = new CombinedKeySchema<>(
            subscriptionForeignKeySerdePseudoTopic,
            foreignKeySerde,
            subscriptionPrimaryKeySerdePseudoTopic,
            keySerde
        );
        // Primary side: turn each Change<V> into subscription (or unsubscription) records keyed by FK.
        final ProcessorGraphNode<K, Change<V>> subscriptionSendNode = new ForeignJoinSubscriptionSendNode<>(
            new ProcessorParameters<>(
                new SubscriptionSendProcessorSupplier<>(
                    foreignKeyExtractor,
                    subscriptionForeignKeySerdePseudoTopic,
                    valueHashSerdePseudoTopic,
                    foreignKeySerde,
                    valueSerde == null ? null : valueSerde.serializer(),
                    leftJoin
                ),
                renamed.suffixWithOrElseGet("-subscription-registration-processor", builder, SUBSCRIPTION_REGISTRATION)
            )
        );
        builder.addGraphNode(graphNode, subscriptionSendNode);
        // Optional custom partitioner for the subscription topic (must resolve to a single partition).
        final StreamPartitioner<KO, SubscriptionWrapper<K>> subscriptionSinkPartitioner =
            tableJoinedInternal.otherPartitioner() == null
                ? null
                : (topic, key, val, numPartitions) -> getPartition.apply(tableJoinedInternal.otherPartitioner().partitions(topic, key, null, numPartitions));
        final StreamSinkNode<KO, SubscriptionWrapper<K>> subscriptionSink = new StreamSinkNode<>(
            renamed.suffixWithOrElseGet("-subscription-registration-sink", builder, SINK_NAME),
            new StaticTopicNameExtractor<>(subscriptionTopicName),
            new ProducedInternal<>(Produced.with(foreignKeySerde, subscriptionWrapperSerde, subscriptionSinkPartitioner))
        );
        builder.addGraphNode(subscriptionSendNode, subscriptionSink);
        final StreamSourceNode<KO, SubscriptionWrapper<K>> subscriptionSource = new StreamSourceNode<>(
            renamed.suffixWithOrElseGet("-subscription-registration-source", builder, SOURCE_NAME),
            Collections.singleton(subscriptionTopicName),
            new ConsumedInternal<>(Consumed.with(foreignKeySerde, subscriptionWrapperSerde))
        );
        builder.addGraphNode(subscriptionSink, subscriptionSource);
        // The subscription source is the source node on the *receiving* end *after* the repartition.
        // This topic needs to be copartitioned with the Foreign Key table.
        final Set<String> copartitionedRepartitionSources =
            new HashSet<>(((KTableImpl<?, ?, ?>) foreignKeyTable).subTopologySourceNodes);
        copartitionedRepartitionSources.add(subscriptionSource.nodeName());
        builder.internalTopologyBuilder.copartitionSources(copartitionedRepartitionSources);
        // State store on the FK side remembering which primary keys subscribed to which foreign keys.
        final String subscriptionStoreName = renamed
            .suffixWithOrElseGet("-subscription-store", builder, FK_JOIN_STATE_STORE_NAME);
        final StoreFactory subscriptionStoreFactory =
            new SubscriptionStoreFactory<>(subscriptionStoreName, subscriptionWrapperSerde);
        // FK side: persist incoming subscriptions into the subscription store.
        final String subscriptionReceiveName = renamed.suffixWithOrElseGet(
            "-subscription-receive", builder, SUBSCRIPTION_PROCESSOR);
        final ProcessorGraphNode<KO, SubscriptionWrapper<K>> subscriptionReceiveNode =
            new ProcessorGraphNode<>(
                subscriptionReceiveName,
                new ProcessorParameters<>(
                    new SubscriptionReceiveProcessorSupplier<>(subscriptionStoreFactory, combinedKeySchema),
                    subscriptionReceiveName)
            );
        builder.addGraphNode(subscriptionSource, subscriptionReceiveNode);
        // FK side: resolve a newly-arrived subscription against the foreign table's current value.
        final KTableValueGetterSupplier<KO, VO> foreignKeyValueGetter = ((KTableImpl<KO, ?, VO>) foreignKeyTable).valueGetterSupplier();
        final ProcessorToStateConnectorNode<CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>> subscriptionJoinNode =
            new ProcessorToStateConnectorNode<>(
                new ProcessorParameters<>(
                    new SubscriptionJoinProcessorSupplier<>(
                        foreignKeyValueGetter
                    ),
                    renamed.suffixWithOrElseGet("-subscription-join-foreign", builder, SUBSCRIPTION_PROCESSOR)
                ),
                Collections.singleton(foreignKeyValueGetter)
            );
        builder.addGraphNode(subscriptionReceiveNode, subscriptionJoinNode);
        // FK side: when the foreign table itself changes, fan out to all subscribed primary keys.
        final String foreignTableJoinName = renamed
            .suffixWithOrElseGet("-foreign-join-subscription", builder, SUBSCRIPTION_PROCESSOR);
        final ProcessorGraphNode<KO, Change<VO>> foreignTableJoinNode = new ForeignTableJoinNode<>(
            new ProcessorParameters<>(
                new ForeignTableJoinProcessorSupplier<>(subscriptionStoreFactory, combinedKeySchema),
                foreignTableJoinName
            )
        );
        builder.addGraphNode(((KTableImpl<?, ?, ?>) foreignKeyTable).graphNode, foreignTableJoinNode);
        // Internal topic carrying responses back to the primary side, partitioned by primary key.
        final String finalRepartitionTopicName = renamed.suffixWithOrElseGet("-subscription-response", builder, SUBSCRIPTION_RESPONSE) + TOPIC_SUFFIX;
        builder.internalTopologyBuilder.addInternalTopic(finalRepartitionTopicName, InternalTopicProperties.empty());
        // By default, route each response straight back to the partition recorded in the wrapper.
        final StreamPartitioner<K, SubscriptionResponseWrapper<VO>> defaultForeignResponseSinkPartitioner =
            (topic, key, subscriptionResponseWrapper, numPartitions) -> {
                final Integer partition = subscriptionResponseWrapper.primaryPartition();
                return partition == null ? Optional.empty() : Optional.of(Collections.singleton(partition));
            };
        final StreamPartitioner<K, SubscriptionResponseWrapper<VO>> foreignResponseSinkPartitioner =
            tableJoinedInternal.partitioner() == null
                ? defaultForeignResponseSinkPartitioner
                : (topic, key, val, numPartitions) -> getPartition.apply(tableJoinedInternal.partitioner().partitions(topic, key, null, numPartitions));
        final StreamSinkNode<K, SubscriptionResponseWrapper<VO>> foreignResponseSink =
            new StreamSinkNode<>(
                renamed.suffixWithOrElseGet("-subscription-response-sink", builder, SINK_NAME),
                new StaticTopicNameExtractor<>(finalRepartitionTopicName),
                new ProducedInternal<>(Produced.with(keySerde, responseWrapperSerde, foreignResponseSinkPartitioner))
            );
        // Both FK-side paths (new subscriptions and foreign-table updates) feed the response topic.
        builder.addGraphNode(subscriptionJoinNode, foreignResponseSink);
        builder.addGraphNode(foreignTableJoinNode, foreignResponseSink);
        final StreamSourceNode<K, SubscriptionResponseWrapper<VO>> foreignResponseSource = new StreamSourceNode<>(
            renamed.suffixWithOrElseGet("-subscription-response-source", builder, SOURCE_NAME),
            Collections.singleton(finalRepartitionTopicName),
            new ConsumedInternal<>(Consumed.with(keySerde, responseWrapperSerde))
        );
        builder.addGraphNode(foreignResponseSink, foreignResponseSource);
        // the response topic has to be copartitioned with the left (primary) side of the join
        final Set<String> resultSourceNodes = new HashSet<>(this.subTopologySourceNodes);
        resultSourceNodes.add(foreignResponseSource.nodeName());
        builder.internalTopologyBuilder.copartitionSources(resultSourceNodes);
        // Primary side: re-check the response against the current primary value (via its hash)
        // before applying the user's joiner, to discard stale responses.
        final KTableValueGetterSupplier<K, V> primaryKeyValueGetter = valueGetterSupplier();
        final ProcessorToStateConnectorNode<K, SubscriptionResponseWrapper<VO>> responseJoinNode = new ProcessorToStateConnectorNode<>(
            new ProcessorParameters<>(
                new ResponseJoinProcessorSupplier<>(
                    primaryKeyValueGetter,
                    valueSerde == null ? null : valueSerde.serializer(),
                    valueHashSerdePseudoTopic,
                    joiner,
                    leftJoin
                ),
                renamed.suffixWithOrElseGet("-subscription-response-resolver", builder, SUBSCRIPTION_RESPONSE_RESOLVER_PROCESSOR)
            ),
            Collections.singleton(primaryKeyValueGetter)
        );
        builder.addGraphNode(foreignResponseSource, responseJoinNode);
        // Materialize the joined result as the output KTable.
        final String resultProcessorName = renamed.suffixWithOrElseGet("-result", builder, FK_JOIN_OUTPUT_NAME);
        final MaterializedInternal<K, VR, KeyValueStore<Bytes, byte[]>> materializedInternal =
            new MaterializedInternal<>(
                materialized,
                builder,
                FK_JOIN_OUTPUT_NAME
            );
        // If we have a key serde, it's still valid, but we don't know the value serde, since it's the result
        // of the joiner (VR).
        if (materializedInternal.keySerde() == null) {
            materializedInternal.withKeySerde(keySerde);
        }
        final KTableSource<K, VR> resultProcessorSupplier = new KTableSource<>(materializedInternal);
        final ProcessorGraphNode<K, VR> resultNode = new ProcessorGraphNode<>(
            resultProcessorName,
            new ProcessorParameters<>(
                resultProcessorSupplier,
                resultProcessorName
            )
        );
        resultNode.setOutputVersioned(materializedInternal.storeSupplier() instanceof VersionedBytesStoreSupplier);
        builder.addGraphNode(responseJoinNode, resultNode);
        return new KTableImpl<K, V, VR>(
            resultProcessorName,
            keySerde,
            materializedInternal.valueSerde(),
            resultSourceNodes,
            materializedInternal.storeName(),
            resultProcessorSupplier,
            resultNode,
            builder
        );
    }
private static void maybeSetOutputVersioned(final GraphNode tableNode,
final MaterializedInternal<?, ?, KeyValueStore<Bytes, byte[]>> materializedInternal) {
if (materializedInternal != null) {
tableNode.setOutputVersioned(materializedInternal.storeSupplier() instanceof VersionedBytesStoreSupplier);
}
}
}
| KTableImpl |
java | netty__netty | microbench/src/main/java/io/netty/handler/codec/http2/HpackHeader.java | {
"start": 1443,
"end": 1539
} | class ____ a single header entry. Used by the benchmarks.
*/
@UnstableApi
public final | representing |
java | netty__netty | codec-socks/src/main/java/io/netty/handler/codec/socksx/v4/AbstractSocks4Message.java | {
"start": 853,
"end": 1038
} | class ____ extends AbstractSocksMessage implements Socks4Message {
@Override
public final SocksVersion version() {
return SocksVersion.SOCKS4a;
}
}
| AbstractSocks4Message |
java | quarkusio__quarkus | integration-tests/reactive-messaging-context-propagation/src/main/java/io/quarkus/it/kafka/FlowerConsumers.java | {
"start": 514,
"end": 1960
} | class ____ {
@Inject
RequestBean reqBean;
@Inject
FlowerProducer producer;
@Incoming("flowers-in")
@NonBlocking
void receive(String flower) {
Context ctx = Vertx.currentContext();
assert Context.isOnEventLoopThread();
Log.info(ctx + "[" + ctx.getClass() + "]");
Log.infof("bean: %s, id: %s", reqBean, reqBean.getId());
System.out.println("Received io: " + flower);
producer.addReceived(flower);
}
@Incoming("flowers-in")
@Blocking
void receiveBlocking(String flower) {
Context ctx = Vertx.currentContext();
assert Context.isOnWorkerThread();
Log.info(ctx + "[" + ctx.getClass() + "]");
Log.infof("bean: %s, id: %s", reqBean, reqBean.getId());
System.out.println("Received blocking: " + flower);
producer.addReceived(flower);
}
@Incoming("flowers-in")
@RunOnVirtualThread
void receiveVT(String flower) {
Context ctx = Vertx.currentContext();
if (Runtime.version().feature() >= 21) {
VirtualThreadsAssertions.assertThatItRunsOnVirtualThread();
VirtualThreadsAssertions.assertThatItRunsOnADuplicatedContext();
}
Log.info(ctx + "[" + ctx.getClass() + "]");
Log.infof("bean: %s, id: %s", reqBean, reqBean.getId());
System.out.println("Received vt: " + flower);
producer.addReceived(flower);
}
}
| FlowerConsumers |
java | apache__kafka | server-common/src/main/java/org/apache/kafka/server/quota/SensorAccess.java | {
"start": 1278,
"end": 3859
} | class ____ {
private final ReadWriteLock lock;
private final Metrics metrics;
public SensorAccess(ReadWriteLock lock, Metrics metrics) {
this.lock = lock;
this.metrics = metrics;
}
public Sensor getOrCreate(String sensorName, long expirationTime, Consumer<Sensor> registerMetrics) {
Sensor sensor;
/* Acquire the read lock to fetch the sensor. It is safe to call getSensor from multiple threads.
* The read lock allows a thread to create a sensor in isolation. The thread creating the sensor
* will acquire the write lock and prevent the sensors from being read while they are being created.
* It should be sufficient to simply check if the sensor is null without acquiring a read lock but the
* sensor being present doesn't mean that it is fully initialized i.e. all the Metrics may not have been added.
* This read lock waits until the writer thread has released its lock i.e. fully initialized the sensor
* at which point it is safe to read
*/
lock.readLock().lock();
try {
sensor = metrics.getSensor(sensorName);
} finally {
lock.readLock().unlock();
}
/* If the sensor is null, try to create it else return the existing sensor
* The sensor can be null, hence the null checks
*/
if (sensor == null) {
/* Acquire a write lock because the sensor may not have been created and we only want one thread to create it.
* Note that multiple threads may acquire the write lock if they all see a null sensor initially
* In this case, the writer checks the sensor after acquiring the lock again.
* This is safe from Double-Checked Locking because the references are read
* after acquiring read locks and hence they cannot see a partially published reference
*/
lock.writeLock().lock();
try {
// Set the var for both sensors in case another thread has won the race to acquire the write lock. This will
// ensure that we initialise `ClientSensors` with non-null parameters.
sensor = metrics.getSensor(sensorName);
if (sensor == null) {
sensor = metrics.sensor(sensorName, null, expirationTime);
registerMetrics.accept(sensor);
}
} finally {
lock.writeLock().unlock();
}
}
return sensor;
}
}
| SensorAccess |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestUpdatePipelineWithSnapshots.java | {
"start": 1629,
"end": 4289
} | class ____ {
// Regression test for HDFS-6647.
@Test
public void testUpdatePipelineAfterDelete() throws Exception {
Configuration conf = new HdfsConfiguration();
Path file = new Path("/test-file");
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
FileSystem fs = cluster.getFileSystem();
NamenodeProtocols namenode = cluster.getNameNodeRpc();
DFSOutputStream out = null;
try {
// Create a file and make sure a block is allocated for it.
out = (DFSOutputStream)(fs.create(file).
getWrappedStream());
out.write(1);
out.hflush();
// Create a snapshot that includes the file.
SnapshotTestHelper.createSnapshot((DistributedFileSystem) fs,
new Path("/"), "s1");
// Grab the block info of this file for later use.
FSDataInputStream in = null;
ExtendedBlock oldBlock = null;
try {
in = fs.open(file);
oldBlock = DFSTestUtil.getAllBlocks(in).get(0).getBlock();
} finally {
IOUtils.closeStream(in);
}
// Allocate a new block ID/gen stamp so we can simulate pipeline
// recovery.
String clientName = ((DistributedFileSystem)fs).getClient()
.getClientName();
LocatedBlock newLocatedBlock = namenode.updateBlockForPipeline(
oldBlock, clientName);
ExtendedBlock newBlock = new ExtendedBlock(oldBlock.getBlockPoolId(),
oldBlock.getBlockId(), oldBlock.getNumBytes(),
newLocatedBlock.getBlock().getGenerationStamp());
// Delete the file from the present FS. It will still exist the
// previously-created snapshot. This will log an OP_DELETE for the
// file in question.
fs.delete(file, true);
// Simulate a pipeline recovery, wherein a new block is allocated
// for the existing block, resulting in an OP_UPDATE_BLOCKS being
// logged for the file in question.
try {
namenode.updatePipeline(clientName, oldBlock, newBlock,
newLocatedBlock.getLocations(), newLocatedBlock.getStorageIDs());
} catch (IOException ioe) {
// normal
assertExceptionContains(
"does not exist or it is not under construction", ioe);
}
// Make sure the NN can restart with the edit logs as we have them now.
cluster.restartNameNode(true);
} finally {
IOUtils.closeStream(out);
}
} finally {
cluster.shutdown();
}
}
}
| TestUpdatePipelineWithSnapshots |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/NonFinalCompileTimeConstant.java | {
"start": 1588,
"end": 2189
} | class ____ extends BugChecker implements MethodTreeMatcher {
@Override
public Description matchMethod(MethodTree tree, VisitorState state) {
if (tree.getBody() == null) {
return NO_MATCH;
}
for (VariableTree parameter : tree.getParameters()) {
VarSymbol sym = ASTHelpers.getSymbol(parameter);
if (!CompileTimeConstantExpressionMatcher.hasCompileTimeConstantAnnotation(state, sym)) {
continue;
}
if (isConsideredFinal(sym)) {
continue;
}
return describeMatch(parameter);
}
return NO_MATCH;
}
}
| NonFinalCompileTimeConstant |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/asyncprocessing/operators/AsyncIntervalJoinOperatorTest.java | {
"start": 33517,
"end": 36332
} | class ____ {
String key;
long ts;
String source;
public TestElem(long ts, String source) {
this.key = "key";
this.ts = ts;
this.source = source;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
TestElem testElem = (TestElem) o;
if (ts != testElem.ts) {
return false;
}
if (key != null ? !key.equals(testElem.key) : testElem.key != null) {
return false;
}
return source != null ? source.equals(testElem.source) : testElem.source == null;
}
@Override
public int hashCode() {
int result = key != null ? key.hashCode() : 0;
result = 31 * result + (int) (ts ^ (ts >>> 32));
result = 31 * result + (source != null ? source.hashCode() : 0);
return result;
}
@Override
public String toString() {
return this.source + ":" + this.ts;
}
public static TypeSerializer<TestElem> serializer() {
return TypeInformation.of(new TypeHint<AsyncIntervalJoinOperatorTest.TestElem>() {})
.createSerializer(new SerializerConfigImpl());
}
}
private static StreamRecord<TestElem> createStreamRecord(long ts, String source) {
TestElem testElem = new TestElem(ts, source);
return new StreamRecord<>(testElem, ts);
}
private void processElementsAndWatermarks(TestHarness testHarness) throws Exception {
if (lhsFasterThanRhs) {
// add to lhs
for (int i = 1; i <= 4; i++) {
testHarness.processElement1(createStreamRecord(i, "lhs"));
testHarness.processWatermark1(new Watermark(i));
}
// add to rhs
for (int i = 1; i <= 4; i++) {
testHarness.processElement2(createStreamRecord(i, "rhs"));
testHarness.processWatermark2(new Watermark(i));
}
} else {
// add to rhs
for (int i = 1; i <= 4; i++) {
testHarness.processElement2(createStreamRecord(i, "rhs"));
testHarness.processWatermark2(new Watermark(i));
}
// add to lhs
for (int i = 1; i <= 4; i++) {
testHarness.processElement1(createStreamRecord(i, "lhs"));
testHarness.processWatermark1(new Watermark(i));
}
}
}
/** Custom test harness to avoid endless generics in all of the test code. */
private static | TestElem |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/rest/action/synonyms/RestGetSynonymsSetsAction.java | {
"start": 1028,
"end": 1885
} | class ____ extends BaseRestHandler {
private static final Integer DEFAULT_FROM_PARAM = 0;
private static final Integer DEFAULT_SIZE_PARAM = 10;
@Override
public String getName() {
return "synonyms_sets_get_action";
}
@Override
public List<Route> routes() {
return List.of(new Route(GET, "/_synonyms"));
}
@Override
protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException {
GetSynonymsSetsAction.Request request = new GetSynonymsSetsAction.Request(
restRequest.paramAsInt("from", DEFAULT_FROM_PARAM),
restRequest.paramAsInt("size", DEFAULT_SIZE_PARAM)
);
return channel -> client.execute(GetSynonymsSetsAction.INSTANCE, request, new RestToXContentListener<>(channel));
}
}
| RestGetSynonymsSetsAction |
java | spring-projects__spring-framework | spring-tx/src/main/java/org/springframework/dao/annotation/PersistenceExceptionTranslationAdvisor.java | {
"start": 1607,
"end": 3063
} | class ____ extends AbstractPointcutAdvisor {
private final PersistenceExceptionTranslationInterceptor advice;
private final AnnotationMatchingPointcut pointcut;
/**
* Create a new PersistenceExceptionTranslationAdvisor.
* @param persistenceExceptionTranslator the PersistenceExceptionTranslator to use
* @param repositoryAnnotationType the annotation type to check for
*/
public PersistenceExceptionTranslationAdvisor(
PersistenceExceptionTranslator persistenceExceptionTranslator,
Class<? extends Annotation> repositoryAnnotationType) {
this.advice = new PersistenceExceptionTranslationInterceptor(persistenceExceptionTranslator);
this.pointcut = new AnnotationMatchingPointcut(repositoryAnnotationType, true);
}
/**
* Create a new PersistenceExceptionTranslationAdvisor.
* @param beanFactory the ListableBeanFactory to obtaining all
* PersistenceExceptionTranslators from
* @param repositoryAnnotationType the annotation type to check for
*/
PersistenceExceptionTranslationAdvisor(
ListableBeanFactory beanFactory, Class<? extends Annotation> repositoryAnnotationType) {
this.advice = new PersistenceExceptionTranslationInterceptor(beanFactory);
this.pointcut = new AnnotationMatchingPointcut(repositoryAnnotationType, true);
}
@Override
public Advice getAdvice() {
return this.advice;
}
@Override
public Pointcut getPointcut() {
return this.pointcut;
}
}
| PersistenceExceptionTranslationAdvisor |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/sql/results/graph/entity/internal/EntityInitializerImpl.java | {
"start": 6760,
"end": 11026
} | class ____ extends InitializerData {
protected final boolean shallowCached;
protected final LockMode lockMode;
protected final String uniqueKeyAttributePath;
protected final Type[] uniqueKeyPropertyTypes;
protected final boolean canUseEmbeddedIdentifierInstanceAsEntity;
protected final boolean hasCallbackActions;
protected final @Nullable EntityPersister defaultConcreteDescriptor;
// per-row state
protected @Nullable EntityPersister concreteDescriptor;
protected @Nullable EntityKey entityKey;
protected @Nullable Object entityInstanceForNotify;
protected @Nullable EntityHolder entityHolder;
public EntityInitializerData(EntityInitializerImpl initializer, RowProcessingState rowProcessingState) {
super( rowProcessingState );
final var entityDescriptor = initializer.entityDescriptor;
shallowCached = rowProcessingState.isQueryCacheHit() && entityDescriptor.useShallowQueryCacheLayout();
lockMode = rowProcessingState.determineEffectiveLockMode( initializer.sourceAlias );
if ( initializer.isResultInitializer() ) {
uniqueKeyAttributePath = rowProcessingState.getEntityUniqueKeyAttributePath();
uniqueKeyPropertyTypes =
uniqueKeyAttributePath != null
? initializer.getParentEntityAttributeTypes( uniqueKeyAttributePath )
: null;
canUseEmbeddedIdentifierInstanceAsEntity =
rowProcessingState.getEntityId() != null
&& initializer.couldUseEmbeddedIdentifierInstanceAsEntity;
}
else {
uniqueKeyAttributePath = null;
uniqueKeyPropertyTypes = null;
canUseEmbeddedIdentifierInstanceAsEntity = false;
}
hasCallbackActions = rowProcessingState.hasCallbackActions();
defaultConcreteDescriptor =
hasConcreteDescriptor( rowProcessingState, initializer.discriminatorAssembler, entityDescriptor )
? entityDescriptor
: null;
}
/*
* Used by Hibernate Reactive
*/
public EntityInitializerData(EntityInitializerData original) {
super( original );
this.shallowCached = original.shallowCached;
this.lockMode = original.lockMode;
this.uniqueKeyAttributePath = original.uniqueKeyAttributePath;
this.uniqueKeyPropertyTypes = original.uniqueKeyPropertyTypes;
this.canUseEmbeddedIdentifierInstanceAsEntity = original.canUseEmbeddedIdentifierInstanceAsEntity;
this.hasCallbackActions = original.hasCallbackActions;
this.defaultConcreteDescriptor = original.defaultConcreteDescriptor;
this.concreteDescriptor = original.concreteDescriptor;
this.entityKey = original.entityKey;
this.entityInstanceForNotify = original.entityInstanceForNotify;
this.entityHolder = original.entityHolder;
}
}
private static boolean hasConcreteDescriptor(
RowProcessingState rowProcessingState,
BasicResultAssembler<?> discriminatorAssembler,
EntityPersister entityDescriptor) {
return discriminatorAssembler == null
|| rowProcessingState.isQueryCacheHit()
&& entityDescriptor.useShallowQueryCacheLayout()
&& !entityDescriptor.storeDiscriminatorInShallowQueryCacheLayout();
}
public EntityInitializerImpl(
EntityResultGraphNode resultDescriptor,
String sourceAlias,
@Nullable Fetch identifierFetch,
@Nullable Fetch discriminatorFetch,
@Nullable DomainResult<?> keyResult,
@Nullable DomainResult<Object> rowIdResult,
NotFoundAction notFoundAction,
boolean affectedByFilter,
@Nullable InitializerParent<?> parent,
boolean isResultInitializer,
AssemblerCreationState creationState) {
super( creationState );
this.sourceAlias = sourceAlias;
this.parent = parent;
this.isResultInitializer = isResultInitializer;
referencedModelPart = resultDescriptor.getEntityValuedModelPart();
entityDescriptor = (EntityPersister) referencedModelPart.getEntityMappingType();
final String rootEntityName = entityDescriptor.getRootEntityName();
rootEntityDescriptor =
rootEntityName == null || rootEntityName.equals( entityDescriptor.getEntityName() )
? entityDescriptor
: entityDescriptor.getRootEntityDescriptor().getEntityPersister();
keyTypeForEqualsHashCode = entityDescriptor.getIdentifierType().getTypeForEqualsHashCode();
// The id can only be the entity instance if this is a non-aggregated id that has no containing | EntityInitializerData |
java | junit-team__junit5 | junit-platform-engine/src/main/java/org/junit/platform/engine/support/descriptor/DirectorySource.java | {
"start": 913,
"end": 2480
} | class ____ implements FileSystemSource {
@Serial
private static final long serialVersionUID = 1L;
/**
* Create a new {@code DirectorySource} using the supplied
* {@linkplain File directory}.
*
* @param directory the source directory; must not be {@code null}
*/
public static DirectorySource from(File directory) {
return new DirectorySource(directory);
}
private final File directory;
private DirectorySource(File directory) {
Preconditions.notNull(directory, "directory must not be null");
try {
this.directory = directory.getCanonicalFile();
}
catch (IOException ex) {
throw new JUnitException("Failed to retrieve canonical path for directory: " + directory, ex);
}
}
/**
* Get the {@link URI} for the source {@linkplain #getFile directory}.
*
* @return the source {@code URI}; never {@code null}
*/
@Override
public URI getUri() {
return getFile().toURI();
}
/**
* Get the source {@linkplain File directory}.
*
* @return the source directory; never {@code null}
*/
@Override
public File getFile() {
return this.directory;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
DirectorySource that = (DirectorySource) o;
return this.directory.equals(that.directory);
}
@Override
public int hashCode() {
return this.directory.hashCode();
}
@Override
public String toString() {
return new ToStringBuilder(this).append("directory", this.directory).toString();
}
}
| DirectorySource |
java | google__guava | android/guava/src/com/google/common/eventbus/Dispatcher.java | {
"start": 5135,
"end": 6927
} | class ____ extends Dispatcher {
// This dispatcher matches the original dispatch behavior of AsyncEventBus.
//
// We can't really make any guarantees about the overall dispatch order for this dispatcher in
// a multithreaded environment for a couple of reasons:
//
// 1. Subscribers to events posted on different threads can be interleaved with each other
// freely. (A event on one thread, B event on another could yield any of
// [a1, a2, a3, b1, b2], [a1, b2, a2, a3, b2], [a1, b2, b3, a2, a3], etc.)
// 2. It's possible for subscribers to actually be dispatched to in a different order than they
// were added to the queue. It's easily possible for one thread to take the head of the
// queue, immediately followed by another thread taking the next element in the queue. That
// second thread can then dispatch to the subscriber it took before the first thread does.
//
// All this makes me really wonder if there's any value in queueing here at all. A dispatcher
// that simply loops through the subscribers and dispatches the event to each would actually
// probably provide a stronger order guarantee, though that order would obviously be different
// in some cases.
/** Global event queue. */
private final ConcurrentLinkedQueue<EventWithSubscriber> queue = new ConcurrentLinkedQueue<>();
@Override
void dispatch(Object event, Iterator<Subscriber> subscribers) {
checkNotNull(event);
while (subscribers.hasNext()) {
queue.add(new EventWithSubscriber(event, subscribers.next()));
}
EventWithSubscriber e;
while ((e = queue.poll()) != null) {
e.subscriber.dispatchEvent(e.event);
}
}
private static final | LegacyAsyncDispatcher |
java | apache__camel | components/camel-ftp/src/test/java/org/apache/camel/component/file/remote/integration/FtpConsumerUsingFTPClientConfigIT.java | {
"start": 1146,
"end": 2469
} | class ____ extends FtpServerTestSupport {
private String getFtpUrl() {
return "ftp://admin@localhost:{{ftp.server.port}}/clientconfig?password=admin&ftpClientConfig=#myConfig";
}
@Override
public void doPostSetup() throws Exception {
prepareFtpServer();
}
@BindToRegistry("myConfig")
public FTPClientConfig createConfig() {
FTPClientConfig config = new FTPClientConfig(FTPClientConfig.SYST_UNIX);
config.setServerTimeZoneId("Europe/Paris");
return config;
}
@Test
public void testFTPClientConfig() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMessageCount(1);
mock.expectedBodiesReceived("Hello World");
MockEndpoint.assertIsSatisfied(context);
}
private void prepareFtpServer() {
// prepares the FTP Server by creating files on the server that we want
// to unit
// test that we can pool and store as a local file
sendFile(getFtpUrl(), "Hello World", "hello.txt");
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from(getFtpUrl()).to("mock:result");
}
};
}
}
| FtpConsumerUsingFTPClientConfigIT |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShutdownThreadsHelper.java | {
"start": 1165,
"end": 2323
} | class ____ {
private Runnable sampleRunnable = new Runnable() {
@Override
public void run() {
try {
Thread.sleep(2 * ShutdownThreadsHelper.SHUTDOWN_WAIT_MS);
} catch (InterruptedException ie) {
System.out.println("Thread interrupted");
}
}
};
@Test
@Timeout(value = 3)
public void testShutdownThread() {
Thread thread = new SubjectInheritingThread(sampleRunnable);
thread.start();
boolean ret = ShutdownThreadsHelper.shutdownThread(thread);
boolean isTerminated = !thread.isAlive();
assertEquals(ret, isTerminated, "Incorrect return value");
assertTrue(isTerminated, "Thread is not shutdown");
}
@Test
public void testShutdownThreadPool() throws InterruptedException {
ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(1);
executor.execute(sampleRunnable);
boolean ret = ShutdownThreadsHelper.shutdownExecutorService(executor);
boolean isTerminated = executor.isTerminated();
assertEquals(ret, isTerminated, "Incorrect return value");
assertTrue(isTerminated, "ExecutorService is not shutdown");
}
}
| TestShutdownThreadsHelper |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/ReferenceCounted.java | {
"start": 1309,
"end": 3787
} | class ____<ReleaseHelper> {
/** The "unsafe", which can be used to perform native memory accesses. */
@SuppressWarnings({"restriction", "UseOfSunClasses"})
private static final Unsafe unsafe = MemoryUtils.UNSAFE;
private static final long referenceOffset;
static {
try {
referenceOffset =
unsafe.objectFieldOffset(
ReferenceCounted.class.getDeclaredField("referenceCount"));
} catch (SecurityException e) {
throw new Error(
"Could not get field 'referenceCount' offset in class 'ReferenceCounted'"
+ " for unsafe operations, permission denied by security manager.",
e);
} catch (NoSuchFieldException e) {
throw new Error(
"Could not get field 'referenceCount' offset in class 'ReferenceCounted'"
+ " for unsafe operations",
e);
} catch (Throwable t) {
throw new Error(
"Could not get field 'referenceCount' offset in class 'ReferenceCounted'"
+ " for unsafe operations, unclassified error",
t);
}
}
private volatile int referenceCount;
public ReferenceCounted(int initReference) {
this.referenceCount = initReference;
}
public int retain() {
return unsafe.getAndAddInt(this, referenceOffset, 1) + 1;
}
/**
* Try to retain this object. Fail if reference count is already zero.
*
* @return zero if failed, otherwise current reference count.
*/
public int tryRetain() {
int v;
do {
v = unsafe.getIntVolatile(this, referenceOffset);
} while (v != 0 && !unsafe.compareAndSwapInt(this, referenceOffset, v, v + 1));
return v == 0 ? 0 : v + 1;
}
public int release() {
return release(null);
}
public int release(@Nullable ReleaseHelper releaseHelper) {
int r = unsafe.getAndAddInt(this, referenceOffset, -1) - 1;
if (r == 0) {
referenceCountReachedZero(releaseHelper);
}
return r;
}
public int getReferenceCount() {
return referenceCount;
}
/** A method called when the reference count reaches zero. */
protected abstract void referenceCountReachedZero(@Nullable ReleaseHelper releaseHelper);
}
| ReferenceCounted |
java | spring-projects__spring-boot | module/spring-boot-jdbc/src/main/java/org/springframework/boot/jdbc/autoconfigure/JdbcClientAutoConfiguration.java | {
"start": 1694,
"end": 1852
} | class ____ {
@Bean
JdbcClient jdbcClient(NamedParameterJdbcTemplate jdbcTemplate) {
return JdbcClient.create(jdbcTemplate);
}
}
| JdbcClientAutoConfiguration |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/SpringAiEmbeddingsComponentBuilderFactory.java | {
"start": 1897,
"end": 5244
} | interface ____ extends ComponentBuilder<SpringAiEmbeddingsComponent> {
/**
* The configuration.
*
* The option is a:
* <code>org.apache.camel.component.springai.embeddings.SpringAiEmbeddingsConfiguration</code> type.
*
* Group: producer
*
* @param configuration the value to set
* @return the dsl builder
*/
default SpringAiEmbeddingsComponentBuilder configuration(org.apache.camel.component.springai.embeddings.SpringAiEmbeddingsConfiguration configuration) {
doSetProperty("configuration", configuration);
return this;
}
/**
* The EmbeddingModel to use for generating embeddings.
*
* The option is a:
* <code>org.springframework.ai.embedding.EmbeddingModel</code> type.
*
* Group: producer
*
* @param embeddingModel the value to set
* @return the dsl builder
*/
default SpringAiEmbeddingsComponentBuilder embeddingModel(org.springframework.ai.embedding.EmbeddingModel embeddingModel) {
doSetProperty("embeddingModel", embeddingModel);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default SpringAiEmbeddingsComponentBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether autowiring is enabled. This is used for automatic autowiring
* options (the option must be marked as autowired) by looking up in the
* registry to find if there is a single instance of matching type,
* which then gets configured on the component. This can be used for
* automatic configuring JDBC data sources, JMS connection factories,
* AWS Clients, etc.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param autowiredEnabled the value to set
* @return the dsl builder
*/
default SpringAiEmbeddingsComponentBuilder autowiredEnabled(boolean autowiredEnabled) {
doSetProperty("autowiredEnabled", autowiredEnabled);
return this;
}
}
| SpringAiEmbeddingsComponentBuilder |
java | elastic__elasticsearch | test/test-clusters/src/main/java/org/elasticsearch/test/cluster/util/Version.java | {
"start": 866,
"end": 1936
} | class ____ implements Comparable<Version>, Serializable {
public static final Version CURRENT;
private static final Pattern pattern = Pattern.compile("(\\d+)\\.(\\d+)\\.(\\d+)(?:-(alpha\\d+|beta\\d+|rc\\d+|SNAPSHOT))?");
private static final Pattern relaxedPattern = Pattern.compile(
"v?(\\d+)\\.(\\d+)(?:\\.(\\d+))?(?:[\\-+]+([a-zA-Z0-9_]+(?:-[a-zA-Z0-9]+)*))?"
);
private final int major;
private final int minor;
private final int revision;
private final int id;
private final String qualifier;
private final boolean detached;
static {
Properties versionProperties = new Properties();
try (InputStream in = Version.class.getClassLoader().getResourceAsStream("version.properties")) {
versionProperties.load(in);
CURRENT = Version.fromString(versionProperties.getProperty("elasticsearch"));
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
/**
* Specifies how a version string should be parsed.
*/
public | Version |
java | google__dagger | javatests/dagger/hilt/android/MultiTestRoot2Test.java | {
"start": 4198,
"end": 8032
} | interface ____ {
Qux getQux();
}
@Rule public HiltAndroidRule rule = new HiltAndroidRule(this);
@Inject Foo foo;
@Inject Qux qux;
@Inject String str;
@Inject Long longValue;
@Inject @MultiTestRootExternalModules.External String externalStrValue;
@BindValue
@Named(TEST_QUALIFIER)
String bindValueString = BIND_VALUE_STRING;
@Test
public void testInjectFromTestModule() throws Exception {
assertThat(foo).isNull();
rule.inject();
assertThat(foo).isNotNull();
assertThat(foo.value).isEqualTo(INT_VALUE);
}
@Test
public void testInjectFromTestModuleWithArgs() throws Exception {
assertThat(str).isNull();
rule.inject();
assertThat(str).isNotNull();
assertThat(str).isEqualTo(STR_VALUE);
}
@Test
public void testInjectFromNestedTestModule() throws Exception {
assertThat(longValue).isNull();
rule.inject();
assertThat(longValue).isNotNull();
assertThat(longValue).isEqualTo(LONG_VALUE);
}
@Test
public void testInjectFromPkgPrivateTestModule() throws Exception {
assertThat(qux).isNull();
rule.inject();
assertThat(qux).isNotNull();
}
@Test
public void testInjectFromExternalAppModule() throws Exception {
assertThat(externalStrValue).isNull();
rule.inject();
assertThat(externalStrValue).isNotNull();
assertThat(externalStrValue).isEqualTo(MultiTestRootExternalModules.EXTERNAL_STR_VALUE);
}
@Test
public void testInjectFromExternalActivityModule() throws Exception {
rule.inject();
ActivityController<TestActivity> ac = Robolectric.buildActivity(TestActivity.class);
assertThat(ac.get().externalLongValue).isNull();
ac.create();
assertThat(ac.get().externalLongValue).isNotNull();
assertThat(ac.get().externalLongValue)
.isEqualTo(MultiTestRootExternalModules.EXTERNAL_LONG_VALUE);
}
@Test
public void testLocalEntryPoint() throws Exception {
rule.inject();
Bar bar = EntryPoints.get(getApplicationContext(), BarEntryPoint.class).getBar();
assertThat(bar).isNotNull();
assertThat(bar.value).isEqualTo(STR_VALUE);
}
@Test
public void testLocalPkgPrivateEntryPoint() throws Exception {
rule.inject();
Qux qux = EntryPoints.get(getApplicationContext(), PkgPrivateQuxEntryPoint.class).getQux();
assertThat(qux).isNotNull();
}
@Test
public void testAndroidEntryPoint() throws Exception {
rule.inject();
ActivityController<TestActivity> ac = Robolectric.buildActivity(TestActivity.class);
assertThat(ac.get().baz).isNull();
ac.create();
assertThat(ac.get().baz).isNotNull();
assertThat(ac.get().baz.value).isEqualTo(LONG_VALUE);
}
@Test
public void testMissingMultiTestRoot1EntryPoint() throws Exception {
rule.inject();
ClassCastException exception =
assertThrows(
ClassCastException.class,
() -> EntryPoints.get(getApplicationContext(), MultiTestRoot1Test.BarEntryPoint.class));
assertThat(exception)
.hasMessageThat()
.isEqualTo(
"Cannot cast dagger.hilt.android.internal.testing.root."
+ "DaggerMultiTestRoot2Test_HiltComponents_SingletonC$SingletonCImpl"
+ " to dagger.hilt.android.MultiTestRoot1Test$BarEntryPoint");
}
@Test
public void testBindValueFieldIsProvided() throws Exception {
rule.inject();
assertThat(bindValueString).isEqualTo(BIND_VALUE_STRING);
assertThat(getBinding()).isEqualTo(BIND_VALUE_STRING);
}
@Test
public void testBindValueIsMutable() throws Exception {
rule.inject();
bindValueString = "newValue";
assertThat(getBinding()).isEqualTo("newValue");
}
private static String getBinding() {
return EntryPoints.get(getApplicationContext(), BindValueEntryPoint.class).bindValueString();
}
}
| PkgPrivateQuxEntryPoint |
java | apache__flink | flink-clients/src/test/java/org/apache/flink/client/program/StreamContextEnvironmentTest.java | {
"start": 2018,
"end": 9599
} | class ____ {
@ParameterizedTest
@MethodSource("provideExecutors")
void testDisallowProgramConfigurationChanges(
ThrowingConsumer<StreamExecutionEnvironment, Exception> executor) {
final Configuration clusterConfig = new Configuration();
clusterConfig.set(DeploymentOptions.PROGRAM_CONFIG_ENABLED, false);
clusterConfig.set(DeploymentOptions.TARGET, "local");
clusterConfig.set(StateRecoveryOptions.SAVEPOINT_PATH, "/flink/savepoints");
clusterConfig.set(ExecutionOptions.RUNTIME_MODE, RuntimeExecutionMode.STREAMING);
final Configuration programConfig = new Configuration();
programConfig.set(DeploymentOptions.PROGRAM_CONFIG_ENABLED, false);
programConfig.set(DeploymentOptions.TARGET, "local");
programConfig.set(ExecutionOptions.RUNTIME_MODE, RuntimeExecutionMode.BATCH);
programConfig.set(ExecutionOptions.SORT_INPUTS, true);
final StreamContextEnvironment environment =
constructStreamContextEnvironment(clusterConfig, Collections.emptyList());
// Change the CheckpointConfig
environment.enableCheckpointing(500, CheckpointingMode.EXACTLY_ONCE);
// Change the ExecutionConfig
environment.setParallelism(25);
environment.getConfig().setMaxParallelism(1024);
// Add/mutate values in the configuration
environment.configure(programConfig);
environment.fromData(Collections.singleton(1)).sinkTo(new DiscardingSink<>());
assertThatThrownBy(() -> executor.accept(environment))
.isInstanceOf(MutatedConfigurationException.class)
.hasMessageContainingAll(
ExecutionOptions.RUNTIME_MODE.key(),
ExecutionOptions.SORT_INPUTS.key(),
CheckpointingOptions.CHECKPOINTING_INTERVAL.key(),
PipelineOptions.MAX_PARALLELISM.key());
}
@ParameterizedTest
@MethodSource("provideExecutors")
void testDisallowCheckpointStorageByConfiguration(
ThrowingConsumer<StreamExecutionEnvironment, Exception> executor) {
final Configuration clusterConfig = new Configuration();
Configuration jobConfig = new Configuration();
String disallowedPath = "file:///flink/disallowed/modification";
jobConfig.set(CheckpointingOptions.CHECKPOINT_STORAGE, "jobmanager");
jobConfig.set(CheckpointingOptions.CHECKPOINTS_DIRECTORY, disallowedPath);
final ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
final StreamContextEnvironment environment =
new StreamContextEnvironment(
new MockExecutorServiceLoader(),
clusterConfig,
jobConfig,
classLoader,
true,
true,
false,
Collections.emptyList());
environment.fromData(Collections.singleton(1)).sinkTo(new DiscardingSink<>());
assertThatThrownBy(() -> executor.accept(environment))
.isInstanceOf(MutatedConfigurationException.class)
.hasMessageContainingAll(
CheckpointingOptions.CHECKPOINT_STORAGE.key(),
CheckpointingOptions.CHECKPOINTS_DIRECTORY.key());
}
@ParameterizedTest
@MethodSource("provideExecutors")
void testNotModifiedCheckpointStorage(
ThrowingConsumer<StreamExecutionEnvironment, Exception> executor) {
final Configuration clusterConfig = new Configuration();
clusterConfig.set(DeploymentOptions.PROGRAM_CONFIG_ENABLED, false);
clusterConfig.set(DeploymentOptions.TARGET, "local");
clusterConfig.set(CheckpointingOptions.CHECKPOINTS_DIRECTORY, "file:///flink/checkpoints");
final StreamContextEnvironment environment =
constructStreamContextEnvironment(clusterConfig, Collections.emptyList());
environment.fromData(Collections.singleton(1)).sinkTo(new DiscardingSink<>());
assertThatThrownBy(() -> executor.accept(environment))
.isInstanceOf(ExecutorReachedException.class);
}
@ParameterizedTest
@MethodSource("provideExecutors")
void testForSinkTransformation(
ThrowingConsumer<StreamExecutionEnvironment, Exception> executor) {
final Configuration clusterConfig = new Configuration();
clusterConfig.set(DeploymentOptions.PROGRAM_CONFIG_ENABLED, false);
clusterConfig.set(DeploymentOptions.TARGET, "local");
final StreamContextEnvironment environment =
constructStreamContextEnvironment(clusterConfig, Collections.emptyList());
environment.fromData(Collections.singleton(1)).sinkTo(new DiscardingSink<>());
assertThatThrownBy(() -> executor.accept(environment))
.isInstanceOf(ExecutorReachedException.class);
}
@ParameterizedTest
@MethodSource("provideExecutors")
void testAllowProgramConfigurationWildcards(
ThrowingConsumer<StreamExecutionEnvironment, Exception> executor) {
final Configuration clusterConfig = new Configuration();
clusterConfig.set(DeploymentOptions.TARGET, "local");
clusterConfig.set(ExecutionOptions.RUNTIME_MODE, RuntimeExecutionMode.STREAMING);
// Changing GLOBAL_JOB_PARAMETERS is always allowed, as it's one of the fields not checked
// with PROGRAM_CONFIG_ENABLED set to false
clusterConfig.setString(
PipelineOptions.GLOBAL_JOB_PARAMETERS.key() + "." + "my-param", "my-value");
final Configuration jobConfig = new Configuration();
jobConfig.set(
PipelineOptions.GLOBAL_JOB_PARAMETERS,
Collections.singletonMap("my-other-param", "my-other-value"));
final StreamContextEnvironment environment =
constructStreamContextEnvironment(
clusterConfig,
Arrays.asList(
PipelineOptions.GLOBAL_JOB_PARAMETERS.key(),
PipelineOptions.MAX_PARALLELISM.key()));
// Change ExecutionConfig
environment.configure(jobConfig);
environment.getConfig().setMaxParallelism(1024);
environment.fromData(Collections.singleton(1)).sinkTo(new DiscardingSink<>());
assertThatThrownBy(() -> executor.accept(environment))
.isInstanceOf(ExecutorReachedException.class);
assertThat(environment.getConfig().getGlobalJobParameters().toMap())
.containsOnlyKeys("my-other-param");
}
private static StreamContextEnvironment constructStreamContextEnvironment(
Configuration clusterConfig, Collection<String> programConfigWildcards) {
final ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
return new StreamContextEnvironment(
new MockExecutorServiceLoader(),
clusterConfig,
clusterConfig,
classLoader,
true,
true,
false,
programConfigWildcards);
}
private static List<ThrowingConsumer<StreamExecutionEnvironment, Exception>>
provideExecutors() {
return Arrays.asList(
StreamExecutionEnvironment::execute, StreamExecutionEnvironment::executeAsync);
}
private static | StreamContextEnvironmentTest |
java | apache__camel | components/camel-jcache/src/main/java/org/apache/camel/component/jcache/JCacheConfiguration.java | {
"start": 1437,
"end": 3851
} | class ____ {
@UriParam(label = "common")
private String cachingProvider;
@UriParam(label = "advanced")
private Configuration cacheConfiguration;
@UriParam
private Properties cacheConfigurationProperties;
@UriParam
private String configurationUri;
@UriParam(label = "advanced")
private Factory<CacheLoader> cacheLoaderFactory;
@UriParam(label = "advanced")
private Factory<CacheWriter> cacheWriterFactory;
@UriParam(label = "advanced")
private Factory<ExpiryPolicy> expiryPolicyFactory;
@UriParam
private boolean readThrough;
@UriParam
private boolean writeThrough;
@UriParam(defaultValue = "true")
private boolean storeByValue = true;
@UriParam
private boolean statisticsEnabled;
@UriParam
private boolean managementEnabled;
@UriParam(label = "consumer", enums = "CREATED,UPDATED,REMOVED,EXPIRED")
private String filteredEvents;
@UriParam(label = "consumer,advanced")
private List<CacheEntryEventFilter> eventFilters;
@UriParam(label = "consumer")
private boolean oldValueRequired;
@UriParam(label = "consumer")
private boolean synchronous;
@UriParam(label = "producer")
private String action;
@UriParam(label = "advanced", defaultValue = "true")
private boolean createCacheIfNotExists = true;
@UriParam(label = "advanced")
private boolean lookupProviders;
private CamelContext camelContext;
private String cacheName;
public JCacheConfiguration() {
this(null, null);
}
public JCacheConfiguration(String cacheName) {
this(null, cacheName);
}
public JCacheConfiguration(CamelContext camelContext, String cacheName) {
this.camelContext = camelContext;
this.cacheName = cacheName;
}
public CamelContext getCamelContext() {
return this.camelContext;
}
public void setCamelContext(CamelContext camelContext) {
this.camelContext = camelContext;
}
public String getCacheName() {
return this.cacheName;
}
public void setCacheName(String cacheName) {
this.cacheName = cacheName;
}
public ClassLoader getApplicationContextClassLoader() {
return this.camelContext != null
? this.camelContext.getApplicationContextClassLoader()
: null;
}
/**
* The fully qualified | JCacheConfiguration |
java | google__error-prone | check_api/src/main/java/com/google/errorprone/RefactoringCollection.java | {
"start": 2061,
"end": 2670
} | class ____ implements DescriptionListener.Factory {
private static final Logger logger = Logger.getLogger(RefactoringCollection.class.getName());
private final SetMultimap<URI, DelegatingDescriptionListener> foundSources =
HashMultimap.create();
private final Path rootPath;
private final FileDestination fileDestination;
private final Function<URI, RefactoringResult> postProcess;
private final DescriptionListener.Factory descriptionsFactory;
private final ImportOrganizer importOrganizer;
record RefactoringResult(String message, RefactoringResultType type) {}
| RefactoringCollection |
java | playframework__playframework | documentation/manual/working/javaGuide/main/ws/code/javaguide/ws/JavaWS.java | {
"start": 12928,
"end": 13598
} | class ____ extends MockJavaAction
implements WSBodyWritables, WSBodyReadables {
private final WSClient ws;
@Inject
public Controller2(JavaHandlerComponents javaHandlerComponents, WSClient ws) {
super(javaHandlerComponents);
this.ws = ws;
}
// #composed-call
public CompletionStage<Result> index() {
return ws.url(feedUrl)
.get()
.thenCompose(response -> ws.url(response.asJson().findPath("commentsUrl").asText()).get())
.thenApply(
response -> ok("Number of comments: " + response.asJson().findPath("count").asInt()));
}
// #composed-call
}
public static | Controller2 |
java | netty__netty | codec-http3/src/main/java/io/netty/handler/codec/http3/QpackEncoderDynamicTable.java | {
"start": 17310,
"end": 18780
} | class ____ extends QpackHeaderField {
/**
* Pointer to the next entry in insertion order with a different {@link #hash} than this entry.
*/
HeaderEntry next;
/**
* Pointer to the next entry in insertion order with the same {@link #hash} as this entry, a.k.a hash collisions
*/
HeaderEntry nextSibling;
/**
* Number of header blocks that refer to this entry as the value for its <a
* href="https://www.rfc-editor.org/rfc/rfc9204.html#name-required-insert-count">
* required insert count</a>
*/
int refCount;
/**
* Hashcode for this entry.
*/
final int hash;
/**
* Insertion index for this entry.
*/
final int index;
HeaderEntry(int hash, CharSequence name, CharSequence value, int index, @Nullable HeaderEntry nextSibling) {
super(name, value);
this.index = index;
this.hash = hash;
this.nextSibling = nextSibling;
}
void remove(HeaderEntry prev) {
assert prev != this;
prev.next = next;
next = null; // null references to prevent nepotism in generational GC.
nextSibling = null;
}
void addNextTo(HeaderEntry prev) {
assert prev != this;
this.next = prev.next;
prev.next = this;
}
}
}
| HeaderEntry |
java | apache__camel | components/camel-debezium/camel-debezium-db2/src/generated/java/org/apache/camel/component/debezium/db2/DebeziumDb2EndpointUriFactory.java | {
"start": 522,
"end": 6507
} | class ____ extends org.apache.camel.support.component.EndpointUriFactorySupport implements EndpointUriFactory {
private static final String BASE = ":name";
private static final Set<String> PROPERTY_NAMES;
private static final Set<String> SECRET_PROPERTY_NAMES;
private static final Map<String, String> MULTI_VALUE_PREFIXES;
static {
Set<String> props = new HashSet<>(93);
props.add("additionalProperties");
props.add("bridgeErrorHandler");
props.add("cdcChangeTablesSchema");
props.add("cdcControlSchema");
props.add("columnExcludeList");
props.add("columnIncludeList");
props.add("columnPropagateSourceType");
props.add("connectionValidationTimeoutMs");
props.add("converters");
props.add("customMetricTags");
props.add("databaseDbname");
props.add("databaseHostname");
props.add("databasePassword");
props.add("databasePort");
props.add("databaseUser");
props.add("datatypePropagateSourceType");
props.add("db2Platform");
props.add("decimalHandlingMode");
props.add("errorsMaxRetries");
props.add("eventProcessingFailureHandlingMode");
props.add("exceptionHandler");
props.add("exchangePattern");
props.add("executorShutdownTimeoutMs");
props.add("extendedHeadersEnabled");
props.add("guardrailCollectionsLimitAction");
props.add("guardrailCollectionsMax");
props.add("heartbeatIntervalMs");
props.add("heartbeatTopicsPrefix");
props.add("includeSchemaChanges");
props.add("incrementalSnapshotChunkSize");
props.add("incrementalSnapshotWatermarkingStrategy");
props.add("internalKeyConverter");
props.add("internalValueConverter");
props.add("maxBatchSize");
props.add("maxQueueSize");
props.add("maxQueueSizeInBytes");
props.add("messageKeyColumns");
props.add("name");
props.add("notificationEnabledChannels");
props.add("notificationSinkTopicName");
props.add("offsetCommitPolicy");
props.add("offsetCommitTimeoutMs");
props.add("offsetFlushIntervalMs");
props.add("offsetStorage");
props.add("offsetStorageFileName");
props.add("offsetStoragePartitions");
props.add("offsetStorageReplicationFactor");
props.add("offsetStorageTopic");
props.add("openlineageIntegrationConfigFilePath");
props.add("openlineageIntegrationDatasetKafkaBootstrapServers");
props.add("openlineageIntegrationEnabled");
props.add("openlineageIntegrationJobDescription");
props.add("openlineageIntegrationJobNamespace");
props.add("openlineageIntegrationJobOwners");
props.add("openlineageIntegrationJobTags");
props.add("pollIntervalMs");
props.add("postProcessors");
props.add("provideTransactionMetadata");
props.add("queryFetchSize");
props.add("retriableRestartConnectorWaitMs");
props.add("schemaHistoryInternal");
props.add("schemaHistoryInternalFileFilename");
props.add("schemaHistoryInternalSkipUnparseableDdl");
props.add("schemaHistoryInternalStoreOnlyCapturedDatabasesDdl");
props.add("schemaHistoryInternalStoreOnlyCapturedTablesDdl");
props.add("schemaNameAdjustmentMode");
props.add("signalDataCollection");
props.add("signalEnabledChannels");
props.add("signalPollIntervalMs");
props.add("skippedOperations");
props.add("snapshotDelayMs");
props.add("snapshotFetchSize");
props.add("snapshotIncludeCollectionList");
props.add("snapshotLockTimeoutMs");
props.add("snapshotMode");
props.add("snapshotModeConfigurationBasedSnapshotData");
props.add("snapshotModeConfigurationBasedSnapshotOnDataError");
props.add("snapshotModeConfigurationBasedSnapshotOnSchemaError");
props.add("snapshotModeConfigurationBasedSnapshotSchema");
props.add("snapshotModeConfigurationBasedStartStream");
props.add("snapshotModeCustomName");
props.add("snapshotSelectStatementOverrides");
props.add("snapshotTablesOrderByRowCount");
props.add("sourceinfoStructMaker");
props.add("streamingDelayMs");
props.add("tableExcludeList");
props.add("tableIgnoreBuiltin");
props.add("tableIncludeList");
props.add("timePrecisionMode");
props.add("tombstonesOnDelete");
props.add("topicNamingStrategy");
props.add("topicPrefix");
props.add("transactionMetadataFactory");
PROPERTY_NAMES = Collections.unmodifiableSet(props);
SECRET_PROPERTY_NAMES = Collections.emptySet();
Map<String, String> prefixes = new HashMap<>(1);
prefixes.put("additionalProperties", "additionalProperties.");
MULTI_VALUE_PREFIXES = Collections.unmodifiableMap(prefixes);
}
@Override
public boolean isEnabled(String scheme) {
return "debezium-db2".equals(scheme);
}
@Override
public String buildUri(String scheme, Map<String, Object> properties, boolean encode) throws URISyntaxException {
String syntax = scheme + BASE;
String uri = syntax;
Map<String, Object> copy = new HashMap<>(properties);
uri = buildPathParameter(syntax, uri, "name", null, true, copy);
uri = buildQueryParameters(uri, copy, encode);
return uri;
}
@Override
public Set<String> propertyNames() {
return PROPERTY_NAMES;
}
@Override
public Set<String> secretPropertyNames() {
return SECRET_PROPERTY_NAMES;
}
@Override
public Map<String, String> multiValuePrefixes() {
return MULTI_VALUE_PREFIXES;
}
@Override
public boolean isLenientProperties() {
return false;
}
}
| DebeziumDb2EndpointUriFactory |
java | alibaba__nacos | naming/src/test/java/com/alibaba/nacos/naming/core/v2/cleaner/ExpiredMetadataCleanerTest.java | {
"start": 1406,
"end": 2610
} | class ____ {
private ExpiredMetadataCleaner expiredMetadataCleaner;
@Mock
private NamingMetadataManager metadataManagerMock;
@Mock
private NamingMetadataOperateService metadataOperateServiceMock;
private Set<ExpiredMetadataInfo> set = new ConcurrentHashSet<>();
@Mock
private ExpiredMetadataInfo expiredMetadataInfoMock;
@BeforeEach
void setUp() throws Exception {
EnvUtil.setEnvironment(new MockEnvironment());
expiredMetadataCleaner = new ExpiredMetadataCleaner(metadataManagerMock, metadataOperateServiceMock);
set.add(expiredMetadataInfoMock);
when(metadataManagerMock.getExpiredMetadataInfos()).thenReturn(set);
when(expiredMetadataInfoMock.getCreateTime()).thenReturn(0L);
when(metadataManagerMock.containServiceMetadata(expiredMetadataInfoMock.getService())).thenReturn(true);
}
@Test
void testDoClean() {
expiredMetadataCleaner.doClean();
verify(metadataManagerMock).getExpiredMetadataInfos();
verify(metadataOperateServiceMock).deleteServiceMetadata(expiredMetadataInfoMock.getService());
}
} | ExpiredMetadataCleanerTest |
java | apache__spark | sql/catalyst/src/main/java/org/apache/spark/sql/connector/metric/CustomTaskMetric.java | {
"start": 1901,
"end": 2086
} | interface ____ {
/**
* Returns the name of custom task metric.
*/
String name();
/**
* Returns the long value of custom task metric.
*/
long value();
}
| CustomTaskMetric |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/type/descriptor/jdbc/ObjectNullResolvingJdbcType.java | {
"start": 580,
"end": 2393
} | class ____ extends ObjectJdbcType {
/**
* Singleton access
*/
public static final ObjectNullResolvingJdbcType INSTANCE = new ObjectNullResolvingJdbcType( Types.JAVA_OBJECT );
public ObjectNullResolvingJdbcType(int jdbcTypeCode) {
super( jdbcTypeCode );
}
@Override
public <X> ValueBinder<X> getBinder(JavaType<X> javaType) {
if ( Serializable.class.isAssignableFrom( javaType.getJavaTypeClass() ) ) {
return VarbinaryJdbcType.INSTANCE.getBinder( javaType );
}
return new BasicBinder<>( javaType, this ) {
@Override
protected void doBindNull(PreparedStatement st, int index, WrapperOptions options)
throws SQLException {
if ( options.getDialect().supportsBindingNullForSetObject() ) {
st.setObject( index, null );
}
else {
final int sqlType = options.getDialect().supportsBindingNullSqlTypeForSetNull() ? Types.NULL
: st.getParameterMetaData().getParameterType( index );
st.setNull( index, sqlType );
}
}
@Override
protected void doBindNull(CallableStatement st, String name, WrapperOptions options)
throws SQLException {
if ( options.getDialect().supportsBindingNullForSetObject() ) {
st.setObject( name, null );
}
else {
final int sqlType = options.getDialect().supportsBindingNullSqlTypeForSetNull() ? Types.NULL
: Types.JAVA_OBJECT;
st.setNull( name, sqlType );
}
}
@Override
protected void doBind(PreparedStatement st, X value, int index, WrapperOptions options)
throws SQLException {
st.setObject( index, value, getJdbcTypeCode() );
}
@Override
protected void doBind(CallableStatement st, X value, String name, WrapperOptions options)
throws SQLException {
st.setObject( name, value, getJdbcTypeCode() );
}
};
}
}
| ObjectNullResolvingJdbcType |
java | netty__netty | codec-classes-quic/src/main/java/io/netty/handler/codec/quic/QuicPathEvent.java | {
"start": 5514,
"end": 8609
} | class ____ extends QuicPathEvent {
private final long seq;
private final InetSocketAddress oldLocal;
private final InetSocketAddress oldRemote;
/**
* The stack observes that the Source Connection ID with the given sequence number,
* initially used by the peer over the first pair of addresses, is now reused over
* the second pair of addresses.
*
* @param seq sequence number
* @param oldLocal old local address.
* @param oldRemote old remote address.
* @param local local address.
* @param remote remote address.
*/
public ReusedSourceConnectionId(long seq, InetSocketAddress oldLocal, InetSocketAddress oldRemote,
InetSocketAddress local, InetSocketAddress remote) {
super(local, remote);
this.seq = seq;
this.oldLocal = requireNonNull(oldLocal, "oldLocal");
this.oldRemote = requireNonNull(oldRemote, "oldRemote");
}
/**
* Source connection id sequence number.
*
* @return sequence number
*/
public long seq() {
return seq;
}
/**
* The old local address of the network path.
*
* @return local
*/
public InetSocketAddress oldLocal() {
return oldLocal;
}
/**
* The old remote address of the network path.
*
* @return local
*/
public InetSocketAddress oldRemote() {
return oldRemote;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
if (!super.equals(o)) {
return false;
}
ReusedSourceConnectionId that = (ReusedSourceConnectionId) o;
if (seq != that.seq) {
return false;
}
if (!Objects.equals(oldLocal, that.oldLocal)) {
return false;
}
return Objects.equals(oldRemote, that.oldRemote);
}
@Override
public int hashCode() {
int result = super.hashCode();
result = 31 * result + (int) (seq ^ (seq >>> 32));
result = 31 * result + (oldLocal != null ? oldLocal.hashCode() : 0);
result = 31 * result + (oldRemote != null ? oldRemote.hashCode() : 0);
return result;
}
@Override
public String toString() {
return "QuicPathEvent.ReusedSourceConnectionId{" +
"seq=" + seq +
", oldLocal=" + oldLocal +
", oldRemote=" + oldRemote +
", local=" + local() +
", remote=" + remote() +
'}';
}
}
public static final | ReusedSourceConnectionId |
java | spring-projects__spring-security | oauth2/oauth2-authorization-server/src/main/java/org/springframework/security/oauth2/server/authorization/aot/hint/OAuth2AuthorizationServerBeanRegistrationAotProcessor.java | {
"start": 3911,
"end": 5302
} | class ____ implements BeanRegistrationAotProcessor {
private static final boolean jackson2Present;
private static final boolean jackson3Present;
static {
ClassLoader classLoader = ClassUtils.getDefaultClassLoader();
jackson2Present = ClassUtils.isPresent("com.fasterxml.jackson.databind.ObjectMapper", classLoader)
&& ClassUtils.isPresent("com.fasterxml.jackson.core.JsonGenerator", classLoader);
jackson3Present = ClassUtils.isPresent("tools.jackson.databind.json.JsonMapper", classLoader);
}
private boolean jacksonContributed;
@Override
public BeanRegistrationAotContribution processAheadOfTime(RegisteredBean registeredBean) {
boolean isJdbcBasedOAuth2AuthorizationService = JdbcOAuth2AuthorizationService.class
.isAssignableFrom(registeredBean.getBeanClass());
boolean isJdbcBasedRegisteredClientRepository = JdbcRegisteredClientRepository.class
.isAssignableFrom(registeredBean.getBeanClass());
// @formatter:off
if ((isJdbcBasedOAuth2AuthorizationService || isJdbcBasedRegisteredClientRepository)
&& !this.jacksonContributed) {
JacksonConfigurationBeanRegistrationAotContribution jacksonContribution =
new JacksonConfigurationBeanRegistrationAotContribution();
this.jacksonContributed = true;
return jacksonContribution;
}
// @formatter:on
return null;
}
private static | OAuth2AuthorizationServerBeanRegistrationAotProcessor |
java | spring-projects__spring-boot | core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/AutoConfigurationMetadata.java | {
"start": 1011,
"end": 1126
} | class ____ was processed by the annotation
* processor.
* @param className the source class
* @return if the | name |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/jdk8/ParallelMapTryOptional.java | {
"start": 2693,
"end": 6045
} | class ____<T, R> implements ConditionalSubscriber<T>, Subscription {
final Subscriber<? super R> downstream;
final Function<? super T, Optional<? extends R>> mapper;
final BiFunction<? super Long, ? super Throwable, ParallelFailureHandling> errorHandler;
Subscription upstream;
boolean done;
ParallelMapTrySubscriber(Subscriber<? super R> actual,
Function<? super T, Optional<? extends R>> mapper,
BiFunction<? super Long, ? super Throwable, ParallelFailureHandling> errorHandler) {
this.downstream = actual;
this.mapper = mapper;
this.errorHandler = errorHandler;
}
@Override
public void request(long n) {
upstream.request(n);
}
@Override
public void cancel() {
upstream.cancel();
}
@Override
public void onSubscribe(Subscription s) {
if (SubscriptionHelper.validate(this.upstream, s)) {
this.upstream = s;
downstream.onSubscribe(this);
}
}
@Override
public void onNext(T t) {
if (!tryOnNext(t) && !done) {
upstream.request(1);
}
}
@Override
public boolean tryOnNext(T t) {
if (done) {
return false;
}
long retries = 0;
for (;;) {
Optional<? extends R> v;
try {
v = Objects.requireNonNull(mapper.apply(t), "The mapper returned a null Optional");
} catch (Throwable ex) {
Exceptions.throwIfFatal(ex);
ParallelFailureHandling h;
try {
h = Objects.requireNonNull(errorHandler.apply(++retries, ex), "The errorHandler returned a null ParallelFailureHandling");
} catch (Throwable exc) {
Exceptions.throwIfFatal(exc);
cancel();
onError(new CompositeException(ex, exc));
return false;
}
switch (h) {
case RETRY:
continue;
case SKIP:
return false;
case STOP:
cancel();
onComplete();
return false;
default:
cancel();
onError(ex);
return false;
}
}
if (v.isPresent()) {
downstream.onNext(v.get());
return true;
}
return false;
}
}
@Override
public void onError(Throwable t) {
if (done) {
RxJavaPlugins.onError(t);
return;
}
done = true;
downstream.onError(t);
}
@Override
public void onComplete() {
if (done) {
return;
}
done = true;
downstream.onComplete();
}
}
static final | ParallelMapTrySubscriber |
java | micronaut-projects__micronaut-core | http/src/main/java/io/micronaut/http/codec/DefaultMediaTypeCodecRegistry.java | {
"start": 1121,
"end": 3410
} | class ____ implements MediaTypeCodecRegistry {
Map<String, Optional<MediaTypeCodec>> decodersByExtension = new LinkedHashMap<>(3);
Map<MediaType, Optional<MediaTypeCodec>> decodersByType = new LinkedHashMap<>(3);
private final Collection<MediaTypeCodec> codecs;
/**
* @param codecs The media type codecs
*/
DefaultMediaTypeCodecRegistry(MediaTypeCodec... codecs) {
this(Arrays.asList(codecs));
}
/**
* @param codecs The media type codecs
*/
DefaultMediaTypeCodecRegistry(Collection<MediaTypeCodec> codecs) {
if (codecs != null) {
this.codecs = Collections.unmodifiableCollection(codecs);
for (MediaTypeCodec decoder : codecs) {
Collection<MediaType> mediaTypes = decoder.getMediaTypes();
for (MediaType mediaType : mediaTypes) {
if (mediaType != null) {
decodersByExtension.put(mediaType.getExtension(), Optional.of(decoder));
decodersByType.put(mediaType, Optional.of(decoder));
}
}
}
} else {
this.codecs = Collections.emptyList();
}
}
@Override
@SuppressWarnings("java:S2789") // performance optimization
public Optional<MediaTypeCodec> findCodec(@Nullable MediaType mediaType) {
if (mediaType == null) {
return Optional.empty();
}
Optional<MediaTypeCodec> decoder = decodersByType.get(mediaType);
if (decoder == null) {
decoder = decodersByExtension.get(mediaType.getExtension());
}
return decoder == null ? Optional.empty() : decoder;
}
@Override
public Optional<MediaTypeCodec> findCodec(@Nullable MediaType mediaType, Class<?> type) {
Optional<MediaTypeCodec> codec = findCodec(mediaType);
if (codec.isPresent()) {
MediaTypeCodec mediaTypeCodec = codec.get();
if (mediaTypeCodec.supportsType(type)) {
return codec;
} else {
return Optional.empty();
}
}
return codec;
}
@Override
public Collection<MediaTypeCodec> getCodecs() {
return codecs;
}
}
| DefaultMediaTypeCodecRegistry |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParserForDataToRetrieve.java | {
"start": 1452,
"end": 3627
} | class ____ implements TimelineParser {
private String expr;
private final int exprLength;
public TimelineParserForDataToRetrieve(String expression) {
this.expr = expression;
if (expression != null) {
this.expr = expr.trim();
exprLength = expr.length();
} else {
exprLength = 0;
}
}
@Override
public TimelineFilterList parse() throws TimelineParseException {
if (expr == null || exprLength == 0) {
return null;
}
TimelineCompareOp compareOp = null;
int openingBracketIndex =
expr.indexOf(TimelineParseConstants.OPENING_BRACKET_CHAR);
if (expr.charAt(0) == TimelineParseConstants.NOT_CHAR) {
if (openingBracketIndex == -1) {
throw new TimelineParseException("Invalid config/metric to retrieve " +
"expression");
}
if (openingBracketIndex != 1 &&
expr.substring(1, openingBracketIndex + 1).trim().length() != 1) {
throw new TimelineParseException("Invalid config/metric to retrieve " +
"expression");
}
compareOp = TimelineCompareOp.NOT_EQUAL;
} else if (openingBracketIndex <= 0) {
compareOp = TimelineCompareOp.EQUAL;
}
char lastChar = expr.charAt(exprLength - 1);
if (compareOp == TimelineCompareOp.NOT_EQUAL &&
lastChar != TimelineParseConstants.CLOSING_BRACKET_CHAR) {
throw new TimelineParseException("Invalid config/metric to retrieve " +
"expression");
}
if (openingBracketIndex != -1 &&
expr.charAt(exprLength - 1) ==
TimelineParseConstants.CLOSING_BRACKET_CHAR) {
expr = expr.substring(openingBracketIndex + 1, exprLength - 1).trim();
}
if (expr.isEmpty()) {
return null;
}
Operator op =
(compareOp == TimelineCompareOp.NOT_EQUAL) ? Operator.AND : Operator.OR;
TimelineFilterList list = new TimelineFilterList(op);
String[] splits = expr.split(TimelineParseConstants.COMMA_DELIMITER);
for (String split : splits) {
list.addFilter(new TimelinePrefixFilter(compareOp, split.trim()));
}
return list;
}
@Override
public void close() {
}
}
| TimelineParserForDataToRetrieve |
java | apache__flink | flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/reader/SimpleStreamFormat.java | {
"start": 1900,
"end": 4496
} | class ____<T> implements StreamFormat<T> {
private static final long serialVersionUID = 1L;
/**
* Creates a new reader. This method is called both for the creation of new reader (from the
* beginning of a file) and for restoring checkpointed readers.
*
* <p>If the reader previously checkpointed an offset, then the input stream will be positioned
* to that particular offset. Readers checkpoint an offset by returning a value from the method
* {@link Reader#getCheckpointedPosition()} method with an offset other than {@link
* CheckpointedPosition#NO_OFFSET}).
*/
public abstract Reader<T> createReader(Configuration config, FSDataInputStream stream)
throws IOException;
/**
* Gets the type produced by this format. This type will be the type produced by the file source
* as a whole.
*/
@Override
public abstract TypeInformation<T> getProducedType();
// ------------------------------------------------------------------------
// pre-defined methods from Stream Format
// ------------------------------------------------------------------------
/** This format is always not splittable. */
@Override
public final boolean isSplittable() {
return false;
}
@Override
public final Reader<T> createReader(
Configuration config, FSDataInputStream stream, long fileLen, long splitEnd)
throws IOException {
checkNotSplit(fileLen, splitEnd);
final long streamPos = stream.getPos();
checkArgument(
streamPos == 0L,
"SimpleStreamFormat is not splittable, but found non-zero stream position (%s)",
streamPos);
return createReader(config, stream);
}
@Override
public final Reader<T> restoreReader(
final Configuration config,
final FSDataInputStream stream,
final long restoredOffset,
final long fileLen,
final long splitEnd)
throws IOException {
checkNotSplit(fileLen, splitEnd);
stream.seek(restoredOffset);
return createReader(config, stream);
}
private static void checkNotSplit(long fileLen, long splitEnd) {
if (splitEnd != fileLen) {
throw new IllegalArgumentException(
String.format(
"SimpleStreamFormat is not splittable, but found split end (%d) different from file length (%d)",
splitEnd, fileLen));
}
}
}
| SimpleStreamFormat |
java | apache__camel | components/camel-irc/src/main/java/org/apache/camel/component/irc/IrcLogger.java | {
"start": 1049,
"end": 4351
} | class ____ extends IRCEventAdapter {
private Logger log;
private String server;
public IrcLogger(Logger log, String server) {
this.log = log;
this.server = server;
}
@Override
public void onDisconnected() {
log.info("Server: {} - onDisconnected", server);
}
@Override
public void onError(int num, String msg) {
log.error("Server: {} - onError num={} msg=\"{}\"", server, num, msg);
}
@Override
public void onError(String msg) {
log.error("Server: {} - onError msg=\"{}\"", server, msg);
}
@Override
public void onInvite(String chan, IRCUser user, String passiveNick) {
log.debug("Server: {} - onInvite chan={} user={} passiveNick={}", server, chan, user, passiveNick);
}
@Override
public void onJoin(String chan, IRCUser user) {
log.debug("Server: {} - onJoin chan={} user={}", server, chan, user);
}
@Override
public void onKick(String chan, IRCUser user, String passiveNick, String msg) {
log.debug("Server: {} - onKick chan={} user={} passiveNick={} msg=\"{}\"", server, chan, user, passiveNick, msg);
}
@Override
public void onMode(String chan, IRCUser user, IRCModeParser ircModeParser) {
log.info("Server: {} - onMode chan={} user={} ircModeParser={}", server, chan, user, ircModeParser);
}
@Override
public void onMode(IRCUser user, String passiveNick, String mode) {
log.info("Server: {} - onMode user={} passiveNick={} mode={}", server, user, passiveNick, mode);
}
@Override
public void onNick(IRCUser user, String newNick) {
log.debug("Server: {} - onNick user={} newNick={}", server, user, newNick);
}
@Override
public void onNotice(String target, IRCUser user, String msg) {
log.debug("Server: {} - onNotice target={} user={} msg=\"{}\"", server, target, user, msg);
}
@Override
public void onPart(String chan, IRCUser user, String msg) {
log.debug("Server: {} - onPart chan={} user={} msg=\"{}\"", server, chan, user, msg);
}
@Override
public void onPing(String ping) {
log.info("Server: {} - onPing ping={}", server, ping);
}
@Override
public void onPrivmsg(String target, IRCUser user, String msg) {
log.debug("Server: {} - onPrivmsg target={} user={} msg=\"{}\"", server, target, user, msg);
}
@Override
public void onQuit(IRCUser user, String msg) {
log.debug("Server: {} - onQuit user={} msg=\"{}\"", server, user, msg);
}
@Override
public void onRegistered() {
log.info("Server: {} - onRegistered", server);
}
@Override
public void onReply(int num, String value, String msg) {
log.debug("Server: {} - onReply num={} value=\"{}\" msg=\"{}\"", server, num, value, msg);
}
@Override
public void onTopic(String chan, IRCUser user, String topic) {
log.debug("Server: {} - onTopic chan={} user={} topic={}", server, chan, user, topic);
}
@Override
public void unknown(String prefix, String command, String middle, String trailing) {
log.info("Server: {} - unknown prefix={} command={} middle={} trailing={}", server, prefix, command,
middle, trailing);
}
}
| IrcLogger |
java | apache__camel | core/camel-support/src/generated/java/org/apache/camel/support/processor/idempotent/FileIdempotentRepositoryConfigurer.java | {
"start": 750,
"end": 3164
} | class ____ extends org.apache.camel.support.component.PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
org.apache.camel.support.processor.idempotent.FileIdempotentRepository target = (org.apache.camel.support.processor.idempotent.FileIdempotentRepository) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "cachesize":
case "cacheSize": target.setCacheSize(property(camelContext, int.class, value)); return true;
case "dropoldestfilestore":
case "dropOldestFileStore": target.setDropOldestFileStore(property(camelContext, long.class, value)); return true;
case "filestore":
case "fileStore": target.setFileStore(property(camelContext, java.io.File.class, value)); return true;
case "maxfilestoresize":
case "maxFileStoreSize": target.setMaxFileStoreSize(property(camelContext, long.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "cachesize":
case "cacheSize": return int.class;
case "dropoldestfilestore":
case "dropOldestFileStore": return long.class;
case "filestore":
case "fileStore": return java.io.File.class;
case "maxfilestoresize":
case "maxFileStoreSize": return long.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
org.apache.camel.support.processor.idempotent.FileIdempotentRepository target = (org.apache.camel.support.processor.idempotent.FileIdempotentRepository) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "cachesize":
case "cacheSize": return target.getCacheSize();
case "dropoldestfilestore":
case "dropOldestFileStore": return target.getDropOldestFileStore();
case "filestore":
case "fileStore": return target.getFileStore();
case "maxfilestoresize":
case "maxFileStoreSize": return target.getMaxFileStoreSize();
default: return null;
}
}
}
| FileIdempotentRepositoryConfigurer |
java | redisson__redisson | redisson/src/main/java/org/redisson/client/protocol/decoder/ScoredSortedSetScanDecoder.java | {
"start": 899,
"end": 1256
} | class ____<T> extends ObjectListReplayDecoder<T> {
@Override
public Decoder<Object> getDecoder(Codec codec, int paramNum, State state, long size) {
if (paramNum % 2 != 0) {
return DoubleCodec.INSTANCE.getValueDecoder();
}
return super.getDecoder(codec, paramNum, state, size);
}
}
| ScoredSortedSetScanDecoder |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/context/properties/ConfigurationPropertiesBeanFactoryInitializationAotProcessorTests.java | {
"start": 8501,
"end": 9274
} | class ____ extends AbstractAssert<BindableAssert, Bindable<?>> {
BindableAssert(Bindable<?> bindable) {
super(bindable, BindableAssert.class);
}
BindableAssert hasBindMethod(BindMethod bindMethod) {
if (this.actual.getBindMethod() != bindMethod) {
throwAssertionError(
new BasicErrorMessageFactory("Expected %s to have bind method %s but bind method was %s",
this.actual, bindMethod, this.actual.getBindMethod()));
}
return this;
}
BindableAssert hasType(Class<?> type) {
if (!type.equals(this.actual.getType().resolve())) {
throwAssertionError(new BasicErrorMessageFactory("Expected %s to have type %s but type was %s",
this.actual, type, this.actual.getType().resolve()));
}
return this;
}
}
}
| BindableAssert |
java | apache__camel | tooling/camel-tooling-maven/src/test/java/org/apache/camel/tooling/maven/support/DIRegistryTest.java | {
"start": 5878,
"end": 6344
} | class ____ {
private final MySimpleBean simpleBean;
private final MyNamedBean namedBean;
@Inject
public MyComplexishBean(MySimpleBean sb, MyNamedBean nb) {
simpleBean = sb;
namedBean = nb;
}
public MySimpleBean getSimpleBean() {
return simpleBean;
}
public MyNamedBean getNamedBean() {
return namedBean;
}
}
public static | MyComplexishBean |
java | google__dagger | javatests/dagger/internal/codegen/ModuleFactoryGeneratorTest.java | {
"start": 10243,
"end": 11361
} | class ____ {}");
daggerCompiler(module)
.compile(
subject -> {
subject.hasErrorCount(2);
// We avoid asserting on the line number because ksp and javac report different lines.
// The main issue here is that ksp doesn't allow reporting errors on individual
// annotation values, it only allows reporting errors on annotations themselves.
subject.hasErrorContaining(
"java.lang.Void is listed as a module, but is not annotated with @Module")
.onSource(module);
subject.hasErrorContaining(
"java.lang.String is listed as a module, but is not annotated with @Module")
.onSource(module);
});
}
@Test public void singleProvidesMethodNoArgs() {
Source moduleFile =
CompilerTests.javaSource(
"test.TestModule",
"package test;",
"",
"import dagger.Module;",
"import dagger.Provides;",
"",
"@Module",
"final | TestModule |
java | google__guice | extensions/persist/test/com/google/inject/persist/jpa/EntityManagerPerRequestProvisionTest.java | {
"start": 3104,
"end": 3590
} | class ____ {
static EntityManager em;
@Inject
public JpaDao(EntityManager em) {
JpaDao.em = em;
}
@Transactional
public <T> void persist(T t) {
assertTrue("em is not open!", em.isOpen());
assertTrue("no active txn!", em.getTransaction().isActive());
em.persist(t);
assertTrue("Persisting object failed", em.contains(t));
}
@Transactional
public <T> boolean contains(T t) {
return em.contains(t);
}
}
}
| JpaDao |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/UnaryExec.java | {
"start": 587,
"end": 2046
} | class ____ extends PhysicalPlan {
private final PhysicalPlan child;
private AttributeSet lazyOutputSet;
protected UnaryExec(Source source, PhysicalPlan child) {
super(source, Collections.singletonList(child));
this.child = child;
}
@Override
public final PhysicalPlan replaceChildren(List<PhysicalPlan> newChildren) {
return replaceChild(newChildren.get(0));
}
public abstract UnaryExec replaceChild(PhysicalPlan newChild);
public PhysicalPlan child() {
return child;
}
@Override
public List<Attribute> output() {
return child.output();
}
@Override
public AttributeSet outputSet() {
if (lazyOutputSet == null) {
List<Attribute> output = output();
lazyOutputSet = output == child.output() ? child.outputSet() : AttributeSet.of(output);
return lazyOutputSet;
}
return lazyOutputSet;
}
@Override
public AttributeSet inputSet() {
return child.outputSet();
}
@Override
public int hashCode() {
return Objects.hashCode(child());
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
UnaryExec other = (UnaryExec) obj;
return Objects.equals(child, other.child);
}
}
| UnaryExec |
java | spring-projects__spring-boot | module/spring-boot-actuator-autoconfigure/src/main/java/org/springframework/boot/actuate/autoconfigure/logging/LogFileWebEndpointAutoConfiguration.java | {
"start": 2159,
"end": 2506
} | class ____ {
@Bean
@ConditionalOnMissingBean
@Conditional(LogFileCondition.class)
LogFileWebEndpoint logFileWebEndpoint(ObjectProvider<LogFile> logFile, LogFileWebEndpointProperties properties) {
return new LogFileWebEndpoint(logFile.getIfAvailable(), properties.getExternalFile());
}
private static final | LogFileWebEndpointAutoConfiguration |
java | junit-team__junit5 | junit-platform-engine/src/main/java/org/junit/platform/engine/support/hierarchical/Node.java | {
"start": 10753,
"end": 11232
} | enum ____ {
/**
* Force execution in same thread as the parent node.
*
* @see #CONCURRENT
*/
SAME_THREAD,
/**
* Allow concurrent execution with any other node.
*
* @see #SAME_THREAD
*/
CONCURRENT
}
/**
* Represents an invocation that runs with the supplied context.
*
* @param <C> the type of {@code EngineExecutionContext} used by the {@code HierarchicalTestEngine}
* @since 1.4
*/
@API(status = STABLE, since = "1.10")
| ExecutionMode |
java | grpc__grpc-java | xds/src/main/java/io/grpc/xds/CsdsService.java | {
"start": 3394,
"end": 10602
} | class ____
extends ClientStatusDiscoveryServiceGrpc.ClientStatusDiscoveryServiceImplBase {
@Override
public void fetchClientStatus(
ClientStatusRequest request, StreamObserver<ClientStatusResponse> responseObserver) {
if (handleRequest(request, responseObserver)) {
responseObserver.onCompleted();
}
// TODO(sergiitk): Add a case covering mutating handleRequest return false to true - to verify
// that responseObserver.onCompleted() isn't erroneously called on error.
}
@Override
public StreamObserver<ClientStatusRequest> streamClientStatus(
final StreamObserver<ClientStatusResponse> responseObserver) {
return new StreamObserver<ClientStatusRequest>() {
@Override
public void onNext(ClientStatusRequest request) {
handleRequest(request, responseObserver);
}
@Override
public void onError(Throwable t) {
onCompleted();
}
@Override
public void onCompleted() {
responseObserver.onCompleted();
}
};
}
}
private boolean handleRequest(
ClientStatusRequest request, StreamObserver<ClientStatusResponse> responseObserver) {
StatusException error = null;
if (request.getNodeMatchersCount() > 0) {
error = new StatusException(
Status.INVALID_ARGUMENT.withDescription("node_matchers not supported"));
} else {
List<String> targets = xdsClientPoolFactory.getTargets();
List<ClientConfig> clientConfigs = new ArrayList<>(targets.size());
for (int i = 0; i < targets.size() && error == null; i++) {
try {
ClientConfig clientConfig = getConfigForRequest(targets.get(i));
if (clientConfig != null) {
clientConfigs.add(clientConfig);
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
logger.log(Level.FINE, "Server interrupted while building CSDS config dump", e);
error = Status.ABORTED.withDescription("Thread interrupted").withCause(e).asException();
} catch (RuntimeException e) {
logger.log(Level.WARNING, "Unexpected error while building CSDS config dump", e);
error = Status.INTERNAL.withDescription("Unexpected internal error").withCause(e)
.asException();
}
}
try {
responseObserver.onNext(getStatusResponse(clientConfigs));
} catch (RuntimeException e) {
logger.log(Level.WARNING, "Unexpected error while processing CSDS config dump", e);
error = Status.INTERNAL.withDescription("Unexpected internal error").withCause(e)
.asException();
}
}
if (error == null) {
return true; // All clients reported without error
}
responseObserver.onError(error);
return false;
}
private ClientConfig getConfigForRequest(String target) throws InterruptedException {
ObjectPool<XdsClient> xdsClientPool = xdsClientPoolFactory.get(target);
if (xdsClientPool == null) {
return null;
}
XdsClient xdsClient = null;
try {
xdsClient = xdsClientPool.getObject();
return getClientConfigForXdsClient(xdsClient, target);
} finally {
if (xdsClient != null) {
xdsClientPool.returnObject(xdsClient);
}
}
}
private ClientStatusResponse getStatusResponse(List<ClientConfig> clientConfigs) {
if (clientConfigs.isEmpty()) {
return ClientStatusResponse.getDefaultInstance();
}
return ClientStatusResponse.newBuilder().addAllConfig(clientConfigs).build();
}
@VisibleForTesting
static ClientConfig getClientConfigForXdsClient(XdsClient xdsClient, String target)
throws InterruptedException {
ClientConfig.Builder builder = ClientConfig.newBuilder()
.setClientScope(target)
.setNode(xdsClient.getBootstrapInfo().node().toEnvoyProtoNode());
Map<XdsResourceType<?>, Map<String, ResourceMetadata>> metadataByType =
awaitSubscribedResourcesMetadata(xdsClient.getSubscribedResourcesMetadataSnapshot());
for (Map.Entry<XdsResourceType<?>, Map<String, ResourceMetadata>> metadataByTypeEntry
: metadataByType.entrySet()) {
XdsResourceType<?> type = metadataByTypeEntry.getKey();
Map<String, ResourceMetadata> metadataByResourceName = metadataByTypeEntry.getValue();
for (Map.Entry<String, ResourceMetadata> metadataEntry : metadataByResourceName.entrySet()) {
String resourceName = metadataEntry.getKey();
ResourceMetadata metadata = metadataEntry.getValue();
GenericXdsConfig.Builder genericXdsConfigBuilder = GenericXdsConfig.newBuilder()
.setTypeUrl(type.typeUrl())
.setName(resourceName)
.setClientStatus(metadataStatusToClientStatus(metadata.getStatus()));
if (metadata.getRawResource() != null) {
genericXdsConfigBuilder
.setVersionInfo(metadata.getVersion())
.setLastUpdated(Timestamps.fromNanos(metadata.getUpdateTimeNanos()))
.setXdsConfig(metadata.getRawResource());
}
if (metadata.getStatus() == ResourceMetadataStatus.NACKED) {
verifyNotNull(metadata.getErrorState(), "resource %s getErrorState", resourceName);
genericXdsConfigBuilder
.setErrorState(metadataUpdateFailureStateToProto(metadata.getErrorState()));
}
builder.addGenericXdsConfigs(genericXdsConfigBuilder);
}
}
return builder.build();
}
private static Map<XdsResourceType<?>, Map<String, ResourceMetadata>>
awaitSubscribedResourcesMetadata(
ListenableFuture<Map<XdsResourceType<?>, Map<String, ResourceMetadata>>> future)
throws InterruptedException {
try {
// Normally this shouldn't take long, but add some slack for cases like a cold JVM.
return future.get(20, TimeUnit.SECONDS);
} catch (ExecutionException | TimeoutException e) {
// For CSDS' purposes, the exact reason why metadata not loaded isn't important.
throw new RuntimeException(e);
}
}
@VisibleForTesting
static ClientResourceStatus metadataStatusToClientStatus(ResourceMetadataStatus status) {
switch (status) {
case UNKNOWN:
return ClientResourceStatus.UNKNOWN;
case DOES_NOT_EXIST:
return ClientResourceStatus.DOES_NOT_EXIST;
case REQUESTED:
return ClientResourceStatus.REQUESTED;
case ACKED:
return ClientResourceStatus.ACKED;
case NACKED:
return ClientResourceStatus.NACKED;
case TIMEOUT:
return ClientResourceStatus.TIMEOUT;
default:
throw new AssertionError("Unexpected ResourceMetadataStatus: " + status);
}
}
private static io.envoyproxy.envoy.admin.v3.UpdateFailureState metadataUpdateFailureStateToProto(
UpdateFailureState errorState) {
return io.envoyproxy.envoy.admin.v3.UpdateFailureState.newBuilder()
.setLastUpdateAttempt(Timestamps.fromNanos(errorState.getFailedUpdateTimeNanos()))
.setDetails(errorState.getFailedDetails())
.setVersionInfo(errorState.getFailedVersion())
.build();
}
}
| CsdsServiceInternal |
java | google__dagger | examples/bazel/java/example/common/CoffeeMaker.java | {
"start": 727,
"end": 1195
} | class ____ {
private final CoffeeLogger logger;
private final Lazy<Heater> heater; // Create a possibly costly heater only when we use it.
private final Pump pump;
@Inject
CoffeeMaker(CoffeeLogger logger, Lazy<Heater> heater, Pump pump) {
this.logger = logger;
this.heater = heater;
this.pump = pump;
}
public void brew() {
heater.get().on();
pump.pump();
logger.log(" [_]P coffee! [_]P ");
heater.get().off();
}
}
| CoffeeMaker |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java | {
"start": 2606,
"end": 12637
} | class ____ implements FsDatasetSpi<ExternalVolumeImpl> {
private final DatanodeStorage storage = new DatanodeStorage(
DatanodeStorage.generateUuid(), DatanodeStorage.State.NORMAL,
StorageType.DEFAULT);
@Override
public FsVolumeReferences getFsVolumeReferences() {
return null;
}
@Override
public void addVolume(StorageLocation location, List<NamespaceInfo> nsInfos)
throws IOException {
}
@Override
public void removeVolumes(Collection<StorageLocation> volumes,
boolean clearFailure) {
throw new UnsupportedOperationException();
}
@Override
public DatanodeStorage getStorage(String storageUuid) {
return null;
}
@Override
public StorageReport[] getStorageReports(String bpid) throws IOException {
StorageReport[] result = new StorageReport[1];
result[0] = new StorageReport(storage, false, 0, 0, 0, 0, 0);
return result;
}
@Override
public ExternalVolumeImpl getVolume(ExtendedBlock b) {
return null;
}
@Override
public Map<String, Object> getVolumeInfoMap() {
return null;
}
@Override
public List<ReplicaInfo> getFinalizedBlocks(String bpid) {
return null;
}
@Override
public void checkAndUpdate(String bpid, ScanInfo info) {
return;
}
@Override
public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
throws IOException {
return new LengthInputStream(null, 0);
}
@Override
public long getLength(ExtendedBlock b) throws IOException {
return 0;
}
@Override
@Deprecated
public Replica getReplica(String bpid, long blockId) {
return new ExternalReplica();
}
@Override
public String getReplicaString(String bpid, long blockId) {
return null;
}
@Override
public Block getStoredBlock(String bpid, long blkid) throws IOException {
return new Block();
}
@Override
public InputStream getBlockInputStream(ExtendedBlock b, long seekOffset)
throws IOException {
return null;
}
@Override
public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, long blkoff,
long ckoff) throws IOException {
return new ReplicaInputStreams(null, null, null, null);
}
@Override
public ReplicaHandler createTemporary(StorageType t, String i,
ExtendedBlock b, boolean isTransfer) throws IOException {
return new ReplicaHandler(new ExternalReplicaInPipeline(), null);
}
@Override
public ReplicaHandler createRbw(StorageType storageType, String id,
ExtendedBlock b, boolean tf) throws IOException {
return new ReplicaHandler(new ExternalReplicaInPipeline(), null);
}
@Override
public ReplicaHandler createRbw(StorageType storageType, String storageId,
ExtendedBlock b, boolean allowLazyPersist, long newGS) throws IOException {
return createRbw(storageType, storageId, b, allowLazyPersist);
}
@Override
public ReplicaHandler recoverRbw(ExtendedBlock b, long newGS,
long minBytesRcvd, long maxBytesRcvd) throws IOException {
return new ReplicaHandler(new ExternalReplicaInPipeline(), null);
}
@Override
public ReplicaInPipeline convertTemporaryToRbw(
ExtendedBlock temporary) throws IOException {
return new ExternalReplicaInPipeline();
}
@Override
public ReplicaHandler append(ExtendedBlock b, long newGS,
long expectedBlockLen) throws IOException {
return new ReplicaHandler(new ExternalReplicaInPipeline(), null);
}
@Override
public ReplicaHandler recoverAppend(ExtendedBlock b, long newGS,
long expectedBlockLen) throws IOException {
return new ReplicaHandler(new ExternalReplicaInPipeline(), null);
}
@Override
public Replica recoverClose(ExtendedBlock b, long newGS, long expectedBlkLen)
throws IOException {
return null;
}
@Override
public void finalizeBlock(ExtendedBlock b, boolean fsyncDir)
throws IOException {
}
@Override
public void unfinalizeBlock(ExtendedBlock b) throws IOException {
}
@Override
public Map<DatanodeStorage, BlockListAsLongs> getBlockReports(String bpid) {
final Map<DatanodeStorage, BlockListAsLongs> result =
new HashMap<DatanodeStorage, BlockListAsLongs>();
result.put(storage, BlockListAsLongs.EMPTY);
return result;
}
@Override
public List<Long> getCacheReport(String bpid) {
return null;
}
@Override
public boolean contains(ExtendedBlock block) {
return false;
}
@Override
public void checkBlock(ExtendedBlock b, long minLength, ReplicaState state) throws ReplicaNotFoundException, UnexpectedReplicaStateException, FileNotFoundException, EOFException, IOException {
}
@Override
public boolean isValidBlock(ExtendedBlock b) {
return false;
}
@Override
public boolean isValidRbw(ExtendedBlock b) {
return false;
}
@Override
public void invalidate(String bpid, Block[] invalidBlks) throws IOException {
}
@Override
public void invalidateMissingBlock(String bpid, Block block) {
}
@Override
public void cache(String bpid, long[] blockIds) {
}
@Override
public void uncache(String bpid, long[] blockIds) {
}
@Override
public boolean isCached(String bpid, long blockId) {
return false;
}
@Override
public void handleVolumeFailures(Set<FsVolumeSpi> failedVolumes) {
}
@Override
public void shutdown() {
}
@Override
public void adjustCrcChannelPosition(ExtendedBlock b,
ReplicaOutputStreams outs, int checksumSize) throws IOException {
}
@Override
public boolean hasEnoughResource() {
return false;
}
@Override
public long getReplicaVisibleLength(ExtendedBlock block) throws IOException {
return 0;
}
@Override
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
throws IOException {
return new ReplicaRecoveryInfo(0, 0, 0, ReplicaState.FINALIZED);
}
@Override
public Replica updateReplicaUnderRecovery(ExtendedBlock oldBlock,
long recoveryId, long newBlockId, long newLength) throws IOException {
return null;
}
@Override
public void addBlockPool(String bpid, Configuration conf) throws IOException {
}
@Override
public void shutdownBlockPool(String bpid) {
}
@Override
public void deleteBlockPool(String bpid, boolean force) throws IOException {
}
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock b)
throws IOException {
return new BlockLocalPathInfo(null, "file", "metafile");
}
@Override
public void enableTrash(String bpid) {
}
@Override
public void clearTrash(String bpid) {
}
@Override
public boolean trashEnabled(String bpid) {
return false;
}
@Override
public void setRollingUpgradeMarker(String bpid) throws IOException {
}
@Override
public void clearRollingUpgradeMarker(String bpid) throws IOException {
}
@Override
public void submitBackgroundSyncFileRangeRequest(ExtendedBlock block,
ReplicaOutputStreams outs, long offset, long nbytes, int flags) {
}
@Override
public void onCompleteLazyPersist(String bpId, long blockId, long creationTime, File[] savedFiles, ExternalVolumeImpl targetVolume) {
}
@Override
public void onFailLazyPersist(String bpId, long blockId) {
}
@Override
public ReplicaInfo moveBlockAcrossStorage(ExtendedBlock block,
StorageType targetStorageType, String storageId) throws IOException {
return null;
}
@Override
public long getBlockPoolUsed(String bpid) throws IOException {
return 0;
}
@Override
public long getDfsUsed() throws IOException {
return 0;
}
@Override
public long getCapacity() throws IOException {
return 0;
}
@Override
public long getRemaining() throws IOException {
return 0;
}
@Override
public String getStorageInfo() {
return null;
}
@Override
public int getNumFailedVolumes() {
return 0;
}
@Override
public String[] getFailedStorageLocations() {
return null;
}
@Override
public long getLastVolumeFailureDate() {
return 0;
}
@Override
public long getEstimatedCapacityLostTotal() {
return 0;
}
@Override
public VolumeFailureSummary getVolumeFailureSummary() {
return null;
}
@Override
public long getCacheUsed() {
return 0;
}
@Override
public long getCacheCapacity() {
return 0;
}
@Override
public long getNumBlocksCached() {
return 0;
}
@Override
public long getNumBlocksFailedToCache() {
return 0;
}
@Override
public long getNumBlocksFailedToUncache() {
return 0;
}
/**
* Get metrics from the metrics source
*
* @param collector to contain the resulting metrics snapshot
* @param all if true, return all metrics even if unchanged.
*/
@Override
public void getMetrics(MetricsCollector collector, boolean all) {
try {
DataNodeMetricHelper.getMetrics(collector, this, "ExternalDataset");
} catch (Exception e){
//ignore exceptions
}
}
@Override
public void setPinning(ExtendedBlock block) throws IOException {
}
@Override
public boolean getPinning(ExtendedBlock block) throws IOException {
return false;
}
@Override
public boolean isDeletingBlock(String bpid, long blockId) {
return false;
}
@Override
public ReplicaInfo moveBlockAcrossVolumes(ExtendedBlock block,
FsVolumeSpi destination)
throws IOException {
return null;
}
@Override
public DataNodeLockManager<AutoCloseDataSetLock> acquireDatasetLockManager() {
return null;
}
@Override
public Set<? extends Replica> deepCopyReplica(String bpid)
throws IOException {
return Collections.EMPTY_SET;
}
@Override
public MountVolumeMap getMountVolumeMap() {
return null;
}
@Override
public List<FsVolumeImpl> getVolumeList() {
return null;
}
@Override
public long getLastDirScannerFinishTime() {
return 0L;
}
@Override
public long getPendingAsyncDeletions() {
return 0;
}
}
| ExternalDatasetImpl |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/websocket/WebSocketMessageBrokerConfigTests.java | {
"start": 27474,
"end": 27933
} | class ____ implements DeferredCsrfToken {
private final CsrfToken csrfToken;
TestDeferredCsrfToken(CsrfToken csrfToken) {
this.csrfToken = csrfToken;
}
@Override
public CsrfToken get() {
return this.csrfToken;
}
@Override
public boolean isGenerated() {
return false;
}
}
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.PARAMETER)
@AuthenticationPrincipal(expression = "#this.equals('{value}')")
@ | TestDeferredCsrfToken |
java | elastic__elasticsearch | x-pack/plugin/esql/arrow/src/main/java/org/elasticsearch/xpack/esql/arrow/BlockConverter.java | {
"start": 5333,
"end": 6747
} | class ____ extends BlockConverter {
public AsInt64(String esqlType) {
this(esqlType, Types.MinorType.BIGINT);
}
protected AsInt64(String esqlType, Types.MinorType minorType) {
super(esqlType, minorType);
}
@Override
public void convert(Block b, boolean multivalued, List<ArrowBuf> bufs, List<BufWriter> bufWriters) {
LongBlock block = (LongBlock) b;
if (multivalued) {
addListOffsets(bufs, bufWriters, block);
}
accumulateVectorValidity(bufs, bufWriters, block, multivalued);
bufs.add(dummyArrowBuf(vectorByteSize(block)));
bufWriters.add(out -> {
if (block.areAllValuesNull()) {
return BlockConverter.writeZeroes(out, vectorByteSize(block));
}
// TODO could we "just" get the memory of the array and dump it?
int count = BlockConverter.valueCount(block);
for (int i = 0; i < count; i++) {
out.writeLongLE(block.getLong(i));
}
return (long) count * Long.BYTES;
});
}
private static int vectorByteSize(LongBlock b) {
return Long.BYTES * BlockConverter.valueCount(b);
}
}
/**
* Conversion of Boolean blocks
*/
public static | AsInt64 |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/Planner.java | {
"start": 967,
"end": 1029
} | interface ____ different planners to be created.
*/
public | allows |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/AllLongBytesRefState.java | {
"start": 785,
"end": 857
} | class ____ generated. Edit {@code X-All2State.java.st} instead.
*/
final | is |
java | alibaba__fastjson | src/test/java/com/alibaba/json/test/epubview/TestKlutz3.java | {
"start": 172,
"end": 825
} | class ____ extends TestCase {
public void test_0 () throws Exception {
EpubViewBook book = new EpubViewBook();
book.setBookName("xx");
book.setPageList(new ArrayList<EpubViewPage>());
EpubViewPage page = new EpubViewPage();
book.getPageList().add(page);
EpubViewMetaData metadata = new EpubViewMetaData();
metadata.setProperties(new HashMap<String, String>());
// book.setMetadata(null);
String str = JSON.toJSONString(book);
System.out.println(str);
JSON.parseObject(str, EpubViewBook.class);
}
}
| TestKlutz3 |
java | quarkusio__quarkus | integration-tests/devtools/src/test/java/io/quarkus/devtools/codestarts/quarkus/FunqyGoogleCloudFunctionsCodestartTest.java | {
"start": 376,
"end": 1437
} | class ____ {
@RegisterExtension
public static QuarkusCodestartTest codestartTest = QuarkusCodestartTest.builder()
.codestarts("funqy-google-cloud-functions")
.languages(JAVA)
.build();
@Test
void testContent() throws Throwable {
codestartTest.checkGeneratedSource("org.acme.funqygooglecloudfunctions.GreetingFunctions");
codestartTest.checkGeneratedSource("org.acme.funqygooglecloudfunctions.GreetingService");
codestartTest.checkGeneratedTestSource("org.acme.funqygooglecloudfunctions.GreetingFunctionsCloudEventsTest");
codestartTest.checkGeneratedTestSource("org.acme.funqygooglecloudfunctions.GreetingFunctionsPubSubTest");
codestartTest.checkGeneratedTestSource("org.acme.funqygooglecloudfunctions.GreetingFunctionsStorageTest");
}
@Test
@EnabledIfSystemProperty(named = "build-projects", matches = "true")
void buildAllProjectsForLocalUse() throws Throwable {
codestartTest.buildAllProjects();
}
}
| FunqyGoogleCloudFunctionsCodestartTest |
java | apache__flink | flink-table/flink-table-api-java/src/test/java/org/apache/flink/table/catalog/FunctionCatalogTest.java | {
"start": 28290,
"end": 30974
} | class ____ validated
functionCatalog.dropTemporaryCatalogFunction(
PARTIAL_UNRESOLVED_IDENTIFIER, true);
functionCatalog.registerTemporaryCatalogFunction(
PARTIAL_UNRESOLVED_IDENTIFIER,
new CatalogFunctionImpl(FUNCTION_INVALID.getClass().getName()),
false);
})
.satisfies(
anyCauseMatches(
ValidationException.class,
"Could not register temporary catalog function '"
+ IDENTIFIER.asSummaryString()
+ "' due to implementation errors."));
functionCatalog.dropTemporaryCatalogFunction(PARTIAL_UNRESOLVED_IDENTIFIER, true);
// test register uninstantiated table function
final CatalogFunctionImpl temporaryTableCatalogFunction =
new CatalogFunctionImpl(TABLE_FUNCTION.getClass().getName());
functionCatalog.registerTemporaryCatalogFunction(
PARTIAL_UNRESOLVED_IDENTIFIER, temporaryTableCatalogFunction, false);
assertThat(functionCatalog.lookupFunction(PARTIAL_UNRESOLVED_IDENTIFIER))
.hasValue(
ContextResolvedFunction.temporary(
FunctionIdentifier.of(IDENTIFIER),
TABLE_FUNCTION,
temporaryTableCatalogFunction));
functionCatalog.dropTemporaryCatalogFunction(PARTIAL_UNRESOLVED_IDENTIFIER, true);
// test register uninstantiated aggregate function
final CatalogFunctionImpl temporaryAggregateCatalogFunction =
new CatalogFunctionImpl(AGGREGATE_FUNCTION.getClass().getName());
functionCatalog.registerTemporaryCatalogFunction(
PARTIAL_UNRESOLVED_IDENTIFIER, temporaryAggregateCatalogFunction, false);
assertThat(functionCatalog.lookupFunction(PARTIAL_UNRESOLVED_IDENTIFIER))
.hasValue(
ContextResolvedFunction.temporary(
FunctionIdentifier.of(IDENTIFIER),
AGGREGATE_FUNCTION,
temporaryAggregateCatalogFunction));
}
// --------------------------------------------------------------------------------------------
// Test classes
// --------------------------------------------------------------------------------------------
private static | gets |
java | google__dagger | javatests/dagger/internal/codegen/InjectConstructorFactoryGeneratorTest.java | {
"start": 7598,
"end": 8361
} | class ____<A extends Number & Comparable<A>,",
" B extends List<? extends String>,",
" C extends List<? super String>> {",
" @Inject GenericClass(A a, B b, C c) {}",
"}");
daggerCompiler(file)
.compile(
subject -> {
subject.hasErrorCount(0);
assertSourceMatchesGolden(subject, "test/GenericClass_Factory");
});
}
@Test
public void boundedGenerics_withPackagePrivateDependency() {
Source genericClass =
CompilerTests.javaSource(
"test.GenericClass",
"package test;",
"",
"import javax.inject.Inject;",
"import java.util.List;",
"",
" | GenericClass |
java | junit-team__junit5 | junit-platform-launcher/src/main/java/org/junit/platform/launcher/core/LauncherFactory.java | {
"start": 2895,
"end": 9290
} | class ____ {
private LauncherFactory() {
/* no-op */
}
/**
* Factory method for opening a new {@link LauncherSession} using the
* {@linkplain LauncherConfig#DEFAULT default} {@link LauncherConfig}.
*
* @throws PreconditionViolationException if no test engines are detected
* @since 1.8
* @see #openSession(LauncherConfig)
*/
@API(status = STABLE, since = "1.10")
public static LauncherSession openSession() throws PreconditionViolationException {
return openSession(LauncherConfig.DEFAULT);
}
/**
* Factory method for opening a new {@link LauncherSession} using the
* supplied {@link LauncherConfig}.
*
* @param config the configuration for the session and the launcher; never
* {@code null}
* @throws PreconditionViolationException if the supplied configuration is
* {@code null}, or if no test engines are detected
* @since 1.8
* @see #openSession()
*/
@API(status = STABLE, since = "1.10")
public static LauncherSession openSession(LauncherConfig config) throws PreconditionViolationException {
Preconditions.notNull(config, "LauncherConfig must not be null");
LauncherConfigurationParameters configurationParameters = LauncherConfigurationParameters.builder().build();
return new DefaultLauncherSession(collectLauncherInterceptors(configurationParameters),
() -> createLauncherSessionListener(config),
sessionLevelStore -> createDefaultLauncher(config, configurationParameters, sessionLevelStore));
}
/**
* Factory method for creating a new {@link Launcher} using the
* {@linkplain LauncherConfig#DEFAULT default} {@link LauncherConfig}.
*
* @throws PreconditionViolationException if no test engines are detected
* @see #create(LauncherConfig)
*/
public static Launcher create() throws PreconditionViolationException {
return create(LauncherConfig.DEFAULT);
}
/**
* Factory method for creating a new {@link Launcher} using the supplied
* {@link LauncherConfig}.
*
* @param config the configuration for the launcher; never {@code null}
* @throws PreconditionViolationException if the supplied configuration is
* {@code null}, or if no test engines are detected
* registered
* @since 1.3
* @see #create()
*/
@API(status = STABLE, since = "1.10")
public static Launcher create(LauncherConfig config) throws PreconditionViolationException {
Preconditions.notNull(config, "LauncherConfig must not be null");
LauncherConfigurationParameters configurationParameters = LauncherConfigurationParameters.builder().build();
return new SessionPerRequestLauncher(
sessionLevelStore -> createDefaultLauncher(config, configurationParameters, sessionLevelStore),
() -> createLauncherSessionListener(config), () -> collectLauncherInterceptors(configurationParameters));
}
private static DefaultLauncher createDefaultLauncher(LauncherConfig config,
LauncherConfigurationParameters configurationParameters,
NamespacedHierarchicalStore<Namespace> sessionLevelStore) {
Set<TestEngine> engines = collectTestEngines(config);
List<PostDiscoveryFilter> filters = collectPostDiscoveryFilters(config);
DefaultLauncher launcher = new DefaultLauncher(engines, filters, sessionLevelStore);
JfrUtils.registerListeners(launcher);
registerLauncherDiscoveryListeners(config, launcher);
registerTestExecutionListeners(config, launcher, configurationParameters);
return launcher;
}
private static List<LauncherInterceptor> collectLauncherInterceptors(
LauncherConfigurationParameters configurationParameters) {
List<LauncherInterceptor> interceptors = new ArrayList<>();
if (configurationParameters.getBoolean(ENABLE_LAUNCHER_INTERCEPTORS).orElse(false)) {
ServiceLoaderRegistry.load(LauncherInterceptor.class).forEach(interceptors::add);
}
interceptors.add(ClasspathAlignmentCheckingLauncherInterceptor.INSTANCE);
return interceptors;
}
private static Set<TestEngine> collectTestEngines(LauncherConfig config) {
Set<TestEngine> engines = new LinkedHashSet<>();
if (config.isTestEngineAutoRegistrationEnabled()) {
new ServiceLoaderTestEngineRegistry().loadTestEngines().forEach(engines::add);
}
engines.addAll(config.getAdditionalTestEngines());
return engines;
}
private static LauncherSessionListener createLauncherSessionListener(LauncherConfig config) {
ListenerRegistry<LauncherSessionListener> listenerRegistry = ListenerRegistry.forLauncherSessionListeners();
if (config.isLauncherSessionListenerAutoRegistrationEnabled()) {
ServiceLoaderRegistry.load(LauncherSessionListener.class).forEach(listenerRegistry::add);
}
config.getAdditionalLauncherSessionListeners().forEach(listenerRegistry::add);
return listenerRegistry.getCompositeListener();
}
private static List<PostDiscoveryFilter> collectPostDiscoveryFilters(LauncherConfig config) {
List<PostDiscoveryFilter> filters = new ArrayList<>();
if (config.isPostDiscoveryFilterAutoRegistrationEnabled()) {
ServiceLoaderRegistry.load(PostDiscoveryFilter.class).forEach(filters::add);
}
filters.addAll(config.getAdditionalPostDiscoveryFilters());
return filters;
}
private static void registerLauncherDiscoveryListeners(LauncherConfig config, Launcher launcher) {
if (config.isLauncherDiscoveryListenerAutoRegistrationEnabled()) {
ServiceLoaderRegistry.load(LauncherDiscoveryListener.class).forEach(
launcher::registerLauncherDiscoveryListeners);
}
config.getAdditionalLauncherDiscoveryListeners().forEach(launcher::registerLauncherDiscoveryListeners);
}
private static void registerTestExecutionListeners(LauncherConfig config, Launcher launcher,
LauncherConfigurationParameters configurationParameters) {
if (config.isTestExecutionListenerAutoRegistrationEnabled()) {
loadAndFilterTestExecutionListeners(configurationParameters).forEach(
launcher::registerTestExecutionListeners);
}
config.getAdditionalTestExecutionListeners().forEach(launcher::registerTestExecutionListeners);
}
private static Iterable<TestExecutionListener> loadAndFilterTestExecutionListeners(
ConfigurationParameters configurationParameters) {
Predicate<String> classNameFilter = configurationParameters.get(DEACTIVATE_LISTENERS_PATTERN_PROPERTY_NAME) //
.map(ClassNamePatternFilterUtils::excludeMatchingClassNames) //
.orElse(__ -> true);
return ServiceLoaderRegistry.load(TestExecutionListener.class, classNameFilter);
}
}
| LauncherFactory |
java | apache__avro | lang/java/mapred/src/main/java/org/apache/avro/mapred/AvroSerialization.java | {
"start": 4725,
"end": 4973
} | interface ____ a flush()
// method and the Hadoop framework called it when needed rather
// than for every record.
encoder.flush();
}
@Override
public void close() throws IOException {
out.close();
}
}
}
| had |
java | google__guice | core/src/com/google/inject/multibindings/OptionalBinder.java | {
"start": 3049,
"end": 3448
} | class ____ extends AbstractModule {
* protected void configure() {
* OptionalBinder.newOptionalBinder(binder(), Renamer.class);
* }
* }</code></pre>
*
* <p>With this module, an {@code Optional<Renamer>} can now be injected. With no other bindings,
* the optional will be absent. Users can specify bindings in one of two ways:
*
* <p>Option 1:
*
* <pre><code>
* public | FrameworkModule |
java | quarkusio__quarkus | extensions/redis-client/runtime/src/test/java/io/quarkus/redis/datasource/RedisCommandCondition.java | {
"start": 528,
"end": 1584
} | class ____ implements ExecutionCondition {
private static final ConditionEvaluationResult ENABLED_BY_DEFAULT = enabled("@RequiresCommand is not present");
@Override
public ConditionEvaluationResult evaluateExecutionCondition(ExtensionContext context) {
Optional<RequiresCommand> optional = AnnotationUtils.findAnnotation(context.getElement(),
RequiresCommand.class);
if (optional.isPresent()) {
String[] cmd = optional.get().value();
List<String> commands = RedisServerExtension.getAvailableCommands()
.stream().map(String::toLowerCase)
.collect(Collectors.toList());
for (String c : cmd) {
if (!commands.contains(c.toLowerCase())) {
return disabled("Disabled, Redis command " + c + " not available.");
}
}
return enabled("Redis commands " + String.join(", ", cmd) + " are available");
}
return ENABLED_BY_DEFAULT;
}
}
| RedisCommandCondition |
java | apache__camel | components/camel-huawei/camel-huaweicloud-iam/src/main/java/org/apache/camel/component/huaweicloud/iam/IAMEndpoint.java | {
"start": 1803,
"end": 9033
} | class ____ extends DefaultEndpoint {
@UriPath(description = "Operation to be performed", displayName = "Operation", label = "producer", secret = false)
@Metadata(required = true)
private String operation;
@UriParam(description = "IAM service region",
displayName = "Service region", secret = false)
@Metadata(required = true)
private String region;
@UriParam(description = "Proxy server ip/hostname", displayName = "Proxy server host", secret = false)
@Metadata(required = false)
private String proxyHost;
@UriParam(description = "Proxy server port", displayName = "Proxy server port", secret = false)
@Metadata(required = false)
private int proxyPort;
@UriParam(description = "Proxy authentication user", displayName = "Proxy user", secret = true)
@Metadata(required = false)
private String proxyUser;
@UriParam(description = "Proxy authentication password", displayName = "Proxy password", secret = true)
@Metadata(required = false)
private String proxyPassword;
@UriParam(description = "Ignore SSL verification", displayName = "SSL Verification Ignored", secret = false,
defaultValue = "false")
@Metadata(required = false)
private boolean ignoreSslVerification;
@UriParam(description = "Configuration object for cloud service authentication", displayName = "Service Configuration",
secret = true)
@Metadata(required = false)
private ServiceKeys serviceKeys;
@UriParam(description = "Access key for the cloud user", displayName = "API access key (AK)", secret = true)
@Metadata(required = true)
private String accessKey;
@UriParam(description = "Secret key for the cloud user", displayName = "API secret key (SK)", secret = true)
@Metadata(required = true)
private String secretKey;
@UriParam(description = "User ID to perform operation with", displayName = "User ID", secret = true)
@Metadata(required = false)
private String userId;
@UriParam(description = "Group ID to perform operation with", displayName = "Group ID", secret = true)
@Metadata(required = false)
private String groupId;
private IamClient iamClient;
public IAMEndpoint() {
}
public IAMEndpoint(String uri, String operation, IAMComponent component) {
super(uri, component);
this.operation = operation;
}
@Override
public Producer createProducer() throws Exception {
return new IAMProducer(this);
}
@Override
public Consumer createConsumer(Processor processor) throws Exception {
throw new UnsupportedOperationException("You cannot receive messages from this endpoint");
}
public String getOperation() {
return operation;
}
public void setOperation(String operation) {
this.operation = operation;
}
public String getRegion() {
return region;
}
public void setRegion(String region) {
this.region = region;
}
public String getProxyHost() {
return proxyHost;
}
public void setProxyHost(String proxyHost) {
this.proxyHost = proxyHost;
}
public int getProxyPort() {
return proxyPort;
}
public void setProxyPort(int proxyPort) {
this.proxyPort = proxyPort;
}
public String getProxyUser() {
return proxyUser;
}
public void setProxyUser(String proxyUser) {
this.proxyUser = proxyUser;
}
public String getProxyPassword() {
return proxyPassword;
}
public void setProxyPassword(String proxyPassword) {
this.proxyPassword = proxyPassword;
}
public boolean isIgnoreSslVerification() {
return ignoreSslVerification;
}
public void setIgnoreSslVerification(boolean ignoreSslVerification) {
this.ignoreSslVerification = ignoreSslVerification;
}
public ServiceKeys getServiceKeys() {
return serviceKeys;
}
public void setServiceKeys(ServiceKeys serviceKeys) {
this.serviceKeys = serviceKeys;
}
public String getAccessKey() {
return accessKey;
}
public void setAccessKey(String accessKey) {
this.accessKey = accessKey;
}
public String getSecretKey() {
return secretKey;
}
public void setSecretKey(String secretKey) {
this.secretKey = secretKey;
}
public String getUserId() {
return userId;
}
public void setUserId(String userId) {
this.userId = userId;
}
public String getGroupId() {
return groupId;
}
public void setGroupId(String groupId) {
this.groupId = groupId;
}
public IamClient getIamClient() {
return iamClient;
}
public void setIamClient(IamClient iamClient) {
this.iamClient = iamClient;
}
/**
* Initialize and return a new IAM Client
*
* @return
*/
public IamClient initClient() {
if (iamClient != null) {
return iamClient;
}
// check for mandatory AK/SK in ServiceKeys object or in endpoint
if (ObjectHelper.isEmpty(getServiceKeys()) && ObjectHelper.isEmpty(getAccessKey())) {
throw new IllegalArgumentException("Authentication parameter 'access key (AK)' not found");
}
if (ObjectHelper.isEmpty(getServiceKeys()) && ObjectHelper.isEmpty(getSecretKey())) {
throw new IllegalArgumentException("Authentication parameter 'secret key (SK)' not found");
}
// setup AK/SK credential information. AK/SK provided through ServiceKeys overrides the AK/SK passed through the endpoint
GlobalCredentials auth = new GlobalCredentials()
.withAk(getServiceKeys() != null
? getServiceKeys().getAccessKey()
: getAccessKey())
.withSk(getServiceKeys() != null
? getServiceKeys().getSecretKey()
: getSecretKey());
// setup http information (including proxy information if provided)
HttpConfig httpConfig = HttpConfig.getDefaultHttpConfig();
httpConfig.withIgnoreSSLVerification(isIgnoreSslVerification());
if (ObjectHelper.isNotEmpty(getProxyHost())
&& ObjectHelper.isNotEmpty(getProxyPort())) {
httpConfig.withProxyHost(getProxyHost())
.withProxyPort(getProxyPort());
if (ObjectHelper.isNotEmpty(getProxyUser())) {
httpConfig.withProxyUsername(getProxyUser());
if (ObjectHelper.isNotEmpty(getProxyPassword())) {
httpConfig.withProxyPassword(getProxyPassword());
}
}
}
// Build IamClient with mandatory region parameter.
if (ObjectHelper.isNotEmpty(getRegion())) {
return IamClient.newBuilder()
.withCredential(auth)
.withHttpConfig(httpConfig)
.withRegion(IamRegion.valueOf(getRegion()))
.build();
} else {
throw new IllegalArgumentException("Region not found");
}
}
}
| IAMEndpoint |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/selection/qualifier/errors/ErroneousMessageByAnnotationAndNamedMapper.java | {
"start": 440,
"end": 788
} | interface ____ {
@Mapping(target = "nested", source = "value", qualifiedBy = {
SelectMe1.class,
SelectMe2.class
}, qualifiedByName = { "selectMe1", "selectMe2" })
Target map(Source source);
default Nested map(String in) {
return null;
}
// CHECKSTYLE:OFF
| ErroneousMessageByAnnotationAndNamedMapper |
java | apache__camel | components/camel-plc4x/src/test/java/org/apache/camel/component/plc4x/Plc4XComponentTest.java | {
"start": 1101,
"end": 2717
} | class ____ extends CamelTestSupport {
@Test
public void testSimpleRouting() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMinimumMessageCount(1);
mock.expectedMessageCount(2);
template.sendBody("direct:plc4x", Collections.singletonList("irrelevant"));
template.sendBody("direct:plc4x2", Collections.singletonList("irrelevant"));
MockEndpoint.assertIsSatisfied(context, 2, TimeUnit.SECONDS);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() throws Exception {
Map<String, String> tags = new HashMap<>();
tags.put("Test1", "%TestQuery");
Plc4XEndpoint producer = getContext().getEndpoint("plc4x:mock:10.10.10.1/1/1", Plc4XEndpoint.class);
producer.setTags(tags);
producer.setAutoReconnect(true);
from("direct:plc4x")
.setBody(constant(Collections.singletonMap("test", Collections.singletonMap("testAddress", false))))
.to("plc4x:mock:10.10.10.1/1/1")
.to("mock:result");
from("direct:plc4x2")
.setBody(constant(Collections.singletonMap("test2", Collections.singletonMap("testAddress2", 0x05))))
.to("plc4x:mock:10.10.10.1/1/1")
.to("mock:result");
from(producer)
.log("Got ${body}");
}
};
}
}
| Plc4XComponentTest |
java | apache__flink | flink-formats/flink-sequence-file/src/test/java/org/apache/flink/formats/sequencefile/SerializableHadoopConfigurationTest.java | {
"start": 1252,
"end": 3737
} | class ____ {
private static final String TEST_KEY = "test-key";
private static final String TEST_VALUE = "test-value";
private Configuration configuration;
@BeforeEach
void createConfigWithCustomProperty() {
this.configuration = new Configuration();
configuration.set(TEST_KEY, TEST_VALUE);
}
@Test
void customPropertiesSurviveSerializationDeserialization()
throws IOException, ClassNotFoundException {
final SerializableHadoopConfiguration serializableConfigUnderTest =
new SerializableHadoopConfiguration(configuration);
final byte[] serializedConfigUnderTest = serializeAndGetBytes(serializableConfigUnderTest);
final SerializableHadoopConfiguration deserializableConfigUnderTest =
deserializeAndGetConfiguration(serializedConfigUnderTest);
Assertions.<Configuration>assertThat(deserializableConfigUnderTest.get())
.describedAs(
"a Hadoop Configuration with property: key=%s and value=%s",
TEST_KEY, TEST_VALUE)
.satisfies(
actualConfig -> {
Assertions.assertThat(actualConfig)
.isNotSameAs(serializableConfigUnderTest.get());
Assertions.assertThat(actualConfig.get(TEST_KEY))
.isEqualTo(serializableConfigUnderTest.get().get(TEST_KEY));
});
}
// ---------------------------------------- Helper Methods
// ---------------------------------------- //
private byte[] serializeAndGetBytes(SerializableHadoopConfiguration serializableConfigUnderTest)
throws IOException {
try (ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
ObjectOutputStream out = new ObjectOutputStream(byteStream)) {
out.writeObject(serializableConfigUnderTest);
out.flush();
return byteStream.toByteArray();
}
}
private SerializableHadoopConfiguration deserializeAndGetConfiguration(byte[] serializedConfig)
throws IOException, ClassNotFoundException {
try (ObjectInputStream in =
new ObjectInputStream(new ByteArrayInputStream(serializedConfig))) {
return (SerializableHadoopConfiguration) in.readObject();
}
}
}
| SerializableHadoopConfigurationTest |
java | alibaba__nacos | plugin/control/src/main/java/com/alibaba/nacos/plugin/control/rule/parser/ConnectionControlRuleParser.java | {
"start": 826,
"end": 915
} | interface ____ extends RuleParser<ConnectionControlRule> {
}
| ConnectionControlRuleParser |
java | spring-projects__spring-framework | spring-websocket/src/test/java/org/springframework/web/socket/messaging/WebSocketStompClientTests.java | {
"start": 2928,
"end": 17097
} | class ____ {
@Mock
private TaskScheduler taskScheduler;
@Mock
private ConnectionHandlingStompSession stompSession;
@Mock
private WebSocketSession webSocketSession;
private TestWebSocketStompClient stompClient;
private ArgumentCaptor<WebSocketHandler> webSocketHandlerCaptor;
private CompletableFuture<WebSocketSession> handshakeFuture;
@BeforeEach
void setUp() {
WebSocketClient webSocketClient = mock();
this.stompClient = new TestWebSocketStompClient(webSocketClient);
this.stompClient.setTaskScheduler(this.taskScheduler);
this.stompClient.setStompSession(this.stompSession);
this.webSocketHandlerCaptor = ArgumentCaptor.forClass(WebSocketHandler.class);
this.handshakeFuture = new CompletableFuture<>();
given(webSocketClient.execute(this.webSocketHandlerCaptor.capture(), any(), any(URI.class)))
.willReturn(this.handshakeFuture);
}
@Test
void webSocketHandshakeFailure() {
connect();
IllegalStateException handshakeFailure = new IllegalStateException("simulated exception");
this.handshakeFuture.completeExceptionally(handshakeFailure);
verify(this.stompSession).afterConnectFailure(same(handshakeFailure));
}
@Test
void webSocketConnectionEstablished() throws Exception {
connect().afterConnectionEstablished(this.webSocketSession);
verify(this.stompSession).afterConnected(notNull());
}
@Test
void webSocketTransportError() throws Exception {
IllegalStateException exception = new IllegalStateException("simulated exception");
connect().handleTransportError(this.webSocketSession, exception);
verify(this.stompSession).handleFailure(same(exception));
}
@Test
void webSocketConnectionClosed() throws Exception {
connect().afterConnectionClosed(this.webSocketSession, CloseStatus.NORMAL);
verify(this.stompSession).afterConnectionClosed();
}
@Test
@SuppressWarnings({"unchecked", "rawtypes"})
void handleWebSocketMessage() throws Exception {
String text = "SEND\na:alpha\n\nMessage payload\0";
connect().handleMessage(this.webSocketSession, new TextMessage(text));
ArgumentCaptor<Message> captor = ArgumentCaptor.forClass(Message.class);
verify(this.stompSession).handleMessage(captor.capture());
Message<byte[]> message = captor.getValue();
assertThat(message).isNotNull();
StompHeaderAccessor accessor = MessageHeaderAccessor.getAccessor(message, StompHeaderAccessor.class);
StompHeaders headers = StompHeaders.readOnlyStompHeaders(accessor.toNativeHeaderMap());
assertThat(accessor.getCommand()).isEqualTo(StompCommand.SEND);
assertThat(headers.getFirst("a")).isEqualTo("alpha");
assertThat(new String(message.getPayload(), StandardCharsets.UTF_8)).isEqualTo("Message payload");
}
@Test
@SuppressWarnings({"unchecked", "rawtypes"})
void handleWebSocketMessageSplitAcrossTwoMessage() throws Exception {
WebSocketHandler webSocketHandler = connect();
String part1 = "SEND\na:alpha\n\nMessage";
webSocketHandler.handleMessage(this.webSocketSession, new TextMessage(part1));
verifyNoMoreInteractions(this.stompSession);
String part2 = " payload\0";
webSocketHandler.handleMessage(this.webSocketSession, new TextMessage(part2));
ArgumentCaptor<Message> captor = ArgumentCaptor.forClass(Message.class);
verify(this.stompSession).handleMessage(captor.capture());
Message<byte[]> message = captor.getValue();
assertThat(message).isNotNull();
StompHeaderAccessor accessor = MessageHeaderAccessor.getAccessor(message, StompHeaderAccessor.class);
StompHeaders headers = StompHeaders.readOnlyStompHeaders(accessor.toNativeHeaderMap());
assertThat(accessor.getCommand()).isEqualTo(StompCommand.SEND);
assertThat(headers.getFirst("a")).isEqualTo("alpha");
assertThat(new String(message.getPayload(), StandardCharsets.UTF_8)).isEqualTo("Message payload");
}
@Test
@SuppressWarnings({"unchecked", "rawtypes"})
void handleWebSocketMessageBinary() throws Exception {
String text = "SEND\na:alpha\n\nMessage payload\0";
connect().handleMessage(this.webSocketSession, new BinaryMessage(text.getBytes(StandardCharsets.UTF_8)));
ArgumentCaptor<Message> captor = ArgumentCaptor.forClass(Message.class);
verify(this.stompSession).handleMessage(captor.capture());
Message<byte[]> message = captor.getValue();
assertThat(message).isNotNull();
StompHeaderAccessor accessor = MessageHeaderAccessor.getAccessor(message, StompHeaderAccessor.class);
StompHeaders headers = StompHeaders.readOnlyStompHeaders(accessor.toNativeHeaderMap());
assertThat(accessor.getCommand()).isEqualTo(StompCommand.SEND);
assertThat(headers.getFirst("a")).isEqualTo("alpha");
assertThat(new String(message.getPayload(), StandardCharsets.UTF_8)).isEqualTo("Message payload");
}
@Test
void handleWebSocketMessagePong() throws Exception {
connect().handleMessage(this.webSocketSession, new PongMessage());
verifyNoMoreInteractions(this.stompSession);
}
@Test
void sendWebSocketMessage() throws Exception {
StompHeaderAccessor accessor = StompHeaderAccessor.create(StompCommand.SEND);
accessor.setDestination("/topic/foo");
byte[] payload = "payload".getBytes(StandardCharsets.UTF_8);
getTcpConnection().sendAsync(MessageBuilder.createMessage(payload, accessor.getMessageHeaders()));
ArgumentCaptor<TextMessage> textMessageCaptor = ArgumentCaptor.forClass(TextMessage.class);
verify(this.webSocketSession).sendMessage(textMessageCaptor.capture());
TextMessage textMessage = textMessageCaptor.getValue();
assertThat(textMessage).isNotNull();
assertThat(textMessage.getPayload()).isEqualTo("SEND\ndestination:/topic/foo\ncontent-length:7\n\npayload\0");
}
@Test
void sendWebSocketMessageExceedOutboundMessageSizeLimit() throws Exception {
stompClient.setOutboundMessageSizeLimit(30);
StompHeaderAccessor accessor = StompHeaderAccessor.create(StompCommand.SEND);
accessor.setDestination("/topic/foo");
byte[] payload = "payload".getBytes(StandardCharsets.UTF_8);
getTcpConnection().sendAsync(MessageBuilder.createMessage(payload, accessor.getMessageHeaders()));
ArgumentCaptor<TextMessage> textMessageCaptor = ArgumentCaptor.forClass(TextMessage.class);
verify(this.webSocketSession, times(2)).sendMessage(textMessageCaptor.capture());
TextMessage textMessage = textMessageCaptor.getAllValues().get(0);
assertThat(textMessage).isNotNull();
assertThat(textMessage.getPayload()).isEqualTo("SEND\ndestination:/topic/foo\nco");
assertThat(textMessage.getPayload().getBytes().length).isEqualTo(30);
textMessage = textMessageCaptor.getAllValues().get(1);
assertThat(textMessage).isNotNull();
assertThat(textMessage.getPayload()).isEqualTo("ntent-length:7\n\npayload\0");
assertThat(textMessage.getPayload().getBytes().length).isEqualTo(24);
}
@Test
void sendWebSocketBinary() throws Exception {
StompHeaderAccessor accessor = StompHeaderAccessor.create(StompCommand.SEND);
accessor.setDestination("/b");
accessor.setContentType(MimeTypeUtils.APPLICATION_OCTET_STREAM);
byte[] payload = "payload".getBytes(StandardCharsets.UTF_8);
getTcpConnection().sendAsync(MessageBuilder.createMessage(payload, accessor.getMessageHeaders()));
ArgumentCaptor<BinaryMessage> binaryMessageCaptor = ArgumentCaptor.forClass(BinaryMessage.class);
verify(this.webSocketSession).sendMessage(binaryMessageCaptor.capture());
BinaryMessage binaryMessage = binaryMessageCaptor.getValue();
assertThat(binaryMessage).isNotNull();
assertThat(new String(binaryMessage.getPayload().array(), StandardCharsets.UTF_8))
.isEqualTo("SEND\ndestination:/b\ncontent-type:application/octet-stream\ncontent-length:7\n\npayload\0");
}
@Test
void sendWebSocketBinaryExceedOutboundMessageSizeLimit() throws Exception {
stompClient.setOutboundMessageSizeLimit(50);
StompHeaderAccessor accessor = StompHeaderAccessor.create(StompCommand.SEND);
accessor.setDestination("/b");
accessor.setContentType(MimeTypeUtils.APPLICATION_OCTET_STREAM);
byte[] payload = "payload".getBytes(StandardCharsets.UTF_8);
getTcpConnection().sendAsync(MessageBuilder.createMessage(payload, accessor.getMessageHeaders()));
ArgumentCaptor<BinaryMessage> binaryMessageCaptor = ArgumentCaptor.forClass(BinaryMessage.class);
verify(this.webSocketSession, times(2)).sendMessage(binaryMessageCaptor.capture());
BinaryMessage binaryMessage = binaryMessageCaptor.getAllValues().get(0);
assertThat(binaryMessage).isNotNull();
assertThat(new String(binaryMessage.getPayload().array(), StandardCharsets.UTF_8))
.isEqualTo("SEND\ndestination:/b\ncontent-type:application/octet");
assertThat(binaryMessage.getPayload().array().length).isEqualTo(50);
binaryMessage = binaryMessageCaptor.getAllValues().get(1);
assertThat(binaryMessage).isNotNull();
assertThat(new String(binaryMessage.getPayload().array(), StandardCharsets.UTF_8))
.isEqualTo("-stream\ncontent-length:7\n\npayload\0");
assertThat(binaryMessage.getPayload().array().length).isEqualTo(34);
}
@Test
@SuppressWarnings({ "rawtypes", "unchecked" })
void reassembleReceivedIFragmentedFrames() throws Exception {
WebSocketHandler handler = connect();
handler.handleMessage(this.webSocketSession, new TextMessage("SEND\ndestination:/topic/foo\nco"));
handler.handleMessage(this.webSocketSession, new TextMessage("ntent-length:7\n\npayload\0"));
ArgumentCaptor<Message> receiveMessageCaptor = ArgumentCaptor.forClass(Message.class);
verify(this.stompSession).handleMessage(receiveMessageCaptor.capture());
Message<byte[]> receiveMessage = receiveMessageCaptor.getValue();
assertThat(receiveMessage).isNotNull();
StompHeaderAccessor headers = StompHeaderAccessor.wrap(receiveMessage);
assertThat(headers.toNativeHeaderMap()).hasSize(2);
assertThat(headers.getContentLength()).isEqualTo(7);
assertThat(headers.getDestination()).isEqualTo("/topic/foo");
assertThat(new String(receiveMessage.getPayload())).isEqualTo("payload");
}
@Test
void heartbeatDefaultValue() {
WebSocketStompClient stompClient = new WebSocketStompClient(mock());
assertThat(stompClient.getDefaultHeartbeat()).isEqualTo(new long[] {0, 0});
StompHeaders connectHeaders = stompClient.processConnectHeaders(null);
assertThat(connectHeaders.getHeartbeat()).isEqualTo(new long[] {0, 0});
}
@Test
void heartbeatDefaultValueWithScheduler() {
WebSocketStompClient stompClient = new WebSocketStompClient(mock());
stompClient.setTaskScheduler(mock());
assertThat(stompClient.getDefaultHeartbeat()).isEqualTo(new long[] {10000, 10000});
StompHeaders connectHeaders = stompClient.processConnectHeaders(null);
assertThat(connectHeaders.getHeartbeat()).isEqualTo(new long[] {10000, 10000});
}
@Test
void heartbeatDefaultValueSetWithoutScheduler() {
WebSocketStompClient stompClient = new WebSocketStompClient(mock());
stompClient.setDefaultHeartbeat(new long[] {5, 5});
assertThatIllegalStateException().isThrownBy(() ->
stompClient.processConnectHeaders(null));
}
@Test
void readInactivityAfterDelayHasElapsed() throws Exception {
TcpConnection<byte[]> tcpConnection = getTcpConnection();
Runnable runnable = mock();
long delay = 2;
tcpConnection.onReadInactivity(runnable, delay);
testInactivityTaskScheduling(runnable, delay, 10);
}
@Test
void readInactivityBeforeDelayHasElapsed() throws Exception {
TcpConnection<byte[]> tcpConnection = getTcpConnection();
Runnable runnable = mock();
long delay = 10000;
tcpConnection.onReadInactivity(runnable, delay);
testInactivityTaskScheduling(runnable, delay, 0);
}
@Test
void writeInactivityAfterDelayHasElapsed() throws Exception {
TcpConnection<byte[]> tcpConnection = getTcpConnection();
Runnable runnable = mock();
long delay = 2;
tcpConnection.onWriteInactivity(runnable, delay);
testInactivityTaskScheduling(runnable, delay, 10);
}
@Test
void writeInactivityBeforeDelayHasElapsed() throws Exception {
TcpConnection<byte[]> tcpConnection = getTcpConnection();
Runnable runnable = mock();
long delay = 1000;
tcpConnection.onWriteInactivity(runnable, delay);
testInactivityTaskScheduling(runnable, delay, 0);
}
@Test
@SuppressWarnings({"rawtypes", "unchecked"})
void cancelInactivityTasks() throws Exception {
TcpConnection<byte[]> tcpConnection = getTcpConnection();
ScheduledFuture future = mock();
given(this.taskScheduler.scheduleWithFixedDelay(any(), eq(Duration.ofMillis(1)))).willReturn(future);
tcpConnection.onReadInactivity(mock(), 2L);
tcpConnection.onWriteInactivity(mock(), 2L);
WebSocketHandler handler = this.webSocketHandlerCaptor.getValue();
TcpConnection<?> connection = (TcpConnection<?>) WebSocketHandlerDecorator.unwrap(handler);
connection.close();
verify(future, times(2)).cancel(true);
verifyNoMoreInteractions(future);
}
private WebSocketHandler connect() {
this.stompClient.connectAsync("/foo", mock());
verify(this.stompSession).getSession();
verifyNoMoreInteractions(this.stompSession);
WebSocketHandler webSocketHandler = this.webSocketHandlerCaptor.getValue();
assertThat(webSocketHandler).isNotNull();
return webSocketHandler;
}
@SuppressWarnings("unchecked")
private TcpConnection<byte[]> getTcpConnection() throws Exception {
WebSocketHandler handler = connect();
handler.afterConnectionEstablished(this.webSocketSession);
if (handler instanceof WebSocketHandlerDecorator handlerDecorator) {
handler = handlerDecorator.getLastHandler();
}
return (TcpConnection<byte[]>) handler;
}
private void testInactivityTaskScheduling(Runnable runnable, long delay, long sleepTime)
throws InterruptedException {
ArgumentCaptor<Runnable> inactivityTaskCaptor = ArgumentCaptor.forClass(Runnable.class);
verify(this.taskScheduler).scheduleWithFixedDelay(inactivityTaskCaptor.capture(), eq(Duration.ofMillis(delay/2)));
verifyNoMoreInteractions(this.taskScheduler);
if (sleepTime > 0) {
Thread.sleep(sleepTime);
}
Runnable inactivityTask = inactivityTaskCaptor.getValue();
assertThat(inactivityTask).isNotNull();
inactivityTask.run();
if (sleepTime > 0) {
verify(runnable).run();
}
else {
verifyNoMoreInteractions(runnable);
}
}
private static | WebSocketStompClientTests |
java | apache__flink | flink-core/src/main/java/org/apache/flink/util/CollectionUtil.java | {
"start": 1581,
"end": 9330
} | class ____ {
/** A safe maximum size for arrays in the JVM. */
public static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
/** The default load factor for hash maps create with this util class. */
static final float HASH_MAP_DEFAULT_LOAD_FACTOR = 0.75f;
private CollectionUtil() {
throw new AssertionError();
}
/** Returns true if the given collection is null or empty. */
public static boolean isNullOrEmpty(Collection<?> collection) {
return collection == null || collection.isEmpty();
}
/** Returns true if the given collection is empty or contains only null elements. */
public static boolean isEmptyOrAllElementsNull(Collection<?> collection) {
for (Object o : collection) {
if (o != null) {
return false;
}
}
return true;
}
public static boolean isNullOrEmpty(Map<?, ?> map) {
return map == null || map.isEmpty();
}
public static <T> Set<T> ofNullable(@Nullable T obj) {
return obj == null ? Collections.emptySet() : Collections.singleton(obj);
}
public static <T, R> Stream<R> mapWithIndex(
Collection<T> input, final BiFunction<T, Integer, R> mapper) {
final AtomicInteger count = new AtomicInteger(0);
return input.stream().map(element -> mapper.apply(element, count.getAndIncrement()));
}
/** Partition a collection into approximately n buckets. */
public static <T> Collection<List<T>> partition(Collection<T> elements, int numBuckets) {
Map<Integer, List<T>> buckets = newHashMapWithExpectedSize(numBuckets);
int initialCapacity = elements.size() / numBuckets;
int index = 0;
for (T element : elements) {
int bucket = index % numBuckets;
buckets.computeIfAbsent(bucket, key -> new ArrayList<>(initialCapacity)).add(element);
index++;
}
return buckets.values();
}
public static <I, O> Collection<O> project(Collection<I> collection, Function<I, O> projector) {
return collection.stream().map(projector).collect(toList());
}
/**
* Collects the elements in the Iterable in a List. If the iterable argument is null, this
* method returns an empty list.
*/
public static <E> List<E> iterableToList(@Nullable Iterable<E> iterable) {
if (iterable == null) {
return Collections.emptyList();
}
final ArrayList<E> list = new ArrayList<>();
iterable.iterator().forEachRemaining(list::add);
return list;
}
/**
* Collects the elements in the Iterator in a List. If the iterator argument is null, this
* method returns an empty list.
*/
public static <E> List<E> iteratorToList(@Nullable Iterator<E> iterator) {
if (iterator == null) {
return Collections.emptyList();
}
final ArrayList<E> list = new ArrayList<>();
iterator.forEachRemaining(list::add);
return list;
}
/** Returns an immutable {@link Map.Entry}. */
public static <K, V> Map.Entry<K, V> entry(K k, V v) {
return new AbstractMap.SimpleImmutableEntry<>(k, v);
}
/** Returns an immutable {@link Map} from the provided entries. */
@SafeVarargs
public static <K, V> Map<K, V> map(Map.Entry<K, V>... entries) {
if (entries == null) {
return Collections.emptyMap();
}
Map<K, V> map = new HashMap<>();
for (Map.Entry<K, V> entry : entries) {
map.put(entry.getKey(), entry.getValue());
}
return Collections.unmodifiableMap(map);
}
/**
* Creates a new {@link HashMap} of the expected size, i.e. a hash map that will not rehash if
* expectedSize many keys are inserted, considering the load factor.
*
* @param expectedSize the expected size of the created hash map.
* @return a new hash map instance with enough capacity for the expected size.
* @param <K> the type of keys maintained by this map.
* @param <V> the type of mapped values.
*/
public static <K, V> HashMap<K, V> newHashMapWithExpectedSize(int expectedSize) {
return new HashMap<>(
computeRequiredCapacity(expectedSize, HASH_MAP_DEFAULT_LOAD_FACTOR),
HASH_MAP_DEFAULT_LOAD_FACTOR);
}
/**
* Creates a new {@link LinkedHashMap} of the expected size, i.e. a hash map that will not
* rehash if expectedSize many keys are inserted, considering the load factor.
*
* @param expectedSize the expected size of the created hash map.
* @return a new hash map instance with enough capacity for the expected size.
* @param <K> the type of keys maintained by this map.
* @param <V> the type of mapped values.
*/
public static <K, V> LinkedHashMap<K, V> newLinkedHashMapWithExpectedSize(int expectedSize) {
return new LinkedHashMap<>(
computeRequiredCapacity(expectedSize, HASH_MAP_DEFAULT_LOAD_FACTOR),
HASH_MAP_DEFAULT_LOAD_FACTOR);
}
/**
* Creates a new {@link HashSet} of the expected size, i.e. a hash set that will not rehash if
* expectedSize many unique elements are inserted, considering the load factor.
*
* @param expectedSize the expected size of the created hash map.
* @return a new hash map instance with enough capacity for the expected size.
* @param <E> the type of elements stored by this set.
*/
public static <E> HashSet<E> newHashSetWithExpectedSize(int expectedSize) {
return new HashSet<>(
computeRequiredCapacity(expectedSize, HASH_MAP_DEFAULT_LOAD_FACTOR),
HASH_MAP_DEFAULT_LOAD_FACTOR);
}
/**
* Creates a new {@link LinkedHashSet} of the expected size, i.e. a hash set that will not
* rehash if expectedSize many unique elements are inserted, considering the load factor.
*
* @param expectedSize the expected size of the created hash map.
* @return a new hash map instance with enough capacity for the expected size.
* @param <E> the type of elements stored by this set.
*/
public static <E> LinkedHashSet<E> newLinkedHashSetWithExpectedSize(int expectedSize) {
return new LinkedHashSet<>(
computeRequiredCapacity(expectedSize, HASH_MAP_DEFAULT_LOAD_FACTOR),
HASH_MAP_DEFAULT_LOAD_FACTOR);
}
/**
* Helper method to compute the right capacity for a hash map with load factor
* HASH_MAP_DEFAULT_LOAD_FACTOR.
*/
@VisibleForTesting
static int computeRequiredCapacity(int expectedSize, float loadFactor) {
Preconditions.checkArgument(expectedSize >= 0);
Preconditions.checkArgument(loadFactor > 0f);
if (expectedSize <= 2) {
return expectedSize + 1;
}
return expectedSize < (Integer.MAX_VALUE / 2 + 1)
? (int) Math.ceil(expectedSize / loadFactor)
: Integer.MAX_VALUE;
}
/**
* Casts the given collection to a subtype. This is an unchecked cast that can lead to runtime
* exceptions.
*
* @param collection the collection to cast.
* @return the collection unchecked-cast to a subtype.
* @param <T> the subtype to cast to.
*/
public static <T> Collection<T> subTypeCast(Collection<? super T> collection) {
@SuppressWarnings("unchecked")
Collection<T> result = (Collection<T>) collection;
return result;
}
/**
* Casts the given collection to a subtype. This is a checked cast.
*
* @param collection the collection to cast.
* @param subTypeClass the | CollectionUtil |
java | dropwizard__dropwizard | dropwizard-jackson/src/test/java/io/dropwizard/jackson/ImplA.java | {
"start": 113,
"end": 151
} | class ____ implements ExampleSPI {
}
| ImplA |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/IrcEndpointBuilderFactory.java | {
"start": 19871,
"end": 26584
} | interface ____
extends
EndpointConsumerBuilder {
default IrcEndpointConsumerBuilder basic() {
return (IrcEndpointConsumerBuilder) this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedIrcEndpointConsumerBuilder bridgeErrorHandler(boolean bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedIrcEndpointConsumerBuilder bridgeErrorHandler(String bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option is a: <code>org.apache.camel.spi.ExceptionHandler</code>
* type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedIrcEndpointConsumerBuilder exceptionHandler(org.apache.camel.spi.ExceptionHandler exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option will be converted to a
* <code>org.apache.camel.spi.ExceptionHandler</code> type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedIrcEndpointConsumerBuilder exceptionHandler(String exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option is a: <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedIrcEndpointConsumerBuilder exchangePattern(org.apache.camel.ExchangePattern exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option will be converted to a
* <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedIrcEndpointConsumerBuilder exchangePattern(String exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
/**
* Whether or not the server supports color codes.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param colors the value to set
* @return the dsl builder
*/
default AdvancedIrcEndpointConsumerBuilder colors(boolean colors) {
doSetProperty("colors", colors);
return this;
}
/**
* Whether or not the server supports color codes.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param colors the value to set
* @return the dsl builder
*/
default AdvancedIrcEndpointConsumerBuilder colors(String colors) {
doSetProperty("colors", colors);
return this;
}
}
/**
* Builder for endpoint producers for the IRC component.
*/
public | AdvancedIrcEndpointConsumerBuilder |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/erroneous/attributereference/ErroneousMapper1.java | {
"start": 315,
"end": 493
} | interface ____ {
@Mappings({
@Mapping(target = "foo", source = "source.foobar")
})
Target sourceToTarget(Source source, DummySource source1);
}
| ErroneousMapper1 |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/UnsynchronizedOverridesSynchronizedTest.java | {
"start": 5226,
"end": 5590
} | class ____ extends Throwable {
@Override
public Throwable fillInStackTrace() {
return this;
}
}
""")
.doTest();
}
@Test
public void ignoreOverrideThatReturnsConstant() {
compilationHelper
.addSourceLines(
"A.java",
"""
| Test |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/mysql/create/MySQLCreateMaterializedViewTest5.java | {
"start": 1003,
"end": 2977
} | class ____ extends MysqlTest {
public void test_types() throws Exception {
String sql = //
"CREATE MATERIALIZED VIEW mymv (\n" +
" PRIMARY KEY(id)\n" +
")\n" +
"DISTRIBUTED BY HASH (id)\n" +
"REFRESH COMPLETE ON DEMAND\n" +
"START WITH '2020-08-20 14:50:00'\n" +
"NEXT current_date() + INTERVAL 15 DAY\n" +
"ENABLE QUERY REWRITE\n" +
"AS SELECT id FROM base;";
SQLStatement stmt = SQLUtils.parseSingleMysqlStatement(sql);
assertEquals("CREATE MATERIALIZED VIEW mymv (\n" +
"\tPRIMARY KEY (id)\n" +
")\n" +
"DISTRIBUTED BY HASH(id)\n" +
"REFRESH COMPLETE ON DEMAND\n" +
"START WITH '2020-08-20 14:50:00' NEXT current_date() + INTERVAL 15 DAY\n" +
"ENABLE QUERY REWRITE\n" +
"AS\n" +
"SELECT id\n" +
"FROM base;",
SQLUtils.toSQLString(stmt, DbType.mysql, null, VisitorFeature.OutputDistributedLiteralInCreateTableStmt));
OracleSchemaStatVisitor visitor = new OracleSchemaStatVisitor();
stmt.accept(visitor);
System.out.println("Tables : " + visitor.getTables());
System.out.println("fields : " + visitor.getColumns());
System.out.println("coditions : " + visitor.getConditions());
System.out.println("relationships : " + visitor.getRelationships());
System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(1, visitor.getTables().size());
assertEquals(1, visitor.getColumns().size());
assertTrue(visitor.getColumns().contains(new TableStat.Column("base", "id")));
}
}
| MySQLCreateMaterializedViewTest5 |
java | apache__kafka | clients/src/test/java/org/apache/kafka/common/requests/AlterPartitionRequestTest.java | {
"start": 1550,
"end": 4054
} | class ____ {
Uuid topicId = Uuid.randomUuid();
@ParameterizedTest
@ApiKeyVersionsSource(apiKey = ApiKeys.ALTER_PARTITION)
public void testBuildAlterPartitionRequest(short version) {
AlterPartitionRequestData request = new AlterPartitionRequestData()
.setBrokerId(1)
.setBrokerEpoch(1);
TopicData topicData = new TopicData().setTopicId(topicId);
List<BrokerState> newIsrWithBrokerEpoch = new LinkedList<>();
newIsrWithBrokerEpoch.add(new BrokerState().setBrokerId(1).setBrokerEpoch(1001));
newIsrWithBrokerEpoch.add(new BrokerState().setBrokerId(2).setBrokerEpoch(1002));
newIsrWithBrokerEpoch.add(new BrokerState().setBrokerId(3).setBrokerEpoch(1003));
topicData.partitions().add(new PartitionData()
.setPartitionIndex(0)
.setLeaderEpoch(1)
.setPartitionEpoch(10)
.setNewIsrWithEpochs(newIsrWithBrokerEpoch));
request.topics().add(topicData);
AlterPartitionRequest.Builder builder = new AlterPartitionRequest.Builder(request);
AlterPartitionRequest alterPartitionRequest = builder.build(version);
assertEquals(1, alterPartitionRequest.data().topics().size());
assertEquals(1, alterPartitionRequest.data().topics().get(0).partitions().size());
PartitionData partitionData = alterPartitionRequest.data().topics().get(0).partitions().get(0);
if (version < 3) {
assertEquals(Arrays.asList(1, 2, 3), partitionData.newIsr());
assertTrue(partitionData.newIsrWithEpochs().isEmpty());
} else {
assertEquals(newIsrWithBrokerEpoch, partitionData.newIsrWithEpochs());
assertTrue(partitionData.newIsr().isEmpty());
}
// Build the request again to make sure build() is idempotent.
alterPartitionRequest = builder.build(version);
assertEquals(1, alterPartitionRequest.data().topics().size());
assertEquals(1, alterPartitionRequest.data().topics().get(0).partitions().size());
alterPartitionRequest.data().topics().get(0).partitions().get(0);
if (version < 3) {
assertEquals(Arrays.asList(1, 2, 3), partitionData.newIsr());
assertTrue(partitionData.newIsrWithEpochs().isEmpty());
} else {
assertEquals(newIsrWithBrokerEpoch, partitionData.newIsrWithEpochs());
assertTrue(partitionData.newIsr().isEmpty());
}
}
}
| AlterPartitionRequestTest |
java | quarkusio__quarkus | extensions/mongodb-client/runtime/src/main/java/io/quarkus/mongodb/health/MongoHealthCheck.java | {
"start": 6117,
"end": 6829
} | class ____ implements Supplier<Uni<Tuple2<String, String>>> {
final String name;
final ReactiveMongoClient client;
final MongoClientConfig config;
ReactiveMongoClientCheck(String name, ReactiveMongoClient client, MongoClientConfig config) {
this.name = name;
this.client = client;
this.config = config;
}
public Uni<Tuple2<String, String>> get() {
return client.getDatabase(config.healthDatabase()).runCommand(COMMAND)
.ifNoItem().after(config.readTimeout().orElse(DEFAULT_TIMEOUT)).fail()
.onItemOrFailure().transform(toResult(name));
}
}
}
| ReactiveMongoClientCheck |
java | elastic__elasticsearch | x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/TokensInvalidationResultTests.java | {
"start": 887,
"end": 3073
} | class ____ extends ESTestCase {
public void testToXcontent() throws Exception {
TokensInvalidationResult result = new TokensInvalidationResult(
Arrays.asList("token1", "token2"),
Arrays.asList("token3", "token4"),
Arrays.asList(
new ElasticsearchException("foo", new IllegalStateException("bar")),
new ElasticsearchException("boo", new IllegalStateException("far"))
),
RestStatus.OK
);
try (XContentBuilder builder = JsonXContent.contentBuilder()) {
result.toXContent(builder, ToXContent.EMPTY_PARAMS);
assertThat(Strings.toString(builder), equalTo(XContentHelper.stripWhitespace("""
{
"invalidated_tokens": 2,
"previously_invalidated_tokens": 2,
"error_count": 2,
"error_details": [
{
"type": "exception",
"reason": "foo",
"caused_by": {
"type": "illegal_state_exception",
"reason": "bar"
}
},
{
"type": "exception",
"reason": "boo",
"caused_by": {
"type": "illegal_state_exception",
"reason": "far"
}
}
]
}""")));
}
}
public void testToXcontentWithNoErrors() throws Exception {
TokensInvalidationResult result = new TokensInvalidationResult(
Arrays.asList("token1", "token2"),
Collections.emptyList(),
Collections.emptyList(),
RestStatus.OK
);
try (XContentBuilder builder = JsonXContent.contentBuilder()) {
result.toXContent(builder, ToXContent.EMPTY_PARAMS);
assertThat(Strings.toString(builder), equalTo("""
{"invalidated_tokens":2,"previously_invalidated_tokens":0,"error_count":0}"""));
}
}
}
| TokensInvalidationResultTests |
java | apache__rocketmq | remoting/src/main/java/org/apache/rocketmq/remoting/netty/TlsSystemConfig.java | {
"start": 948,
"end": 6658
} | class ____ {
public static final String TLS_SERVER_MODE = "tls.server.mode";
public static final String TLS_ENABLE = "tls.enable";
public static final String TLS_CONFIG_FILE = "tls.config.file";
public static final String TLS_TEST_MODE_ENABLE = "tls.test.mode.enable";
public static final String TLS_SERVER_NEED_CLIENT_AUTH = "tls.server.need.client.auth";
public static final String TLS_SERVER_KEYPATH = "tls.server.keyPath";
public static final String TLS_SERVER_KEYPASSWORD = "tls.server.keyPassword";
public static final String TLS_SERVER_CERTPATH = "tls.server.certPath";
public static final String TLS_SERVER_AUTHCLIENT = "tls.server.authClient";
public static final String TLS_SERVER_TRUSTCERTPATH = "tls.server.trustCertPath";
public static final String TLS_CLIENT_KEYPATH = "tls.client.keyPath";
public static final String TLS_CLIENT_KEYPASSWORD = "tls.client.keyPassword";
public static final String TLS_CLIENT_CERTPATH = "tls.client.certPath";
public static final String TLS_CLIENT_AUTHSERVER = "tls.client.authServer";
public static final String TLS_CLIENT_TRUSTCERTPATH = "tls.client.trustCertPath";
public static final String TLS_CIPHERS = "tls.ciphers";
public static final String TLS_PROTOCOLS = "tls.protocols";
/**
* To determine whether use SSL in client-side, include SDK client and BrokerOuterAPI
*/
public static boolean tlsEnable = Boolean.parseBoolean(System.getProperty(TLS_ENABLE, "false"));
/**
* To determine whether use test mode when initialize TLS context
*/
public static boolean tlsTestModeEnable = Boolean.parseBoolean(System.getProperty(TLS_TEST_MODE_ENABLE, "true"));
/**
* Indicates the state of the {@link javax.net.ssl.SSLEngine} with respect to client authentication.
* This configuration item really only applies when building the server-side {@link SslContext},
* and can be set to none, require or optional.
*/
public static String tlsServerNeedClientAuth = System.getProperty(TLS_SERVER_NEED_CLIENT_AUTH, "none");
/**
* The store path of server-side private key
*/
public static String tlsServerKeyPath = System.getProperty(TLS_SERVER_KEYPATH, null);
/**
* The password of the server-side private key
*/
public static String tlsServerKeyPassword = System.getProperty(TLS_SERVER_KEYPASSWORD, null);
/**
* The store path of server-side X.509 certificate chain in PEM format
*/
public static String tlsServerCertPath = System.getProperty(TLS_SERVER_CERTPATH, null);
/**
* To determine whether verify the client endpoint's certificate strictly
*/
public static boolean tlsServerAuthClient = Boolean.parseBoolean(System.getProperty(TLS_SERVER_AUTHCLIENT, "false"));
/**
* The store path of trusted certificates for verifying the client endpoint's certificate
*/
public static String tlsServerTrustCertPath = System.getProperty(TLS_SERVER_TRUSTCERTPATH, null);
/**
* The store path of client-side private key
*/
public static String tlsClientKeyPath = System.getProperty(TLS_CLIENT_KEYPATH, null);
/**
* The password of the client-side private key
*/
public static String tlsClientKeyPassword = System.getProperty(TLS_CLIENT_KEYPASSWORD, null);
/**
* The store path of client-side X.509 certificate chain in PEM format
*/
public static String tlsClientCertPath = System.getProperty(TLS_CLIENT_CERTPATH, null);
/**
* To determine whether verify the server endpoint's certificate strictly
*/
public static boolean tlsClientAuthServer = Boolean.parseBoolean(System.getProperty(TLS_CLIENT_AUTHSERVER, "false"));
/**
* The store path of trusted certificates for verifying the server endpoint's certificate
*/
public static String tlsClientTrustCertPath = System.getProperty(TLS_CLIENT_TRUSTCERTPATH, null);
/**
* For server, three SSL modes are supported: disabled, permissive and enforcing.
* For client, use {@link TlsSystemConfig#tlsEnable} to determine whether use SSL.
* <ol>
* <li><strong>disabled:</strong> SSL is not supported; any incoming SSL handshake will be rejected, causing connection closed.</li>
* <li><strong>permissive:</strong> SSL is optional, aka, server in this mode can serve client connections with or without SSL;</li>
* <li><strong>enforcing:</strong> SSL is required, aka, non SSL connection will be rejected.</li>
* </ol>
*/
public static TlsMode tlsMode = TlsMode.parse(System.getProperty(TLS_SERVER_MODE, "permissive"));
/**
* A config file to store the above TLS related configurations,
* except {@link TlsSystemConfig#tlsMode} and {@link TlsSystemConfig#tlsEnable}
*/
public static String tlsConfigFile = System.getProperty(TLS_CONFIG_FILE, "/etc/rocketmq/tls.properties");
/**
* The ciphers to be used in TLS
* <ol>
* <li>If null, use the default ciphers</li>
* <li>Otherwise, use the ciphers specified in this string, eg: -Dtls.ciphers=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256</li>
* </ol>
*/
public static String tlsCiphers = System.getProperty(TLS_CIPHERS, null);
/**
* The protocols to be used in TLS
* <ol>
* <li>If null, use the default protocols</li>
* <li>Otherwise, use the protocols specified in this string, eg: -Dtls.protocols=TLSv1.2,TLSv1.3</li>
* </ol>
*/
public static String tlsProtocols = System.getProperty(TLS_PROTOCOLS, null);
}
| TlsSystemConfig |
java | processing__processing4 | java/src/processing/mode/java/JavaMode.java | {
"start": 1312,
"end": 12851
} | class ____ extends Mode {
public Editor createEditor(Base base, String path,
EditorState state) throws EditorException {
return new JavaEditor(base, path, state, this);
}
public JavaMode(Base base, File folder) {
super(base, folder);
loadPreferences();
loadSuggestionsMap();
}
public String getTitle() {
return "Java";
}
// . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
public File[] getExampleCategoryFolders() {
return new File[] {
new File(examplesFolder, "Basics"),
new File(examplesFolder, "Topics"),
new File(examplesFolder, "Demos"),
new File(examplesFolder, "Books")
};
}
public String getDefaultExtension() {
return "pde";
}
public String[] getExtensions() {
return new String[] { "pde", "java" };
}
public String[] getIgnorable() {
// folder names for exported applications
return Platform.getSupportedVariants().keyArray();
}
public Library getCoreLibrary() {
if (coreLibrary == null) {
File coreFolder = Platform.getContentFile("core");
coreLibrary = new Library(coreFolder);
}
return coreLibrary;
}
// . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
/** Handles the standard Java "Run" or "Present" */
public Runner handleLaunch(Sketch sketch, RunnerListener listener,
final boolean present) throws SketchException {
JavaBuild build = new JavaBuild(sketch);
// String appletClassName = build.build(false);
String appletClassName = build.build(true);
if (appletClassName != null) {
final Runner runtime = new Runner(build, listener);
new Thread(() -> {
// these block until finished
if (present) {
runtime.present(null);
} else {
runtime.launch(null);
}
}).start();
return runtime;
}
return null;
}
/** Start a sketch in tweak mode */
public Runner handleTweak(Sketch sketch,
RunnerListener listener,
JavaEditor editor) throws SketchException {
// first try to build the unmodified code
JavaBuild build = new JavaBuild(sketch);
// String appletClassName = build.build(false);
String appletClassName = build.build(true);
if (appletClassName == null) {
// unmodified build failed, so fail
return null;
}
// if compilation passed, modify the code and build again
// save the original sketch code of the user
editor.initBaseCode();
// check for "// tweak" comment in the sketch
boolean requiresTweak = SketchParser.containsTweakComment(editor.baseCode);
// parse the saved sketch to get all (or only with "//tweak" comment) numbers
final SketchParser parser = new SketchParser(editor.baseCode, requiresTweak);
// add our code to the sketch
final boolean launchInteractive = editor.automateSketch(sketch, parser);
build = new JavaBuild(sketch);
appletClassName = build.build(false);
if (appletClassName != null) {
final Runner runtime = new Runner(build, listener);
new Thread(() -> {
runtime.launch(null);
// next lines are executed when the sketch quits
if (launchInteractive) {
// fix swing deadlock issue: https://github.com/processing/processing/issues/3928
EventQueue.invokeLater(() -> {
editor.initEditorCode(parser.allHandles);
editor.stopTweakMode(parser.allHandles);
});
}
}).start();
if (launchInteractive) {
// fix swing deadlock issue: https://github.com/processing/processing/issues/3928
EventQueue.invokeLater(() -> {
// replace editor code with baseCode
editor.initEditorCode(parser.allHandles);
editor.updateInterface(parser.allHandles, parser.colorBoxes);
editor.startTweakMode();
});
}
return runtime;
}
return null;
}
public boolean handleExportApplication(Sketch sketch) throws SketchException, IOException {
JavaBuild build = new JavaBuild(sketch);
return build.exportApplication();
}
/**
* Any modes that extend JavaMode can override this method to add additional
* JARs to be included in the classpath for code completion and error checking
* @return searchPath: file-paths separated by File.pathSeparatorChar
*/
public String getSearchPath() {
return getCoreLibrary().getJarPath();
}
// . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
// Merged from ExperimentalMode
static volatile boolean errorCheckEnabled = true;
static volatile boolean warningsEnabled = true;
static volatile boolean errorLogsEnabled = false;
static volatile boolean autoSaveEnabled = true;
static volatile boolean autoSavePromptEnabled = true;
static volatile boolean defaultAutoSaveEnabled = true;
static volatile boolean codeCompletionsEnabled = true;
static volatile boolean ccTriggerEnabled = false;
static volatile boolean importSuggestEnabled = true;
static volatile boolean inspectModeHotkeyEnabled = true;
static public int autoSaveInterval = 3; //in minutes
static public final String prefErrorCheck = "pdex.errorCheckEnabled";
static public final String prefWarnings = "pdex.warningsEnabled";
static public final String prefErrorLogs = "pdex.writeErrorLogs";
static public final String prefAutoSave = "pdex.autoSave.autoSaveEnabled";
static public final String prefAutoSaveInterval = "pdex.autoSaveInterval";
static public final String prefAutoSavePrompt = "pdex.autoSave.promptDisplay";
static public final String prefDefaultAutoSave = "pdex.autoSave.autoSaveByDefault";
static public final String COMPLETION_PREF = "pdex.completion";
static public final String COMPLETION_TRIGGER_PREF = "pdex.completion.trigger";
static public final String SUGGEST_IMPORTS_PREF = "pdex.suggest.imports";
static public final String INSPECT_MODE_HOTKEY_PREF = "pdex.inspectMode.hotkey";
/**
* Stores the white list/black list of allowed/blacklisted imports.
* These are defined in suggestions.txt in java mode folder.
*/
private final Set<String> includedSuggestions = ConcurrentHashMap.newKeySet();
private final Set<String> excludedSuggestions = ConcurrentHashMap.newKeySet();
boolean includeSuggestion(String impName) {
return includedSuggestions.contains(impName);
}
boolean excludeSuggestion(String impName) {
return excludedSuggestions.contains(impName);
}
/*
static boolean checkSuggestion(String mapName, String impName) {
return suggestionsMap.containsKey(mapName) &&
suggestionsMap.get(mapName).contains(impName);
}
*/
public void loadPreferences() {
Messages.log("Load PDEX prefs");
errorCheckEnabled = Preferences.getBoolean(prefErrorCheck);
warningsEnabled = Preferences.getBoolean(prefWarnings);
errorLogsEnabled = Preferences.getBoolean(prefErrorLogs);
autoSaveEnabled = Preferences.getBoolean(prefAutoSave);
autoSaveInterval = Preferences.getInteger(prefAutoSaveInterval);
autoSavePromptEnabled = Preferences.getBoolean(prefAutoSavePrompt);
defaultAutoSaveEnabled = Preferences.getBoolean(prefDefaultAutoSave);
codeCompletionsEnabled = Preferences.getBoolean(COMPLETION_PREF);
ccTriggerEnabled = Preferences.getBoolean(COMPLETION_TRIGGER_PREF);
importSuggestEnabled = Preferences.getBoolean(SUGGEST_IMPORTS_PREF);
inspectModeHotkeyEnabled = Preferences.getBoolean(INSPECT_MODE_HOTKEY_PREF);
}
public void savePreferences() {
Messages.log("Saving PDEX prefs");
Preferences.setBoolean(prefErrorCheck, errorCheckEnabled);
Preferences.setBoolean(prefWarnings, warningsEnabled);
Preferences.setBoolean(prefErrorLogs, errorLogsEnabled);
Preferences.setBoolean(prefAutoSave, autoSaveEnabled);
Preferences.setInteger(prefAutoSaveInterval, autoSaveInterval);
Preferences.setBoolean(prefAutoSavePrompt, autoSavePromptEnabled);
Preferences.setBoolean(prefDefaultAutoSave, defaultAutoSaveEnabled);
Preferences.setBoolean(COMPLETION_PREF, codeCompletionsEnabled);
Preferences.setBoolean(COMPLETION_TRIGGER_PREF, ccTriggerEnabled);
Preferences.setBoolean(SUGGEST_IMPORTS_PREF, importSuggestEnabled);
Preferences.setBoolean(INSPECT_MODE_HOTKEY_PREF, inspectModeHotkeyEnabled);
}
private void loadSuggestionsMap() {
Collections.addAll(includedSuggestions, getSuggestionIncludeList());
Collections.addAll(excludedSuggestions, getSuggestionExcludeList());
}
// broken out so that it can be overridden by Android, etc
protected String[] getSuggestionIncludeList() {
return new String[] {
"processing.core.PApplet",
"processing.core.PFont",
"processing.core.PGraphics",
"processing.core.PImage",
"processing.core.PMatrix2D",
"processing.core.PMatrix3D",
"processing.core.PStyle",
"processing.core.PVector",
"processing.core.PShape",
"processing.core.PGraphicsJava2D",
"processing.core.PGraphics2D",
"processing.core.PGraphics3D",
"processing.data.FloatDict",
"processing.data.FloatList",
"processing.data.IntDict",
"processing.data.IntList",
"processing.data.JSONArray",
"processing.data.JSONObject",
"processing.data.StringDict",
"processing.data.StringList",
"processing.data.Table",
"processing.data.XML",
"processing.event.Event",
"processing.event.KeyEvent",
"processing.event.MouseEvent",
"processing.event.TouchEvent",
"processing.opengl.PShader",
"processing.opengl.PGL",
"java.util.ArrayList",
"java.io.BufferedReader",
"java.util.HashMap",
"java.io.PrintWriter",
"java.lang.String"
};
}
// broken out so that it can be overridden by Android, etc
protected String[] getSuggestionExcludeList() {
return new String[] {
"processing.core.PGraphicsRetina2D",
"processing.core.PShapeOBJ",
"processing.core.PShapeSVG",
"processing.data.Sort",
"processing.opengl.FrameBuffer",
"processing.opengl.LinePath",
"processing.opengl.LinePath.PathIterator",
"processing.opengl.LineStroker",
"processing.opengl.PGraphicsOpenGL"
};
}
/*
private void loadSuggestionsMap() {
File suggestionsFile = new File(getFolder(), "suggestions.txt");
if (suggestionsFile.exists()) {
String[] lines = PApplet.loadStrings(suggestionsFile);
if (lines != null) {
for (String line : lines) {
line = line.trim();
if (line.length() > 0 && !line.startsWith("#")) {
int equals = line.indexOf('=');
if (equals != -1) {
String key = line.substring(0, equals).trim();
String value = line.substring(equals + 1).trim();
if (key.equals("include")) {
includedSuggestions.add(value);
} else if (key.equals("exclude")) {
excludedSuggestions.add(value);
} else {
Messages.loge("Should be include or exclude: " + key);
}
} else {
Messages.loge("Bad line found in suggestions file: " + line);
}
}
}
}
} else {
Messages.loge("Suggestions file not found at " + suggestionsFile);
}
}
*/
}
| JavaMode |
java | playframework__playframework | core/play-exceptions/src/main/java/play/api/PlayException.java | {
"start": 348,
"end": 1292
} | class ____ extends UsefulException {
/** Statically compiled Pattern for splitting lines. */
private static final Pattern SPLIT_LINES = Pattern.compile("\\r?\\n");
private final AtomicLong generator = new AtomicLong(System.currentTimeMillis());
/** Generates a new unique exception ID. */
private String nextId() {
return java.lang.Long.toString(generator.incrementAndGet(), 26);
}
public PlayException(String title, String description, Throwable cause) {
super(title + "[" + description + "]", cause);
this.title = title;
this.description = description;
this.id = nextId();
this.cause = cause;
}
public PlayException(String title, String description) {
super(title + "[" + description + "]");
this.title = title;
this.description = description;
this.id = nextId();
this.cause = null;
}
/** Adds source attachment to a Play exception. */
public abstract static | PlayException |
java | apache__commons-lang | src/test/java/org/apache/commons/lang3/time/FastTimeZoneTest.java | {
"start": 1026,
"end": 3964
} | class ____ extends AbstractLangTest {
private static final int HOURS_23 = 23 * 60 * 60 * 1000;
private static final int HOURS_2 = 2 * 60 * 60 * 1000;
private static final int MINUTES_59 = 59 * 60 * 1000;
private static final int MINUTES_5 = 5 * 60 * 1000;
@Test
void testBareGmt() {
assertEquals(FastTimeZone.getGmtTimeZone(), FastTimeZone.getTimeZone(TimeZones.GMT_ID));
}
@Test
void testGetGmtTimeZone() {
assertEquals(0, FastTimeZone.getGmtTimeZone().getRawOffset());
}
@Test
void testGmtPrefix() {
assertEquals(HOURS_23, FastTimeZone.getGmtTimeZone("GMT+23:00").getRawOffset());
assertEquals(-HOURS_23, FastTimeZone.getGmtTimeZone("GMT-23:00").getRawOffset());
}
@Test
void testHoursColonMinutes() {
assertEquals(HOURS_23, FastTimeZone.getGmtTimeZone("23:00").getRawOffset());
assertEquals(HOURS_2, FastTimeZone.getGmtTimeZone("2:00").getRawOffset());
assertEquals(MINUTES_59, FastTimeZone.getGmtTimeZone("00:59").getRawOffset());
assertEquals(MINUTES_5, FastTimeZone.getGmtTimeZone("00:5").getRawOffset());
assertEquals(HOURS_23 + MINUTES_59, FastTimeZone.getGmtTimeZone("23:59").getRawOffset());
assertEquals(HOURS_2 + MINUTES_5, FastTimeZone.getGmtTimeZone("2:5").getRawOffset());
}
@Test
void testHoursMinutes() {
assertEquals(HOURS_23, FastTimeZone.getGmtTimeZone("2300").getRawOffset());
assertEquals(HOURS_2, FastTimeZone.getGmtTimeZone("0200").getRawOffset());
assertEquals(MINUTES_59, FastTimeZone.getGmtTimeZone("0059").getRawOffset());
assertEquals(MINUTES_5, FastTimeZone.getGmtTimeZone("0005").getRawOffset());
assertEquals(HOURS_23 + MINUTES_59, FastTimeZone.getGmtTimeZone("2359").getRawOffset());
assertEquals(HOURS_2 + MINUTES_5, FastTimeZone.getGmtTimeZone("0205").getRawOffset());
}
@Test
void testOlson() {
assertEquals(TimeZones.getTimeZone("America/New_York"), FastTimeZone.getTimeZone("America/New_York"));
}
@Test
void testSign() {
assertEquals(HOURS_23, FastTimeZone.getGmtTimeZone("+23:00").getRawOffset());
assertEquals(HOURS_2, FastTimeZone.getGmtTimeZone("+2:00").getRawOffset());
assertEquals(-HOURS_23, FastTimeZone.getGmtTimeZone("-23:00").getRawOffset());
assertEquals(-HOURS_2, FastTimeZone.getGmtTimeZone("-2:00").getRawOffset());
}
@Test
void testUTC() {
assertEquals(FastTimeZone.getGmtTimeZone(), FastTimeZone.getTimeZone("UTC"));
}
@Test
void testZ() {
assertEquals(FastTimeZone.getGmtTimeZone(), FastTimeZone.getTimeZone("Z"));
}
@Test
void testZeroOffsetsReturnSingleton() {
assertEquals(FastTimeZone.getGmtTimeZone(), FastTimeZone.getTimeZone("+0"));
assertEquals(FastTimeZone.getGmtTimeZone(), FastTimeZone.getTimeZone("-0"));
}
}
| FastTimeZoneTest |
java | elastic__elasticsearch | x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/traintestsplit/AbstractReservoirTrainTestSplitter.java | {
"start": 2097,
"end": 2322
} | class
____ targetSampleCount = (long) Math.max(1.0, samplingRatio * sample.classCount);
// The idea here is that the probability increases as the chances we have to get the target proportion
// for a | long |
java | apache__rocketmq | tools/src/main/java/org/apache/rocketmq/tools/command/consumer/ConsumerProgressSubCommand.java | {
"start": 13168,
"end": 15317
} | class ____ implements Comparable<GroupConsumeInfo> {
private String group;
private int version;
private int count;
private ConsumeType consumeType;
private MessageModel messageModel;
private int consumeTps;
private long diffTotal;
public String getGroup() {
return group;
}
public void setGroup(String group) {
this.group = group;
}
public String consumeTypeDesc() {
if (this.count != 0) {
return this.getConsumeType() == ConsumeType.CONSUME_ACTIVELY ? "PULL" : "PUSH";
}
return "";
}
public ConsumeType getConsumeType() {
return consumeType;
}
public void setConsumeType(ConsumeType consumeType) {
this.consumeType = consumeType;
}
public String messageModelDesc() {
if (this.count != 0 && this.getConsumeType() == ConsumeType.CONSUME_PASSIVELY) {
return this.getMessageModel().toString();
}
return "";
}
public MessageModel getMessageModel() {
return messageModel;
}
public void setMessageModel(MessageModel messageModel) {
this.messageModel = messageModel;
}
public String versionDesc() {
if (this.count != 0) {
return MQVersion.getVersionDesc(this.version);
}
return "";
}
public int getCount() {
return count;
}
public void setCount(int count) {
this.count = count;
}
public long getDiffTotal() {
return diffTotal;
}
public void setDiffTotal(long diffTotal) {
this.diffTotal = diffTotal;
}
@Override
public int compareTo(GroupConsumeInfo o) {
if (this.count != o.count) {
return o.count - this.count;
}
return (int) (o.diffTotal - diffTotal);
}
public int getConsumeTps() {
return consumeTps;
}
public void setConsumeTps(int consumeTps) {
this.consumeTps = consumeTps;
}
public int getVersion() {
return version;
}
public void setVersion(int version) {
this.version = version;
}
}
| GroupConsumeInfo |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentState.java | {
"start": 866,
"end": 2664
} | class ____ implements AggregatorState {
private final PointType pointType;
private boolean seen = false;
private int minX = Integer.MAX_VALUE;
private int maxX = Integer.MIN_VALUE;
private int maxY = Integer.MIN_VALUE;
private int minY = Integer.MAX_VALUE;
SpatialExtentState(PointType pointType) {
this.pointType = pointType;
}
@Override
public void close() {}
@Override
public void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) {
assert blocks.length >= offset + 4;
var blockFactory = driverContext.blockFactory();
blocks[offset + 0] = blockFactory.newConstantIntBlockWith(minX, 1);
blocks[offset + 1] = blockFactory.newConstantIntBlockWith(maxX, 1);
blocks[offset + 2] = blockFactory.newConstantIntBlockWith(maxY, 1);
blocks[offset + 3] = blockFactory.newConstantIntBlockWith(minY, 1);
}
public void add(Geometry geo) {
pointType.computeEnvelope(geo)
.ifPresent(
r -> add(
pointType.encoder().encodeX(r.getMinX()),
pointType.encoder().encodeX(r.getMaxX()),
pointType.encoder().encodeY(r.getMaxY()),
pointType.encoder().encodeY(r.getMinY())
)
);
}
/**
* This method is used when extents are extracted from the doc-values field by the {@link GeometryDocValueReader}.
* This optimization is enabled when the field has doc-values and is only used in the ST_EXTENT aggregation.
*/
public void add(int p, IntBlock values) {
int count = values.getValueCount(p);
if (count == 6) {
// Values are stored according to the order defined in the Extent | SpatialExtentState |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/internal/maps/Maps_assertHasSize_Test.java | {
"start": 1297,
"end": 2183
} | class ____ extends MapsBaseTest {
@Test
void should_pass_if_size_of_actual_is_equal_to_expected_size() {
Map<?, ?> actual = mapOf(entry("name", "Yoda"), entry("job", "Yedi Master"));
maps.assertHasSize(INFO, actual, 2);
}
@Test
void should_fail_if_actual_is_null() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> maps.assertHasSize(INFO, null, 8))
.withMessage(actualIsNull());
}
@Test
void should_fail_if_size_of_actual_is_not_equal_to_expected_size() {
Map<?, ?> actual = mapOf(entry("name", "Yoda"), entry("job", "Yedi Master"));
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> maps.assertHasSize(INFO, actual, 8))
.withMessage(shouldHaveSize(actual, actual.size(), 8).create());
}
}
| Maps_assertHasSize_Test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.