language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/TypeUtilsTest_interface.java | {
"start": 1510,
"end": 1572
} | class ____<T> extends X<T> {
}
public static | X_X |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/GenericTypeResolverTests.java | {
"start": 13475,
"end": 13498
} | class ____ extends C {}
| E |
java | junit-team__junit5 | documentation/src/test/java/example/ParameterizedTestDemo.java | {
"start": 14850,
"end": 16641
} | class ____ extends TypedArgumentConverter<String, Integer> {
protected ToLengthArgumentConverter() {
super(String.class, Integer.class);
}
@Override
protected Integer convert(String source) {
return (source != null ? source.length() : 0);
}
}
// end::explicit_conversion_example_TypedArgumentConverter[]
// tag::explicit_java_time_converter[]
@ParameterizedTest
@ValueSource(strings = { "01.01.2017", "31.12.2017" })
void testWithExplicitJavaTimeConverter(
@JavaTimeConversionPattern("dd.MM.yyyy") LocalDate argument) {
assertEquals(2017, argument.getYear());
}
// end::explicit_java_time_converter[]
// @formatter:on
// @formatter:off
// tag::ArgumentsAccessor_example[]
@ParameterizedTest
@CsvSource({
"Jane, Doe, F, 1990-05-20",
"John, Doe, M, 1990-10-22"
})
void testWithArgumentsAccessor(ArgumentsAccessor arguments) {
Person person = new Person(
arguments.getString(0),
arguments.getString(1),
arguments.get(2, Gender.class),
arguments.get(3, LocalDate.class));
if (person.getFirstName().equals("Jane")) {
assertEquals(Gender.F, person.getGender());
}
else {
assertEquals(Gender.M, person.getGender());
}
assertEquals("Doe", person.getLastName());
assertEquals(1990, person.getDateOfBirth().getYear());
}
// end::ArgumentsAccessor_example[]
// @formatter:on
// @formatter:off
// tag::ArgumentsAggregator_example[]
@ParameterizedTest
@CsvSource({
"Jane, Doe, F, 1990-05-20",
"John, Doe, M, 1990-10-22"
})
void testWithArgumentsAggregator(@AggregateWith(PersonAggregator.class) Person person) {
// perform assertions against person
}
// end::ArgumentsAggregator_example[]
static
// tag::ArgumentsAggregator_example_PersonAggregator[]
public | ToLengthArgumentConverter |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/query/NativeQueryWithDatetimesTest.java | {
"start": 1924,
"end": 2235
} | class ____ {
@Id
long id;
@Column(nullable = false, name = "ctime")
LocalTime localTime = LocalTime.now();
@Column(nullable = false, name = "cdate")
LocalDate localDate = LocalDate.now();
@Column(nullable = false, name = "cdatetime")
LocalDateTime localDateTime = LocalDateTime.now();
}
}
| Datetimes |
java | apache__camel | components/camel-leveldb/src/test/java/org/apache/camel/component/leveldb/LevelDBAggregateRecoverDeadLetterChannelTest.java | {
"start": 1282,
"end": 4564
} | class ____ extends LevelDBTestSupport {
@Override
public void doPreSetup() throws Exception {
deleteDirectory("target/data");
// enable recovery
getRepo().setUseRecovery(true);
// exhaust after at most 3 attempts
getRepo().setMaximumRedeliveries(3);
// and move to this dead letter channel
getRepo().setDeadLetterUri("mock:dead");
// check faster
getRepo().setRecoveryInterval(500, TimeUnit.MILLISECONDS);
}
@Test
public void testLevelDBAggregateRecoverDeadLetterChannel() throws Exception {
// should fail all times
getMockEndpoint("mock:result").expectedMessageCount(0);
getMockEndpoint("mock:aggregated").expectedMessageCount(4);
getMockEndpoint("mock:aggregated").message(0).header(Exchange.REDELIVERED).isNull();
getMockEndpoint("mock:aggregated").message(1).header(Exchange.REDELIVERED).isEqualTo(Boolean.TRUE);
getMockEndpoint("mock:aggregated").message(1).header(Exchange.REDELIVERY_COUNTER).isEqualTo(1);
getMockEndpoint("mock:aggregated").message(1).header(Exchange.REDELIVERY_MAX_COUNTER).isEqualTo(3);
getMockEndpoint("mock:aggregated").message(2).header(Exchange.REDELIVERED).isEqualTo(Boolean.TRUE);
getMockEndpoint("mock:aggregated").message(2).header(Exchange.REDELIVERY_COUNTER).isEqualTo(2);
getMockEndpoint("mock:aggregated").message(2).header(Exchange.REDELIVERY_MAX_COUNTER).isEqualTo(3);
getMockEndpoint("mock:aggregated").message(3).header(Exchange.REDELIVERED).isEqualTo(Boolean.TRUE);
getMockEndpoint("mock:aggregated").message(3).header(Exchange.REDELIVERY_COUNTER).isEqualTo(3);
getMockEndpoint("mock:aggregated").message(3).header(Exchange.REDELIVERY_MAX_COUNTER).isEqualTo(3);
getMockEndpoint("mock:dead").expectedBodiesReceived("ABCDE");
getMockEndpoint("mock:dead").message(0).header(Exchange.REDELIVERED).isEqualTo(Boolean.TRUE);
getMockEndpoint("mock:dead").message(0).header(Exchange.REDELIVERY_COUNTER).isEqualTo(3);
getMockEndpoint("mock:dead").message(0).header(Exchange.REDELIVERY_MAX_COUNTER).isNull();
template.sendBodyAndHeader("direct:start", "A", "id", 123);
template.sendBodyAndHeader("direct:start", "B", "id", 123);
template.sendBodyAndHeader("direct:start", "C", "id", 123);
template.sendBodyAndHeader("direct:start", "D", "id", 123);
template.sendBodyAndHeader("direct:start", "E", "id", 123);
MockEndpoint.assertIsSatisfied(context);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start")
.aggregate(header("id"), new StringAggregationStrategy())
.completionSize(5).aggregationRepository(getRepo())
.log("aggregated exchange id ${exchangeId} with ${body}")
.to("mock:aggregated")
.throwException(new IllegalArgumentException("Damn"))
.to("mock:result")
.end();
}
};
}
}
| LevelDBAggregateRecoverDeadLetterChannelTest |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java | {
"start": 6118,
"end": 22390
} | class ____ extends MapperServiceTestCase {
private static final IndexSettings INDEX_SETTINGS = IndexSettingsModule.newIndexSettings(
"index",
Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()).build()
);
private static final BytesArray TRANSLOG_OPERATION_SOURCE = new BytesArray("{}".getBytes(StandardCharsets.UTF_8));
private final ShardId shardId = new ShardId(INDEX_SETTINGS.getIndex(), 1);
private final ClusterSettings service = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
private final RecoveryPlannerService recoveryPlannerService = PeerOnlyRecoveryPlannerService.INSTANCE;
private ThreadPool threadPool;
private Executor recoveryExecutor;
@Before
public void setUpThreadPool() {
if (randomBoolean()) {
threadPool = new TestThreadPool(getTestName());
recoveryExecutor = threadPool.generic();
} else {
// verify that both sending and receiving files can be completed with a single thread
threadPool = new TestThreadPool(
getTestName(),
new FixedExecutorBuilder(
Settings.EMPTY,
"recovery_executor",
between(1, 16),
between(16, 128),
"recovery_executor",
EsExecutors.TaskTrackingConfig.DO_NOT_TRACK
)
);
recoveryExecutor = threadPool.executor("recovery_executor");
}
}
@After
public void tearDownThreadPool() {
terminate(threadPool);
}
public void testSendFiles() throws Throwable {
final RecoverySettings recoverySettings = new RecoverySettings(Settings.EMPTY, service);
final StartRecoveryRequest request = getStartRecoveryRequest();
Store store = newStore(createTempDir());
Directory dir = store.directory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig());
int numDocs = randomIntBetween(10, 100);
for (int i = 0; i < numDocs; i++) {
Document document = new Document();
document.add(new StringField("id", Integer.toString(i), Field.Store.YES));
document.add(newField("field", randomUnicodeOfCodepointLengthBetween(1, 10), TextField.TYPE_STORED));
writer.addDocument(document);
}
writer.commit();
writer.close();
Store.MetadataSnapshot metadata = store.getMetadata(null);
List<StoreFileMetadata> metas = new ArrayList<>();
for (StoreFileMetadata md : metadata) {
metas.add(md);
}
Store targetStore = newStore(createTempDir());
MultiFileWriter multiFileWriter = new MultiFileWriter(targetStore, mock(RecoveryState.Index.class), "", logger);
RecoveryTargetHandler target = new TestRecoveryTargetHandler() {
@Override
public void writeFileChunk(
StoreFileMetadata md,
long position,
ReleasableBytesReference content,
boolean lastChunk,
int totalTranslogOps,
ActionListener<Void> listener
) {
ActionListener.completeWith(listener, () -> {
multiFileWriter.writeFileChunk(md, position, content, lastChunk);
return null;
});
}
};
RecoverySourceHandler handler = new RecoverySourceHandler(
null,
new AsyncRecoveryTarget(target, recoveryExecutor),
threadPool,
request,
Math.toIntExact(recoverySettings.getChunkSize().getBytes()),
between(1, 5),
between(1, 5),
between(1, 5),
false,
recoveryPlannerService
);
PlainActionFuture<Void> sendFilesFuture = new PlainActionFuture<>();
handler.sendFiles(store, metas.toArray(new StoreFileMetadata[0]), () -> 0, sendFilesFuture);
sendFilesFuture.actionGet();
Store.MetadataSnapshot targetStoreMetadata = targetStore.getMetadata(null);
Store.RecoveryDiff recoveryDiff = targetStoreMetadata.recoveryDiff(metadata);
assertEquals(metas.size(), recoveryDiff.identical.size());
assertEquals(0, recoveryDiff.different.size());
assertEquals(0, recoveryDiff.missing.size());
IndexReader reader = DirectoryReader.open(targetStore.directory());
assertEquals(numDocs, reader.maxDoc());
IOUtils.close(reader, store, multiFileWriter, targetStore);
}
public StartRecoveryRequest getStartRecoveryRequest() {
Store.MetadataSnapshot metadataSnapshot = randomBoolean()
? Store.MetadataSnapshot.EMPTY
: new Store.MetadataSnapshot(
Collections.emptyMap(),
Collections.singletonMap(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID()),
randomIntBetween(0, 100)
);
return new StartRecoveryRequest(
shardId,
null,
DiscoveryNodeUtils.builder("b").roles(emptySet()).build(),
DiscoveryNodeUtils.builder("b").roles(emptySet()).build(),
0L,
metadataSnapshot,
randomBoolean(),
randomNonNegativeLong(),
randomBoolean() || metadataSnapshot.getHistoryUUID() == null ? UNASSIGNED_SEQ_NO : randomNonNegativeLong(),
true
);
}
public void testSendSnapshotSendsOps() throws IOException {
IndexOpFactory iof = randomBoolean() ? new StandardModeIndexOpFactory() : new TimeSeriesModeIndexOpFactory();
final int fileChunkSizeInBytes = between(1, 4096);
final StartRecoveryRequest request = getStartRecoveryRequest();
final IndexShard shard = mock(IndexShard.class);
when(shard.state()).thenReturn(IndexShardState.STARTED);
final List<Translog.Operation> operations = new ArrayList<>();
final int initialNumberOfDocs = randomIntBetween(10, 1000);
for (int i = 0; i < initialNumberOfDocs; i++) {
final Engine.Index index = iof.createIndexOp(i);
operations.add(new Translog.Index(index, new Engine.IndexResult(1, 1, SequenceNumbers.UNASSIGNED_SEQ_NO, true, index.id())));
}
final int numberOfDocsWithValidSequenceNumbers = randomIntBetween(10, 1000);
for (int i = initialNumberOfDocs; i < initialNumberOfDocs + numberOfDocsWithValidSequenceNumbers; i++) {
final Engine.Index index = iof.createIndexOp(i);
operations.add(new Translog.Index(index, new Engine.IndexResult(1, 1, i - initialNumberOfDocs, true, index.id())));
}
final long startingSeqNo = randomIntBetween(0, numberOfDocsWithValidSequenceNumbers - 1);
final long endingSeqNo = randomLongBetween(startingSeqNo, numberOfDocsWithValidSequenceNumbers - 1);
final Queue<Translog.Operation> shippedOps = ConcurrentCollections.newQueue();
final AtomicLong checkpointOnTarget = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
RecoveryTargetHandler recoveryTarget = new TestRecoveryTargetHandler() {
@Override
public void indexTranslogOperations(
List<Translog.Operation> operations,
int totalTranslogOps,
long timestamp,
long msu,
RetentionLeases retentionLeases,
long mappingVersion,
ActionListener<Long> listener
) {
shippedOps.addAll(operations);
if (randomBoolean()) {
checkpointOnTarget.addAndGet(between(1, 20));
}
listener.onResponse(checkpointOnTarget.get());
}
};
RecoverySourceHandler handler = new RecoverySourceHandler(
shard,
new AsyncRecoveryTarget(recoveryTarget, threadPool.generic()),
threadPool,
request,
fileChunkSizeInBytes,
between(1, 10),
between(1, 10),
between(1, 10),
false,
recoveryPlannerService
);
PlainActionFuture<RecoverySourceHandler.SendSnapshotResult> future = new PlainActionFuture<>();
handler.phase2(
startingSeqNo,
endingSeqNo,
newTranslogSnapshot(operations, emptyList()),
randomNonNegativeLong(),
randomNonNegativeLong(),
RetentionLeases.EMPTY,
randomNonNegativeLong(),
future
);
final int expectedOps = (int) (endingSeqNo - startingSeqNo + 1);
RecoverySourceHandler.SendSnapshotResult result = future.actionGet();
assertThat(result.sentOperations(), equalTo(expectedOps));
List<Translog.Operation> sortedShippedOps = shippedOps.stream().sorted(Comparator.comparing(Translog.Operation::seqNo)).toList();
assertThat(shippedOps.size(), equalTo(expectedOps));
for (int i = 0; i < shippedOps.size(); i++) {
assertThat(sortedShippedOps.get(i), equalTo(operations.get(i + (int) startingSeqNo + initialNumberOfDocs)));
}
assertThat(result.targetLocalCheckpoint(), equalTo(checkpointOnTarget.get()));
}
public void testSendSnapshotStopOnError() throws Exception {
IndexOpFactory iof = randomBoolean() ? new StandardModeIndexOpFactory() : new TimeSeriesModeIndexOpFactory();
final int fileChunkSizeInBytes = between(1, 10 * 1024);
final StartRecoveryRequest request = getStartRecoveryRequest();
final IndexShard shard = mock(IndexShard.class);
when(shard.state()).thenReturn(IndexShardState.STARTED);
final List<Translog.Operation> ops = new ArrayList<>();
for (int numOps = between(1, 256), i = 0; i < numOps; i++) {
final Engine.Index index = iof.createIndexOp(i);
ops.add(new Translog.Index(index, new Engine.IndexResult(1, 1, i, true, index.id())));
}
final AtomicBoolean wasFailed = new AtomicBoolean();
RecoveryTargetHandler recoveryTarget = new TestRecoveryTargetHandler() {
@Override
public void indexTranslogOperations(
List<Translog.Operation> operations,
int totalTranslogOps,
long timestamp,
long msu,
RetentionLeases retentionLeases,
long mappingVersion,
ActionListener<Long> listener
) {
if (randomBoolean()) {
listener.onResponse(SequenceNumbers.NO_OPS_PERFORMED);
} else {
listener.onFailure(new RuntimeException("test - failed to index"));
wasFailed.set(true);
}
}
};
RecoverySourceHandler handler = new RecoverySourceHandler(
shard,
new AsyncRecoveryTarget(recoveryTarget, threadPool.generic()),
threadPool,
request,
fileChunkSizeInBytes,
between(1, 10),
between(1, 10),
between(1, 10),
false,
recoveryPlannerService
);
PlainActionFuture<RecoverySourceHandler.SendSnapshotResult> future = new PlainActionFuture<>();
final long startingSeqNo = randomLongBetween(0, ops.size() - 1L);
final long endingSeqNo = randomLongBetween(startingSeqNo, ops.size() - 1L);
handler.phase2(
startingSeqNo,
endingSeqNo,
newTranslogSnapshot(ops, emptyList()),
randomNonNegativeLong(),
randomNonNegativeLong(),
RetentionLeases.EMPTY,
randomNonNegativeLong(),
future
);
if (wasFailed.get()) {
final RecoveryEngineException error = expectThrows(RecoveryEngineException.class, future::actionGet);
assertThat(error.getMessage(), equalTo("Phase[2] failed to send/replay operations"));
assertThat(error.getCause().getMessage(), equalTo("test - failed to index"));
}
}
public void testSendOperationsConcurrently() throws Throwable {
final IndexShard shard = mock(IndexShard.class);
when(shard.state()).thenReturn(IndexShardState.STARTED);
Set<Long> receivedSeqNos = ConcurrentCollections.newConcurrentSet();
long maxSeenAutoIdTimestamp = randomBoolean() ? -1 : randomNonNegativeLong();
long maxSeqNoOfUpdatesOrDeletes = randomBoolean() ? -1 : randomNonNegativeLong();
RetentionLeases retentionLeases = new RetentionLeases(randomNonNegativeLong(), randomNonNegativeLong(), List.of());
long mappingVersion = randomNonNegativeLong();
AtomicLong localCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
int numOps = randomIntBetween(0, 1000);
AtomicBoolean received = new AtomicBoolean();
RecoveryTargetHandler target = new TestRecoveryTargetHandler() {
@Override
public void indexTranslogOperations(
List<Translog.Operation> operations,
int receivedTotalOps,
long receivedMaxSeenAutoIdTimestamp,
long receivedMaxSeqNoOfUpdatesOrDeletes,
RetentionLeases receivedRetentionLease,
long receivedMappingVersion,
ActionListener<Long> listener
) {
received.set(true);
assertThat(receivedMaxSeenAutoIdTimestamp, equalTo(maxSeenAutoIdTimestamp));
assertThat(receivedMaxSeqNoOfUpdatesOrDeletes, equalTo(maxSeqNoOfUpdatesOrDeletes));
assertThat(receivedRetentionLease, equalTo(retentionLeases));
assertThat(receivedMappingVersion, equalTo(mappingVersion));
assertThat(receivedTotalOps, equalTo(numOps));
for (Translog.Operation operation : operations) {
receivedSeqNos.add(operation.seqNo());
}
if (randomBoolean()) {
localCheckpoint.addAndGet(randomIntBetween(1, 100));
}
listener.onResponse(localCheckpoint.get());
}
};
PlainActionFuture<RecoverySourceHandler.SendSnapshotResult> sendFuture = new PlainActionFuture<>();
long startingSeqNo = randomIntBetween(0, 1000);
long endingSeqNo = startingSeqNo + randomIntBetween(0, 10000);
List<Translog.Operation> operations = generateOperations(numOps);
Randomness.shuffle(operations);
List<Translog.Operation> skipOperations = randomSubsetOf(operations);
Translog.Snapshot snapshot = newTranslogSnapshot(operations, skipOperations);
RecoverySourceHandler handler = new RecoverySourceHandler(
shard,
new AsyncRecoveryTarget(target, recoveryExecutor),
threadPool,
getStartRecoveryRequest(),
between(1, 10 * 1024),
between(1, 5),
between(1, 5),
between(1, 5),
false,
recoveryPlannerService
);
handler.phase2(
startingSeqNo,
endingSeqNo,
snapshot,
maxSeenAutoIdTimestamp,
maxSeqNoOfUpdatesOrDeletes,
retentionLeases,
mappingVersion,
sendFuture
);
RecoverySourceHandler.SendSnapshotResult sendSnapshotResult = sendFuture.actionGet();
assertTrue(received.get());
assertThat(sendSnapshotResult.targetLocalCheckpoint(), equalTo(localCheckpoint.get()));
assertThat(sendSnapshotResult.sentOperations(), equalTo(receivedSeqNos.size()));
Set<Long> sentSeqNos = new HashSet<>();
for (Translog.Operation op : operations) {
if (startingSeqNo <= op.seqNo() && op.seqNo() <= endingSeqNo && skipOperations.contains(op) == false) {
sentSeqNos.add(op.seqNo());
}
}
assertThat(receivedSeqNos, equalTo(sentSeqNos));
}
private | RecoverySourceHandlerTests |
java | alibaba__nacos | config/src/main/java/com/alibaba/nacos/config/server/service/dump/task/DumpAllBetaTask.java | {
"start": 819,
"end": 1015
} | class ____ extends AbstractDelayTask {
@Override
public void merge(AbstractDelayTask task) {
}
public static final String TASK_ID = "dumpAllBetaConfigTask";
}
| DumpAllBetaTask |
java | micronaut-projects__micronaut-core | test-suite/src/test/java/io/micronaut/docs/aop/around/NotNullInterceptor.java | {
"start": 1093,
"end": 1878
} | class ____ implements MethodInterceptor<Object, Object> { // <2>
@Nullable
@Override
public Object intercept(MethodInvocationContext<Object, Object> context) {
Optional<Map.Entry<String, MutableArgumentValue<?>>> nullParam = context.getParameters()
.entrySet()
.stream()
.filter(entry -> {
MutableArgumentValue<?> argumentValue = entry.getValue();
return Objects.isNull(argumentValue.getValue());
})
.findFirst(); // <3>
if (nullParam.isPresent()) {
throw new IllegalArgumentException("Null parameter [" + nullParam.get().getKey() + "] not allowed"); // <4>
}
return context.proceed(); // <5>
}
}
// end::interceptor[]
| NotNullInterceptor |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/hql/fetchAndJoin/ToManyFetchAndJoinTest.java | {
"start": 899,
"end": 5774
} | class ____ {
@BeforeEach
public void setupData(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( session -> {
Parent p = new Parent( "p" );
Child c1 = new Child( "c1" );
GrandChild gc11 = new GrandChild( "gc11" );
GrandChild gc12 = new GrandChild( "gc12" );
p.getChildren().add( c1 );
c1.getGrandChildren().add( gc11 );
c1.getGrandChildren().add( gc12 );
Child c2 = new Child( "c2" );
GrandChild gc21 = new GrandChild( "gc21" );
GrandChild gc22 = new GrandChild( "gc22" );
GrandChild gc23 = new GrandChild( "gc23" );
p.getChildren().add( c2 );
c2.getGrandChildren().add( gc21 );
c2.getGrandChildren().add( gc22 );
c2.getGrandChildren().add( gc23 );
session.persist( p );
} );
}
@AfterEach
public void dropTestData(SessionFactoryScope factoryScope) {
factoryScope.dropData();
}
@Test
@JiraKey( value = "HHH-9637")
public void testExplicitJoinBeforeFetchJoins(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( session -> {
var hql = """
select p
from Parent p
inner join p.children cRestrict
inner join fetch p.children c
inner join fetch c.grandChildren
where cRestrict.value = 'c1'
""";
var parent = session.createQuery( hql, Parent.class ).uniqueResult();
assertEquals( "p", parent.getValue() );
assertTrue( Hibernate.isInitialized( parent.getChildren() ) );
assertEquals( 2, parent.getChildren().size() );
Iterator<Child> iterator = parent.getChildren().iterator();
Child cA = iterator.next();
assertTrue( Hibernate.isInitialized( cA.getGrandChildren() ) );
if ( cA.getValue().equals( "c1" ) ) {
assertEquals( 2, cA.getGrandChildren().size() );
Child cB = iterator.next();
assertTrue( Hibernate.isInitialized( cB.getGrandChildren() ) );
assertEquals( 3, cB.getGrandChildren().size() );
}
else if ( cA.getValue().equals( "c2" ) ) {
assertEquals( 3, cA.getGrandChildren().size() );
Child cB = iterator.next();
assertTrue( Hibernate.isInitialized( cB.getGrandChildren() ) );
assertEquals( 2, cB.getGrandChildren().size() );
}
else {
fail( "unexpected value" );
}
} );
}
@Test
@JiraKey( value = "HHH-9637")
public void testExplicitJoinBetweenFetchJoins(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( session -> {
var hql = """
select p
from Parent p
inner join fetch p.children c
inner join p.children cRestrict
inner join fetch c.grandChildren
where cRestrict.value = 'c1'
""";
var parent = session.createQuery( hql, Parent.class ).uniqueResult();
assertEquals( "p", parent.getValue() );
assertTrue( Hibernate.isInitialized( parent.getChildren() ) );
assertEquals( 2, parent.getChildren().size() );
Iterator<Child> iterator = parent.getChildren().iterator();
Child cA = iterator.next();
assertTrue( Hibernate.isInitialized( cA.getGrandChildren() ) );
if ( cA.getValue().equals( "c1" ) ) {
assertEquals( 2, cA.getGrandChildren().size() );
Child cB = iterator.next();
assertTrue( Hibernate.isInitialized( cB.getGrandChildren() ) );
assertEquals( 3, cB.getGrandChildren().size() );
}
else if ( cA.getValue().equals( "c2" ) ) {
assertEquals( 3, cA.getGrandChildren().size() );
Child cB = iterator.next();
assertTrue( Hibernate.isInitialized( cB.getGrandChildren() ) );
assertEquals( 2, cB.getGrandChildren().size() );
}
else {
fail( "unexpected value" );
}
} );
}
@Test
@JiraKey( value = "HHH-9637")
public void testExplicitJoinAfterFetchJoins(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( session -> {
var hql = """
select p
from Parent p
inner join fetch p.children c
inner join fetch c.grandChildren
inner join p.children cRestrict
where cRestrict.value = 'c1'
""";
Parent parent = session.createQuery( hql, Parent.class ).uniqueResult();
assertEquals( "p", parent.getValue() );
assertTrue( Hibernate.isInitialized( parent.getChildren() ) );
assertEquals( 2, parent.getChildren().size() );
Iterator<Child> iterator = parent.getChildren().iterator();
Child cA = iterator.next();
assertTrue( Hibernate.isInitialized( cA.getGrandChildren() ) );
if ( cA.getValue().equals( "c1" ) ) {
assertEquals( 2, cA.getGrandChildren().size() );
Child cB = iterator.next();
assertTrue( Hibernate.isInitialized( cB.getGrandChildren() ) );
assertEquals( 3, cB.getGrandChildren().size() );
}
else if ( cA.getValue().equals( "c2" ) ) {
assertEquals( 3, cA.getGrandChildren().size() );
Child cB = iterator.next();
assertTrue( Hibernate.isInitialized( cB.getGrandChildren() ) );
assertEquals( 2, cB.getGrandChildren().size() );
}
else {
fail( "unexpected value" );
}
} );
}
}
| ToManyFetchAndJoinTest |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/config/plugins/visitors/PluginValueVisitor.java | {
"start": 1260,
"end": 2625
} | class ____ extends AbstractPluginVisitor<PluginValue> {
public PluginValueVisitor() {
super(PluginValue.class);
}
@Override
public Object visit(
final Configuration configuration, final Node node, final LogEvent event, final StringBuilder log) {
final String name = this.annotation.value();
final String elementValue = node.getValue();
final String attributeValue = node.getAttributes().get(name);
String rawValue = null; // if neither is specified, return null (LOG4J2-1313)
if (Strings.isNotEmpty(elementValue)) {
if (Strings.isNotEmpty(attributeValue)) {
LOGGER.error(
"Configuration contains {} with both attribute value ({}) AND element"
+ " value ({}). Please specify only one value. Using the element value.",
node.getName(),
attributeValue,
elementValue);
}
rawValue = elementValue;
} else {
rawValue = removeAttributeValue(node.getAttributes(), name);
}
final String value = this.annotation.substitute() ? this.substitutor.replace(event, rawValue) : rawValue;
StringBuilders.appendKeyDqValue(log, name, value);
return value;
}
}
| PluginValueVisitor |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/test/manual/OverwriteObjects.java | {
"start": 5823,
"end": 8286
} | class ____ implements ReduceFunction<Tuple2<IntValue, IntValue>> {
private Scrambler scrambler;
public OverwriteObjectsReduce(boolean keyed) {
scrambler = new Scrambler(keyed);
}
@Override
public Tuple2<IntValue, IntValue> reduce(
Tuple2<IntValue, IntValue> a, Tuple2<IntValue, IntValue> b) throws Exception {
return scrambler.scramble(a, b);
}
}
// --------------------------------------------------------------------------------------------
public void testJoin(StreamExecutionEnvironment env) throws Exception {
/*
* Test JoinDriver, LeftOuterJoinDriver, RightOuterJoinDriver, and FullOuterJoinDriver
*/
for (JoinHint joinHint : JoinHint.values()) {
if (joinHint == JoinHint.OPTIMIZER_CHOOSES) {
continue;
}
List<Tuple2<IntValue, IntValue>> enabledResult;
List<Tuple2<IntValue, IntValue>> disabledResult;
// Inner join
LOG.info("Testing inner join with JoinHint = {}", joinHint);
env.getConfig().enableObjectReuse();
enabledResult =
CollectionUtil.iteratorToList(
getDataStream(env)
.join(getDataStream(env))
.where(x -> x.f0)
.equalTo(x -> x.f0)
.window(GlobalWindows.create())
.apply(new OverwriteObjectsJoin())
.executeAndCollect());
Collections.sort(enabledResult, comparator);
env.getConfig().disableObjectReuse();
disabledResult =
CollectionUtil.iteratorToList(
getDataStream(env)
.join(getDataStream(env))
.where(x -> x.f0)
.equalTo(x -> x.f0)
.window(GlobalWindows.create())
.apply(new OverwriteObjectsJoin())
.executeAndCollect());
Collections.sort(disabledResult, comparator);
Assert.assertEquals("JoinHint=" + joinHint, disabledResult, enabledResult);
}
}
private | OverwriteObjectsReduce |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyUtils.java | {
"start": 1333,
"end": 1603
} | class ____ {
private static final Logger LOG = LoggerFactory.getLogger(
ProxyUtils.class);
public static final String E_HTTP_HTTPS_ONLY =
"This filter only works for HTTP/HTTPS";
public static final String LOCATION = "Location";
public static | ProxyUtils |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/ClampMinIntegerEvaluator.java | {
"start": 4884,
"end": 5664
} | class ____ implements EvalOperator.ExpressionEvaluator.Factory {
private final Source source;
private final EvalOperator.ExpressionEvaluator.Factory field;
private final EvalOperator.ExpressionEvaluator.Factory min;
public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory field,
EvalOperator.ExpressionEvaluator.Factory min) {
this.source = source;
this.field = field;
this.min = min;
}
@Override
public ClampMinIntegerEvaluator get(DriverContext context) {
return new ClampMinIntegerEvaluator(source, field.get(context), min.get(context), context);
}
@Override
public String toString() {
return "ClampMinIntegerEvaluator[" + "field=" + field + ", min=" + min + "]";
}
}
}
| Factory |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/api/Assertions_assertThatStream_Test.java | {
"start": 1127,
"end": 4357
} | class ____ {
private StringStream stringStream = new StringStream();
@Test
void should_create_Assert() {
// GIVEN
Object assertions = assertThat(Stream.of("Luke", "Leia"));
// WHEN/THEN
then(assertions).isNotNull();
}
@Test
void isEqualTo_should_honor_comparing_the_same_mocked_stream() {
// GIVEN
Stream<?> stream = mock();
// WHEN/THEN
assertThatStream(stream).isEqualTo(stream);
}
@Test
void stream_can_be_asserted_twice() {
// GIVEN
Stream<String> names = Stream.of("Luke", "Leia");
// WHEN/THEN
assertThatStream(names).containsExactly("Luke", "Leia")
.containsExactly("Luke", "Leia");
}
@Test
void should_not_consume_stream_when_asserting_non_null() {
// GIVEN
Stream<?> stream = mock();
// WHEN
assertThatStream(stream).isNotNull();
// THEN
verifyNoInteractions(stream);
}
@Test
void isInstanceOf_should_check_the_original_stream_without_consuming_it() {
// GIVEN
Stream<?> stream = mock();
// WHEN
assertThatStream(stream).isInstanceOf(Stream.class);
// THEN
verifyNoInteractions(stream);
}
@Test
void isInstanceOfAny_should_check_the_original_stream_without_consuming_it() {
// GIVEN
Stream<?> stream = mock();
// WHEN
assertThatStream(stream).isInstanceOfAny(Stream.class, String.class);
// THEN
verifyNoInteractions(stream);
}
@Test
void isOfAnyClassIn_should_check_the_original_stream_without_consuming_it() {
assertThatStream(stringStream).isOfAnyClassIn(Double.class, StringStream.class);
}
@Test
void isExactlyInstanceOf_should_check_the_original_stream() {
assertThatStream(new StringStream()).isExactlyInstanceOf(StringStream.class);
}
@Test
void isNotExactlyInstanceOf_should_check_the_original_stream() {
// GIVEN
assertThatStream(stringStream).isNotExactlyInstanceOf(Stream.class);
// WHEN/THEN
expectAssertionError(() -> assertThatStream(stringStream).isNotExactlyInstanceOf(StringStream.class));
}
@Test
void isNotInstanceOf_should_check_the_original_stream() {
assertThatStream(stringStream).isNotInstanceOf(Long.class);
}
@Test
void isNotInstanceOfAny_should_check_the_original_stream() {
assertThatStream(stringStream).isNotInstanceOfAny(Long.class, String.class);
}
@Test
void isNotOfAnyClassIn_should_check_the_original_stream() {
assertThatStream(stringStream).isNotOfAnyClassIn(Long.class, String.class);
}
@Test
void isSameAs_should_check_the_original_stream_without_consuming_it() {
// GIVEN
Stream<?> stream = mock();
// WHEN
assertThatStream(stream).isSameAs(stream);
// THEN
verifyNoInteractions(stream);
}
@Test
void isNotSameAs_should_check_the_original_stream_without_consuming_it() {
// GIVEN
Stream<?> stream = mock();
// WHEN
expectAssertionError(() -> assertThatStream(stream).isNotSameAs(stream));
// THEN
verifyNoInteractions(stream);
}
@Test
void stream_with_upper_bound_assertions() {
// GIVEN
Stream<? extends Foo> foos = Stream.of();
// WHEN/THEN
assertThatStream(foos).hasSize(0);
}
public static | Assertions_assertThatStream_Test |
java | apache__kafka | connect/runtime/src/main/java/org/apache/kafka/connect/storage/KafkaOffsetBackingStore.java | {
"start": 16637,
"end": 19003
} | class ____ implements org.apache.kafka.clients.producer.Callback, Future<Void> {
private int numLeft;
private boolean completed = false;
private Throwable exception = null;
private final Callback<Void> callback;
public SetCallbackFuture(int numRecords, Callback<Void> callback) {
numLeft = numRecords;
this.callback = callback;
}
@Override
public synchronized void onCompletion(RecordMetadata metadata, Exception exception) {
if (exception != null) {
if (!completed) {
this.exception = exception;
callback.onCompletion(exception, null);
completed = true;
this.notify();
}
return;
}
numLeft -= 1;
if (numLeft == 0) {
callback.onCompletion(null, null);
completed = true;
this.notify();
}
}
@Override
public synchronized boolean cancel(boolean mayInterruptIfRunning) {
return false;
}
@Override
public synchronized boolean isCancelled() {
return false;
}
@Override
public synchronized boolean isDone() {
return completed;
}
@Override
public synchronized Void get() throws InterruptedException, ExecutionException {
while (!completed) {
this.wait();
}
if (exception != null)
throw new ExecutionException(exception);
return null;
}
@Override
public synchronized Void get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
long started = System.currentTimeMillis();
long limit = started + unit.toMillis(timeout);
while (!completed) {
long leftMs = limit - System.currentTimeMillis();
if (leftMs < 0)
throw new TimeoutException("KafkaOffsetBackingStore Future timed out.");
this.wait(leftMs);
}
if (exception != null)
throw new ExecutionException(exception);
return null;
}
}
}
| SetCallbackFuture |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/ConnectionState.java | {
"start": 1135,
"end": 4885
} | class ____ {
private volatile HandshakeResponse handshakeResponse;
private volatile RedisCredentialsProvider credentialsProvider;
private volatile int db;
private volatile boolean readOnly;
private volatile ConnectionMetadata connectionMetadata = new ConnectionMetadata();
/**
* Applies settings from {@link RedisURI}.
*
* @param redisURI the URI to apply the client name and authentication.
*/
public void apply(RedisURI redisURI) {
connectionMetadata.apply(redisURI);
setCredentialsProvider(redisURI.getCredentialsProvider());
}
void apply(ConnectionMetadata metadata) {
this.connectionMetadata.apply(metadata);
}
ConnectionMetadata getConnectionMetadata() {
return connectionMetadata;
}
/**
* Returns the negotiated {@link ProtocolVersion}.
*
* @return the negotiated {@link ProtocolVersion} once the connection is established.
*/
public ProtocolVersion getNegotiatedProtocolVersion() {
return handshakeResponse != null ? handshakeResponse.getNegotiatedProtocolVersion() : null;
}
/**
* Returns the client connection id. Only available when using {@link ProtocolVersion#RESP3}.
*
* @return the client connection id. Can be {@code null} if Redis uses RESP2.
*/
public Long getConnectionId() {
return handshakeResponse != null ? handshakeResponse.getConnectionId() : null;
}
/**
* Returns the Redis server version. Only available when using {@link ProtocolVersion#RESP3}.
*
* @return the Redis server version.
*/
public String getRedisVersion() {
return handshakeResponse != null ? handshakeResponse.getRedisVersion() : null;
}
/**
* Returns the Redis server mode. Only available when using {@link ProtocolVersion#RESP3}.
*
* @return the Redis server mode.
*/
public String getMode() {
return handshakeResponse != null ? handshakeResponse.getMode() : null;
}
/**
* Returns the Redis server role. Only available when using {@link ProtocolVersion#RESP3}.
*
* @return the Redis server role.
*/
public String getRole() {
return handshakeResponse != null ? handshakeResponse.getRole() : null;
}
void setHandshakeResponse(HandshakeResponse handshakeResponse) {
this.handshakeResponse = handshakeResponse;
}
/**
* Sets username/password state based on the argument count from an {@code AUTH} command.
*
* @param args
*/
protected void setUserNamePassword(List<char[]> args) {
if (args.isEmpty()) {
return;
}
if (args.size() > 1) {
this.credentialsProvider = new StaticCredentialsProvider(new String(args.get(0)), args.get(1));
} else {
this.credentialsProvider = new StaticCredentialsProvider(null, args.get(0));
}
}
protected void setCredentialsProvider(RedisCredentialsProvider credentialsProvider) {
this.credentialsProvider = credentialsProvider;
}
public RedisCredentialsProvider getCredentialsProvider() {
return credentialsProvider;
}
protected void setDb(int db) {
this.db = db;
}
int getDb() {
return db;
}
protected void setReadOnly(boolean readOnly) {
this.readOnly = readOnly;
}
boolean isReadOnly() {
return readOnly;
}
protected void setClientName(String clientName) {
this.connectionMetadata.setClientName(clientName);
}
String getClientName() {
return connectionMetadata.getClientName();
}
/**
* HELLO Handshake response.
*/
static | ConnectionState |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/component/bean/BeanMethodWithStringParameterTest.java | {
"start": 2257,
"end": 2711
} | class ____ {
public static String doSomething(String name, int repeat) {
StringBuilder sb = new StringBuilder();
for (int i = 0; i < repeat; i++) {
sb.append(name);
}
return sb.toString();
}
public static String doSomethingWithExchange(String name, Exchange exchange) {
return name + " " + exchange.getIn().getBody(String.class);
}
}
}
| MyBean |
java | apache__kafka | clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestFuture.java | {
"start": 1925,
"end": 9170
} | class ____<T> implements ConsumerNetworkClient.PollCondition {
private static final Object INCOMPLETE_SENTINEL = new Object();
private final AtomicReference<Object> result = new AtomicReference<>(INCOMPLETE_SENTINEL);
private final ConcurrentLinkedQueue<RequestFutureListener<T>> listeners = new ConcurrentLinkedQueue<>();
private final CountDownLatch completedLatch = new CountDownLatch(1);
/**
* Check whether the response is ready to be handled
* @return true if the response is ready, false otherwise
*/
public boolean isDone() {
return result.get() != INCOMPLETE_SENTINEL;
}
public boolean awaitDone(long timeout, TimeUnit unit) throws InterruptedException {
return completedLatch.await(timeout, unit);
}
/**
* Get the value corresponding to this request (only available if the request succeeded)
* @return the value set in {@link #complete(Object)}
* @throws IllegalStateException if the future is not complete or failed
*/
@SuppressWarnings("unchecked")
public T value() {
if (!succeeded())
throw new IllegalStateException("Attempt to retrieve value from future which hasn't successfully completed");
return (T) result.get();
}
/**
* Check if the request succeeded;
* @return true if the request completed and was successful
*/
public boolean succeeded() {
return isDone() && !failed();
}
/**
* Check if the request failed.
* @return true if the request completed with a failure
*/
public boolean failed() {
return result.get() instanceof RuntimeException;
}
/**
* Check if the request is retriable. This is a convenience method for checking if
* the exception is an instance of {@link RetriableException}.
* @return true if it is retriable, false otherwise
* @throws IllegalStateException if the future is not complete or completed successfully
*/
public boolean isRetriable() {
return exception() instanceof RetriableException;
}
/**
* Get the exception from a failed result (only available if the request failed)
* @return the exception set in {@link #raise(RuntimeException)}
* @throws IllegalStateException if the future is not complete or completed successfully
*/
public RuntimeException exception() {
if (!failed())
throw new IllegalStateException("Attempt to retrieve exception from future which hasn't failed");
return (RuntimeException) result.get();
}
/**
* Complete the request successfully. After this call, {@link #succeeded()} will return true
* and the value can be obtained through {@link #value()}.
* @param value corresponding value (or null if there is none)
* @throws IllegalStateException if the future has already been completed
* @throws IllegalArgumentException if the argument is an instance of {@link RuntimeException}
*/
public void complete(T value) {
try {
if (value instanceof RuntimeException)
throw new IllegalArgumentException("The argument to complete can not be an instance of RuntimeException");
if (!result.compareAndSet(INCOMPLETE_SENTINEL, value))
throw new IllegalStateException("Invalid attempt to complete a request future which is already complete");
fireSuccess();
} finally {
completedLatch.countDown();
}
}
/**
* Raise an exception. The request will be marked as failed, and the caller can either
* handle the exception or throw it.
* @param e corresponding exception to be passed to caller
* @throws IllegalStateException if the future has already been completed
*/
public void raise(RuntimeException e) {
try {
if (e == null)
throw new IllegalArgumentException("The exception passed to raise must not be null");
if (!result.compareAndSet(INCOMPLETE_SENTINEL, e))
throw new IllegalStateException("Invalid attempt to complete a request future which is already complete");
fireFailure();
} finally {
completedLatch.countDown();
}
}
/**
* Raise an error. The request will be marked as failed.
* @param error corresponding error to be passed to caller
*/
public void raise(Errors error) {
raise(error.exception());
}
private void fireSuccess() {
T value = value();
while (true) {
RequestFutureListener<T> listener = listeners.poll();
if (listener == null)
break;
listener.onSuccess(value);
}
}
private void fireFailure() {
RuntimeException exception = exception();
while (true) {
RequestFutureListener<T> listener = listeners.poll();
if (listener == null)
break;
listener.onFailure(exception);
}
}
/**
* Add a listener which will be notified when the future completes
* @param listener non-null listener to add
*/
public void addListener(RequestFutureListener<T> listener) {
this.listeners.add(listener);
if (failed())
fireFailure();
else if (succeeded())
fireSuccess();
}
/**
* Convert from a request future of one type to another type
* @param adapter The adapter which does the conversion
* @param <S> The type of the future adapted to
* @return The new future
*/
public <S> RequestFuture<S> compose(final RequestFutureAdapter<T, S> adapter) {
final RequestFuture<S> adapted = new RequestFuture<>();
addListener(new RequestFutureListener<>() {
@Override
public void onSuccess(T value) {
adapter.onSuccess(value, adapted);
}
@Override
public void onFailure(RuntimeException e) {
adapter.onFailure(e, adapted);
}
});
return adapted;
}
public void chain(final RequestFuture<T> future) {
addListener(new RequestFutureListener<>() {
@Override
public void onSuccess(T value) {
future.complete(value);
}
@Override
public void onFailure(RuntimeException e) {
future.raise(e);
}
});
}
public static <T> RequestFuture<T> failure(RuntimeException e) {
RequestFuture<T> future = new RequestFuture<>();
future.raise(e);
return future;
}
public static RequestFuture<Void> voidSuccess() {
RequestFuture<Void> future = new RequestFuture<>();
future.complete(null);
return future;
}
public static <T> RequestFuture<T> coordinatorNotAvailable() {
return failure(Errors.COORDINATOR_NOT_AVAILABLE.exception());
}
public static <T> RequestFuture<T> noBrokersAvailable() {
return failure(new NoAvailableBrokersException());
}
@Override
public boolean shouldBlock() {
return !isDone();
}
}
| RequestFuture |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/bean/override/BeanOverrideHandlerTests.java | {
"start": 11983,
"end": 12268
} | class ____ {
ExampleService noQualifier;
ExampleService example;
@Qualifier("test")
ExampleService directQualifier;
@Qualifier("test")
@DummyBean
ExampleService qualifiedDummyBean;
}
@Target(ElementType.FIELD)
@Retention(RetentionPolicy.RUNTIME)
@DummyBean
@ | ConfigB |
java | google__guava | android/guava-tests/test/com/google/common/hash/MessageDigestHashFunctionTest.java | {
"start": 1125,
"end": 4272
} | class ____ extends TestCase {
private static final ImmutableSet<String> INPUTS = ImmutableSet.of("", "Z", "foobar");
// From "How Provider Implementations Are Requested and Supplied" from
// http://docs.oracle.com/javase/6/docs/technotes/guides/security/crypto/CryptoSpec.html
// - Some providers may choose to also include alias names.
// - For example, the "SHA-1" algorithm might be referred to as "SHA1".
// - The algorithm name is not case-sensitive.
private static final ImmutableMap<String, HashFunction> ALGORITHMS =
new ImmutableMap.Builder<String, HashFunction>()
.put("MD5", Hashing.md5())
.put("SHA", Hashing.sha1()) // Not the official name, but still works
.put("SHA1", Hashing.sha1()) // Not the official name, but still works
.put("sHa-1", Hashing.sha1()) // Not the official name, but still works
.put("SHA-1", Hashing.sha1())
.put("SHA-256", Hashing.sha256())
.put("SHA-384", Hashing.sha384())
.put("SHA-512", Hashing.sha512())
.build();
public void testHashing() {
for (String stringToTest : INPUTS) {
for (String algorithmToTest : ALGORITHMS.keySet()) {
assertMessageDigestHashing(HashTestUtils.ascii(stringToTest), algorithmToTest);
}
}
}
public void testPutAfterHash() {
Hasher sha1 = Hashing.sha1().newHasher();
assertEquals(
"2fd4e1c67a2d28fced849ee1bb76e7391b93eb12",
sha1.putString("The quick brown fox jumps over the lazy dog", UTF_8).hash().toString());
assertThrows(IllegalStateException.class, () -> sha1.putInt(42));
}
public void testHashTwice() {
Hasher sha1 = Hashing.sha1().newHasher();
assertEquals(
"2fd4e1c67a2d28fced849ee1bb76e7391b93eb12",
sha1.putString("The quick brown fox jumps over the lazy dog", UTF_8).hash().toString());
assertThrows(IllegalStateException.class, () -> sha1.hash());
}
public void testToString() {
assertEquals("Hashing.md5()", Hashing.md5().toString());
assertEquals("Hashing.sha1()", Hashing.sha1().toString());
assertEquals("Hashing.sha256()", Hashing.sha256().toString());
assertEquals("Hashing.sha512()", Hashing.sha512().toString());
}
private static void assertMessageDigestHashing(byte[] input, String algorithmName) {
try {
MessageDigest digest = MessageDigest.getInstance(algorithmName);
assertEquals(
HashCode.fromBytes(digest.digest(input)), ALGORITHMS.get(algorithmName).hashBytes(input));
for (int bytes = 4; bytes <= digest.getDigestLength(); bytes++) {
assertEquals(
HashCode.fromBytes(Arrays.copyOf(digest.digest(input), bytes)),
new MessageDigestHashFunction(algorithmName, bytes, algorithmName).hashBytes(input));
}
try {
int maxSize = digest.getDigestLength();
new MessageDigestHashFunction(algorithmName, maxSize + 1, algorithmName);
fail();
} catch (IllegalArgumentException expected) {
}
} catch (NoSuchAlgorithmException nsae) {
throw new AssertionError(nsae);
}
}
}
| MessageDigestHashFunctionTest |
java | apache__flink | flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/api/WindowGroupedTable.java | {
"start": 1049,
"end": 3222
} | interface ____ {
/**
* Performs a selection operation on a window grouped table. Similar to an SQL SELECT statement.
* The field expressions can contain complex expressions and aggregations.
*
* <p>Example:
*
* <pre>{@code
* windowGroupedTable.select($("key"), $("window").start(), $("value").avg().as("valavg"));
* }</pre>
*
* <p>Scala Example:
*
* <pre>{@code
* windowGroupedTable.select('key, 'window.start, 'value.avg as 'valavg)
* }</pre>
*/
Table select(Expression... fields);
/**
* Performs an aggregate operation on a window grouped table. You have to close the {@link
* #aggregate(Expression)} with a select statement. The output will be flattened if the output
* type is a composite type.
*
* <p>Example:
*
* <pre>{@code
* windowGroupedTable.aggregate(call(MyAggregateFunction.class, $("a"), $("b")).as("x", "y", "z"))
* .select($("key"), $("window").start(), $("x"), $("y"), $("z"));
* }</pre>
*
* <p>Scala Example:
*
* <pre>{@code
* val aggFunc = new MyAggregateFunction
* windowGroupedTable
* .aggregate(aggFunc('a, 'b) as ('x, 'y, 'z))
* .select('key, 'window.start, 'x, 'y, 'z)
* }</pre>
*/
AggregatedTable aggregate(Expression aggregateFunction);
/**
* Performs a flatAggregate operation on a window grouped table. FlatAggregate takes a
* TableAggregateFunction which returns multiple rows. Use a selection after flatAggregate.
*
* <p>Example:
*
* <pre>{@code
* windowGroupedTable.flatAggregate(call(MyTableAggregateFunction.class, $("a"), $("b")).as("x", "y", "z"))
* .select($("key"), $("window").start(), $("x"), $("y"), $("z"));
* }</pre>
*
* <p>Scala Example:
*
* <pre>{@code
* val tableAggFunc = new MyTableAggregateFunction
* windowGroupedTable
* .flatAggregate(tableAggFunc('a, 'b) as ('x, 'y, 'z))
* .select('key, 'window.start, 'x, 'y, 'z)
* }</pre>
*/
FlatAggregateTable flatAggregate(Expression tableAggregateFunction);
}
| WindowGroupedTable |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/inheritance/discriminator/joinedsubclass/TestEntity.java | {
"start": 209,
"end": 277
} | interface ____ {
Integer getId();
void setId(Integer id);
}
| TestEntity |
java | resilience4j__resilience4j | resilience4j-framework-common/src/main/java/io/github/resilience4j/common/circuitbreaker/monitoring/endpoint/CircuitBreakerDetails.java | {
"start": 194,
"end": 2624
} | class ____ {
@Nullable
private String failureRate;
@Nullable
private String slowCallRate;
@Nullable
private String failureRateThreshold;
@Nullable
private String slowCallRateThreshold;
private int bufferedCalls;
private int failedCalls;
private int slowCalls;
private int slowFailedCalls;
private long notPermittedCalls;
@Nullable
private CircuitBreaker.State state;
@Nullable
public String getFailureRate() {
return failureRate;
}
public void setFailureRate(@Nullable String failureRate) {
this.failureRate = failureRate;
}
@Nullable
public String getSlowCallRate() {
return slowCallRate;
}
public void setSlowCallRate(@Nullable String slowCallRate) {
this.slowCallRate = slowCallRate;
}
@Nullable
public String getFailureRateThreshold() {
return failureRateThreshold;
}
public void setFailureRateThreshold(@Nullable String failureRateThreshold) {
this.failureRateThreshold = failureRateThreshold;
}
@Nullable
public String getSlowCallRateThreshold() {
return slowCallRateThreshold;
}
public void setSlowCallRateThreshold(@Nullable String slowCallRateThreshold) {
this.slowCallRateThreshold = slowCallRateThreshold;
}
public int getBufferedCalls() {
return bufferedCalls;
}
public void setBufferedCalls(int bufferedCalls) {
this.bufferedCalls = bufferedCalls;
}
public int getFailedCalls() {
return failedCalls;
}
public void setFailedCalls(int failedCalls) {
this.failedCalls = failedCalls;
}
public int getSlowCalls() {
return slowCalls;
}
public void setSlowCalls(int slowCalls) {
this.slowCalls = slowCalls;
}
public int getSlowFailedCalls() {
return slowFailedCalls;
}
public void setSlowFailedCalls(int slowFailedCalls) {
this.slowFailedCalls = slowFailedCalls;
}
public long getNotPermittedCalls() {
return notPermittedCalls;
}
public void setNotPermittedCalls(long notPermittedCalls) {
this.notPermittedCalls = notPermittedCalls;
}
@Nullable
public CircuitBreaker.State getState() {
return state;
}
public void setState(@Nullable CircuitBreaker.State state) {
this.state = state;
}
}
| CircuitBreakerDetails |
java | quarkusio__quarkus | extensions/keycloak-authorization/deployment/src/main/java/io/quarkus/keycloak/pep/deployment/KeycloakReflectionBuildStep.java | {
"start": 2617,
"end": 5821
} | class ____ {
@BuildStep
public void registerReflectionItems(BuildProducer<ReflectiveClassBuildItem> reflectiveItems) {
reflectiveItems.produce(ReflectiveClassBuildItem.builder(JsonWebToken.class.getName(),
TokenIntrospectionResponse.class.getName(),
JWSHeader.class.getName(),
AccessToken.class.getName(),
IDToken.class.getName(),
RefreshToken.class.getName(),
AccessTokenResponse.class.getName(),
JSONWebKeySet.class.getName(),
JWK.class.getName(),
StringOrArrayDeserializer.class.getName(),
AccessToken.Access.class.getName(),
AccessToken.Authorization.class.getName(),
AuthorizationRequest.class.getName(),
AuthorizationResponse.class.getName(),
PermissionRequest.class.getName(),
PermissionResponse.class.getName(),
PermissionTicketToken.class.getName(),
Permission.class.getName(),
ServerConfiguration.class.getName(),
ResourceRepresentation.class.getName(),
ScopeRepresentation.class.getName(),
ResourceOwnerRepresentation.class.getName(),
StringListMapDeserializer.class.getName(),
StringOrArrayDeserializer.class.getName(),
MTLSEndpointAliases.class.getName(),
OIDCConfigurationRepresentation.class.getName()).methods().fields().build());
}
@BuildStep
public void registerServiceProviders(BuildProducer<ServiceProviderBuildItem> serviceProvider) {
serviceProvider.produce(new ServiceProviderBuildItem(ClientCredentialsProvider.class.getName(),
ClientIdAndSecretCredentialsProvider.class.getName(),
JWTClientCredentialsProvider.class.getName(),
JWTClientSecretCredentialsProvider.class.getName()));
serviceProvider.produce(new ServiceProviderBuildItem(ClaimInformationPointProviderFactory.class.getName(),
HttpClaimInformationPointProviderFactory.class.getName(),
ClaimsInformationPointProviderFactory.class.getName()));
serviceProvider.produce(new ServiceProviderBuildItem(CryptoProvider.class.getName(),
AuthzClientCryptoProvider.class.getName()));
}
@BuildStep
public void runtimeInit(BuildProducer<RuntimeInitializedClassBuildItem> runtimeInit) {
runtimeInit.produce(new RuntimeInitializedClassBuildItem("org.keycloak.common.util.BouncyIntegration"));
runtimeInit.produce(new RuntimeInitializedClassBuildItem("org.keycloak.common.util.PemUtils"));
runtimeInit.produce(new RuntimeInitializedClassBuildItem("org.keycloak.common.util.DerUtils"));
runtimeInit.produce(new RuntimeInitializedClassBuildItem("org.keycloak.common.util.KeystoreUtil"));
runtimeInit.produce(new RuntimeInitializedClassBuildItem("org.keycloak.common.util.CertificateUtils"));
runtimeInit.produce(new RuntimeInitializedClassBuildItem("org.keycloak.common.util.SecretGenerator"));
}
}
| KeycloakReflectionBuildStep |
java | resilience4j__resilience4j | resilience4j-spring-boot3/src/test/java/io/github/resilience4j/springboot3/service/test/DummyService.java | {
"start": 142,
"end": 479
} | interface ____ {
String BACKEND = "backendA";
String BACKEND_B = "backendB";
void doSomething(boolean throwException) throws IOException;
CompletableFuture<String> longDoSomethingAsync() throws InterruptedException;
CompletableFuture<String> doSomethingAsync(boolean throwException) throws IOException;
}
| DummyService |
java | quarkusio__quarkus | integration-tests/vertx/src/main/java/io/quarkus/it/vertx/JsonTestResource.java | {
"start": 475,
"end": 1960
} | class ____ {
@GET
@Path("/json/sync")
@Produces(APPLICATION_JSON)
public JsonObject jsonSync() {
return new JsonObject().put("Hello", "World");
}
@POST
@Path("/json/sync")
@Consumes(APPLICATION_JSON)
@Produces(TEXT_PLAIN)
public String jsonSync(JsonObject jsonObject) {
return "Hello " + jsonObject.getString("Hello");
}
@GET
@Path("/array/sync")
@Produces(APPLICATION_JSON)
public JsonArray arraySync() {
return new JsonArray().add("Hello").add("World");
}
@POST
@Path("/array/sync")
@Consumes(APPLICATION_JSON)
@Produces(TEXT_PLAIN)
public String arraySync(JsonArray jsonArray) {
return jsonArray.stream().map(String.class::cast).collect(Collectors.joining(" "));
}
@GET
@Path("/json/async")
@Produces(APPLICATION_JSON)
public CompletionStage<JsonObject> jsonAsync() {
return CompletableFuture.completedFuture(new JsonObject().put("Hello", "World"));
}
@GET
@Path("/array/async")
@Produces(APPLICATION_JSON)
public CompletionStage<JsonArray> arrayAsync() {
return CompletableFuture.completedFuture(new JsonArray().add("Hello").add("World"));
}
@GET
@Path("/json/mapping")
@Produces(APPLICATION_JSON)
public String getPet() {
// This test check that the Jackson mapping (used by Json.encode) works.
return Json.encode(new Person("jack", "rabbit"));
}
}
| JsonTestResource |
java | apache__flink | flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/AbstractFileSystemTable.java | {
"start": 1205,
"end": 2070
} | class ____ {
final ObjectIdentifier tableIdentifier;
final Configuration tableOptions;
final DataType physicalRowDataType;
final Path path;
final String defaultPartName;
List<String> partitionKeys;
AbstractFileSystemTable(
ObjectIdentifier tableIdentifier,
DataType physicalRowDataType,
List<String> partitionKeys,
ReadableConfig tableOptions) {
this.tableIdentifier = tableIdentifier;
this.tableOptions = (Configuration) tableOptions;
this.physicalRowDataType = physicalRowDataType;
this.path = new Path(this.tableOptions.get(FileSystemConnectorOptions.PATH));
this.defaultPartName =
this.tableOptions.get(FileSystemConnectorOptions.PARTITION_DEFAULT_NAME);
this.partitionKeys = partitionKeys;
}
}
| AbstractFileSystemTable |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/factories/Factory.java | {
"start": 1343,
"end": 2869
} | interface ____ be added to {@code
* META_INF/services/org.apache.flink.table.factories.Factory} in JAR files.
*
* <p>Every factory declares a set of required and optional options. This information will not be
* used during discovery but is helpful when generating documentation and performing validation. A
* factory may discover further (nested) factories, the options of the nested factories must not be
* declared in the sets of this factory.
*
* <p>It is the responsibility of each factory to perform validation before returning an instance.
*
* <p>For consistency, the following style for key names of {@link ConfigOption} is recommended:
*
* <ul>
* <li>Try to <b>reuse</b> key names as much as possible. Use other factory implementations as an
* example.
* <li>Key names should be declared in <b>lower case</b>. Use "-" instead of dots or camel case to
* split words.
* <li>Key names should be <b>hierarchical</b> where appropriate. Think about how one would define
* such a hierarchy in JSON or YAML file (e.g. {@code sink.bulk-flush.max-actions}).
* <li>In case of a hierarchy, try not to use the higher level again in the key name (e.g. do
* {@code sink.partitioner} instead of {@code sink.sink-partitioner}) to <b>keep the keys
* short</b>.
* <li>Key names which can be templated, e.g. to refer to a specific column, should be listed
* using '#' as the placeholder symbol. For example, use {@code fields.#.min}.
* </ul>
*/
@PublicEvolving
public | can |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/internal/inputstreams/Diff_diff_InputStream_String_Test.java | {
"start": 1100,
"end": 5105
} | class ____ {
private static Diff diff;
@BeforeAll
static void setUpOnce() {
diff = new Diff();
}
private InputStream actual;
private String expected;
@Test
void should_return_empty_diff_list_if_inputstreams_have_equal_content() throws IOException {
// GIVEN
actual = stream("base", "line0", "line1");
expected = joinLines("base", "line0", "line1");
// WHEN
List<Delta<String>> diffs = diff.diff(actual, expected);
// THEN
assertThat(diffs).isEmpty();
}
@Test
void should_return_diffs_if_inputstreams_do_not_have_equal_content() throws IOException {
// GIVEN
actual = stream("base", "line_0", "line_1");
expected = joinLines("base", "line0", "line1");
// WHEN
List<Delta<String>> diffs = diff.diff(actual, expected);
// THEN
assertThat(diffs).hasSize(1)
.first().hasToString(format("Changed content at line 2:%n"
+ "expecting:%n"
+ " [\"line0\",%n"
+ " \"line1\"]%n"
+ "but was:%n"
+ " [\"line_0\",%n"
+ " \"line_1\"]%n"));
}
@Test
void should_return_multiple_diffs_if_inputstreams_contain_multiple_differences() throws IOException {
// GIVEN
actual = stream("base", "line_0", "line1", "line_2");
expected = joinLines("base", "line0", "line1", "line2");
// WHEN
List<Delta<String>> diffs = diff.diff(actual, expected);
// THEN
assertThat(diffs).hasSize(2);
assertThat(diffs.get(0)).hasToString(format("Changed content at line 2:%n"
+ "expecting:%n"
+ " [\"line0\"]%n"
+ "but was:%n"
+ " [\"line_0\"]%n"));
assertThat(diffs.get(1)).hasToString(format("Changed content at line 4:%n"
+ "expecting:%n"
+ " [\"line2\"]%n"
+ "but was:%n"
+ " [\"line_2\"]%n"));
}
@Test
void should_return_diffs_if_content_of_actual_is_shorter_than_content_of_expected() throws IOException {
// GIVEN
actual = stream("base", "line_0");
expected = joinLines("base", "line_0", "line_1");
// WHEN
List<Delta<String>> diffs = diff.diff(actual, expected);
// THEN
assertThat(diffs).hasSize(1);
assertThat(diffs.get(0)).hasToString(format("Missing content at line 3:%n"
+ " [\"line_1\"]%n"));
}
@Test
void should_return_diffs_if_content_of_actual_is_longer_than_content_of_expected() throws IOException {
// GIVEN
actual = stream("base", "line_0", "line_1");
expected = joinLines("base", "line_0");
// WHEN
List<Delta<String>> diffs = diff.diff(actual, expected);
// THEN
assertThat(diffs).hasSize(1);
assertThat(diffs.get(0)).hasToString(format("Extra content at line 3:%n"
+ " [\"line_1\"]%n"));
}
@Test
void should_return_single_diff_line_for_new_line_at_start() throws IOException {
// GIVEN
actual = stream("", "line_0", "line_1", "line_2");
expected = joinLines("line_0", "line_1", "line_2");
// WHEN
List<Delta<String>> diffs = diff.diff(actual, expected);
// THEN
assertThat(diffs).hasSize(1);
assertThat(diffs.get(0)).hasToString(format("Extra content at line 1:%n"
+ " [\"\"]%n"));
}
static String joinLines(String... lines) {
return String.join(System.lineSeparator(), lines);
}
}
| Diff_diff_InputStream_String_Test |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/GridFsEndpointBuilderFactory.java | {
"start": 4619,
"end": 5265
} | class ____
* calling the WriteConcern#valueOf(String) method.
*
* The option is a: <code>com.mongodb.WriteConcern</code> type.
*
* Group: common
*
* @param writeConcern the value to set
* @return the dsl builder
*/
default GridFsEndpointConsumerBuilder writeConcern(com.mongodb.WriteConcern writeConcern) {
doSetProperty("writeConcern", writeConcern);
return this;
}
/**
* Set the WriteConcern for write operations on MongoDB using the
* standard ones. Resolved from the fields of the WriteConcern | by |
java | elastic__elasticsearch | server/src/internalClusterTest/java/org/elasticsearch/search/simple/SimpleSearchIT.java | {
"start": 28995,
"end": 29983
} | class ____ extends Plugin {
@Override
public void onIndexModule(IndexModule indexModule) {
super.onIndexModule(indexModule);
indexModule.addSearchOperationListener(new SearchOperationListener() {
@Override
public void onNewReaderContext(ReaderContext readerContext) {
assertThat(readerContext, not(instanceOf(LegacyReaderContext.class)));
}
@Override
public void onQueryPhase(SearchContext searchContext, long tookInNanos) {
assertThat(searchContext.readerContext(), not(instanceOf(LegacyReaderContext.class)));
}
@Override
public void onFetchPhase(SearchContext searchContext, long tookInNanos) {
assertThat(searchContext.readerContext(), not(instanceOf(LegacyReaderContext.class)));
}
});
}
}
}
| VerifyReaderContextPlugin |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/http/server/reactive/DefaultSslInfo.java | {
"start": 1037,
"end": 2769
} | class ____ implements SslInfo {
private final @Nullable String sessionId;
private final X509Certificate @Nullable [] peerCertificates;
DefaultSslInfo(@Nullable String sessionId, X509Certificate[] peerCertificates) {
Assert.notNull(peerCertificates, "No SSL certificates");
this.sessionId = sessionId;
this.peerCertificates = peerCertificates;
}
DefaultSslInfo(SSLSession session) {
Assert.notNull(session, "SSLSession is required");
this.sessionId = initSessionId(session);
this.peerCertificates = initCertificates(session);
}
@Override
public @Nullable String getSessionId() {
return this.sessionId;
}
@Override
public X509Certificate @Nullable [] getPeerCertificates() {
return this.peerCertificates;
}
private static @Nullable String initSessionId(SSLSession session) {
byte [] bytes = session.getId();
if (bytes == null) {
return null;
}
StringBuilder sb = new StringBuilder();
for (byte b : bytes) {
String digit = Integer.toHexString(b);
if (digit.length() < 2) {
sb.append('0');
}
if (digit.length() > 2) {
digit = digit.substring(digit.length() - 2);
}
sb.append(digit);
}
return sb.toString();
}
private static X509Certificate @Nullable [] initCertificates(SSLSession session) {
Certificate[] certificates;
try {
certificates = session.getPeerCertificates();
}
catch (Throwable ex) {
return null;
}
List<X509Certificate> result = new ArrayList<>(certificates.length);
for (Certificate certificate : certificates) {
if (certificate instanceof X509Certificate x509Certificate) {
result.add(x509Certificate);
}
}
return (!result.isEmpty() ? result.toArray(new X509Certificate[0]) : null);
}
}
| DefaultSslInfo |
java | quarkusio__quarkus | integration-tests/reactive-messaging-pulsar/src/test/java/io/quarkus/it/pulsar/PulsarResource.java | {
"start": 205,
"end": 5317
} | class ____ implements QuarkusTestResourceLifecycleManager {
private PulsarContainer container;
private boolean pem = false;
private boolean jks = false;
private String keyStorePassword;
private String trustStorePassword;
private String tlsConfigName;
@Override
public void init(Map<String, String> initArgs) {
pem = Boolean.parseBoolean(initArgs.get("isPem"));
jks = Boolean.parseBoolean(initArgs.get("isJks"));
keyStorePassword = initArgs.get("keyStorePassword");
trustStorePassword = initArgs.get("trustStorePassword");
tlsConfigName = initArgs.get("pulsar.tls-configuration-name");
}
@Override
public Map<String, String> start() {
container = new PulsarContainer();
if (pem) {
configurePem();
} else if (jks) {
configureJks();
}
container.start();
Map<String, String> cfg = new HashMap<>();
cfg.put("pulsar.client.serviceUrl", container.getPulsarBrokerUrl());
if (tlsConfigName != null) {
cfg.put("mp.messaging.connector.smallrye-pulsar.tls-configuration-name", tlsConfigName);
}
return cfg;
}
private void configureJks() {
configureCommonTls();
container
.useTls()
.withCopyFileToContainer(MountableFile.forHostPath("target/certs/pulsar-keystore.jks"),
"/pulsar/conf/pulsar-keystore.jks")
.withCopyFileToContainer(MountableFile.forHostPath("target/certs/pulsar-server-truststore.jks"),
"/pulsar/conf/pulsar-server-truststore.jks")
// broker client
.withCopyFileToContainer(MountableFile.forHostPath("target/certs/pulsar-client-keystore.jks"),
"/pulsar/conf/pulsar-client-keystore.jks")
.withCopyFileToContainer(MountableFile.forHostPath("target/certs/pulsar-client-truststore.jks"),
"/pulsar/conf/pulsar-client-truststore.jks");
addConf("tlsEnabledWithKeyStore", "true");
addConf("tlsKeyStoreType", "JKS");
addConf("tlsKeyStore", "/pulsar/conf/pulsar-keystore.jks");
addConf("tlsKeyStorePassword", keyStorePassword);
addConf("tlsTrustStoreType", "JKS");
addConf("tlsTrustStore", "/pulsar/conf/pulsar-server-truststore.jks");
addConf("tlsTrustStorePassword", trustStorePassword);
// broker client
addConf("brokerClientTlsEnabledWithKeyStore", "true");
addConf("brokerClientTlsTrustStoreType", "JKS");
addConf("brokerClientTlsTrustStore", "/pulsar/conf/pulsar-client-truststore.jks");
addConf("brokerClientTlsTrustStorePassword", trustStorePassword);
addConf("brokerClientTlsKeyStoreType", "JKS");
addConf("brokerClientTlsKeyStore", "/pulsar/conf/pulsar-client-keystore.jks");
addConf("brokerClientTlsKeyStorePassword", keyStorePassword);
}
private void configurePem() {
configureCommonTls();
container
.useTls()
.withCopyFileToContainer(MountableFile.forHostPath("target/certs/pulsar.crt"), "/pulsar/conf/pulsar.crt")
.withCopyFileToContainer(MountableFile.forHostPath("target/certs/pulsar.key"), "/pulsar/conf/pulsar.key")
.withCopyFileToContainer(MountableFile.forHostPath("target/certs/pulsar-server-ca.crt"),
"/pulsar/conf/pulsar-server-ca.crt")
// broker client
.withCopyFileToContainer(MountableFile.forHostPath("target/certs/pulsar-client.crt"),
"/pulsar/conf/pulsar-client.crt")
.withCopyFileToContainer(MountableFile.forHostPath("target/certs/pulsar-client.key"),
"/pulsar/conf/pulsar-client.key")
.withCopyFileToContainer(MountableFile.forHostPath("target/certs/pulsar-client-ca.crt"),
"/pulsar/conf/pulsar-client-ca.crt");
addConf("tlsRequireTrustedClientCertOnConnect", "true");
addConf("tlsTrustCertsFilePath", "/pulsar/conf/pulsar-server-ca.crt");
addConf("tlsCertificateFilePath", "/pulsar/conf/pulsar.crt");
addConf("tlsKeyFilePath", "/pulsar/conf/pulsar.key");
// broker client
addConf("brokerClientTrustCertsFilePath", "/pulsar/conf/pulsar-client-ca.crt");
addConf("brokerClientCertificateFilePath", "/pulsar/conf/pulsar-client.crt");
addConf("brokerClientKeyFilePath", "/pulsar/conf/pulsar-client.key");
}
private void addConf(String key, String value) {
container.addEnv("PULSAR_PREFIX_" + key, value);
}
private void configureCommonTls() {
addConf("brokerServicePort", "");
addConf("brokerServicePortTls", "6651");
addConf("webServicePortTls", "8443");
addConf("tlsEnabled", "true");
addConf("brokerClientTlsEnabled", "true");
}
@Override
public void stop() {
if (container != null) {
container.close();
}
}
}
| PulsarResource |
java | apache__camel | components/camel-jetty-common/src/main/java/org/apache/camel/component/jetty/CamelContinuationServlet.java | {
"start": 2053,
"end": 13606
} | class ____ extends CamelServlet {
static final String TIMEOUT_ERROR = "CamelTimeoutException";
static final String EXCHANGE_ATTRIBUTE_NAME = "CamelExchange";
static final String EXCHANGE_ATTRIBUTE_ID = "CamelExchangeId";
private static final long serialVersionUID = 1L;
// we must remember expired exchanges as Jetty will initiate a new continuation when we send
// back the error when timeout occurred, and thus in the async callback we cannot check the
// continuation if it was previously expired. So that's why we have our own map for that
private final Map<String, String> expiredExchanges = new ConcurrentHashMap<>();
@Override
protected void doService(HttpServletRequest request, HttpServletResponse response) {
log.trace("Service: {}", request);
try {
handleDoService(request, response);
} catch (Exception e) {
// do not leak exception back to caller
log.warn("Error handling request due to: {}", e.getMessage(), e);
sendError(response, HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
}
}
protected void handleDoService(final HttpServletRequest request, final HttpServletResponse response)
throws Exception {
// is there a consumer registered for the request.
HttpConsumer consumer = getServletResolveConsumerStrategy().resolve(request, getConsumers());
if (consumer == null) {
// okay we cannot process this requires so return either 404 or 405.
// to know if its 405 then we need to check if any other HTTP method would have a consumer for the "same" request
boolean hasAnyMethod = METHODS.stream()
.anyMatch(m -> getServletResolveConsumerStrategy().isHttpMethodAllowed(request, m, getConsumers()));
if (hasAnyMethod) {
log.debug("No consumer to service request {} as method {} is not allowed", request, request.getMethod());
sendError(response, HttpServletResponse.SC_METHOD_NOT_ALLOWED);
return;
} else {
log.debug("No consumer to service request {} as resource is not found", request);
sendError(response, HttpServletResponse.SC_NOT_FOUND);
return;
}
}
// figure out if continuation is enabled and what timeout to use
boolean useContinuation = false;
Long continuationTimeout = null;
HttpCommonEndpoint endpoint = consumer.getEndpoint();
if (endpoint instanceof JettyHttpEndpoint) {
JettyHttpEndpoint jettyEndpoint = (JettyHttpEndpoint) endpoint;
Boolean epUseContinuation = jettyEndpoint.getUseContinuation();
Long epContinuationTimeout = jettyEndpoint.getContinuationTimeout();
if (epUseContinuation != null) {
useContinuation = epUseContinuation;
} else {
useContinuation = jettyEndpoint.getComponent().isUseContinuation();
}
if (epContinuationTimeout != null) {
continuationTimeout = epContinuationTimeout;
} else {
continuationTimeout = jettyEndpoint.getComponent().getContinuationTimeout();
}
}
if (useContinuation) {
log.trace("Start request with continuation timeout of {}",
continuationTimeout != null ? continuationTimeout : "jetty default");
} else {
log.trace(
"Usage of continuation is disabled, either by component or endpoint configuration, fallback to normal servlet processing instead");
super.doService(request, response);
return;
}
// if its an OPTIONS request then return which method is allowed
if ("OPTIONS".equals(request.getMethod()) && !consumer.isOptionsEnabled()) {
String allowedMethods = METHODS.stream()
.filter(m -> getServletResolveConsumerStrategy().isHttpMethodAllowed(request, m, getConsumers()))
.collect(Collectors.joining(","));
if (allowedMethods == null && consumer.getEndpoint().getHttpMethodRestrict() != null) {
allowedMethods = consumer.getEndpoint().getHttpMethodRestrict();
}
if (allowedMethods == null) {
// allow them all
allowedMethods = "GET,HEAD,POST,PUT,DELETE,TRACE,OPTIONS,CONNECT,PATCH";
}
if (!allowedMethods.contains("OPTIONS")) {
allowedMethods = allowedMethods + ",OPTIONS";
}
response.addHeader("Allow", allowedMethods);
response.setStatus(HttpServletResponse.SC_OK);
return;
}
if (consumer.getEndpoint().getHttpMethodRestrict() != null) {
Iterator<?> it = ObjectHelper.createIterable(consumer.getEndpoint().getHttpMethodRestrict()).iterator();
boolean match = false;
while (it.hasNext()) {
String method = it.next().toString();
if (method.equalsIgnoreCase(request.getMethod())) {
match = true;
break;
}
}
if (!match) {
sendError(response, HttpServletResponse.SC_METHOD_NOT_ALLOWED);
return;
}
}
if ("TRACE".equals(request.getMethod()) && !consumer.isTraceEnabled()) {
sendError(response, HttpServletResponse.SC_METHOD_NOT_ALLOWED);
return;
}
// we do not support java serialized objects unless explicit enabled
String contentType = request.getContentType();
if (HttpConstants.CONTENT_TYPE_JAVA_SERIALIZED_OBJECT.equals(contentType)
&& !consumer.getEndpoint().getComponent().isAllowJavaSerializedObject()) {
sendError(response, HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);
return;
}
final Exchange result = (Exchange) request.getAttribute(EXCHANGE_ATTRIBUTE_NAME);
if (result == null) {
// no asynchronous result so leverage continuation
AsyncContext asyncContext = request.startAsync();
if (isInitial(request) && continuationTimeout != null) {
// set timeout on initial
asyncContext.setTimeout(continuationTimeout.longValue());
}
asyncContext.addListener(new ExpiredListener(), request, response);
// are we suspended and a request is dispatched initially?
if (consumer.isSuspended() && isInitial(request)) {
sendError(response, HttpServletResponse.SC_SERVICE_UNAVAILABLE);
return;
}
// a new request so create an exchange
// must be prototype scoped (not pooled) so we create the exchange via endpoint
final Exchange exchange = consumer.createExchange(false);
exchange.setPattern(ExchangePattern.InOut);
if (consumer.getEndpoint().isBridgeEndpoint()) {
exchange.setProperty(Exchange.SKIP_GZIP_ENCODING, Boolean.TRUE);
exchange.setProperty(Exchange.SKIP_WWW_FORM_URLENCODED, Boolean.TRUE);
}
if (consumer.getEndpoint().isDisableStreamCache()) {
exchange.setProperty(Exchange.DISABLE_HTTP_STREAM_CACHE, Boolean.TRUE);
}
if (contentType != null) {
String normalizedCharset = IOHelper.getCharsetNameFromContentType(contentType);
exchange.setProperty(ExchangePropertyKey.CHARSET_NAME, normalizedCharset);
}
// reuse existing http message if pooled
Message msg = exchange.getIn();
if (msg instanceof HttpMessage) {
HttpMessage hm = (HttpMessage) msg;
hm.init(exchange, endpoint, request, response);
} else {
exchange.setIn(new HttpMessage(exchange, endpoint, request, response));
}
// set context path as header
String contextPath = consumer.getEndpoint().getPath();
exchange.getIn().setHeader(JettyHttpConstants.SERVLET_CONTEXT_PATH, contextPath);
updateHttpPath(exchange, contextPath);
if (log.isTraceEnabled()) {
log.trace("Suspending continuation of exchangeId: {}", exchange.getExchangeId());
}
request.setAttribute(EXCHANGE_ATTRIBUTE_ID, exchange.getExchangeId());
// we want to handle the UoW
UnitOfWork uow = exchange.getUnitOfWork();
if (uow == null) {
consumer.createUoW(exchange);
} else if (uow.onPrepare(exchange)) {
// need to re-attach uow
exchange.getExchangeExtension().setUnitOfWork(uow);
}
ClassLoader oldTccl = overrideTccl(exchange);
if (log.isTraceEnabled()) {
log.trace("Processing request for exchangeId: {}", exchange.getExchangeId());
}
// use the asynchronous API to process the exchange
consumer.getAsyncProcessor().process(exchange, new AsyncCallback() {
public void done(boolean doneSync) {
// check if the exchange id is already expired
boolean expired = expiredExchanges.remove(exchange.getExchangeId()) != null;
if (!expired) {
if (log.isTraceEnabled()) {
log.trace("Resuming continuation of exchangeId: {}", exchange.getExchangeId());
}
// resume processing after both, sync and async callbacks
request.setAttribute(EXCHANGE_ATTRIBUTE_NAME, exchange);
asyncContext.dispatch();
} else {
log.warn("Cannot resume expired continuation of exchangeId: {}", exchange.getExchangeId());
consumer.releaseExchange(exchange, false);
}
}
});
if (oldTccl != null) {
restoreTccl(exchange, oldTccl);
}
// return to let Jetty continuation to work as it will resubmit and invoke the service
// method again when its resumed
return;
}
try {
// now lets output to the response
if (log.isTraceEnabled()) {
log.trace("Resumed continuation and writing response for exchangeId: {}", result.getExchangeId());
}
Integer bs = consumer.getEndpoint().getResponseBufferSize();
if (bs != null) {
log.trace("Using response buffer size: {}", bs);
response.setBufferSize(bs);
}
consumer.getBinding().writeResponse(result, response);
} catch (IOException e) {
log.error("Error processing request", e);
throw e;
} catch (Exception e) {
log.error("Error processing request", e);
throw new CamelException(e);
} finally {
consumer.doneUoW(result);
consumer.releaseExchange(result, false);
}
}
private boolean isInitial(HttpServletRequest request) {
return request.getDispatcherType() != DispatcherType.ASYNC;
}
private | CamelContinuationServlet |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/BertTokenization.java | {
"start": 714,
"end": 3325
} | class ____ extends Tokenization {
public static final ParseField NAME = new ParseField("bert");
public static final String MASK_TOKEN = "[MASK]";
public static ConstructingObjectParser<BertTokenization, Void> createParser(boolean ignoreUnknownFields) {
ConstructingObjectParser<BertTokenization, Void> parser = new ConstructingObjectParser<>(
"bert_tokenization",
ignoreUnknownFields,
a -> new BertTokenization(
(Boolean) a[0],
(Boolean) a[1],
(Integer) a[2],
a[3] == null ? null : Truncate.fromString((String) a[3]),
(Integer) a[4]
)
);
Tokenization.declareCommonFields(parser);
return parser;
}
private static final ConstructingObjectParser<BertTokenization, Void> LENIENT_PARSER = createParser(true);
private static final ConstructingObjectParser<BertTokenization, Void> STRICT_PARSER = createParser(false);
public static BertTokenization fromXContent(XContentParser parser, boolean lenient) {
return lenient ? LENIENT_PARSER.apply(parser, null) : STRICT_PARSER.apply(parser, null);
}
public BertTokenization(
@Nullable Boolean doLowerCase,
@Nullable Boolean withSpecialTokens,
@Nullable Integer maxSequenceLength,
@Nullable Truncate truncate,
@Nullable Integer span
) {
super(doLowerCase, withSpecialTokens, maxSequenceLength, truncate, span);
}
public BertTokenization(StreamInput in) throws IOException {
super(in);
}
@Override
Tokenization buildWindowingTokenization(int updatedMaxSeqLength, int updatedSpan) {
return new BertTokenization(this.doLowerCase, this.withSpecialTokens, updatedMaxSeqLength, Truncate.NONE, updatedSpan);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
}
XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
return builder;
}
@Override
public String getMaskToken() {
return MASK_TOKEN;
}
@Override
public String getWriteableName() {
return NAME.getPreferredName();
}
@Override
public String getName() {
return NAME.getPreferredName();
}
@Override
public boolean equals(Object o) {
if (o == null || getClass() != o.getClass()) return false;
return super.equals(o);
}
@Override
public int hashCode() {
return super.hashCode();
}
}
| BertTokenization |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/context/properties/ConfigurationPropertiesTests.java | {
"start": 73133,
"end": 73373
} | class ____ {
@Autowired
private BasicProperties properties;
@PostConstruct
void init() {
assertThat(this.properties).isNotNull();
}
@Nullable String getName() {
return this.properties.name;
}
}
| BasicPropertiesConsumer |
java | spring-projects__spring-framework | spring-webmvc/src/test/java/org/springframework/web/servlet/view/groovy/GroovyMarkupViewTests.java | {
"start": 6489,
"end": 6726
} | class ____ extends MarkupTemplateEngine {
TestTemplateEngine() {
super(new TemplateConfiguration());
}
@Override
public Template createTemplate(Reader reader) {
return null;
}
}
@Configuration
static | TestTemplateEngine |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/TestInternalTimerService.java | {
"start": 5574,
"end": 8001
} | class ____<K, N> implements Comparable<Timer<K, N>> {
private final long timestamp;
private final K key;
private final N namespace;
public Timer(long timestamp, K key, N namespace) {
this.timestamp = timestamp;
this.key = key;
this.namespace = namespace;
}
public long getTimestamp() {
return timestamp;
}
public K getKey() {
return key;
}
public N getNamespace() {
return namespace;
}
@Override
public int compareTo(Timer<K, N> o) {
return Long.compare(this.timestamp, o.timestamp);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Timer<?, ?> timer = (Timer<?, ?>) o;
return timestamp == timer.timestamp
&& key.equals(timer.key)
&& namespace.equals(timer.namespace);
}
@Override
public int hashCode() {
int result = (int) (timestamp ^ (timestamp >>> 32));
result = 31 * result + key.hashCode();
result = 31 * result + namespace.hashCode();
return result;
}
@Override
public String toString() {
return "Timer{"
+ "timestamp="
+ timestamp
+ ", key="
+ key
+ ", namespace="
+ namespace
+ '}';
}
}
public int numProcessingTimeTimers() {
return processingTimeTimers.size();
}
public int numEventTimeTimers() {
return watermarkTimers.size();
}
public int numProcessingTimeTimers(N namespace) {
int count = 0;
for (Timer<K, N> timer : processingTimeTimers) {
if (timer.getNamespace().equals(namespace)) {
count++;
}
}
return count;
}
public int numEventTimeTimers(N namespace) {
int count = 0;
for (Timer<K, N> timer : watermarkTimers) {
if (timer.getNamespace().equals(namespace)) {
count++;
}
}
return count;
}
}
| Timer |
java | alibaba__nacos | api/src/main/java/com/alibaba/nacos/api/grpc/auto/BiRequestStreamGrpc.java | {
"start": 9159,
"end": 10278
} | class ____
extends BiRequestStreamBaseDescriptorSupplier
implements io.grpc.protobuf.ProtoMethodDescriptorSupplier {
private final String methodName;
BiRequestStreamMethodDescriptorSupplier(String methodName) {
this.methodName = methodName;
}
@Override
public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() {
return getServiceDescriptor().findMethodByName(methodName);
}
}
private static volatile io.grpc.ServiceDescriptor serviceDescriptor;
public static io.grpc.ServiceDescriptor getServiceDescriptor() {
io.grpc.ServiceDescriptor result = serviceDescriptor;
if (result == null) {
synchronized (BiRequestStreamGrpc.class) {
result = serviceDescriptor;
if (result == null) {
serviceDescriptor = result = io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME)
.setSchemaDescriptor(new BiRequestStreamFileDescriptorSupplier())
.addMethod(getRequestBiStreamMethod())
.build();
}
}
}
return result;
}
}
| BiRequestStreamMethodDescriptorSupplier |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/bug/Bug_for_kongmu.java | {
"start": 1094,
"end": 1173
} | class ____ {
// 在1.2.48中,必须为static
//public static | Result |
java | spring-projects__spring-framework | spring-messaging/src/test/java/org/springframework/messaging/simp/user/UserRegistryMessageHandlerTests.java | {
"start": 1931,
"end": 6437
} | class ____ {
private SimpUserRegistry localRegistry = mock();
private MessageChannel brokerChannel = mock();
private TaskScheduler taskScheduler = mock();
private MultiServerUserRegistry multiServerRegistry = new MultiServerUserRegistry(this.localRegistry);
private MessageConverter converter = new JacksonJsonMessageConverter();
private UserRegistryMessageHandler handler;
@BeforeEach
void setUp() {
SimpMessagingTemplate brokerTemplate = new SimpMessagingTemplate(this.brokerChannel);
brokerTemplate.setMessageConverter(this.converter);
this.handler = new UserRegistryMessageHandler(this.multiServerRegistry, brokerTemplate,
"/topic/simp-user-registry", this.taskScheduler);
}
@Test
void brokerAvailableEvent() {
Runnable runnable = getUserRegistryTask();
assertThat(runnable).isNotNull();
}
@Test
@SuppressWarnings({ "unchecked", "rawtypes" })
void brokerUnavailableEvent() {
ScheduledFuture future = mock();
given(this.taskScheduler.scheduleWithFixedDelay(any(Runnable.class), any(Duration.class))).willReturn(future);
BrokerAvailabilityEvent event = new BrokerAvailabilityEvent(true, this);
this.handler.onApplicationEvent(event);
verifyNoMoreInteractions(future);
event = new BrokerAvailabilityEvent(false, this);
this.handler.onApplicationEvent(event);
verify(future).cancel(true);
}
@Test
@SuppressWarnings("rawtypes")
void broadcastRegistry() {
given(this.brokerChannel.send(any())).willReturn(true);
TestSimpUser simpUser1 = new TestSimpUser("joe");
TestSimpUser simpUser2 = new TestSimpUser("jane");
simpUser1.addSessions(new TestSimpSession("123"));
simpUser1.addSessions(new TestSimpSession("456"));
HashSet<SimpUser> simpUsers = new HashSet<>(Arrays.asList(simpUser1, simpUser2));
given(this.localRegistry.getUsers()).willReturn(simpUsers);
getUserRegistryTask().run();
ArgumentCaptor<Message> captor = ArgumentCaptor.forClass(Message.class);
verify(this.brokerChannel).send(captor.capture());
Message<?> message = captor.getValue();
assertThat(message).isNotNull();
MessageHeaders headers = message.getHeaders();
assertThat(SimpMessageHeaderAccessor.getDestination(headers)).isEqualTo("/topic/simp-user-registry");
MultiServerUserRegistry remoteRegistry = new MultiServerUserRegistry(mock());
remoteRegistry.addRemoteRegistryDto(message, this.converter, 20000);
assertThat(remoteRegistry.getUserCount()).isEqualTo(2);
assertThat(remoteRegistry.getUser("joe")).isNotNull();
assertThat(remoteRegistry.getUser("jane")).isNotNull();
}
@Test
void handleMessage() {
TestSimpUser simpUser1 = new TestSimpUser("joe");
TestSimpUser simpUser2 = new TestSimpUser("jane");
simpUser1.addSessions(new TestSimpSession("123"));
simpUser2.addSessions(new TestSimpSession("456"));
HashSet<SimpUser> simpUsers = new HashSet<>(Arrays.asList(simpUser1, simpUser2));
SimpUserRegistry remoteUserRegistry = mock();
given(remoteUserRegistry.getUserCount()).willReturn(2);
given(remoteUserRegistry.getUsers()).willReturn(simpUsers);
MultiServerUserRegistry remoteRegistry = new MultiServerUserRegistry(remoteUserRegistry);
Message<?> message = this.converter.toMessage(remoteRegistry.getLocalRegistryDto(), null);
this.handler.handleMessage(message);
assertThat(remoteRegistry.getUserCount()).isEqualTo(2);
assertThat(this.multiServerRegistry.getUser("joe")).isNotNull();
assertThat(this.multiServerRegistry.getUser("jane")).isNotNull();
}
@Test
void handleMessageFromOwnBroadcast() {
TestSimpUser simpUser = new TestSimpUser("joe");
simpUser.addSessions(new TestSimpSession("123"));
given(this.localRegistry.getUserCount()).willReturn(1);
given(this.localRegistry.getUsers()).willReturn(Collections.singleton(simpUser));
assertThat(this.multiServerRegistry.getUserCount()).isEqualTo(1);
Message<?> message = this.converter.toMessage(this.multiServerRegistry.getLocalRegistryDto(), null);
this.multiServerRegistry.addRemoteRegistryDto(message, this.converter, 20000);
assertThat(this.multiServerRegistry.getUserCount()).isEqualTo(1);
}
private Runnable getUserRegistryTask() {
BrokerAvailabilityEvent event = new BrokerAvailabilityEvent(true, this);
this.handler.onApplicationEvent(event);
ArgumentCaptor<? extends Runnable> captor = ArgumentCaptor.forClass(Runnable.class);
verify(this.taskScheduler).scheduleWithFixedDelay(captor.capture(), eq(Duration.ofMillis(10000L)));
return captor.getValue();
}
}
| UserRegistryMessageHandlerTests |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java | {
"start": 11236,
"end": 11739
} | class ____ extends
Mapper<LongWritable, AccessRecord, Text, LongWritable> {
LongWritable ONE = new LongWritable(1L);
@Override
public void map(LongWritable key, AccessRecord value, Context context)
throws IOException, InterruptedException {
Text oKey = new Text(value.url);
context.write(oKey, ONE);
}
}
/**
* Reducer sums up the pageviews and emits a PageviewRecord,
* which will correspond to one tuple in the db.
*/
static | PageviewMapper |
java | spring-projects__spring-boot | build-plugin/spring-boot-maven-plugin/src/main/java/org/springframework/boot/maven/ProcessAotMojo.java | {
"start": 1525,
"end": 2656
} | class ____ extends AbstractAotMojo {
private static final String AOT_PROCESSOR_CLASS_NAME = "org.springframework.boot.SpringApplicationAotProcessor";
/**
* Directory containing the classes and resource files that should be packaged into
* the archive.
*/
@Parameter(defaultValue = "${project.build.outputDirectory}", required = true)
@SuppressWarnings("NullAway.Init")
private File classesDirectory;
/**
* Directory containing the generated sources.
*/
@Parameter(defaultValue = "${project.build.directory}/spring-aot/main/sources", required = true)
@SuppressWarnings("NullAway.Init")
private File generatedSources;
/**
* Directory containing the generated resources.
*/
@Parameter(defaultValue = "${project.build.directory}/spring-aot/main/resources", required = true)
@SuppressWarnings("NullAway.Init")
private File generatedResources;
/**
* Directory containing the generated classes.
*/
@Parameter(defaultValue = "${project.build.directory}/spring-aot/main/classes", required = true)
@SuppressWarnings("NullAway.Init")
private File generatedClasses;
/**
* Name of the main | ProcessAotMojo |
java | apache__camel | core/camel-support/src/main/java/org/apache/camel/support/builder/RecordableInputStream.java | {
"start": 2742,
"end": 3213
} | class ____ extends ByteArrayOutputStream {
public void trim(int head, int tail) {
System.arraycopy(buf, head, buf, 0, count - head - tail);
count -= head + tail;
}
public byte[] toByteArray(int len) {
byte[] b = new byte[len];
System.arraycopy(buf, 0, b, 0, len);
return b;
}
byte[] getByteArray() {
return buf;
}
}
}
| TrimmableByteArrayOutputStream |
java | elastic__elasticsearch | x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestUpdateConnectorSyncJobIngestionStatsAction.java | {
"start": 1035,
"end": 2216
} | class ____ extends BaseRestHandler {
@Override
public String getName() {
return "connector_sync_job_update_ingestion_stats";
}
@Override
public List<Route> routes() {
return List.of(
new Route(
RestRequest.Method.PUT,
"/" + EnterpriseSearch.CONNECTOR_SYNC_JOB_API_ENDPOINT + "/{" + CONNECTOR_SYNC_JOB_ID_PARAM + "}/_stats"
)
);
}
@Override
protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException {
try (XContentParser parser = restRequest.contentParser()) {
UpdateConnectorSyncJobIngestionStatsAction.Request request = UpdateConnectorSyncJobIngestionStatsAction.Request.fromXContent(
parser,
restRequest.param(CONNECTOR_SYNC_JOB_ID_PARAM)
);
return channel -> client.execute(
UpdateConnectorSyncJobIngestionStatsAction.INSTANCE,
request,
new RestToXContentListener<>(channel, ConnectorUpdateActionResponse::status)
);
}
}
}
| RestUpdateConnectorSyncJobIngestionStatsAction |
java | apache__camel | core/camel-api/src/main/java/org/apache/camel/ProducerTemplate.java | {
"start": 43960,
"end": 44877
} | class ____)
* @throws CamelExecutionException if the processing of the exchange failed
*/
Object requestBodyAndHeaders(Object body, Map<String, Object> headers) throws CamelExecutionException;
/**
* Sends the body to an endpoint with the specified headers and header values. Uses an {@link ExchangePattern#InOut}
* message exchange pattern. <br/>
* <br/>
* <p/>
* <b>Notice:</b> that if the processing of the exchange failed with an Exception it is thrown from this method as a
* {@link org.apache.camel.CamelExecutionException} with the caused exception wrapped.
*
* @param endpoint the endpoint URI to send to
* @param body the payload to send
* @param headers headers
* @param type the expected response type
* @return the result (see | javadoc |
java | elastic__elasticsearch | x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ai21/completion/Ai21ChatCompletionResponseHandlerTests.java | {
"start": 1322,
"end": 6084
} | class ____ extends ESTestCase {
private final Ai21ChatCompletionResponseHandler responseHandler = new Ai21ChatCompletionResponseHandler(
"chat completions",
(a, b) -> mock()
);
public void testFailNotFound() throws IOException {
var responseJson = XContentHelper.stripWhitespace("""
{
"detail": "Not Found"
}
""");
var errorJson = invalidResponseJson(responseJson, 404);
assertThat(errorJson, is(XContentHelper.stripWhitespace("""
{
"error" : {
"code" : "not_found",
"message" : "Resource not found at [https://api.ai21.com/studio/v1/chat/completions] for request from inference entity id \
[id] status [404]. Error message: [{\\"detail\\":\\"Not Found\\"}]",
"type" : "ai21_error"
}
}""")));
}
public void testFailUnauthorized() throws IOException {
var responseJson = XContentHelper.stripWhitespace("""
{
"detail": "Forbidden: Bad or missing Apikey/JWT."
}
""");
var errorJson = invalidResponseJson(responseJson, 401);
assertThat(errorJson, is(XContentHelper.stripWhitespace("""
{
"error" : {
"code" : "unauthorized",
"message" : "Received an authentication error status code for request from inference entity id [id] status [401]. Error \
message: [{\\"detail\\":\\"Forbidden: Bad or missing Apikey/JWT.\\"}]",
"type" : "ai21_error"
}
}""")));
}
public void testFailUnprocessableEntity() throws IOException {
var responseJson = XContentHelper.stripWhitespace("""
{
"detail": "The provided model is not supported. See https://docs.ai21.com/docs/jamba-foundation-models#api-versioning \
for a list of supported models"
}
""");
var errorJson = invalidResponseJson(responseJson, 422);
assertThat(errorJson, is(XContentHelper.stripWhitespace("""
{
"error" : {
"code" : "unprocessable_entity",
"message" : "Received an input validation error response for request from inference entity id [id] status [422]. \
Error message: [{\\"detail\\":\\"The provided model is not supported. \
See https://docs.ai21.com/docs/jamba-foundation-models#api-versioning for a list of supported models\\"}]",
"type" : "ai21_error"
}
}""")));
}
private String invalidResponseJson(String responseJson, int statusCode) throws IOException {
var exception = invalidResponse(responseJson, statusCode);
assertThat(exception, isA(RetryException.class));
assertThat(unwrapCause(exception), isA(UnifiedChatCompletionException.class));
return toJson((UnifiedChatCompletionException) unwrapCause(exception));
}
private Exception invalidResponse(String responseJson, int statusCode) {
return expectThrows(
RetryException.class,
() -> responseHandler.validateResponse(
mock(),
mock(),
mockRequest(),
new HttpResult(mockErrorResponse(statusCode), responseJson.getBytes(StandardCharsets.UTF_8))
)
);
}
private static Request mockRequest() throws URISyntaxException {
var request = mock(Request.class);
when(request.getInferenceEntityId()).thenReturn("id");
when(request.isStreaming()).thenReturn(true);
when(request.getURI()).thenReturn(new URI("https://api.ai21.com/studio/v1/chat/completions"));
return request;
}
private static HttpResponse mockErrorResponse(int statusCode) {
var statusLine = mock(StatusLine.class);
when(statusLine.getStatusCode()).thenReturn(statusCode);
var response = mock(HttpResponse.class);
when(response.getStatusLine()).thenReturn(statusLine);
return response;
}
private String toJson(UnifiedChatCompletionException e) throws IOException {
try (var builder = XContentFactory.jsonBuilder()) {
e.toXContentChunked(EMPTY_PARAMS).forEachRemaining(xContent -> {
try {
xContent.toXContent(builder, EMPTY_PARAMS);
} catch (IOException ex) {
throw new RuntimeException(ex);
}
});
return XContentHelper.convertToJson(BytesReference.bytes(builder), false, builder.contentType());
}
}
}
| Ai21ChatCompletionResponseHandlerTests |
java | resilience4j__resilience4j | resilience4j-retry/src/main/java/io/github/resilience4j/retry/internal/RetryImpl.java | {
"start": 15654,
"end": 16501
} | class ____ implements Metrics {
private RetryMetrics() {
}
@Override
public long getNumberOfSuccessfulCallsWithoutRetryAttempt() {
return succeededWithoutRetryCounter.longValue();
}
@Override
public long getNumberOfFailedCallsWithoutRetryAttempt() {
return failedWithoutRetryCounter.longValue();
}
@Override
public long getNumberOfSuccessfulCallsWithRetryAttempt() {
return succeededAfterRetryCounter.longValue();
}
@Override
public long getNumberOfFailedCallsWithRetryAttempt() {
return failedAfterRetryCounter.longValue();
}
@Override
public long getNumberOfTotalCalls() {
return totalAttemptsCounter.longValue();
}
}
private | RetryMetrics |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/node/JsonNodeBasicTest.java | {
"start": 510,
"end": 11617
} | class ____ extends NodeTestBase
{
private final ObjectMapper MAPPER = newJsonMapper();
@Test
public void testBoolean() throws Exception
{
BooleanNode f = BooleanNode.getFalse();
assertNotNull(f);
assertTrue(f.isBoolean());
assertSame(f, BooleanNode.valueOf(false));
assertStandardEquals(f);
assertFalse(f.booleanValue());
assertFalse(f.asBoolean());
assertEquals("false", f.asString());
assertEquals(JsonToken.VALUE_FALSE, f.asToken());
assertFalse(f.isNumber());
assertFalse(f.canConvertToInt());
assertFalse(f.canConvertToLong());
assertFalse(f.canConvertToExactIntegral());
// and ditto for true
BooleanNode t = BooleanNode.getTrue();
assertNotNull(t);
assertTrue(t.isBoolean());
assertSame(t, BooleanNode.valueOf(true));
assertStandardEquals(t);
assertTrue(t.booleanValue());
assertTrue(t.asBoolean());
assertEquals("true", t.asString());
assertEquals(JsonToken.VALUE_TRUE, t.asToken());
// Booleans cannot be coerced to numbers in 3.0
//assertNodeNumbers(f, 0, 0.0);
//assertNodeNumbers(t, 1, 1.0);
JsonNode result = objectMapper().readTree("true\n");
assertFalse(result.isNull());
assertFalse(result.isNumber());
assertFalse(result.isString());
assertTrue(result.isBoolean());
assertType(result, BooleanNode.class);
assertTrue(result.booleanValue());
assertEquals("true", result.asString());
assertFalse(result.isMissingNode());
// also, equality should work ok
assertEquals(result, BooleanNode.valueOf(true));
assertEquals(result, BooleanNode.getTrue());
assertNonContainerStreamMethods(f);
}
@Test
public void testBinary() throws Exception
{
assertNull(BinaryNode.valueOf(null));
assertNull(BinaryNode.valueOf(null, 0, 0));
BinaryNode empty = BinaryNode.valueOf(new byte[1], 0, 0);
assertSame(BinaryNode.EMPTY_BINARY_NODE, empty);
assertStandardEquals(empty);
byte[] data = new byte[3];
data[1] = (byte) 3;
BinaryNode n = BinaryNode.valueOf(data, 1, 1);
assertFalse(n.isNumber());
assertFalse(n.canConvertToInt());
assertFalse(n.canConvertToLong());
assertFalse(n.canConvertToExactIntegral());
data[2] = (byte) 3;
BinaryNode n2 = BinaryNode.valueOf(data, 2, 1);
assertTrue(n.equals(n2));
assertEquals("\"Aw==\"", n.toString());
assertEquals("AAMD", new BinaryNode(data).asString());
assertNodeNumbersForNonNumeric(n);
assertNonContainerStreamMethods(n2);
}
@Test
public void testPOJO()
{
POJONode n = new POJONode("x"); // not really a pojo but that's ok
assertStandardEquals(n);
assertEquals(n, new POJONode("x"));
assertEquals("x", n.asString());
// 10-Dec-2018, tatu: With 2.10, should serialize same as via ObjectMapper/ObjectWriter
assertEquals("\"x\"", n.toString());
assertEquals(new POJONode(null), new POJONode(null));
// default; non-numeric
assertNodeNumbersForNonNumeric(n);
// but if wrapping actual number, use it
assertNodeNumbers(new POJONode(Integer.valueOf(123)), 123, 123.0);
assertNonContainerStreamMethods(n);
}
// [databind#743]
@Test
public void testRawValue() throws Exception
{
ObjectNode root = MAPPER.createObjectNode();
root.putRawValue("a", new RawValue(new SerializedString("[1, 2, 3]")));
assertEquals("{\"a\":[1, 2, 3]}", MAPPER.writeValueAsString(root));
}
// [databind#790]
@Test
public void testCustomComparators() throws Exception
{
ObjectNode nestedObject1 = MAPPER.createObjectNode();
nestedObject1.put("value", 6);
ArrayNode nestedArray1 = MAPPER.createArrayNode();
nestedArray1.add(7);
ObjectNode root1 = MAPPER.createObjectNode();
root1.put("value", 5);
root1.set("nested_object", nestedObject1);
root1.set("nested_array", nestedArray1);
ObjectNode nestedObject2 = MAPPER.createObjectNode();
nestedObject2.put("value", 6.9);
ArrayNode nestedArray2 = MAPPER.createArrayNode();
nestedArray2.add(7.0);
ObjectNode root2 = MAPPER.createObjectNode();
root2.put("value", 5.0);
root2.set("nested_object", nestedObject2);
root2.set("nested_array", nestedArray2);
// default equals(): not strictly equal
assertFalse(root1.equals(root2));
assertFalse(root2.equals(root1));
assertTrue(root1.equals(root1));
assertTrue(root2.equals(root2));
assertTrue(nestedArray1.equals(nestedArray1));
assertFalse(nestedArray1.equals(nestedArray2));
assertFalse(nestedArray2.equals(nestedArray1));
// but. Custom comparator can make all the difference
Comparator<JsonNode> cmp = new Comparator<JsonNode>() {
@Override
public int compare(JsonNode o1, JsonNode o2) {
if (o1 instanceof ContainerNode || o2 instanceof ContainerNode) {
fail("container nodes should be traversed, comparator should not be invoked");
}
if (o1.equals(o2)) {
return 0;
}
if ((o1 instanceof NumericNode) && (o2 instanceof NumericNode)) {
int d1 = ((NumericNode) o1).numberValue().intValue();
int d2 = ((NumericNode) o2).numberValue().intValue();
if (d1 == d2) { // strictly equals because it's integral value
return 0;
}
if (d1 < d2) {
return -1;
}
return 1;
}
return 0;
}
};
assertTrue(root1.equals(cmp, root2));
assertTrue(root2.equals(cmp, root1));
assertTrue(root1.equals(cmp, root1));
assertTrue(root2.equals(cmp, root2));
ArrayNode array3 = MAPPER.createArrayNode();
array3.add(123);
assertFalse(root2.equals(cmp, nestedArray1));
assertTrue(nestedArray1.equals(cmp, nestedArray1));
assertFalse(nestedArray1.equals(cmp, root2));
assertFalse(nestedArray1.equals(cmp, array3));
}
// [databind#793]
@Test
public void testArrayWithDefaultTyping() throws Exception
{
ObjectMapper mapper = jsonMapperBuilder()
.activateDefaultTyping(NoCheckSubTypeValidator.instance)
.build();
JsonNode array = mapper.readTree("[ 1, 2 ]");
assertTrue(array.isArray());
assertEquals(2, array.size());
JsonNode obj = mapper.readTree("{ \"a\" : 2 }");
assertTrue(obj.isObject());
assertEquals(1, obj.size());
assertEquals(2, obj.path("a").asInt());
}
// [databind#2145]
@Test
public void testOptionalAccessorOnArray() throws Exception {
ArrayNode arrayNode = MAPPER.createArrayNode();
arrayNode.add("firstElement");
assertTrue(arrayNode.optional(0).isPresent());
assertEquals("firstElement", arrayNode.optional(0).get().asString());
assertFalse(arrayNode.optional(1).isPresent());
assertFalse(arrayNode.optional(-1).isPresent());
assertFalse(arrayNode.optional(999).isPresent());
assertFalse(arrayNode.optional("anyField").isPresent());
}
@Test
public void testOptionalAccessorOnObject() throws Exception {
ObjectNode objectNode = MAPPER.createObjectNode();
objectNode.put("existingField", "value");
assertTrue(objectNode.optional("existingField").isPresent());
assertEquals("value", objectNode.optional("existingField").get().asString());
assertFalse(objectNode.optional("missingField").isPresent());
assertFalse(objectNode.optional(0).isPresent());
assertFalse(objectNode.optional(-1).isPresent());
}
@Test
public void testOptionalAccessorOnNumbers() throws Exception
{
// Test IntNode
IntNode intNode = IntNode.valueOf(42);
assertFalse(intNode.optional("anyField").isPresent());
assertFalse(intNode.optional(0).isPresent());
// Test LongNode
LongNode longNode = LongNode.valueOf(123456789L);
assertFalse(longNode.optional("anyField").isPresent());
assertFalse(longNode.optional(0).isPresent());
// Test DoubleNode
DoubleNode doubleNode = DoubleNode.valueOf(3.14);
assertFalse(doubleNode.optional("anyField").isPresent());
assertFalse(doubleNode.optional(0).isPresent());
// Test DecimalNode
DecimalNode decimalNode = DecimalNode.valueOf(new java.math.BigDecimal("12345.6789"));
assertFalse(decimalNode.optional("anyField").isPresent());
assertFalse(decimalNode.optional(0).isPresent());
}
@Test
public void testOptionalAccessorOnOtherTypes() throws Exception
{
StringNode stringNode = StringNode.valueOf("sampleText");
assertFalse(stringNode.optional("anyField").isPresent());
assertFalse(stringNode.optional(0).isPresent());
NullNode nullNode = NullNode.getInstance();
assertFalse(nullNode.optional("anyField").isPresent());
assertFalse(nullNode.optional(0).isPresent());
BooleanNode booleanNode = BooleanNode.TRUE;
assertFalse(booleanNode.optional("anyField").isPresent());
assertFalse(booleanNode.optional(0).isPresent());
}
// [databind#4867]
@Test
public void testAsOptional() {
// Test with MissingNode
JsonNode missingNode = MissingNode.getInstance();
Optional<JsonNode> missingOptional = missingNode.asOptional();
assertFalse(missingOptional.isPresent());
// Test with ObjectNode
ObjectNode objectNode = MAPPER.createObjectNode();
Optional<JsonNode> objectOptional = objectNode.asOptional();
assertTrue(objectOptional.isPresent());
assertEquals(objectNode, objectOptional.get());
// Test with ArrayNode
ArrayNode arrayNode = MAPPER.createArrayNode();
Optional<JsonNode> arrayOptional = arrayNode.asOptional();
assertTrue(arrayOptional.isPresent());
assertEquals(arrayNode, arrayOptional.get());
// Test with StringNode
StringNode stringNode = StringNode.valueOf("text");
Optional<JsonNode> textOptional = stringNode.asOptional();
assertTrue(textOptional.isPresent());
assertEquals(stringNode, textOptional.get());
// Test with NullNode
NullNode nullNode = NullNode.getInstance();
Optional<JsonNode> nullOptional = nullNode.asOptional();
assertTrue(nullOptional.isPresent());
assertEquals(nullNode, nullOptional.get());
}
}
| JsonNodeBasicTest |
java | elastic__elasticsearch | libs/tdigest/src/main/java/org/elasticsearch/tdigest/ScaleFunction.java | {
"start": 1639,
"end": 23357
} | enum ____ {
/**
* Generates uniform cluster sizes. Used for comparison only.
*/
K_0 {
@Override
public double k(double q, double compression, double n) {
return compression * q / 2;
}
@Override
public double k(double q, double normalizer) {
return normalizer * q;
}
@Override
public double q(double k, double compression, double n) {
return 2 * k / compression;
}
@Override
public double q(double k, double normalizer) {
return k / normalizer;
}
@Override
public double max(double q, double compression, double n) {
return 2 / compression;
}
@Override
public double max(double q, double normalizer) {
return 1 / normalizer;
}
@Override
public double normalizer(double compression, double n) {
return compression / 2;
}
},
/**
* Generates cluster sizes proportional to sqrt(q*(1-q)). This gives constant relative accuracy if accuracy is
* proportional to squared cluster size. It is expected that K_2 and K_3 will give better practical results.
*/
K_1 {
@Override
public double k(final double q, final double compression, double n) {
Function f = new Function() {
@Override
double apply(double q) {
return compression * Math.asin(2 * q - 1) / (2 * Math.PI);
}
};
return ScaleFunction.limitCall(f, q, 1e-15, 1 - 1e-15);
}
@Override
public double k(final double q, final double normalizer) {
Function f = new Function() {
@Override
double apply(double q) {
return normalizer * Math.asin(2 * q - 1);
}
};
return ScaleFunction.limitCall(f, q, 1e-15, 1 - 1e-15);
}
@Override
public double q(double k, final double compression, double n) {
Function f = new Function() {
@Override
double apply(double k) {
return (Math.sin(k * (2 * Math.PI / compression)) + 1) / 2;
}
};
return ScaleFunction.limitCall(f, k, -compression / 4, compression / 4);
}
@Override
public double q(double k, final double normalizer) {
Function f = new Function() {
@Override
double apply(double x) {
return (Math.sin(x) + 1) / 2;
}
};
double x = k / normalizer;
return ScaleFunction.limitCall(f, x, -Math.PI / 2, Math.PI / 2);
}
@Override
public double max(double q, double compression, double n) {
if (q <= 0) {
return 0;
} else if (q >= 1) {
return 0;
} else {
return 2 * Math.sin(Math.PI / compression) * Math.sqrt(q * (1 - q));
}
}
@Override
public double max(double q, double normalizer) {
if (q <= 0) {
return 0;
} else if (q >= 1) {
return 0;
} else {
return 2 * Math.sin(0.5 / normalizer) * Math.sqrt(q * (1 - q));
}
}
@Override
public double normalizer(double compression, double n) {
return compression / (2 * Math.PI);
}
},
/**
* Generates cluster sizes proportional to sqrt(q*(1-q)) but avoids computation of asin in the critical path by
* using an approximate version.
*/
K_1_FAST {
@Override
public double k(double q, final double compression, double n) {
Function f = new Function() {
@Override
double apply(double q) {
return compression * fastAsin(2 * q - 1) / (2 * Math.PI);
}
};
return ScaleFunction.limitCall(f, q, 0, 1);
}
@Override
public double k(double q, final double normalizer) {
Function f = new Function() {
@Override
double apply(double q) {
return normalizer * fastAsin(2 * q - 1);
}
};
return ScaleFunction.limitCall(f, q, 0, 1);
}
@Override
public double q(double k, double compression, double n) {
return (Math.sin(k * (2 * Math.PI / compression)) + 1) / 2;
}
@Override
public double q(double k, double normalizer) {
return (Math.sin(k / normalizer) + 1) / 2;
}
@Override
public double max(double q, double compression, double n) {
if (q <= 0) {
return 0;
} else if (q >= 1) {
return 0;
} else {
return 2 * Math.sin(Math.PI / compression) * Math.sqrt(q * (1 - q));
}
}
@Override
public double max(double q, double normalizer) {
if (q <= 0) {
return 0;
} else if (q >= 1) {
return 0;
} else {
return 2 * Math.sin(0.5 / normalizer) * Math.sqrt(q * (1 - q));
}
}
@Override
public double normalizer(double compression, double n) {
return compression / (2 * Math.PI);
}
},
/**
* Generates cluster sizes proportional to q*(1-q). This makes tail error bounds tighter than for K_1. The use of a
* normalizing function results in a strictly bounded number of clusters no matter how many samples.
*/
K_2 {
@Override
public double k(double q, final double compression, final double n) {
if (n <= 1) {
if (q <= 0) {
return -10;
} else if (q >= 1) {
return 10;
} else {
return 0;
}
}
Function f = new Function() {
@Override
double apply(double q) {
return compression * Math.log(q / (1 - q)) / Z(compression, n);
}
};
return ScaleFunction.limitCall(f, q, 1e-15, 1 - 1e-15);
}
@Override
public double k(double q, final double normalizer) {
Function f = new Function() {
@Override
double apply(double q) {
return Math.log(q / (1 - q)) * normalizer;
}
};
return ScaleFunction.limitCall(f, q, 1e-15, 1 - 1e-15);
}
@Override
public double q(double k, double compression, double n) {
double w = Math.exp(k * Z(compression, n) / compression);
return w / (1 + w);
}
@Override
public double q(double k, double normalizer) {
double w = Math.exp(k / normalizer);
return w / (1 + w);
}
@Override
public double max(double q, double compression, double n) {
return Z(compression, n) * q * (1 - q) / compression;
}
@Override
public double max(double q, double normalizer) {
return q * (1 - q) / normalizer;
}
@Override
public double normalizer(double compression, double n) {
return compression / Z(compression, n);
}
private double Z(double compression, double n) {
return 4 * Math.log(n / compression) + 24;
}
},
/**
* Generates cluster sizes proportional to min(q, 1-q). This makes tail error bounds tighter than for K_1 or K_2.
* The use of a normalizing function results in a strictly bounded number of clusters no matter how many samples.
*/
K_3 {
@Override
public double k(double q, final double compression, final double n) {
Function f = new Function() {
@Override
double apply(double q) {
if (q <= 0.5) {
return compression * Math.log(2 * q) / Z(compression, n);
} else {
return -k(1 - q, compression, n);
}
}
};
return ScaleFunction.limitCall(f, q, 1e-15, 1 - 1e-15);
}
@Override
public double k(double q, final double normalizer) {
Function f = new Function() {
@Override
double apply(double q) {
if (q <= 0.5) {
return Math.log(2 * q) * normalizer;
} else {
return -k(1 - q, normalizer);
}
}
};
return ScaleFunction.limitCall(f, q, 1e-15, 1 - 1e-15);
}
@Override
public double q(double k, double compression, double n) {
if (k <= 0) {
return Math.exp(k * Z(compression, n) / compression) / 2;
} else {
return 1 - q(-k, compression, n);
}
}
@Override
public double q(double k, double normalizer) {
if (k <= 0) {
return Math.exp(k / normalizer) / 2;
} else {
return 1 - q(-k, normalizer);
}
}
@Override
public double max(double q, double compression, double n) {
return Z(compression, n) * Math.min(q, 1 - q) / compression;
}
@Override
public double max(double q, double normalizer) {
return Math.min(q, 1 - q) / normalizer;
}
@Override
public double normalizer(double compression, double n) {
return compression / Z(compression, n);
}
private double Z(double compression, double n) {
return 4 * Math.log(n / compression) + 21;
}
},
/**
* Generates cluster sizes proportional to q*(1-q). This makes the tail error bounds tighter. This version does not
* use a normalizer function and thus the number of clusters increases roughly proportional to log(n). That is good
* for accuracy, but bad for size and bad for the statically allocated MergingDigest, but can be useful for
* tree-based implementations.
*/
K_2_NO_NORM {
@Override
public double k(double q, final double compression, double n) {
Function f = new Function() {
@Override
double apply(double q) {
return compression * Math.log(q / (1 - q));
}
};
return ScaleFunction.limitCall(f, q, 1e-15, 1 - 1e-15);
}
@Override
public double k(double q, final double normalizer) {
Function f = new Function() {
@Override
double apply(double q) {
return normalizer * Math.log(q / (1 - q));
}
};
return ScaleFunction.limitCall(f, q, 1e-15, 1 - 1e-15);
}
@Override
public double q(double k, double compression, double n) {
double w = Math.exp(k / compression);
return w / (1 + w);
}
@Override
public double q(double k, double normalizer) {
double w = Math.exp(k / normalizer);
return w / (1 + w);
}
@Override
public double max(double q, double compression, double n) {
return q * (1 - q) / compression;
}
@Override
public double max(double q, double normalizer) {
return q * (1 - q) / normalizer;
}
@Override
public double normalizer(double compression, double n) {
return compression;
}
},
/**
* Generates cluster sizes proportional to min(q, 1-q). This makes the tail error bounds tighter. This version does
* not use a normalizer function and thus the number of clusters increases roughly proportional to log(n). That is
* good for accuracy, but bad for size and bad for the statically allocated MergingDigest, but can be useful for
* tree-based implementations.
*/
K_3_NO_NORM {
@Override
public double k(double q, final double compression, final double n) {
Function f = new Function() {
@Override
double apply(double q) {
if (q <= 0.5) {
return compression * Math.log(2 * q);
} else {
return -k(1 - q, compression, n);
}
}
};
return limitCall(f, q, 1e-15, 1 - 1e-15);
}
@Override
public double k(double q, final double normalizer) {
// poor man's lambda, sigh
Function f = new Function() {
@Override
double apply(double q) {
if (q <= 0.5) {
return normalizer * Math.log(2 * q);
} else {
return -k(1 - q, normalizer);
}
}
};
return limitCall(f, q, 1e-15, 1 - 1e-15);
}
@Override
public double q(double k, double compression, double n) {
if (k <= 0) {
return Math.exp(k / compression) / 2;
} else {
return 1 - q(-k, compression, n);
}
}
@Override
public double q(double k, double normalizer) {
if (k <= 0) {
return Math.exp(k / normalizer) / 2;
} else {
return 1 - q(-k, normalizer);
}
}
@Override
public double max(double q, double compression, double n) {
return Math.min(q, 1 - q) / compression;
}
@Override
public double max(double q, double normalizer) {
return Math.min(q, 1 - q) / normalizer;
}
@Override
public double normalizer(double compression, double n) {
return compression;
}
}; // max weight is min(q,1-q), should improve tail accuracy even more
/**
* Converts a quantile to the k-scale. The total number of points is also provided so that a normalizing function
* can be computed if necessary.
*
* @param q The quantile
* @param compression Also known as delta in literature on the t-digest
* @param n The total number of samples
* @return The corresponding value of k
*/
public abstract double k(double q, double compression, double n);
/**
* Converts a quantile to the k-scale. The normalizer value depends on compression and (possibly) number of points
* in the digest. #normalizer(double, double)
*
* @param q The quantile
* @param normalizer The normalizer value which depends on compression and (possibly) number of points in the
* digest.
* @return The corresponding value of k
*/
public abstract double k(double q, double normalizer);
/**
* Computes q as a function of k. This is often faster than finding k as a function of q for some scales.
*
* @param k The index value to convert into q scale.
* @param compression The compression factor (often written as δ)
* @param n The number of samples already in the digest.
* @return The value of q that corresponds to k
*/
public abstract double q(double k, double compression, double n);
/**
* Computes q as a function of k. This is often faster than finding k as a function of q for some scales.
*
* @param k The index value to convert into q scale.
* @param normalizer The normalizer value which depends on compression and (possibly) number of points in the
* digest.
* @return The value of q that corresponds to k
*/
public abstract double q(double k, double normalizer);
/**
* Computes the maximum relative size a cluster can have at quantile q. Note that exactly where within the range
* spanned by a cluster that q should be isn't clear. That means that this function usually has to be taken at
* multiple points and the smallest value used.
* <p>
* Note that this is the relative size of a cluster. To get the max number of samples in the cluster, multiply this
* value times the total number of samples in the digest.
*
* @param q The quantile
* @param compression The compression factor, typically delta in the literature
* @param n The number of samples seen so far in the digest
* @return The maximum number of samples that can be in the cluster
*/
public abstract double max(double q, double compression, double n);
/**
* Computes the maximum relative size a cluster can have at quantile q. Note that exactly where within the range
* spanned by a cluster that q should be isn't clear. That means that this function usually has to be taken at
* multiple points and the smallest value used.
* <p>
* Note that this is the relative size of a cluster. To get the max number of samples in the cluster, multiply this
* value times the total number of samples in the digest.
*
* @param q The quantile
* @param normalizer The normalizer value which depends on compression and (possibly) number of points in the
* digest.
* @return The maximum number of samples that can be in the cluster
*/
public abstract double max(double q, double normalizer);
/**
* Computes the normalizer given compression and number of points.
* @param compression The compression parameter for the digest
* @param n The number of samples seen so far
* @return The normalizing factor for the scale function
*/
public abstract double normalizer(double compression, double n);
/**
* Approximates asin to within about 1e-6. This approximation works by breaking the range from 0 to 1 into 5 regions
* for all but the region nearest 1, rational polynomial models get us a very good approximation of asin and by
* interpolating as we move from region to region, we can guarantee continuity and we happen to get monotonicity as
* well. for the values near 1, we just use Math.asin as our region "approximation".
*
* @param x sin(theta)
* @return theta
*/
static double fastAsin(double x) {
if (x < 0) {
return -fastAsin(-x);
} else if (x > 1) {
return Double.NaN;
} else {
// Cutoffs for models. Note that the ranges overlap. In the
// overlap we do linear interpolation to guarantee the overall
// result is "nice"
double c0High = 0.1;
double c1High = 0.55;
double c2Low = 0.5;
double c2High = 0.8;
double c3Low = 0.75;
double c3High = 0.9;
double c4Low = 0.87;
if (x > c3High) {
return Math.asin(x);
} else {
// the models
double[] m0 = { 0.2955302411, 1.2221903614, 0.1488583743, 0.2422015816, -0.3688700895, 0.0733398445 };
double[] m1 = { -0.0430991920, 0.9594035750, -0.0362312299, 0.1204623351, 0.0457029620, -0.0026025285 };
double[] m2 = { -0.034873933724, 1.054796752703, -0.194127063385, 0.283963735636, 0.023800124916, -0.000872727381 };
double[] m3 = { -0.37588391875, 2.61991859025, -2.48835406886, 1.48605387425, 0.00857627492, -0.00015802871 };
// the parameters for all of the models
double[] vars = { 1, x, x * x, x * x * x, 1 / (1 - x), 1 / (1 - x) / (1 - x) };
// raw grist for interpolation coefficients
double x0 = bound((c0High - x) / c0High);
double x1 = bound((c1High - x) / (c1High - c2Low));
double x2 = bound((c2High - x) / (c2High - c3Low));
double x3 = bound((c3High - x) / (c3High - c4Low));
// interpolation coefficients
// noinspection UnnecessaryLocalVariable
double mix0 = x0;
double mix1 = (1 - x0) * x1;
double mix2 = (1 - x1) * x2;
double mix3 = (1 - x2) * x3;
double mix4 = 1 - x3;
// now mix all the results together, avoiding extra evaluations
double r = 0;
if (mix0 > 0) {
r += mix0 * eval(m0, vars);
}
if (mix1 > 0) {
r += mix1 * eval(m1, vars);
}
if (mix2 > 0) {
r += mix2 * eval(m2, vars);
}
if (mix3 > 0) {
r += mix3 * eval(m3, vars);
}
if (mix4 > 0) {
// model 4 is just the real deal
r += mix4 * Math.asin(x);
}
return r;
}
}
}
abstract static | ScaleFunction |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/web/servlet/samples/spr/ControllerAdviceIntegrationTests.java | {
"start": 5559,
"end": 5958
} | class ____ {
static final AtomicInteger instanceCount = new AtomicInteger();
static final AtomicInteger invocationCount = new AtomicInteger();
PrototypeControllerAdvice() {
instanceCount.incrementAndGet();
}
@ModelAttribute
void initModel(Model model) {
model.addAttribute("prototype", invocationCount.incrementAndGet());
}
}
@ControllerAdvice
static | PrototypeControllerAdvice |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/time/DurationToLongTimeUnitTest.java | {
"start": 6208,
"end": 10119
} | class ____ {
void javaTime(Future<String> f, java.time.Duration d) throws Exception {
// BUG: Diagnostic contains: f.get(d.toNanos(), TimeUnit.NANOSECONDS)
f.get(d.toNanos(), TimeUnit.MILLISECONDS);
// BUG: Diagnostic contains: f.get(d.toMillis(), TimeUnit.MILLISECONDS)
f.get(d.toMillis(), TimeUnit.NANOSECONDS);
// BUG: Diagnostic contains: f.get(d.toSeconds(), TimeUnit.SECONDS)
f.get(d.toSeconds(), TimeUnit.MINUTES);
// BUG: Diagnostic contains: f.get(d.getSeconds(), TimeUnit.SECONDS)
f.get(d.getSeconds(), TimeUnit.MINUTES);
// BUG: Diagnostic contains: f.get(d.toMinutes(), TimeUnit.MINUTES)
f.get(d.toMinutes(), TimeUnit.SECONDS);
// BUG: Diagnostic contains: f.get(d.toHours(), TimeUnit.HOURS)
f.get(d.toHours(), TimeUnit.DAYS);
// BUG: Diagnostic contains: f.get(d.toDays(), TimeUnit.DAYS)
f.get(d.toDays(), TimeUnit.HOURS);
}
void javaTime(Future<String> f, java.time.Instant i) throws Exception {
// BUG: Diagnostic contains: f.get(i.toEpochMilli(), TimeUnit.MILLISECONDS)
f.get(i.toEpochMilli(), TimeUnit.NANOSECONDS);
// BUG: Diagnostic contains: f.get(i.getEpochSecond(), TimeUnit.SECONDS)
f.get(i.getEpochSecond(), TimeUnit.MINUTES);
}
void jodaTime(Future<String> f, org.joda.time.Duration d) throws Exception {
// BUG: Diagnostic contains: f.get(d.getMillis(), TimeUnit.MILLISECONDS)
f.get(d.getMillis(), TimeUnit.NANOSECONDS);
// BUG: Diagnostic contains: f.get(d.getStandardSeconds(), TimeUnit.SECONDS)
f.get(d.getStandardSeconds(), TimeUnit.MINUTES);
// BUG: Diagnostic contains: f.get(d.getStandardMinutes(), TimeUnit.MINUTES)
f.get(d.getStandardMinutes(), TimeUnit.SECONDS);
// BUG: Diagnostic contains: f.get(d.getStandardHours(), TimeUnit.HOURS)
f.get(d.getStandardHours(), TimeUnit.DAYS);
// BUG: Diagnostic contains: f.get(d.getStandardDays(), TimeUnit.DAYS)
f.get(d.getStandardDays(), TimeUnit.HOURS);
}
void jodaTime(Future<String> f, org.joda.time.Instant i) throws Exception {
// BUG: Diagnostic contains: f.get(i.getMillis(), TimeUnit.MILLISECONDS)
f.get(i.getMillis(), TimeUnit.NANOSECONDS);
}
void protoTime(Future<String> f, Duration d) throws Exception {
// BUG: Diagnostic contains: f.get(d.getSeconds(), TimeUnit.SECONDS)
f.get(d.getSeconds(), TimeUnit.MINUTES);
}
void protoTime(Future<String> f, Timestamp t) throws Exception {
// BUG: Diagnostic contains: f.get(t.getSeconds(), TimeUnit.SECONDS)
f.get(t.getSeconds(), TimeUnit.MINUTES);
}
}
""")
.doTest();
}
@Test
public void conflictingUnitsFail_staticImport() {
helper
.addSourceLines(
"TestClass.java",
"""
import static java.util.concurrent.TimeUnit.DAYS;
import static java.util.concurrent.TimeUnit.HOURS;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static java.util.concurrent.TimeUnit.MINUTES;
import static java.util.concurrent.TimeUnit.NANOSECONDS;
import static java.util.concurrent.TimeUnit.SECONDS;
import com.google.protobuf.Duration;
import com.google.protobuf.Timestamp;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
public | TestClass |
java | apache__flink | flink-streaming-java/src/test/java/org/apache/flink/streaming/api/operators/async/AsyncWaitOperatorTest.java | {
"start": 63365,
"end": 63974
} | class ____<IN> implements AsyncFunction<IN, IN> {
private static final long serialVersionUID = -4214078239227288637L;
private final SharedReference<List<ResultFuture<?>>> resultFutures;
private CollectableFuturesAsyncFunction(
SharedReference<List<ResultFuture<?>>> resultFutures) {
this.resultFutures = resultFutures;
}
@Override
public void asyncInvoke(IN input, ResultFuture<IN> resultFuture) throws Exception {
resultFutures.get().add(resultFuture);
}
}
private static | CollectableFuturesAsyncFunction |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/EnrollmentToken.java | {
"start": 902,
"end": 5482
} | class ____ {
// This was previously a version string, e.g. 8.12.0, but treated exclusively as a string everywhere, never parsed into a version.
// Arbitrarily set to 9 when decoupling this from node version.
public static final String CURRENT_TOKEN_VERSION = "8.14.0";
private final String apiKey;
private final String fingerprint;
private final String version;
private final List<String> boundAddress;
public String getApiKey() {
return apiKey;
}
public String getFingerprint() {
return fingerprint;
}
public String getVersion() {
return version;
}
public List<String> getBoundAddress() {
return boundAddress;
}
private static final ParseField API_KEY = new ParseField("key");
private static final ParseField FINGERPRINT = new ParseField("fgr");
private static final ParseField VERSION = new ParseField("ver");
private static final ParseField ADDRESS = new ParseField("adr");
@SuppressWarnings("unchecked")
public static final ConstructingObjectParser<EnrollmentToken, Void> PARSER = new ConstructingObjectParser<>(
"enrollment_token",
false,
a -> new EnrollmentToken((String) a[0], (String) a[1], (String) a[2], (List<String>) a[3])
);
static {
PARSER.declareString(constructorArg(), API_KEY);
PARSER.declareString(constructorArg(), FINGERPRINT);
PARSER.declareString(constructorArg(), VERSION);
PARSER.declareStringArray(constructorArg(), ADDRESS);
}
EnrollmentToken(String apiKey, String fingerprint, String version, List<String> boundAddress) {
this.apiKey = Objects.requireNonNull(apiKey);
this.fingerprint = Objects.requireNonNull(fingerprint);
this.version = Objects.requireNonNull(version);
this.boundAddress = Objects.requireNonNull(boundAddress);
}
/**
* Create an EnrollmentToken
*
* @param apiKey API Key credential in the form apiKeyId:ApiKeySecret to be used for enroll calls
* @param fingerprint hex encoded SHA256 fingerprint of the HTTP CA cert
* @param boundAddress IP Addresses and port numbers for the interfaces where the Elasticsearch node is listening on
*/
public EnrollmentToken(String apiKey, String fingerprint, List<String> boundAddress) {
this(apiKey, fingerprint, EnrollmentToken.CURRENT_TOKEN_VERSION, boundAddress);
}
public String getRaw() throws Exception {
final XContentBuilder builder = JsonXContent.contentBuilder();
builder.startObject();
builder.field("ver", version);
builder.startArray("adr");
for (String bound_address : boundAddress) {
builder.value(bound_address);
}
builder.endArray();
builder.field("fgr", fingerprint);
builder.field("key", apiKey);
builder.endObject();
return Strings.toString(builder);
}
public String getEncoded() throws Exception {
final String jsonString = getRaw();
return Base64.getUrlEncoder().encodeToString(jsonString.getBytes(StandardCharsets.UTF_8));
}
/**
* Decodes and parses an enrollment token from its serialized form (created with {@link EnrollmentToken#getEncoded()}
* @param encoded The Base64 encoded JSON representation of the enrollment token
* @return the parsed EnrollmentToken
* @throws IOException when failing to decode the serialized token
*/
public static EnrollmentToken decodeFromString(String encoded) throws IOException {
if (Strings.isNullOrEmpty(encoded)) {
throw new IOException("Cannot decode enrollment token from an empty string");
}
try (
XContentParser jsonParser = JsonXContent.jsonXContent.createParser(
XContentParserConfiguration.EMPTY,
Base64.getDecoder().decode(encoded)
)
) {
return EnrollmentToken.PARSER.parse(jsonParser, null);
}
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
EnrollmentToken that = (EnrollmentToken) o;
return apiKey.equals(that.apiKey)
&& fingerprint.equals(that.fingerprint)
&& version.equals(that.version)
&& boundAddress.equals(that.boundAddress);
}
@Override
public int hashCode() {
return Objects.hash(apiKey, fingerprint, version, boundAddress);
}
}
| EnrollmentToken |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/threadsafety/ThreadSafeCheckerTest.java | {
"start": 25236,
"end": 25654
} | class ____ {
{
new Super() {};
}
}
""")
.doTest();
}
@Test
public void positiveEnumConstant() {
compilationHelper
.addSourceLines(
"threadsafety/Super.java",
"""
package threadsafety;
import com.google.errorprone.annotations.ThreadSafe;
@ThreadSafe
| Test |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/plan/nodes/exec/stream/SortRestoreTest.java | {
"start": 1198,
"end": 1507
} | class ____ extends RestoreTestBase {
public SortRestoreTest() {
super(StreamExecSort.class, AfterRestoreSource.NO_RESTORE);
}
@Override
public List<TableTestProgram> programs() {
return Arrays.asList(SortTestPrograms.SORT_ASC, SortTestPrograms.SORT_DESC);
}
}
| SortRestoreTest |
java | apache__camel | core/camel-api/src/main/java/org/apache/camel/spi/ResourceResolver.java | {
"start": 962,
"end": 1770
} | interface ____ extends StaticService, CamelContextAware {
/**
* Service factory base path for scheme specific resolver.
*/
String FACTORY_PATH = "META-INF/services/org/apache/camel/resource-resolver/";
/**
* Key for a custom fallback resource resolver.
*/
String FALLBACK_RESOURCE_RESOLVER = "fallback-resource-resolver";
/**
* The supported resource scheme.
* <p/>
* Implementations should support a single scheme only.
*/
String getSupportedScheme();
/**
* Resolve a {@link Resource} from a give uri.
*
* @param location the location of the resource to resolve.
* @return an {@link Resource}, null if was not possible to resolve the resource.
*/
Resource resolve(String location);
}
| ResourceResolver |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestListSnapshot.java | {
"start": 1888,
"end": 5728
} | class ____ {
static final short REPLICATION = 3;
private final Path dir1 = new Path("/TestSnapshot1");
Configuration conf;
MiniDFSCluster cluster;
FSNamesystem fsn;
DistributedFileSystem hdfs;
@BeforeEach
public void setUp() throws Exception {
conf = new Configuration();
conf.setBoolean(DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED, true);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
.build();
cluster.waitActive();
fsn = cluster.getNamesystem();
hdfs = cluster.getFileSystem();
hdfs.mkdirs(dir1);
}
@AfterEach
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
/**
* Test listing all the snapshottable directories.
*/
@Test
@Timeout(value = 60)
public void testListSnapshot() throws Exception {
fsn.getSnapshotManager().setAllowNestedSnapshots(true);
// Initially there is no snapshottable directories in the system
SnapshotStatus[] snapshotStatuses = null;
SnapshottableDirectoryStatus[] dirs = hdfs.getSnapshottableDirListing();
assertNull(dirs);
LambdaTestUtils.intercept(SnapshotException.class,
"Directory is not a " + "snapshottable directory",
() -> hdfs.getSnapshotListing(dir1));
// Make root as snapshottable
final Path root = new Path("/");
hdfs.allowSnapshot(root);
dirs = hdfs.getSnapshottableDirListing();
assertEquals(1, dirs.length);
assertEquals("", dirs[0].getDirStatus().getLocalName());
assertEquals(root, dirs[0].getFullPath());
snapshotStatuses = hdfs.getSnapshotListing(root);
assertTrue(snapshotStatuses.length == 0);
// Make root non-snaphsottable
hdfs.disallowSnapshot(root);
dirs = hdfs.getSnapshottableDirListing();
assertNull(dirs);
snapshotStatuses = hdfs.getSnapshotListing(root);
assertTrue(snapshotStatuses.length == 0);
// Make dir1 as snapshottable
hdfs.allowSnapshot(dir1);
hdfs.createSnapshot(dir1, "s0");
snapshotStatuses = hdfs.getSnapshotListing(dir1);
assertEquals(1, snapshotStatuses.length);
assertEquals("s0", snapshotStatuses[0].getDirStatus().getLocalName());
assertEquals(SnapshotTestHelper.getSnapshotRoot(dir1, "s0"), snapshotStatuses[0].getFullPath());
// snapshot id is zero
assertEquals(0, snapshotStatuses[0].getSnapshotID());
// Create a snapshot for dir1
hdfs.createSnapshot(dir1, "s1");
hdfs.createSnapshot(dir1, "s2");
snapshotStatuses = hdfs.getSnapshotListing(dir1);
// There are now 3 snapshots for dir1
assertEquals(3, snapshotStatuses.length);
assertEquals("s0", snapshotStatuses[0].getDirStatus().
getLocalName());
assertEquals(SnapshotTestHelper.getSnapshotRoot(dir1, "s0"),
snapshotStatuses[0].getFullPath());
assertEquals("s1", snapshotStatuses[1].getDirStatus().
getLocalName());
assertEquals(SnapshotTestHelper.getSnapshotRoot(dir1, "s1"),
snapshotStatuses[1].getFullPath());
assertEquals("s2", snapshotStatuses[2].getDirStatus().
getLocalName());
assertEquals(SnapshotTestHelper.getSnapshotRoot(dir1, "s2"),
snapshotStatuses[2].getFullPath());
hdfs.deleteSnapshot(dir1, "s2");
snapshotStatuses = hdfs.getSnapshotListing(dir1);
// There are now 2 active snapshots for dir1 and one is marked deleted
assertEquals(3, snapshotStatuses.length);
assertTrue(snapshotStatuses[2].isDeleted());
assertFalse(snapshotStatuses[1].isDeleted());
assertFalse(snapshotStatuses[0].isDeleted());
// delete the 1st snapshot
hdfs.deleteSnapshot(dir1, "s0");
snapshotStatuses = hdfs.getSnapshotListing(dir1);
// There are now 2 snapshots now as the 1st one is deleted in order
assertEquals(2, snapshotStatuses.length);
}
} | TestListSnapshot |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamFailureStoreTemplateTests.java | {
"start": 948,
"end": 7786
} | class ____ extends AbstractXContentSerializingTestCase<DataStreamFailureStore.Template> {
@Override
protected Writeable.Reader<DataStreamFailureStore.Template> instanceReader() {
return DataStreamFailureStore.Template::read;
}
@Override
protected DataStreamFailureStore.Template createTestInstance() {
return randomFailureStoreTemplate();
}
@Override
protected DataStreamFailureStore.Template mutateInstance(DataStreamFailureStore.Template instance) {
var enabled = instance.enabled();
var lifecycle = instance.lifecycle();
switch (randomIntBetween(0, 1)) {
case 0 -> enabled = enabled.get() != null && lifecycle.get() != null && randomBoolean()
? randomEmptyResettableValue()
: ResettableValue.create(Boolean.FALSE.equals(enabled.get()));
case 1 -> lifecycle = lifecycle.get() != null && enabled.get() != null && randomBoolean()
? randomEmptyResettableValue()
: ResettableValue.create(
randomValueOtherThan(lifecycle.get(), DataStreamLifecycleTemplateTests::randomFailuresLifecycleTemplate)
);
default -> throw new IllegalArgumentException("illegal randomisation branch");
}
return new DataStreamFailureStore.Template(enabled, lifecycle);
}
@Override
protected DataStreamFailureStore.Template doParseInstance(XContentParser parser) throws IOException {
return DataStreamFailureStore.Template.fromXContent(parser);
}
static DataStreamFailureStore.Template randomFailureStoreTemplate() {
boolean enabledDefined = randomBoolean();
boolean lifecycleDefined = enabledDefined == false || randomBoolean();
return new DataStreamFailureStore.Template(
enabledDefined ? ResettableValue.create(randomBoolean()) : randomEmptyResettableValue(),
lifecycleDefined
? ResettableValue.create(DataStreamLifecycleTemplateTests.randomFailuresLifecycleTemplate())
: randomEmptyResettableValue()
);
}
public void testInvalidEmptyConfiguration() {
Exception exception = expectThrows(
IllegalArgumentException.class,
() -> new DataStreamFailureStore.Template(ResettableValue.undefined(), ResettableValue.undefined())
);
assertThat(exception.getMessage(), containsString("at least one non-null configuration value"));
}
public void testTemplateComposition() {
// Merging a template with itself, remains the same
boolean enabled = randomBoolean();
DataStreamFailureStore.Template template = new DataStreamFailureStore.Template(
enabled,
randomBoolean() ? null : DataStreamLifecycleTemplateTests.randomFailuresLifecycleTemplate()
);
DataStreamFailureStore.Template result = DataStreamFailureStore.builder(template).composeTemplate(template).buildTemplate();
assertThat(result, equalTo(normalise(template)));
// Override only enabled and keep lifecycle undefined
DataStreamFailureStore.Template negatedEnabledTemplate = DataStreamFailureStore.builder(template)
.enabled(enabled == false)
.buildTemplate();
result = DataStreamFailureStore.builder(template).composeTemplate(negatedEnabledTemplate).buildTemplate();
assertThat(result, equalTo(normalise(new DataStreamFailureStore.Template(enabled == false, template.lifecycle().get()))));
// Override only lifecycle and ensure it is merged
enabled = false; // Ensure it's not the default to ensure that it will not be overwritten
TimeValue retention = randomPositiveTimeValue();
DataStreamFailureStore.Template template1 = DataStreamFailureStore.builder()
.lifecycle(DataStreamLifecycle.failuresLifecycleBuilder().dataRetention(retention).build())
.buildTemplate();
DataStreamFailureStore.Template template2 = DataStreamFailureStore.builder()
.lifecycle(DataStreamLifecycle.failuresLifecycleBuilder().enabled(enabled).build())
.buildTemplate();
result = DataStreamFailureStore.builder(template1).composeTemplate(template2).buildTemplate();
assertThat(result.lifecycle().get().enabled(), equalTo(enabled));
assertThat(result.lifecycle().get().dataRetention().get(), equalTo(retention));
// Test reset
DataStreamFailureStore.Template fullyFilledTemplate = DataStreamFailureStore.builder()
.enabled(ResettableValue.create(randomBoolean()))
.lifecycle(DataStreamLifecycleTests.randomFailuresLifecycle())
.buildTemplate();
result = DataStreamFailureStore.builder(fullyFilledTemplate)
.composeTemplate(
new DataStreamFailureStore.Template(
ResettableValue.reset(),
ResettableValue.create(DataStreamLifecycleTemplateTests.randomFailuresLifecycleTemplate())
)
)
.buildTemplate();
assertThat(result.enabled(), equalTo(ResettableValue.undefined()));
assertThat(result.lifecycle(), not(equalTo(ResettableValue.undefined())));
result = DataStreamFailureStore.builder(fullyFilledTemplate)
.composeTemplate(new DataStreamFailureStore.Template(ResettableValue.create(randomBoolean()), ResettableValue.reset()))
.buildTemplate();
assertThat(result.enabled(), not(equalTo(ResettableValue.undefined())));
assertThat(result.lifecycle(), equalTo(ResettableValue.undefined()));
// Test resetting all values
result = DataStreamFailureStore.builder(fullyFilledTemplate)
.composeTemplate(new DataStreamFailureStore.Template(ResettableValue.reset(), ResettableValue.reset()))
.buildTemplate();
assertThat(result, nullValue());
}
private static <T> ResettableValue<T> randomEmptyResettableValue() {
return randomBoolean() ? ResettableValue.undefined() : ResettableValue.reset();
}
private static DataStreamFailureStore.Template normalise(DataStreamFailureStore.Template failureStoreTemplate) {
return new DataStreamFailureStore.Template(
failureStoreTemplate.enabled(),
failureStoreTemplate.lifecycle()
.map(
template -> new DataStreamLifecycle.Template(
template.lifecycleType(),
template.enabled(),
template.dataRetention().get(),
template.downsamplingRounds().get(),
template.downsamplingMethod().get()
)
)
);
}
}
| DataStreamFailureStoreTemplateTests |
java | apache__camel | components/camel-netty-http/src/main/java/org/apache/camel/component/netty/http/HttpPrincipal.java | {
"start": 932,
"end": 1533
} | class ____ implements Principal {
private final String username;
private final String password;
public HttpPrincipal(String username, String password) {
this.username = username;
this.password = password;
}
@Override
public String getName() {
return username;
}
public String getUsername() {
return username;
}
public String getPassword() {
return password;
}
@Override
public String toString() {
// do not display the password
return "HttpPrincipal[" + username + "]";
}
}
| HttpPrincipal |
java | quarkusio__quarkus | integration-tests/jackson/src/main/java/io/quarkus/it/jackson/DateDeserializerPojoResource.java | {
"start": 333,
"end": 1059
} | class ____ {
@Inject
private ObjectMapper objectMapper;
@POST
@Consumes(MediaType.TEXT_PLAIN)
@Produces(MediaType.APPLICATION_JSON)
@Path("/sql/timestamp")
public String timestamp(String body) throws IOException {
SqlTimestampPojo model = objectMapper.readValue(body, SqlTimestampPojo.class);
return objectMapper.writeValueAsString(model);
}
@POST
@Consumes(MediaType.TEXT_PLAIN)
@Produces(MediaType.APPLICATION_JSON)
@Path("/sql/date")
public String date(String body) throws IOException {
SqlDatePojo model = objectMapper.readValue(body, SqlDatePojo.class);
return objectMapper.writeValueAsString(model);
}
}
| DateDeserializerPojoResource |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/collection/internal/StandardCollectionSemanticsResolver.java | {
"start": 404,
"end": 775
} | class ____ implements CollectionSemanticsResolver {
/**
* Singleton access
*/
public static final StandardCollectionSemanticsResolver INSTANCE = new StandardCollectionSemanticsResolver();
@Override
public CollectionSemantics resolveRepresentation(Collection bootDescriptor) {
return bootDescriptor.getCollectionSemantics();
}
}
| StandardCollectionSemanticsResolver |
java | apache__flink | flink-metrics/flink-metrics-core/src/main/java/org/apache/flink/traces/reporter/TraceReporterFactory.java | {
"start": 1347,
"end": 1614
} | interface ____ {
/**
* Creates a new trace reporter.
*
* @param properties configured properties for the reporter
* @return created metric reporter
*/
TraceReporter createTraceReporter(final Properties properties);
}
| TraceReporterFactory |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/jsontype/TestVisibleTypeId.java | {
"start": 1106,
"end": 1444
} | class ____ {
public int a = 1;
protected String type;
public void setType(String t) { type = t; }
}
// as wrapper-object
@JsonTypeInfo(use=JsonTypeInfo.Id.NAME, include=JsonTypeInfo.As.WRAPPER_OBJECT,
property="type", visible=true)
@JsonTypeName("ObjectType")
static | WrapperArrayBean |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/aggregations/DelayedBucket.java | {
"start": 993,
"end": 3547
} | class ____<B extends InternalMultiBucketAggregation.InternalBucket> {
/**
* The buckets to reduce or {@code null} if we've already reduced the buckets.
*/
private List<B> toReduce;
/**
* The result of reducing the buckets or {@code null} if they haven't yet been
* reduced.
*/
private B reduced;
/**
* The count of documents. Calculated on the fly the first time its needed and
* cached.
*/
private long docCount = -1;
/**
* Build a delayed bucket.
*/
public DelayedBucket(List<B> toReduce) {
this.toReduce = toReduce;
}
/**
* The reduced bucket. If the bucket hasn't been reduced already this
* will reduce the sub-aggs and throw out the list to reduce.
*/
public B reduced(BiFunction<List<B>, AggregationReduceContext, B> reduce, AggregationReduceContext reduceContext) {
if (reduced == null) {
reduced = reduce.apply(toReduce, reduceContext);
toReduce = null;
}
return reduced;
}
/**
* Count the documents in the buckets.
*/
public long getDocCount() {
if (docCount < 0) {
if (reduced == null) {
docCount = 0;
for (B bucket : toReduce) {
docCount += bucket.getDocCount();
}
} else {
docCount = reduced.getDocCount();
}
}
return docCount;
}
/**
* Compare the keys of two buckets.
*/
@SuppressWarnings({ "rawtypes", "unchecked" }) // The funny casting here is sad, but this is how buckets are compared.
int compareKey(DelayedBucket<?> rhs) {
return ((KeyComparable) representativeBucket()).compareKey(rhs.representativeBucket());
}
/**
* A representative of the buckets used to acess the key.
*/
private B representativeBucket() {
return reduced == null ? toReduce.get(0) : reduced;
}
@Override
public String toString() {
return "Delayed[" + representativeBucket().getKeyAsString() + "]";
}
/**
* Called to mark a bucket as non-competitive so it can release it can release
* any sub-buckets from the breaker.
*/
void nonCompetitive(AggregationReduceContext reduceContext) {
if (reduced != null) {
// -countInnerBucket for all the sub-buckets.
reduceContext.consumeBucketsAndMaybeBreak(-InternalMultiBucketAggregation.countInnerBucket(reduced));
}
}
}
| DelayedBucket |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/annotation/subpackage/NonPublicAliasedAnnotation.java | {
"start": 993,
"end": 1147
} | interface ____ {
String name();
@AliasFor("path")
String value() default "";
@AliasFor("value")
String path() default "";
}
| NonPublicAliasedAnnotation |
java | apache__dubbo | dubbo-remoting/dubbo-remoting-netty/src/main/java/org/apache/dubbo/remoting/transport/netty/NettyClient.java | {
"start": 2145,
"end": 9968
} | class ____ extends AbstractClient {
// ChannelFactory's closure has a DirectMemory leak, using static to avoid
// https://issues.jboss.org/browse/NETTY-424
private static final ChannelFactory CHANNEL_FACTORY = new NioClientSocketChannelFactory(
Executors.newCachedThreadPool(new NamedThreadFactory("NettyClientBoss", true)),
Executors.newCachedThreadPool(new NamedThreadFactory("NettyClientWorker", true)),
Constants.DEFAULT_IO_THREADS);
private ClientBootstrap bootstrap;
private volatile Channel channel; // volatile, please copy reference to use
public NettyClient(final URL url, final ChannelHandler handler) throws RemotingException {
super(url, wrapChannelHandler(url, handler));
}
@Override
protected void doOpen() throws Throwable {
NettyHelper.setNettyLoggerFactory();
bootstrap = new ClientBootstrap(CHANNEL_FACTORY);
// config
// @see org.jboss.netty.channel.socket.SocketChannelConfig
bootstrap.setOption("keepAlive", true);
bootstrap.setOption("tcpNoDelay", true);
bootstrap.setOption("connectTimeoutMillis", getConnectTimeout());
final NettyHandler nettyHandler = new NettyHandler(getUrl(), this);
bootstrap.setPipelineFactory(new ChannelPipelineFactory() {
@Override
public ChannelPipeline getPipeline() {
NettyCodecAdapter adapter = new NettyCodecAdapter(getCodec(), getUrl(), NettyClient.this);
ChannelPipeline pipeline = Channels.pipeline();
pipeline.addLast("decoder", adapter.getDecoder());
pipeline.addLast("encoder", adapter.getEncoder());
pipeline.addLast("handler", nettyHandler);
return pipeline;
}
});
}
@Override
protected void doConnect() throws Throwable {
long start = System.currentTimeMillis();
InetSocketAddress connectAddress = getConnectAddress();
ChannelFuture future = bootstrap.connect(connectAddress);
long connectTimeout = getConnectTimeout();
long deadline = start + connectTimeout;
try {
while (true) {
boolean ret = future.awaitUninterruptibly(connectTimeout, TimeUnit.MILLISECONDS);
if (ret && future.isSuccess()) {
Channel newChannel = future.getChannel();
newChannel.setInterestOps(Channel.OP_READ_WRITE);
try {
// copy reference
Channel oldChannel = NettyClient.this.channel;
if (oldChannel != null) {
try {
if (logger.isInfoEnabled()) {
logger.info("Close old netty channel " + oldChannel
+ " on create new netty channel " + newChannel);
}
// Close old channel
oldChannel.close();
} finally {
NettyChannel.removeChannelIfDisconnected(oldChannel);
}
}
} finally {
if (NettyClient.this.isClosed()) {
try {
if (logger.isInfoEnabled()) {
logger.info(
"Close new netty channel " + newChannel + ", because the client closed.");
}
newChannel.close();
} finally {
NettyClient.this.channel = null;
NettyChannel.removeChannelIfDisconnected(newChannel);
}
} else {
NettyClient.this.channel = newChannel;
}
}
break;
} else if (future.getCause() != null) {
Throwable cause = future.getCause();
if (cause instanceof ClosedChannelException) {
// Netty3.2.10 ClosedChannelException issue, see https://github.com/netty/netty/issues/138
connectTimeout = deadline - System.currentTimeMillis();
if (connectTimeout > 0) {
// 6-1 - Retry connect to provider server by Netty3.2.10 ClosedChannelException issue#138.
logger.warn(
TRANSPORT_FAILED_CONNECT_PROVIDER,
"Netty3.2.10 ClosedChannelException issue#138",
"",
"Retry connect to provider server.");
future = bootstrap.connect(connectAddress);
continue;
}
}
RemotingException remotingException = new RemotingException(
this,
"client(url: " + getUrl() + ") failed to connect to server " + getRemoteAddress()
+ ", error message is:" + cause.getMessage(),
cause);
// 6-1 - Failed to connect to provider server by other reason.
logger.error(
TRANSPORT_FAILED_CONNECT_PROVIDER,
"network disconnected",
"",
"Failed to connect to provider server by other reason.",
cause);
throw remotingException;
} else {
RemotingException remotingException = new RemotingException(
this,
"client(url: " + getUrl() + ") failed to connect to server " + getRemoteAddress()
+ " client-side timeout " + getConnectTimeout() + "ms (elapsed: "
+ (System.currentTimeMillis() - start) + "ms) from netty client "
+ NetUtils.getLocalHost() + " using dubbo version " + Version.getVersion());
// 6-2 - Client-side timeout.
logger.error(
TRANSPORT_CLIENT_CONNECT_TIMEOUT,
"provider crash",
"",
"Client-side timeout.",
remotingException);
throw remotingException;
}
}
} finally {
if (!isConnected()) {
future.cancel();
}
}
}
@Override
protected void doDisConnect() throws Throwable {
try {
NettyChannel.removeChannelIfDisconnected(channel);
} catch (Throwable t) {
logger.warn(TRANSPORT_FAILED_DISCONNECT_PROVIDER, "", "", t.getMessage());
}
}
@Override
protected void doClose() throws Throwable {
/*try {
bootstrap.releaseExternalResources();
} catch (Throwable t) {
logger.warn(t.getMessage());
}*/
}
@Override
protected org.apache.dubbo.remoting.Channel getChannel() {
Channel c = channel;
if (c == null || !c.isConnected()) {
return null;
}
return NettyChannel.getOrAddChannel(c, getUrl(), this);
}
Channel getNettyChannel() {
return channel;
}
}
| NettyClient |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/models/annotations/internal/DiscriminatorColumnJpaAnnotation.java | {
"start": 718,
"end": 3735
} | class ____ implements DiscriminatorColumn, ColumnDetails {
private String name;
private jakarta.persistence.DiscriminatorType discriminatorType;
private String columnDefinition;
private String options;
private int length;
/**
* Used in creating dynamic annotation instances (e.g. from XML)
*/
public DiscriminatorColumnJpaAnnotation(ModelsContext modelContext) {
this.name = "DTYPE";
this.discriminatorType = jakarta.persistence.DiscriminatorType.STRING;
this.columnDefinition = "";
this.options = "";
this.length = 31;
}
/**
* Used in creating annotation instances from JDK variant
*/
public DiscriminatorColumnJpaAnnotation(DiscriminatorColumn annotation, ModelsContext modelContext) {
this.name = annotation.name();
this.discriminatorType = annotation.discriminatorType();
this.columnDefinition = annotation.columnDefinition();
this.options = annotation.options();
this.length = annotation.length();
}
/**
* Used in creating annotation instances from Jandex variant
*/
public DiscriminatorColumnJpaAnnotation(
Map<String, Object> attributeValues,
ModelsContext modelContext) {
this.name = (String) attributeValues.get( "name" );
this.discriminatorType = (jakarta.persistence.DiscriminatorType) attributeValues.get( "discriminatorType" );
this.columnDefinition = (String) attributeValues.get( "columnDefinition" );
this.options = (String) attributeValues.get( "options" );
this.length = (int) attributeValues.get( "length" );
}
@Override
public Class<? extends Annotation> annotationType() {
return DiscriminatorColumn.class;
}
@Override
public String name() {
return name;
}
public void name(String value) {
this.name = value;
}
@Override
public jakarta.persistence.DiscriminatorType discriminatorType() {
return discriminatorType;
}
public void discriminatorType(jakarta.persistence.DiscriminatorType value) {
this.discriminatorType = value;
}
@Override
public String columnDefinition() {
return columnDefinition;
}
public void columnDefinition(String value) {
this.columnDefinition = value;
}
@Override
public String options() {
return options;
}
public void options(String value) {
this.options = value;
}
@Override
public int length() {
return length;
}
public void length(int value) {
this.length = value;
}
public void apply(JaxbDiscriminatorColumnImpl jaxbColumn, XmlDocumentContext xmlDocumentContext) {
if ( StringHelper.isNotEmpty( jaxbColumn.getName() ) ) {
name( jaxbColumn.getName() );
}
if ( jaxbColumn.getDiscriminatorType() != null ) {
discriminatorType( jaxbColumn.getDiscriminatorType() );
}
if ( jaxbColumn.getLength() != null ) {
length( jaxbColumn.getLength() );
}
if ( StringHelper.isNotEmpty( jaxbColumn.getColumnDefinition() ) ) {
columnDefinition( jaxbColumn.getColumnDefinition() );
}
if ( StringHelper.isNotEmpty( jaxbColumn.getOptions() ) ) {
options( jaxbColumn.getOptions() );
}
}
}
| DiscriminatorColumnJpaAnnotation |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java | {
"start": 1451,
"end": 2862
} | class ____ extends TransportBroadcastReplicationAction<
FlushRequest,
BroadcastResponse,
ShardFlushRequest,
ReplicationResponse> {
@Inject
public TransportFlushAction(
ClusterService clusterService,
TransportService transportService,
NodeClient client,
ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
ProjectResolver projectResolver
) {
super(
FlushAction.NAME,
FlushRequest::new,
clusterService,
transportService,
client,
actionFilters,
indexNameExpressionResolver,
TransportShardFlushAction.TYPE,
transportService.getThreadPool().executor(ThreadPool.Names.FLUSH),
projectResolver
);
}
@Override
protected ShardFlushRequest newShardRequest(FlushRequest request, ShardId shardId, SplitShardCountSummary shardCountSummary) {
return new ShardFlushRequest(request, shardId, shardCountSummary);
}
@Override
protected BroadcastResponse newResponse(
int successfulShards,
int failedShards,
int totalNumCopies,
List<DefaultShardOperationFailedException> shardFailures
) {
return new BroadcastResponse(totalNumCopies, successfulShards, failedShards, shardFailures);
}
}
| TransportFlushAction |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/cid/keymanytoone/Card.java | {
"start": 486,
"end": 1220
} | class ____ implements Serializable {
@Id
private String id;
@OneToMany(cascade = CascadeType.ALL, fetch = FetchType.EAGER, mappedBy = "primaryKey.card")
private Set<CardField> fields;
String model;
Card() {
fields = new HashSet<>();
}
public Card(String id) {
this();
this.id = id;
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public void addField(Card card, Key key) {
fields.add(new CardField( card, key));
}
public Set<CardField> getFields() {
return fields;
}
public void setFields(Set<CardField> fields) {
this.fields = fields;
}
public String getModel() {
return model;
}
public void setModel(String model) {
this.model = model;
}
}
| Card |
java | quarkusio__quarkus | independent-projects/qute/core/src/main/java/io/quarkus/qute/TemplateLocator.java | {
"start": 676,
"end": 1191
} | interface ____ {
/**
* A {@link Reader} instance produced by a locator is immediately closed right after the template content is parsed.
*
* @return the reader
*/
Reader read();
/**
*
* @return the template variant
*/
Optional<Variant> getVariant();
/**
* @return the source
*/
default Optional<URI> getSource() {
return Optional.empty();
}
}
}
| TemplateLocation |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/android/FragmentNotInstantiableTest.java | {
"start": 2382,
"end": 2539
} | class ____ extends android.support.v4.app.Fragment {
public PrivateV4Fragment() {}
}
public static | PrivateV4Fragment |
java | elastic__elasticsearch | x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYMaxTests.java | {
"start": 1316,
"end": 3285
} | class ____ extends AbstractScalarFunctionTestCase {
public StYMaxTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
this.testCase = testCaseSupplier.get();
}
@ParametersFactory
public static Iterable<Object[]> parameters() {
String expectedGeo = "StYMaxFromWKBGeoEvaluator[wkb=Attribute[channel=0]]";
String expectedCartesian = "StYMaxFromWKBEvaluator[wkb=Attribute[channel=0]]";
final List<TestCaseSupplier> suppliers = new ArrayList<>();
TestCaseSupplier.forUnaryGeoPoint(suppliers, expectedGeo, DOUBLE, StYMaxTests::valueOfGeo, List.of());
TestCaseSupplier.forUnaryCartesianPoint(suppliers, expectedCartesian, DOUBLE, StYMaxTests::valueOfCartesian, List.of());
TestCaseSupplier.forUnaryGeoShape(suppliers, expectedGeo, DOUBLE, StYMaxTests::valueOfGeo, List.of());
TestCaseSupplier.forUnaryCartesianShape(suppliers, expectedCartesian, DOUBLE, StYMaxTests::valueOfCartesian, List.of());
return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers);
}
private static double valueOfGeo(BytesRef wkb) {
return valueOf(wkb, true);
}
private static double valueOfCartesian(BytesRef wkb) {
return valueOf(wkb, false);
}
private static double valueOf(BytesRef wkb, boolean geo) {
var geometry = UNSPECIFIED.wkbToGeometry(wkb);
if (geometry instanceof Point point) {
return point.getY();
}
var envelope = geo
? SpatialEnvelopeVisitor.visitGeo(geometry, WrapLongitude.WRAP)
: SpatialEnvelopeVisitor.visitCartesian(geometry);
if (envelope.isPresent()) {
return envelope.get().getMaxY();
}
throw new IllegalArgumentException("Geometry is empty");
}
@Override
protected Expression build(Source source, List<Expression> args) {
return new StYMax(source, args.get(0));
}
}
| StYMaxTests |
java | quarkusio__quarkus | extensions/jdbc/jdbc-h2/runtime/src/main/java/io/quarkus/jdbc/h2/runtime/H2AgroalConnectionConfigurer.java | {
"start": 344,
"end": 930
} | class ____ implements AgroalConnectionConfigurer {
@Override
public void disableSslSupport(String databaseKind, AgroalDataSourceConfigurationSupplier dataSourceConfiguration,
Map<String, String> additionalProperties) {
// do not log anything for H2
}
@Override
public void setExceptionSorter(String databaseKind, AgroalDataSourceConfigurationSupplier dataSourceConfiguration) {
// Do not log a warning: we don't have an exception sorter for H2,
// but there is nothing the user can do about it.
}
}
| H2AgroalConnectionConfigurer |
java | spring-projects__spring-boot | smoke-test/spring-boot-smoke-test-saml2-service-provider/src/main/java/smoketest/saml2/serviceprovider/SampleSaml2RelyingPartyApplication.java | {
"start": 819,
"end": 978
} | class ____ {
public static void main(String[] args) {
SpringApplication.run(SampleSaml2RelyingPartyApplication.class);
}
}
| SampleSaml2RelyingPartyApplication |
java | apache__flink | flink-core/src/test/java/org/apache/flink/util/SerializedValueTest.java | {
"start": 1129,
"end": 2688
} | class ____ {
@Test
void testSimpleValue() throws Exception {
final String value = "teststring";
SerializedValue<String> v = new SerializedValue<>(value);
SerializedValue<String> copy = CommonTestUtils.createCopySerializable(v);
assertThat(v.deserializeValue(getClass().getClassLoader())).isEqualTo(value);
assertThat(copy.deserializeValue(getClass().getClassLoader())).isEqualTo(value);
assertThat(copy).isEqualTo(v);
assertThat(copy).hasSameHashCodeAs(v);
assertThat(v.toString()).isNotNull();
assertThat(copy.toString()).isNotNull();
assertThat(v.getByteArray()).isNotEmpty();
assertThat(copy.getByteArray()).isEqualTo(v.getByteArray());
byte[] bytes = v.getByteArray();
SerializedValue<String> saved =
SerializedValue.fromBytes(Arrays.copyOf(bytes, bytes.length));
assertThat(saved).isEqualTo(v);
assertThat(saved.getByteArray()).isEqualTo(v.getByteArray());
}
@Test
void testNullValue() {
assertThatThrownBy(() -> new SerializedValue<>(null))
.isInstanceOf(NullPointerException.class);
}
@Test
void testFromNullBytes() {
assertThatThrownBy(() -> SerializedValue.fromBytes(null))
.isInstanceOf(NullPointerException.class);
}
@Test
void testFromEmptyBytes() {
assertThatThrownBy(() -> SerializedValue.fromBytes(new byte[0]))
.isInstanceOf(IllegalArgumentException.class);
}
}
| SerializedValueTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/indexcoll/Trainee.java | {
"start": 309,
"end": 583
} | class ____ {
@Id @GeneratedValue private Long id;
private String name;
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
| Trainee |
java | apache__camel | core/camel-support/src/main/java/org/apache/camel/support/DeferProducer.java | {
"start": 1593,
"end": 4131
} | class ____ extends ServiceSupport implements Producer, AsyncProcessor {
private Producer delegate;
private final Endpoint endpoint;
public DeferProducer(Endpoint endpoint) {
this.endpoint = endpoint;
}
@Override
public void process(Exchange exchange) throws Exception {
if (delegate == null) {
throw new IllegalStateException("Not started");
}
delegate.process(exchange);
}
@Override
public boolean process(Exchange exchange, AsyncCallback callback) {
if (delegate == null) {
exchange.setException(new IllegalStateException("Not started"));
callback.done(true);
return true;
}
if (delegate instanceof AsyncProcessor asyncProcessor) {
return asyncProcessor.process(exchange, callback);
}
// fallback to sync mode
try {
process(exchange);
} catch (Exception e) {
exchange.setException(e);
}
callback.done(true);
return true;
}
@Override
public CompletableFuture<Exchange> processAsync(Exchange exchange) {
AsyncCallbackToCompletableFutureAdapter<Exchange> callback = new AsyncCallbackToCompletableFutureAdapter<>(exchange);
process(exchange, callback);
return callback.getFuture();
}
@Override
protected void doStart() throws Exception {
// need to lookup endpoint again as it may be intercepted
Endpoint lookup = endpoint.getCamelContext().getEndpoint(endpoint.getEndpointUri());
InternalProcessorFactory pf = PluginHelper.getInternalProcessorFactory(endpoint.getCamelContext());
delegate = pf.createProducer(lookup);
ServiceHelper.startService(delegate);
}
@Override
protected void doStop() throws Exception {
ServiceHelper.stopService(delegate);
}
@Override
public boolean isSingleton() {
if (delegate != null) {
return delegate.isSingleton();
} else {
// assume singleton by default
return true;
}
}
@Override
public Endpoint getEndpoint() {
if (delegate != null) {
return delegate.getEndpoint();
} else {
return endpoint;
}
}
@Override
public String toString() {
if (delegate != null) {
return delegate.toString();
} else {
return "DelegateProducer[" + endpoint + "]";
}
}
}
| DeferProducer |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/model/internal/CreateKeySecondPass.java | {
"start": 378,
"end": 1051
} | class ____ implements SecondPass {
private RootClass rootClass;
private JoinedSubclass joinedSubClass;
public CreateKeySecondPass(RootClass rootClass) {
this.rootClass = rootClass;
}
public CreateKeySecondPass(JoinedSubclass joinedSubClass) {
this.joinedSubClass = joinedSubClass;
}
@Override
public void doSecondPass(Map<String, PersistentClass> persistentClasses) {
if ( rootClass != null ) {
rootClass.createPrimaryKey();
}
else if ( joinedSubClass != null ) {
joinedSubClass.createPrimaryKey();
joinedSubClass.createForeignKey();
}
else {
throw new AssertionError( "rootClass and joinedSubClass are null" );
}
}
}
| CreateKeySecondPass |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhancement/lazy/group/BidirectionalLazyGroupsInEmbeddableTest.java | {
"start": 2760,
"end": 3734
} | class ____ {
private String name;
private Set<Employee> employees;
public Employer(String name) {
this();
setName( name );
}
@Id
public String getName() {
return name;
}
@OneToMany(mappedBy = "employerContainer.employer", fetch = FetchType.LAZY)
@LazyGroup("Employees")
public Set<Employee> getEmployees() {
return employees;
}
public void addEmployee(Employee employee) {
if ( getEmployees() == null ) {
setEmployees( new HashSet<>() );
}
employees.add( employee );
if ( employee.getEmployerContainer() == null ) {
employee.setEmployerContainer( new EmployerContainer() );
}
employee.getEmployerContainer().setEmployer( this );
}
protected Employer() {
// this form used by Hibernate
}
protected void setName(String name) {
this.name = name;
}
protected void setEmployees(Set<Employee> employees) {
this.employees = employees;
}
}
@Entity(name = "Employee")
public static | Employer |
java | alibaba__nacos | common/src/main/java/com/alibaba/nacos/common/http/client/response/JdkHttpClientResponse.java | {
"start": 1080,
"end": 2868
} | class ____ implements HttpClientResponse {
private final HttpURLConnection conn;
private InputStream responseStream;
private Header responseHeader;
private static final String CONTENT_ENCODING = "gzip";
public JdkHttpClientResponse(HttpURLConnection conn) {
this.conn = conn;
}
@Override
public Header getHeaders() {
if (this.responseHeader == null) {
this.responseHeader = Header.newInstance();
}
for (Map.Entry<String, List<String>> entry : conn.getHeaderFields().entrySet()) {
this.responseHeader.addOriginalResponseHeader(entry.getKey(), entry.getValue());
}
return this.responseHeader;
}
@Override
public InputStream getBody() throws IOException {
Header headers = getHeaders();
InputStream errorStream = this.conn.getErrorStream();
this.responseStream = (errorStream != null ? errorStream : this.conn.getInputStream());
String contentEncoding = headers.getValue(HttpHeaderConsts.CONTENT_ENCODING);
// Used to process http content_encoding, when content_encoding is GZIP, use GZIPInputStream
if (CONTENT_ENCODING.equals(contentEncoding)) {
byte[] bytes = IoUtils.tryDecompress(this.responseStream);
return new ByteArrayInputStream(bytes);
}
return this.responseStream;
}
@Override
public int getStatusCode() throws IOException {
return this.conn.getResponseCode();
}
@Override
public String getStatusText() throws IOException {
return this.conn.getResponseMessage();
}
@Override
public void close() {
IoUtils.closeQuietly(this.responseStream);
}
}
| JdkHttpClientResponse |
java | apache__flink | flink-table/flink-sql-parser/src/main/java/org/apache/flink/sql/parser/ddl/SqlCreateCatalog.java | {
"start": 1374,
"end": 2244
} | class ____ extends SqlCreateObject {
private static final SqlSpecialOperator OPERATOR =
new SqlSpecialOperator("CREATE CATALOG", SqlKind.OTHER_DDL);
public SqlCreateCatalog(
SqlParserPos position,
SqlIdentifier catalogName,
SqlNodeList propertyList,
@Nullable SqlCharStringLiteral comment,
boolean ifNotExists) {
super(OPERATOR, position, catalogName, false, false, ifNotExists, propertyList, comment);
requireNonNull(propertyList, "propertyList cannot be null");
}
@Override
protected String getScope() {
return "CATALOG";
}
@Override
public List<SqlNode> getOperandList() {
return ImmutableNullableList.of(name, properties, comment);
}
public String catalogName() {
return name.getSimple();
}
}
| SqlCreateCatalog |
java | google__error-prone | core/src/main/java/com/google/errorprone/refaster/UMethodIdent.java | {
"start": 1070,
"end": 2169
} | class ____ extends UIdent {
public static UMethodIdent create(UClassIdent classIdent, CharSequence member, UType memberType) {
return new AutoValue_UMethodIdent(classIdent, StringName.of(member), memberType);
}
public static UMethodIdent create(ClassSymbol classSym, CharSequence member, UType memberType) {
return create(UClassIdent.create(classSym), member, memberType);
}
abstract UClassIdent classIdent();
@Override
public abstract StringName getName();
abstract UType memberType();
@Override
public JCExpression inline(Inliner inliner) throws CouldNotResolveImportException {
return inliner.maker().Ident(getName().inline(inliner));
}
@Override
protected Choice<Unifier> defaultAction(Tree node, Unifier unifier) {
Symbol symbol = ASTHelpers.getSymbol(node);
if (symbol != null) {
return classIdent()
.unify(symbol.getEnclosingElement(), unifier)
.flatMap(unifications(getName(), symbol.getSimpleName()))
.flatMap(unifications(memberType(), symbol.asType()));
}
return Choice.none();
}
}
| UMethodIdent |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/error/ShouldBeASCII_create_Test.java | {
"start": 953,
"end": 1600
} | class ____ {
@Test
void should_create_error_message_for_non_ASCII_character() {
// WHEN
String message = shouldBeASCII("\u2303").create(new TextDescription("Test"), STANDARD_REPRESENTATION);
// THEN
then(message).isEqualTo("[Test] %nExpecting \"\u2303\" to be ASCII".formatted());
}
@Test
void should_create_error_message_for_strings_with_ASCII_character() {
// WHEN
String message = shouldBeASCII("123\u230300abc").create(new TextDescription("Test"), STANDARD_REPRESENTATION);
// THEN
then(message).isEqualTo("[Test] %nExpecting \"123\u230300abc\" to be ASCII".formatted());
}
}
| ShouldBeASCII_create_Test |
java | google__dagger | dagger-testing/main/java/dagger/model/testing/BindingGraphSubject.java | {
"start": 4058,
"end": 5510
} | class ____ extends Subject {
private final Binding actual;
BindingSubject(FailureMetadata metadata, @NullableDecl Binding actual) {
super(metadata, actual);
this.actual = actual;
}
/**
* Asserts that the binding depends on a binding with an unqualified key.
*
* @param type the canonical name of the type, as returned by {@link TypeMirror#toString()}
*/
public void dependsOnBindingWithKey(String type) {
dependsOnBindingWithKeyString(keyString(type));
}
/**
* Asserts that the binding depends on a binding with a qualified key.
*
* @param qualifier the canonical string form of the qualifier, as returned by {@link
* javax.lang.model.element.AnnotationMirror AnnotationMirror.toString()}
* @param type the canonical name of the type, as returned by {@link TypeMirror#toString()}
*/
public void dependsOnBindingWithKey(String qualifier, String type) {
dependsOnBindingWithKeyString(keyString(qualifier, type));
}
private void dependsOnBindingWithKeyString(String keyString) {
if (actualBindingGraph().requestedBindings(actual).stream()
.noneMatch(binding -> binding.key().toString().equals(keyString))) {
failWithActual("expected to depend on binding with key", keyString);
}
}
private BindingGraph actualBindingGraph() {
return BindingGraphSubject.this.actual;
}
}
}
| BindingSubject |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/LastLongByTimestampAggregatorFunctionTests.java | {
"start": 887,
"end": 2846
} | class ____ extends AggregatorFunctionTestCase {
@Override
protected SourceOperator simpleInput(BlockFactory blockFactory, int size) {
FirstLongByTimestampGroupingAggregatorFunctionTests.TimestampGen tsgen = randomFrom(
FirstLongByTimestampGroupingAggregatorFunctionTests.TimestampGen.values()
);
return new TupleLongLongBlockSourceOperator(
blockFactory,
IntStream.range(0, size).mapToObj(l -> Tuple.tuple(randomLong(), tsgen.gen()))
);
}
@Override
protected AggregatorFunctionSupplier aggregatorFunction() {
return new LastLongByTimestampAggregatorFunctionSupplier();
}
@Override
protected int inputCount() {
return 2;
}
@Override
protected String expectedDescriptionOfAggregator() {
return "last_long_by_timestamp";
}
@Override
public void assertSimpleOutput(List<Page> input, Block result) {
ExpectedWork work = new ExpectedWork(false);
for (Page page : input) {
LongBlock values = page.getBlock(0);
LongBlock timestamps = page.getBlock(1);
for (int p = 0; p < page.getPositionCount(); p++) {
int tsStart = timestamps.getFirstValueIndex(p);
int tsEnd = tsStart + timestamps.getValueCount(p);
for (int tsOffset = tsStart; tsOffset < tsEnd; tsOffset++) {
long timestamp = timestamps.getLong(tsOffset);
int vStart = values.getFirstValueIndex(p);
int vEnd = vStart + values.getValueCount(p);
for (int vOffset = vStart; vOffset < vEnd; vOffset++) {
long value = values.getLong(vOffset);
work.add(timestamp, value);
}
}
}
}
work.check(BlockUtils.toJavaObject(result, 0));
}
}
| LastLongByTimestampAggregatorFunctionTests |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/arm-java/org/apache/hadoop/ipc/protobuf/TestRpcServiceProtosLegacy.java | {
"start": 150665,
"end": 156400
} | class ____ implements BlockingInterface {
private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
this.channel = channel;
}
private final com.google.protobuf.BlockingRpcChannel channel;
public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2 sleep(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2 request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2) channel.callBlockingMethod(
getDescriptor().getMethods().get(0),
controller,
request,
org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2.getDefaultInstance());
}
}
// @@protoc_insertion_point(class_scope:hadoop.common.TestProtobufRpcHandoffProto)
}
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
return descriptor;
}
private static com.google.protobuf.Descriptors.FileDescriptor
descriptor;
static {
java.lang.String[] descriptorData = {
"\n\035test_rpc_service_legacy.proto\022\rhadoop." +
"common\032\021test_legacy.proto2\330\013\n\024TestProtob" +
"ufRpcProto\022K\n\004ping\022 .hadoop.common.Empty" +
"RequestProto\032!.hadoop.common.EmptyRespon" +
"seProto\022I\n\004echo\022\037.hadoop.common.EchoRequ" +
"estProto\032 .hadoop.common.EchoResponsePro" +
"to\022L\n\005error\022 .hadoop.common.EmptyRequest" +
"Proto\032!.hadoop.common.EmptyResponseProto" +
"\022M\n\006error2\022 .hadoop.common.EmptyRequestP" +
"roto\032!.hadoop.common.EmptyResponseProto\022",
"R\n\010slowPing\022#.hadoop.common.SlowPingRequ" +
"estProto\032!.hadoop.common.EmptyResponsePr" +
"oto\022L\n\005echo2\022 .hadoop.common.EchoRequest" +
"Proto2\032!.hadoop.common.EchoResponseProto" +
"2\022F\n\003add\022\036.hadoop.common.AddRequestProto" +
"\032\037.hadoop.common.AddResponseProto\022H\n\004add" +
"2\022\037.hadoop.common.AddRequestProto2\032\037.had" +
"oop.common.AddResponseProto\022T\n\rtestServe" +
"rGet\022 .hadoop.common.EmptyRequestProto\032!" +
".hadoop.common.EmptyResponseProto\022U\n\010exc",
"hange\022#.hadoop.common.ExchangeRequestPro" +
"to\032$.hadoop.common.ExchangeResponseProto" +
"\022L\n\005sleep\022 .hadoop.common.SleepRequestPr" +
"oto\032!.hadoop.common.EmptyResponseProto\022S" +
"\n\014lockAndSleep\022 .hadoop.common.SleepRequ" +
"estProto\032!.hadoop.common.EmptyResponsePr" +
"oto\022Y\n\rgetAuthMethod\022 .hadoop.common.Emp" +
"tyRequestProto\032&.hadoop.common.AuthMetho" +
"dResponseProto\022Q\n\013getAuthUser\022 .hadoop.c" +
"ommon.EmptyRequestProto\032 .hadoop.common.",
"UserResponseProto\022R\n\rechoPostponed\022\037.had" +
"oop.common.EchoRequestProto\032 .hadoop.com" +
"mon.EchoResponseProto\022T\n\rsendPostponed\022 " +
".hadoop.common.EmptyRequestProto\032!.hadoo" +
"p.common.EmptyResponseProto\022T\n\016getCurren" +
"tUser\022 .hadoop.common.EmptyRequestProto\032" +
" .hadoop.common.UserResponseProto\022Y\n\023get" +
"ServerRemoteUser\022 .hadoop.common.EmptyRe" +
"questProto\032 .hadoop.common.UserResponseP" +
"roto2\377\001\n\025TestProtobufRpc2Proto\022L\n\005ping2\022",
" .hadoop.common.EmptyRequestProto\032!.hado" +
"op.common.EmptyResponseProto\022J\n\005echo2\022\037." +
"hadoop.common.EchoRequestProto\032 .hadoop." +
"common.EchoResponseProto\022L\n\005sleep\022 .hado" +
"op.common.SleepRequestProto\032!.hadoop.com" +
"mon.SleepResponseProto2\257\001\n\023OldProtobufRp" +
"cProto\022K\n\004ping\022 .hadoop.common.EmptyRequ" +
"estProto\032!.hadoop.common.EmptyResponsePr" +
"oto\022K\n\004echo\022 .hadoop.common.EmptyRequest" +
"Proto\032!.hadoop.common.EmptyResponseProto",
"2\253\001\n\023NewProtobufRpcProto\022K\n\004ping\022 .hadoo" +
"p.common.EmptyRequestProto\032!.hadoop.comm" +
"on.EmptyResponseProto\022G\n\004echo\022\036.hadoop.c" +
"ommon.OptRequestProto\032\037.hadoop.common.Op" +
"tResponseProto2\261\001\n\025NewerProtobufRpcProto" +
"\022K\n\004ping\022 .hadoop.common.EmptyRequestPro" +
"to\032!.hadoop.common.EmptyResponseProto\022K\n" +
"\004echo\022 .hadoop.common.EmptyRequestProto\032" +
"!.hadoop.common.EmptyResponseProto2Z\n\013Cu" +
"stomProto\022K\n\004ping\022 .hadoop.common.EmptyR",
"equestProto\032!.hadoop.common.EmptyRespons" +
"eProto2m\n\033TestProtobufRpcHandoffProto\022N\n" +
"\005sleep\022!.hadoop.common.SleepRequestProto" +
"2\032\".hadoop.common.SleepResponseProto2BB\n" +
"\036org.apache.hadoop.ipc.protobufB\032TestRpc" +
"ServiceProtosLegacy\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
public com.google.protobuf.ExtensionRegistry assignDescriptors(
com.google.protobuf.Descriptors.FileDescriptor root) {
descriptor = root;
return null;
}
};
com.google.protobuf.Descriptors.FileDescriptor
.internalBuildGeneratedFileFrom(descriptorData,
new com.google.protobuf.Descriptors.FileDescriptor[] {
org.apache.hadoop.ipc.protobuf.TestProtosLegacy.getDescriptor(),
}, assigner);
}
// @@protoc_insertion_point(outer_class_scope)
}
| BlockingStub |
java | apache__kafka | clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java | {
"start": 8632,
"end": 28754
} | class ____ {
private final String topic = "topic";
private final Collection<Node> nodes = Collections.singletonList(NODE);
private final Cluster emptyCluster = new Cluster(
null,
nodes,
Collections.emptySet(),
Collections.emptySet(),
Collections.emptySet());
private final Cluster onePartitionCluster = new Cluster(
"dummy",
nodes,
Collections.singletonList(new PartitionInfo(topic, 0, null, null, null)),
Collections.emptySet(),
Collections.emptySet());
private final Cluster threePartitionCluster = new Cluster(
"dummy",
nodes,
Arrays.asList(
new PartitionInfo(topic, 0, null, null, null),
new PartitionInfo(topic, 1, null, null, null),
new PartitionInfo(topic, 2, null, null, null)),
Collections.emptySet(),
Collections.emptySet());
private TestInfo testInfo;
private static final int DEFAULT_METADATA_IDLE_MS = 5 * 60 * 1000;
private static final Node NODE = new Node(0, "host1", 1000);
private static <K, V> KafkaProducer<K, V> kafkaProducer(Map<String, Object> configs,
Serializer<K> keySerializer,
Serializer<V> valueSerializer,
ProducerMetadata metadata,
KafkaClient kafkaClient,
ProducerInterceptors<K, V> interceptors,
Time time) {
return new KafkaProducer<>(new ProducerConfig(ProducerConfig.appendSerializerToConfig(configs, keySerializer, valueSerializer)),
keySerializer, valueSerializer, metadata, kafkaClient, interceptors, new ApiVersions(), time);
}
@BeforeEach
public void setup(TestInfo testInfo) {
this.testInfo = testInfo;
}
@AfterEach
public void detectLeaks() throws InterruptedException {
// Assert no thread leakage of Kafka producer.
TestUtils.assertNoLeakedThreadsWithNameAndDaemonStatus(NETWORK_THREAD_PREFIX, Boolean.TRUE);
}
@Test
public void testOverwriteAcksAndRetriesForIdempotentProducers() {
Properties props = new Properties();
props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
props.setProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "transactionalId");
props.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
props.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
ProducerConfig config = new ProducerConfig(props);
assertTrue(config.getBoolean(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG));
assertTrue(Stream.of("-1", "all").anyMatch(each -> each.equalsIgnoreCase(config.getString(ProducerConfig.ACKS_CONFIG))));
assertEquals(Integer.MAX_VALUE, (int) config.getInt(ProducerConfig.RETRIES_CONFIG));
assertTrue(config.getString(ProducerConfig.CLIENT_ID_CONFIG).equalsIgnoreCase("producer-" +
config.getString(ProducerConfig.TRANSACTIONAL_ID_CONFIG)));
}
@Test
public void testAcksAndIdempotenceForIdempotentProducers() {
Properties baseProps = baseProperties();
Properties validProps = new Properties() {{
putAll(baseProps);
setProperty(ProducerConfig.ACKS_CONFIG, "0");
setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "false");
}};
ProducerConfig config = new ProducerConfig(validProps);
assertFalse(
config.getBoolean(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG),
"idempotence should be overwritten");
assertEquals(
"0",
config.getString(ProducerConfig.ACKS_CONFIG),
"acks should be overwritten");
Properties validProps2 = new Properties() {{
putAll(baseProps);
setProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "transactionalId");
}};
config = new ProducerConfig(validProps2);
assertTrue(
config.getBoolean(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG),
"idempotence should be set with the default value");
assertEquals(
"-1",
config.getString(ProducerConfig.ACKS_CONFIG),
"acks should be set with the default value");
Properties validProps3 = new Properties() {{
putAll(baseProps);
setProperty(ProducerConfig.ACKS_CONFIG, "all");
setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "false");
}};
config = new ProducerConfig(validProps3);
assertFalse(config.getBoolean(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG),
"idempotence should be overwritten");
assertEquals(
"-1",
config.getString(ProducerConfig.ACKS_CONFIG),
"acks should be overwritten");
Properties validProps4 = new Properties() {{
putAll(baseProps);
setProperty(ProducerConfig.ACKS_CONFIG, "0");
}};
config = new ProducerConfig(validProps4);
assertFalse(
config.getBoolean(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG),
"idempotence should be disabled when acks not set to all and `enable.idempotence` config is unset.");
assertEquals(
"0",
config.getString(ProducerConfig.ACKS_CONFIG),
"acks should be set with overridden value");
Properties validProps5 = new Properties() {{
putAll(baseProps);
setProperty(ProducerConfig.ACKS_CONFIG, "1");
}};
config = new ProducerConfig(validProps5);
assertFalse(
config.getBoolean(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG),
"idempotence should be disabled when acks not set to all and `enable.idempotence` config is unset.");
assertEquals(
"1",
config.getString(ProducerConfig.ACKS_CONFIG),
"acks should be set with overridden value");
Properties invalidProps = new Properties() {{
putAll(baseProps);
setProperty(ProducerConfig.ACKS_CONFIG, "0");
setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "false");
setProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "transactionalId");
}};
assertThrows(
ConfigException.class,
() -> new ProducerConfig(invalidProps),
"Cannot set a transactional.id without also enabling idempotence");
Properties invalidProps2 = new Properties() {{
putAll(baseProps);
setProperty(ProducerConfig.ACKS_CONFIG, "1");
// explicitly enabling idempotence should still throw exception
setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true");
}};
assertThrows(
ConfigException.class,
() -> new ProducerConfig(invalidProps2),
"Must set acks to all in order to use the idempotent producer");
Properties invalidProps3 = new Properties() {{
putAll(baseProps);
setProperty(ProducerConfig.ACKS_CONFIG, "0");
setProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "transactionalId");
}};
assertThrows(
ConfigException.class,
() -> new ProducerConfig(invalidProps3),
"Must set acks to all when using the transactional producer.");
}
@Test
public void testRetriesAndIdempotenceForIdempotentProducers() {
Properties baseProps = baseProperties();
Properties validProps = new Properties() {{
putAll(baseProps);
setProperty(ProducerConfig.RETRIES_CONFIG, "0");
setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "false");
}};
ProducerConfig config = new ProducerConfig(validProps);
assertFalse(
config.getBoolean(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG),
"idempotence should be overwritten");
assertEquals(
0,
config.getInt(ProducerConfig.RETRIES_CONFIG),
"retries should be overwritten");
Properties validProps2 = new Properties() {{
putAll(baseProps);
setProperty(ProducerConfig.RETRIES_CONFIG, "0");
}};
config = new ProducerConfig(validProps2);
assertFalse(
config.getBoolean(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG),
"idempotence should be disabled when retries set to 0 and `enable.idempotence` config is unset.");
assertEquals(
0,
config.getInt(ProducerConfig.RETRIES_CONFIG),
"retries should be set with overridden value");
Properties invalidProps = new Properties() {{
putAll(baseProps);
setProperty(ProducerConfig.RETRIES_CONFIG, "0");
setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "false");
setProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "transactionalId");
}};
assertThrows(
ConfigException.class,
() -> new ProducerConfig(invalidProps),
"Cannot set a transactional.id without also enabling idempotence");
Properties invalidProps2 = new Properties() {{
putAll(baseProps);
setProperty(ProducerConfig.RETRIES_CONFIG, "0");
// explicitly enabling idempotence should still throw exception
setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true");
}};
assertThrows(
ConfigException.class,
() -> new ProducerConfig(invalidProps2),
"Must set retries to non-zero when using the idempotent producer.");
Properties invalidProps3 = new Properties() {{
putAll(baseProps);
setProperty(ProducerConfig.RETRIES_CONFIG, "0");
setProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "transactionalId");
}};
assertThrows(
ConfigException.class,
() -> new ProducerConfig(invalidProps3),
"Must set retries to non-zero when using the transactional producer.");
}
private Properties baseProperties() {
Properties baseProps = new Properties();
baseProps.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
baseProps.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
baseProps.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
return baseProps;
}
@Test
public void testInflightRequestsAndIdempotenceForIdempotentProducers() {
Properties baseProps = baseProperties();
Properties validProps = new Properties() {{
putAll(baseProps);
setProperty(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "6");
setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "false");
}};
ProducerConfig config = new ProducerConfig(validProps);
assertFalse(
config.getBoolean(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG),
"idempotence should be overwritten");
assertEquals(
6,
config.getInt(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION),
"max.in.flight.requests.per.connection should be overwritten");
Properties invalidProps1 = new Properties() {{
putAll(baseProps);
setProperty(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "6");
}};
ConfigException configException = assertThrows(ConfigException.class, () -> new ProducerConfig(invalidProps1));
assertEquals("To use the idempotent producer, " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION +
" must be set to at most 5. Current value is 6.", configException.getMessage());
Properties invalidProps2 = new Properties() {{
putAll(baseProps);
setProperty(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "5");
setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "false");
setProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "transactionalId");
}};
assertThrows(
ConfigException.class,
() -> new ProducerConfig(invalidProps2),
"Cannot set a transactional.id without also enabling idempotence");
Properties invalidProps3 = new Properties() {{
putAll(baseProps);
setProperty(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "6");
// explicitly enabling idempotence should still throw exception
setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true");
}};
assertThrows(
ConfigException.class,
() -> new ProducerConfig(invalidProps3),
"Must set max.in.flight.requests.per.connection to at most 5 when using the idempotent producer.");
Properties invalidProps4 = new Properties() {{
putAll(baseProps);
setProperty(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "6");
setProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "transactionalId");
}};
assertThrows(
ConfigException.class,
() -> new ProducerConfig(invalidProps4),
"Must set retries to non-zero when using the idempotent producer.");
}
@Test
public void testMetricsReporterAutoGeneratedClientId() {
Properties props = new Properties();
props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
props.setProperty(ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG, MockMetricsReporter.class.getName());
try (KafkaProducer<String, String> producer = new KafkaProducer<>(
props, new StringSerializer(), new StringSerializer())) {
assertEquals(2, producer.metrics.reporters().size());
MockMetricsReporter mockMetricsReporter = (MockMetricsReporter) producer.metrics.reporters().stream()
.filter(reporter -> reporter instanceof MockMetricsReporter).findFirst().get();
assertEquals(producer.getClientId(), mockMetricsReporter.clientId);
}
}
@Test
public void testDisableJmxAndClientTelemetryReporter() {
Properties props = new Properties();
props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
props.setProperty(ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG, "");
props.setProperty(ProducerConfig.ENABLE_METRICS_PUSH_CONFIG, "false");
try (KafkaProducer<String, String> producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer())) {
assertTrue(producer.metrics.reporters().isEmpty());
}
}
@Test
public void testExplicitlyOnlyEnableJmxReporter() {
Properties props = new Properties();
props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
props.setProperty(ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG, "org.apache.kafka.common.metrics.JmxReporter");
props.setProperty(ProducerConfig.ENABLE_METRICS_PUSH_CONFIG, "false");
try (KafkaProducer<String, String> producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer())) {
assertEquals(1, producer.metrics.reporters().size());
assertInstanceOf(JmxReporter.class, producer.metrics.reporters().get(0));
}
}
@Test
public void testExplicitlyOnlyEnableClientTelemetryReporter() {
Properties props = new Properties();
props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
props.setProperty(ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG, "");
try (KafkaProducer<String, String> producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer())) {
assertEquals(1, producer.metrics.reporters().size());
assertInstanceOf(ClientTelemetryReporter.class, producer.metrics.reporters().get(0));
}
}
@Test
public void testConstructorWithSerializers() {
Properties producerProps = new Properties();
producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
new KafkaProducer<>(producerProps, new ByteArraySerializer(), new ByteArraySerializer()).close();
}
@Test
public void testNoSerializerProvided() {
Properties producerProps = new Properties();
producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
assertThrows(ConfigException.class, () -> {
try (KafkaProducer<?, ?> producer = new KafkaProducer<>(producerProps)) {
// KafkaProducer will be closed automatically after the block
}
});
final Map<String, Object> configs = new HashMap<>();
configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
// Invalid value null for configuration key.serializer: must be non-null.
assertThrows(ConfigException.class, () -> {
try (KafkaProducer<String, String> producer = new KafkaProducer<>(configs)) {
// KafkaProducer will be closed automatically after the block
}
});
}
@Test
public void testConstructorFailureCloseResource() {
Properties props = new Properties();
props.setProperty(ProducerConfig.CLIENT_ID_CONFIG, "testConstructorClose");
props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "some.invalid.hostname.foo.bar.local:9999");
props.setProperty(ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG, MockMetricsReporter.class.getName());
final int oldInitCount = MockMetricsReporter.INIT_COUNT.get();
final int oldCloseCount = MockMetricsReporter.CLOSE_COUNT.get();
try (KafkaProducer<byte[], byte[]> ignored = new KafkaProducer<>(props, new ByteArraySerializer(), new ByteArraySerializer())) {
fail("should have caught an exception and returned");
} catch (KafkaException e) {
assertEquals(oldInitCount + 1, MockMetricsReporter.INIT_COUNT.get());
assertEquals(oldCloseCount + 1, MockMetricsReporter.CLOSE_COUNT.get());
assertEquals("Failed to construct kafka producer", e.getMessage());
}
}
@Test
public void testConstructorWithNotStringKey() {
Properties props = new Properties();
props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
props.put(1, "not string key");
ConfigException ce = assertThrows(
ConfigException.class,
() -> new KafkaProducer<>(props, new StringSerializer(), new StringSerializer()));
assertTrue(ce.getMessage().contains("One or more keys is not a string."), "Unexpected exception message: " + ce.getMessage());
}
@Test
public void testConstructorWithInvalidMetricReporterClass() {
Properties props = new Properties();
props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
props.setProperty(ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG, "an.invalid.class");
KafkaException ce = assertThrows(
KafkaException.class,
() -> new KafkaProducer<>(props, new StringSerializer(), new StringSerializer()));
assertTrue(ce.getMessage().contains("Failed to construct kafka producer"), "Unexpected exception message: " + ce.getMessage());
assertTrue(ce.getCause().getMessage().contains("Class an.invalid. | KafkaProducerTest |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/operators/AbstractOuterJoinTaskTest.java | {
"start": 2262,
"end": 18669
} | class ____
extends BinaryOperatorTestBase<
FlatJoinFunction<
Tuple2<Integer, Integer>,
Tuple2<Integer, Integer>,
Tuple2<Integer, Integer>>,
Tuple2<Integer, Integer>,
Tuple2<Integer, Integer>> {
private static final long HASH_MEM = 6 * 1024 * 1024;
private static final long SORT_MEM = 3 * 1024 * 1024;
private static final int NUM_SORTER = 2;
private static final long BNLJN_MEM = 10 * PAGE_SIZE;
private final double bnljn_frac;
@SuppressWarnings("unchecked")
protected final TypeComparator<Tuple2<Integer, Integer>> comparator1 =
new TupleComparator<>(
new int[] {0},
new TypeComparator<?>[] {new IntComparator(true)},
new TypeSerializer<?>[] {IntSerializer.INSTANCE});
@SuppressWarnings("unchecked")
protected final TypeComparator<Tuple2<Integer, Integer>> comparator2 =
new TupleComparator<>(
new int[] {0},
new TypeComparator<?>[] {new IntComparator(true)},
new TypeSerializer<?>[] {IntSerializer.INSTANCE});
protected final List<Tuple2<Integer, Integer>> outList = new ArrayList<>();
@SuppressWarnings("unchecked")
protected final TypeSerializer<Tuple2<Integer, Integer>> serializer =
new TupleSerializer<>(
(Class<Tuple2<Integer, Integer>>) (Class<?>) Tuple2.class,
new TypeSerializer<?>[] {IntSerializer.INSTANCE, IntSerializer.INSTANCE});
AbstractOuterJoinTaskTest(ExecutionConfig config) {
super(config, HASH_MEM, NUM_SORTER, SORT_MEM);
bnljn_frac = (double) BNLJN_MEM / this.getMemoryManager().getMemorySize();
}
@TestTemplate
void testSortBoth1OuterJoinTask() throws Exception {
final int keyCnt1 = 20;
final int valCnt1 = 1;
final int keyCnt2 = 10;
final int valCnt2 = 2;
testSortBothOuterJoinTask(keyCnt1, valCnt1, keyCnt2, valCnt2);
}
@TestTemplate
void testSortBoth2OuterJoinTask() throws Exception {
final int keyCnt1 = 20;
final int valCnt1 = 1;
final int keyCnt2 = 20;
final int valCnt2 = 1;
testSortBothOuterJoinTask(keyCnt1, valCnt1, keyCnt2, valCnt2);
}
@TestTemplate
void testSortBoth3OuterJoinTask() throws Exception {
int keyCnt1 = 20;
int valCnt1 = 1;
int keyCnt2 = 20;
int valCnt2 = 20;
testSortBothOuterJoinTask(keyCnt1, valCnt1, keyCnt2, valCnt2);
}
@TestTemplate
void testSortBoth4OuterJoinTask() throws Exception {
int keyCnt1 = 20;
int valCnt1 = 20;
int keyCnt2 = 20;
int valCnt2 = 1;
testSortBothOuterJoinTask(keyCnt1, valCnt1, keyCnt2, valCnt2);
}
@TestTemplate
void testSortBoth5OuterJoinTask() throws Exception {
int keyCnt1 = 20;
int valCnt1 = 20;
int keyCnt2 = 20;
int valCnt2 = 20;
testSortBothOuterJoinTask(keyCnt1, valCnt1, keyCnt2, valCnt2);
}
@TestTemplate
void testSortBoth6OuterJoinTask() throws Exception {
int keyCnt1 = 10;
int valCnt1 = 1;
int keyCnt2 = 20;
int valCnt2 = 2;
testSortBothOuterJoinTask(keyCnt1, valCnt1, keyCnt2, valCnt2);
}
private void testSortBothOuterJoinTask(int keyCnt1, int valCnt1, int keyCnt2, int valCnt2)
throws Exception {
setOutput(this.outList, this.serializer);
addDriverComparator(this.comparator1);
addDriverComparator(this.comparator2);
getTaskConfig().setDriverPairComparator(new RuntimePairComparatorFactory());
getTaskConfig().setDriverStrategy(this.getSortDriverStrategy());
getTaskConfig().setRelativeMemoryDriver(this.bnljn_frac);
setNumFileHandlesForSort(4);
final AbstractOuterJoinDriver<
Tuple2<Integer, Integer>,
Tuple2<Integer, Integer>,
Tuple2<Integer, Integer>>
testTask = getOuterJoinDriver();
addInputSorted(
new UniformIntTupleGenerator(keyCnt1, valCnt1, false),
this.serializer,
this.comparator1.duplicate());
addInputSorted(
new UniformIntTupleGenerator(keyCnt2, valCnt2, false),
this.serializer,
this.comparator2.duplicate());
testDriver(testTask, MockJoinStub.class);
final int expCnt = calculateExpectedCount(keyCnt1, valCnt1, keyCnt2, valCnt2);
assertThat(this.outList)
.withFailMessage("Result set size was %d. Expected was %d", outList.size(), expCnt)
.hasSize(expCnt);
this.outList.clear();
}
@TestTemplate
void testSortFirstOuterJoinTask() throws Exception {
int keyCnt1 = 20;
int valCnt1 = 20;
int keyCnt2 = 20;
int valCnt2 = 20;
setOutput(this.outList, this.serializer);
addDriverComparator(this.comparator1);
addDriverComparator(this.comparator2);
getTaskConfig().setDriverPairComparator(new RuntimePairComparatorFactory());
getTaskConfig().setDriverStrategy(this.getSortDriverStrategy());
getTaskConfig().setRelativeMemoryDriver(this.bnljn_frac);
setNumFileHandlesForSort(4);
final AbstractOuterJoinDriver<
Tuple2<Integer, Integer>,
Tuple2<Integer, Integer>,
Tuple2<Integer, Integer>>
testTask = getOuterJoinDriver();
addInputSorted(
new UniformIntTupleGenerator(keyCnt1, valCnt1, false),
this.serializer,
this.comparator1.duplicate());
addInput(new UniformIntTupleGenerator(keyCnt2, valCnt2, true), this.serializer);
testDriver(testTask, MockJoinStub.class);
final int expCnt = calculateExpectedCount(keyCnt1, valCnt1, keyCnt2, valCnt2);
assertThat(this.outList)
.withFailMessage("Result set size was %d. Expected was %d", outList.size(), expCnt)
.hasSize(expCnt);
this.outList.clear();
}
@TestTemplate
void testSortSecondOuterJoinTask() throws Exception {
int keyCnt1 = 20;
int valCnt1 = 20;
int keyCnt2 = 20;
int valCnt2 = 20;
setOutput(this.outList, this.serializer);
addDriverComparator(this.comparator1);
addDriverComparator(this.comparator2);
getTaskConfig().setDriverPairComparator(new RuntimePairComparatorFactory());
getTaskConfig().setDriverStrategy(this.getSortDriverStrategy());
getTaskConfig().setRelativeMemoryDriver(this.bnljn_frac);
setNumFileHandlesForSort(4);
final AbstractOuterJoinDriver<
Tuple2<Integer, Integer>,
Tuple2<Integer, Integer>,
Tuple2<Integer, Integer>>
testTask = getOuterJoinDriver();
addInput(new UniformIntTupleGenerator(keyCnt1, valCnt1, true), this.serializer);
addInputSorted(
new UniformIntTupleGenerator(keyCnt2, valCnt2, false),
this.serializer,
this.comparator2.duplicate());
testDriver(testTask, MockJoinStub.class);
final int expCnt = calculateExpectedCount(keyCnt1, valCnt1, keyCnt2, valCnt2);
assertThat(this.outList)
.withFailMessage("Result set size was %d. Expected was %d", outList.size(), expCnt)
.hasSize(expCnt);
this.outList.clear();
}
@TestTemplate
void testMergeOuterJoinTask() throws Exception {
int keyCnt1 = 20;
int valCnt1 = 20;
int keyCnt2 = 20;
int valCnt2 = 20;
setOutput(this.outList, this.serializer);
addDriverComparator(this.comparator1);
addDriverComparator(this.comparator2);
getTaskConfig().setDriverPairComparator(new RuntimePairComparatorFactory());
getTaskConfig().setDriverStrategy(this.getSortDriverStrategy());
getTaskConfig().setRelativeMemoryDriver(this.bnljn_frac);
setNumFileHandlesForSort(4);
final AbstractOuterJoinDriver<
Tuple2<Integer, Integer>,
Tuple2<Integer, Integer>,
Tuple2<Integer, Integer>>
testTask = getOuterJoinDriver();
addInput(new UniformIntTupleGenerator(keyCnt1, valCnt1, true), this.serializer);
addInput(new UniformIntTupleGenerator(keyCnt2, valCnt2, true), this.serializer);
testDriver(testTask, MockJoinStub.class);
final int expCnt = calculateExpectedCount(keyCnt1, valCnt1, keyCnt2, valCnt2);
assertThat(this.outList)
.withFailMessage("Result set size was %d. Expected was %d", outList.size(), expCnt)
.hasSize(expCnt);
this.outList.clear();
}
@TestTemplate
void testFailingOuterJoinTask() {
int keyCnt1 = 20;
int valCnt1 = 20;
int keyCnt2 = 20;
int valCnt2 = 20;
setOutput(new DiscardingOutputCollector<Tuple2<Integer, Integer>>());
addDriverComparator(this.comparator1);
addDriverComparator(this.comparator2);
getTaskConfig().setDriverPairComparator(new RuntimePairComparatorFactory());
getTaskConfig().setDriverStrategy(this.getSortDriverStrategy());
getTaskConfig().setRelativeMemoryDriver(this.bnljn_frac);
setNumFileHandlesForSort(4);
final AbstractOuterJoinDriver<
Tuple2<Integer, Integer>,
Tuple2<Integer, Integer>,
Tuple2<Integer, Integer>>
testTask = getOuterJoinDriver();
addInput(new UniformIntTupleGenerator(keyCnt1, valCnt1, true), this.serializer);
addInput(new UniformIntTupleGenerator(keyCnt2, valCnt2, true), this.serializer);
assertThatThrownBy(() -> testDriver(testTask, MockFailingJoinStub.class))
.isInstanceOf(ExpectedTestException.class);
}
@TestTemplate
void testCancelOuterJoinTaskWhileSort1() throws Exception {
setOutput(new DiscardingOutputCollector<Tuple2<Integer, Integer>>());
addDriverComparator(this.comparator1);
addDriverComparator(this.comparator2);
getTaskConfig().setDriverPairComparator(new RuntimePairComparatorFactory());
getTaskConfig().setDriverStrategy(this.getSortDriverStrategy());
getTaskConfig().setRelativeMemoryDriver(this.bnljn_frac);
setNumFileHandlesForSort(4);
final AbstractOuterJoinDriver<
Tuple2<Integer, Integer>,
Tuple2<Integer, Integer>,
Tuple2<Integer, Integer>>
testTask = getOuterJoinDriver();
addInputSorted(
new DelayingIterator<>(new InfiniteIntTupleIterator(), 100),
this.serializer,
this.comparator1.duplicate());
addInput(new DelayingIterator<>(new InfiniteIntTupleIterator(), 100), this.serializer);
final AtomicReference<Throwable> error = new AtomicReference<>();
final Thread taskRunner =
new Thread("Task runner for testCancelOuterJoinTaskWhileSort1()") {
@Override
public void run() {
try {
testDriver(testTask, MockJoinStub.class);
} catch (Throwable t) {
error.set(t);
}
}
};
taskRunner.start();
Thread.sleep(1000);
cancel();
taskRunner.interrupt();
taskRunner.join(60000);
assertThat(taskRunner.isAlive())
.withFailMessage("Task thread did not finish within 60 seconds")
.isFalse();
assertThat(error.get()).isNull();
}
@TestTemplate
void testCancelOuterJoinTaskWhileSort2() throws Exception {
setOutput(new DiscardingOutputCollector<Tuple2<Integer, Integer>>());
addDriverComparator(this.comparator1);
addDriverComparator(this.comparator2);
getTaskConfig().setDriverPairComparator(new RuntimePairComparatorFactory());
getTaskConfig().setDriverStrategy(this.getSortDriverStrategy());
getTaskConfig().setRelativeMemoryDriver(this.bnljn_frac);
setNumFileHandlesForSort(4);
final AbstractOuterJoinDriver<
Tuple2<Integer, Integer>,
Tuple2<Integer, Integer>,
Tuple2<Integer, Integer>>
testTask = getOuterJoinDriver();
addInput(new DelayingIterator<>(new InfiniteIntTupleIterator(), 1), this.serializer);
addInputSorted(
new DelayingIterator<>(new InfiniteIntTupleIterator(), 1),
this.serializer,
this.comparator2.duplicate());
final AtomicReference<Throwable> error = new AtomicReference<>();
final Thread taskRunner =
new Thread("Task runner for testCancelOuterJoinTaskWhileSort2()") {
@Override
public void run() {
try {
testDriver(testTask, MockJoinStub.class);
} catch (Throwable t) {
error.set(t);
}
}
};
taskRunner.start();
Thread.sleep(1000);
cancel();
taskRunner.interrupt();
taskRunner.join(60000);
assertThat(taskRunner.isAlive())
.withFailMessage("Task thread did not finish within 60 seconds")
.isFalse();
assertThat(error.get()).isNull();
}
@TestTemplate
void testCancelOuterJoinTaskWhileRunning() throws Exception {
setOutput(new DiscardingOutputCollector<Tuple2<Integer, Integer>>());
addDriverComparator(this.comparator1);
addDriverComparator(this.comparator2);
getTaskConfig().setDriverPairComparator(new RuntimePairComparatorFactory());
getTaskConfig().setDriverStrategy(this.getSortDriverStrategy());
getTaskConfig().setRelativeMemoryDriver(bnljn_frac);
setNumFileHandlesForSort(4);
final AbstractOuterJoinDriver<
Tuple2<Integer, Integer>,
Tuple2<Integer, Integer>,
Tuple2<Integer, Integer>>
testTask = getOuterJoinDriver();
addInput(new DelayingIterator<>(new InfiniteIntTupleIterator(), 100), this.serializer);
addInput(new DelayingIterator<>(new InfiniteIntTupleIterator(), 100), this.serializer);
final AtomicReference<Throwable> error = new AtomicReference<>();
final Thread taskRunner =
new Thread("Task runner for testCancelOuterJoinTaskWhileRunning()") {
@Override
public void run() {
try {
testDriver(testTask, MockJoinStub.class);
} catch (Throwable t) {
error.set(t);
}
}
};
taskRunner.start();
Thread.sleep(1000);
cancel();
taskRunner.interrupt();
taskRunner.join(60000);
assertThat(taskRunner.isAlive())
.withFailMessage("Task thread did not finish within 60 seconds")
.isFalse();
assertThat(error.get()).isNull();
}
protected abstract AbstractOuterJoinDriver<
Tuple2<Integer, Integer>, Tuple2<Integer, Integer>, Tuple2<Integer, Integer>>
getOuterJoinDriver();
protected abstract int calculateExpectedCount(
int keyCnt1, int valCnt1, int keyCnt2, int valCnt2);
protected abstract DriverStrategy getSortDriverStrategy();
// =================================================================================================
@SuppressWarnings("serial")
public static final | AbstractOuterJoinTaskTest |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MultipleOutputs.java | {
"start": 19773,
"end": 22246
} | class ____ extends StatusReporter {
TaskAttemptContext context;
public WrappedStatusReporter(TaskAttemptContext context) {
this.context = context;
}
@Override
public Counter getCounter(Enum<?> name) {
return context.getCounter(name);
}
@Override
public Counter getCounter(String group, String name) {
return context.getCounter(group, name);
}
@Override
public void progress() {
context.progress();
}
@Override
public float getProgress() {
return context.getProgress();
}
@Override
public void setStatus(String status) {
context.setStatus(status);
}
}
/**
* Closes all the opened outputs.
*
* This should be called from cleanup method of map/reduce task.
* If overridden subclasses must invoke <code>super.close()</code> at the
* end of their <code>close()</code>
*
*/
@SuppressWarnings("unchecked")
public void close() throws IOException, InterruptedException {
Configuration conf = context.getConfiguration();
int nThreads = conf.getInt(MRConfig.MULTIPLE_OUTPUTS_CLOSE_THREAD_COUNT,
MRConfig.DEFAULT_MULTIPLE_OUTPUTS_CLOSE_THREAD_COUNT);
AtomicBoolean encounteredException = new AtomicBoolean(false);
ThreadFactory threadFactory = new ThreadFactoryBuilder().setNameFormat("MultipleOutputs-close")
.setUncaughtExceptionHandler(((t, e) -> {
LOG.error("Thread " + t + " failed unexpectedly", e);
encounteredException.set(true);
})).build();
ExecutorService executorService = Executors.newFixedThreadPool(nThreads, threadFactory);
List<Callable<Object>> callableList = new ArrayList<>(recordWriters.size());
for (RecordWriter writer : recordWriters.values()) {
callableList.add(() -> {
try {
writer.close(context);
} catch (IOException e) {
LOG.error("Error while closing MultipleOutput file", e);
encounteredException.set(true);
}
return null;
});
}
try {
executorService.invokeAll(callableList);
} catch (InterruptedException e) {
LOG.warn("Closing is Interrupted");
Thread.currentThread().interrupt();
} finally {
executorService.shutdown();
}
if (encounteredException.get()) {
throw new IOException(
"One or more threads encountered exception during close. See prior errors.");
}
}
}
| WrappedStatusReporter |
java | apache__dubbo | dubbo-demo/dubbo-demo-spring-boot/dubbo-demo-spring-boot-servlet/src/main/java/org/apache/dubbo/springboot/demo/servlet/HelloReply.java | {
"start": 890,
"end": 1166
} | class ____ implements Serializable {
private static final long serialVersionUID = 1L;
private String message;
public String getMessage() {
return message;
}
public void setMessage(String message) {
this.message = message;
}
}
| HelloReply |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/dialect/functional/HANACalcViewTest.java | {
"start": 6810,
"end": 7137
} | class ____ {
private int dummyint;
private String dummy;
@SuppressWarnings("unused")
public CVEntityDTO(int dummyint, String dummy) {
this.dummyint = dummyint;
this.dummy = dummy;
}
public int getDummyint() {
return this.dummyint;
}
public String getDummy() {
return this.dummy;
}
}
}
| CVEntityDTO |
java | apache__flink | flink-python/src/main/java/org/apache/flink/streaming/api/operators/python/embedded/AbstractEmbeddedDataStreamPythonFunctionOperator.java | {
"start": 1893,
"end": 4992
} | class ____<OUT>
extends AbstractEmbeddedPythonFunctionOperator<OUT>
implements DataStreamPythonFunctionOperator<OUT> {
private static final long serialVersionUID = 1L;
private static final String NUM_PARTITIONS = "NUM_PARTITIONS";
/** The serialized python function to be executed. */
private final DataStreamPythonFunctionInfo pythonFunctionInfo;
private final Map<String, OutputTag<?>> sideOutputTags;
/** The TypeInformation of output data. */
protected final TypeInformation<OUT> outputTypeInfo;
/** The number of partitions for the partition custom function. */
private Integer numPartitions;
transient PythonTypeUtils.DataConverter<OUT, Object> outputDataConverter;
protected transient TimestampedCollector<OUT> collector;
protected transient boolean hasSideOutput;
protected transient SideOutputContext sideOutputContext;
public AbstractEmbeddedDataStreamPythonFunctionOperator(
Configuration config,
DataStreamPythonFunctionInfo pythonFunctionInfo,
TypeInformation<OUT> outputTypeInfo) {
super(config);
this.pythonFunctionInfo = Preconditions.checkNotNull(pythonFunctionInfo);
this.outputTypeInfo = Preconditions.checkNotNull(outputTypeInfo);
this.sideOutputTags = new HashMap<>();
}
@Override
public void open() throws Exception {
hasSideOutput = !sideOutputTags.isEmpty();
if (hasSideOutput) {
sideOutputContext = new SideOutputContext();
sideOutputContext.open();
}
super.open();
outputDataConverter =
PythonTypeUtils.TypeInfoToDataConverter.typeInfoDataConverter(outputTypeInfo);
collector = new TimestampedCollector<>(output);
}
@Override
public TypeInformation<OUT> getProducedType() {
return outputTypeInfo;
}
@Override
public void setNumPartitions(int numPartitions) {
this.numPartitions = numPartitions;
}
@Override
public DataStreamPythonFunctionInfo getPythonFunctionInfo() {
return pythonFunctionInfo;
}
@Override
public void addSideOutputTags(Collection<OutputTag<?>> outputTags) {
for (OutputTag<?> outputTag : outputTags) {
sideOutputTags.put(outputTag.getId(), outputTag);
}
}
@Override
public Collection<OutputTag<?>> getSideOutputTags() {
return sideOutputTags.values();
}
public Map<String, String> getJobParameters() {
Map<String, String> jobParameters = new HashMap<>();
if (numPartitions != null) {
jobParameters.put(NUM_PARTITIONS, String.valueOf(numPartitions));
}
KeyedStateBackend<Object> keyedStateBackend = getKeyedStateBackend();
if (keyedStateBackend != null) {
jobParameters.put(
"inBatchExecutionMode",
String.valueOf(inBatchExecutionMode(keyedStateBackend)));
}
return jobParameters;
}
private | AbstractEmbeddedDataStreamPythonFunctionOperator |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/struct/TestParentChildReferences.java | {
"start": 3647,
"end": 3942
} | class ____ {
@JsonManagedReference
protected final List<Child> children = new ArrayList<Child>();
public List<Child> getChildren() { return children; }
public void addChild(Child child) { children.add(child); child.setParent(this); }
}
public static | Parent |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/demo/sql/SchemaStatTest_odps.java | {
"start": 509,
"end": 1950
} | class ____ extends TestCase {
public void test_schemaStat() throws Exception {
File file = new File("/Users/wenshao/Downloads/odps_sql_1.txt");
String sql = FileUtils.readFileToString(file);
DbType dbType = DbType.odps;
SQLStatementParser parser = SQLParserUtils.createSQLStatementParser(sql, dbType);
List<SQLStatement> stmtList = parser.parseStatementList();
System.out.println("stmtList size : " + stmtList.size());
SchemaStatVisitor statVisitor = SQLUtils.createSchemaStatVisitor(dbType);
for (SQLStatement stmt : stmtList) {
stmt.accept(statVisitor);
}
Set<TableStat.Relationship> relationships = statVisitor.getRelationships();
for (TableStat.Relationship relationship : relationships) {
System.out.println(relationship); // table1.id = table2.id
}
// System.out.println(statVisitor.getColumns());
// System.out.println(statVisitor.getGroupByColumns()); // group by
System.out.println("relationships : " + statVisitor.getRelationships()); // group by
System.out.println(statVisitor.getConditions());
// assertEquals(3, relationships.size());
//
// Assert.assertEquals(21, statVisitor.getColumns().size());
// Assert.assertEquals(20, statVisitor.getConditions().size());
// assertEquals(1, statVisitor.getFunctions().size());
}
}
| SchemaStatTest_odps |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/state/ttl/TtlAwareSerializerSnapshot.java | {
"start": 1356,
"end": 1978
} | class ____ a {@link
* TypeSerializerSnapshot} with ttl awareness. It will return true when the wrapped {@link
* TypeSerializerSnapshot} is the instance of {@link TtlStateFactory.TtlSerializerSnapshot}. Also,
* it overrides the compatibility type check between TtlSerializerSnapshot and non-ttl
* TypeSerializerSnapshot.
*
* <p>If two TtlAwareSerializerSnapshots have the same ttl config, it will return the compatibility
* check result of the original TypeSerializerSnapshot.
*
* <p>If two TtlAwareSerializerSnapshots have different ttl config, it will return a wrapped
* compatibility check result.
*/
public | wraps |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/common/eventtime/RecordTimestampAssigner.java | {
"start": 1118,
"end": 1308
} | class ____<E> implements TimestampAssigner<E> {
@Override
public long extractTimestamp(E element, long recordTimestamp) {
return recordTimestamp;
}
}
| RecordTimestampAssigner |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/exceptions/Exceptions.java | {
"start": 878,
"end": 3261
} | class ____ {
/** Utility class. */
private Exceptions() {
throw new IllegalStateException("No instances!");
}
/**
* Convenience method to throw a {@code RuntimeException} and {@code Error} directly
* or wrap any other exception type into a {@code RuntimeException}.
* @param t the exception to throw directly or wrapped
* @return because {@code propagate} itself throws an exception or error, this is a sort of phantom return
* value; {@code propagate} does not actually return anything
*/
@NonNull
public static RuntimeException propagate(@NonNull Throwable t) {
/*
* The return type of RuntimeException is a trick for code to be like this:
*
* throw Exceptions.propagate(e);
*
* Even though nothing will return and throw via that 'throw', it allows the code to look like it
* so it's easy to read and understand that it will always result in a throw.
*/
throw ExceptionHelper.wrapOrThrow(t);
}
/**
* Throws a particular {@code Throwable} only if it belongs to a set of "fatal" error varieties. These
* varieties are as follows:
* <ul>
* <li>{@code VirtualMachineError}</li>
* <li>{@code ThreadDeath}</li>
* <li>{@code LinkageError}</li>
* </ul>
* This can be useful if you are writing an operator that calls user-supplied code, and you want to
* notify subscribers of errors encountered in that code by calling their {@code onError} methods, but only
* if the errors are not so catastrophic that such a call would be futile, in which case you simply want to
* rethrow the error.
*
* @param t
* the {@code Throwable} to test and perhaps throw
* @see <a href="https://github.com/ReactiveX/RxJava/issues/748#issuecomment-32471495">RxJava: StackOverflowError is swallowed (Issue #748)</a>
*/
public static void throwIfFatal(@NonNull Throwable t) {
// values here derived from https://github.com/ReactiveX/RxJava/issues/748#issuecomment-32471495
if (t instanceof VirtualMachineError) {
throw (VirtualMachineError) t;
} else if (t instanceof ThreadDeath) {
throw (ThreadDeath) t;
} else if (t instanceof LinkageError) {
throw (LinkageError) t;
}
}
}
| Exceptions |
java | apache__camel | components/camel-salesforce/camel-salesforce-component/src/test/java/org/apache/camel/component/salesforce/dto/generated/Asset.java | {
"start": 1045,
"end": 4531
} | class ____ extends AbstractSObjectBase {
public Asset() {
getAttributes().setType("Asset");
}
// ContactId
private String ContactId;
@JsonProperty("ContactId")
public String getContactId() {
return this.ContactId;
}
@JsonProperty("ContactId")
public void setContactId(String ContactId) {
this.ContactId = ContactId;
}
// AccountId
private String AccountId;
@JsonProperty("AccountId")
public String getAccountId() {
return this.AccountId;
}
@JsonProperty("AccountId")
public void setAccountId(String AccountId) {
this.AccountId = AccountId;
}
// Product2Id
private String Product2Id;
@JsonProperty("Product2Id")
public String getProduct2Id() {
return this.Product2Id;
}
@JsonProperty("Product2Id")
public void setProduct2Id(String Product2Id) {
this.Product2Id = Product2Id;
}
// IsCompetitorProduct
private Boolean IsCompetitorProduct;
@JsonProperty("IsCompetitorProduct")
public Boolean getIsCompetitorProduct() {
return this.IsCompetitorProduct;
}
@JsonProperty("IsCompetitorProduct")
public void setIsCompetitorProduct(Boolean IsCompetitorProduct) {
this.IsCompetitorProduct = IsCompetitorProduct;
}
// SerialNumber
private String SerialNumber;
@JsonProperty("SerialNumber")
public String getSerialNumber() {
return this.SerialNumber;
}
@JsonProperty("SerialNumber")
public void setSerialNumber(String SerialNumber) {
this.SerialNumber = SerialNumber;
}
// InstallDate
private java.time.ZonedDateTime InstallDate;
@JsonProperty("InstallDate")
public java.time.ZonedDateTime getInstallDate() {
return this.InstallDate;
}
@JsonProperty("InstallDate")
public void setInstallDate(java.time.ZonedDateTime InstallDate) {
this.InstallDate = InstallDate;
}
// PurchaseDate
private java.time.ZonedDateTime PurchaseDate;
@JsonProperty("PurchaseDate")
public java.time.ZonedDateTime getPurchaseDate() {
return this.PurchaseDate;
}
@JsonProperty("PurchaseDate")
public void setPurchaseDate(java.time.ZonedDateTime PurchaseDate) {
this.PurchaseDate = PurchaseDate;
}
// UsageEndDate
private java.time.ZonedDateTime UsageEndDate;
@JsonProperty("UsageEndDate")
public java.time.ZonedDateTime getUsageEndDate() {
return this.UsageEndDate;
}
@JsonProperty("UsageEndDate")
public void setUsageEndDate(java.time.ZonedDateTime UsageEndDate) {
this.UsageEndDate = UsageEndDate;
}
// Price
private Double Price;
@JsonProperty("Price")
public Double getPrice() {
return this.Price;
}
@JsonProperty("Price")
public void setPrice(Double Price) {
this.Price = Price;
}
// Quantity
private Double Quantity;
@JsonProperty("Quantity")
public Double getQuantity() {
return this.Quantity;
}
@JsonProperty("Quantity")
public void setQuantity(Double Quantity) {
this.Quantity = Quantity;
}
// Description — serialized under the "Description" JSON key.
private String Description;

/** @return the description, or null if unset. */
@JsonProperty("Description")
public String getDescription() {
    return Description;
}

/** Sets the description. */
@JsonProperty("Description")
public void setDescription(String value) {
    this.Description = value;
}
}
| Asset |
java | apache__camel | components/camel-openapi-java/src/test/java/org/apache/camel/openapi/RestOpenApiLicenseInfoTest.java | {
"start": 1337,
"end": 3547
} | class ____ {
/**
 * Verifies that license, contact and external-documentation API properties
 * configured on the REST configuration appear in the generated OpenAPI
 * JSON document, for both OpenAPI 3.0 and 3.1 output.
 */
@ParameterizedTest
@ValueSource(strings = { "3.1", "3.0" })
public void testLicenseInfo(String openApiVersion) throws Exception {
    CamelContext camelContext = new DefaultCamelContext();
    camelContext.addRoutes(new RouteBuilder() {
        @Override
        public void configure() {
            restConfiguration()
                    .apiProperty("openapi.version", openApiVersion)
                    .apiProperty("api.contact.name", "Mr Camel")
                    .apiProperty("api.contact.email", "camel@apache.org")
                    .apiProperty("api.contact.url", "https://camel.apache.org")
                    .apiProperty("api.license.name", "Apache V2")
                    .apiProperty("api.license.url", "https://www.apache.org/licenses/LICENSE-2.0")
                    .apiProperty("externalDocs.url", "https://openweathermap.org/api")
                    .apiProperty("externalDocs.description", "API Documentation");
            rest("/api")
                    .get("/api").to("direct:api");
            from("direct:api").setBody().constant("Hello World");
        }
    });

    // Render the OpenAPI document through the processor under test.
    RestConfiguration config = camelContext.getRestConfiguration();
    RestOpenApiProcessor processor = new RestOpenApiProcessor(config.getApiProperties(), config);
    processor.setCamelContext(camelContext);
    processor.start();

    Exchange exchange = new DefaultExchange(camelContext);
    processor.process(exchange);

    String json = exchange.getMessage().getBody(String.class);
    assertNotNull(json);

    // Each configured property must surface as a fragment of the JSON output.
    for (String fragment : new String[] {
            "\"url\" : \"https://www.apache.org/licenses/LICENSE-2.0\"",
            "\"name\" : \"Apache V2\"",
            "\"name\" : \"Mr Camel\"",
            "\"email\" : \"camel@apache.org\"",
            "\"url\" : \"https://camel.apache.org\"",
            "\"externalDocs\" :",
            "\"description\" : \"API Documentation\"" }) {
        assertTrue(json.contains(fragment));
    }
}
}
| RestOpenApiLicenseInfoTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.