language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
apache__camel
|
components/camel-salesforce/camel-salesforce-component/src/main/java/org/apache/camel/component/salesforce/api/utils/TimeModule.java
|
{
"start": 1533,
"end": 3163
}
|
class ____ extends SimpleModule {
private static final LocalDateDeserializer LOCAL_DATE_DESERIALIZER = new LocalDateDeserializer(DateTimeFormatter.ISO_DATE);
private static final LocalDateSerializer LOCAL_DATE_SERIALIZER = new LocalDateSerializer(DateTimeFormatter.ISO_DATE);
private static final long serialVersionUID = 1L;
private static final ZonedDateTimeSerializer ZONED_DATE_TIME_SERIALIZER = new ZonedDateTimeSerializer(ISO_OFFSET_DATE_TIME);
private final JavaTimeModule delegate = new JavaTimeModule();
public TimeModule() {
addSerializer(LocalDate.class, LOCAL_DATE_SERIALIZER);
addDeserializer(LocalDate.class, LOCAL_DATE_DESERIALIZER);
addSerializer(LocalDateTime.class, LocalDateTimeSerializer.INSTANCE);
addDeserializer(LocalDateTime.class, LocalDateTimeDeserializer.INSTANCE);
addSerializer(OffsetDateTime.class, OffsetDateTimeSerializer.INSTANCE);
addDeserializer(OffsetDateTime.class, OffsetDateTimeDeserializer.INSTANCE);
addSerializer(ZonedDateTime.class, ZONED_DATE_TIME_SERIALIZER);
addDeserializer(ZonedDateTime.class, ZonedDateTimeDeserializer.INSTANCE);
addSerializer(Instant.class, InstantSerializer.INSTANCE);
addDeserializer(Instant.class, InstantDeserializer.INSTANCE);
addSerializer(OffsetTime.class, OffsetTimeSerializer.INSTANCE);
addDeserializer(OffsetTime.class, OffsetTimeDeserializer.INSTANCE);
}
@Override
public void setupModule(final SetupContext context) {
delegate.setupModule(context);
super.setupModule(context);
}
}
|
TimeModule
|
java
|
apache__flink
|
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/StringifiedForStFileSystem.java
|
{
"start": 1085,
"end": 2851
}
|
class ____ {
private ForStFlinkFileSystem fileSystem;
public StringifiedForStFileSystem(ForStFlinkFileSystem fileSystem) {
this.fileSystem = ForStFileSystemUtils.tryDecorate(fileSystem);
}
public static StringifiedForStFileSystem get(String uri) throws IOException {
return new StringifiedForStFileSystem(ForStFlinkFileSystem.get(URI.create(uri)));
}
public boolean exists(final String path) throws IOException {
return fileSystem.exists(new Path(path));
}
public ForStFileStatus getFileStatus(String path) throws IOException {
return new ForStFileStatus(fileSystem.getFileStatus(new Path(path)));
}
public ForStFileStatus[] listStatus(String path) throws IOException {
return Arrays.stream(fileSystem.listStatus(new Path(path)))
.map(ForStFileStatus::new)
.toArray(ForStFileStatus[]::new);
}
public boolean delete(String path, boolean recursive) throws IOException {
return fileSystem.delete(new Path(path), recursive);
}
public boolean mkdirs(String path) throws IOException {
return fileSystem.mkdirs(new Path(path));
}
public boolean rename(String src, String dst) throws IOException {
return fileSystem.rename(new Path(src), new Path(dst));
}
public ByteBufferReadableFSDataInputStream open(String path) throws IOException {
return fileSystem.open(new Path(path));
}
public ByteBufferWritableFSDataOutputStream create(String path) throws IOException {
return fileSystem.create(new Path(path));
}
public int link(String src, String dst) throws IOException {
return fileSystem.link(new Path(src), new Path(dst));
}
}
|
StringifiedForStFileSystem
|
java
|
elastic__elasticsearch
|
x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/UpdateInferenceModelActionResponseTests.java
|
{
"start": 909,
"end": 2118
}
|
class ____ extends AbstractWireSerializingTestCase<UpdateInferenceModelAction.Response> {
@Override
protected Writeable.Reader<UpdateInferenceModelAction.Response> instanceReader() {
return UpdateInferenceModelAction.Response::new;
}
@Override
protected UpdateInferenceModelAction.Response createTestInstance() {
return new UpdateInferenceModelAction.Response(ModelConfigurationsTests.createRandomInstance());
}
@Override
protected UpdateInferenceModelAction.Response mutateInstance(UpdateInferenceModelAction.Response instance) throws IOException {
ModelConfigurations newModel = randomValueOtherThan(instance.getModel(), ModelConfigurationsTests::createRandomInstance);
return new UpdateInferenceModelAction.Response(newModel);
}
@Override
protected NamedWriteableRegistry getNamedWriteableRegistry() {
List<NamedWriteableRegistry.Entry> namedWriteables = new ArrayList<>(InferenceNamedWriteablesProvider.getNamedWriteables());
namedWriteables.addAll(XPackClientPlugin.getChunkingSettingsNamedWriteables());
return new NamedWriteableRegistry(namedWriteables);
}
}
|
UpdateInferenceModelActionResponseTests
|
java
|
spring-projects__spring-boot
|
module/spring-boot-mongodb/src/main/java/org/springframework/boot/mongodb/autoconfigure/metrics/MongoMetricsAutoConfiguration.java
|
{
"start": 2730,
"end": 3701
}
|
class ____ {
@Bean
@ConditionalOnMissingBean
MongoMetricsCommandListener mongoMetricsCommandListener(MeterRegistry meterRegistry,
MongoCommandTagsProvider mongoCommandTagsProvider) {
return new MongoMetricsCommandListener(meterRegistry, mongoCommandTagsProvider);
}
@Bean
@ConditionalOnMissingBean
MongoCommandTagsProvider mongoCommandTagsProvider() {
return new DefaultMongoCommandTagsProvider();
}
@Bean
MongoClientSettingsBuilderCustomizer mongoMetricsCommandListenerClientSettingsBuilderCustomizer(
MongoMetricsCommandListener mongoMetricsCommandListener) {
return (clientSettingsBuilder) -> clientSettingsBuilder.addCommandListener(mongoMetricsCommandListener);
}
}
@Configuration(proxyBeanMethods = false)
@ConditionalOnClass(MongoMetricsConnectionPoolListener.class)
@ConditionalOnBooleanProperty(name = "management.metrics.mongodb.connectionpool.enabled", matchIfMissing = true)
static
|
MongoCommandMetricsConfiguration
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/sql/oracle/demo/Demo3.java
|
{
"start": 1624,
"end": 4908
}
|
class ____ extends TestCase {
public void test_0() throws Exception {
String sql = "select * from user u where u.uid = 2 and uname = ?";
List<Object> parameters = new ArrayList<Object>();
parameters.add(1);
parameters.add("wenshao");
String realSql = convert(sql, parameters);
System.out.println(realSql);
}
public void test_1() throws Exception {
String sql = "select * from user where uid = ? and uname = ?";
List<Object> parameters = new ArrayList<Object>();
parameters.add(1);
parameters.add("wenshao");
String realSql = convert(sql, parameters);
System.out.println(realSql);
}
public void test_2() throws Exception {
String sql = "select * from (select * from user where uid = ? and uname = ?) t";
List<Object> parameters = new ArrayList<Object>();
parameters.add(1);
parameters.add("wenshao");
String realSql = convert(sql, parameters);
System.out.println(realSql);
}
public void test_3() throws Exception {
String sql = "select * from groups where uid = ? and uname = ?";
List<Object> parameters = new ArrayList<Object>();
parameters.add(1);
parameters.add("wenshao");
String realSql = convert(sql, parameters);
System.out.println(realSql);
}
private String convert(String sql, List<Object> parameters) {
SQLStatementParser parser = new MySqlStatementParser(sql);
List<SQLStatement> stmtList = parser.parseStatementList();
SQLStatement first = (SQLStatement) stmtList.get(0);
MyVisitor visitor = new MyVisitor();
first.accept(visitor);
if (visitor.getVariantList().size() > 0) {
SQLExpr firstVar = visitor.getVariantList().get(0);
int userId;
if (firstVar instanceof SQLVariantRefExpr) {
int varIndex = (Integer) firstVar.getAttribute("varIndex");
userId = (Integer) parameters.get(varIndex);
} else {
userId = ((SQLNumericLiteralExpr) firstVar).getNumber().intValue();
}
String tableName;
if (userId == 1) {
tableName = "user_1";
} else {
tableName = "user_x";
}
for (SQLExprTableSource tableSource : visitor.getTableSourceList()) {
SQLExpr expr = tableSource.getExpr();
if (expr instanceof SQLIdentifierExpr) {
SQLIdentifierExpr identExpr = (SQLIdentifierExpr) expr;
String ident = identExpr.getName();
if (ident.equals("user")) {
tableSource.setExpr(new SQLIdentifierExpr(tableName));
}
} else if (expr instanceof SQLPropertyExpr) {
SQLPropertyExpr proExpr = (SQLPropertyExpr) expr;
String ident = proExpr.getName();
if (ident.equals("user")) {
proExpr.setName(tableName);
}
}
}
}
String realSql = SQLUtils.toOracleString(first);
return realSql;
}
private static
|
Demo3
|
java
|
apache__camel
|
components/camel-docker/src/test/java/org/apache/camel/component/docker/it/DockerStatsConsumerTest.java
|
{
"start": 1569,
"end": 2246
}
|
class ____ extends DockerITTestSupport {
@Test
void testDocker() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMinimumMessageCount(1);
MockEndpoint.assertIsSatisfied(context, 60, TimeUnit.SECONDS);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from("docker://stats?containerId={{docker.test.container.id}}&host={{docker.hostname}}&port={{docker.port}}")
.log("${body}")
.to("mock:result");
}
};
}
}
|
DockerStatsConsumerTest
|
java
|
elastic__elasticsearch
|
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/QueryableBuiltInRolesUtils.java
|
{
"start": 1147,
"end": 1306
}
|
class ____ provides helper method for calculating the hash of a role descriptor,
* determining the roles to upsert and the roles to delete.
*/
public final
|
which
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/monitor/RMAppToMonitor.java
|
{
"start": 1014,
"end": 1095
}
|
class ____ for monitor application with applicationId+appTimeoutType.
*/
public
|
used
|
java
|
hibernate__hibernate-orm
|
hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/onetomany/detached/BasicDetachedSetWithEmbId.java
|
{
"start": 1011,
"end": 4036
}
|
class ____ {
private EmbId str1_id;
private EmbId str2_id;
private EmbId coll1_id;
@BeforeClassTemplate
public void initData(EntityManagerFactoryScope scope) {
str1_id = new EmbId( 1, 2 );
str2_id = new EmbId( 3, 4 );
coll1_id = new EmbId( 5, 6 );
// Revision 1
scope.inTransaction( em -> {
EmbIdTestEntity str1 = new EmbIdTestEntity( str1_id, "str1" );
EmbIdTestEntity str2 = new EmbIdTestEntity( str2_id, "str2" );
em.persist( str1 );
em.persist( str2 );
SetRefCollEntityEmbId coll1 = new SetRefCollEntityEmbId( coll1_id, "coll1" );
coll1.setCollection( new HashSet<EmbIdTestEntity>() );
coll1.getCollection().add( str1 );
em.persist( coll1 );
} );
// Revision 2
scope.inTransaction( em -> {
EmbIdTestEntity str2 = em.find( EmbIdTestEntity.class, str2_id );
SetRefCollEntityEmbId coll1 = em.find( SetRefCollEntityEmbId.class, coll1_id );
coll1.getCollection().add( str2 );
} );
// Revision 3
scope.inTransaction( em -> {
EmbIdTestEntity str1 = em.find( EmbIdTestEntity.class, str1_id );
SetRefCollEntityEmbId coll1 = em.find( SetRefCollEntityEmbId.class, coll1_id );
coll1.getCollection().remove( str1 );
} );
// Revision 4
scope.inTransaction( em -> {
SetRefCollEntityEmbId coll1 = em.find( SetRefCollEntityEmbId.class, coll1_id );
coll1.getCollection().clear();
} );
}
@Test
public void testRevisionsCounts(EntityManagerFactoryScope scope) {
scope.inEntityManager( em -> {
final var auditReader = AuditReaderFactory.get( em );
assertEquals( Arrays.asList( 1, 2, 3, 4 ),
auditReader.getRevisions( SetRefCollEntityEmbId.class, coll1_id ) );
assertEquals( Arrays.asList( 1 ), auditReader.getRevisions( EmbIdTestEntity.class, str1_id ) );
assertEquals( Arrays.asList( 1 ), auditReader.getRevisions( EmbIdTestEntity.class, str2_id ) );
} );
}
@Test
public void testHistoryOfColl1(EntityManagerFactoryScope scope) {
scope.inEntityManager( em -> {
EmbIdTestEntity str1 = em.find( EmbIdTestEntity.class, str1_id );
EmbIdTestEntity str2 = em.find( EmbIdTestEntity.class, str2_id );
final var auditReader = AuditReaderFactory.get( em );
SetRefCollEntityEmbId rev1 = auditReader.find( SetRefCollEntityEmbId.class, coll1_id, 1 );
SetRefCollEntityEmbId rev2 = auditReader.find( SetRefCollEntityEmbId.class, coll1_id, 2 );
SetRefCollEntityEmbId rev3 = auditReader.find( SetRefCollEntityEmbId.class, coll1_id, 3 );
SetRefCollEntityEmbId rev4 = auditReader.find( SetRefCollEntityEmbId.class, coll1_id, 4 );
assertEquals( TestTools.makeSet( str1 ), rev1.getCollection() );
assertEquals( TestTools.makeSet( str1, str2 ), rev2.getCollection() );
assertEquals( TestTools.makeSet( str2 ), rev3.getCollection() );
assertEquals( TestTools.makeSet(), rev4.getCollection() );
assertEquals( "coll1", rev1.getData() );
assertEquals( "coll1", rev2.getData() );
assertEquals( "coll1", rev3.getData() );
assertEquals( "coll1", rev4.getData() );
} );
}
}
|
BasicDetachedSetWithEmbId
|
java
|
apache__flink
|
flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/data/util/DataFormatConverters.java
|
{
"start": 54507,
"end": 56149
}
|
class ____<T> extends AbstractRowDataConverter<T> {
private static final long serialVersionUID = 6821541780176167135L;
private final PojoTypeInfo<T> t;
private final PojoField[] fields;
public PojoConverter(PojoTypeInfo<T> t, DataType[] fieldTypes) {
super(fieldTypes);
this.fields = new PojoField[t.getArity()];
for (int i = 0; i < t.getArity(); i++) {
fields[i] = t.getPojoFieldAt(i);
fields[i].getField().setAccessible(true);
}
this.t = t;
}
@Override
RowData toInternalImpl(T value) {
GenericRowData genericRow = new GenericRowData(t.getArity());
for (int i = 0; i < t.getArity(); i++) {
try {
genericRow.setField(
i, converters[i].toInternal(fields[i].getField().get(value)));
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
return genericRow;
}
@Override
T toExternalImpl(RowData value) {
try {
T pojo = t.getTypeClass().newInstance();
for (int i = 0; i < t.getArity(); i++) {
fields[i].getField().set(pojo, converters[i].toExternal(value, i));
}
return pojo;
} catch (InstantiationException | IllegalAccessException e) {
throw new RuntimeException(e);
}
}
}
/** Converter for row. */
public static final
|
PojoConverter
|
java
|
apache__logging-log4j2
|
log4j-core/src/main/java/org/apache/logging/log4j/core/appender/routing/Routes.java
|
{
"start": 2044,
"end": 2139
}
|
class ____ {
private static final String LOG_EVENT_KEY = "logEvent";
public static
|
Routes
|
java
|
assertj__assertj-core
|
assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/api/Assertions_assertThatReflectiveOperationException_Test.java
|
{
"start": 1056,
"end": 2130
}
|
class ____ {
@Test
void should_pass_when_throw_ReflectiveOperationException() {
assertThatReflectiveOperationException().isThrownBy(codeThrowing(new ReflectiveOperationException()));
}
@Test
void should_fail_when_throw_wrong_type() {
// GIVEN
ThrowingCallable throwingCallable = () -> assertThatReflectiveOperationException().isThrownBy(codeThrowing(new Error()));
// WHEN
var assertionError = expectAssertionError(throwingCallable);
// THEN
then(assertionError).hasMessageContainingAll(Error.class.getName(), ReflectiveOperationException.class.getName());
}
@Test
void should_fail_when_no_exception_thrown() {
// GIVEN
ThrowingCallable throwingCallable = () -> assertThatReflectiveOperationException().isThrownBy(() -> {});
// WHEN
var assertionError = expectAssertionError(throwingCallable);
// THEN
then(assertionError).hasMessage("%nExpecting code to throw a java.lang.ReflectiveOperationException, but no throwable was thrown.".formatted());
}
}
|
Assertions_assertThatReflectiveOperationException_Test
|
java
|
redisson__redisson
|
redisson-spring-data/redisson-spring-data-18/src/test/java/org/redisson/spring/data/connection/RedissonClusterConnectionTest.java
|
{
"start": 1256,
"end": 8346
}
|
class ____ {
static RedissonClient redisson;
static RedissonClusterConnection connection;
static ClusterProcesses process;
@BeforeClass
public static void before() throws FailedToStartRedisException, IOException, InterruptedException {
RedisRunner master1 = new RedisRunner().randomPort().randomDir().nosave();
RedisRunner master2 = new RedisRunner().randomPort().randomDir().nosave();
RedisRunner master3 = new RedisRunner().randomPort().randomDir().nosave();
RedisRunner slave1 = new RedisRunner().randomPort().randomDir().nosave();
RedisRunner slave2 = new RedisRunner().randomPort().randomDir().nosave();
RedisRunner slave3 = new RedisRunner().randomPort().randomDir().nosave();
ClusterRunner clusterRunner = new ClusterRunner()
.addNode(master1, slave1)
.addNode(master2, slave2)
.addNode(master3, slave3);
process = clusterRunner.run();
Config config = new Config();
config.useClusterServers()
.setSubscriptionMode(SubscriptionMode.SLAVE)
.setLoadBalancer(new RandomLoadBalancer())
.addNodeAddress(process.getNodes().stream().findAny().get().getRedisServerAddressAndPort());
redisson = Redisson.create(config);
connection = new RedissonClusterConnection(redisson);
}
@AfterClass
public static void after() {
process.shutdown();
redisson.shutdown();
}
@Test
public void testClusterGetNodes() {
Iterable<RedisClusterNode> nodes = connection.clusterGetNodes();
assertThat(nodes).hasSize(6);
for (RedisClusterNode redisClusterNode : nodes) {
assertThat(redisClusterNode.getLinkState()).isNotNull();
assertThat(redisClusterNode.getFlags()).isNotEmpty();
assertThat(redisClusterNode.getHost()).isNotNull();
assertThat(redisClusterNode.getPort()).isNotNull();
assertThat(redisClusterNode.getId()).isNotNull();
assertThat(redisClusterNode.getType()).isNotNull();
if (redisClusterNode.getType() == NodeType.MASTER) {
assertThat(redisClusterNode.getSlotRange().getSlots()).isNotEmpty();
} else {
assertThat(redisClusterNode.getMasterId()).isNotNull();
}
}
}
@Test
public void testClusterGetNodesMaster() {
Iterable<RedisClusterNode> nodes = connection.clusterGetNodes();
for (RedisClusterNode redisClusterNode : nodes) {
if (redisClusterNode.getType() == NodeType.MASTER) {
Collection<RedisClusterNode> slaves = connection.clusterGetSlaves(redisClusterNode);
assertThat(slaves).hasSize(1);
}
}
}
@Test
public void testClusterGetMasterSlaveMap() {
Map<RedisClusterNode, Collection<RedisClusterNode>> map = connection.clusterGetMasterSlaveMap();
assertThat(map).hasSize(3);
for (Collection<RedisClusterNode> slaves : map.values()) {
assertThat(slaves).hasSize(1);
}
}
@Test
public void testClusterGetSlotForKey() {
Integer slot = connection.clusterGetSlotForKey("123".getBytes());
assertThat(slot).isNotNull();
}
@Test
public void testClusterGetNodeForSlot() {
RedisClusterNode node1 = connection.clusterGetNodeForSlot(1);
RedisClusterNode node2 = connection.clusterGetNodeForSlot(16000);
assertThat(node1.getId()).isNotEqualTo(node2.getId());
}
@Test
public void testClusterGetNodeForKey() {
RedisClusterNode node = connection.clusterGetNodeForKey("123".getBytes());
assertThat(node).isNotNull();
}
@Test
public void testClusterGetClusterInfo() {
ClusterInfo info = connection.clusterGetClusterInfo();
assertThat(info.getSlotsFail()).isEqualTo(0);
assertThat(info.getSlotsOk()).isEqualTo(MasterSlaveConnectionManager.MAX_SLOT);
assertThat(info.getSlotsAssigned()).isEqualTo(MasterSlaveConnectionManager.MAX_SLOT);
}
@Test
public void testClusterAddRemoveSlots() {
RedisClusterNode master = getFirstMaster();
Integer slot = master.getSlotRange().getSlots().iterator().next();
connection.clusterDeleteSlots(master, slot);
connection.clusterAddSlots(master, slot);
}
@Test
public void testClusterCountKeysInSlot() {
Long t = connection.clusterCountKeysInSlot(1);
assertThat(t).isZero();
}
@Test
public void testClusterMeetForget() {
RedisClusterNode master = getFirstMaster();
connection.clusterForget(master);
connection.clusterMeet(master);
}
@Test
public void testClusterGetKeysInSlot() {
List<byte[]> keys = connection.clusterGetKeysInSlot(12, 10);
assertThat(keys).isEmpty();
}
@Test
public void testClusterPing() {
RedisClusterNode master = getFirstMaster();
String res = connection.ping(master);
assertThat(res).isEqualTo("PONG");
}
@Test
public void testDbSize() {
RedisClusterNode master = getFirstMaster();
Long size = connection.dbSize(master);
assertThat(size).isZero();
}
@Test
public void testInfo() {
RedisClusterNode master = getFirstMaster();
Properties info = connection.info(master);
assertThat(info.size()).isGreaterThan(10);
}
@Test
public void testResetConfigStats() {
RedisClusterNode master = getFirstMaster();
connection.resetConfigStats(master);
}
@Test
public void testTime() {
RedisClusterNode master = getFirstMaster();
Long time = connection.time(master);
assertThat(time).isGreaterThan(1000);
}
@Test
public void testGetClientList() {
RedisClusterNode master = getFirstMaster();
List<RedisClientInfo> list = connection.getClientList(master);
assertThat(list.size()).isGreaterThan(10);
}
@Test
public void testSetConfig() {
RedisClusterNode master = getFirstMaster();
connection.setConfig(master, "timeout", "10");
}
@Test
public void testGetConfig() {
RedisClusterNode master = getFirstMaster();
List<String> config = connection.getConfig(master, "*");
assertThat(config.size()).isGreaterThan(20);
}
protected RedisClusterNode getFirstMaster() {
Map<RedisClusterNode, Collection<RedisClusterNode>> map = connection.clusterGetMasterSlaveMap();
RedisClusterNode master = map.keySet().iterator().next();
return master;
}
@Test
public void testConnectionFactoryReturnsClusterConnection() {
RedisConnectionFactory connectionFactory = new RedissonConnectionFactory(redisson);
assertThat(connectionFactory.getConnection()).isInstanceOf(RedissonClusterConnection.class);
}
}
|
RedissonClusterConnectionTest
|
java
|
spring-projects__spring-framework
|
spring-context-support/src/main/java/org/springframework/cache/jcache/interceptor/AnnotationJCacheOperationSource.java
|
{
"start": 1524,
"end": 1753
}
|
interface ____ reads
* the JSR-107 {@link CacheResult}, {@link CachePut}, {@link CacheRemove} and
* {@link CacheRemoveAll} annotations.
*
* @author Stephane Nicoll
* @author Juergen Hoeller
* @since 4.1
*/
public abstract
|
that
|
java
|
spring-projects__spring-boot
|
module/spring-boot-data-jdbc/src/main/java/org/springframework/boot/data/jdbc/autoconfigure/DataJdbcRepositoriesAutoConfiguration.java
|
{
"start": 4196,
"end": 6733
}
|
class ____ extends AbstractJdbcConfiguration {
private final ApplicationContext applicationContext;
private final DataJdbcProperties properties;
SpringBootJdbcConfiguration(ApplicationContext applicationContext, DataJdbcProperties properties) {
this.applicationContext = applicationContext;
this.properties = properties;
}
@Override
protected Set<Class<?>> getInitialEntitySet() throws ClassNotFoundException {
return new EntityScanner(this.applicationContext).scan(Table.class);
}
@Override
@Bean
@ConditionalOnMissingBean
public RelationalManagedTypes jdbcManagedTypes() throws ClassNotFoundException {
return super.jdbcManagedTypes();
}
@Override
@Bean
@ConditionalOnMissingBean
public JdbcMappingContext jdbcMappingContext(Optional<NamingStrategy> namingStrategy,
JdbcCustomConversions customConversions, RelationalManagedTypes jdbcManagedTypes) {
return super.jdbcMappingContext(namingStrategy, customConversions, jdbcManagedTypes);
}
@Override
@Bean
@ConditionalOnMissingBean
public JdbcConverter jdbcConverter(JdbcMappingContext mappingContext, NamedParameterJdbcOperations operations,
@Lazy RelationResolver relationResolver, JdbcCustomConversions conversions, JdbcDialect dialect) {
return super.jdbcConverter(mappingContext, operations, relationResolver, conversions, dialect);
}
@Override
@Bean
@ConditionalOnMissingBean
public JdbcCustomConversions jdbcCustomConversions() {
return super.jdbcCustomConversions();
}
@Override
@Bean
@ConditionalOnMissingBean
public JdbcAggregateTemplate jdbcAggregateTemplate(ApplicationContext applicationContext,
JdbcMappingContext mappingContext, JdbcConverter converter, DataAccessStrategy dataAccessStrategy) {
return super.jdbcAggregateTemplate(applicationContext, mappingContext, converter, dataAccessStrategy);
}
@Override
@Bean
@ConditionalOnMissingBean
public DataAccessStrategy dataAccessStrategyBean(NamedParameterJdbcOperations operations,
JdbcConverter jdbcConverter, JdbcMappingContext context, JdbcDialect dialect) {
return super.dataAccessStrategyBean(operations, jdbcConverter, context, dialect);
}
@Override
@Bean
@ConditionalOnMissingBean
public JdbcDialect jdbcDialect(NamedParameterJdbcOperations operations) {
DataJdbcDatabaseDialect dialect = this.properties.getDialect();
return (dialect != null) ? dialect.getJdbcDialect(operations.getJdbcOperations())
: super.jdbcDialect(operations);
}
}
}
|
SpringBootJdbcConfiguration
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/entitygraph/EntityGraphAttributeNodesTest.java
|
{
"start": 2123,
"end": 2622
}
|
class ____ {
@Id
private Integer id;
private String name;
@OneToMany(mappedBy = "human", cascade = CascadeType.ALL)
@OnDelete(action = OnDeleteAction.CASCADE)
private Collection<House> houses;
public Human() {
}
public Human(Integer id) {
this.id = id;
this.houses = new ArrayList<>();
}
public Human(Integer id, String name, Collection<House> houses) {
this.id = id;
this.name = name;
this.houses = houses;
}
}
@Entity(name = "House")
public static
|
Human
|
java
|
apache__camel
|
components/camel-kafka/src/generated/java/org/apache/camel/component/kafka/KafkaComponentConfigurer.java
|
{
"start": 732,
"end": 52233
}
|
class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
private org.apache.camel.component.kafka.KafkaConfiguration getOrCreateConfiguration(KafkaComponent target) {
if (target.getConfiguration() == null) {
target.setConfiguration(new org.apache.camel.component.kafka.KafkaConfiguration());
}
return target.getConfiguration();
}
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
KafkaComponent target = (KafkaComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "additionalproperties":
case "additionalProperties": getOrCreateConfiguration(target).setAdditionalProperties(property(camelContext, java.util.Map.class, value)); return true;
case "allowmanualcommit":
case "allowManualCommit": getOrCreateConfiguration(target).setAllowManualCommit(property(camelContext, boolean.class, value)); return true;
case "autocommitenable":
case "autoCommitEnable": getOrCreateConfiguration(target).setAutoCommitEnable(property(camelContext, boolean.class, value)); return true;
case "autocommitintervalms":
case "autoCommitIntervalMs": getOrCreateConfiguration(target).setAutoCommitIntervalMs(property(camelContext, java.lang.Integer.class, value)); return true;
case "autooffsetreset":
case "autoOffsetReset": getOrCreateConfiguration(target).setAutoOffsetReset(property(camelContext, java.lang.String.class, value)); return true;
case "autowiredenabled":
case "autowiredEnabled": target.setAutowiredEnabled(property(camelContext, boolean.class, value)); return true;
case "batchwithindividualheaders":
case "batchWithIndividualHeaders": getOrCreateConfiguration(target).setBatchWithIndividualHeaders(property(camelContext, boolean.class, value)); return true;
case "batching": getOrCreateConfiguration(target).setBatching(property(camelContext, boolean.class, value)); return true;
case "batchingintervalms":
case "batchingIntervalMs": getOrCreateConfiguration(target).setBatchingIntervalMs(property(camelContext, java.lang.Integer.class, value)); return true;
case "breakonfirsterror":
case "breakOnFirstError": getOrCreateConfiguration(target).setBreakOnFirstError(property(camelContext, boolean.class, value)); return true;
case "bridgeerrorhandler":
case "bridgeErrorHandler": target.setBridgeErrorHandler(property(camelContext, boolean.class, value)); return true;
case "brokers": getOrCreateConfiguration(target).setBrokers(property(camelContext, java.lang.String.class, value)); return true;
case "buffermemorysize":
case "bufferMemorySize": getOrCreateConfiguration(target).setBufferMemorySize(property(camelContext, java.lang.Integer.class, value)); return true;
case "checkcrcs":
case "checkCrcs": getOrCreateConfiguration(target).setCheckCrcs(property(camelContext, java.lang.Boolean.class, value)); return true;
case "clientid":
case "clientId": getOrCreateConfiguration(target).setClientId(property(camelContext, java.lang.String.class, value)); return true;
case "committimeoutms":
case "commitTimeoutMs": getOrCreateConfiguration(target).setCommitTimeoutMs(property(camelContext, java.lang.Long.class, value)); return true;
case "compressioncodec":
case "compressionCodec": getOrCreateConfiguration(target).setCompressionCodec(property(camelContext, java.lang.String.class, value)); return true;
case "configuration": target.setConfiguration(property(camelContext, org.apache.camel.component.kafka.KafkaConfiguration.class, value)); return true;
case "connectionmaxidlems":
case "connectionMaxIdleMs": getOrCreateConfiguration(target).setConnectionMaxIdleMs(property(camelContext, java.lang.Integer.class, value)); return true;
case "consumerrequesttimeoutms":
case "consumerRequestTimeoutMs": getOrCreateConfiguration(target).setConsumerRequestTimeoutMs(property(camelContext, java.lang.Integer.class, value)); return true;
case "consumerscount":
case "consumersCount": getOrCreateConfiguration(target).setConsumersCount(property(camelContext, int.class, value)); return true;
case "createconsumerbackoffinterval":
case "createConsumerBackoffInterval": target.setCreateConsumerBackoffInterval(property(camelContext, long.class, value)); return true;
case "createconsumerbackoffmaxattempts":
case "createConsumerBackoffMaxAttempts": target.setCreateConsumerBackoffMaxAttempts(property(camelContext, int.class, value)); return true;
case "deliverytimeoutms":
case "deliveryTimeoutMs": getOrCreateConfiguration(target).setDeliveryTimeoutMs(property(camelContext, java.lang.Integer.class, value)); return true;
case "enableidempotence":
case "enableIdempotence": getOrCreateConfiguration(target).setEnableIdempotence(property(camelContext, boolean.class, value)); return true;
case "fetchmaxbytes":
case "fetchMaxBytes": getOrCreateConfiguration(target).setFetchMaxBytes(property(camelContext, java.lang.Integer.class, value)); return true;
case "fetchminbytes":
case "fetchMinBytes": getOrCreateConfiguration(target).setFetchMinBytes(property(camelContext, java.lang.Integer.class, value)); return true;
case "fetchwaitmaxms":
case "fetchWaitMaxMs": getOrCreateConfiguration(target).setFetchWaitMaxMs(property(camelContext, java.lang.Integer.class, value)); return true;
case "groupid":
case "groupId": getOrCreateConfiguration(target).setGroupId(property(camelContext, java.lang.String.class, value)); return true;
case "groupinstanceid":
case "groupInstanceId": getOrCreateConfiguration(target).setGroupInstanceId(property(camelContext, java.lang.String.class, value)); return true;
case "headerdeserializer":
case "headerDeserializer": getOrCreateConfiguration(target).setHeaderDeserializer(property(camelContext, org.apache.camel.component.kafka.serde.KafkaHeaderDeserializer.class, value)); return true;
case "headerfilterstrategy":
case "headerFilterStrategy": getOrCreateConfiguration(target).setHeaderFilterStrategy(property(camelContext, org.apache.camel.spi.HeaderFilterStrategy.class, value)); return true;
case "headerserializer":
case "headerSerializer": getOrCreateConfiguration(target).setHeaderSerializer(property(camelContext, org.apache.camel.component.kafka.serde.KafkaHeaderSerializer.class, value)); return true;
case "healthcheckconsumerenabled":
case "healthCheckConsumerEnabled": target.setHealthCheckConsumerEnabled(property(camelContext, boolean.class, value)); return true;
case "healthcheckproducerenabled":
case "healthCheckProducerEnabled": target.setHealthCheckProducerEnabled(property(camelContext, boolean.class, value)); return true;
case "heartbeatintervalms":
case "heartbeatIntervalMs": getOrCreateConfiguration(target).setHeartbeatIntervalMs(property(camelContext, java.lang.Integer.class, value)); return true;
case "interceptorclasses":
case "interceptorClasses": getOrCreateConfiguration(target).setInterceptorClasses(property(camelContext, java.lang.String.class, value)); return true;
case "isolationlevel":
case "isolationLevel": getOrCreateConfiguration(target).setIsolationLevel(property(camelContext, java.lang.String.class, value)); return true;
case "kafkaclientfactory":
case "kafkaClientFactory": target.setKafkaClientFactory(property(camelContext, org.apache.camel.component.kafka.KafkaClientFactory.class, value)); return true;
case "kafkamanualcommitfactory":
case "kafkaManualCommitFactory": target.setKafkaManualCommitFactory(property(camelContext, org.apache.camel.component.kafka.consumer.KafkaManualCommitFactory.class, value)); return true;
case "kerberosbeforereloginmintime":
case "kerberosBeforeReloginMinTime": getOrCreateConfiguration(target).setKerberosBeforeReloginMinTime(property(camelContext, java.lang.Integer.class, value)); return true;
case "kerberosconfiglocation":
case "kerberosConfigLocation": getOrCreateConfiguration(target).setKerberosConfigLocation(property(camelContext, java.lang.String.class, value)); return true;
case "kerberosinitcmd":
case "kerberosInitCmd": getOrCreateConfiguration(target).setKerberosInitCmd(property(camelContext, java.lang.String.class, value)); return true;
case "kerberosprincipaltolocalrules":
case "kerberosPrincipalToLocalRules": getOrCreateConfiguration(target).setKerberosPrincipalToLocalRules(property(camelContext, java.lang.String.class, value)); return true;
case "kerberosrenewjitter":
case "kerberosRenewJitter": getOrCreateConfiguration(target).setKerberosRenewJitter(property(camelContext, java.lang.Double.class, value)); return true;
case "kerberosrenewwindowfactor":
case "kerberosRenewWindowFactor": getOrCreateConfiguration(target).setKerberosRenewWindowFactor(property(camelContext, java.lang.Double.class, value)); return true;
case "key": getOrCreateConfiguration(target).setKey(property(camelContext, java.lang.String.class, value)); return true;
case "keydeserializer":
case "keyDeserializer": getOrCreateConfiguration(target).setKeyDeserializer(property(camelContext, java.lang.String.class, value)); return true;
case "keyserializer":
case "keySerializer": getOrCreateConfiguration(target).setKeySerializer(property(camelContext, java.lang.String.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
case "lingerms":
case "lingerMs": getOrCreateConfiguration(target).setLingerMs(property(camelContext, java.lang.Integer.class, value)); return true;
case "maxblockms":
case "maxBlockMs": getOrCreateConfiguration(target).setMaxBlockMs(property(camelContext, java.lang.Integer.class, value)); return true;
case "maxinflightrequest":
case "maxInFlightRequest": getOrCreateConfiguration(target).setMaxInFlightRequest(property(camelContext, java.lang.Integer.class, value)); return true;
case "maxpartitionfetchbytes":
case "maxPartitionFetchBytes": getOrCreateConfiguration(target).setMaxPartitionFetchBytes(property(camelContext, java.lang.Integer.class, value)); return true;
case "maxpollintervalms":
case "maxPollIntervalMs": getOrCreateConfiguration(target).setMaxPollIntervalMs(property(camelContext, java.lang.Integer.class, value)); return true;
case "maxpollrecords":
case "maxPollRecords": getOrCreateConfiguration(target).setMaxPollRecords(property(camelContext, java.lang.Integer.class, value)); return true;
case "maxrequestsize":
case "maxRequestSize": getOrCreateConfiguration(target).setMaxRequestSize(property(camelContext, java.lang.Integer.class, value)); return true;
case "metadatamaxagems":
case "metadataMaxAgeMs": getOrCreateConfiguration(target).setMetadataMaxAgeMs(property(camelContext, java.lang.Integer.class, value)); return true;
case "metricreporters":
case "metricReporters": getOrCreateConfiguration(target).setMetricReporters(property(camelContext, java.lang.String.class, value)); return true;
case "metricssamplewindowms":
case "metricsSampleWindowMs": getOrCreateConfiguration(target).setMetricsSampleWindowMs(property(camelContext, java.lang.Integer.class, value)); return true;
case "noofmetricssample":
case "noOfMetricsSample": getOrCreateConfiguration(target).setNoOfMetricsSample(property(camelContext, java.lang.Integer.class, value)); return true;
case "offsetrepository":
case "offsetRepository": getOrCreateConfiguration(target).setOffsetRepository(property(camelContext, org.apache.camel.spi.StateRepository.class, value)); return true;
case "partitionassignor":
case "partitionAssignor": getOrCreateConfiguration(target).setPartitionAssignor(property(camelContext, java.lang.String.class, value)); return true;
case "partitionkey":
case "partitionKey": getOrCreateConfiguration(target).setPartitionKey(property(camelContext, java.lang.Integer.class, value)); return true;
case "partitioner": getOrCreateConfiguration(target).setPartitioner(property(camelContext, java.lang.String.class, value)); return true;
case "partitionerignorekeys":
case "partitionerIgnoreKeys": getOrCreateConfiguration(target).setPartitionerIgnoreKeys(property(camelContext, boolean.class, value)); return true;
case "pollexceptionstrategy":
case "pollExceptionStrategy": target.setPollExceptionStrategy(property(camelContext, org.apache.camel.component.kafka.PollExceptionStrategy.class, value)); return true;
case "pollonerror":
case "pollOnError": getOrCreateConfiguration(target).setPollOnError(property(camelContext, org.apache.camel.component.kafka.PollOnError.class, value)); return true;
case "polltimeoutms":
case "pollTimeoutMs": getOrCreateConfiguration(target).setPollTimeoutMs(property(camelContext, java.lang.Long.class, value)); return true;
case "prevalidatehostandport":
case "preValidateHostAndPort": getOrCreateConfiguration(target).setPreValidateHostAndPort(property(camelContext, boolean.class, value)); return true;
case "producerbatchsize":
case "producerBatchSize": getOrCreateConfiguration(target).setProducerBatchSize(property(camelContext, java.lang.Integer.class, value)); return true;
case "queuebufferingmaxmessages":
case "queueBufferingMaxMessages": getOrCreateConfiguration(target).setQueueBufferingMaxMessages(property(camelContext, java.lang.Integer.class, value)); return true;
case "receivebufferbytes":
case "receiveBufferBytes": getOrCreateConfiguration(target).setReceiveBufferBytes(property(camelContext, java.lang.Integer.class, value)); return true;
case "reconnectbackoffmaxms":
case "reconnectBackoffMaxMs": getOrCreateConfiguration(target).setReconnectBackoffMaxMs(property(camelContext, java.lang.Integer.class, value)); return true;
case "reconnectbackoffms":
case "reconnectBackoffMs": getOrCreateConfiguration(target).setReconnectBackoffMs(property(camelContext, java.lang.Integer.class, value)); return true;
case "recordmetadata":
case "recordMetadata": getOrCreateConfiguration(target).setRecordMetadata(property(camelContext, boolean.class, value)); return true;
case "requestrequiredacks":
case "requestRequiredAcks": getOrCreateConfiguration(target).setRequestRequiredAcks(property(camelContext, java.lang.String.class, value)); return true;
case "requesttimeoutms":
case "requestTimeoutMs": getOrCreateConfiguration(target).setRequestTimeoutMs(property(camelContext, java.lang.Integer.class, value)); return true;
case "retries": getOrCreateConfiguration(target).setRetries(property(camelContext, java.lang.Integer.class, value)); return true;
case "retrybackoffmaxms":
case "retryBackoffMaxMs": getOrCreateConfiguration(target).setRetryBackoffMaxMs(property(camelContext, java.lang.Integer.class, value)); return true;
case "retrybackoffms":
case "retryBackoffMs": getOrCreateConfiguration(target).setRetryBackoffMs(property(camelContext, java.lang.Integer.class, value)); return true;
case "sasljaasconfig":
case "saslJaasConfig": getOrCreateConfiguration(target).setSaslJaasConfig(property(camelContext, java.lang.String.class, value)); return true;
case "saslkerberosservicename":
case "saslKerberosServiceName": getOrCreateConfiguration(target).setSaslKerberosServiceName(property(camelContext, java.lang.String.class, value)); return true;
case "saslmechanism":
case "saslMechanism": getOrCreateConfiguration(target).setSaslMechanism(property(camelContext, java.lang.String.class, value)); return true;
case "schemaregistryurl":
case "schemaRegistryURL": getOrCreateConfiguration(target).setSchemaRegistryURL(property(camelContext, java.lang.String.class, value)); return true;
case "securityprotocol":
case "securityProtocol": getOrCreateConfiguration(target).setSecurityProtocol(property(camelContext, java.lang.String.class, value)); return true;
case "seekto":
case "seekTo": getOrCreateConfiguration(target).setSeekTo(property(camelContext, org.apache.camel.component.kafka.SeekPolicy.class, value)); return true;
case "sendbufferbytes":
case "sendBufferBytes": getOrCreateConfiguration(target).setSendBufferBytes(property(camelContext, java.lang.Integer.class, value)); return true;
case "sessiontimeoutms":
case "sessionTimeoutMs": getOrCreateConfiguration(target).setSessionTimeoutMs(property(camelContext, java.lang.Integer.class, value)); return true;
case "shutdowntimeout":
case "shutdownTimeout": getOrCreateConfiguration(target).setShutdownTimeout(property(camelContext, int.class, value)); return true;
case "specificavroreader":
case "specificAvroReader": getOrCreateConfiguration(target).setSpecificAvroReader(property(camelContext, boolean.class, value)); return true;
case "sslciphersuites":
case "sslCipherSuites": getOrCreateConfiguration(target).setSslCipherSuites(property(camelContext, java.lang.String.class, value)); return true;
case "sslcontextparameters":
case "sslContextParameters": getOrCreateConfiguration(target).setSslContextParameters(property(camelContext, org.apache.camel.support.jsse.SSLContextParameters.class, value)); return true;
case "sslenabledprotocols":
case "sslEnabledProtocols": getOrCreateConfiguration(target).setSslEnabledProtocols(property(camelContext, java.lang.String.class, value)); return true;
case "sslendpointalgorithm":
case "sslEndpointAlgorithm": getOrCreateConfiguration(target).setSslEndpointAlgorithm(property(camelContext, java.lang.String.class, value)); return true;
case "sslkeypassword":
case "sslKeyPassword": getOrCreateConfiguration(target).setSslKeyPassword(property(camelContext, java.lang.String.class, value)); return true;
case "sslkeymanageralgorithm":
case "sslKeymanagerAlgorithm": getOrCreateConfiguration(target).setSslKeymanagerAlgorithm(property(camelContext, java.lang.String.class, value)); return true;
case "sslkeystorelocation":
case "sslKeystoreLocation": getOrCreateConfiguration(target).setSslKeystoreLocation(property(camelContext, java.lang.String.class, value)); return true;
case "sslkeystorepassword":
case "sslKeystorePassword": getOrCreateConfiguration(target).setSslKeystorePassword(property(camelContext, java.lang.String.class, value)); return true;
case "sslkeystoretype":
case "sslKeystoreType": getOrCreateConfiguration(target).setSslKeystoreType(property(camelContext, java.lang.String.class, value)); return true;
case "sslprotocol":
case "sslProtocol": getOrCreateConfiguration(target).setSslProtocol(property(camelContext, java.lang.String.class, value)); return true;
case "sslprovider":
case "sslProvider": getOrCreateConfiguration(target).setSslProvider(property(camelContext, java.lang.String.class, value)); return true;
case "ssltrustmanageralgorithm":
case "sslTrustmanagerAlgorithm": getOrCreateConfiguration(target).setSslTrustmanagerAlgorithm(property(camelContext, java.lang.String.class, value)); return true;
case "ssltruststorelocation":
case "sslTruststoreLocation": getOrCreateConfiguration(target).setSslTruststoreLocation(property(camelContext, java.lang.String.class, value)); return true;
case "ssltruststorepassword":
case "sslTruststorePassword": getOrCreateConfiguration(target).setSslTruststorePassword(property(camelContext, java.lang.String.class, value)); return true;
case "ssltruststoretype":
case "sslTruststoreType": getOrCreateConfiguration(target).setSslTruststoreType(property(camelContext, java.lang.String.class, value)); return true;
case "subscribeconsumerbackoffinterval":
case "subscribeConsumerBackoffInterval": target.setSubscribeConsumerBackoffInterval(property(camelContext, long.class, value)); return true;
case "subscribeconsumerbackoffmaxattempts":
case "subscribeConsumerBackoffMaxAttempts": target.setSubscribeConsumerBackoffMaxAttempts(property(camelContext, int.class, value)); return true;
case "subscribeconsumertopicmustexists":
case "subscribeConsumerTopicMustExists": target.setSubscribeConsumerTopicMustExists(property(camelContext, boolean.class, value)); return true;
case "synchronous": getOrCreateConfiguration(target).setSynchronous(property(camelContext, boolean.class, value)); return true;
case "topicispattern":
case "topicIsPattern": getOrCreateConfiguration(target).setTopicIsPattern(property(camelContext, boolean.class, value)); return true;
case "transacted": getOrCreateConfiguration(target).setTransacted(property(camelContext, boolean.class, value)); return true;
case "transactionalid":
case "transactionalId": getOrCreateConfiguration(target).setTransactionalId(property(camelContext, java.lang.String.class, value)); return true;
case "useglobalsslcontextparameters":
case "useGlobalSslContextParameters": target.setUseGlobalSslContextParameters(property(camelContext, boolean.class, value)); return true;
case "useiterator":
case "useIterator": getOrCreateConfiguration(target).setUseIterator(property(camelContext, boolean.class, value)); return true;
case "valuedeserializer":
case "valueDeserializer": getOrCreateConfiguration(target).setValueDeserializer(property(camelContext, java.lang.String.class, value)); return true;
case "valueserializer":
case "valueSerializer": getOrCreateConfiguration(target).setValueSerializer(property(camelContext, java.lang.String.class, value)); return true;
case "workerpool":
case "workerPool": getOrCreateConfiguration(target).setWorkerPool(property(camelContext, java.util.concurrent.ExecutorService.class, value)); return true;
case "workerpoolcoresize":
case "workerPoolCoreSize": getOrCreateConfiguration(target).setWorkerPoolCoreSize(property(camelContext, java.lang.Integer.class, value)); return true;
case "workerpoolmaxsize":
case "workerPoolMaxSize": getOrCreateConfiguration(target).setWorkerPoolMaxSize(property(camelContext, java.lang.Integer.class, value)); return true;
default: return false;
}
}
@Override
public String[] getAutowiredNames() {
return new String[]{"kafkaClientFactory", "kafkaManualCommitFactory", "pollExceptionStrategy"};
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "additionalproperties":
case "additionalProperties": return java.util.Map.class;
case "allowmanualcommit":
case "allowManualCommit": return boolean.class;
case "autocommitenable":
case "autoCommitEnable": return boolean.class;
case "autocommitintervalms":
case "autoCommitIntervalMs": return java.lang.Integer.class;
case "autooffsetreset":
case "autoOffsetReset": return java.lang.String.class;
case "autowiredenabled":
case "autowiredEnabled": return boolean.class;
case "batchwithindividualheaders":
case "batchWithIndividualHeaders": return boolean.class;
case "batching": return boolean.class;
case "batchingintervalms":
case "batchingIntervalMs": return java.lang.Integer.class;
case "breakonfirsterror":
case "breakOnFirstError": return boolean.class;
case "bridgeerrorhandler":
case "bridgeErrorHandler": return boolean.class;
case "brokers": return java.lang.String.class;
case "buffermemorysize":
case "bufferMemorySize": return java.lang.Integer.class;
case "checkcrcs":
case "checkCrcs": return java.lang.Boolean.class;
case "clientid":
case "clientId": return java.lang.String.class;
case "committimeoutms":
case "commitTimeoutMs": return java.lang.Long.class;
case "compressioncodec":
case "compressionCodec": return java.lang.String.class;
case "configuration": return org.apache.camel.component.kafka.KafkaConfiguration.class;
case "connectionmaxidlems":
case "connectionMaxIdleMs": return java.lang.Integer.class;
case "consumerrequesttimeoutms":
case "consumerRequestTimeoutMs": return java.lang.Integer.class;
case "consumerscount":
case "consumersCount": return int.class;
case "createconsumerbackoffinterval":
case "createConsumerBackoffInterval": return long.class;
case "createconsumerbackoffmaxattempts":
case "createConsumerBackoffMaxAttempts": return int.class;
case "deliverytimeoutms":
case "deliveryTimeoutMs": return java.lang.Integer.class;
case "enableidempotence":
case "enableIdempotence": return boolean.class;
case "fetchmaxbytes":
case "fetchMaxBytes": return java.lang.Integer.class;
case "fetchminbytes":
case "fetchMinBytes": return java.lang.Integer.class;
case "fetchwaitmaxms":
case "fetchWaitMaxMs": return java.lang.Integer.class;
case "groupid":
case "groupId": return java.lang.String.class;
case "groupinstanceid":
case "groupInstanceId": return java.lang.String.class;
case "headerdeserializer":
case "headerDeserializer": return org.apache.camel.component.kafka.serde.KafkaHeaderDeserializer.class;
case "headerfilterstrategy":
case "headerFilterStrategy": return org.apache.camel.spi.HeaderFilterStrategy.class;
case "headerserializer":
case "headerSerializer": return org.apache.camel.component.kafka.serde.KafkaHeaderSerializer.class;
case "healthcheckconsumerenabled":
case "healthCheckConsumerEnabled": return boolean.class;
case "healthcheckproducerenabled":
case "healthCheckProducerEnabled": return boolean.class;
case "heartbeatintervalms":
case "heartbeatIntervalMs": return java.lang.Integer.class;
case "interceptorclasses":
case "interceptorClasses": return java.lang.String.class;
case "isolationlevel":
case "isolationLevel": return java.lang.String.class;
case "kafkaclientfactory":
case "kafkaClientFactory": return org.apache.camel.component.kafka.KafkaClientFactory.class;
case "kafkamanualcommitfactory":
case "kafkaManualCommitFactory": return org.apache.camel.component.kafka.consumer.KafkaManualCommitFactory.class;
case "kerberosbeforereloginmintime":
case "kerberosBeforeReloginMinTime": return java.lang.Integer.class;
case "kerberosconfiglocation":
case "kerberosConfigLocation": return java.lang.String.class;
case "kerberosinitcmd":
case "kerberosInitCmd": return java.lang.String.class;
case "kerberosprincipaltolocalrules":
case "kerberosPrincipalToLocalRules": return java.lang.String.class;
case "kerberosrenewjitter":
case "kerberosRenewJitter": return java.lang.Double.class;
case "kerberosrenewwindowfactor":
case "kerberosRenewWindowFactor": return java.lang.Double.class;
case "key": return java.lang.String.class;
case "keydeserializer":
case "keyDeserializer": return java.lang.String.class;
case "keyserializer":
case "keySerializer": return java.lang.String.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
case "lingerms":
case "lingerMs": return java.lang.Integer.class;
case "maxblockms":
case "maxBlockMs": return java.lang.Integer.class;
case "maxinflightrequest":
case "maxInFlightRequest": return java.lang.Integer.class;
case "maxpartitionfetchbytes":
case "maxPartitionFetchBytes": return java.lang.Integer.class;
case "maxpollintervalms":
case "maxPollIntervalMs": return java.lang.Integer.class;
case "maxpollrecords":
case "maxPollRecords": return java.lang.Integer.class;
case "maxrequestsize":
case "maxRequestSize": return java.lang.Integer.class;
case "metadatamaxagems":
case "metadataMaxAgeMs": return java.lang.Integer.class;
case "metricreporters":
case "metricReporters": return java.lang.String.class;
case "metricssamplewindowms":
case "metricsSampleWindowMs": return java.lang.Integer.class;
case "noofmetricssample":
case "noOfMetricsSample": return java.lang.Integer.class;
case "offsetrepository":
case "offsetRepository": return org.apache.camel.spi.StateRepository.class;
case "partitionassignor":
case "partitionAssignor": return java.lang.String.class;
case "partitionkey":
case "partitionKey": return java.lang.Integer.class;
case "partitioner": return java.lang.String.class;
case "partitionerignorekeys":
case "partitionerIgnoreKeys": return boolean.class;
case "pollexceptionstrategy":
case "pollExceptionStrategy": return org.apache.camel.component.kafka.PollExceptionStrategy.class;
case "pollonerror":
case "pollOnError": return org.apache.camel.component.kafka.PollOnError.class;
case "polltimeoutms":
case "pollTimeoutMs": return java.lang.Long.class;
case "prevalidatehostandport":
case "preValidateHostAndPort": return boolean.class;
case "producerbatchsize":
case "producerBatchSize": return java.lang.Integer.class;
case "queuebufferingmaxmessages":
case "queueBufferingMaxMessages": return java.lang.Integer.class;
case "receivebufferbytes":
case "receiveBufferBytes": return java.lang.Integer.class;
case "reconnectbackoffmaxms":
case "reconnectBackoffMaxMs": return java.lang.Integer.class;
case "reconnectbackoffms":
case "reconnectBackoffMs": return java.lang.Integer.class;
case "recordmetadata":
case "recordMetadata": return boolean.class;
case "requestrequiredacks":
case "requestRequiredAcks": return java.lang.String.class;
case "requesttimeoutms":
case "requestTimeoutMs": return java.lang.Integer.class;
case "retries": return java.lang.Integer.class;
case "retrybackoffmaxms":
case "retryBackoffMaxMs": return java.lang.Integer.class;
case "retrybackoffms":
case "retryBackoffMs": return java.lang.Integer.class;
case "sasljaasconfig":
case "saslJaasConfig": return java.lang.String.class;
case "saslkerberosservicename":
case "saslKerberosServiceName": return java.lang.String.class;
case "saslmechanism":
case "saslMechanism": return java.lang.String.class;
case "schemaregistryurl":
case "schemaRegistryURL": return java.lang.String.class;
case "securityprotocol":
case "securityProtocol": return java.lang.String.class;
case "seekto":
case "seekTo": return org.apache.camel.component.kafka.SeekPolicy.class;
case "sendbufferbytes":
case "sendBufferBytes": return java.lang.Integer.class;
case "sessiontimeoutms":
case "sessionTimeoutMs": return java.lang.Integer.class;
case "shutdowntimeout":
case "shutdownTimeout": return int.class;
case "specificavroreader":
case "specificAvroReader": return boolean.class;
case "sslciphersuites":
case "sslCipherSuites": return java.lang.String.class;
case "sslcontextparameters":
case "sslContextParameters": return org.apache.camel.support.jsse.SSLContextParameters.class;
case "sslenabledprotocols":
case "sslEnabledProtocols": return java.lang.String.class;
case "sslendpointalgorithm":
case "sslEndpointAlgorithm": return java.lang.String.class;
case "sslkeypassword":
case "sslKeyPassword": return java.lang.String.class;
case "sslkeymanageralgorithm":
case "sslKeymanagerAlgorithm": return java.lang.String.class;
case "sslkeystorelocation":
case "sslKeystoreLocation": return java.lang.String.class;
case "sslkeystorepassword":
case "sslKeystorePassword": return java.lang.String.class;
case "sslkeystoretype":
case "sslKeystoreType": return java.lang.String.class;
case "sslprotocol":
case "sslProtocol": return java.lang.String.class;
case "sslprovider":
case "sslProvider": return java.lang.String.class;
case "ssltrustmanageralgorithm":
case "sslTrustmanagerAlgorithm": return java.lang.String.class;
case "ssltruststorelocation":
case "sslTruststoreLocation": return java.lang.String.class;
case "ssltruststorepassword":
case "sslTruststorePassword": return java.lang.String.class;
case "ssltruststoretype":
case "sslTruststoreType": return java.lang.String.class;
case "subscribeconsumerbackoffinterval":
case "subscribeConsumerBackoffInterval": return long.class;
case "subscribeconsumerbackoffmaxattempts":
case "subscribeConsumerBackoffMaxAttempts": return int.class;
case "subscribeconsumertopicmustexists":
case "subscribeConsumerTopicMustExists": return boolean.class;
case "synchronous": return boolean.class;
case "topicispattern":
case "topicIsPattern": return boolean.class;
case "transacted": return boolean.class;
case "transactionalid":
case "transactionalId": return java.lang.String.class;
case "useglobalsslcontextparameters":
case "useGlobalSslContextParameters": return boolean.class;
case "useiterator":
case "useIterator": return boolean.class;
case "valuedeserializer":
case "valueDeserializer": return java.lang.String.class;
case "valueserializer":
case "valueSerializer": return java.lang.String.class;
case "workerpool":
case "workerPool": return java.util.concurrent.ExecutorService.class;
case "workerpoolcoresize":
case "workerPoolCoreSize": return java.lang.Integer.class;
case "workerpoolmaxsize":
case "workerPoolMaxSize": return java.lang.Integer.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
KafkaComponent target = (KafkaComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "additionalproperties":
case "additionalProperties": return getOrCreateConfiguration(target).getAdditionalProperties();
case "allowmanualcommit":
case "allowManualCommit": return getOrCreateConfiguration(target).isAllowManualCommit();
case "autocommitenable":
case "autoCommitEnable": return getOrCreateConfiguration(target).isAutoCommitEnable();
case "autocommitintervalms":
case "autoCommitIntervalMs": return getOrCreateConfiguration(target).getAutoCommitIntervalMs();
case "autooffsetreset":
case "autoOffsetReset": return getOrCreateConfiguration(target).getAutoOffsetReset();
case "autowiredenabled":
case "autowiredEnabled": return target.isAutowiredEnabled();
case "batchwithindividualheaders":
case "batchWithIndividualHeaders": return getOrCreateConfiguration(target).isBatchWithIndividualHeaders();
case "batching": return getOrCreateConfiguration(target).isBatching();
case "batchingintervalms":
case "batchingIntervalMs": return getOrCreateConfiguration(target).getBatchingIntervalMs();
case "breakonfirsterror":
case "breakOnFirstError": return getOrCreateConfiguration(target).isBreakOnFirstError();
case "bridgeerrorhandler":
case "bridgeErrorHandler": return target.isBridgeErrorHandler();
case "brokers": return getOrCreateConfiguration(target).getBrokers();
case "buffermemorysize":
case "bufferMemorySize": return getOrCreateConfiguration(target).getBufferMemorySize();
case "checkcrcs":
case "checkCrcs": return getOrCreateConfiguration(target).getCheckCrcs();
case "clientid":
case "clientId": return getOrCreateConfiguration(target).getClientId();
case "committimeoutms":
case "commitTimeoutMs": return getOrCreateConfiguration(target).getCommitTimeoutMs();
case "compressioncodec":
case "compressionCodec": return getOrCreateConfiguration(target).getCompressionCodec();
case "configuration": return target.getConfiguration();
case "connectionmaxidlems":
case "connectionMaxIdleMs": return getOrCreateConfiguration(target).getConnectionMaxIdleMs();
case "consumerrequesttimeoutms":
case "consumerRequestTimeoutMs": return getOrCreateConfiguration(target).getConsumerRequestTimeoutMs();
case "consumerscount":
case "consumersCount": return getOrCreateConfiguration(target).getConsumersCount();
case "createconsumerbackoffinterval":
case "createConsumerBackoffInterval": return target.getCreateConsumerBackoffInterval();
case "createconsumerbackoffmaxattempts":
case "createConsumerBackoffMaxAttempts": return target.getCreateConsumerBackoffMaxAttempts();
case "deliverytimeoutms":
case "deliveryTimeoutMs": return getOrCreateConfiguration(target).getDeliveryTimeoutMs();
case "enableidempotence":
case "enableIdempotence": return getOrCreateConfiguration(target).isEnableIdempotence();
case "fetchmaxbytes":
case "fetchMaxBytes": return getOrCreateConfiguration(target).getFetchMaxBytes();
case "fetchminbytes":
case "fetchMinBytes": return getOrCreateConfiguration(target).getFetchMinBytes();
case "fetchwaitmaxms":
case "fetchWaitMaxMs": return getOrCreateConfiguration(target).getFetchWaitMaxMs();
case "groupid":
case "groupId": return getOrCreateConfiguration(target).getGroupId();
case "groupinstanceid":
case "groupInstanceId": return getOrCreateConfiguration(target).getGroupInstanceId();
case "headerdeserializer":
case "headerDeserializer": return getOrCreateConfiguration(target).getHeaderDeserializer();
case "headerfilterstrategy":
case "headerFilterStrategy": return getOrCreateConfiguration(target).getHeaderFilterStrategy();
case "headerserializer":
case "headerSerializer": return getOrCreateConfiguration(target).getHeaderSerializer();
case "healthcheckconsumerenabled":
case "healthCheckConsumerEnabled": return target.isHealthCheckConsumerEnabled();
case "healthcheckproducerenabled":
case "healthCheckProducerEnabled": return target.isHealthCheckProducerEnabled();
case "heartbeatintervalms":
case "heartbeatIntervalMs": return getOrCreateConfiguration(target).getHeartbeatIntervalMs();
case "interceptorclasses":
case "interceptorClasses": return getOrCreateConfiguration(target).getInterceptorClasses();
case "isolationlevel":
case "isolationLevel": return getOrCreateConfiguration(target).getIsolationLevel();
case "kafkaclientfactory":
case "kafkaClientFactory": return target.getKafkaClientFactory();
case "kafkamanualcommitfactory":
case "kafkaManualCommitFactory": return target.getKafkaManualCommitFactory();
case "kerberosbeforereloginmintime":
case "kerberosBeforeReloginMinTime": return getOrCreateConfiguration(target).getKerberosBeforeReloginMinTime();
case "kerberosconfiglocation":
case "kerberosConfigLocation": return getOrCreateConfiguration(target).getKerberosConfigLocation();
case "kerberosinitcmd":
case "kerberosInitCmd": return getOrCreateConfiguration(target).getKerberosInitCmd();
case "kerberosprincipaltolocalrules":
case "kerberosPrincipalToLocalRules": return getOrCreateConfiguration(target).getKerberosPrincipalToLocalRules();
case "kerberosrenewjitter":
case "kerberosRenewJitter": return getOrCreateConfiguration(target).getKerberosRenewJitter();
case "kerberosrenewwindowfactor":
case "kerberosRenewWindowFactor": return getOrCreateConfiguration(target).getKerberosRenewWindowFactor();
case "key": return getOrCreateConfiguration(target).getKey();
case "keydeserializer":
case "keyDeserializer": return getOrCreateConfiguration(target).getKeyDeserializer();
case "keyserializer":
case "keySerializer": return getOrCreateConfiguration(target).getKeySerializer();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
case "lingerms":
case "lingerMs": return getOrCreateConfiguration(target).getLingerMs();
case "maxblockms":
case "maxBlockMs": return getOrCreateConfiguration(target).getMaxBlockMs();
case "maxinflightrequest":
case "maxInFlightRequest": return getOrCreateConfiguration(target).getMaxInFlightRequest();
case "maxpartitionfetchbytes":
case "maxPartitionFetchBytes": return getOrCreateConfiguration(target).getMaxPartitionFetchBytes();
case "maxpollintervalms":
case "maxPollIntervalMs": return getOrCreateConfiguration(target).getMaxPollIntervalMs();
case "maxpollrecords":
case "maxPollRecords": return getOrCreateConfiguration(target).getMaxPollRecords();
case "maxrequestsize":
case "maxRequestSize": return getOrCreateConfiguration(target).getMaxRequestSize();
case "metadatamaxagems":
case "metadataMaxAgeMs": return getOrCreateConfiguration(target).getMetadataMaxAgeMs();
case "metricreporters":
case "metricReporters": return getOrCreateConfiguration(target).getMetricReporters();
case "metricssamplewindowms":
case "metricsSampleWindowMs": return getOrCreateConfiguration(target).getMetricsSampleWindowMs();
case "noofmetricssample":
case "noOfMetricsSample": return getOrCreateConfiguration(target).getNoOfMetricsSample();
case "offsetrepository":
case "offsetRepository": return getOrCreateConfiguration(target).getOffsetRepository();
case "partitionassignor":
case "partitionAssignor": return getOrCreateConfiguration(target).getPartitionAssignor();
case "partitionkey":
case "partitionKey": return getOrCreateConfiguration(target).getPartitionKey();
case "partitioner": return getOrCreateConfiguration(target).getPartitioner();
case "partitionerignorekeys":
case "partitionerIgnoreKeys": return getOrCreateConfiguration(target).isPartitionerIgnoreKeys();
case "pollexceptionstrategy":
case "pollExceptionStrategy": return target.getPollExceptionStrategy();
case "pollonerror":
case "pollOnError": return getOrCreateConfiguration(target).getPollOnError();
case "polltimeoutms":
case "pollTimeoutMs": return getOrCreateConfiguration(target).getPollTimeoutMs();
case "prevalidatehostandport":
case "preValidateHostAndPort": return getOrCreateConfiguration(target).isPreValidateHostAndPort();
case "producerbatchsize":
case "producerBatchSize": return getOrCreateConfiguration(target).getProducerBatchSize();
case "queuebufferingmaxmessages":
case "queueBufferingMaxMessages": return getOrCreateConfiguration(target).getQueueBufferingMaxMessages();
case "receivebufferbytes":
case "receiveBufferBytes": return getOrCreateConfiguration(target).getReceiveBufferBytes();
case "reconnectbackoffmaxms":
case "reconnectBackoffMaxMs": return getOrCreateConfiguration(target).getReconnectBackoffMaxMs();
case "reconnectbackoffms":
case "reconnectBackoffMs": return getOrCreateConfiguration(target).getReconnectBackoffMs();
case "recordmetadata":
case "recordMetadata": return getOrCreateConfiguration(target).isRecordMetadata();
case "requestrequiredacks":
case "requestRequiredAcks": return getOrCreateConfiguration(target).getRequestRequiredAcks();
case "requesttimeoutms":
case "requestTimeoutMs": return getOrCreateConfiguration(target).getRequestTimeoutMs();
case "retries": return getOrCreateConfiguration(target).getRetries();
case "retrybackoffmaxms":
case "retryBackoffMaxMs": return getOrCreateConfiguration(target).getRetryBackoffMaxMs();
case "retrybackoffms":
case "retryBackoffMs": return getOrCreateConfiguration(target).getRetryBackoffMs();
case "sasljaasconfig":
case "saslJaasConfig": return getOrCreateConfiguration(target).getSaslJaasConfig();
case "saslkerberosservicename":
case "saslKerberosServiceName": return getOrCreateConfiguration(target).getSaslKerberosServiceName();
case "saslmechanism":
case "saslMechanism": return getOrCreateConfiguration(target).getSaslMechanism();
case "schemaregistryurl":
case "schemaRegistryURL": return getOrCreateConfiguration(target).getSchemaRegistryURL();
case "securityprotocol":
case "securityProtocol": return getOrCreateConfiguration(target).getSecurityProtocol();
case "seekto":
case "seekTo": return getOrCreateConfiguration(target).getSeekTo();
case "sendbufferbytes":
case "sendBufferBytes": return getOrCreateConfiguration(target).getSendBufferBytes();
case "sessiontimeoutms":
case "sessionTimeoutMs": return getOrCreateConfiguration(target).getSessionTimeoutMs();
case "shutdowntimeout":
case "shutdownTimeout": return getOrCreateConfiguration(target).getShutdownTimeout();
case "specificavroreader":
case "specificAvroReader": return getOrCreateConfiguration(target).isSpecificAvroReader();
case "sslciphersuites":
case "sslCipherSuites": return getOrCreateConfiguration(target).getSslCipherSuites();
case "sslcontextparameters":
case "sslContextParameters": return getOrCreateConfiguration(target).getSslContextParameters();
case "sslenabledprotocols":
case "sslEnabledProtocols": return getOrCreateConfiguration(target).getSslEnabledProtocols();
case "sslendpointalgorithm":
case "sslEndpointAlgorithm": return getOrCreateConfiguration(target).getSslEndpointAlgorithm();
case "sslkeypassword":
case "sslKeyPassword": return getOrCreateConfiguration(target).getSslKeyPassword();
case "sslkeymanageralgorithm":
case "sslKeymanagerAlgorithm": return getOrCreateConfiguration(target).getSslKeymanagerAlgorithm();
case "sslkeystorelocation":
case "sslKeystoreLocation": return getOrCreateConfiguration(target).getSslKeystoreLocation();
case "sslkeystorepassword":
case "sslKeystorePassword": return getOrCreateConfiguration(target).getSslKeystorePassword();
case "sslkeystoretype":
case "sslKeystoreType": return getOrCreateConfiguration(target).getSslKeystoreType();
case "sslprotocol":
case "sslProtocol": return getOrCreateConfiguration(target).getSslProtocol();
case "sslprovider":
case "sslProvider": return getOrCreateConfiguration(target).getSslProvider();
case "ssltrustmanageralgorithm":
case "sslTrustmanagerAlgorithm": return getOrCreateConfiguration(target).getSslTrustmanagerAlgorithm();
case "ssltruststorelocation":
case "sslTruststoreLocation": return getOrCreateConfiguration(target).getSslTruststoreLocation();
case "ssltruststorepassword":
case "sslTruststorePassword": return getOrCreateConfiguration(target).getSslTruststorePassword();
case "ssltruststoretype":
case "sslTruststoreType": return getOrCreateConfiguration(target).getSslTruststoreType();
case "subscribeconsumerbackoffinterval":
case "subscribeConsumerBackoffInterval": return target.getSubscribeConsumerBackoffInterval();
case "subscribeconsumerbackoffmaxattempts":
case "subscribeConsumerBackoffMaxAttempts": return target.getSubscribeConsumerBackoffMaxAttempts();
case "subscribeconsumertopicmustexists":
case "subscribeConsumerTopicMustExists": return target.isSubscribeConsumerTopicMustExists();
case "synchronous": return getOrCreateConfiguration(target).isSynchronous();
case "topicispattern":
case "topicIsPattern": return getOrCreateConfiguration(target).isTopicIsPattern();
case "transacted": return getOrCreateConfiguration(target).isTransacted();
case "transactionalid":
case "transactionalId": return getOrCreateConfiguration(target).getTransactionalId();
case "useglobalsslcontextparameters":
case "useGlobalSslContextParameters": return target.isUseGlobalSslContextParameters();
case "useiterator":
case "useIterator": return getOrCreateConfiguration(target).isUseIterator();
case "valuedeserializer":
case "valueDeserializer": return getOrCreateConfiguration(target).getValueDeserializer();
case "valueserializer":
case "valueSerializer": return getOrCreateConfiguration(target).getValueSerializer();
case "workerpool":
case "workerPool": return getOrCreateConfiguration(target).getWorkerPool();
case "workerpoolcoresize":
case "workerPoolCoreSize": return getOrCreateConfiguration(target).getWorkerPoolCoreSize();
case "workerpoolmaxsize":
case "workerPoolMaxSize": return getOrCreateConfiguration(target).getWorkerPoolMaxSize();
default: return null;
}
}
@Override
public Object getCollectionValueType(Object target, String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "additionalproperties":
case "additionalProperties": return java.lang.Object.class;
case "offsetrepository":
case "offsetRepository": return java.lang.String.class;
default: return null;
}
}
}
|
KafkaComponentConfigurer
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/internal/bigintegers/BigIntegers_assertIsNotNegative_Test.java
|
{
"start": 1019,
"end": 2200
}
|
class ____ extends BigIntegersBaseTest {
@Test
void should_succeed_since_actual_is_not_negative() {
numbers.assertIsNotNegative(someInfo(), new BigInteger("6"));
}
@Test
void should_succeed_since_actual_is_zero() {
numbers.assertIsNotNegative(someInfo(), BigInteger.ZERO);
}
@Test
void should_fail_since_actual_is_negative() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> numbers.assertIsNotNegative(someInfo(),
new BigInteger("-6")))
.withMessage("%nExpecting actual:%n -6%nto be greater than or equal to:%n 0%n".formatted());
}
@Test
void should_succeed_since_actual_is_not_negative_according_to_custom_comparison_strategy() {
numbersWithAbsValueComparisonStrategy.assertIsNotNegative(someInfo(), new BigInteger("-1"));
}
@Test
void should_succeed_since_actual_positive_is_not_negative_according_to_custom_comparison_strategy() {
numbersWithAbsValueComparisonStrategy.assertIsNotNegative(someInfo(), BigInteger.ONE);
}
}
|
BigIntegers_assertIsNotNegative_Test
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/annotations/DialectOverride.java
|
{
"start": 9698,
"end": 9997
}
|
interface ____ {
SQLSelect[] value();
}
/**
* Specializes a {@link org.hibernate.annotations.SQLInsert}
* in a certain dialect.
*/
@Target({METHOD, FIELD, TYPE})
@Retention(RUNTIME)
@Repeatable(SQLInserts.class)
@OverridesAnnotation(org.hibernate.annotations.SQLInsert.class)
@
|
SQLSelects
|
java
|
spring-projects__spring-framework
|
spring-core/src/main/java/org/springframework/aot/hint/ExecutableMode.java
|
{
"start": 873,
"end": 1647
}
|
enum ____ {
/**
* Only retrieving the {@link Executable} and its metadata is required.
* @deprecated with no replacement since introspection is included
* when {@link ReflectionHints#registerType(Class, MemberCategory...) adding a reflection hint for a type}.
*/
@Deprecated(since= "7.0", forRemoval = true)
INTROSPECT,
/**
* Full reflection support is required, including the ability to invoke
* the {@link Executable}.
*/
INVOKE;
/**
* Specify if this mode already includes the specified {@code other} mode.
* @param other the other mode to check
* @return {@code true} if this mode includes the other mode
*/
boolean includes(@Nullable ExecutableMode other) {
return (other == null || this.ordinal() >= other.ordinal());
}
}
|
ExecutableMode
|
java
|
junit-team__junit5
|
jupiter-tests/src/test/java/org/junit/jupiter/engine/LifecycleMethodOverridingTests.java
|
{
"start": 3658,
"end": 3759
}
|
class ____ {
@Nested
@DisplayName("a public lifecycle method in a subclass")
|
PublicSuperClassTests
|
java
|
resilience4j__resilience4j
|
resilience4j-retry/src/test/java/io/github/resilience4j/retry/internal/CompletionStageRetryTest.java
|
{
"start": 1562,
"end": 13096
}
|
class ____ {
private AsyncHelloWorldService helloWorldService;
private ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
@Before
public void setUp() {
helloWorldService = mock(AsyncHelloWorldService.class);
}
@Test
public void shouldNotRetry() {
given(helloWorldService.returnHelloWorld())
.willReturn(completedFuture("Hello world"));
Retry retryContext = Retry.ofDefaults("id");
Supplier<CompletionStage<String>> supplier = Retry.decorateCompletionStage(
retryContext,
scheduler,
() -> helloWorldService.returnHelloWorld());
String result = awaitResult(supplier);
then(helloWorldService).should().returnHelloWorld();
assertThat(result).isEqualTo("Hello world");
}
@Test
public void shouldNotRetryWhenReturnVoid() {
given(helloWorldService.sayHelloWorld())
.willReturn(completedFuture(null));
Retry retryContext = Retry.ofDefaults("id");
Supplier<CompletionStage<Void>> supplier = Retry.decorateCompletionStage(
retryContext,
scheduler,
() -> helloWorldService.sayHelloWorld());
awaitResult(supplier);
then(helloWorldService).should().sayHelloWorld();
}
@Test
public void shouldNotRetryWithThatResult() {
given(helloWorldService.returnHelloWorld())
.willReturn(completedFuture("Hello world"));
final RetryConfig retryConfig = RetryConfig.<String>custom()
.retryOnResult(s -> s.contains("NoRetry"))
.maxAttempts(1)
.build();
Retry retryContext = Retry.of("id", retryConfig);
Supplier<CompletionStage<String>> supplier = Retry.decorateCompletionStage(
retryContext,
scheduler,
() -> helloWorldService.returnHelloWorld());
String result = awaitResult(supplier);
then(helloWorldService).should().returnHelloWorld();
assertThat(result).isEqualTo("Hello world");
}
@Test
public void shouldRetryInCaseOResultRetryMatchAtSyncStage() {
shouldCompleteFutureAfterAttemptsInCaseOfRetyOnResultAtAsyncStage(1, "Hello world");
}
@Test
public void shouldRetryTowAttemptsInCaseOResultRetryMatchAtSyncStage() {
shouldCompleteFutureAfterAttemptsInCaseOfRetyOnResultAtAsyncStage(2, "Hello world");
}
@Test(expected = IllegalArgumentException.class)
public void shouldRethrowExceptionInCaseOfExceptionAtSyncStage() {
given(helloWorldService.returnHelloWorld())
.willThrow(new IllegalArgumentException("BAM!"));
Retry retry = Retry.ofDefaults("id");
retry.executeCompletionStage(
scheduler,
() -> helloWorldService.returnHelloWorld());
}
@Test
public void shouldRetryInCaseOfAnExceptionAtAsyncStage() {
CompletableFuture<String> failedFuture = new CompletableFuture<>();
failedFuture.completeExceptionally(new HelloWorldException());
given(helloWorldService.returnHelloWorld())
.willReturn(failedFuture)
.willReturn(completedFuture("Hello world"));
Retry retryContext = Retry.ofDefaults("id");
Supplier<CompletionStage<String>> supplier = Retry.decorateCompletionStage(
retryContext,
scheduler,
() -> helloWorldService.returnHelloWorld());
String result = awaitResult(supplier.get());
then(helloWorldService).should(times(2)).returnHelloWorld();
assertThat(result).isEqualTo("Hello world");
}
@Test
public void shouldThrowOnceMaxAttemptsReachedIfConfigured() {
given(helloWorldService.returnHelloWorld())
.willReturn(CompletableFuture.completedFuture("invalid response"));
RetryConfig retryConfig = RetryConfig.<String>custom()
.retryOnResult(s -> s.equals("invalid response"))
.maxAttempts(3)
.failAfterMaxAttempts(true)
.build();
Retry retry = Retry.of("retry", retryConfig);
Supplier<CompletionStage<String>> supplier = Retry.decorateCompletionStage(
retry,
scheduler,
helloWorldService::returnHelloWorld
);
assertThat(supplier.get())
.failsWithin(5, TimeUnit.SECONDS)
.withThrowableOfType(ExecutionException.class)
.havingCause()
.isInstanceOf(MaxRetriesExceededException.class)
.withMessage("Retry 'retry' has exhausted all attempts (3)");
then(helloWorldService).should(times(3)).returnHelloWorld();
}
@Test
public void shouldStopRetryingAndEmitProperEventsIfIntervalFunctionReturnsLessThanZero() {
given(helloWorldService.returnHelloWorld())
.willReturn(CompletableFuture.failedFuture(new HelloWorldException("Exceptional!")));
AtomicInteger numberOfTimesIntervalFunctionCalled = new AtomicInteger(0);
RetryConfig retryConfig = RetryConfig.<String>custom()
.intervalFunction((ignored) -> {
int numTimesCalled = numberOfTimesIntervalFunctionCalled.incrementAndGet();
return numTimesCalled > 1 ? -1L : 0L;
})
.maxAttempts(3)
.build();
AtomicInteger numberOfRetryEvents = new AtomicInteger();
AtomicBoolean onErrorEventOccurred = new AtomicBoolean(false);
Retry retry = Retry.of("retry", retryConfig);
retry.getEventPublisher().onRetry((ignored) -> numberOfRetryEvents.getAndIncrement());
retry.getEventPublisher().onError((ignored) -> onErrorEventOccurred.set(true));
Supplier<CompletionStage<String>> supplier = Retry.decorateCompletionStage(
retry,
scheduler,
helloWorldService::returnHelloWorld
);
assertThat(supplier.get())
.failsWithin(5, TimeUnit.SECONDS)
.withThrowableOfType(ExecutionException.class)
.havingCause()
.isInstanceOf(HelloWorldException.class)
.withMessage("Exceptional!");
assertThat(numberOfRetryEvents).hasValue(1);
assertThat(onErrorEventOccurred).isTrue();
then(helloWorldService).should(times(2)).returnHelloWorld();
}
@Test
public void shouldContinueRetryingAndEmitProperEventsIfIntervalFunctionReturnsZeroOrMore() {
given(helloWorldService.returnHelloWorld())
.willReturn(CompletableFuture.failedFuture(new HelloWorldException("Exceptional!")));
AtomicInteger numberOfTimesIntervalFunctionCalled = new AtomicInteger(0);
RetryConfig retryConfig = RetryConfig.<String>custom()
.intervalFunction((ignored) -> {
// Returns 0, 1, 2
return (long) numberOfTimesIntervalFunctionCalled.getAndIncrement();
})
.maxAttempts(3)
.build();
AtomicInteger numberOfRetryEvents = new AtomicInteger();
AtomicBoolean onErrorEventOccurred = new AtomicBoolean(false);
Retry retry = Retry.of("retry", retryConfig);
retry.getEventPublisher().onRetry((ignored) -> numberOfRetryEvents.getAndIncrement());
retry.getEventPublisher().onError((ignored) -> onErrorEventOccurred.set(true));
Supplier<CompletionStage<String>> supplier = Retry.decorateCompletionStage(
retry,
scheduler,
helloWorldService::returnHelloWorld
);
assertThat(supplier.get())
.failsWithin(5, TimeUnit.SECONDS)
.withThrowableOfType(ExecutionException.class)
.havingCause()
.isInstanceOf(HelloWorldException.class)
.withMessage("Exceptional!");
assertThat(numberOfRetryEvents).hasValue(2);
assertThat(onErrorEventOccurred).isTrue();
then(helloWorldService).should(times(3)).returnHelloWorld();
}
@Test
public void shouldCompleteFutureAfterOneAttemptInCaseOfExceptionAtAsyncStage() {
shouldCompleteFutureAfterAttemptsInCaseOfExceptionAtAsyncStage(1);
}
@Test
public void shouldCompleteFutureAfterTwoAttemptsInCaseOfExceptionAtAsyncStage() {
shouldCompleteFutureAfterAttemptsInCaseOfExceptionAtAsyncStage(2);
}
@Test
public void shouldCompleteFutureAfterThreeAttemptsInCaseOfExceptionAtAsyncStage() {
shouldCompleteFutureAfterAttemptsInCaseOfExceptionAtAsyncStage(3);
}
private void shouldCompleteFutureAfterAttemptsInCaseOfExceptionAtAsyncStage(int noOfAttempts) {
CompletableFuture<String> failedFuture = new CompletableFuture<>();
failedFuture.completeExceptionally(new HelloWorldException());
given(helloWorldService.returnHelloWorld())
.willReturn(failedFuture);
Retry retryContext = Retry.of(
"id",
RetryConfig
.custom()
.maxAttempts(noOfAttempts)
.build());
Supplier<CompletionStage<String>> supplier = Retry.decorateCompletionStage(
retryContext,
scheduler,
() -> helloWorldService.returnHelloWorld());
Try<String> resultTry = Try.of(() -> awaitResult(supplier.get()));
then(helloWorldService).should(times(noOfAttempts)).returnHelloWorld();
assertThat(resultTry.isFailure()).isTrue();
assertThat(resultTry.getCause().getCause()).isInstanceOf(HelloWorldException.class);
}
private void shouldCompleteFutureAfterAttemptsInCaseOfRetyOnResultAtAsyncStage(int noOfAttempts,
String retryResponse) {
given(helloWorldService.returnHelloWorld())
.willReturn(completedFuture("Hello world"));
Retry retryContext = Retry.of(
"id",
RetryConfig
.<String>custom()
.maxAttempts(noOfAttempts)
.retryOnResult(s -> s.contains(retryResponse))
.build());
Supplier<CompletionStage<String>> supplier = Retry.decorateCompletionStage(
retryContext,
scheduler,
() -> helloWorldService.returnHelloWorld());
Try<String> resultTry = Try.of(() -> awaitResult(supplier.get()));
then(helloWorldService).should(times(noOfAttempts)).returnHelloWorld();
assertThat(resultTry.isSuccess()).isTrue();
}
@Test
public void shouldCompleteExceptionallyWhenRetryOnExPredicateThrows() {
given(helloWorldService.returnHelloWorld())
.willReturn(CompletableFuture.failedFuture(new HelloWorldException()));
final RetryConfig retryConfig = RetryConfig.custom()
.retryOnException(__ -> {
throw new RuntimeException();
})
.build();
Retry retryContext = Retry.of("id", retryConfig);
Supplier<CompletionStage<String>> supplier = Retry.decorateCompletionStage(
retryContext,
scheduler,
() -> helloWorldService.returnHelloWorld());
Try<String> resultTry = Try.of(() -> awaitResult(supplier.get()));
then(helloWorldService).should(times(1)).returnHelloWorld();
assertThat(resultTry.isFailure()).isTrue();
assertThat(resultTry.getCause().getCause()).isInstanceOf(RuntimeException.class);
}
}
|
CompletionStageRetryTest
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/WriteableExponentialHistogram.java
|
{
"start": 1963,
"end": 5572
}
|
class ____ to be added, similar to the following PR:
// https://github.com/elastic/elasticsearch/pull/135054
private static final String WRITEABLE_NAME = "test_exponential_histogram";
public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(
GenericNamedWriteable.class,
WRITEABLE_NAME,
WriteableExponentialHistogram::readFrom
);
private final ExponentialHistogram delegate;
public WriteableExponentialHistogram(ExponentialHistogram delegate) {
this.delegate = delegate;
}
@Override
public int scale() {
return delegate.scale();
}
@Override
public ZeroBucket zeroBucket() {
return delegate.zeroBucket();
}
@Override
public Buckets positiveBuckets() {
return delegate.positiveBuckets();
}
@Override
public Buckets negativeBuckets() {
return delegate.negativeBuckets();
}
@Override
public double sum() {
return delegate.sum();
}
@Override
public long valueCount() {
return delegate.valueCount();
}
@Override
public double min() {
return delegate.min();
}
@Override
public double max() {
return delegate.max();
}
@Override
public long ramBytesUsed() {
return 0;
}
@Override
public String getWriteableName() {
return WRITEABLE_NAME;
}
@Override
public boolean supportsVersion(TransportVersion version) {
return true;
}
@Override
public TransportVersion getMinimalSupportedVersion() {
assert false : "must not be called when overriding supportsVersion";
throw new UnsupportedOperationException("must not be called when overriding supportsVersion");
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeByte((byte) scale());
out.writeDouble(sum());
out.writeDouble(min());
out.writeDouble(max());
out.writeDouble(zeroBucket().zeroThreshold());
out.writeLong(zeroBucket().count());
writeBuckets(out, negativeBuckets());
writeBuckets(out, positiveBuckets());
}
private static void writeBuckets(StreamOutput out, Buckets buckets) throws IOException {
int count = 0;
BucketIterator iterator = buckets.iterator();
while (iterator.hasNext()) {
count++;
iterator.advance();
}
out.writeInt(count);
iterator = buckets.iterator();
while (iterator.hasNext()) {
out.writeLong(iterator.peekIndex());
out.writeLong(iterator.peekCount());
iterator.advance();
}
}
private static WriteableExponentialHistogram readFrom(StreamInput in) throws IOException {
byte scale = in.readByte();
ExponentialHistogramBuilder builder = ExponentialHistogram.builder(scale, ExponentialHistogramCircuitBreaker.noop());
builder.sum(in.readDouble());
builder.min(in.readDouble());
builder.max(in.readDouble());
builder.zeroBucket(ZeroBucket.create(in.readDouble(), in.readLong()));
int negBucketCount = in.readInt();
for (int i = 0; i < negBucketCount; i++) {
builder.setNegativeBucket(in.readLong(), in.readLong());
}
int posBucketCount = in.readInt();
for (int i = 0; i < posBucketCount; i++) {
builder.setPositiveBucket(in.readLong(), in.readLong());
}
return new WriteableExponentialHistogram(builder.build());
}
}
|
needs
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/InternalUser.java
|
{
"start": 517,
"end": 2599
}
|
class ____ extends User {
private final Optional<RoleDescriptor> localClusterRoleDescriptor;
private final Optional<RoleDescriptor> remoteAccessRoleDescriptor;
InternalUser(String username, @Nullable RoleDescriptor localClusterRole) {
this(username, Optional.ofNullable(localClusterRole), Optional.empty());
}
InternalUser(String username, Optional<RoleDescriptor> localClusterRole, Optional<RoleDescriptor> remoteAccessRole) {
super(username, Strings.EMPTY_ARRAY);
assert enabled();
assert roles() != null && roles().length == 0;
this.localClusterRoleDescriptor = Objects.requireNonNull(localClusterRole);
this.localClusterRoleDescriptor.ifPresent(rd -> { assert rd.getName().equals(username); });
this.remoteAccessRoleDescriptor = Objects.requireNonNull(remoteAccessRole);
}
@Override
public boolean equals(Object o) {
return o == this;
}
@Override
public int hashCode() {
return System.identityHashCode(this);
}
/**
* The local-cluster role descriptor assigned to this internal user, or {@link Optional#empty()} if this user does not have a role.
* This {@link RoleDescriptor} defines the privileges that the internal-user has for requests that originate from a node within the
* local cluster.
* @see #getRemoteAccessRoleDescriptor()
*/
public Optional<RoleDescriptor> getLocalClusterRoleDescriptor() {
return localClusterRoleDescriptor;
}
/**
* The remote-access role descriptor assigned to this internal user, or {@link Optional#empty()} if this user is not permitted to
* make cross-cluster requests.
* This {@link RoleDescriptor} defines the privileges that the internal-user has for requests that run on the current cluster, but
* originate from a node within an external cluster (via CCS/CCR).
* @see #getLocalClusterRoleDescriptor()
*/
public Optional<RoleDescriptor> getRemoteAccessRoleDescriptor() {
return remoteAccessRoleDescriptor;
}
}
|
InternalUser
|
java
|
lettuce-io__lettuce-core
|
src/main/java/io/lettuce/core/RedisConnectionStateListener.java
|
{
"start": 72,
"end": 172
}
|
interface ____ Redis connection state monitoring.
*
* @author ze
* @author Mark Paluch
*/
public
|
for
|
java
|
micronaut-projects__micronaut-core
|
http-client-jdk/src/main/java/io/micronaut/http/client/jdk/DefaultJdkHttpClientRegistry.java
|
{
"start": 10430,
"end": 17771
}
|
class ____ be an instance of HttpClientConfiguration for injection point: " + configurationClass);
}
final List<String> filterAnnotations = clientKey.filterAnnotations;
final String path = clientKey.path;
if (clientBean != null && path == null && configurationClass == null && filterAnnotations.isEmpty()) {
return clientBean;
}
LoadBalancer loadBalancer = null;
final HttpClientConfiguration configuration;
if (configurationClass != null) {
configuration = (HttpClientConfiguration) this.beanContext.getBean(configurationClass);
} else if (clientId != null) {
configuration = this.beanContext.findBean(
HttpClientConfiguration.class,
Qualifiers.byName(clientId)
).orElse(defaultHttpClientConfiguration);
} else {
configuration = defaultHttpClientConfiguration;
}
if (clientId != null) {
loadBalancer = loadBalancerResolver.resolve(clientId)
.orElseThrow(() ->
new HttpClientException("Invalid service reference [" + clientId + "] specified to @Client"));
}
String contextPath = null;
if (StringUtils.isNotEmpty(path)) {
contextPath = path;
} else if (StringUtils.isNotEmpty(clientId) && clientId.startsWith("/")) {
contextPath = clientId;
} else {
if (loadBalancer != null) {
contextPath = loadBalancer.getContextPath().orElse(null);
}
}
DefaultJdkHttpClient client = buildClient(
loadBalancer,
clientKey.httpVersion,
configuration,
clientId,
contextPath,
beanContext,
annotationMetadata
);
final JsonFeatures jsonFeatures = clientKey.jsonFeatures;
if (jsonFeatures != null) {
List<MediaTypeCodec> codecs = new ArrayList<>(2);
MediaTypeCodecRegistry codecRegistry = client.getMediaTypeCodecRegistry();
for (MediaTypeCodec codec : codecRegistry.getCodecs()) {
if (codec instanceof MapperMediaTypeCodec mapper) {
codecs.add(mapper.cloneWithFeatures(jsonFeatures));
} else {
codecs.add(codec);
}
}
if (codecRegistry.findCodec(MediaType.APPLICATION_JSON_TYPE).isEmpty()) {
codecs.add(createNewJsonCodec(this.beanContext, jsonFeatures));
}
client.setMediaTypeCodecRegistry(MediaTypeCodecRegistry.of(codecs));
client.setMessageBodyHandlerRegistry(new MessageBodyHandlerRegistry() {
final MessageBodyHandlerRegistry delegate = client.getMessageBodyHandlerRegistry();
@SuppressWarnings("unchecked")
private <T> T customize(T handler) {
if (handler instanceof CustomizableJsonHandler cnjh) {
return (T) cnjh.customize(jsonFeatures);
}
return handler;
}
@Override
public <T> Optional<MessageBodyReader<T>> findReader(Argument<T> type, List<MediaType> mediaType) {
return delegate.findReader(type, mediaType).map(this::customize);
}
@Override
public <T> Optional<MessageBodyWriter<T>> findWriter(Argument<T> type, List<MediaType> mediaType) {
return delegate.findWriter(type, mediaType).map(this::customize);
}
});
}
return client;
});
}
private DefaultJdkHttpClient buildClient(
LoadBalancer loadBalancer,
HttpVersionSelection httpVersion,
HttpClientConfiguration configuration,
String clientId,
String contextPath,
BeanContext beanContext,
AnnotationMetadata annotationMetadata
) {
ConversionService conversionService = beanContext.getBean(ConversionService.class);
return new DefaultJdkHttpClient(
loadBalancer,
httpVersion,
configuration,
contextPath,
clientFilterResolver,
clientFilterResolver.resolveFilterEntries(new ClientFilterResolutionContext(
clientId == null ? null : Collections.singletonList(clientId),
annotationMetadata
)),
mediaTypeCodecRegistry,
messageBodyHandlerRegistry,
requestBinderRegistryProvider.orElse(new DefaultRequestBinderRegistry(conversionService)),
clientId,
conversionService,
jdkClientSslBuilder,
cookieDecoder
);
}
@Override
public DefaultJdkHttpClient getClient(HttpVersionSelection httpVersion, String clientId, String path) {
final ClientKey key = new ClientKey(
httpVersion,
clientId,
null,
path,
null,
null
);
return getClient(key, AnnotationMetadata.EMPTY_METADATA);
}
@Override
public HttpClient resolveClient(InjectionPoint<?> injectionPoint, LoadBalancer loadBalancer, HttpClientConfiguration configuration, BeanContext beanContext) {
return resolveDefaultHttpClient(injectionPoint, loadBalancer, configuration, beanContext);
}
@Override
public void disposeClient(AnnotationMetadata annotationMetadata) {
final ClientKey key = getClientKey(annotationMetadata);
HttpClient client = clients.remove(key);
if (client != null && client.isRunning()) {
client.close();
}
}
@Override
public void close() throws Exception {
for (HttpClient httpClient : clients.values()) {
try {
httpClient.close();
} catch (Throwable e) {
if (LOG.isWarnEnabled()) {
LOG.warn("Error shutting down HTTP client: {}", e.getMessage(), e);
}
}
}
clients.clear();
}
@Override
public @NonNull RawHttpClient getRawClient(@NonNull HttpVersionSelection httpVersion, @NonNull String clientId, @Nullable String path) {
return new JdkRawHttpClient(getClient(httpVersion, clientId, path));
}
/**
* Client key.
*
* @param httpVersion The HTTP version
* @param clientId The client ID
* @param filterAnnotations The filter annotations
* @param path The path
* @param configurationClass The configuration class
* @param jsonFeatures The JSON features
*/
@Internal
private record ClientKey(
HttpVersionSelection httpVersion,
String clientId,
List<String> filterAnnotations,
String path,
Class<?> configurationClass,
JsonFeatures jsonFeatures
) {
}
}
|
must
|
java
|
google__guice
|
core/test/com/googlecode/guice/JakartaTest.java
|
{
"start": 13165,
"end": 13759
}
|
class ____ implements Scope {
private int now = 0;
@Override
public <T> com.google.inject.Provider<T> scope(
Key<T> key, final com.google.inject.Provider<T> unscoped) {
return new com.google.inject.Provider<T>() {
private T value;
private int snapshotTime = -1;
@Override
public T get() {
if (snapshotTime != now) {
value = unscoped.get();
snapshotTime = now;
}
return value;
}
};
}
public void reset() {
now++;
}
}
@TestScoped
static
|
TestScope
|
java
|
assertj__assertj-core
|
assertj-core/src/main/java/org/assertj/core/error/ShouldHaveSize.java
|
{
"start": 904,
"end": 4936
}
|
class ____ extends BasicErrorMessageFactory {
private static final String SHOULD_HAVE_FILE_SIZE = "%nExpecting file%n"
+ " %s%n"
+ "to have a size of:%n"
+ " %s bytes%n"
+ "but had:%n"
+ " %s bytes";
private static final String SHOULD_HAVE_PATH_SIZE = "%nExpecting path%n"
+ " %s%n"
+ "to have a size of:%n"
+ " %s bytes%n"
+ "but had:%n"
+ " %s bytes";
/**
* Creates a new <code>{@link ShouldHaveSize}</code>.
* @param actual the actual value in the failed assertion.
* @param actualSize the size of {@code actual}.
* @param expectedSize the expected size.
* @param firstDimensionArrayIndex Index of first dimension of array
* @return the created {@code ErrorMessageFactory}.
*/
public static ErrorMessageFactory shouldHaveSize(Object actual, int actualSize, int expectedSize,
int firstDimensionArrayIndex) {
return new ShouldHaveSize(actual, actualSize, expectedSize, firstDimensionArrayIndex);
}
/**
* Creates a new <code>{@link ShouldHaveSize}</code>.
* @param actual the actual value in the failed assertion.
* @param actualSize the size of {@code actual}.
* @param expectedSize the expected size.
* @return the created {@code ErrorMessageFactory}.
*/
public static ErrorMessageFactory shouldHaveSize(Object actual, int actualSize, int expectedSize) {
return new ShouldHaveSize(actual, actualSize, expectedSize);
}
private ShouldHaveSize(Object actual, int actualSize, int expectedSize) {
// format the sizes in a standard way, otherwise if we use (for ex) an Hexadecimal representation
// it will format sizes in hexadecimal while we only want actual to be formatted in hexadecimal
super("%nExpected size: %s but was: %s in:%n%s".formatted(expectedSize, actualSize, "%s"), actual);
}
private ShouldHaveSize(Object actual, int actualSize, int expectedSize, int firstDimensionArrayIndex) {
// format the sizes in a standard way, otherwise if we use (for ex) an Hexadecimal representation
// it will format sizes in hexadecimal while we only want actual to be formatted in hexadecimal
// @format:off
super("%nExpected size: %s but was: %s in actual[%d]:%n%s".formatted(expectedSize, actualSize, firstDimensionArrayIndex, "%s"), actual);
// @format:on
}
/**
* Creates a new <code>{@link ShouldHaveSize}</code> for file size.
* @param actual the actual file in the failed assertion.
* @param expectedSize the expected file size.
* @return the created {@code ErrorMessageFactory}.
*/
public static ErrorMessageFactory shouldHaveSize(File actual, long expectedSize) {
return new ShouldHaveSize(actual, expectedSize);
}
private ShouldHaveSize(File actual, long expectedSize) {
super(SHOULD_HAVE_FILE_SIZE, actual, expectedSize, actual.length());
}
/**
* Creates a new <code>{@link ShouldHaveSize}</code> for Path file size
* @param actual The actual path file in the failed assertion
* @param expectedSize The expected size of the path file
* @return the created {@code ErrorMessageFactory}
* @throws IOException if an I/O error occurs
*/
public static ErrorMessageFactory shouldHaveSize(Path actual, long expectedSize) throws IOException {
return new ShouldHaveSize(actual, expectedSize);
}
private ShouldHaveSize(Path actual, long expectedSize) throws IOException {
super(SHOULD_HAVE_PATH_SIZE, actual, expectedSize, Files.size(actual));
}
}
|
ShouldHaveSize
|
java
|
alibaba__nacos
|
core/src/main/java/com/alibaba/nacos/core/remote/NacosRuntimeConnectionEjector.java
|
{
"start": 1293,
"end": 8531
}
|
class ____ extends RuntimeConnectionEjector {
public NacosRuntimeConnectionEjector() {
}
/**
* eject connections on runtime.
*/
public void doEject() {
// remove out dated connection
ejectOutdatedConnection();
// remove overload connection
ejectOverLimitConnection();
}
/**
* eject the outdated connection.
*/
private void ejectOutdatedConnection() {
try {
Loggers.CONNECTION.info("Connection check task start");
Map<String, Connection> connections = connectionManager.connections;
int totalCount = connections.size();
int currentSdkClientCount = connectionManager.currentSdkClientCount();
Loggers.CONNECTION.info("Long connection metrics detail ,Total count ={}, sdkCount={},clusterCount={}",
totalCount, currentSdkClientCount, (totalCount - currentSdkClientCount));
Set<String> outDatedConnections = new HashSet<>();
long now = System.currentTimeMillis();
//outdated connections collect.
for (Map.Entry<String, Connection> entry : connections.entrySet()) {
Connection client = entry.getValue();
if (now - client.getMetaInfo().getLastActiveTime() >= KEEP_ALIVE_TIME) {
outDatedConnections.add(client.getMetaInfo().getConnectionId());
} else if (client.getMetaInfo().pushQueueBlockTimesLastOver(300 * 1000)) {
outDatedConnections.add(client.getMetaInfo().getConnectionId());
}
}
// check out date connection
Loggers.CONNECTION.info("Out dated connection ,size={}", outDatedConnections.size());
if (CollectionUtils.isNotEmpty(outDatedConnections)) {
Set<String> successConnections = new HashSet<>();
final CountDownLatch latch = new CountDownLatch(outDatedConnections.size());
for (String outDateConnectionId : outDatedConnections) {
try {
Connection connection = connectionManager.getConnection(outDateConnectionId);
if (connection != null) {
ClientDetectionRequest clientDetectionRequest = new ClientDetectionRequest();
connection.asyncRequest(clientDetectionRequest, new RequestCallBack() {
@Override
public Executor getExecutor() {
return null;
}
@Override
public long getTimeout() {
return 5000L;
}
@Override
public void onResponse(Response response) {
latch.countDown();
if (response != null && response.isSuccess()) {
connection.freshActiveTime();
successConnections.add(outDateConnectionId);
}
}
@Override
public void onException(Throwable e) {
latch.countDown();
}
});
Loggers.CONNECTION.info("[{}]send connection active request ", outDateConnectionId);
} else {
latch.countDown();
}
} catch (ConnectionAlreadyClosedException e) {
latch.countDown();
} catch (Exception e) {
Loggers.CONNECTION.error("[{}]Error occurs when check client active detection ,error={}",
outDateConnectionId, e);
latch.countDown();
}
}
latch.await(5000L, TimeUnit.MILLISECONDS);
Loggers.CONNECTION.info("Out dated connection check successCount={}", successConnections.size());
for (String outDateConnectionId : outDatedConnections) {
if (!successConnections.contains(outDateConnectionId)) {
Loggers.CONNECTION.info("[{}]Unregister Out dated connection....", outDateConnectionId);
connectionManager.unregister(outDateConnectionId);
}
}
}
Loggers.CONNECTION.info("Connection check task end");
} catch (Throwable e) {
Loggers.CONNECTION.error("Error occurs during connection check... ", e);
}
}
/**
* eject the over limit connection.
*/
private void ejectOverLimitConnection() {
// if not count set, then give up
if (getLoadClient() > 0) {
try {
Loggers.CONNECTION.info("Connection overLimit check task start, loadCount={}, redirectAddress={}",
getLoadClient(), getRedirectAddress());
// check count
int currentConnectionCount = connectionManager.getCurrentConnectionCount();
int ejectingCount = currentConnectionCount - getLoadClient();
// if overload
if (ejectingCount > 0) {
// we may modify the connection map when connection reset
// avoid concurrent modified exception, create new set for ids snapshot
Set<String> ids = new HashSet<>(connectionManager.connections.keySet());
for (String id : ids) {
if (ejectingCount > 0) {
// check sdk
Connection connection = connectionManager.getConnection(id);
if (connection != null && connection.getMetaInfo().isSdkSource()) {
if (connectionManager.loadSingle(id, redirectAddress)) {
ejectingCount--;
}
}
} else {
// reach the count
break;
}
}
}
Loggers.CONNECTION.info("Connection overLimit task end, current loadCount={}, has ejected loadCont={}",
connectionManager.getCurrentConnectionCount(), getLoadClient() - ejectingCount);
} catch (Throwable e) {
Loggers.CONNECTION.error("Error occurs during connection overLimit... ", e);
}
// reset
setRedirectAddress(null);
setLoadClient(-1);
}
}
@Override
public String getName() {
return "nacos";
}
}
|
NacosRuntimeConnectionEjector
|
java
|
google__guava
|
android/guava-testlib/src/com/google/common/collect/testing/google/MultimapEntriesTester.java
|
{
"start": 2326,
"end": 5556
}
|
class ____<K, V> extends AbstractMultimapTester<K, V, Multimap<K, V>> {
public void testEntries() {
assertEqualIgnoringOrder(getSampleElements(), multimap().entries());
}
@CollectionSize.Require(absent = ZERO)
@MapFeature.Require(ALLOWS_NULL_KEYS)
public void testContainsEntryWithNullKeyPresent() {
initMultimapWithNullKey();
assertContains(multimap().entries(), mapEntry((K) null, getValueForNullKey()));
}
@MapFeature.Require(ALLOWS_NULL_KEY_QUERIES)
public void testContainsEntryWithNullKeyAbsent() {
assertFalse(multimap().entries().contains(mapEntry(null, v0())));
}
@CollectionSize.Require(absent = ZERO)
@MapFeature.Require(ALLOWS_NULL_VALUES)
public void testContainsEntryWithNullValuePresent() {
initMultimapWithNullValue();
assertContains(multimap().entries(), mapEntry(getKeyForNullValue(), (V) null));
}
@MapFeature.Require(ALLOWS_NULL_VALUE_QUERIES)
public void testContainsEntryWithNullValueAbsent() {
assertFalse(multimap().entries().contains(mapEntry(k0(), null)));
}
@CollectionSize.Require(absent = ZERO)
@MapFeature.Require(SUPPORTS_REMOVE)
public void testRemovePropagatesToMultimap() {
assertTrue(multimap().entries().remove(mapEntry(k0(), v0())));
expectMissing(mapEntry(k0(), v0()));
assertEquals(getNumElements() - 1, multimap().size());
assertFalse(multimap().containsEntry(k0(), v0()));
}
@CollectionSize.Require(absent = ZERO)
@MapFeature.Require(SUPPORTS_REMOVE)
public void testRemoveAllPropagatesToMultimap() {
assertTrue(multimap().entries().removeAll(singleton(mapEntry(k0(), v0()))));
expectMissing(mapEntry(k0(), v0()));
assertEquals(getNumElements() - 1, multimap().size());
assertFalse(multimap().containsEntry(k0(), v0()));
}
@CollectionSize.Require(absent = ZERO)
@MapFeature.Require(SUPPORTS_REMOVE)
/*
* We are comparing Multimaps of the same type, so as long as they have value collections that
* implement equals() (as with ListMultimap or SetMultimap, as opposed to a QueueMultimap or
* something), our equality check is value-based.
*/
@SuppressWarnings("UndefinedEquals")
public void testRetainAllPropagatesToMultimap() {
multimap().entries().retainAll(singleton(mapEntry(k0(), v0())));
assertEquals(getSubjectGenerator().create(mapEntry(k0(), v0())), multimap());
assertEquals(1, multimap().size());
assertTrue(multimap().containsEntry(k0(), v0()));
}
@CollectionSize.Require(ONE)
@CollectionFeature.Require(SUPPORTS_ITERATOR_REMOVE)
public void testIteratorRemovePropagatesToMultimap() {
Iterator<Entry<K, V>> iterator = multimap().entries().iterator();
assertEquals(mapEntry(k0(), v0()), iterator.next());
iterator.remove();
assertTrue(multimap().isEmpty());
}
@CollectionSize.Require(absent = ZERO)
@MapFeature.Require(SUPPORTS_REMOVE)
public void testEntriesRemainValidAfterRemove() {
Iterator<Entry<K, V>> iterator = multimap().entries().iterator();
Entry<K, V> entry = iterator.next();
K key = entry.getKey();
V value = entry.getValue();
multimap().removeAll(key);
assertEquals(key, entry.getKey());
assertEquals(value, entry.getValue());
}
}
|
MultimapEntriesTester
|
java
|
resilience4j__resilience4j
|
resilience4j-cache/src/main/java/io/github/resilience4j/cache/event/CacheOnMissEvent.java
|
{
"start": 730,
"end": 1330
}
|
class ____<K> extends AbstractCacheEvent {
private final K cacheKey;
public CacheOnMissEvent(String cacheName, K cacheKey) {
super(cacheName);
this.cacheKey = cacheKey;
}
@Override
public Type getEventType() {
return Type.CACHE_MISS;
}
public K getCacheKey() {
return cacheKey;
}
@Override
public String toString() {
return String.format("%s: Cache '%s' recorded a cache miss on cache key '%s'.",
getCreationTime(),
getCacheName(),
getCacheKey().toString());
}
}
|
CacheOnMissEvent
|
java
|
apache__dubbo
|
dubbo-config/dubbo-config-spring6/src/test/java/org/apache/dubbo/config/spring6/utils/HelloResponse.java
|
{
"start": 887,
"end": 1203
}
|
class ____ implements Serializable {
private String response;
public HelloResponse(String response) {
this.response = response;
}
public String getResponse() {
return response;
}
public void setResponse(String response) {
this.response = response;
}
}
|
HelloResponse
|
java
|
spring-projects__spring-framework
|
spring-web/src/jmh/java/org/springframework/http/support/HeadersAdaptersBaseline.java
|
{
"start": 11345,
"end": 11943
}
|
class ____ implements Entry<String, List<String>> {
private final String key;
HeaderEntry(String key) {
this.key = key;
}
@Override
public String getKey() {
return this.key;
}
@Override
public List<String> getValue() {
return headers.getValuesList(this.key);
}
@Override
public List<String> setValue(List<String> value) {
HttpFields.Mutable mutableHttpFields = mutableFields();
List<String> previousValues = headers.getValuesList(this.key);
mutableHttpFields.put(this.key, value);
return previousValues;
}
}
private
|
HeaderEntry
|
java
|
apache__camel
|
components/camel-aws/camel-aws-xray/src/main/java/org/apache/camel/component/aws/xray/decorators/http/AbstractHttpSegmentDecorator.java
|
{
"start": 1073,
"end": 3673
}
|
class ____ extends AbstractSegmentDecorator {
public static final String POST_METHOD = "POST";
public static final String GET_METHOD = "GET";
@Override
public String getOperationName(Exchange exchange, Endpoint endpoint) {
return getHttpMethod(exchange, endpoint);
}
@Override
public void pre(Entity segment, Exchange exchange, Endpoint endpoint) {
super.pre(segment, exchange, endpoint);
String httpUrl = getHttpUrl(exchange, endpoint);
if (httpUrl != null) {
segment.putMetadata("http.url", httpUrl);
}
segment.putMetadata("http.method", getHttpMethod(exchange, endpoint));
}
@Override
public void post(Entity segment, Exchange exchange, Endpoint endpoint) {
super.post(segment, exchange, endpoint);
Object responseCode = exchange.getMessage().getHeader(Exchange.HTTP_RESPONSE_CODE);
if (responseCode instanceof Integer) {
segment.putMetadata("htt.response.code", responseCode);
}
}
protected String getHttpMethod(Exchange exchange, Endpoint endpoint) {
// 1. Use method provided in header.
Object method = exchange.getIn().getHeader(Exchange.HTTP_METHOD);
if (method instanceof String) {
return (String) method;
}
// 2. GET if query string is provided in header.
if (exchange.getIn().getHeader(Exchange.HTTP_QUERY) != null) {
return GET_METHOD;
}
// 3. GET if endpoint is configured with a query string.
if (endpoint.getEndpointUri().indexOf('?') != -1) {
return GET_METHOD;
}
// 4. POST if there is data to send (body is not null).
if (exchange.getIn().getBody() != null) {
return POST_METHOD;
}
// 5. GET otherwise.
return GET_METHOD;
}
protected String getHttpUrl(Exchange exchange, Endpoint endpoint) {
Object url = exchange.getIn().getHeader(Exchange.HTTP_URL);
if (url instanceof String) {
return (String) url;
} else {
Object uri = exchange.getIn().getHeader(Exchange.HTTP_URI);
if (uri instanceof String) {
return (String) uri;
} else {
// Try to obtain from endpoint
int index = endpoint.getEndpointUri().lastIndexOf("http:");
if (index != -1) {
return endpoint.getEndpointUri().substring(index);
}
}
}
return null;
}
}
|
AbstractHttpSegmentDecorator
|
java
|
processing__processing4
|
java/test/processing/mode/java/preproc/MissingCurlyMessageSimplifierStrategyTest.java
|
{
"start": 661,
"end": 845
}
|
class ____ {");
Assert.assertTrue(msg.isPresent());
}
@Test
public void testNotPresent() {
Optional<PdeIssueEmitter.IssueMessageSimplification> msg = strategy.simplify("
|
Test
|
java
|
apache__dubbo
|
dubbo-config/dubbo-config-spring/src/main/java/org/apache/dubbo/config/spring/context/event/DubboApplicationStateEvent.java
|
{
"start": 1102,
"end": 1835
}
|
class ____ extends ApplicationEvent {
private final DeployState state;
private Throwable cause;
public DubboApplicationStateEvent(ApplicationModel applicationModel, DeployState state) {
super(applicationModel);
this.state = state;
}
public DubboApplicationStateEvent(ApplicationModel applicationModel, DeployState state, Throwable cause) {
super(applicationModel);
this.state = state;
this.cause = cause;
}
public ApplicationModel getApplicationModel() {
return (ApplicationModel) getSource();
}
public DeployState getState() {
return state;
}
public Throwable getCause() {
return cause;
}
}
|
DubboApplicationStateEvent
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/api/object/ObjectAssert_usingDefaultComparator_Test.java
|
{
"start": 1059,
"end": 1437
}
|
class ____ extends ObjectAssertBaseTest {
@Override
protected ObjectAssert<Jedi> invoke_api_method() {
return assertions.usingComparator(alwaysEqual())
.usingDefaultComparator();
}
@Override
protected void verify_internal_effects() {
assertThat(getObjects(assertions).getComparator()).isNull();
}
}
|
ObjectAssert_usingDefaultComparator_Test
|
java
|
elastic__elasticsearch
|
server/src/internalClusterTest/java/org/elasticsearch/action/fieldcaps/FieldCapsWithFilterIT.java
|
{
"start": 2125,
"end": 3466
}
|
class ____ extends InternalEngine {
EngineWithExposingTimestamp(EngineConfig engineConfig) {
super(engineConfig);
assert IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.get(config().getIndexSettings().getSettings()) : "require read-only index";
}
@Override
public ShardLongFieldRange getRawFieldRange(String field) {
try (Searcher searcher = acquireSearcher("test")) {
final DirectoryReader directoryReader = searcher.getDirectoryReader();
final byte[] minPackedValue = PointValues.getMinPackedValue(directoryReader, field);
final byte[] maxPackedValue = PointValues.getMaxPackedValue(directoryReader, field);
if (minPackedValue == null || maxPackedValue == null) {
assert minPackedValue == null && maxPackedValue == null
: Arrays.toString(minPackedValue) + "-" + Arrays.toString(maxPackedValue);
return ShardLongFieldRange.EMPTY;
}
return ShardLongFieldRange.of(LongPoint.decodeDimension(minPackedValue, 0), LongPoint.decodeDimension(maxPackedValue, 0));
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
}
public static
|
EngineWithExposingTimestamp
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/Olingo4EndpointBuilderFactory.java
|
{
"start": 48676,
"end": 54569
}
|
interface ____ extends EndpointProducerBuilder {
default Olingo4EndpointProducerBuilder basic() {
return (Olingo4EndpointProducerBuilder) this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedOlingo4EndpointProducerBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedOlingo4EndpointProducerBuilder lazyStartProducer(String lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Custom HTTP async client builder for more complex HTTP client
* configuration, overrides connectionTimeout, socketTimeout, proxy and
* sslContext. Note that a socketTimeout MUST be specified in the
* builder, otherwise OData requests could block indefinitely.
*
* The option is a:
* <code>org.apache.http.impl.nio.client.HttpAsyncClientBuilder</code>
* type.
*
* Group: advanced
*
* @param httpAsyncClientBuilder the value to set
* @return the dsl builder
*/
default AdvancedOlingo4EndpointProducerBuilder httpAsyncClientBuilder(org.apache.http.impl.nio.client.HttpAsyncClientBuilder httpAsyncClientBuilder) {
doSetProperty("httpAsyncClientBuilder", httpAsyncClientBuilder);
return this;
}
/**
* Custom HTTP async client builder for more complex HTTP client
* configuration, overrides connectionTimeout, socketTimeout, proxy and
* sslContext. Note that a socketTimeout MUST be specified in the
* builder, otherwise OData requests could block indefinitely.
*
* The option will be converted to a
* <code>org.apache.http.impl.nio.client.HttpAsyncClientBuilder</code>
* type.
*
* Group: advanced
*
* @param httpAsyncClientBuilder the value to set
* @return the dsl builder
*/
default AdvancedOlingo4EndpointProducerBuilder httpAsyncClientBuilder(String httpAsyncClientBuilder) {
doSetProperty("httpAsyncClientBuilder", httpAsyncClientBuilder);
return this;
}
/**
* Custom HTTP client builder for more complex HTTP client
* configuration, overrides connectionTimeout, socketTimeout, proxy and
* sslContext. Note that a socketTimeout MUST be specified in the
* builder, otherwise OData requests could block indefinitely.
*
* The option is a:
* <code>org.apache.http.impl.client.HttpClientBuilder</code> type.
*
* Group: advanced
*
* @param httpClientBuilder the value to set
* @return the dsl builder
*/
default AdvancedOlingo4EndpointProducerBuilder httpClientBuilder(org.apache.http.impl.client.HttpClientBuilder httpClientBuilder) {
doSetProperty("httpClientBuilder", httpClientBuilder);
return this;
}
/**
* Custom HTTP client builder for more complex HTTP client
* configuration, overrides connectionTimeout, socketTimeout, proxy and
* sslContext. Note that a socketTimeout MUST be specified in the
* builder, otherwise OData requests could block indefinitely.
*
* The option will be converted to a
* <code>org.apache.http.impl.client.HttpClientBuilder</code> type.
*
* Group: advanced
*
* @param httpClientBuilder the value to set
* @return the dsl builder
*/
default AdvancedOlingo4EndpointProducerBuilder httpClientBuilder(String httpClientBuilder) {
doSetProperty("httpClientBuilder", httpClientBuilder);
return this;
}
}
/**
* Builder for endpoint for the Olingo4 component.
*/
public
|
AdvancedOlingo4EndpointProducerBuilder
|
java
|
grpc__grpc-java
|
binder/src/main/java/io/grpc/binder/internal/BinderClientTransport.java
|
{
"start": 15613,
"end": 16192
}
|
interface ____ {
/**
* Notifies the implementation that the binding has succeeded and we are now connected to the
* server's "endpoint" which can be reached at 'endpointBinder'.
*/
@MainThread
void onBound(OneWayBinderProxy endpointBinder);
/** Notifies the implementation that we've received a valid SETUP_TRANSPORT transaction. */
@BinderThread
void handleSetupTransport();
/** Notifies the implementation that the SecurityPolicy check of the server succeeded. */
void onServerAuthorizationOk();
}
private final
|
ClientHandshake
|
java
|
google__error-prone
|
check_api/src/main/java/com/google/errorprone/apply/AndroidImportOrganizer.java
|
{
"start": 1973,
"end": 4520
}
|
class ____ implements ImportOrganizer {
private static final String ANDROID = "android";
private static final String COM_ANDROID = "com.android";
private static final String JAVA = "java";
private static final String JAVAX = "javax";
private static final ImmutableSet<String> SPECIAL_ROOTS =
ImmutableSet.of(ANDROID, COM_ANDROID, JAVA, JAVAX);
private final StaticOrder order;
AndroidImportOrganizer(StaticOrder order) {
this.order = order;
}
@Override
public OrganizedImports organizeImports(List<Import> imports) {
OrganizedImports organized = new OrganizedImports();
// Group into static and non-static.
Map<Boolean, List<Import>> partionedByStatic =
imports.stream().collect(Collectors.partitioningBy(Import::isStatic));
for (Boolean key : order.groupOrder()) {
organizePartition(organized, partionedByStatic.get(key));
}
return organized;
}
private static void organizePartition(OrganizedImports organized, List<Import> imports) {
Map<String, ImmutableSortedSet<Import>> groupedByRoot =
imports.stream()
.collect(
Collectors.groupingBy(
// Group by root package.
AndroidImportOrganizer::rootPackage,
// Ensure that the results are sorted.
TreeMap::new,
// Each group is a set sorted by type.
toImmutableSortedSet(Comparator.comparing(Import::getType))));
// Get the third party roots by removing the roots that are handled specially and sorting.
ImmutableSortedSet<String> thirdParty =
groupedByRoot.keySet().stream()
.filter(r -> !SPECIAL_ROOTS.contains(r))
.collect(toImmutableSortedSet(Ordering.natural()));
// Construct a list of the possible roots in the correct order.
ImmutableList<String> roots =
ImmutableList.<String>builder()
.add(ANDROID)
.add(COM_ANDROID)
.addAll(thirdParty)
.add(JAVA)
.add(JAVAX)
.build();
organized.addGroups(groupedByRoot, roots);
}
private static String rootPackage(Import anImport) {
String type = anImport.getType();
if (type.startsWith("com.android.")) {
return "com.android";
}
int index = type.indexOf('.');
if (index == -1) {
// Treat the default package as if it has an empty root.
return "";
} else {
return type.substring(0, index);
}
}
}
|
AndroidImportOrganizer
|
java
|
google__guice
|
core/test/com/google/inject/MembersInjectorTest.java
|
{
"start": 13380,
"end": 13478
}
|
class ____<T> {
@Inject B b;
@Inject T t;
@Inject
void doNothing() {}
}
static
|
A
|
java
|
junit-team__junit5
|
junit-platform-launcher/src/main/java/org/junit/platform/launcher/LauncherDiscoveryRequest.java
|
{
"start": 1966,
"end": 2385
}
|
interface ____ not intended to be implemented by clients.
*
* @since 1.0
* @see org.junit.platform.launcher.core.LauncherDiscoveryRequestBuilder
* @see EngineDiscoveryRequest
* @see EngineFilter
* @see ConfigurationParameters
* @see DiscoverySelector
* @see DiscoveryFilter
* @see PostDiscoveryFilter
* @see #getEngineFilters()
* @see #getPostDiscoveryFilters()
*/
@API(status = STABLE, since = "1.0")
public
|
is
|
java
|
spring-projects__spring-framework
|
spring-beans/src/test/java/org/springframework/beans/factory/annotation/LookupAnnotationTests.java
|
{
"start": 8063,
"end": 8220
}
|
class ____ {
@Lookup
public abstract NumberStore<Double> getDoubleStore();
@Lookup
public abstract NumberStore<Float> getFloatStore();
}
}
|
NumberBean
|
java
|
apache__camel
|
components/camel-netty-http/src/test/java/org/apache/camel/component/netty/http/NettyHttpCustomOptionsTest.java
|
{
"start": 1055,
"end": 1767
}
|
class ____ extends BaseNettyTest {
final String expectedResponse = "response";
@Test
public void shouldReturnCustomResponseForOptions() {
String response = template.requestBodyAndHeader("netty-http:http://localhost:{{port}}/foo", "", HTTP_METHOD, "OPTIONS",
String.class);
assertEquals(expectedResponse, response);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("netty-http:http://0.0.0.0:{{port}}/foo?httpMethodRestrict=OPTIONS").setBody().constant(expectedResponse);
}
};
}
}
|
NettyHttpCustomOptionsTest
|
java
|
bumptech__glide
|
annotation/compiler/test/src/test/java/com/bumptech/glide/annotation/compiler/test/TestDescription.java
|
{
"start": 254,
"end": 510
}
|
class ____ extends TestWatcher {
private Description description;
@Override
protected void starting(Description description) {
this.description = description;
}
public Description getDescription() {
return description;
}
}
|
TestDescription
|
java
|
apache__flink
|
flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/runtime/stream/sql/FunctionITCase.java
|
{
"start": 77774,
"end": 78399
}
|
class ____ extends ScalarFunction {
public Integer eval(Integer... args) {
for (Integer o : args) {
if (o != null) {
return o;
}
}
return null;
}
@Override
public TypeInference getTypeInference(DataTypeFactory typeFactory) {
return TypeInference.newBuilder()
.outputTypeStrategy(TypeStrategies.argument(0))
.build();
}
}
/** Function for testing variable arguments. */
@SuppressWarnings("unused")
public static
|
CustomScalarFunction
|
java
|
reactor__reactor-core
|
reactor-core/src/test/java/reactor/core/publisher/MonoErrorSuppliedTest.java
|
{
"start": 1031,
"end": 4171
}
|
class ____ {
@Test
public void normal() {
StepVerifier.create(Mono.error(() -> new Exception("test")))
.verifyErrorMessage("test");
}
@Test
public void throwOnBlock() {
assertThatExceptionOfType(IllegalStateException.class)
.isThrownBy(() -> new MonoErrorSupplied<>(() -> new IllegalStateException("boom"))
.block()
)
.withMessage("boom");
}
@Test
public void throwOnTimeoutBlock() {
assertThatExceptionOfType(IllegalStateException.class)
.isThrownBy(() -> new MonoErrorSupplied<>(() -> new IllegalStateException("boom"))
.block(Duration.ofMillis(100))
)
.withMessage("boom");
}
@Test
public void throwOnCall() {
assertThatExceptionOfType(IllegalStateException.class)
.isThrownBy(() -> new MonoErrorSupplied<>(() -> new IllegalStateException("boom"))
.call()
)
.withMessage("boom");
}
@Test
public void lazilyEvaluatedSubscribe() {
AtomicInteger count = new AtomicInteger();
Mono<Object> error = Mono.error(() -> new IllegalStateException("boom" + count.incrementAndGet()));
assertThat(count).as("no op before subscribe").hasValue(0);
StepVerifier.create(error.retry(3))
.verifyErrorMessage("boom4");
}
@Test
public void lazilyEvaluatedBlock() {
AtomicInteger count = new AtomicInteger();
Mono<Object> error = Mono.error(() -> new IllegalStateException("boom" + count.incrementAndGet()));
assertThat(count).as("no op before block").hasValue(0);
assertThatExceptionOfType(IllegalStateException.class)
.isThrownBy(error::block)
.withMessage("boom1");
assertThat(count).as("after block").hasValue(1);
}
@Test
public void lazilyEvaluatedBlockTimeout() {
AtomicInteger count = new AtomicInteger();
Mono<Object> error = Mono.error(() -> new IllegalStateException("boom" + count.incrementAndGet()));
assertThat(count).as("no op before block").hasValue(0);
assertThatExceptionOfType(IllegalStateException.class)
.isThrownBy(() -> error.block(Duration.ofMillis(100)))
.withMessage("boom1");
assertThat(count).as("after block").hasValue(1);
}
@Test
public void lazilyEvaluatedCall() {
AtomicInteger count = new AtomicInteger();
MonoErrorSupplied<Object> error = new MonoErrorSupplied<>(() -> new IllegalStateException("boom" + count.incrementAndGet()));
assertThat(count).as("no op before call").hasValue(0);
assertThatExceptionOfType(IllegalStateException.class)
.isThrownBy(error::call)
.withMessage("boom1");
assertThat(count).as("after call").hasValue(1);
}
@Test
public void supplierMethod() {
StepVerifier.create(Mono.error(illegalStateExceptionSupplier()))
.verifyErrorSatisfies(e -> assertThat(e).isInstanceOf(IllegalStateException.class)
.hasMessage("boom"));
}
private Supplier<IllegalStateException> illegalStateExceptionSupplier() {
return () -> new IllegalStateException("boom");
}
@Test
public void scanOperator(){
MonoErrorSupplied<?> test = new MonoErrorSupplied<>(() -> new NullPointerException());
assertThat(test.scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.SYNC);
}
}
|
MonoErrorSuppliedTest
|
java
|
resilience4j__resilience4j
|
resilience4j-metrics/src/main/java/io/github/resilience4j/metrics/Timer.java
|
{
"start": 8389,
"end": 8691
}
|
interface ____ {
/**
* Stops the Timer and records a failed call. This method must be invoked when a call
* failed.
*/
void onError();
/**
* Stops the Timer and records a successful call.
*/
void onSuccess();
}
|
Context
|
java
|
alibaba__nacos
|
console/src/main/java/com/alibaba/nacos/console/proxy/config/ConfigProxy.java
|
{
"start": 1733,
"end": 1823
}
|
class ____ handling configuration operations.
*
* @author zhangyukun
*/
@Service
public
|
for
|
java
|
spring-projects__spring-framework
|
spring-beans/src/main/java/org/springframework/beans/factory/xml/XmlReaderContext.java
|
{
"start": 3225,
"end": 3468
}
|
class ____.
* @see XmlBeanDefinitionReader#setResourceLoader
* @see ResourceLoader#getClassLoader()
*/
public final @Nullable ResourceLoader getResourceLoader() {
return this.reader.getResourceLoader();
}
/**
* Return the bean
|
loader
|
java
|
junit-team__junit5
|
junit-platform-commons/src/main/java/org/junit/platform/commons/support/AnnotationSupport.java
|
{
"start": 15494,
"end": 16337
}
|
interface ____ which to find the fields; never {@code null}
* @param annotationType the annotation type to search for; never {@code null}
* @return the list of all such fields found; neither {@code null} nor mutable
* @since 1.4
* @see Class#getDeclaredFields()
* @see #findPublicAnnotatedFields(Class, Class, Class)
* @see #findAnnotatedFields(Class, Class, Predicate, HierarchyTraversalMode)
* @see ReflectionSupport#findFields(Class, Predicate, HierarchyTraversalMode)
* @see ReflectionSupport#tryToReadFieldValue(Field, Object)
*/
@API(status = MAINTAINED, since = "1.4")
public static List<Field> findAnnotatedFields(Class<?> clazz, Class<? extends Annotation> annotationType) {
return findAnnotatedFields(clazz, annotationType, field -> true);
}
/**
* Find all distinct {@linkplain Field fields} of the supplied
|
in
|
java
|
hibernate__hibernate-orm
|
hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/onetomany/inverseToSuperclass/DetailSuperclass.java
|
{
"start": 230,
"end": 538
}
|
class ____ {
private long id;
private Root parent;
public DetailSuperclass() {
}
public long getId() {
return id;
}
public void setId(long id) {
this.id = id;
}
public Root getParent() {
return parent;
}
public void setParent(Root parent) {
this.parent = parent;
}
}
|
DetailSuperclass
|
java
|
spring-projects__spring-boot
|
module/spring-boot-http-client/src/main/java/org/springframework/boot/http/client/autoconfigure/PropertiesApiVersionInserter.java
|
{
"start": 1097,
"end": 3313
}
|
class ____ implements ApiVersionInserter {
static final PropertiesApiVersionInserter EMPTY = new PropertiesApiVersionInserter(new ApiVersionInserter() {
});
private final ApiVersionInserter delegate;
private PropertiesApiVersionInserter(ApiVersionInserter delegate) {
this.delegate = delegate;
}
@Override
public URI insertVersion(Object version, URI uri) {
return this.delegate.insertVersion(version, uri);
}
@Override
public void insertVersion(Object version, HttpHeaders headers) {
this.delegate.insertVersion(version, headers);
}
/**
* Factory method to get a new {@link PropertiesApiVersionInserter} for the given
* properties.
* @param properties the API version properties
* @return an {@link PropertiesApiVersionInserter} configured from the properties
*/
public static PropertiesApiVersionInserter get(ApiversionProperties.Insert properties) {
Builder builder = builder(properties);
return (builder != null) ? new PropertiesApiVersionInserter(builder.build()) : EMPTY;
}
/**
* Factory method to create a new
* {@link org.springframework.web.client.ApiVersionInserter.Builder builder} from the
* given properties, if there are any.
* @param properties the API version properties
* @return a builder configured from the properties or {@code null} if no properties
* were mapped
*/
private static ApiVersionInserter.@Nullable Builder builder(ApiversionProperties.Insert properties) {
Assert.notNull(properties, "'properties' must not be null");
PropertyMapper map = PropertyMapper.get();
ApiVersionInserter.Builder builder = ApiVersionInserter.builder();
Counter counter = new Counter();
map.from(properties::getHeader).whenHasText().as(counter::counted).to(builder::useHeader);
map.from(properties::getQueryParameter).whenHasText().as(counter::counted).to(builder::useQueryParam);
map.from(properties::getPathSegment).as(counter::counted).to(builder::usePathSegment);
map.from(properties::getMediaTypeParameter).as(counter::counted).to(builder::useMediaTypeParam);
return (!counter.isEmpty()) ? builder : null;
}
/**
* Internal counter used to track if properties were applied.
*/
private static final
|
PropertiesApiVersionInserter
|
java
|
spring-projects__spring-framework
|
spring-core/src/main/java/org/springframework/core/io/support/PathMatchingResourcePatternResolver.java
|
{
"start": 7467,
"end": 7981
}
|
class ____
* hierarchy, and then off each resource the same {@code PathMatcher} resolution
* strategy described above is used for the wildcard sub pattern.
*
* <h3>Other Notes</h3>
*
* <p>As of Spring Framework 6.0, if {@link #getResources(String)} is invoked with
* a location pattern using the "{@code classpath*:}" prefix it will first search
* all modules in the {@linkplain ModuleLayer#boot() boot layer}, excluding
* {@linkplain ModuleFinder#ofSystem() system modules}. It will then search the
*
|
loader
|
java
|
spring-projects__spring-framework
|
spring-webflux/src/test/java/org/springframework/web/reactive/result/method/annotation/RequestMappingIntegrationTests.java
|
{
"start": 2630,
"end": 5048
}
|
class ____ extends AbstractRequestMappingIntegrationTests {
@Override
protected ApplicationContext initApplicationContext() {
AnnotationConfigApplicationContext wac = new AnnotationConfigApplicationContext();
wac.register(WebConfig.class, TestRestController.class, LocalConfig.class);
wac.refresh();
return wac;
}
@ParameterizedHttpServerTest // gh-30293
void emptyMapping(HttpServer httpServer) throws Exception {
startServer(httpServer);
String url = "http://localhost:" + this.port;
assertThat(getRestTemplate().getForObject(url, String.class)).isEqualTo("root");
url += "/";
assertThat(getRestTemplate().getForObject(url, String.class)).isEqualTo("root");
assertThat(getApplicationContext().getBean(TestExecutor.class).invocationCount.get()).isEqualTo(4);
assertThat(getApplicationContext().getBean(TestPredicate.class).invocationCount.get()).isEqualTo(4);
}
@ParameterizedHttpServerTest
void httpHead(HttpServer httpServer) throws Exception {
startServer(httpServer);
String url = "http://localhost:" + this.port + "/text";
HttpHeaders headers = getRestTemplate().headForHeaders(url);
String contentType = headers.getFirst("Content-Type");
assertThat(contentType).isNotNull();
}
@ParameterizedHttpServerTest
void forwardedHeaders(HttpServer httpServer) throws Exception {
startServer(httpServer);
// One integration test to verify triggering of Forwarded header support.
// More fine-grained tests in ForwardedHeaderTransformerTests.
RequestEntity<Void> request = RequestEntity
.get(URI.create("http://localhost:" + this.port + "/uri"))
.header("Forwarded", "host=84.198.58.199;proto=https")
.build();
ResponseEntity<String> entity = getRestTemplate().exchange(request, String.class);
assertThat(entity.getBody()).isEqualTo("https://84.198.58.199/uri");
}
@ParameterizedHttpServerTest
void stream(HttpServer httpServer) throws Exception {
startServer(httpServer);
int[] expected = {0, 1, 2, 3, 4};
assertThat(performGet("/stream", new HttpHeaders(), int[].class).getBody()).isEqualTo(expected);
}
@ParameterizedHttpServerTest // gh-33739
void requestBodyAndDelayedResponse(HttpServer httpServer) throws Exception {
startServer(httpServer);
assertThat(performPost("/post", new HttpHeaders(), "text", String.class).getBody()).isEqualTo("text");
}
@Configuration
@EnableWebFlux
static
|
RequestMappingIntegrationTests
|
java
|
spring-projects__spring-boot
|
module/spring-boot-mustache/src/test/java/org/springframework/boot/mustache/autoconfigure/MustacheAutoConfigurationServletIntegrationTests.java
|
{
"start": 3442,
"end": 4525
}
|
class ____ {
@RequestMapping("/")
String home(Map<String, Object> model) {
model.put("time", new Date());
model.put("message", "Hello World");
model.put("title", "Hello App");
return "home";
}
@RequestMapping("/partial")
String layout(Map<String, Object> model) {
model.put("time", new Date());
model.put("message", "Hello World");
model.put("title", "Hello App");
return "partial";
}
@Bean
MustacheViewResolver viewResolver() {
Mustache.Compiler compiler = Mustache.compiler()
.withLoader(new MustacheResourceTemplateLoader(
"classpath:/org/springframework/boot/mustache/autoconfigure/", ".html"));
MustacheViewResolver resolver = new MustacheViewResolver(compiler);
resolver.setPrefix("classpath:/org/springframework/boot/mustache/autoconfigure/");
resolver.setSuffix(".html");
return resolver;
}
static void main(String[] args) {
SpringApplication.run(Application.class, args);
}
}
@Configuration(proxyBeanMethods = false)
@Import({ TomcatServletWebServerAutoConfiguration.class })
static
|
Application
|
java
|
apache__rocketmq
|
namesrv/src/test/java/org/apache/rocketmq/namesrv/routeinfo/RouteInfoManagerBrokerRegisterTest.java
|
{
"start": 1328,
"end": 4750
}
|
class ____ extends RouteInfoManagerTestBase {
private static RouteInfoManager routeInfoManager;
public static String clusterName = "cluster";
public static String brokerPrefix = "broker";
public static String topicPrefix = "topic";
public static int brokerPerName = 3;
public static int brokerNameNumber = 3;
public static RouteInfoManagerTestBase.Cluster cluster;
@Before
public void setup() {
routeInfoManager = new RouteInfoManager(new NamesrvConfig(), null);
cluster = registerCluster(routeInfoManager,
clusterName,
brokerPrefix,
brokerNameNumber,
brokerPerName,
topicPrefix,
10);
}
@After
public void terminate() {
routeInfoManager.printAllPeriodically();
for (BrokerData bd : cluster.brokerDataMap.values()) {
unregisterBrokerAll(routeInfoManager, bd);
}
}
// @Test
// public void testScanNotActiveBroker() {
// for (int j = 0; j < brokerNameNumber; j++) {
// String brokerName = getBrokerName(brokerPrefix, j);
//
// for (int i = 0; i < brokerPerName; i++) {
// String brokerAddr = getBrokerAddr(clusterName, brokerName, i);
//
// // set not active
// routeInfoManager.updateBrokerInfoUpdateTimestamp(brokerAddr, 0);
//
// assertEquals(1, routeInfoManager.scanNotActiveBroker());
// }
// }
//
// }
@Test
public void testMasterChangeFromSlave() {
String topicName = getTopicName(topicPrefix, 0);
String brokerName = getBrokerName(brokerPrefix, 0);
String originMasterAddr = getBrokerAddr(clusterName, brokerName, MixAll.MASTER_ID);
TopicRouteData topicRouteData = routeInfoManager.pickupTopicRouteData(topicName);
BrokerData brokerDataOrigin = findBrokerDataByBrokerName(topicRouteData.getBrokerDatas(), brokerName);
// check origin master address
Assert.assertEquals(brokerDataOrigin.getBrokerAddrs().get(MixAll.MASTER_ID), originMasterAddr);
// master changed
String newMasterAddr = getBrokerAddr(clusterName, brokerName, 1);
registerBrokerWithTopicConfig(routeInfoManager,
clusterName,
newMasterAddr,
brokerName,
MixAll.MASTER_ID,
newMasterAddr,
cluster.topicConfig,
new ArrayList<>());
topicRouteData = routeInfoManager.pickupTopicRouteData(topicName);
brokerDataOrigin = findBrokerDataByBrokerName(topicRouteData.getBrokerDatas(), brokerName);
// check new master address
assertEquals(brokerDataOrigin.getBrokerAddrs().get(MixAll.MASTER_ID), newMasterAddr);
}
@Test
public void testUnregisterBroker() {
String topicName = getTopicName(topicPrefix, 0);
String brokerName = getBrokerName(brokerPrefix, 0);
long unregisterBrokerId = 2;
unregisterBroker(routeInfoManager, cluster.brokerDataMap.get(brokerName), unregisterBrokerId);
TopicRouteData topicRouteData = routeInfoManager.pickupTopicRouteData(topicName);
HashMap<Long, String> brokerAddrs = findBrokerDataByBrokerName(topicRouteData.getBrokerDatas(), brokerName).getBrokerAddrs();
assertFalse(brokerAddrs.containsKey(unregisterBrokerId));
}
}
|
RouteInfoManagerBrokerRegisterTest
|
java
|
elastic__elasticsearch
|
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java
|
{
"start": 5343,
"end": 5481
}
|
class ____ the passwords using automatically generated random passwords.
* The passwords will be printed to the console.
*/
|
sets
|
java
|
spring-projects__spring-security
|
webauthn/src/test/java/org/springframework/security/web/webauthn/registration/WebAuthnRegistrationRequestJacksonTests.java
|
{
"start": 1799,
"end": 4102
}
|
class ____ {
private JsonMapper mapper;
@BeforeEach
void setup() {
this.mapper = JsonMapper.builder().addModule(new WebauthnJacksonModule()).build();
}
@Test
void readRelyingPartyRequest() throws Exception {
String json = """
{
"publicKey": {
"label": "Cell Phone",
"credential": %s
}
}
""".formatted(PublicKeyCredentialJson.PUBLIC_KEY_JSON);
WebAuthnRegistrationFilter.WebAuthnRegistrationRequest registrationRequest = this.mapper.readValue(json,
WebAuthnRegistrationFilter.WebAuthnRegistrationRequest.class);
ImmutableAuthenticationExtensionsClientOutputs clientExtensionResults = new ImmutableAuthenticationExtensionsClientOutputs(
new CredentialPropertiesOutput(false));
PublicKeyCredential<AuthenticatorAttestationResponse> credential = PublicKeyCredential.builder()
.id("AX6nVVERrH6opMafUGn3Z9EyNEy6cftfBKV_2YxYl1jdW8CSJxMKGXFV3bnrKTiMSJeInkG7C6B2lPt8E5i3KaM")
.rawId(Bytes
.fromBase64("AX6nVVERrH6opMafUGn3Z9EyNEy6cftfBKV_2YxYl1jdW8CSJxMKGXFV3bnrKTiMSJeInkG7C6B2lPt8E5i3KaM"))
.response(AuthenticatorAttestationResponse.builder()
.attestationObject(Bytes.fromBase64(
"o2NmbXRkbm9uZWdhdHRTdG10oGhhdXRoRGF0YVjFSZYN5YgOjGh0NBcPZHZgW4_krrmihjLHmVzzuoMdl2NFAAAAAAAAAAAAAAAAAAAAAAAAAAAAQQF-p1VREax-qKTGn1Bp92fRMjRMunH7XwSlf9mMWJdY3VvAkicTChlxVd256yk4jEiXiJ5BuwugdpT7fBOYtymjpQECAyYgASFYIJK-2epPEw0ujHN-gvVp2Hp3ef8CzU3zqwO5ylx8L2OsIlggK5x5OlTGEPxLS-85TAABum4aqVK4CSWJ7LYDdkjuBLk"))
.clientDataJSON(Bytes.fromBase64(
"eyJ0eXBlIjoid2ViYXV0aG4uY3JlYXRlIiwiY2hhbGxlbmdlIjoiSUJRbnVZMVowSzFIcUJvRldDcDJ4bEpsOC1vcV9hRklYenlUX0YwLTBHVSIsIm9yaWdpbiI6Imh0dHA6Ly9sb2NhbGhvc3Q6ODA4MCIsImNyb3NzT3JpZ2luIjpmYWxzZX0"))
.transports(AuthenticatorTransport.HYBRID, AuthenticatorTransport.INTERNAL)
.build())
.type(PublicKeyCredentialType.PUBLIC_KEY)
.clientExtensionResults(clientExtensionResults)
.authenticatorAttachment(AuthenticatorAttachment.CROSS_PLATFORM)
.build();
WebAuthnRegistrationFilter.WebAuthnRegistrationRequest expected = new WebAuthnRegistrationFilter.WebAuthnRegistrationRequest();
expected.setPublicKey(new RelyingPartyPublicKey(credential, "Cell Phone"));
assertThat(registrationRequest).usingRecursiveComparison().isEqualTo(expected);
}
}
|
WebAuthnRegistrationRequestJacksonTests
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/checkpointing/UnalignedCheckpointsCancellationTest.java
|
{
"start": 6360,
"end": 7082
}
|
class ____ extends AbstractInvokable {
TestInvokable() {
super(new DummyEnvironment());
}
private boolean checkpointAborted;
private boolean checkpointTriggered;
@Override
public void invoke() {}
@Override
public void triggerCheckpointOnBarrier(
CheckpointMetaData checkpointMetaData,
CheckpointOptions checkpointOptions,
CheckpointMetricsBuilder checkpointMetrics) {
checkpointTriggered = true;
}
@Override
public void abortCheckpointOnBarrier(long checkpointId, CheckpointException cause) {
checkpointAborted = true;
}
}
}
|
TestInvokable
|
java
|
google__dagger
|
javatests/dagger/android/support/functional/TestBroadcastReceiver.java
|
{
"start": 759,
"end": 867
}
|
class ____ extends DaggerBroadcastReceiver {
@Inject Set<Class<?>> componentHierarchy;
}
|
TestBroadcastReceiver
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/ScopingValidationTest.java
|
{
"start": 18740,
"end": 18973
}
|
interface ____ {}");
Source scopeA =
CompilerTests.javaSource(
"test.ScopeA",
"package test;",
"",
"import javax.inject.Scope;",
"",
"@Scope @
|
SimpleScope
|
java
|
mybatis__mybatis-3
|
src/test/java/org/apache/ibatis/submitted/initialized_collection_property/Post.java
|
{
"start": 727,
"end": 1130
}
|
class ____ {
private int id;
private String content;
public Post() {
}
public Post(int id, String content) {
this.id = id;
this.content = content;
}
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
public String getContent() {
return content;
}
public void setContent(String content) {
this.content = content;
}
}
|
Post
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhancement/lazy/group/SimpleLazyGroupUpdateTest.java
|
{
"start": 1942,
"end": 3816
}
|
class ____ {
public static final String REALLY_BIG_STRING = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
@BeforeEach
public void prepare(SessionFactoryScope scope) {
scope.inTransaction( s -> {
s.persist( new TestEntity( 1L, "entity 1", "blah", REALLY_BIG_STRING ) );
} );
}
@Test
public void test(SessionFactoryScope scope) {
scope.inTransaction( s -> {
TestEntity entity = s.get( TestEntity.class, 1L );
assertLoaded( entity, "name" );
assertNotLoaded( entity, "lifeStory" );
assertNotLoaded( entity, "reallyBigString" );
entity.lifeStory = "blah blah blah";
assertLoaded( entity, "name" );
assertLoaded( entity, "lifeStory" );
assertNotLoaded( entity, "reallyBigString" );
} );
scope.inTransaction( s -> {
TestEntity entity = s.get( TestEntity.class, 1L );
assertLoaded( entity, "name" );
assertNotLoaded( entity, "lifeStory" );
assertNotLoaded( entity, "reallyBigString" );
assertEquals( "blah blah blah", entity.lifeStory );
assertEquals( REALLY_BIG_STRING, entity.reallyBigString );
} );
}
private void assertLoaded(Object owner, String name) {
// NOTE we assume null == not-loaded
Object fieldByReflection = getFieldByReflection( owner, name );
assertNotNull( fieldByReflection, "Expecting field '" + name + "' to be loaded, but it was not" );
}
private void assertNotLoaded(Object owner, String name) {
// NOTE we assume null == not-loaded
Object fieldByReflection = getFieldByReflection( owner, name );
assertNull( fieldByReflection, "Expecting field '" + name + "' to be not loaded, but it was" );
}
@AfterEach
public void cleanup(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
}
// --- //
@Entity( name = "TestEntity" )
@Table( name = "TEST_ENTITY" )
static
|
SimpleLazyGroupUpdateTest
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/nullness/EqualsBrokenForNullTest.java
|
{
"start": 4091,
"end": 4510
}
|
class ____ {
@Override
// BUG: Diagnostic contains: if (obj == null) { return false; }
public boolean equals(Object obj) {
if (!obj.getClass().isAssignableFrom(getClass())) {
return false;
}
return true;
}
}
private
|
ObjectGetClassReceiverToIsAssignableFrom
|
java
|
quarkusio__quarkus
|
core/builder/src/main/java/io/quarkus/builder/Constraint.java
|
{
"start": 29,
"end": 75
}
|
enum ____ {
REAL,
ORDER_ONLY,
}
|
Constraint
|
java
|
spring-projects__spring-framework
|
spring-web/src/main/java/org/springframework/http/codec/xml/XmlEventDecoder.java
|
{
"start": 5358,
"end": 5414
}
|
class ____ isolate Aalto dependency.
*/
private static
|
to
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsUnbuffer.java
|
{
"start": 1352,
"end": 3162
}
|
class ____ extends AbstractAbfsIntegrationTest {
private Path dest;
public ITestAbfsUnbuffer() throws Exception {
}
@BeforeEach
@Override
public void setup() throws Exception {
super.setup();
dest = path("ITestAbfsUnbuffer");
byte[] data = ContractTestUtils.dataset(16, 'a', 26);
ContractTestUtils.writeDataset(getFileSystem(), dest, data, data.length,
16, true);
}
@Test
public void testUnbuffer() throws IOException {
// Open file, read half the data, and then call unbuffer
try (FSDataInputStream inputStream = getFileSystem().open(dest)) {
assertTrue(
inputStream.getWrappedStream() instanceof AbfsInputStream, "unexpected stream type "
+ inputStream.getWrappedStream().getClass().getSimpleName());
readAndAssertBytesRead(inputStream, 8);
assertFalse(
isBufferNull(inputStream), "AbfsInputStream buffer should not be null");
inputStream.unbuffer();
// Check the the underlying buffer is null
assertTrue(
isBufferNull(inputStream), "AbfsInputStream buffer should be null");
}
}
private boolean isBufferNull(FSDataInputStream inputStream) {
return ((AbfsInputStream) inputStream.getWrappedStream()).getBuffer() == null;
}
/**
* Read the specified number of bytes from the given
* {@link FSDataInputStream} and assert that
* {@link FSDataInputStream#read(byte[])} read the specified number of bytes.
*/
private static void readAndAssertBytesRead(FSDataInputStream inputStream,
int bytesToRead) throws IOException {
assertEquals(bytesToRead, inputStream.read(new byte[bytesToRead]),
"AbfsInputStream#read did not read the correct number of "+ "bytes");
}
}
|
ITestAbfsUnbuffer
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/PrivateConstructorForUtilityClassTest.java
|
{
"start": 9346,
"end": 9796
}
|
class ____ {
static final String SOME_CONSTANT = "";
}
""")
.expectUnchanged()
.doTest();
}
@Test
public void record() {
CompilationTestHelper.newInstance(PrivateConstructorForUtilityClass.class, getClass())
.addSourceLines(
"ExampleUtilityClass.java",
"""
package example;
// BUG: Diagnostic contains:
public final
|
Test
|
java
|
hibernate__hibernate-orm
|
hibernate-spatial/src/main/java/org/hibernate/spatial/dialect/hana/HANAPointType.java
|
{
"start": 740,
"end": 2657
}
|
class ____ implements JdbcType {
/**
* An instance of the descrtiptor
*/
public static final HANAPointType INSTANCE = new HANAPointType();
private static final long serialVersionUID = -6978798264716544804L;
@Override
public int getJdbcTypeCode() {
return Types.STRUCT;
}
@Override
public int getDefaultSqlTypeCode() {
return SqlTypes.POINT;
}
@Override
public <X> ValueBinder<X> getBinder(final JavaType<X> javaType) {
return new BasicBinder<X>( javaType, this ) {
@Override
protected void doBind(PreparedStatement st, X value, int index, WrapperOptions options)
throws SQLException {
final Point<?> geometry = getJavaType().unwrap( value, Point.class, options );
st.setObject( index, HANASpatialUtils.toEWKB( geometry ) );
}
@Override
protected void doBind(CallableStatement st, X value, String name, WrapperOptions options)
throws SQLException {
final Point<?> geometry = getJavaType().unwrap( value, Point.class, options );
st.setObject( name, HANASpatialUtils.toEWKB( geometry ) );
}
};
}
@Override
public <X> ValueExtractor<X> getExtractor(final JavaType<X> javaType) {
return new BasicExtractor<X>( javaType, this ) {
@Override
protected X doExtract(ResultSet rs, int paramIndex, WrapperOptions options) throws SQLException {
return getJavaType().wrap( HANASpatialUtils.toGeometry( rs.getObject( paramIndex ) ), options );
}
@Override
protected X doExtract(CallableStatement statement, int index, WrapperOptions options) throws SQLException {
return getJavaType().wrap( HANASpatialUtils.toGeometry( statement.getObject( index ) ), options );
}
@Override
protected X doExtract(CallableStatement statement, String name, WrapperOptions options)
throws SQLException {
return getJavaType().wrap( HANASpatialUtils.toGeometry( statement.getObject( name ) ), options );
}
};
}
}
|
HANAPointType
|
java
|
processing__processing4
|
java/test/processing/mode/java/UTCompiler.java
|
{
"start": 345,
"end": 1901
}
|
class ____ {
private final String classpath;
UTCompiler(File... classpath) throws IOException {
final StringBuilder sb = new StringBuilder();
for (final File f : classpath) {
if (sb.length() > 0)
sb.append(File.pathSeparatorChar);
sb.append(f.getAbsolutePath());
}
this.classpath = sb.toString();
}
ProcessResult compile(final String name, final String program)
throws IOException {
final File tmpdir = File.createTempFile("utcompiler", ".tmp");
if (!tmpdir.delete())
throw new IOException("Cannot delete " + tmpdir);
if (!tmpdir.mkdir())
throw new IOException("Cannot create " + tmpdir);
final File javaFile = new File(tmpdir, name + ".java");
final FileWriter java = new FileWriter(javaFile);
try {
java.write(program);
} finally {
java.close();
}
try {
return new ProcessHelper("javac",
"-sourcepath", tmpdir.getAbsolutePath(),
"-cp", classpath,
"-nowarn",
"-d", tmpdir.getAbsolutePath(),
javaFile.getAbsolutePath()).execute();
} catch (InterruptedException e) {
throw new RuntimeException(e);
} finally {
for (final File f: tmpdir.listFiles())
if (!f.getName().startsWith("."))if (!f.delete())
throw new IOException("Can't delete " + f);
if (!tmpdir.delete())
throw new IOException("Can't delete " + tmpdir);
}
}
}
|
UTCompiler
|
java
|
spring-projects__spring-data-jpa
|
spring-data-jpa/src/test/java/org/springframework/data/jpa/repository/aot/UserRepository.java
|
{
"start": 11527,
"end": 11644
}
|
interface ____ {
String getEmailAddress();
}
record Names(String firstname, String lastname) {
}
static
|
EmailOnly
|
java
|
apache__camel
|
components/camel-dhis2/camel-dhis2-component/src/test/java/org/apache/camel/component/dhis2/Dhis2ResourceTablesIT.java
|
{
"start": 1312,
"end": 1404
}
|
class ____ {@link org.apache.camel.component.dhis2.api.Dhis2ResourceTables} APIs.
*/
public
|
for
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/plugins/PersistentTaskPlugin.java
|
{
"start": 971,
"end": 1413
}
|
interface ____ {
/**
* Returns additional persistent tasks executors added by this plugin.
*/
default List<PersistentTasksExecutor<?>> getPersistentTasksExecutor(
ClusterService clusterService,
ThreadPool threadPool,
Client client,
SettingsModule settingsModule,
IndexNameExpressionResolver expressionResolver
) {
return Collections.emptyList();
}
}
|
PersistentTaskPlugin
|
java
|
apache__logging-log4j2
|
log4j-core/src/main/java/org/apache/logging/log4j/core/util/NetUtils.java
|
{
"start": 4085,
"end": 7967
}
|
interface ____ {@code null} if no MAC address could be determined.
*/
public static byte[] getMacAddress() {
byte[] mac = null;
try {
final InetAddress localHost = InetAddress.getLocalHost();
try {
final NetworkInterface localInterface = NetworkInterface.getByInetAddress(localHost);
if (isUpAndNotLoopback(localInterface)) {
mac = localInterface.getHardwareAddress();
}
if (mac == null) {
final Enumeration<NetworkInterface> networkInterfaces = NetworkInterface.getNetworkInterfaces();
if (networkInterfaces != null) {
while (networkInterfaces.hasMoreElements() && mac == null) {
final NetworkInterface nic = networkInterfaces.nextElement();
if (isUpAndNotLoopback(nic)) {
mac = nic.getHardwareAddress();
}
}
}
}
} catch (final SocketException e) {
LOGGER.catching(e);
}
if (ArrayUtils.isEmpty(mac) && localHost != null) {
// Emulate a MAC address with an IP v4 or v6
final byte[] address = localHost.getAddress();
// Take only 6 bytes if the address is an IPv6 otherwise will pad with two zero bytes
mac = Arrays.copyOf(address, 6);
}
} catch (final UnknownHostException ignored) {
// ignored
}
return mac;
}
/**
* Returns the mac address, if it is available, as a string with each byte separated by a ":" character.
* @return the mac address String or null.
*/
public static String getMacAddressString() {
final byte[] macAddr = getMacAddress();
if (!ArrayUtils.isEmpty(macAddr)) {
final StringBuilder sb = new StringBuilder(String.format("%02x", macAddr[0]));
for (int i = 1; i < macAddr.length; ++i) {
sb.append(":").append(String.format("%02x", macAddr[i]));
}
return sb.toString();
}
return null;
}
private static boolean isUpAndNotLoopback(final NetworkInterface ni) throws SocketException {
return ni != null && !ni.isLoopback() && ni.isUp();
}
/**
* Converts a URI string or file path to a URI object.
*
* @param path the URI string or path
* @return the URI object
*/
@SuppressFBWarnings(
value = "PATH_TRAVERSAL_IN",
justification = "Currently `path` comes from a configuration file.")
public static URI toURI(final String path) {
try {
// Resolves absolute URI
return new URI(path);
} catch (final URISyntaxException e) {
// A file path or a Apache Commons VFS URL might contain blanks.
// A file path may start with a driver letter
try {
final URL url = new URL(path);
return new URI(url.getProtocol(), url.getHost(), url.getPath(), null);
} catch (MalformedURLException | URISyntaxException nestedEx) {
return new File(path).toURI();
}
}
}
public static List<URI> toURIs(final String path) {
final String[] parts = path.split(",");
String scheme = null;
final List<URI> uris = new ArrayList<>(parts.length);
for (final String part : parts) {
final URI uri = NetUtils.toURI(scheme != null ? scheme + ":" + part.trim() : part.trim());
if (scheme == null && uri.getScheme() != null) {
scheme = uri.getScheme();
}
uris.add(uri);
}
return uris;
}
}
|
or
|
java
|
apache__camel
|
components/camel-spring-parent/camel-spring-ws/src/test/java/org/apache/camel/component/spring/ws/testfilters/EmptyMessageFilter.java
|
{
"start": 1019,
"end": 1337
}
|
class ____ implements MessageFilter {
@Override
public void filterProducer(Exchange exchange, WebServiceMessage produceResponse) {
// Do nothing
}
@Override
public void filterConsumer(Exchange exchange, WebServiceMessage consumerResponse) {
// Do nothing
}
}
|
EmptyMessageFilter
|
java
|
apache__thrift
|
lib/javame/src/org/apache/thrift/protocol/TProtocolException.java
|
{
"start": 922,
"end": 2048
}
|
class ____ extends TException {
private static final long serialVersionUID = 1L;
public static final int UNKNOWN = 0;
public static final int INVALID_DATA = 1;
public static final int NEGATIVE_SIZE = 2;
public static final int SIZE_LIMIT = 3;
public static final int BAD_VERSION = 4;
public static final int NOT_IMPLEMENTED = 5;
protected int type_ = UNKNOWN;
public TProtocolException() {
super();
}
public TProtocolException(int type) {
super();
type_ = type;
}
public TProtocolException(int type, String message) {
super(message);
type_ = type;
}
public TProtocolException(String message) {
super(message);
}
public TProtocolException(int type, Throwable cause) {
super(cause);
type_ = type;
}
public TProtocolException(Throwable cause) {
super(cause);
}
public TProtocolException(String message, Throwable cause) {
super(message, cause);
}
public TProtocolException(int type, String message, Throwable cause) {
super(message, cause);
type_ = type;
}
public int getType() {
return type_;
}
}
|
TProtocolException
|
java
|
elastic__elasticsearch
|
x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/BroadcastMessageAction.java
|
{
"start": 3886,
"end": 4476
}
|
class ____ extends BaseNodesResponse<NodeResponse> {
protected Response(ClusterName clusterName, List<NodeResponse> nodes, List<FailedNodeException> failures) {
super(clusterName, nodes, failures);
}
@Override
protected List<NodeResponse> readNodesFrom(StreamInput in) throws IOException {
return in.readCollectionAsList(NodeResponse::new);
}
@Override
protected void writeNodesTo(StreamOutput out, List<NodeResponse> nodes) {
TransportAction.localOnly();
}
}
public static
|
Response
|
java
|
eclipse-vertx__vert.x
|
vertx-core/src/main/java/io/vertx/core/net/ProxyType.java
|
{
"start": 621,
"end": 778
}
|
enum ____ {
/**
* HTTP CONNECT ssl proxy
*/
HTTP,
/**
* SOCKS4/4a tcp proxy
*/
SOCKS4,
/**
* SOCSK5 tcp proxy
*/
SOCKS5
}
|
ProxyType
|
java
|
spring-projects__spring-boot
|
core/spring-boot-test-autoconfigure/src/test/java/org/springframework/boot/test/autoconfigure/override/OverrideAutoConfigurationEnabledFalseIntegrationTests.java
|
{
"start": 1968,
"end": 2398
}
|
class ____ {
@Autowired
private ApplicationContext context;
@Test
void disabledAutoConfiguration() {
ApplicationContext context = this.context;
assertThat(context.getBean(ExampleTestConfig.class)).isNotNull();
assertThatExceptionOfType(NoSuchBeanDefinitionException.class)
.isThrownBy(() -> context.getBean(ConfigurationPropertiesBindingPostProcessor.class));
}
}
|
OverrideAutoConfigurationEnabledFalseIntegrationTests
|
java
|
spring-projects__spring-framework
|
spring-expression/src/test/java/org/springframework/expression/spel/SelectionAndProjectionTests.java
|
{
"start": 14721,
"end": 15051
}
|
class ____ {
private final Map<String, String> colors = new TreeMap<>();
{
colors.put("red", "rot");
colors.put("brown", "braun");
colors.put("blue", "blau");
colors.put("yellow", "gelb");
colors.put("beige", "beige");
}
public Map<String, String> getColors() {
return colors;
}
}
static
|
MapTestBean
|
java
|
quarkusio__quarkus
|
independent-projects/bootstrap/runner/src/main/java/io/quarkus/bootstrap/runner/RunnerClassLoader.java
|
{
"start": 13461,
"end": 14866
}
|
class ____ implements Resource {
@Override
public void beforeCheckpoint(Context<? extends Resource> ctx) {
synchronized (currentlyBufferedResources) {
for (int i = 0; i < currentlyBufferedResources.length; ++i) {
if (currentlyBufferedResources[i] != null) {
currentlyBufferedResources[i].resetInternalCaches();
currentlyBufferedResources[i] = null;
}
}
}
}
@Override
public void afterRestore(Context<? extends Resource> ctx) {
}
}
@Override
public boolean equals(Object o) {
//see comment in hashCode
return this == o;
}
@Override
public int hashCode() {
//We can return a constant as we expect to have a single instance of these;
//this is useful to avoid triggering a call to the identity hashcode,
//which could be rather inefficient as there's good chances that some component
//will have inflated the monitor of this instance.
//A hash collision would be unfortunate but unexpected, and shouldn't be a problem
//as the equals implementation still does honour the identity contract .
//See also discussion on https://github.com/smallrye/smallrye-context-propagation/pull/443
return 1;
}
}
|
CracResource
|
java
|
spring-projects__spring-security
|
oauth2/oauth2-jose/src/main/java/org/springframework/security/oauth2/jwt/JwsHeader.java
|
{
"start": 1231,
"end": 2039
}
|
class ____ extends JoseHeader {
private JwsHeader(Map<String, Object> headers) {
super(headers);
}
@SuppressWarnings("unchecked")
@Override
public JwsAlgorithm getAlgorithm() {
return super.getAlgorithm();
}
/**
* Returns a new {@link Builder}, initialized with the provided {@link JwsAlgorithm}.
* @param jwsAlgorithm the {@link JwsAlgorithm}
* @return the {@link Builder}
*/
public static Builder with(JwsAlgorithm jwsAlgorithm) {
return new Builder(jwsAlgorithm);
}
/**
* Returns a new {@link Builder}, initialized with the provided {@code headers}.
* @param headers the headers
* @return the {@link Builder}
*/
public static Builder from(JwsHeader headers) {
return new Builder(headers);
}
/**
* A builder for {@link JwsHeader}.
*/
public static final
|
JwsHeader
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/MulUnsignedLongsEvaluator.java
|
{
"start": 1127,
"end": 5173
}
|
class ____ implements EvalOperator.ExpressionEvaluator {
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(MulUnsignedLongsEvaluator.class);
private final Source source;
private final EvalOperator.ExpressionEvaluator lhs;
private final EvalOperator.ExpressionEvaluator rhs;
private final DriverContext driverContext;
private Warnings warnings;
public MulUnsignedLongsEvaluator(Source source, EvalOperator.ExpressionEvaluator lhs,
EvalOperator.ExpressionEvaluator rhs, DriverContext driverContext) {
this.source = source;
this.lhs = lhs;
this.rhs = rhs;
this.driverContext = driverContext;
}
@Override
public Block eval(Page page) {
try (LongBlock lhsBlock = (LongBlock) lhs.eval(page)) {
try (LongBlock rhsBlock = (LongBlock) rhs.eval(page)) {
LongVector lhsVector = lhsBlock.asVector();
if (lhsVector == null) {
return eval(page.getPositionCount(), lhsBlock, rhsBlock);
}
LongVector rhsVector = rhsBlock.asVector();
if (rhsVector == null) {
return eval(page.getPositionCount(), lhsBlock, rhsBlock);
}
return eval(page.getPositionCount(), lhsVector, rhsVector);
}
}
}
@Override
public long baseRamBytesUsed() {
long baseRamBytesUsed = BASE_RAM_BYTES_USED;
baseRamBytesUsed += lhs.baseRamBytesUsed();
baseRamBytesUsed += rhs.baseRamBytesUsed();
return baseRamBytesUsed;
}
public LongBlock eval(int positionCount, LongBlock lhsBlock, LongBlock rhsBlock) {
try(LongBlock.Builder result = driverContext.blockFactory().newLongBlockBuilder(positionCount)) {
position: for (int p = 0; p < positionCount; p++) {
switch (lhsBlock.getValueCount(p)) {
case 0:
result.appendNull();
continue position;
case 1:
break;
default:
warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value"));
result.appendNull();
continue position;
}
switch (rhsBlock.getValueCount(p)) {
case 0:
result.appendNull();
continue position;
case 1:
break;
default:
warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value"));
result.appendNull();
continue position;
}
long lhs = lhsBlock.getLong(lhsBlock.getFirstValueIndex(p));
long rhs = rhsBlock.getLong(rhsBlock.getFirstValueIndex(p));
try {
result.appendLong(Mul.processUnsignedLongs(lhs, rhs));
} catch (ArithmeticException e) {
warnings().registerException(e);
result.appendNull();
}
}
return result.build();
}
}
public LongBlock eval(int positionCount, LongVector lhsVector, LongVector rhsVector) {
try(LongBlock.Builder result = driverContext.blockFactory().newLongBlockBuilder(positionCount)) {
position: for (int p = 0; p < positionCount; p++) {
long lhs = lhsVector.getLong(p);
long rhs = rhsVector.getLong(p);
try {
result.appendLong(Mul.processUnsignedLongs(lhs, rhs));
} catch (ArithmeticException e) {
warnings().registerException(e);
result.appendNull();
}
}
return result.build();
}
}
@Override
public String toString() {
return "MulUnsignedLongsEvaluator[" + "lhs=" + lhs + ", rhs=" + rhs + "]";
}
@Override
public void close() {
Releasables.closeExpectNoException(lhs, rhs);
}
private Warnings warnings() {
if (warnings == null) {
this.warnings = Warnings.createWarnings(
driverContext.warningsMode(),
source.source().getLineNumber(),
source.source().getColumnNumber(),
source.text()
);
}
return warnings;
}
static
|
MulUnsignedLongsEvaluator
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/CometdEndpointBuilderFactory.java
|
{
"start": 32080,
"end": 41654
}
|
interface ____
extends
CometdEndpointConsumerBuilder,
CometdEndpointProducerBuilder {
default AdvancedCometdEndpointBuilder advanced() {
return (AdvancedCometdEndpointBuilder) this;
}
/**
* The origins domain that support to cross, if the crosssOriginFilterOn
* is true.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: *
* Group: common
*
* @param allowedOrigins the value to set
* @return the dsl builder
*/
default CometdEndpointBuilder allowedOrigins(String allowedOrigins) {
doSetProperty("allowedOrigins", allowedOrigins);
return this;
}
/**
* The root directory for the web resources or classpath. Use the
* protocol file: or classpath: depending if you want that the component
* loads the resource from file system or classpath. Classpath is
* required for OSGI deployment where the resources are packaged in the
* jar.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param baseResource the value to set
* @return the dsl builder
*/
default CometdEndpointBuilder baseResource(String baseResource) {
doSetProperty("baseResource", baseResource);
return this;
}
/**
* If true, the server will support for cross-domain filtering.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param crossOriginFilterOn the value to set
* @return the dsl builder
*/
default CometdEndpointBuilder crossOriginFilterOn(boolean crossOriginFilterOn) {
doSetProperty("crossOriginFilterOn", crossOriginFilterOn);
return this;
}
/**
* If true, the server will support for cross-domain filtering.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param crossOriginFilterOn the value to set
* @return the dsl builder
*/
default CometdEndpointBuilder crossOriginFilterOn(String crossOriginFilterOn) {
doSetProperty("crossOriginFilterOn", crossOriginFilterOn);
return this;
}
/**
* The filterPath will be used by the CrossOriginFilter, if the
* crosssOriginFilterOn is true.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param filterPath the value to set
* @return the dsl builder
*/
default CometdEndpointBuilder filterPath(String filterPath) {
doSetProperty("filterPath", filterPath);
return this;
}
/**
* The client side poll timeout in milliseconds. How long a client will
* wait between reconnects.
*
* The option is a: <code>int</code> type.
*
* Group: common
*
* @param interval the value to set
* @return the dsl builder
*/
default CometdEndpointBuilder interval(int interval) {
doSetProperty("interval", interval);
return this;
}
/**
* The client side poll timeout in milliseconds. How long a client will
* wait between reconnects.
*
* The option will be converted to a <code>int</code> type.
*
* Group: common
*
* @param interval the value to set
* @return the dsl builder
*/
default CometdEndpointBuilder interval(String interval) {
doSetProperty("interval", interval);
return this;
}
/**
* If true, the server will accept JSON wrapped in a comment and will
* generate JSON wrapped in a comment. This is a defence against Ajax
* Hijacking.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param jsonCommented the value to set
* @return the dsl builder
*/
default CometdEndpointBuilder jsonCommented(boolean jsonCommented) {
doSetProperty("jsonCommented", jsonCommented);
return this;
}
/**
* If true, the server will accept JSON wrapped in a comment and will
* generate JSON wrapped in a comment. This is a defence against Ajax
* Hijacking.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param jsonCommented the value to set
* @return the dsl builder
*/
default CometdEndpointBuilder jsonCommented(String jsonCommented) {
doSetProperty("jsonCommented", jsonCommented);
return this;
}
/**
* Logging level. 0=none, 1=info, 2=debug.
*
* The option is a: <code>int</code> type.
*
* Default: 1
* Group: common
*
* @param logLevel the value to set
* @return the dsl builder
*/
default CometdEndpointBuilder logLevel(int logLevel) {
doSetProperty("logLevel", logLevel);
return this;
}
/**
* Logging level. 0=none, 1=info, 2=debug.
*
* The option will be converted to a <code>int</code> type.
*
* Default: 1
* Group: common
*
* @param logLevel the value to set
* @return the dsl builder
*/
default CometdEndpointBuilder logLevel(String logLevel) {
doSetProperty("logLevel", logLevel);
return this;
}
/**
* The max client side poll timeout in milliseconds. A client will be
* removed if a connection is not received in this time.
*
* The option is a: <code>int</code> type.
*
* Default: 30000
* Group: common
*
* @param maxInterval the value to set
* @return the dsl builder
*/
default CometdEndpointBuilder maxInterval(int maxInterval) {
doSetProperty("maxInterval", maxInterval);
return this;
}
/**
* The max client side poll timeout in milliseconds. A client will be
* removed if a connection is not received in this time.
*
* The option will be converted to a <code>int</code> type.
*
* Default: 30000
* Group: common
*
* @param maxInterval the value to set
* @return the dsl builder
*/
default CometdEndpointBuilder maxInterval(String maxInterval) {
doSetProperty("maxInterval", maxInterval);
return this;
}
/**
* The client side poll timeout, if multiple connections are detected
* from the same browser.
*
* The option is a: <code>int</code> type.
*
* Default: 1500
* Group: common
*
* @param multiFrameInterval the value to set
* @return the dsl builder
*/
default CometdEndpointBuilder multiFrameInterval(int multiFrameInterval) {
doSetProperty("multiFrameInterval", multiFrameInterval);
return this;
}
/**
* The client side poll timeout, if multiple connections are detected
* from the same browser.
*
* The option will be converted to a <code>int</code> type.
*
* Default: 1500
* Group: common
*
* @param multiFrameInterval the value to set
* @return the dsl builder
*/
default CometdEndpointBuilder multiFrameInterval(String multiFrameInterval) {
doSetProperty("multiFrameInterval", multiFrameInterval);
return this;
}
/**
* The server side poll timeout in milliseconds. This is how long the
* server will hold a reconnect request before responding.
*
* The option is a: <code>int</code> type.
*
* Default: 240000
* Group: common
*
* @param timeout the value to set
* @return the dsl builder
*/
default CometdEndpointBuilder timeout(int timeout) {
doSetProperty("timeout", timeout);
return this;
}
/**
* The server side poll timeout in milliseconds. This is how long the
* server will hold a reconnect request before responding.
*
* The option will be converted to a <code>int</code> type.
*
* Default: 240000
* Group: common
*
* @param timeout the value to set
* @return the dsl builder
*/
default CometdEndpointBuilder timeout(String timeout) {
doSetProperty("timeout", timeout);
return this;
}
}
/**
* Advanced builder for endpoint for the CometD component.
*/
public
|
CometdEndpointBuilder
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationUsageService.java
|
{
"start": 1210,
"end": 4378
}
|
class ____ {
private final Map<String, Map<String, LongAdder>> aggs;
private final MeterRegistry meterRegistry;
public Builder() {
this(MeterRegistry.NOOP);
}
public Builder(MeterRegistry meterRegistry) {
aggs = new HashMap<>();
assert meterRegistry != null;
this.meterRegistry = meterRegistry;
}
public void registerAggregationUsage(String aggregationName) {
registerAggregationUsage(aggregationName, OTHER_SUBTYPE);
}
public void registerAggregationUsage(String aggregationName, String valuesSourceType) {
Map<String, LongAdder> subAgg = aggs.computeIfAbsent(aggregationName, k -> new HashMap<>());
if (subAgg.put(valuesSourceType, new LongAdder()) != null) {
throw new IllegalArgumentException(
"stats for aggregation [" + aggregationName + "][" + valuesSourceType + "] already registered"
);
}
}
public AggregationUsageService build() {
return new AggregationUsageService(this);
}
}
// Attribute names for the metric
private AggregationUsageService(Builder builder) {
this.aggs = builder.aggs;
info = new AggregationInfo(aggs);
this.aggregationsUsageCounter = builder.meterRegistry.registerLongCounter(
ES_SEARCH_QUERY_AGGREGATIONS_TOTAL_COUNT,
"Aggregations usage",
"count"
);
}
public void incAggregationUsage(String aggregationName, String valuesSourceType) {
Map<String, LongAdder> valuesSourceMap = aggs.get(aggregationName);
// Not all aggs register their usage at the moment we also don't register them in test context
if (valuesSourceMap != null) {
LongAdder adder = valuesSourceMap.get(valuesSourceType);
if (adder != null) {
adder.increment();
}
assert adder != null : "Unknown subtype [" + aggregationName + "][" + valuesSourceType + "]";
}
assert valuesSourceMap != null : "Unknown aggregation [" + aggregationName + "][" + valuesSourceType + "]";
// tests will have a no-op implementation here
String VALUES_SOURCE_KEY = "values_source";
String AGGREGATION_NAME_KEY = "aggregation_name";
aggregationsUsageCounter.incrementBy(1, Map.of(AGGREGATION_NAME_KEY, aggregationName, VALUES_SOURCE_KEY, valuesSourceType));
}
public Map<String, Object> getUsageStats() {
Map<String, Object> aggsUsageMap = new HashMap<>();
aggs.forEach((name, agg) -> {
Map<String, Long> aggUsageMap = new HashMap<>();
agg.forEach((k, v) -> {
long val = v.longValue();
if (val > 0) {
aggUsageMap.put(k, val);
}
});
if (aggUsageMap.isEmpty() == false) {
aggsUsageMap.put(name, aggUsageMap);
}
});
return aggsUsageMap;
}
@Override
public AggregationInfo info() {
return info;
}
}
|
Builder
|
java
|
apache__maven
|
its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng3506ArtifactHandlersFromPluginsTest.java
|
{
"start": 1184,
"end": 4184
}
|
class ____ extends AbstractMavenIntegrationTestCase {
private static final String GID = "org.apache.maven.its.mng3506";
private static final String AID = "mng-3506";
private static final String VERSION = "1";
private static final String TYPE = "jar";
private static final String BAD_TYPE1 = "coreit-1";
private static final String BAD_TYPE2 = "coreit-2";
@Test
public void testProjectPackagingUsage() throws IOException, VerificationException {
File testDir = extractResources("/" + AID);
// First, build the test plugin
Verifier verifier = newVerifier(new File(testDir, "mng-3506.2/maven-it-plugin-extension2").getAbsolutePath());
verifier.setAutoclean(false);
verifier.deleteDirectory("target");
verifier.addCliArgument("install");
verifier.execute();
verifier.verifyErrorFreeLog();
// Then, run the test project that uses the plugin
verifier = newVerifier(testDir.getAbsolutePath());
verifier.deleteArtifacts(GID);
verifier.addCliArgument("install");
verifier.execute();
verifier.verifyErrorFreeLog();
// Now, if everything worked, we have .pom and a .jar in the local repo for each child, and a pom for the
// parent.
// IF IT DIDN'T, we have a .pom and a .coreit-1 for child 1 AND/OR .pom and .coreit-2 for child 2 in the local
// repo...
// Parent POM
String path = verifier.getArtifactPath(GID, AID, VERSION, "pom");
assertTrue(new File(path).exists(), path + " should have been installed.");
// Child 1
path = verifier.getArtifactPath(GID, AID + ".1", VERSION, TYPE);
assertTrue(new File(path).exists(), path + " should have been installed.");
path = verifier.getArtifactPath(GID, AID + ".1", VERSION, "pom");
assertTrue(new File(path).exists(), path + " should have been installed.");
path = verifier.getArtifactPath(GID, AID + ".1", VERSION, BAD_TYPE1);
assertFalse(new File(path).exists(), path + " should NOT have been installed.");
path = verifier.getArtifactPath(GID, AID + ".1", VERSION, BAD_TYPE2);
assertFalse(new File(path).exists(), path + " should _NEVER_ be installed!!!");
// Child 2
path = verifier.getArtifactPath(GID, AID + ".2", VERSION, TYPE);
assertTrue(new File(path).exists(), path + " should have been installed.");
path = verifier.getArtifactPath(GID, AID + ".2", VERSION, "pom");
assertTrue(new File(path).exists(), path + " should have been installed.");
path = verifier.getArtifactPath(GID, AID + ".2", VERSION, BAD_TYPE1);
assertFalse(new File(path).exists(), path + " should _NEVER_ be installed!!!");
path = verifier.getArtifactPath(GID, AID + ".2", VERSION, BAD_TYPE2);
assertFalse(new File(path).exists(), path + " should NOT have been installed.");
}
}
|
MavenITmng3506ArtifactHandlersFromPluginsTest
|
java
|
spring-projects__spring-boot
|
core/spring-boot-autoconfigure/src/test/java/org/springframework/boot/autoconfigure/condition/ConditionalOnBooleanPropertyTests.java
|
{
"start": 8426,
"end": 8615
}
|
class ____ {
@Bean
String foo() {
return "foo";
}
}
@Configuration(proxyBeanMethods = false)
@ConditionalOnBooleanProperty(value = "x", name = "y")
static
|
NoNameOrValueAttribute
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/Phase.java
|
{
"start": 1089,
"end": 2212
}
|
enum ____ {
/**
* The namenode is loading the fsimage file into memory.
*/
LOADING_FSIMAGE("LoadingFsImage", "Loading fsimage"),
/**
* The namenode is loading the edits file and applying its operations to the
* in-memory metadata.
*/
LOADING_EDITS("LoadingEdits", "Loading edits"),
/**
* The namenode is saving a new checkpoint.
*/
SAVING_CHECKPOINT("SavingCheckpoint", "Saving checkpoint"),
/**
* The namenode has entered safemode, awaiting block reports from data nodes.
*/
SAFEMODE("SafeMode", "Safe mode");
private final String name, description;
/**
* Returns phase description.
*
* @return String description
*/
public String getDescription() {
return description;
}
/**
* Returns phase name.
*
* @return String phase name
*/
public String getName() {
return name;
}
/**
* Private constructor of enum.
*
* @param name String phase name
* @param description String phase description
*/
private Phase(String name, String description) {
this.name = name;
this.description = description;
}
}
|
Phase
|
java
|
apache__flink
|
flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/JoinDeriveNullFilterRule.java
|
{
"start": 2056,
"end": 4805
}
|
class ____
extends RelRule<JoinDeriveNullFilterRule.JoinDeriveNullFilterRuleConfig> {
// To avoid the impact of null values on the single join node,
// We will add a null filter (possibly be pushed down) before the join to filter
// null values when the source of InnerJoin has nullCount more than this value.
public static final Long JOIN_NULL_FILTER_THRESHOLD = 2000000L;
public static final JoinDeriveNullFilterRule INSTANCE =
JoinDeriveNullFilterRule.JoinDeriveNullFilterRuleConfig.DEFAULT.toRule();
private JoinDeriveNullFilterRule(JoinDeriveNullFilterRuleConfig config) {
super(config);
}
@Override
public boolean matches(RelOptRuleCall call) {
Join join = call.rel(0);
return join.getJoinType() == JoinRelType.INNER
&& !join.analyzeCondition().pairs().isEmpty();
}
@Override
public void onMatch(RelOptRuleCall call) {
LogicalJoin join = call.rel(0);
RelBuilder relBuilder = call.builder();
RexBuilder rexBuilder = join.getCluster().getRexBuilder();
FlinkRelMetadataQuery mq =
FlinkRelMetadataQuery.reuseOrCreate(join.getCluster().getMetadataQuery());
JoinInfo joinInfo = join.analyzeCondition();
RelNode newLeft =
createIsNotNullFilter(
relBuilder, rexBuilder, mq, join.getLeft(), joinInfo.leftKeys);
RelNode newRight =
createIsNotNullFilter(
relBuilder, rexBuilder, mq, join.getRight(), joinInfo.rightKeys);
if ((newLeft != join.getLeft()) || (newRight != join.getRight())) {
Join newJoin = join.copy(join.getTraitSet(), Lists.newArrayList(newLeft, newRight));
call.transformTo(newJoin);
}
}
private RelNode createIsNotNullFilter(
RelBuilder relBuilder,
RexBuilder rexBuilder,
FlinkRelMetadataQuery mq,
RelNode input,
ImmutableIntList keys) {
List<RexNode> filters = new ArrayList<>();
for (int key : keys) {
Double nullCount = mq.getColumnNullCount(input, key);
if (nullCount != null && nullCount > JOIN_NULL_FILTER_THRESHOLD) {
filters.add(
relBuilder.call(
SqlStdOperatorTable.IS_NOT_NULL,
rexBuilder.makeInputRef(input, key)));
}
}
if (!filters.isEmpty()) {
return relBuilder.push(input).filter(filters).build();
} else {
return input;
}
}
/** Rule configuration. */
@Value.Immutable(singleton = false)
public
|
JoinDeriveNullFilterRule
|
java
|
hibernate__hibernate-orm
|
hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/sequence/RDMSSequenceSupport.java
|
{
"start": 554,
"end": 1488
}
|
class ____ implements SequenceSupport {
public static final SequenceSupport INSTANCE = new RDMSSequenceSupport();
@Override
public String getSelectSequenceNextValString(String sequenceName) throws MappingException {
return "permuted_id('NEXT',31)";
}
@Override
public String getFromDual() {
// The where clause was added to eliminate this statement from Brute Force Searches.
return " from rdms.rdms_dummy where key_col=1";
}
@Override
public String getCreateSequenceString(String sequenceName) {
// We must return a valid RDMS/RSA command from this method to
// prevent RDMS/RSA from issuing *ERROR 400
return "";
}
@Override
public String getDropSequenceString(String sequenceName) {
// We must return a valid RDMS/RSA command from this method to
// prevent RDMS/RSA from issuing *ERROR 400
return "";
}
@Override
public boolean supportsPooledSequences() {
return false;
}
}
|
RDMSSequenceSupport
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/resource/transaction/LegacySettingInitiatorTest.java
|
{
"start": 1037,
"end": 2418
}
|
class ____ {
private BootstrapServiceRegistryImpl bsr;
@BeforeEach
public void before() {
bsr = (BootstrapServiceRegistryImpl) new BootstrapServiceRegistryBuilder().build();
}
@AfterEach
public void after() {
if ( bsr != null ) {
bsr.destroy();
}
}
@Test
public void testLegacySettingSelection() {
final TransactionCoordinatorBuilderInitiator initiator = new TransactionCoordinatorBuilderInitiator();
TransactionCoordinatorBuilder builder = initiator.initiateService(
Collections.singletonMap(
TransactionCoordinatorBuilderInitiator.LEGACY_SETTING_NAME,
"org.hibernate.transaction.JDBCTransactionFactory"
),
bsr
);
assertThat( builder, instanceOf( JdbcResourceLocalTransactionCoordinatorBuilderImpl.class ) );
builder = initiator.initiateService(
Collections.singletonMap(
TransactionCoordinatorBuilderInitiator.LEGACY_SETTING_NAME,
"org.hibernate.transaction.JTATransactionFactory"
),
bsr
);
assertThat( builder, instanceOf( JtaTransactionCoordinatorBuilderImpl.class ) );
builder = initiator.initiateService(
Collections.singletonMap(
TransactionCoordinatorBuilderInitiator.LEGACY_SETTING_NAME,
"org.hibernate.transaction.CMTTransactionFactory"
),
bsr
);
assertThat( builder, instanceOf( JtaTransactionCoordinatorBuilderImpl.class ) );
}
}
|
LegacySettingInitiatorTest
|
java
|
eclipse-vertx__vert.x
|
vertx-core/src/main/java/io/vertx/core/net/impl/VertxConnection.java
|
{
"start": 1820,
"end": 16917
}
|
class ____ extends ConnectionBase {
private static final Logger log = LoggerFactory.getLogger(VertxConnection.class);
private static final int MAX_REGION_SIZE = 1024 * 1024;
public final VoidChannelPromise voidPromise;
private final OutboundWriteQueue outboundMessageQueue;
// State accessed exclusively from the event loop thread
private Deque<Object> pending;
private boolean reentrant;
private boolean read;
private boolean needsFlush;
private boolean draining;
private boolean channelWritable;
private boolean paused;
private boolean autoRead;
// State accessed exclusively from the event loop thread
private ScheduledFuture<?> shutdownTimeout;
private ChannelPromise shutdown;
private boolean closeSent;
/**
 * Creates a connection using the default (non strict) threading mode.
 *
 * @param context the Vert.x context associated with this connection
 * @param chctx the Netty channel handler context backing this connection
 */
public VertxConnection(ContextInternal context, ChannelHandlerContext chctx) {
    this(context, chctx, false);
}

/**
 * Creates a connection.
 *
 * @param context the Vert.x context associated with this connection
 * @param chctx the Netty channel handler context backing this connection
 * @param strictThreadMode selects the outbound queue implementation: a direct
 *        queue when {@code true}, an event-loop ordered message channel otherwise
 */
public VertxConnection(ContextInternal context, ChannelHandlerContext chctx, boolean strictThreadMode) {
    super(context, chctx);
    EventLoopExecutor executor;
    // Reuse the context executor only for an event-loop context bound to this
    // channel's own event loop; otherwise wrap the channel's event loop directly.
    if (context.threadingModel() == ThreadingModel.EVENT_LOOP && context.nettyEventLoop() == chctx.executor()) {
        executor = (EventLoopExecutor) context.executor();
    } else {
        executor = new EventLoopExecutor((EventLoop)chctx.executor());
    }
    this.channelWritable = chctx.channel().isWritable();
    this.outboundMessageQueue = strictThreadMode ? new DirectOutboundMessageQueue() : new InternalMessageChannel(executor);
    this.voidPromise = new VoidChannelPromise(chctx.channel(), false);
    this.autoRead = true;
}
/**
* Called by a Netty handler to relay user {@code event}, the default implementation handles
* {@link ShutdownEvent} and {@link ReferenceCounted}.
* <ul>
* <li>{@code ShutdownEvent} trigger a channel shutdown</li>
* <li>{@code ReferencedCounter} is released</li>
* </ul>
* Subclasses can override it to handle user events.
* <p/>
* This method is exclusively called on the event-loop thread and relays a channel user event.
* @param event the event.
*/
protected void handleEvent(Object event) {
    if (event instanceof ShutdownEvent) {
        ShutdownEvent shutdownEvent = (ShutdownEvent)event;
        if (shutdown == null) {
            // First shutdown request: record the promise and start the shutdown sequence.
            ChannelPromise promise = chctx.newPromise();
            shutdown = promise;
            handleShutdown(shutdownEvent.timeout(), promise);
        } else {
            // A shutdown is already in progress; a second shutdown event is not supported.
            throw new UnsupportedOperationException();
        }
    } else {
        // Will release the event if needed
        ReferenceCountUtil.release(event);
    }
}
/**
* Called by the Netty handler when the connection becomes idle. The default implementation closes the
* connection.
* <p/>
* Subclasses can override it to prevent the idle event to happen (e.g. when the connection is pooled) or
* perform extra work when the idle event happens.
* <p/>
* This method is exclusively called on the event-loop thread and relays a channel user event.
*/
protected void handleIdle(IdleStateEvent event) {
    log.debug("The connection will be closed due to timeout");
    // Should be channel close ...
    // Closes the channel directly instead of going through the shutdown sequence.
    chctx.close();
}
protected boolean supportsFileRegion() {
    // Zero-copy file regions require transport support and are unavailable when
    // TLS or traffic shaping needs to observe the transferred bytes.
    return vertx.transport().supportFileRegion() && !isSsl() && !isTrafficShaped();
}
/**
* Implement the shutdown default's behavior that cancels the shutdown timeout and close the channel with the
* channel {@code promise} argument.
*
* @param timeout the shutdown timeout
* @param promise the channel promise to be used for closing the channel
*/
protected void handleShutdown(Duration timeout, ChannelPromise promise) {
    // Assert from event-loop
    ScheduledFuture<?> t = shutdownTimeout;
    // Close only when no shutdown timeout task is pending, or when it can still be
    // cancelled; otherwise the scheduled task is responsible for closing the channel.
    if (t == null || t.cancel(false)) {
        // NOTE(review): closes with the 'shutdown' field rather than the 'promise'
        // parameter; all visible callers set shutdown == promise before calling.
        channel.close(shutdown);
    }
}
/**
* Override the {@link ConnectionBase#close()} behavior to cooperate with the shutdown sequence.
*/
public final Future<Void> close() {
    // A close is a shutdown with no grace period.
    return shutdown(0L, TimeUnit.SECONDS);
}
/**
* Initiate the connection shutdown sequence.
*
* @param timeout the shutdown timeout
* @param unit the shutdown timeout unit
* @return the future completed after the channel's closure
*/
public final Future<Void> shutdown(long timeout, TimeUnit unit) {
    if (timeout < 0L) {
        throw new IllegalArgumentException("Timeout must be >= 0");
    }
    // Normalize the timeout once, then run the shutdown on the event loop.
    Duration grace = Duration.ofMillis(unit.toMillis(timeout));
    ChannelPromise promise = channel.newPromise();
    EventExecutor executor = chctx.executor();
    if (executor.inEventLoop()) {
        shutdown(grace, promise);
    } else {
        executor.execute(() -> shutdown(grace, promise));
    }
    // Bridge the Netty promise to a Vert.x future for the caller.
    PromiseInternal<Void> result = context.promise();
    promise.addListener(result);
    return result.future();
}
// Runs on the event loop: starts a shutdown or joins an in-flight one.
private void shutdown(Duration timeout, ChannelPromise promise) {
    if (shutdown != null) {
        // A shutdown is already in progress.
        ScheduledFuture<?> t = shutdownTimeout;
        if (timeout.isZero() && (t == null || t.cancel(false))) {
            // Hard shutdown requested: pre-empt the pending graceful shutdown and close now.
            shutdown = promise;
            channel.close(promise);
        } else {
            // Piggy-back on the in-flight shutdown: complete when the channel closes.
            channel
                .closeFuture()
                .addListener(future -> {
                    if (future.isSuccess()) {
                        promise.setSuccess();
                    } else {
                        promise.setFailure(future.cause());
                    }
                });
        }
    } else {
        shutdown = promise;
        if (!timeout.isZero()) {
            // Force-close the channel if the graceful shutdown does not finish in time.
            EventExecutor el = chctx.executor();
            shutdownTimeout = el.schedule(() -> {
                channel.close(promise);
            }, timeout.toMillis(), TimeUnit.MILLISECONDS);
        }
        handleShutdown(timeout, promise);
    }
}
// Exclusively called by the owning handler close signal
void handleClose(ChannelPromise promise) {
    terminateClose(promise);
}

// Emits the close at most once; later calls just wait for the channel to close.
private void terminateClose(ChannelPromise promise) {
    if (!closeSent) {
        closeSent = true;
        writeClose(promise);
    } else {
        // Close already sent: complete the promise when the channel actually closes.
        channel
            .closeFuture()
            .addListener(future -> {
                if (future.isSuccess()) {
                    promise.setSuccess();
                } else {
                    promise.setFailure(future.cause());
                }
            });
    }
}
/**
* Called by the Netty handler when the connection becomes closed. The default implementation flushes and closes the
* connection.
* <p/>
* Subclasses can override it to intercept the channel close and implement the close operation, this method should
* always be called to proceed with the close control flow.
* <p/>
* This method is exclusively called on the event-loop thread and relays a channel user event.
*/
protected void writeClose(ChannelPromise promise) {
    // Make sure everything is flushed out on close
    ChannelPromise channelPromise = chctx
        .newPromise()
        .addListener((ChannelFutureListener) f -> {
            chctx.close(promise);
        });
    // Write-and-flush an empty buffer so the close happens only after pending writes.
    writeToChannel(Unpooled.EMPTY_BUFFER, true, channelPromise);
}
protected void handleClosed() {
    // The channel is closed: a pending shutdown timeout task is now pointless.
    ScheduledFuture<?> timeout = shutdownTimeout;
    if (timeout != null) {
        timeout.cancel(false);
    }
    // Close the outbound queue; no further writes can be delivered.
    outboundMessageQueue.close();
    super.handleClosed();
}

/**
 * Called when the connection write queue is drained
 */
protected void handleWriteQueueDrained() {
}

// Hook invoked for each inbound message; default is a no-op.
protected void handleMessage(Object msg) {
}

// Hook invoked when a read batch completes; default is a no-op.
protected void handleReadComplete() {
}
void channelWritabilityChanged() {
    // Refresh the cached writability flag; when the channel becomes writable
    // again, try to drain queued outbound messages.
    channelWritable = chctx.channel().isWritable();
    if (channelWritable) {
        outboundMessageQueue.tryDrain();
    }
}
/**
* This method is exclusively called by {@code VertxHandler} to read a message on the event-loop thread.
*/
final void read(Object msg) {
    if (METRICS_ENABLED) {
        reportBytesRead(msg);
    }
    read = true;
    // Deliver directly only when not re-entering handleMessage, not paused, and
    // no earlier messages are queued (delivery order must be preserved).
    if (!reentrant && !paused && (pending == null || pending.isEmpty())) {
        // Fast path
        reentrant = true;
        try {
            handleMessage(msg);
        } finally {
            reentrant = false;
        }
        // The pending queue could be not empty at this stage if a pending message was added by calling handleMessage
        // Subsequent calls to read or readComplete will take care of these messages
    } else {
        addPending(msg);
    }
}

// Queues a message for later delivery; drains immediately unless we are
// currently inside handleMessage (reentrant).
private void addPending(Object msg) {
    if (pending == null) {
        pending = new ArrayDeque<>();
    }
    pending.add(msg);
    if (!reentrant) {
        checkPendingMessages();
    }
}
/**
* This method is exclusively called by {@code VertxHandler} to signal read completion on the event-loop thread.
*/
final void readComplete() {
    if (read) {
        // Deliver any messages queued during this read batch before signalling completion.
        if (pending != null) {
            checkPendingMessages();
        }
        handleReadComplete();
        read = false;
        // Issue a deferred flush (if any) and re-evaluate channel auto-read.
        checkFlush();
        checkAutoRead();
    }
}

// Drains the pending queue, stopping as soon as the connection is paused.
private void checkPendingMessages() {
    Object msg;
    reentrant = true;
    try {
        while (!paused && (msg = pending.poll()) != null) {
            handleMessage(msg);
        }
    } finally {
        reentrant = false;
    }
}
// Stops delivering inbound messages; subsequent reads are buffered in 'pending'.
public final void doPause() {
    assert chctx.executor().inEventLoop();
    paused = true;
}

// Resumes delivery, replaying buffered messages and re-running the
// read-complete bookkeeping (flush + auto-read) as if a read batch had just finished.
public final void doResume() {
    assert chctx.executor().inEventLoop();
    if (!paused) {
        return;
    }
    paused = false;
    if (!read && pending != null && !pending.isEmpty()) {
        read = true;
        try {
            checkPendingMessages();
            handleReadComplete();
        } finally {
            read = false;
            if (!draining) {
                checkFlush();
            }
            checkAutoRead();
        }
    }
}
// Flushes the channel once if a write was deferred while a read was in progress.
private void checkFlush() {
    if (!needsFlush) {
        return;
    }
    needsFlush = false;
    chctx.flush();
}
// Toggles channel auto-read based on the pending backlog: suspend reads once the
// queue reaches 8 buffered messages, resume once it is fully drained.
private void checkAutoRead() {
    if (autoRead) {
        if (pending != null && pending.size() >= 8) {
            autoRead = false;
            chctx.channel().config().setAutoRead(false);
        }
    } else if (pending == null || pending.isEmpty()) {
        autoRead = true;
        chctx.channel().config().setAutoRead(true);
    }
}
/**
 * Like {@link #write(Object, boolean, ChannelPromise)}.
 */
public final ChannelPromise write(Object msg, boolean forceFlush, Promise<Void> promise) {
    // A null promise means the caller does not care about completion.
    ChannelPromise channelPromise = promise == null ? voidPromise : newChannelPromise(promise);
    write(msg, forceFlush, channelPromise);
    return channelPromise;
}

/**
 * Like {@link #write(Object, boolean, ChannelPromise)}.
 */
public final ChannelPromise write(Object msg, boolean forceFlush) {
    return write(msg, forceFlush, voidPromise);
}

/**
 * This method must be exclusively called on the event-loop thread.
 *
 * <p>This method directly writes to the channel pipeline and bypasses the outbound queue.</p>
 *
 * @param msg the message to write
 * @param forceFlush flush when {@code true} or there is no read in progress
 * @param promise the promise receiving the completion event
 */
public final ChannelPromise write(Object msg, boolean forceFlush, ChannelPromise promise) {
    assert chctx.executor().inEventLoop();
    if (METRICS_ENABLED) {
        reportsBytesWritten(msg);
    }
    // Defer flushing while a read batch or a drain is in progress, unless forced;
    // the deferred flush is issued later by checkFlush().
    boolean flush = (!read && !draining) || forceFlush;
    needsFlush = !flush;
    if (flush) {
        chctx.writeAndFlush(msg, promise);
    } else {
        chctx.write(msg, promise);
    }
    return promise;
}
// Writes with a void promise: completion is not reported to the caller.
public final boolean writeToChannel(Object obj) {
    return writeToChannel(obj, voidPromise);
}

public final boolean writeToChannel(Object msg, Promise<Void> listener) {
    return writeToChannel(msg, listener == null ? voidPromise : newChannelPromise(listener));
}

public final boolean writeToChannel(Object msg, ChannelPromise promise) {
    return writeToChannel(msg, false, promise);
}

// Wraps the message in a MessageWrite so the outbound queue can either perform
// the write on the event loop or cancel it (failing the promise).
public final boolean writeToChannel(Object msg, boolean forceFlush, ChannelPromise promise) {
    return writeToChannel(new MessageWrite() {
        @Override
        public void write() {
            VertxConnection.this.write(msg, forceFlush, promise);
        }
        @Override
        public void cancel(Throwable cause) {
            promise.setFailure(cause);
        }
    });
}

// Write to channel boolean return for now is not used so avoids reading a volatile
public final boolean writeToChannel(MessageWrite msg) {
    return outboundMessageQueue.write(msg);
}
/**
 * Asynchronous flush.
 */
public final void flush() {
    flush(voidPromise);
}

/**
 * Asynchronous flush.
 *
 * @param promise the promise resolved when flush occurred
 */
public final void flush(ChannelPromise promise) {
    // Writing an empty buffer with forceFlush=true flushes everything queued so far.
    writeToChannel(Unpooled.EMPTY_BUFFER, true, promise);
}

/**
 * Asynchronous flush.
 *
 * @param listener the listener notified when flush occurred
 */
public final void flush(FutureListener<Void> listener) {
    writeToChannel(Unpooled.EMPTY_BUFFER, true, listener == null ? voidPromise : wrap(listener));
}
/**
* @return the write queue writability status
*/
public boolean writeQueueFull() {
    // The write queue is considered full when it reports itself non-writable.
    return !outboundMessageQueue.isWritable();
}
/**
 * Send a file as a file region for zero copy transfer to the socket.
 *
 * The implementation splits the file into multiple regions to avoid stalling the pipeline
 * and producing idle timeouts for very large files.
 *
 * @param fc the file to send
 * @param offset the file offset
 * @param length the file length
 * @param writeFuture the write future to be completed when the transfer is done or failed
 */
private void sendFileRegion(FileChannel fc, long offset, long length, ChannelPromise writeFuture) {
    if (length <= MAX_REGION_SIZE) {
        // Last (or only) region: '<=' ensures an exact multiple of MAX_REGION_SIZE does
        // not recurse into a useless zero-length trailing region write.
        FileRegion region = new DefaultFileRegion(fc, offset, length);
        // Retain explicitly this file region so the underlying channel is not closed by the NIO channel when it
        // has been sent as the caller can need it again
        region.retain();
        writeToChannel(region, writeFuture);
    } else {
        // Write one MAX_REGION_SIZE chunk, then continue with the remainder once it completes.
        ChannelPromise promise = chctx.newPromise();
        FileRegion region = new DefaultFileRegion(fc, offset, MAX_REGION_SIZE);
        // Retain explicitly this file region so the underlying channel is not closed by the NIO channel when it
        // has been sent as we need it again
        region.retain();
        writeToChannel(region, promise);
        promise.addListener(future -> {
            if (future.isSuccess()) {
                sendFileRegion(fc, offset + MAX_REGION_SIZE, length - MAX_REGION_SIZE, writeFuture);
            } else {
                log.error(future.cause().getMessage(), future.cause());
                writeFuture.setFailure(future.cause());
            }
        });
    }
}
public ChannelFuture sendFile(FileChannel fc, long offset, long length) {
    // Write the content.
    ChannelPromise writeFuture = chctx.newPromise();
    if (!supportsFileRegion()) {
        // Cannot use zero-copy
        try {
            // The "uncloseable" wrapper keeps the caller's FileChannel open after transfer.
            writeToChannel(new UncloseableChunkedNioFile(fc, offset, length), writeFuture);
        } catch (IOException e) {
            return chctx.newFailedFuture(e);
        }
    } else {
        // No encryption - use zero-copy.
        sendFileRegion(fc, offset, length, writeFuture);
    }
    return writeFuture;
}
private
|
VertxConnection
|
java
|
lettuce-io__lettuce-core
|
src/main/java/io/lettuce/core/dynamic/support/TypeVariableTypeInformation.java
|
{
"start": 482,
"end": 3334
}
|
class ____<T> extends ParentTypeAwareTypeInformation<T> {
private final TypeVariable<?> variable;
private final Type owningType;
/**
* Creates a bew {@link TypeVariableTypeInformation} for the given {@link TypeVariable} owning {@link Type} and parent
* {@link TypeDiscoverer}.
*
* @param variable must not be {@code null}
* @param owningType must not be {@code null}
* @param parent can be be {@code null}
* @param typeVariableMap must not be {@code null}
*/
public TypeVariableTypeInformation(TypeVariable<?> variable, Type owningType, TypeDiscoverer<?> parent,
Map<TypeVariable<?>, Type> typeVariableMap) {
super(variable, parent, typeVariableMap);
LettuceAssert.notNull(variable, "TypeVariable must not be null");
this.variable = variable;
this.owningType = owningType;
}
@Override
public Class<T> getType() {
int index = getIndex(variable);
if (owningType instanceof ParameterizedType && index != -1) {
Type fieldType = ((ParameterizedType) owningType).getActualTypeArguments()[index];
return resolveClass(fieldType);
}
return resolveClass(variable);
}
/**
* Returns the index of the type parameter binding the given {@link TypeVariable}.
*
* @param variable
* @return
*/
private int getIndex(TypeVariable<?> variable) {
Class<?> rawType = resolveClass(owningType);
TypeVariable<?>[] typeParameters = rawType.getTypeParameters();
for (int i = 0; i < typeParameters.length; i++) {
if (variable.equals(typeParameters[i])) {
return i;
}
}
return -1;
}
@Override
public List<TypeInformation<?>> getTypeArguments() {
List<TypeInformation<?>> result = new ArrayList<>();
Type type = resolveType(variable);
if (type instanceof ParameterizedType) {
for (Type typeArgument : ((ParameterizedType) type).getActualTypeArguments()) {
result.add(createInfo(typeArgument));
}
}
return result;
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof TypeVariableTypeInformation)) {
return false;
}
TypeVariableTypeInformation<?> that = (TypeVariableTypeInformation<?>) obj;
return getType().equals(that.getType());
}
@Override
public int hashCode() {
return Objects.hash(super.hashCode(), variable, owningType);
}
@Override
public String toString() {
return variable.getName();
}
public String getVariableName() {
return variable.getName();
}
}
|
TypeVariableTypeInformation
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/SpringBatchEndpointBuilderFactory.java
|
{
"start": 5179,
"end": 7690
}
|
/**
 * Advanced builder for endpoint producers of the Spring Batch component.
 */
interface AdvancedSpringBatchEndpointBuilder
        extends
            EndpointProducerBuilder {
    // Switches back to the basic (non-advanced) endpoint builder view.
    default SpringBatchEndpointBuilder basic() {
        return (SpringBatchEndpointBuilder) this;
    }
    /**
     * Whether the producer should be started lazy (on the first message).
     * By starting lazy you can use this to allow CamelContext and routes to
     * startup in situations where a producer may otherwise fail during
     * starting and cause the route to fail being started. By deferring this
     * startup to be lazy then the startup failure can be handled during
     * routing messages via Camel's routing error handlers. Beware that when
     * the first message is processed then creating and starting the
     * producer may take a little time and prolong the total processing time
     * of the processing.
     *
     * The option is a: <code>boolean</code> type.
     *
     * Default: false
     * Group: producer (advanced)
     *
     * @param lazyStartProducer the value to set
     * @return the dsl builder
     */
    default AdvancedSpringBatchEndpointBuilder lazyStartProducer(boolean lazyStartProducer) {
        doSetProperty("lazyStartProducer", lazyStartProducer);
        return this;
    }
    /**
     * Whether the producer should be started lazy (on the first message).
     * By starting lazy you can use this to allow CamelContext and routes to
     * startup in situations where a producer may otherwise fail during
     * starting and cause the route to fail being started. By deferring this
     * startup to be lazy then the startup failure can be handled during
     * routing messages via Camel's routing error handlers. Beware that when
     * the first message is processed then creating and starting the
     * producer may take a little time and prolong the total processing time
     * of the processing.
     *
     * The option will be converted to a <code>boolean</code> type.
     *
     * Default: false
     * Group: producer (advanced)
     *
     * @param lazyStartProducer the value to set
     * @return the dsl builder
     */
    default AdvancedSpringBatchEndpointBuilder lazyStartProducer(String lazyStartProducer) {
        doSetProperty("lazyStartProducer", lazyStartProducer);
        return this;
    }
}
public
|
AdvancedSpringBatchEndpointBuilder
|
java
|
apache__camel
|
core/camel-core-model/src/main/java/org/apache/camel/model/OnCompletionDefinition.java
|
{
"start": 1649,
"end": 17607
}
|
class ____ extends OutputDefinition<OnCompletionDefinition>
implements ExecutorServiceAwareDefinition<OnCompletionDefinition> {
@XmlTransient
private ExecutorService executorServiceBean;
@XmlTransient
private boolean routeScoped = true;
@XmlAttribute
@Metadata(label = "advanced", javaType = "org.apache.camel.model.OnCompletionMode", defaultValue = "AfterConsumer",
enums = "AfterConsumer,BeforeConsumer")
private String mode;
@XmlAttribute
@Metadata(javaType = "java.lang.Boolean")
private String onCompleteOnly;
@XmlAttribute
@Metadata(javaType = "java.lang.Boolean")
private String onFailureOnly;
@XmlAttribute
@Metadata(label = "advanced", javaType = "java.lang.Boolean")
private String parallelProcessing;
@XmlAttribute
@Metadata(label = "advanced", javaType = "java.util.concurrent.ExecutorService")
private String executorService;
@XmlAttribute(name = "useOriginalMessage")
@Metadata(label = "advanced", javaType = "java.lang.Boolean")
private String useOriginalMessage;
@Metadata(description = "To use an expression to only trigger routing this completion steps in specific situations")
@XmlElement
@AsPredicate
private OnWhenDefinition onWhen;
public OnCompletionDefinition() {
}
protected OnCompletionDefinition(OnCompletionDefinition source) {
super(source);
this.executorServiceBean = source.executorServiceBean;
this.routeScoped = source.routeScoped;
this.mode = source.mode;
this.onCompleteOnly = source.onCompleteOnly;
this.onFailureOnly = source.onFailureOnly;
this.parallelProcessing = source.parallelProcessing;
this.executorService = source.executorService;
this.useOriginalMessage = source.useOriginalMessage;
this.onWhen = source.onWhen != null ? source.onWhen.copyDefinition() : null;
}
@Override
public OnCompletionDefinition copyDefinition() {
return new OnCompletionDefinition(this);
}
public void setRouteScoped(boolean routeScoped) {
this.routeScoped = routeScoped;
}
public boolean isRouteScoped() {
return routeScoped;
}
@Override
public void setParent(ProcessorDefinition<?> parent) {
if (routeScoped) {
super.setParent(parent);
}
}
@Override
public String toString() {
return "onCompletion[" + getOutputs() + "]";
}
@Override
public String getShortName() {
return "onCompletion";
}
@Override
public String getLabel() {
return "onCompletion";
}
@Override
public boolean isAbstract() {
return true;
}
@Override
public boolean isTopLevelOnly() {
return true;
}
/**
* Removes all existing global {@link org.apache.camel.model.OnCompletionDefinition} from the definition.
* <p/>
* This is used to let route scoped <tt>onCompletion</tt> overrule any global <tt>onCompletion</tt>. Do not remove
* an existing route-scoped because it is now possible (CAMEL-16374) to have several.
*
* @param definition the parent definition that is the route
*/
public void removeAllOnCompletionDefinition(ProcessorDefinition<?> definition) {
definition.getOutputs().removeIf(out -> out instanceof OnCompletionDefinition &&
!((OnCompletionDefinition) out).isRouteScoped());
}
@Override
public ProcessorDefinition<?> end() {
// pop parent block, as we added our self as block to parent when
// synchronized was defined in the route
getParent().popBlock();
return super.end();
}
/**
* Sets the mode to be after route is done (default due backwards compatible).
* <p/>
* This executes the on completion work <i>after</i> the route consumer have written response back to the callee (if
* its InOut mode).
*
* @return the builder
*/
public OnCompletionDefinition modeAfterConsumer() {
setMode(OnCompletionMode.AfterConsumer.name());
return this;
}
/**
* Sets the mode to be before consumer is done.
* <p/>
* This allows the on completion work to execute <i>before</i> the route consumer, writes any response back to the
* callee (if its InOut mode).
*
* @return the builder
*/
public OnCompletionDefinition modeBeforeConsumer() {
setMode(OnCompletionMode.BeforeConsumer.name());
return this;
}
/**
* Will only synchronize when the {@link org.apache.camel.Exchange} completed successfully (no errors).
*
* @return the builder
*/
public OnCompletionDefinition onCompleteOnly() {
boolean isOnFailureOnly = Boolean.toString(true).equals(onFailureOnly);
if (isOnFailureOnly) {
throw new IllegalArgumentException(
"Both onCompleteOnly and onFailureOnly cannot be true. Only one of them can be true. On node: " + this);
}
// must define return type as OutputDefinition and not this type to
// avoid end user being able
// to invoke onFailureOnly/onCompleteOnly more than once
setOnCompleteOnly(Boolean.toString(true));
setOnFailureOnly(Boolean.toString(false));
return this;
}
/**
* Will only synchronize when the {@link org.apache.camel.Exchange} ended with failure (exception or FAULT message).
*
* @return the builder
*/
public OnCompletionDefinition onFailureOnly() {
boolean isOnCompleteOnly = Boolean.toString(true).equals(onCompleteOnly);
if (isOnCompleteOnly) {
throw new IllegalArgumentException(
"Both onCompleteOnly and onFailureOnly cannot be true. Only one of them can be true. On node: " + this);
}
// must define return type as OutputDefinition and not this type to
// avoid end user being able
// to invoke onFailureOnly/onCompleteOnly more than once
setOnCompleteOnly(Boolean.toString(false));
setOnFailureOnly(Boolean.toString(true));
return this;
}
/**
* Sets an additional predicate that should be true before the onCompletion is triggered.
* <p/>
* To be used for fine grained controlling whether a completion callback should be invoked or not
*
* @param predicate predicate that determines true or false
* @return the builder
*/
public OnCompletionDefinition onWhen(@AsPredicate Predicate predicate) {
setOnWhen(new OnWhenDefinition(predicate));
return this;
}
/**
* Will use the original input message body when an {@link org.apache.camel.Exchange} for this on completion.
* <p/>
* The original input message is defensively copied, and the copied message body is converted to
* {@link org.apache.camel.StreamCache} if possible (stream caching is enabled, can be disabled globally or on the
* original route), to ensure the body can be read when the original message is being used later. If the body is
* converted to {@link org.apache.camel.StreamCache} then the message body on the current
* {@link org.apache.camel.Exchange} is replaced with the {@link org.apache.camel.StreamCache} body. If the body is
* not converted to {@link org.apache.camel.StreamCache} then the body will not be able to re-read when accessed
* later.
* <p/>
* <b>Important:</b> The original input means the input message that are bounded by the current
* {@link org.apache.camel.spi.UnitOfWork}. An unit of work typically spans one route, or multiple routes if they
* are connected using internal endpoints such as direct or seda. When messages is passed via external endpoints
* such as JMS or HTTP then the consumer will create a new unit of work, with the message it received as input as
* the original input. Also some EIP patterns such as splitter, multicast, will create a new unit of work boundary
* for the messages in their sub-route (eg the split message); however these EIPs have an option named
* <tt>shareUnitOfWork</tt> which allows to combine with the parent unit of work in regard to error handling and
* therefore use the parent original message.
* <p/>
* By default this feature is off.
*
* @return the builder
* @deprecated use {@link #useOriginalMessage()}
*/
@Deprecated(since = "4.6.0")
public OnCompletionDefinition useOriginalBody() {
setUseOriginalMessage(Boolean.toString(true));
return this;
}
/**
* Will use the original input message when an {@link org.apache.camel.Exchange} for this on completion.
* <p/>
* The original input message is defensively copied, and the copied message body is converted to
* {@link org.apache.camel.StreamCache} if possible (stream caching is enabled, can be disabled globally or on the
* original route), to ensure the body can be read when the original message is being used later. If the body is
* converted to {@link org.apache.camel.StreamCache} then the message body on the current
* {@link org.apache.camel.Exchange} is replaced with the {@link org.apache.camel.StreamCache} body. If the body is
* not converted to {@link org.apache.camel.StreamCache} then the body will not be able to re-read when accessed
* later.
* <p/>
* <b>Important:</b> The original input means the input message that are bounded by the current
* {@link org.apache.camel.spi.UnitOfWork}. An unit of work typically spans one route, or multiple routes if they
* are connected using internal endpoints such as direct or seda. When messages is passed via external endpoints
* such as JMS or HTTP then the consumer will create a new unit of work, with the message it received as input as
* the original input. Also some EIP patterns such as splitter, multicast, will create a new unit of work boundary
* for the messages in their sub-route (eg the split message); however these EIPs have an option named
* <tt>shareUnitOfWork</tt> which allows to combine with the parent unit of work in regard to error handling and
* therefore use the parent original message.
* <p/>
* By default this feature is off.
*
* @return the builder
*/
public OnCompletionDefinition useOriginalMessage() {
setUseOriginalMessage(Boolean.toString(true));
return this;
}
/**
* To use a custom Thread Pool to be used for parallel processing. Notice if you set this option, then parallel
* processing is automatic implied, and you do not have to enable that option as well.
*/
@Override
public OnCompletionDefinition executorService(ExecutorService executorService) {
this.executorServiceBean = executorService;
return this;
}
/**
* Refers to a custom Thread Pool to be used for parallel processing. Notice if you set this option, then parallel
* processing is automatic implied, and you do not have to enable that option as well.
*/
@Override
public OnCompletionDefinition executorService(String executorService) {
setExecutorService(executorService);
return this;
}
/**
* If enabled then the on completion process will run asynchronously by a separate thread from a thread pool. By
* default this is false, meaning the on completion process will run synchronously using the same caller thread as
* from the route.
*
* @return the builder
*/
public OnCompletionDefinition parallelProcessing() {
setParallelProcessing(Boolean.toString(true));
return this;
}
/**
* If enabled then the on completion process will run asynchronously by a separate thread from a thread pool. By
* default this is false, meaning the on completion process will run synchronously using the same caller thread as
* from the route.
*
* @return the builder
*/
public OnCompletionDefinition parallelProcessing(boolean parallelProcessing) {
setParallelProcessing(Boolean.toString(parallelProcessing));
return this;
}
@Override
public List<ProcessorDefinition<?>> getOutputs() {
return outputs;
}
@XmlElementRef
@Override
public void setOutputs(List<ProcessorDefinition<?>> outputs) {
super.setOutputs(outputs);
}
@Override
public ExecutorService getExecutorServiceBean() {
return executorServiceBean;
}
@Override
public String getExecutorServiceRef() {
return executorService;
}
public String getMode() {
return mode;
}
/**
* Sets the on completion mode.
* <p/>
* The default value is AfterConsumer
*/
public void setMode(String mode) {
this.mode = mode;
}
public String getOnCompleteOnly() {
return onCompleteOnly;
}
public void setOnCompleteOnly(String onCompleteOnly) {
this.onCompleteOnly = onCompleteOnly;
}
public String getOnFailureOnly() {
return onFailureOnly;
}
public void setOnFailureOnly(String onFailureOnly) {
this.onFailureOnly = onFailureOnly;
}
public OnWhenDefinition getOnWhen() {
return onWhen;
}
public void setOnWhen(OnWhenDefinition onWhen) {
this.onWhen = onWhen;
}
public String getUseOriginalMessage() {
return useOriginalMessage;
}
/**
* Will use the original input message body when an {@link org.apache.camel.Exchange} for this on completion.
* <p/>
* The original input message is defensively copied, and the copied message body is converted to
* {@link org.apache.camel.StreamCache} if possible (stream caching is enabled, can be disabled globally or on the
* original route), to ensure the body can be read when the original message is being used later. If the body is
* converted to {@link org.apache.camel.StreamCache} then the message body on the current
* {@link org.apache.camel.Exchange} is replaced with the {@link org.apache.camel.StreamCache} body. If the body is
* not converted to {@link org.apache.camel.StreamCache} then the body will not be able to re-read when accessed
* later.
* <p/>
* <b>Important:</b> The original input means the input message that are bounded by the current
* {@link org.apache.camel.spi.UnitOfWork}. An unit of work typically spans one route, or multiple routes if they
* are connected using internal endpoints such as direct or seda. When messages is passed via external endpoints
* such as JMS or HTTP then the consumer will create a new unit of work, with the message it received as input as
* the original input. Also some EIP patterns such as splitter, multicast, will create a new unit of work boundary
* for the messages in their sub-route (eg the split message); however these EIPs have an option named
* <tt>shareUnitOfWork</tt> which allows to combine with the parent unit of work in regard to error handling and
* therefore use the parent original message.
* <p/>
* By default this feature is off.
*
* @return the builder
*/
public void setUseOriginalMessage(String useOriginalMessage) {
this.useOriginalMessage = useOriginalMessage;
}
public String getParallelProcessing() {
return parallelProcessing;
}
public void setParallelProcessing(String parallelProcessing) {
this.parallelProcessing = parallelProcessing;
}
public String getExecutorService() {
return executorService;
}
public void setExecutorService(String executorService) {
this.executorService = executorService;
}
}
|
OnCompletionDefinition
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/internal/long2darrays/Long2DArrays_assertHasSameDimensionsAs_with_Array_Test.java
|
{
"start": 1034,
"end": 1414
}
|
/**
 * Verifies that Long2DArrays#assertHasSameDimensionsAs delegates to the shared Arrays2D helper.
 */
class Long2DArrays_assertHasSameDimensionsAs_with_Array_Test extends Long2DArraysBaseTest {

    @Test
    void should_delegate_to_Arrays2D() {
        // GIVEN
        long[][] other = new long[][] { { 0L, 4L }, { 8L, 12L } };
        // WHEN
        long2dArrays.assertHasSameDimensionsAs(info, actual, other);
        // THEN
        verify(arrays2d).assertHasSameDimensionsAs(info, actual, other);
    }
}
|
Long2DArrays_assertHasSameDimensionsAs_with_Array_Test
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/rule/RuleExecutor.java
|
{
"start": 2426,
"end": 3404
}
|
class ____<TreeType extends Node<TreeType>> {
private final String name;
private final Rule<?, TreeType>[] rules;
private final Limiter limit;
@SafeVarargs
@SuppressWarnings("varargs")
public Batch(String name, Limiter limit, Rule<?, TreeType>... rules) {
this.name = name;
this.limit = limit;
this.rules = rules;
}
@SafeVarargs
public Batch(String name, Rule<?, TreeType>... rules) {
this(name, Limiter.DEFAULT, rules);
}
public String name() {
return name;
}
public Batch<TreeType> with(Rule<?, TreeType>[] rules) {
return new Batch<>(name, limit, rules);
}
public Rule<?, TreeType>[] rules() {
return rules;
}
}
private Iterable<Batch<TreeType>> batches = null;
protected abstract Iterable<RuleExecutor.Batch<TreeType>> batches();
public
|
Batch
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhancement/merge/MergeEnhancedEntityDynamicUpdateTest.java
|
{
"start": 4214,
"end": 4439
}
|
/**
 * Address owned by a {@code Person}; the association is mandatory and lazily fetched.
 */
class PersonAddress {
    @Id
    Long id;

    @ManyToOne(optional = false, fetch = FetchType.LAZY)
    Person parent;
}
@Entity(name = "NullablePerson")
@Table(name = "NULLABLE_PERSON")
@DynamicUpdate
@DynamicInsert
static
|
PersonAddress
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.