language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/component/timer/TimerNegativeDelayTest.java
|
{
"start": 1032,
"end": 1657
}
|
class ____ extends ContextTestSupport {
@Test
public void testNegativeDelay() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMessageCount(10);
context.getRouteController().startAllRoutes();
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("timer:foo?delay=-1&period=0&repeatCount=10").autoStartup(false).to("mock:result");
}
};
}
}
|
TimerNegativeDelayTest
|
java
|
apache__flink
|
flink-formats/flink-protobuf/src/test/java/org/apache/flink/formats/protobuf/SimpleProtoToRowTest.java
|
{
"start": 1347,
"end": 5143
}
|
class ____ {
@Test
public void testSimple() throws Exception {
SimpleTestMulti simple =
SimpleTestMulti.newBuilder()
.setA(1)
.setB(2L)
.setC(false)
.setD(0.1f)
.setE(0.01)
.setF("haha")
.setG(ByteString.copyFrom(new byte[] {1}))
.setH(SimpleTestMulti.Corpus.IMAGES)
.setI(Status.FINISHED)
.setFAbc7D(1) // test fieldNameToJsonName
.setVpr6S(2)
.build();
RowData row = ProtobufTestHelper.pbBytesToRow(SimpleTestMulti.class, simple.toByteArray());
assertEquals(11, row.getArity());
assertEquals(1, row.getInt(0));
assertEquals(2L, row.getLong(1));
assertFalse(row.getBoolean(2));
assertEquals(Float.valueOf(0.1f), Float.valueOf(row.getFloat(3)));
assertEquals(Double.valueOf(0.01d), Double.valueOf(row.getDouble(4)));
assertEquals("haha", row.getString(5).toString());
assertEquals(1, (row.getBinary(6))[0]);
assertEquals("IMAGES", row.getString(7).toString());
assertEquals("FINISHED", row.getString(8).toString());
assertEquals(1, row.getInt(9));
assertEquals(2, row.getInt(10));
}
@Test
public void testNotExistsValueIgnoringDefault() throws Exception {
SimpleTestMulti simple =
SimpleTestMulti.newBuilder()
.setB(2L)
.setC(false)
.setD(0.1f)
.setE(0.01)
.setF("haha")
.build();
RowData row = ProtobufTestHelper.pbBytesToRow(SimpleTestMulti.class, simple.toByteArray());
assertTrue(row.isNullAt(0));
assertFalse(row.isNullAt(1));
}
@Test
public void testDefaultValues() throws Exception {
SimpleTestMulti simple = SimpleTestMulti.newBuilder().build();
RowData row =
ProtobufTestHelper.pbBytesToRow(
SimpleTestMulti.class,
simple.toByteArray(),
new PbFormatConfig(SimpleTestMulti.class.getName(), false, true, ""),
false);
assertFalse(row.isNullAt(0));
assertFalse(row.isNullAt(1));
assertFalse(row.isNullAt(2));
assertFalse(row.isNullAt(3));
assertFalse(row.isNullAt(4));
assertFalse(row.isNullAt(5));
assertFalse(row.isNullAt(6));
assertFalse(row.isNullAt(7));
assertFalse(row.isNullAt(8));
assertEquals(10, row.getInt(0));
assertEquals(100L, row.getLong(1));
assertFalse(row.getBoolean(2));
assertEquals(0.0f, row.getFloat(3), 0.0001);
assertEquals(0.0d, row.getDouble(4), 0.0001);
assertEquals("f", row.getString(5).toString());
assertArrayEquals(ByteString.EMPTY.toByteArray(), row.getBinary(6));
assertEquals(SimpleTestMulti.Corpus.UNIVERSAL.toString(), row.getString(7).toString());
assertEquals(Status.UNSPECIFIED.toString(), row.getString(8).toString());
}
@Test
public void testIntEnum() throws Exception {
SimpleTestMulti simple =
SimpleTestMulti.newBuilder()
.setH(SimpleTestMulti.Corpus.IMAGES)
.setI(Status.STARTED)
.build();
RowData row =
ProtobufTestHelper.pbBytesToRow(SimpleTestMulti.class, simple.toByteArray(), true);
assertEquals(2, row.getInt(7));
assertEquals(1, row.getInt(8));
}
}
|
SimpleProtoToRowTest
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/ContextTestSupport.java
|
{
"start": 2936,
"end": 3084
}
|
class ____ creates a {@link CamelContext} with some routes along with a {@link ProducerTemplate} for
* use in core test cases
*/
public abstract
|
which
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/preprocessor/ContextProcessor.java
|
{
"start": 1294,
"end": 1802
}
|
interface ____ {
/**
* It will enrich the application submission context with value provided.
* @param host Address of the host from where application launched.
* @param value Value to be filled in ApplicationSubmissionContext.
* @param applicationId Application Id of the application.
* @param submissionContext Context of the application.
*/
void process(String host, String value, ApplicationId applicationId,
ApplicationSubmissionContext submissionContext);
}
|
ContextProcessor
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/InjectConstructorFactoryGeneratorTest.java
|
{
"start": 43798,
"end": 44410
}
|
class ____ {",
" @Inject A(Produced<String> str) {}",
"}");
daggerCompiler(file)
.compile(
subject -> {
subject.hasErrorCount(1);
subject.hasErrorContaining("Produced may only be injected in @Produces methods");
});
}
@Test public void injectConstructorDependsOnProducer() {
Source file =
CompilerTests.javaSource(
"test.A",
"package test;",
"",
"import dagger.producers.Producer;",
"import javax.inject.Inject;",
"",
"final
|
A
|
java
|
apache__flink
|
flink-table/flink-table-code-splitter/src/main/java/org/apache/flink/table/codesplit/AddBoolBeforeReturnRewriter.java
|
{
"start": 5410,
"end": 6240
}
|
class
____ null;
}
@Override
public Void visitLambdaBody(JavaParser.LambdaBodyContext ctx) {
// skip lambda
return null;
}
@Override
public Void visitStatement(JavaParser.StatementContext ctx) {
if (ctx.RETURN() != null) {
// we add a pair of {} around these statements to avoid the original
// return statement without {}, for example:
// if (...) return;
String newReturnStatement = String.format("{ %s = true; return; }", boolVarName);
rewriter.replace(ctx.start, ctx.stop, newReturnStatement);
boolVarNames.get(boolVarNames.size() - 1).put(key, boolVarName);
}
return visitChildren(ctx);
}
}
}
|
return
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/deviceplugin/DevicePlugin.java
|
{
"start": 914,
"end": 969
}
|
interface ____ vendor plugin to implement.
* */
public
|
for
|
java
|
apache__dubbo
|
dubbo-common/src/test/java/org/apache/dubbo/common/bytecode/WrapperTest.java
|
{
"start": 8519,
"end": 8617
}
|
interface ____ {
void setFloat(float f);
void setFloat(Float f);
}
public
|
I2
|
java
|
spring-projects__spring-boot
|
module/spring-boot-data-couchbase/src/main/java/org/springframework/boot/data/couchbase/autoconfigure/DataCouchbaseConfiguration.java
|
{
"start": 1840,
"end": 3632
}
|
class ____ {
@Bean
@ConditionalOnMissingBean
MappingCouchbaseConverter couchbaseMappingConverter(DataCouchbaseProperties properties,
CouchbaseMappingContext couchbaseMappingContext, CouchbaseCustomConversions couchbaseCustomConversions) {
MappingCouchbaseConverter converter = new MappingCouchbaseConverter(couchbaseMappingContext,
properties.getTypeKey());
converter.setCustomConversions(couchbaseCustomConversions);
return converter;
}
@Bean
@ConditionalOnMissingBean
TranslationService couchbaseTranslationService() {
return new JacksonTranslationService();
}
@Bean(name = BeanNames.COUCHBASE_MAPPING_CONTEXT)
@ConditionalOnMissingBean(name = BeanNames.COUCHBASE_MAPPING_CONTEXT)
CouchbaseMappingContext couchbaseMappingContext(DataCouchbaseProperties properties,
ApplicationContext applicationContext, CouchbaseCustomConversions couchbaseCustomConversions)
throws ClassNotFoundException {
CouchbaseMappingContext mappingContext = new CouchbaseMappingContext();
mappingContext.setInitialEntitySet(new EntityScanner(applicationContext).scan(Document.class));
mappingContext.setSimpleTypeHolder(couchbaseCustomConversions.getSimpleTypeHolder());
Class<?> fieldNamingStrategy = properties.getFieldNamingStrategy();
if (fieldNamingStrategy != null) {
mappingContext
.setFieldNamingStrategy((FieldNamingStrategy) BeanUtils.instantiateClass(fieldNamingStrategy));
}
mappingContext.setAutoIndexCreation(properties.isAutoIndex());
return mappingContext;
}
@Bean(name = BeanNames.COUCHBASE_CUSTOM_CONVERSIONS)
@ConditionalOnMissingBean(name = BeanNames.COUCHBASE_CUSTOM_CONVERSIONS)
CouchbaseCustomConversions couchbaseCustomConversions() {
return new CouchbaseCustomConversions(Collections.emptyList());
}
}
|
DataCouchbaseConfiguration
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/where/annotations/MappedSuperclassTest.java
|
{
"start": 707,
"end": 1473
}
|
class ____ {
@AfterEach
public void tearDown(EntityManagerFactoryScope scope) {
scope.getEntityManagerFactory().getSchemaManager().truncate();
}
@Test
public void testFindParent(EntityManagerFactoryScope scope) {
scope.inTransaction(
entityManager -> {
Child child1 = new SubClass( 1L );
child1.state = 1;
entityManager.persist( child1 );
Child child2 = new Child( 2L );
child2.state = 0;
entityManager.persist( child2 );
}
);
scope.inTransaction(
entityManager -> {
List<Child> children = entityManager.createQuery( "select c from Child c", Child.class )
.getResultList();
assertThat( children.size() ).isEqualTo( 1 );
}
);
}
@Entity(name = "Child")
public static
|
MappedSuperclassTest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/graph/internal/AttributeNodeImpl.java
|
{
"start": 1542,
"end": 4194
}
|
class ____<J, E, K>
extends AbstractGraphNode<J>
implements AttributeNodeImplementor<J, E, K>
permits AttributeNodeImpl.SingularAttributeNodeImpl,
AttributeNodeImpl.PluralAttributeNodeImpl,
AttributeNodeImpl.MapAttributeNodeImpl {
protected final PersistentAttribute<?, J> attribute;
protected final DomainType<E> valueGraphType;
protected final SimpleDomainType<K> keyGraphType;
protected SubGraphImplementor<E> valueSubgraph;
protected SubGraphImplementor<K> keySubgraph;
static <J> AttributeNodeImpl<J,?,?> create(
PersistentAttribute<?, J> attribute, boolean mutable) {
if ( attribute instanceof PluralPersistentAttribute<?, J, ?> pluralAttribute ) {
return create( pluralAttribute, mutable );
}
else if ( attribute instanceof SingularPersistentAttribute<?, J> singularAttribute ) {
return new SingularAttributeNodeImpl<>( singularAttribute, mutable,
singularAttribute.getValueGraphType() );
}
else {
throw new AssertionFailure( "Unrecognized attribute type: " + attribute );
}
}
static <J,E> AttributeNodeImpl<J,E,?> create(
PluralPersistentAttribute<?, J, E> attribute, boolean mutable) {
if ( attribute instanceof MapPersistentAttribute<?, ?, ?> mapAttribute ) {
return create( attribute, mapAttribute, mutable );
}
else {
return new PluralAttributeNodeImpl<>( attribute, mutable,
attribute.getValueGraphType() );
}
}
static <K,V> AttributeNodeImpl<Map<K,V>,V,K> create(
MapPersistentAttribute<?, K, V> attribute, boolean mutable) {
return new MapAttributeNodeImpl<>( attribute, attribute, mutable,
attribute.getValueGraphType(), attribute.getKeyGraphType() );
}
private static <J,K,V> AttributeNodeImpl<J,V,K> create(
PluralPersistentAttribute<?, J, V> plural, MapPersistentAttribute<?, K, ?> attribute, boolean mutable) {
return new MapAttributeNodeImpl<>( plural, attribute, mutable,
plural.getValueGraphType(), attribute.getKeyGraphType() );
}
AttributeNodeImpl(
PersistentAttribute<?, J> attribute, boolean mutable,
DomainType<E> valueGraphType, SimpleDomainType<K> keyGraphType) {
super( mutable );
this.attribute = attribute;
this.valueGraphType = valueGraphType;
this.keyGraphType = keyGraphType;
}
private AttributeNodeImpl(AttributeNodeImpl<J, E, K> that, boolean mutable) {
super( mutable );
attribute = that.attribute;
valueGraphType = that.valueGraphType;
keyGraphType = that.keyGraphType;
valueSubgraph = that.valueSubgraph == null ? null : that.valueSubgraph.makeCopy( mutable );
keySubgraph = that.keySubgraph == null ? null : that.keySubgraph.makeCopy( mutable );
}
static final
|
AttributeNodeImpl
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/component/file/FileConsumerPollStrategyStopOnRollbackTest.java
|
{
"start": 2844,
"end": 4029
}
|
class ____ implements PollingConsumerPollStrategy {
@Override
public boolean begin(Consumer consumer, Endpoint endpoint) {
// start consumer as we simulate the fail in begin
// and thus before camel lazy start it itself
try {
consumer.start();
} catch (Exception e) {
throw RuntimeCamelException.wrapRuntimeCamelException(e);
}
if (counter++ == 0) {
// simulate an error on first poll
throw new IllegalArgumentException("Damn I cannot do this");
}
return true;
}
@Override
public void commit(Consumer consumer, Endpoint endpoint, int polledMessages) {
event += "commit";
}
@Override
public boolean rollback(Consumer consumer, Endpoint endpoint, int retryCounter, Exception cause) {
if (cause.getMessage().equals("Damn I cannot do this")) {
event += "rollback";
// stop consumer as it does not work
consumer.stop();
}
return false;
}
}
}
|
MyPollStrategy
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/BadImportTest.java
|
{
"start": 21085,
"end": 21275
}
|
interface ____ {}
""")
.addOutputLines(
"Test.java",
"""
import org.immutables.value.Value;
@Value.Immutable
|
Test
|
java
|
apache__camel
|
dsl/camel-kamelet-main/src/main/java/org/apache/camel/main/reload/OpenApiGeneratorReloadStrategy.java
|
{
"start": 1445,
"end": 3586
}
|
class ____ extends FileWatcherResourceReloadStrategy {
private static final Logger LOG = LoggerFactory.getLogger(OpenApiGeneratorReloadStrategy.class);
// write to jbang generated file so it can be reloaded
private static final String OPENAPI_GENERATED_FILE = ".camel-jbang/generated-openapi.yaml";
private final File openapi;
private Method method;
public OpenApiGeneratorReloadStrategy(File openapi) {
String parent = openapi.getParent();
if (parent == null) {
parent = ".";
}
setFolder(parent);
// need to adjust file to be what file watcher uses when matching
Path dir = new File(parent).toPath();
this.openapi = dir.resolve(openapi.toPath()).toFile();
setFileFilter(this.openapi::equals);
setResourceReload((name, resource) -> {
if (!openapi.exists() && !openapi.isFile()) {
return;
}
LOG.info("Generating open-api rest-dsl from: {}", openapi);
try {
String out = (String) ObjectHelper.invokeMethodSafe(method, null, getCamelContext(), openapi);
Files.write(Paths.get(OPENAPI_GENERATED_FILE), out.getBytes());
} catch (Exception e) {
LOG.warn("Error generating open-api rest-dsl due: {}", e.getMessage(), e);
}
});
}
@Override
protected void doInit() throws Exception {
super.doInit();
DependencyDownloader downloader = getCamelContext().hasService(DependencyDownloader.class);
// these are extra dependencies used in special use-case so download as hidden
downloader.downloadHiddenDependency("org.apache.camel", "camel-openapi-rest-dsl-generator",
getCamelContext().getVersion());
// the generator is invoked via reflection
Class<?> clazz = getCamelContext().getClassResolver()
.resolveMandatoryClass("org.apache.camel.generator.openapi.RestDslGenerator");
method = clazz.getDeclaredMethod("generateToYaml", CamelContext.class, File.class);
}
}
|
OpenApiGeneratorReloadStrategy
|
java
|
eclipse-vertx__vert.x
|
vertx-core/src/main/java/io/vertx/core/internal/net/SslChannelProvider.java
|
{
"start": 1104,
"end": 4025
}
|
class ____ {
private final Executor workerPool;
private final boolean sni;
private final SslContextProvider sslContextProvider;
public SslChannelProvider(VertxInternal vertx,
SslContextProvider sslContextProvider,
boolean sni) {
this.workerPool = vertx.internalWorkerPool().executor();
this.sni = sni;
this.sslContextProvider = sslContextProvider;
}
public SslContextProvider sslContextProvider() {
return sslContextProvider;
}
public SslHandler createClientSslHandler(SocketAddress peerAddress, String serverName, boolean useAlpn, long sslHandshakeTimeout, TimeUnit sslHandshakeTimeoutUnit) {
SslContext sslContext = sslContextProvider.sslClientContext(serverName, useAlpn);
SslHandler sslHandler;
Executor delegatedTaskExec = sslContextProvider.useWorkerPool() ? workerPool : ImmediateExecutor.INSTANCE;
if (peerAddress != null && peerAddress.isInetSocket()) {
sslHandler = sslContext.newHandler(ByteBufAllocator.DEFAULT, peerAddress.host(), peerAddress.port(), delegatedTaskExec);
} else {
sslHandler = sslContext.newHandler(ByteBufAllocator.DEFAULT, delegatedTaskExec);
}
sslHandler.setHandshakeTimeout(sslHandshakeTimeout, sslHandshakeTimeoutUnit);
return sslHandler;
}
public ChannelHandler createServerHandler(boolean useAlpn, long sslHandshakeTimeout, TimeUnit sslHandshakeTimeoutUnit, HostAndPort remoteAddress) {
if (sni) {
return createSniHandler(useAlpn, sslHandshakeTimeout, sslHandshakeTimeoutUnit, remoteAddress);
} else {
return createServerSslHandler(useAlpn, sslHandshakeTimeout, sslHandshakeTimeoutUnit, remoteAddress);
}
}
private SslHandler createServerSslHandler(boolean useAlpn, long sslHandshakeTimeout, TimeUnit sslHandshakeTimeoutUnit, HostAndPort remoteAddress) {
SslContext sslContext = sslContextProvider.sslServerContext(useAlpn);
Executor delegatedTaskExec = sslContextProvider.useWorkerPool() ? workerPool : ImmediateExecutor.INSTANCE;
SslHandler sslHandler;
if (remoteAddress != null) {
sslHandler = sslContext.newHandler(ByteBufAllocator.DEFAULT, remoteAddress.host(), remoteAddress.port(), delegatedTaskExec);
} else {
sslHandler = sslContext.newHandler(ByteBufAllocator.DEFAULT, delegatedTaskExec);
}
sslHandler.setHandshakeTimeout(sslHandshakeTimeout, sslHandshakeTimeoutUnit);
return sslHandler;
}
private SniHandler createSniHandler(boolean useAlpn, long sslHandshakeTimeout, TimeUnit sslHandshakeTimeoutUnit, HostAndPort remoteAddress) {
Executor delegatedTaskExec = sslContextProvider.useWorkerPool() ? workerPool : ImmediateExecutor.INSTANCE;
return new VertxSniHandler(sslContextProvider.serverNameAsyncMapping(delegatedTaskExec, useAlpn), sslHandshakeTimeoutUnit.toMillis(sslHandshakeTimeout), delegatedTaskExec, remoteAddress);
}
}
|
SslChannelProvider
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/boot/registry/selector/internal/StrategySelectorImpl.java
|
{
"start": 1901,
"end": 8585
}
|
class ____ service usable by this StrategySelectorImpl instance.
*/
public StrategySelectorImpl(ClassLoaderService classLoaderService) {
this.classLoaderService = classLoaderService;
contributors = classLoaderService.loadJavaServices( NamedStrategyContributor.class );
for ( var contributor : contributors ) {
contributor.contributeStrategyImplementations( new StartupContributions() );
}
}
@Override
@SuppressWarnings("unchecked")
public <T> Class<? extends T> selectStrategyImplementor(Class<T> strategy, String name) {
final var namedStrategyImplementorMap = namedStrategyImplementorByStrategyMap.get( strategy );
if ( namedStrategyImplementorMap != null ) {
final var registered = namedStrategyImplementorMap.get( name );
if ( registered != null ) {
return (Class<T>) registered;
}
}
final var lazyServiceResolver = lazyStrategyImplementorByStrategyMap.get( strategy );
if ( lazyServiceResolver != null ) {
final var resolve = lazyServiceResolver.resolve( name );
if ( resolve != null ) {
return (Class<? extends T>) resolve;
}
}
try {
return classLoaderService.classForName( name );
}
catch (ClassLoadingException e) {
throw new StrategySelectionException(
"Unable to resolve name [" + name + "] as strategy [" + strategy.getName() + "]",
e
);
}
}
@Override
public <T> T resolveStrategy(Class<T> strategy, Object strategyReference) {
return resolveDefaultableStrategy( strategy, strategyReference, (T) null );
}
@Override
public <T> T resolveDefaultableStrategy(Class<T> strategy, Object strategyReference, final T defaultValue) {
return resolveDefaultableStrategy(
strategy,
strategyReference,
(Callable<T>) () -> defaultValue
);
}
@Override
@SuppressWarnings("unchecked")
public <T> T resolveDefaultableStrategy(
Class<T> strategy,
Object strategyReference,
Callable<T> defaultResolver) {
return resolveStrategy( strategy, strategyReference, defaultResolver,
(StrategyCreator<T>) STANDARD_STRATEGY_CREATOR );
}
@Override
public <T> T resolveStrategy(
Class<T> strategy,
Object strategyReference,
T defaultValue,
StrategyCreator<T> creator) {
return resolveStrategy(
strategy,
strategyReference,
(Callable<T>) () -> defaultValue,
creator
);
}
@Override
public <T> Collection<Class<? extends T>> getRegisteredStrategyImplementors(Class<T> strategy) {
final var lazyServiceResolver = lazyStrategyImplementorByStrategyMap.get( strategy );
if ( lazyServiceResolver != null ) {
throw new StrategySelectionException( "Can't use this method on for strategy types which are embedded in the core library" );
}
final var registrations = namedStrategyImplementorByStrategyMap.get( strategy );
if ( registrations == null ) {
return emptySet();
}
else {
final Set<Class<? extends T>> implementors = new HashSet<>();
for ( var registration : registrations.values() ) {
if ( !strategy.isAssignableFrom( registration ) ) {
throw new StrategySelectionException(
String.format(
"Registered strategy [%s] is not a subtype of [%s]",
registration.getName(),
strategy.getName()
)
);
}
implementors.add( registration.asSubclass( strategy ) );
}
return implementors;
}
}
@SuppressWarnings("unchecked")
@Override
public <T> T resolveStrategy(
Class<T> strategy,
Object strategyReference,
Callable<T> defaultResolver,
StrategyCreator<T> creator) {
if ( strategyReference == null ) {
try {
return defaultResolver.call();
}
catch (Exception e) {
throw new StrategySelectionException( "Default resolver threw exception", e );
}
}
else if ( strategy.isInstance( strategyReference ) ) {
return strategy.cast( strategyReference );
}
else {
final var implementationClass =
strategyReference instanceof Class
? (Class<? extends T>) strategyReference
: selectStrategyImplementor( strategy, strategyReference.toString() );
try {
return creator.create( implementationClass );
}
catch (Exception e) {
throw new StrategySelectionException(
String.format( "Could not instantiate named strategy class [%s]",
implementationClass.getName() ),
e
);
}
}
}
private static <T> T create(Class<T> strategyClass) {
try {
return strategyClass.getDeclaredConstructor().newInstance();
}
catch (Exception e) {
throw new StrategySelectionException(
String.format( "Could not instantiate named strategy class [%s]", strategyClass.getName() ),
e
);
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Lifecycle
public <T> void registerStrategyLazily(Class<T> strategy, LazyServiceResolver<T> resolver) {
final var previous = lazyStrategyImplementorByStrategyMap.put( strategy, resolver );
if ( previous != null ) {
throw new HibernateException( "Detected a second LazyServiceResolver replacing an existing LazyServiceResolver implementation for strategy " + strategy.getName() );
}
}
private <T> void contributeImplementation(Class<T> strategy, Class<? extends T> implementation, String... names) {
final var namedStrategyImplementorMap =
namedStrategyImplementorByStrategyMap.computeIfAbsent( strategy, clazz -> new ConcurrentHashMap<>() );
for ( String name : names ) {
final var old = namedStrategyImplementorMap.put( name, implementation );
if ( BOOT_LOGGER.isTraceEnabled() ) {
if ( old == null ) {
BOOT_LOGGER.strategySelectorMapping(
strategy.getSimpleName(),
name,
implementation.getName()
);
}
else {
BOOT_LOGGER.strategySelectorMappingReplacing(
strategy.getSimpleName(),
name,
implementation.getName(),
old.getName()
);
}
}
}
}
private <T> void removeImplementation(Class<T> strategy, Class<? extends T> implementation) {
final var namedStrategyImplementorMap = namedStrategyImplementorByStrategyMap.get( strategy );
if ( namedStrategyImplementorMap == null ) {
BOOT_LOGGER.namedStrategyMapDidNotExistOnUnregister();
}
else {
final var itr = namedStrategyImplementorMap.values().iterator();
while ( itr.hasNext() ) {
if ( itr.next().equals( implementation ) ) {
itr.remove();
}
}
// try to clean up after ourselves...
if ( namedStrategyImplementorMap.isEmpty() ) {
namedStrategyImplementorByStrategyMap.remove( strategy );
}
}
}
@Override
public void stop() {
for ( var contributor : contributors ) {
contributor.clearStrategyImplementations( new ShutdownContributions() );
}
}
private
|
loader
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ZKUtil.java
|
{
"start": 6774,
"end": 6990
}
|
class ____ extends
HadoopIllegalArgumentException {
private static final long serialVersionUID = 1L;
public BadAuthFormatException(String message) {
super(message);
}
}
}
|
BadAuthFormatException
|
java
|
apache__flink
|
flink-clients/src/test/java/org/apache/flink/client/program/ClientTest.java
|
{
"start": 16560,
"end": 17795
}
|
class ____ implements ProgramDescription {
@SuppressWarnings("serial")
public static void main(String[] args) throws Exception {
if (args.length < 2) {
System.err.println("Usage: TestExecutionPlan");
return;
}
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
DataStream<Long> input = env.fromSequence(0, 999).name("MySource");
DataStream<Long> result =
input.map(
new MapFunction<Long, Long>() {
@Override
public Long map(Long value) throws Exception {
return value * 2 + 1;
}
})
.name("MyMap");
result.sinkTo(new DiscardingSink<>()).name("MySink");
env.execute();
}
@Override
public String getDescription() {
return "TestExecutionPlan";
}
}
/** Test job that uses an eager sink. */
public static final
|
TestExecutionPlan
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/filter/EncodingConvertFilterTest2.java
|
{
"start": 1619,
"end": 9541
}
|
class ____ extends TestCase {
private DruidDataSource dataSource;
private static String CLIENT_ENCODING = "UTF-8";
private static String SERVER_ENCODING = "ISO-8859-1";
private static String text = "中华人民共和国";
protected void setUp() throws Exception {
dataSource = new DruidDataSource();
dataSource.setUrl("jdbc:mock:xxx");
dataSource.setFilters("encoding");
dataSource.setDriver(new MockDriver() {
public ResultSet createResultSet(MockPreparedStatement stmt) {
return new MyResultSet(stmt);
}
public ResultSet executeQuery(MockStatementBase stmt, String sql) throws SQLException {
return new MyResultSet(stmt);
}
});
dataSource.getConnectProperties().put("clientEncoding", CLIENT_ENCODING);
dataSource.getConnectProperties().put("serverEncoding", SERVER_ENCODING);
dataSource.init();
}
protected void tearDown() throws Exception {
JdbcUtils.close(dataSource);
}
public void test_stat() throws Exception {
assertTrue(dataSource.isInited());
EncodingConvertFilter filter = (EncodingConvertFilter) dataSource.getProxyFilters().get(0);
DruidPooledConnection conn = dataSource.getConnection();
final String PARAM_VALUE = "中国";
CallableStatement stmt = conn.prepareCall("select ?");
stmt.setString(1, PARAM_VALUE);
MockCallableStatement raw = stmt.unwrap(MockCallableStatement.class);
stmt.execute();
String param1 = (String) raw.getParameters().get(0);
String C_TEXT = new String(param1.getBytes(SERVER_ENCODING), CLIENT_ENCODING);
assertEquals(PARAM_VALUE, C_TEXT);
assertFalse(param1.equals(PARAM_VALUE));
MyResultSet rawRs = new MyResultSet(raw);
rawRs.setValue(filter.encode((ConnectionProxy) conn.getConnection(), text));
raw.getOutParameters().add(rawRs);
ResultSet rs = (ResultSet) stmt.getObject(1);
rs.next();
assertEquals(text, rs.getString(1));
assertEquals(text, rs.getString("1"));
assertEquals(text, rs.getObject(1));
assertEquals(text, rs.getObject("1"));
assertEquals(text, rs.getObject(1, Collections.<String, Class<?>>emptyMap()));
assertEquals(text, rs.getObject("1", Collections.<String, Class<?>>emptyMap()));
assertEquals(text, rs.getString(2));
assertEquals(text, rs.getString("2"));
assertEquals(text, rs.getObject(2));
assertEquals(text, rs.getObject("2"));
assertEquals(text, rs.getObject(2, Collections.<String, Class<?>>emptyMap()));
assertEquals(text, rs.getObject("2", Collections.<String, Class<?>>emptyMap()));
assertEquals(text, rs.getString(3));
assertEquals(text, rs.getString("3"));
assertEquals(text, rs.getObject(3));
assertEquals(text, rs.getObject("3"));
assertEquals(text, rs.getObject(3, Collections.<String, Class<?>>emptyMap()));
assertEquals(text, rs.getObject("3", Collections.<String, Class<?>>emptyMap()));
assertEquals(text, rs.getString(4));
assertEquals(text, rs.getString("4"));
assertEquals(text, rs.getObject(4));
assertEquals(text, rs.getObject("4"));
assertEquals(text, rs.getObject(4, Collections.<String, Class<?>>emptyMap()));
assertEquals(text, rs.getObject("4", Collections.<String, Class<?>>emptyMap()));
stmt.registerOutParameter(2, Types.VARCHAR);
stmt.registerOutParameter(3, Types.CLOB);
raw.getOutParameters().add(param1);
raw.getOutParameters().add(param1);
assertEquals(C_TEXT, stmt.getString(4));
assertEquals(C_TEXT, stmt.getString("4"));
assertEquals(C_TEXT, stmt.getObject(4));
assertEquals(C_TEXT, stmt.getObject("4"));
assertEquals(C_TEXT, stmt.getObject(4, Collections.<String, Class<?>>emptyMap()));
assertEquals(C_TEXT, stmt.getObject("4", Collections.<String, Class<?>>emptyMap()));
assertEquals(C_TEXT, stmt.getString(5));
assertEquals(C_TEXT, stmt.getString("5"));
assertEquals(C_TEXT, stmt.getObject(5));
assertEquals(C_TEXT, stmt.getObject("5"));
assertEquals(C_TEXT, stmt.getObject(5, Collections.<String, Class<?>>emptyMap()));
assertEquals(C_TEXT, stmt.getObject("5", Collections.<String, Class<?>>emptyMap()));
stmt.setObject(1, C_TEXT);
assertEquals(param1, raw.getParameters().get(0));
stmt.setObject(2, new StringReader(C_TEXT));
assertEquals(param1, Utils.read((Reader) raw.getParameters().get(1)));
stmt.setCharacterStream(3, new StringReader(C_TEXT));
assertEquals(param1, Utils.read((Reader) raw.getParameters().get(2)));
stmt.setCharacterStream(4, new StringReader(C_TEXT), C_TEXT.length());
assertEquals(param1, Utils.read((Reader) raw.getParameters().get(3)));
stmt.setCharacterStream(5, new StringReader(C_TEXT), (long) C_TEXT.length());
assertEquals(param1, Utils.read((Reader) raw.getParameters().get(4)));
stmt.setObject(6, C_TEXT, Types.VARCHAR);
assertEquals(param1, raw.getParameters().get(5));
stmt.setObject(7, new StringReader(C_TEXT), Types.VARCHAR);
assertEquals(param1, Utils.read((Reader) raw.getParameters().get(6)));
stmt.setObject(8, C_TEXT, Types.VARCHAR, 0);
assertEquals(param1, raw.getParameters().get(7));
stmt.setObject(9, new StringReader(C_TEXT), Types.VARCHAR, 0);
assertEquals(param1, Utils.read((Reader) raw.getParameters().get(8)));
stmt.setObject(10, 1, Types.INTEGER);
assertEquals(1, raw.getParameters().get(9));
stmt.setObject(11, 2, Types.INTEGER, 0);
assertEquals(2, raw.getParameters().get(10));
stmt.setObject(12, 3);
assertEquals(3, raw.getParameters().get(11));
stmt.setObject("13", C_TEXT, Types.VARCHAR);
assertEquals(param1, raw.getParameters().get(12));
stmt.setObject("14", new StringReader(C_TEXT), Types.VARCHAR);
assertEquals(param1, Utils.read((Reader) raw.getParameters().get(13)));
stmt.setObject("15", C_TEXT, Types.VARCHAR, 0);
assertEquals(param1, raw.getParameters().get(14));
stmt.setObject("16", new StringReader(C_TEXT), Types.VARCHAR, 0);
assertEquals(param1, Utils.read((Reader) raw.getParameters().get(15)));
stmt.setObject("17", 1, Types.INTEGER);
assertEquals(1, raw.getParameters().get(16));
stmt.setObject("18", 2, Types.INTEGER, 0);
assertEquals(2, raw.getParameters().get(17));
stmt.setObject("19", 3);
assertEquals(3, raw.getParameters().get(18));
stmt.setCharacterStream("20", new StringReader(C_TEXT));
assertEquals(param1, Utils.read((Reader) raw.getParameters().get(19)));
stmt.setCharacterStream("21", new StringReader(C_TEXT), C_TEXT.length());
assertEquals(param1, Utils.read((Reader) raw.getParameters().get(20)));
stmt.setCharacterStream("22", new StringReader(C_TEXT), (long) C_TEXT.length());
assertEquals(param1, Utils.read((Reader) raw.getParameters().get(21)));
stmt.setObject("23", C_TEXT);
assertEquals(param1, raw.getParameters().get(22));
stmt.setObject("24", new StringReader(C_TEXT));
assertEquals(param1, Utils.read((Reader) raw.getParameters().get(23)));
stmt.setObject("25", 1, Types.INTEGER);
assertEquals(1, raw.getParameters().get(24));
stmt.setObject("26", 2, Types.INTEGER, 0);
assertEquals(2, raw.getParameters().get(25));
stmt.setObject("27", 3);
assertEquals(3, raw.getParameters().get(26));
rs.close();
stmt.close();
conn.close();
}
public static
|
EncodingConvertFilterTest2
|
java
|
apache__camel
|
components/camel-cassandraql/src/main/java/org/apache/camel/component/cassandra/CassandraConsumer.java
|
{
"start": 1471,
"end": 3859
}
|
class ____ extends ScheduledPollConsumer implements ResumeAware<ResumeStrategy> {
/**
* Prepared statement used for polling
*/
private PreparedStatement preparedStatement;
private ResumeStrategy resumeStrategy;
public CassandraConsumer(CassandraEndpoint endpoint, Processor processor) {
super(endpoint, processor);
}
@Override
public CassandraEndpoint getEndpoint() {
return (CassandraEndpoint) super.getEndpoint();
}
@Override
protected int poll() throws Exception {
// Execute CQL Query
CqlSession session = getEndpoint().getSessionHolder().getSession();
ResultSet resultSet;
if (isPrepareStatements()) {
resultSet = session.execute(preparedStatement.bind());
} else {
resultSet = session.execute(getEndpoint().getCql());
}
// Create message from ResultSet
Exchange exchange = createExchange(false);
try {
Message message = exchange.getIn();
getEndpoint().fillMessage(resultSet, message);
// send message to next processor in the route
getProcessor().process(exchange);
return 1; // number of messages polled
} finally {
// log exception if an exception occurred and was not handled
if (exchange.getException() != null) {
getExceptionHandler().handleException("Error processing exchange", exchange, exchange.getException());
}
releaseExchange(exchange, false);
}
}
@Override
protected void doStart() throws Exception {
if (isPrepareStatements()) {
preparedStatement = getEndpoint().prepareStatement();
}
ResumeStrategyHelper.resume(getEndpoint().getCamelContext(), this, resumeStrategy, CASSANDRA_RESUME_ACTION);
super.doStart();
}
@Override
protected void doStop() throws Exception {
this.preparedStatement = null;
super.doStop();
}
public boolean isPrepareStatements() {
return getEndpoint().isPrepareStatements();
}
@Override
public ResumeStrategy getResumeStrategy() {
return resumeStrategy;
}
@Override
public void setResumeStrategy(ResumeStrategy resumeStrategy) {
this.resumeStrategy = resumeStrategy;
}
}
|
CassandraConsumer
|
java
|
hibernate__hibernate-orm
|
hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/onetoone/unidirectional/UnidirectionalMulIdWithNulls.java
|
{
"start": 956,
"end": 2229
}
|
class ____ {
private EmbId ei;
@BeforeClassTemplate
public void initData(EntityManagerFactoryScope scope) {
ei = new EmbId( 1, 2 );
// Revision 1
scope.inTransaction( em -> {
EmbIdTestEntity eite = new EmbIdTestEntity( ei, "data" );
UniRefIngMulIdEntity notNullRef = new UniRefIngMulIdEntity( 1, "data 1", eite );
UniRefIngMulIdEntity nullRef = new UniRefIngMulIdEntity( 2, "data 2", null );
em.persist( eite );
em.persist( notNullRef );
em.persist( nullRef );
} );
}
@Test
public void testNullReference(EntityManagerFactoryScope scope) {
scope.inEntityManager( em -> {
final var auditReader = AuditReaderFactory.get( em );
UniRefIngMulIdEntity nullRef = auditReader.find( UniRefIngMulIdEntity.class, 2, 1 );
assertNull( nullRef.getReference() );
} );
}
@Test
public void testNotNullReference(EntityManagerFactoryScope scope) {
scope.inEntityManager( em -> {
final var auditReader = AuditReaderFactory.get( em );
EmbIdTestEntity eite = auditReader.find( EmbIdTestEntity.class, ei, 1 );
UniRefIngMulIdEntity notNullRef = auditReader.find( UniRefIngMulIdEntity.class, 1, 1 );
assertNotNull( notNullRef.getReference() );
assertEquals( eite, notNullRef.getReference() );
} );
}
}
|
UnidirectionalMulIdWithNulls
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/context/annotation/InitDestroyMethodLifecycleTests.java
|
{
"start": 16025,
"end": 16610
}
|
class ____ extends PackagePrivateInitDestroyBean
implements InitializingBean, DisposableBean {
@Override
public void afterPropertiesSet() {
this.initMethods.add("InitializingBean.afterPropertiesSet");
}
@Override
public void destroy() {
this.destroyMethods.add("DisposableBean.destroy");
}
@PostConstruct
void postConstruct() {
this.initMethods.add("SubPackagePrivateInitDestroyBean.postConstruct");
}
@PreDestroy
void preDestroy() {
this.destroyMethods.add("SubPackagePrivateInitDestroyBean.preDestroy");
}
}
}
|
SubPackagePrivateInitDestroyBean
|
java
|
reactor__reactor-core
|
reactor-core/src/main/java/reactor/core/publisher/Operators.java
|
{
"start": 58939,
"end": 59914
}
|
class ____ implements QueueSubscription<Object>, Scannable {
static final EmptySubscription INSTANCE = new EmptySubscription();
static final EmptySubscription FROM_SUBSCRIBE_INSTANCE = new EmptySubscription();
@Override
public void cancel() {
// deliberately no op
}
@Override
public void clear() {
// deliberately no op
}
@Override
public boolean isEmpty() {
return true;
}
@Override
public @Nullable Object poll() {
return null;
}
@Override
public void request(long n) {
// deliberately no op
}
@Override
public int requestFusion(int requestedMode) {
return NONE; // can't enable fusion due to complete/error possibility
}
@Override
public @Nullable Object scanUnsafe(Attr key) {
if (key == Attr.TERMINATED) return true;
return null;
}
@Override
public int size() {
return 0;
}
@Override
public String stepName() {
return "emptySubscription";
}
}
/**
* Base
|
EmptySubscription
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/processor/enricher/PollDynamicFileNameTest.java
|
{
"start": 1057,
"end": 3250
}
|
class ____ extends ContextTestSupport {
@Test
public void testPollEnrichFileOne() throws Exception {
getMockEndpoint("mock:result").expectedMessageCount(2);
getMockEndpoint("mock:result").message(0).body().isEqualTo("Hello World");
getMockEndpoint("mock:result").message(1).body().isNull();
template.sendBodyAndHeader(fileUri(), "Hello World", Exchange.FILE_NAME, "myfile.txt");
template.sendBodyAndHeader("direct:start", "Foo", "target", "myfile.txt");
template.sendBodyAndHeader("direct:start", "Bar", "target", "unknown.txt");
assertMockEndpointsSatisfied();
// there should only be 1 file endpoint
long c = context.getEndpoints().stream()
.filter(e -> e.getEndpointKey().startsWith("file") && e.getEndpointUri().contains("?fileName=")).count();
Assertions.assertEquals(1, c, "There should only be 1 file endpoint");
}
@Test
public void testPollEnrichFileTwo() throws Exception {
getMockEndpoint("mock:result").expectedBodiesReceivedInAnyOrder("Hello World", "Bye World");
template.sendBodyAndHeader(fileUri(), "Hello World", Exchange.FILE_NAME, "myfile.txt");
template.sendBodyAndHeader(fileUri(), "Bye World", Exchange.FILE_NAME, "myfile2.txt");
template.sendBodyAndHeader("direct:start", "Foo", "target", "myfile.txt");
template.sendBodyAndHeader("direct:start", "Bar", "target", "myfile2.txt");
assertMockEndpointsSatisfied();
// there should only be 1 file endpoint
long c = context.getEndpoints().stream()
.filter(e -> e.getEndpointKey().startsWith("file") && e.getEndpointUri().contains("?fileName=")).count();
Assertions.assertEquals(1, c, "There should only be 1 file endpoint");
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start")
.poll(fileUri() + "?noop=true&fileName=${header.target}", 500)
.to("mock:result");
}
};
}
}
|
PollDynamicFileNameTest
|
java
|
apache__camel
|
components/camel-aws/camel-aws2-s3/src/test/java/org/apache/camel/component/aws2/s3/integration/S3ObjectRangeOperationManualIT.java
|
{
"start": 2427,
"end": 5020
}
|
class ____ extends CamelTestSupport {
private static final String ACCESS_KEY = System.getProperty("aws.manual.access.key");
private static final String SECRET_KEY = System.getProperty("aws.manual.secret.key");
private static final Logger LOG = LoggerFactory.getLogger(S3ObjectRangeOperationManualIT.class);
@BindToRegistry("amazonS3Client")
S3Client client
= S3Client.builder().credentialsProvider(
StaticCredentialsProvider.create(
AwsBasicCredentials.create(ACCESS_KEY, SECRET_KEY)))
.region(Region.US_WEST_1).build();
@EndpointInject
private ProducerTemplate template;
@EndpointInject("mock:result")
private MockEndpoint result;
@Test
public void sendIn() throws Exception {
result.expectedMessageCount(1);
template.send("direct:getObjectRange", new Processor() {
@Override
public void process(Exchange exchange) {
exchange.getIn().setHeader(AWS2S3Constants.KEY, "element.txt");
exchange.getIn().setHeader(AWS2S3Constants.RANGE_START, 0);
exchange.getIn().setHeader(AWS2S3Constants.RANGE_END, 9);
}
});
MockEndpoint.assertIsSatisfied(context);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
String awsEndpoint = "aws2-s3://mycamelbucket?operation=getObjectRange&autoCreateBucket=false";
from("direct:getObjectRange").to(awsEndpoint).process(new Processor() {
@SuppressWarnings("unchecked")
@Override
public void process(Exchange exchange) throws Exception {
ResponseInputStream<GetObjectResponse> s3 = exchange.getIn().getBody(ResponseInputStream.class);
LOG.info(readInputStream(s3));
}
}).to("mock:result");
}
};
}
private String readInputStream(ResponseInputStream<GetObjectResponse> s3Object) throws IOException {
StringBuilder textBuilder = new StringBuilder();
try (Reader reader
= new BufferedReader(new InputStreamReader(s3Object, StandardCharsets.UTF_8))) {
int c = 0;
while ((c = reader.read()) != -1) {
textBuilder.append((char) c);
}
}
return textBuilder.toString();
}
}
|
S3ObjectRangeOperationManualIT
|
java
|
google__guice
|
core/src/com/google/inject/internal/aop/ClassBuilding.java
|
{
"start": 10458,
"end": 10675
}
|
class ____ can be fast-invoked. */
private static void visitFastMethods(Class<?> hostClass, Consumer<Method> visitor) {
if (hasPackageAccess()) {
// can fast-invoke all non-private methods declared by the
|
that
|
java
|
apache__camel
|
components/camel-servicenow/camel-servicenow-component/src/main/java/org/apache/camel/component/servicenow/releases/fuji/FujiServiceNowTableProcessor.java
|
{
"start": 1277,
"end": 8603
}
|
class ____ extends FujiServiceNowProcessor {
FujiServiceNowTableProcessor(ServiceNowEndpoint endpoint) throws Exception {
super(endpoint);
}
@Override
protected void doProcess(
Exchange exchange, Class<?> requestModel, Class<?> responseModel, String apiVersion, String action,
String tableName, String sysId)
throws Exception {
Response response;
if (ObjectHelper.equal(ServiceNowConstants.ACTION_RETRIEVE, action, true)) {
response = retrieveRecord(exchange.getIn(), responseModel, apiVersion, tableName, sysId);
} else if (ObjectHelper.equal(ServiceNowConstants.ACTION_CREATE, action, true)) {
response = createRecord(exchange.getIn(), requestModel, responseModel, apiVersion, tableName);
} else if (ObjectHelper.equal(ServiceNowConstants.ACTION_MODIFY, action, true)) {
response = modifyRecord(exchange.getIn(), requestModel, responseModel, apiVersion, tableName, sysId);
} else if (ObjectHelper.equal(ServiceNowConstants.ACTION_DELETE, action, true)) {
response = deleteRecord(responseModel, apiVersion, tableName, sysId);
} else if (ObjectHelper.equal(ServiceNowConstants.ACTION_UPDATE, action, true)) {
response = updateRecord(exchange.getIn(), requestModel, responseModel, apiVersion, tableName, sysId);
} else {
throw new IllegalArgumentException("Unknown action " + action);
}
setBodyAndHeaders(exchange.getIn(), responseModel, response);
}
/*
* GET
* https://instance.service-now.com/api/now/table/{tableName}
* https://instance.service-now.com/api/now/table/{tableName}/{sys_id}
*/
private Response retrieveRecord(
Message in, Class<?> responseModel, String apiVersion, String tableName, String sysId)
throws Exception {
return ObjectHelper.isEmpty(sysId)
? client.reset()
.types(MediaType.APPLICATION_JSON_TYPE)
.path("now")
.path(apiVersion)
.path("table")
.path(tableName)
.query(ServiceNowParams.SYSPARM_QUERY, in)
.query(ServiceNowParams.SYSPARM_DISPLAY_VALUE, in)
.query(ServiceNowParams.SYSPARM_EXCLUDE_REFERENCE_LINK, in)
.query(ServiceNowParams.SYSPARM_FIELDS, in)
.query(ServiceNowParams.SYSPARM_LIMIT, in)
.query(ServiceNowParams.SYSPARM_VIEW, in)
.query(responseModel)
.invoke(HttpMethod.GET)
: client.reset()
.types(MediaType.APPLICATION_JSON_TYPE)
.path("now")
.path(apiVersion)
.path("table")
.path(tableName)
.path(sysId)
.query(ServiceNowParams.SYSPARM_DISPLAY_VALUE, in)
.query(ServiceNowParams.SYSPARM_EXCLUDE_REFERENCE_LINK, in)
.query(ServiceNowParams.SYSPARM_FIELDS, in)
.query(ServiceNowParams.SYSPARM_VIEW, in)
.query(responseModel)
.invoke(HttpMethod.GET);
}
/*
* POST
* https://instance.service-now.com/api/now/table/{tableName}
*/
private Response createRecord(
Message in, Class<?> requestModel, Class<?> responseModel, String apiVersion, String tableName)
throws Exception {
validateBody(in, requestModel);
return client.reset()
.types(MediaType.APPLICATION_JSON_TYPE)
.path("now")
.path(apiVersion)
.path("table")
.path(tableName)
.query(ServiceNowParams.SYSPARM_DISPLAY_VALUE, in)
.query(ServiceNowParams.SYSPARM_EXCLUDE_REFERENCE_LINK, in)
.query(ServiceNowParams.SYSPARM_FIELDS, in)
.query(ServiceNowParams.SYSPARM_INPUT_DISPLAY_VALUE, in)
.query(ServiceNowParams.SYSPARM_SUPPRESS_AUTO_SYS_FIELD, in)
.query(ServiceNowParams.SYSPARM_VIEW, in)
.query(responseModel)
.invoke(HttpMethod.POST, in.getMandatoryBody());
}
/*
* PUT
* https://instance.service-now.com/api/now/table/{tableName}/{sys_id}
*/
private Response modifyRecord(
Message in, Class<?> requestModel, Class<?> responseModel, String apiVersion, String tableName, String sysId)
throws Exception {
validateBody(in, requestModel);
return client.reset()
.types(MediaType.APPLICATION_JSON_TYPE)
.path("now")
.path(apiVersion)
.path("table")
.path(tableName)
.path(ObjectHelper.notNull(sysId, "sysId"))
.query(ServiceNowParams.SYSPARM_DISPLAY_VALUE, in)
.query(ServiceNowParams.SYSPARM_EXCLUDE_REFERENCE_LINK, in)
.query(ServiceNowParams.SYSPARM_FIELDS, in)
.query(ServiceNowParams.SYSPARM_INPUT_DISPLAY_VALUE, in)
.query(ServiceNowParams.SYSPARM_SUPPRESS_AUTO_SYS_FIELD, in)
.query(ServiceNowParams.SYSPARM_VIEW, in)
.query(responseModel)
.invoke(HttpMethod.PUT, in.getMandatoryBody());
}
/*
* DELETE
* https://instance.service-now.com/api/now/table/{tableName}/{sys_id}
*/
private Response deleteRecord(
Class<?> responseModel, String apiVersion, String tableName, String sysId)
throws Exception {
return client.reset()
.types(MediaType.APPLICATION_JSON_TYPE)
.path("now")
.path(apiVersion)
.path("table")
.path(tableName)
.path(ObjectHelper.notNull(sysId, "sysId"))
.query(responseModel)
.invoke(HttpMethod.DELETE);
}
/*
* PATCH
* http://instance.service-now.com/api/now/table/{tableName}/{sys_id}
*/
private Response updateRecord(
Message in, Class<?> requestModel, Class<?> responseModel, String apiVersion, String tableName, String sysId)
throws Exception {
validateBody(in, requestModel);
return client.reset()
.types(MediaType.APPLICATION_JSON_TYPE)
.path("now")
.path(apiVersion)
.path("table")
.path(tableName)
.path(ObjectHelper.notNull(sysId, "sysId"))
.query(ServiceNowParams.SYSPARM_DISPLAY_VALUE, in)
.query(ServiceNowParams.SYSPARM_EXCLUDE_REFERENCE_LINK, in)
.query(ServiceNowParams.SYSPARM_FIELDS, in)
.query(ServiceNowParams.SYSPARM_INPUT_DISPLAY_VALUE, in)
.query(ServiceNowParams.SYSPARM_SUPPRESS_AUTO_SYS_FIELD, in)
.query(ServiceNowParams.SYSPARM_VIEW, in)
.query(responseModel)
.invoke("PATCH", in.getMandatoryBody());
}
}
|
FujiServiceNowTableProcessor
|
java
|
apache__camel
|
core/camel-support/src/main/java/org/apache/camel/support/task/AbstractTask.java
|
{
"start": 852,
"end": 1998
}
|
class ____ implements Task {
static final long NEVER = -1L;
final String name;
Status status;
long firstAttemptTime;
long lastAttemptTime;
long nextAttemptTime;
Throwable cause;
public AbstractTask(String name) {
this.name = name;
this.status = Status.Active;
this.firstAttemptTime = NEVER;
this.lastAttemptTime = NEVER;
this.nextAttemptTime = NEVER;
}
@Override
public String getName() {
return name;
}
@Override
public Status getStatus() {
return status;
}
@Override
public long getFirstAttemptTime() {
return firstAttemptTime;
}
@Override
public long getLastAttemptTime() {
return lastAttemptTime;
}
@Override
public long getNextAttemptTime() {
return nextAttemptTime;
}
@Override
public long getCurrentElapsedTime() {
if (firstAttemptTime > 0) {
return System.currentTimeMillis() - firstAttemptTime;
}
return NEVER;
}
@Override
public Throwable getException() {
return cause;
}
}
|
AbstractTask
|
java
|
netty__netty
|
transport-native-io_uring/src/test/java/io/netty/channel/uring/IoUringSocketDataReadInitialStateTest.java
|
{
"start": 1015,
"end": 1394
}
|
class ____ extends SocketDataReadInitialStateTest {
@BeforeAll
public static void loadJNI() {
assumeTrue(IoUring.isAvailable());
}
@Override
protected List<TestsuitePermutation.BootstrapComboFactory<ServerBootstrap, Bootstrap>> newFactories() {
return IoUringSocketTestPermutation.INSTANCE.socket();
}
}
|
IoUringSocketDataReadInitialStateTest
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LTrimTests.java
|
{
"start": 721,
"end": 1197
}
|
class ____ extends AbstractTrimTests {
public LTrimTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
this.testCase = testCaseSupplier.get();
}
@ParametersFactory
public static Iterable<Object[]> parameters() {
return parameters("LTrimEvaluator", true, false);
}
@Override
protected Expression build(Source source, List<Expression> args) {
return new LTrim(source, args.get(0));
}
}
|
LTrimTests
|
java
|
spring-projects__spring-framework
|
spring-test/src/main/java/org/springframework/test/web/servlet/request/MockMultipartHttpServletRequestBuilder.java
|
{
"start": 1015,
"end": 1855
}
|
class ____
extends AbstractMockMultipartHttpServletRequestBuilder<MockMultipartHttpServletRequestBuilder> {
/**
* Package-private constructor. Use static factory methods in
* {@link MockMvcRequestBuilders}.
* <p>For other ways to initialize a {@code MockMultipartHttpServletRequest},
* see {@link #with(RequestPostProcessor)} and the
* {@link RequestPostProcessor} extension point.
* @param httpMethod the HTTP method (GET, POST, etc.)
*/
MockMultipartHttpServletRequestBuilder(HttpMethod httpMethod) {
super(httpMethod);
super.contentType(MediaType.MULTIPART_FORM_DATA);
}
/**
* Variant of {@link #MockMultipartHttpServletRequestBuilder(HttpMethod)}
* that defaults to {@link HttpMethod#POST}.
*/
MockMultipartHttpServletRequestBuilder() {
this(HttpMethod.POST);
}
}
|
MockMultipartHttpServletRequestBuilder
|
java
|
elastic__elasticsearch
|
modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ConvertProcessorTests.java
|
{
"start": 44664,
"end": 47069
}
|
interface ____ {
Object input();
Type targetType();
TestResult run();
default Object attemptConversion() throws Exception {
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>(Map.of("field", input())));
Processor processor = new ConvertProcessor(randomAlphaOfLength(10), null, "field", "field", targetType(), false);
processor.execute(ingestDocument);
return ingestDocument.getFieldValue("field", Object.class);
}
default String describeTest() {
return Strings.format("converting (%s) %s to %s", input().getClass().getSimpleName(), input(), targetType());
}
}
record ExpectConvertsTestCase(Object input, Type targetType, Object expectedOutput) implements TestCase {
@Override
public TestResult run() {
try {
Object actualOutput = attemptConversion();
if (expectedOutput.equals(actualOutput)) {
return new TestPass(this);
} else {
return new TestFailureWrongValue(this, expectedOutput, actualOutput);
}
} catch (Exception e) {
return new TestFailureUnexpectedException(this, expectedOutput, e);
}
}
@Override
public String toString() {
return Strings.format(
"Expected %s to give (%s) %s",
describeTest(),
expectedOutput.getClass().getSimpleName(),
expectedOutput
);
}
}
record ExpectThrowsTestCase(Object input, Type targetType) implements TestCase {
@Override
public TestResult run() {
try {
Object actualOutput = attemptConversion();
return new TestFailureMissingException(this, actualOutput);
} catch (Exception e) {
return new TestPass(this);
}
}
@Override
public String toString() {
return Strings.format("Expected %s to throw", describeTest());
}
}
|
TestCase
|
java
|
lettuce-io__lettuce-core
|
src/main/java/io/lettuce/core/cluster/ClusterTopologyRefreshOptions.java
|
{
"start": 5509,
"end": 22845
}
|
class ____ {
private final Set<RefreshTrigger> adaptiveRefreshTriggers = new HashSet<>(DEFAULT_ADAPTIVE_REFRESH_TRIGGERS);
private Duration adaptiveRefreshTimeout = DEFAULT_ADAPTIVE_REFRESH_TIMEOUT_DURATION;
private boolean closeStaleConnections = DEFAULT_CLOSE_STALE_CONNECTIONS;
private boolean dynamicRefreshSources = DEFAULT_DYNAMIC_REFRESH_SOURCES;
private boolean periodicRefreshEnabled = DEFAULT_PERIODIC_REFRESH_ENABLED;
private Duration refreshPeriod = DEFAULT_REFRESH_PERIOD_DURATION;
private int refreshTriggersReconnectAttempts = DEFAULT_REFRESH_TRIGGERS_RECONNECT_ATTEMPTS;
private Builder() {
}
/**
* Enables adaptive topology refreshing using one or more {@link RefreshTrigger triggers}. Adaptive refresh triggers
* initiate topology view updates based on events happened during Redis Cluster operations. Adaptive triggers lead to an
* immediate topology refresh. Adaptive triggered refreshes are rate-limited using a timeout since events can happen on
* a large scale. Adaptive refresh triggers are all enabled by default. See also
* {@link #adaptiveRefreshTriggersTimeout(long, TimeUnit)} and {@link RefreshTrigger}.
*
* @param refreshTrigger one or more {@link RefreshTrigger} to enabled
* @return {@code this}
* @deprecated Starting from 7.0, this method has no effect as all adaptive triggers are enabled by default.
* @see #disableAllAdaptiveRefreshTriggers()
* @see #disableAdaptiveRefreshTrigger(RefreshTrigger...)
*/
@Deprecated
public Builder enableAdaptiveRefreshTrigger(RefreshTrigger... refreshTrigger) {
LettuceAssert.notNull(refreshTrigger, "RefreshTriggers must not be null");
LettuceAssert.noNullElements(refreshTrigger, "RefreshTriggers must not contain null elements");
LettuceAssert.notEmpty(refreshTrigger, "RefreshTriggers must contain at least one element");
adaptiveRefreshTriggers.addAll(Arrays.asList(refreshTrigger));
return this;
}
/**
* Enables adaptive topology refreshing using all {@link RefreshTrigger triggers}. Adaptive refresh triggers initiate
* topology view updates based on events happened during Redis Cluster operations. Adaptive triggers lead to an
* immediate topology refresh. Adaptive triggered refreshes are rate-limited using a timeout since events can happen on
* a large scale. Adaptive refresh triggers are all enabled by default. See also
* {@link #adaptiveRefreshTriggersTimeout(long, TimeUnit)} and {@link RefreshTrigger}.
*
* @return {@code this}
* @deprecated Starting from 7.0, this method has no effect as all adaptive triggers are enabled by default.
* @see #disableAllAdaptiveRefreshTriggers()
* @see #disableAdaptiveRefreshTrigger(RefreshTrigger...)
*/
@Deprecated
public Builder enableAllAdaptiveRefreshTriggers() {
adaptiveRefreshTriggers.addAll(EnumSet.allOf(RefreshTrigger.class));
return this;
}
/**
* Disables adaptive topology refreshing using one or more {@link RefreshTrigger triggers}. Adaptive refresh triggers
* initiate topology view updates based on events happened during Redis Cluster operations. Adaptive triggers lead to an
* immediate topology refresh. Adaptive triggered refreshes are rate-limited using a timeout since events can happen on
* a large scale. Adaptive refresh triggers are all enabled by default. See also
* {@link #adaptiveRefreshTriggersTimeout(long, TimeUnit)} and {@link RefreshTrigger}.
*
* @param refreshTrigger one or more {@link RefreshTrigger} to enabled
* @return {@code this}
* @since 7.0
*/
public Builder disableAdaptiveRefreshTrigger(RefreshTrigger... refreshTrigger) {
LettuceAssert.notNull(refreshTrigger, "RefreshTriggers must not be null");
LettuceAssert.noNullElements(refreshTrigger, "RefreshTriggers must not contain null elements");
LettuceAssert.notEmpty(refreshTrigger, "RefreshTriggers must contain at least one element");
Arrays.asList(refreshTrigger).forEach(adaptiveRefreshTriggers::remove);
return this;
}
/**
* Disables adaptive topology refreshing using all {@link RefreshTrigger triggers}. Adaptive refresh triggers initiate
* topology view updates based on events happened during Redis Cluster operations. Adaptive triggers lead to an
* immediate topology refresh. Adaptive triggered refreshes are rate-limited using a timeout since events can happen on
* a large scale. Adaptive refresh triggers are all enabled by default. See also
* {@link #adaptiveRefreshTriggersTimeout(long, TimeUnit)} and {@link RefreshTrigger}.
*
* @return {@code this}
* @since 7.0
*/
public Builder disableAllAdaptiveRefreshTriggers() {
adaptiveRefreshTriggers.clear();
return this;
}
/**
* Set the timeout for adaptive topology updates. This timeout is to rate-limit topology updates initiated by refresh
* triggers to one topology refresh per timeout. Defaults to {@literal 30 SECONDS}. See {@link #DEFAULT_REFRESH_PERIOD}
* and {@link #DEFAULT_REFRESH_PERIOD_UNIT}.
*
* @param timeout timeout for rate-limit adaptive topology updates, must be greater than {@literal 0}.
* @return {@code this}
* @since 5.0
*/
public Builder adaptiveRefreshTriggersTimeout(Duration timeout) {
LettuceAssert.notNull(refreshPeriod, "Adaptive refresh triggers timeout must not be null");
LettuceAssert.isTrue(refreshPeriod.toNanos() > 0, "Adaptive refresh triggers timeout must be greater 0");
this.adaptiveRefreshTimeout = timeout;
return this;
}
/**
* Set the timeout for adaptive topology updates. This timeout is to rate-limit topology updates initiated by refresh
* triggers to one topology refresh per timeout. Defaults to {@literal 30 SECONDS}. See {@link #DEFAULT_REFRESH_PERIOD}
* and {@link #DEFAULT_REFRESH_PERIOD_UNIT}.
*
* @param timeout timeout for rate-limit adaptive topology updates
* @param unit unit for {@code timeout}
* @return {@code this}
* @deprecated since 5.0, use {@link #adaptiveRefreshTriggersTimeout(Duration)}.
*/
@Deprecated
public Builder adaptiveRefreshTriggersTimeout(long timeout, TimeUnit unit) {
LettuceAssert.isTrue(timeout > 0, "Triggers timeout must be greater 0");
LettuceAssert.notNull(unit, "TimeUnit must not be null");
return adaptiveRefreshTriggersTimeout(Duration.ofNanos(unit.toNanos(timeout)));
}
/**
* Flag, whether to close stale connections when refreshing the cluster topology. Defaults to {@code true}. Comes only
* into effect if {@link #isPeriodicRefreshEnabled()} is {@code true}. See
* {@link ClusterTopologyRefreshOptions#DEFAULT_CLOSE_STALE_CONNECTIONS}.
*
* @param closeStaleConnections {@code true} if stale connections are cleaned up after cluster topology updates
* @return {@code this}
*/
public Builder closeStaleConnections(boolean closeStaleConnections) {
this.closeStaleConnections = closeStaleConnections;
return this;
}
/**
* Discover cluster nodes from topology and use the discovered nodes as source for the cluster topology. Using dynamic
* refresh will query all discovered nodes for the cluster topology and calculate the number of clients for each node.
* If set to {@code false}, only the initial seed nodes will be used as sources for topology discovery and the number of
* clients including response latency will be obtained only for the initial seed nodes. This can be useful when using
* Redis Cluster with many nodes. Defaults to {@code true}. See
* {@link ClusterTopologyRefreshOptions#DEFAULT_DYNAMIC_REFRESH_SOURCES}.
*
* @param dynamicRefreshSources {@code true} to discover and query all cluster nodes for obtaining the cluster topology
* @return {@code this}
*/
public Builder dynamicRefreshSources(boolean dynamicRefreshSources) {
this.dynamicRefreshSources = dynamicRefreshSources;
return this;
}
/**
* Enables periodic cluster topology updates. The client starts updating the cluster topology in the intervals of
* {@link Builder#refreshPeriod}. Defaults to {@code false}. See {@link #DEFAULT_PERIODIC_REFRESH_ENABLED}.
*
* @return {@code this}
*/
public Builder enablePeriodicRefresh() {
return enablePeriodicRefresh(true);
}
/**
* Enable regular cluster topology updates. The client starts updating the cluster topology in the intervals of
* {@link Builder#refreshPeriod}. Defaults to {@code false}. See {@link #DEFAULT_PERIODIC_REFRESH_ENABLED}.
*
* @param enabled {@code true} enable regular cluster topology updates or {@code false} to disable auto-updating
* @return {@code this}
*/
public Builder enablePeriodicRefresh(boolean enabled) {
this.periodicRefreshEnabled = enabled;
return this;
}
/**
* Enables periodic refresh and sets the refresh period. Defaults to {@literal 60 SECONDS}. See
* {@link #DEFAULT_REFRESH_PERIOD} and {@link #DEFAULT_REFRESH_PERIOD_UNIT}. This method is a shortcut for
* {@link #refreshPeriod(long, TimeUnit)} and {@link #enablePeriodicRefresh()}.
*
* @param refreshPeriod period for triggering topology updates, must be greater {@literal 0}
* @return {@code this}
* @since 5.0
*/
public Builder enablePeriodicRefresh(Duration refreshPeriod) {
return refreshPeriod(refreshPeriod).enablePeriodicRefresh();
}
/**
* Enables periodic refresh and sets the refresh period. Defaults to {@literal 60 SECONDS}. See
* {@link #DEFAULT_REFRESH_PERIOD} and {@link #DEFAULT_REFRESH_PERIOD_UNIT}. This method is a shortcut for
* {@link #refreshPeriod(long, TimeUnit)} and {@link #enablePeriodicRefresh()}.
*
* @param refreshPeriod period for triggering topology updates, must be greater {@literal 0}
* @param refreshPeriodUnit unit for {@code refreshPeriod}, must not be {@code null}
* @return {@code this}
* @deprecated since 5.0, use {@link #enablePeriodicRefresh(Duration)}.
*/
@Deprecated
public Builder enablePeriodicRefresh(long refreshPeriod, TimeUnit refreshPeriodUnit) {
return refreshPeriod(refreshPeriod, refreshPeriodUnit).enablePeriodicRefresh();
}
/**
* Set the refresh period. Defaults to {@literal 60 SECONDS}. See {@link #DEFAULT_REFRESH_PERIOD} and
* {@link #DEFAULT_REFRESH_PERIOD_UNIT}.
*
* @param refreshPeriod period for triggering topology updates, must be greater {@literal 0}
* @return {@code this}
* @since 5.0
*/
public Builder refreshPeriod(Duration refreshPeriod) {
LettuceAssert.notNull(refreshPeriod, "RefreshPeriod duration must not be null");
LettuceAssert.isTrue(refreshPeriod.toNanos() > 0, "RefreshPeriod must be greater 0");
this.refreshPeriod = refreshPeriod;
return this;
}
/**
* Set the refresh period. Defaults to {@literal 60 SECONDS}. See {@link #DEFAULT_REFRESH_PERIOD} and
* {@link #DEFAULT_REFRESH_PERIOD_UNIT}.
*
* @param refreshPeriod period for triggering topology updates, must be greater {@literal 0}
* @param refreshPeriodUnit unit for {@code refreshPeriod}, must not be {@code null}
* @return {@code this}
* @deprecated since 5.0, use {@link #refreshPeriod(Duration)}.
*/
@Deprecated
public Builder refreshPeriod(long refreshPeriod, TimeUnit refreshPeriodUnit) {
LettuceAssert.isTrue(refreshPeriod > 0, "RefreshPeriod must be greater 0");
LettuceAssert.notNull(refreshPeriodUnit, "TimeUnit must not be null");
return refreshPeriod(Duration.ofNanos(refreshPeriodUnit.toNanos(refreshPeriod)));
}
/**
* Set the threshold for the {@link RefreshTrigger#PERSISTENT_RECONNECTS}. Topology updates based on persistent
* reconnects lead only to a refresh if the reconnect process tries at least {@code refreshTriggersReconnectAttempts}.
* See {@link #DEFAULT_REFRESH_TRIGGERS_RECONNECT_ATTEMPTS}.
*
* @param refreshTriggersReconnectAttempts number of reconnect attempts for a connection before a n adaptive topology
* refresh is triggered
* @return {@code this}
*/
public Builder refreshTriggersReconnectAttempts(int refreshTriggersReconnectAttempts) {
this.refreshTriggersReconnectAttempts = refreshTriggersReconnectAttempts;
return this;
}
/**
* Create a new instance of {@link ClusterTopologyRefreshOptions}
*
* @return new instance of {@link ClusterTopologyRefreshOptions}
*/
public ClusterTopologyRefreshOptions build() {
return new ClusterTopologyRefreshOptions(this);
}
}
/**
* Returns the set of {@link RefreshTrigger triggers}. Adaptive refresh triggers initiate topology view updates based on
* events happened during Redis Cluster operations. Adaptive triggers lead to an immediate topology refresh. Adaptive
* triggered refreshes are rate-limited using a timeout since events can happen on a large scale. Adaptive refresh triggers
* are disabled by default.
*
* @return the set of {@link RefreshTrigger triggers}
*/
public Set<RefreshTrigger> getAdaptiveRefreshTriggers() {
return adaptiveRefreshTriggers;
}
/**
* Timeout between adaptive cluster topology updates. Defaults to {@literal 30}.
*
* @return the period between the regular cluster topology updates
*/
public Duration getAdaptiveRefreshTimeout() {
return adaptiveRefreshTimeout;
}
/**
* Flag, whether to close stale connections when refreshing the cluster topology. Defaults to {@code true}. Comes only into
* effect if {@link #isPeriodicRefreshEnabled()} is {@code true}.
*
* @return {@code true} if stale connections are cleaned up after cluster topology updates
*/
public boolean isCloseStaleConnections() {
return closeStaleConnections;
}
/**
* Discover cluster nodes from topology and use the discovered nodes as source for the cluster topology. Using dynamic
* refresh will query all discovered nodes for the cluster topology and calculate the number of clients for each node. If
* set to {@code false}, only the initial seed nodes will be used as sources for topology discovery and the number of
* clients including response latency will be obtained only for the initial seed nodes. This can be useful when using Redis
* Cluster with many nodes. Defaults to {@code true}. See
* {@link ClusterTopologyRefreshOptions#DEFAULT_DYNAMIC_REFRESH_SOURCES}.
*
* @return {@code true} if dynamic refresh sources are enabled
*/
public boolean useDynamicRefreshSources() {
return dynamicRefreshSources;
}
/**
* Flag, whether regular cluster topology updates are updated. The client starts updating the cluster topology in the
* intervals of {@link #getRefreshPeriod()}. Defaults to {@code false}.
*
* @return {@code true} it the cluster topology view is updated periodically
*/
public boolean isPeriodicRefreshEnabled() {
return periodicRefreshEnabled;
}
/**
* Period between the regular cluster topology updates. Defaults to {@literal 60}.
*
* @return the period between the regular cluster topology updates
*/
public Duration getRefreshPeriod() {
return refreshPeriod;
}
/**
* Threshold for {@link RefreshTrigger#PERSISTENT_RECONNECTS}. Topology updates based on persistent reconnects lead only to
* a refresh if the reconnect process tries at least {@code refreshTriggersReconnectAttempts}. See
* {@link #DEFAULT_REFRESH_TRIGGERS_RECONNECT_ATTEMPTS}.
*
* @return number of reconnect attempts for a connection before an adaptive topology refresh is triggered
*/
public int getRefreshTriggersReconnectAttempts() {
return refreshTriggersReconnectAttempts;
}
/**
* Available refresh triggers to signal early topology refreshing.
*/
public
|
Builder
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/StringMatcher.java
|
{
"start": 881,
"end": 1450
}
|
class ____ as a facade / encapsulation around the expression and testing of string-based patterns within Elasticsearch security.
* Security supports "wildcards" in a number of places (e.g. index names within roles). These cases also support
* {@link org.apache.lucene.util.automaton.RegExp Lucene-syntax regular expressions} and are implemented via Lucene
* {@link org.apache.lucene.util.automaton.Automaton} objects.
* However, it can be more efficient to have special handling and avoid {@code Automata} for particular cases such as exact string matches.
* This
|
acts
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/scheduler/stopwithsavepoint/StopWithSavepointStoppingException.java
|
{
"start": 1457,
"end": 2536
}
|
class ____ extends FlinkException {
private final String savepointPath;
public StopWithSavepointStoppingException(String savepointPath, JobID jobID) {
super(formatMessage(savepointPath, jobID));
this.savepointPath = savepointPath;
}
public StopWithSavepointStoppingException(String savepointPath, JobID jobID, Throwable cause) {
super(formatMessage(savepointPath, jobID), cause);
this.savepointPath = savepointPath;
}
private static String formatMessage(String savepointPath, JobID jobID) {
return String.format(
"A savepoint has been created at: %s, but the corresponding job %s failed "
+ "during stopping. The savepoint is consistent, but might have "
+ "uncommitted transactions. If you want to commit the transaction "
+ "please restart a job from this savepoint.",
savepointPath, jobID);
}
public String getSavepointPath() {
return savepointPath;
}
}
|
StopWithSavepointStoppingException
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/context/cache/ClassLevelDirtiesContextTestNGTests.java
|
{
"start": 10091,
"end": 10543
}
|
class ____ extends BaseTestCase {
@org.testng.annotations.Test
void verifyContextWasAutowired1() {
assertApplicationContextWasAutowired();
}
@org.testng.annotations.Test
void verifyContextWasAutowired2() {
assertApplicationContextWasAutowired();
}
@org.testng.annotations.Test
void verifyContextWasAutowired3() {
assertApplicationContextWasAutowired();
}
}
static
|
ClassLevelDirtiesContextWithAfterEachTestMethodModeTestCase
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/annotations/query/Dimensions.java
|
{
"start": 239,
"end": 521
}
|
class ____ {
private int length;
private int width;
public int getLength() {
return length;
}
public void setLength(int length) {
this.length = length;
}
public int getWidth() {
return width;
}
public void setWidth(int width) {
this.width = width;
}
}
|
Dimensions
|
java
|
apache__kafka
|
metadata/src/main/java/org/apache/kafka/controller/ControllerResult.java
|
{
"start": 986,
"end": 2838
}
|
class ____<T> {
private final List<ApiMessageAndVersion> records;
private final T response;
private final boolean isAtomic;
protected ControllerResult(List<ApiMessageAndVersion> records, T response, boolean isAtomic) {
Objects.requireNonNull(records);
this.records = records;
this.response = response;
this.isAtomic = isAtomic;
}
public List<ApiMessageAndVersion> records() {
return records;
}
public T response() {
return response;
}
public boolean isAtomic() {
return isAtomic;
}
@Override
public boolean equals(Object o) {
if (o == null || (!o.getClass().equals(getClass()))) {
return false;
}
ControllerResult<?> other = (ControllerResult<?>) o;
return records.equals(other.records) &&
Objects.equals(response, other.response) &&
Objects.equals(isAtomic, other.isAtomic);
}
@Override
public int hashCode() {
return Objects.hash(records, response, isAtomic);
}
@Override
public String toString() {
return String.format(
"ControllerResult(records=%s, response=%s, isAtomic=%s)",
records.stream().map(ApiMessageAndVersion::toString).collect(Collectors.joining(",")),
response,
isAtomic
);
}
public ControllerResult<T> withoutRecords() {
return new ControllerResult<>(List.of(), response, false);
}
public static <T> ControllerResult<T> atomicOf(List<ApiMessageAndVersion> records, T response) {
return new ControllerResult<>(records, response, true);
}
public static <T> ControllerResult<T> of(List<ApiMessageAndVersion> records, T response) {
return new ControllerResult<>(records, response, false);
}
}
|
ControllerResult
|
java
|
apache__flink
|
flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/protobuf/PatchedProtoWriteSupport.java
|
{
"start": 30601,
"end": 30847
}
|
class ____ extends FieldWriter {
@Override
void writeRawValue(Object value) {
Timestamp timestamp = (Timestamp) value;
recordConsumer.addLong(Timestamps.toNanos(timestamp));
}
}
|
TimestampWriter
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/TransportCancelConnectorSyncJobAction.java
|
{
"start": 931,
"end": 2022
}
|
class ____ extends HandledTransportAction<
CancelConnectorSyncJobAction.Request,
ConnectorUpdateActionResponse> {
protected ConnectorSyncJobIndexService connectorSyncJobIndexService;
@Inject
public TransportCancelConnectorSyncJobAction(TransportService transportService, ActionFilters actionFilters, Client client) {
super(
CancelConnectorSyncJobAction.NAME,
transportService,
actionFilters,
CancelConnectorSyncJobAction.Request::new,
EsExecutors.DIRECT_EXECUTOR_SERVICE
);
this.connectorSyncJobIndexService = new ConnectorSyncJobIndexService(client);
}
@Override
protected void doExecute(
Task task,
CancelConnectorSyncJobAction.Request request,
ActionListener<ConnectorUpdateActionResponse> listener
) {
connectorSyncJobIndexService.cancelConnectorSyncJob(
request.getConnectorSyncJobId(),
listener.map(r -> new ConnectorUpdateActionResponse(r.getResult()))
);
}
}
|
TransportCancelConnectorSyncJobAction
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/predicate/operator/comparison/BinaryComparisonPipe.java
|
{
"start": 781,
"end": 2009
}
|
class ____ extends BinaryPipe {
private final BinaryComparisonOperation operation;
public BinaryComparisonPipe(Source source, Expression expression, Pipe left, Pipe right, BinaryComparisonOperation operation) {
super(source, expression, left, right);
this.operation = operation;
}
@Override
protected NodeInfo<BinaryComparisonPipe> info() {
return NodeInfo.create(this, BinaryComparisonPipe::new, expression(), left(), right(), operation);
}
@Override
protected BinaryPipe replaceChildren(Pipe left, Pipe right) {
return new BinaryComparisonPipe(source(), expression(), left, right, operation);
}
@Override
public BinaryComparisonProcessor asProcessor() {
return new BinaryComparisonProcessor(left().asProcessor(), right().asProcessor(), operation);
}
@Override
public int hashCode() {
return Objects.hash(super.hashCode(), operation);
}
@Override
public boolean equals(Object obj) {
if (super.equals(obj)) {
BinaryComparisonPipe other = (BinaryComparisonPipe) obj;
return Objects.equals(operation, other.operation);
}
return false;
}
}
|
BinaryComparisonPipe
|
java
|
quarkusio__quarkus
|
integration-tests/hibernate-reactive-oracle/src/main/java/io/quarkus/it/hibernate/reactive/oracle/model/lazy/Author.java
|
{
"start": 362,
"end": 1464
}
|
class ____ {
@Id
private Integer id;
private String name;
@OneToMany(mappedBy = "author", cascade = PERSIST)
private Collection<Book> books = new ArrayList<>();
public Author() {
}
public Author(Integer id, String name) {
this.id = id;
this.name = name;
}
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Collection<Book> getBooks() {
return books;
}
public void setBooks(Collection<Book> books) {
this.books = books;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Author author = (Author) o;
return Objects.equals(name, author.name);
}
@Override
public int hashCode() {
return Objects.hash(name);
}
}
|
Author
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/annotations/manytoone/NotOptionalManyToOneTest.java
|
{
"start": 984,
"end": 1817
}
|
class ____ {
@BeforeEach
public void setUp(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
Child child = new Child( 1, "Luigi" );
Parent parent = new Parent( 2, "Roberto", child );
session.persist( child );
session.persist( parent );
}
);
}
@AfterEach
public void tearDown(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
}
@Test
public void testInnerJoinIsUsed(SessionFactoryScope scope) {
SQLStatementInspector statementInspector = scope.getCollectingStatementInspector();
statementInspector.clear();
scope.inTransaction(
session -> {
session.get( Parent.class, 2 );
statementInspector.assertNumberOfJoins( 0, SqlAstJoinType.INNER, 1 );
}
);
}
@Entity(name = "Parent")
public static
|
NotOptionalManyToOneTest
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
|
{
"start": 10530,
"end": 18731
}
|
class ____ {
private boolean bypass = true;
AssertHelper(boolean bp) {
bypass = bp;
}
public void doAssert(boolean x) {
if (bypass) {
assertFalse(x);
} else {
assertTrue(x);
}
}
}
private void testBypassProviderHelper(final String[] users,
final short expectedPermission, final boolean bypass) throws Exception {
final AssertHelper asserter = new AssertHelper(bypass);
assertTrue(CALLED.contains("start"));
FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
final Path userPath = new Path("/user");
final Path authz = new Path("/user/authz");
final Path authzChild = new Path("/user/authz/child2");
fs.mkdirs(userPath);
fs.setPermission(userPath, new FsPermission(HDFS_PERMISSION));
fs.mkdirs(authz);
fs.setPermission(authz, new FsPermission(HDFS_PERMISSION));
fs.mkdirs(authzChild);
fs.setPermission(authzChild, new FsPermission(HDFS_PERMISSION));
for(String user : users) {
UserGroupInformation ugiBypass =
UserGroupInformation.createUserForTesting(user,
new String[]{"g1"});
ugiBypass.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
assertEquals(expectedPermission,
fs.getFileStatus(authzChild).getPermission().toShort());
asserter.doAssert(CALLED.contains("getAttributes"));
asserter.doAssert(CALLED.contains("checkPermission|null|null|null"));
CALLED.clear();
assertEquals(expectedPermission,
fs.listStatus(userPath)[0].getPermission().toShort());
asserter.doAssert(CALLED.contains("getAttributes"));
asserter.doAssert(
CALLED.contains("checkPermission|null|null|READ_EXECUTE"));
CALLED.clear();
fs.getAclStatus(authzChild);
asserter.doAssert(CALLED.contains("getAttributes"));
asserter.doAssert(CALLED.contains("checkPermission|null|null|null"));
return null;
}
});
}
}
@Test
public void testAuthzDelegationToProvider() throws Exception {
LOG.info("Test not bypassing provider");
String[] users = {"u1"};
testBypassProviderHelper(users, PROVIDER_PERMISSION, false);
}
@Test
public void testAuthzBypassingProvider() throws Exception {
LOG.info("Test bypassing provider");
String[] users = {"u2", "u3"};
testBypassProviderHelper(users, HDFS_PERMISSION, true);
}
private void verifyFileStatus(UserGroupInformation ugi) throws IOException {
FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
FileStatus status = fs.getFileStatus(new Path("/"));
LOG.info("Path '/' is owned by: "
+ status.getOwner() + ":" + status.getGroup());
Path userDir = new Path("/user/" + ugi.getShortUserName());
fs.mkdirs(userDir);
status = fs.getFileStatus(userDir);
assertEquals(ugi.getShortUserName(), status.getOwner());
assertEquals("supergroup", status.getGroup());
assertEquals(new FsPermission((short) 0755), status.getPermission());
Path authzDir = new Path("/user/authz");
fs.mkdirs(authzDir);
status = fs.getFileStatus(authzDir);
assertEquals("foo", status.getOwner());
assertEquals("bar", status.getGroup());
assertEquals(new FsPermission((short) 0770), status.getPermission());
AclStatus aclStatus = fs.getAclStatus(authzDir);
assertEquals(1, aclStatus.getEntries().size());
assertEquals(AclEntryType.GROUP, aclStatus.getEntries().get(0).getType());
assertEquals("xxx", aclStatus.getEntries().get(0).getName());
assertEquals(FsAction.ALL, aclStatus.getEntries().get(0).getPermission());
Map<String, byte[]> xAttrs = fs.getXAttrs(authzDir);
assertTrue(xAttrs.containsKey("user.test"));
assertEquals(2, xAttrs.get("user.test").length);
}
/**
* With the custom provider configured, verify file status attributes.
* A superuser can bypass permission check while resolving paths. So,
* verify file status for both superuser and non-superuser.
*/
@Test
public void testCustomProvider() throws Exception {
final UserGroupInformation[] users = new UserGroupInformation[]{
UserGroupInformation.createUserForTesting(
System.getProperty("user.name"), new String[]{"supergroup"}),
UserGroupInformation.createUserForTesting(
"normaluser", new String[]{"normalusergroup"}),
};
for (final UserGroupInformation user : users) {
user.doAs((PrivilegedExceptionAction<Object>) () -> {
verifyFileStatus(user);
return null;
});
}
}
@Test
public void testAclFeature() throws Exception {
UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
"testuser", new String[]{"testgroup"});
ugi.doAs((PrivilegedExceptionAction<Object>) () -> {
FileSystem fs = miniDFS.getFileSystem();
Path aclDir = new Path("/user/acl");
fs.mkdirs(aclDir);
Path aclChildDir = new Path(aclDir, "subdir");
fs.mkdirs(aclChildDir);
AclStatus aclStatus = fs.getAclStatus(aclDir);
assertEquals(0, aclStatus.getEntries().size());
return null;
});
}
@Test
// HDFS-14389 - Ensure getAclStatus returns the owner, group and permissions
// from the Attribute Provider, and not from HDFS.
public void testGetAclStatusReturnsProviderOwnerPerms() throws Exception {
FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
final Path userPath = new Path("/user");
final Path authz = new Path("/user/authz");
final Path authzChild = new Path("/user/authz/child2");
fs.mkdirs(userPath);
fs.setPermission(userPath, new FsPermission(HDFS_PERMISSION));
fs.mkdirs(authz);
fs.setPermission(authz, new FsPermission(HDFS_PERMISSION));
fs.mkdirs(authzChild);
fs.setPermission(authzChild, new FsPermission(HDFS_PERMISSION));
UserGroupInformation ugi = UserGroupInformation.createUserForTesting("u1",
new String[]{"g1"});
ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
assertEquals(PROVIDER_PERMISSION,
fs.getFileStatus(authzChild).getPermission().toShort());
assertEquals("foo", fs.getAclStatus(authzChild).getOwner());
assertEquals("bar", fs.getAclStatus(authzChild).getGroup());
assertEquals(PROVIDER_PERMISSION,
fs.getAclStatus(authzChild).getPermission().toShort());
return null;
}
});
}
@Test
// HDFS-16529 - Ensure enforcer AccessControlException subclass are caught
// and re-thrown as plain ACE exceptions.
public void testSubClassedAccessControlExceptions() throws Exception {
FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
shouldThrowAccessException = true;
final Path userPath = new Path("/user");
final Path authz = new Path("/user/authz");
final Path authzChild = new Path("/user/authz/child2");
fs.mkdirs(userPath);
fs.setPermission(userPath, new FsPermission(HDFS_PERMISSION));
fs.mkdirs(authz);
fs.setPermission(authz, new FsPermission(HDFS_PERMISSION));
fs.mkdirs(authzChild);
fs.setPermission(authzChild, new FsPermission(HDFS_PERMISSION));
UserGroupInformation ugi = UserGroupInformation.createUserForTesting("u1",
new String[]{"g1"});
ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
try {
fs.access(authzChild, FsAction.ALL);
fail("Exception should be thrown");
// The DFS Client will get a RemoteException containing an
// AccessControlException (ACE). If the ACE is a subclass of ACE then
// the client does not unwrap it correctly. The change in HDFS-16529
// is to ensure ACE is always thrown rather than a sub
|
AssertHelper
|
java
|
hibernate__hibernate-orm
|
hibernate-testing/src/main/java/org/hibernate/testing/boot/MetadataBuildingContextTestingImpl.java
|
{
"start": 944,
"end": 2894
}
|
class ____ implements MetadataBuildingContext {
private final MetadataBuildingOptions buildingOptions;
private final EffectiveMappingDefaults mappingDefaults;
private final InFlightMetadataCollector metadataCollector;
private final BootstrapContext bootstrapContext;
private final ObjectNameNormalizer objectNameNormalizer;
private final TypeDefinitionRegistryStandardImpl typeDefinitionRegistry;
public MetadataBuildingContextTestingImpl(StandardServiceRegistry serviceRegistry) {
MetadataBuilderImpl.MetadataBuildingOptionsImpl buildingOptions = new MetadataBuilderImpl.MetadataBuildingOptionsImpl( serviceRegistry );
this.buildingOptions = buildingOptions;
buildingOptions.setBootstrapContext( bootstrapContext = new BootstrapContextImpl( serviceRegistry, buildingOptions ) );
mappingDefaults = new RootMappingDefaults(
new MetadataBuilderImpl.MappingDefaultsImpl( serviceRegistry ),
new PersistenceUnitMetadataImpl()
);
metadataCollector = new InFlightMetadataCollectorImpl( bootstrapContext, buildingOptions );
objectNameNormalizer = new ObjectNameNormalizer(this);
typeDefinitionRegistry = new TypeDefinitionRegistryStandardImpl();
bootstrapContext.getTypeConfiguration().scope( this );
}
@Override
public BootstrapContext getBootstrapContext() {
return bootstrapContext;
}
@Override
public MetadataBuildingOptions getBuildingOptions() {
return buildingOptions;
}
@Override
public EffectiveMappingDefaults getEffectiveDefaults() {
return mappingDefaults;
}
@Override
public InFlightMetadataCollector getMetadataCollector() {
return metadataCollector;
}
@Override
public ObjectNameNormalizer getObjectNameNormalizer() {
return objectNameNormalizer;
}
@Override
public TypeDefinitionRegistryStandardImpl getTypeDefinitionRegistry() {
return typeDefinitionRegistry;
}
@Override
public String getCurrentContributorName() {
return "orm";
}
}
|
MetadataBuildingContextTestingImpl
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/annotations/CollectionIdJavaClass.java
|
{
"start": 978,
"end": 1040
}
|
class ____ use as the collection-id.
*/
Class<?> idType();
}
|
to
|
java
|
apache__camel
|
components/camel-kafka/src/main/java/org/apache/camel/component/kafka/KafkaConfiguration.java
|
{
"start": 36842,
"end": 48782
}
|
class ____ partitioning messages amongst sub-topics. The default partitioner is based on the hash
* of the key.
*/
public void setPartitioner(String partitioner) {
this.partitioner = partitioner;
}
/**
* Whether the message keys should be ignored when computing the partition. This setting has effect only when
* {@link #partitioner} is not set
*/
public boolean isPartitionerIgnoreKeys() {
return partitionerIgnoreKeys;
}
public void setPartitionerIgnoreKeys(boolean partitionerIgnoreKeys) {
this.partitionerIgnoreKeys = partitionerIgnoreKeys;
}
public String getTopic() {
return topic;
}
/**
* Name of the topic to use. On the consumer you can use comma to separate multiple topics. A producer can only send
* a message to a single topic.
*/
public void setTopic(String topic) {
this.topic = topic;
}
public int getConsumersCount() {
return consumersCount;
}
/**
* The number of consumers that connect to kafka server. Each consumer is run on a separate thread that retrieves
* and process the incoming data.
*/
public void setConsumersCount(int consumersCount) {
this.consumersCount = consumersCount;
}
public String getClientId() {
return clientId;
}
/**
* The client id is a user-specified string sent in each request to help trace calls. It should logically identify
* the application making the request.
*/
public void setClientId(String clientId) {
this.clientId = clientId;
}
public boolean isAutoCommitEnable() {
return offsetRepository == null && autoCommitEnable;
}
public boolean getAutoCommitEnable() {
if (!batching) {
return autoCommitEnable;
}
return false;
}
/**
* If true, periodically commit to ZooKeeper the offset of messages already fetched by the consumer. This committed
* offset will be used when the process fails as the position from which the new consumer will begin.
*/
public void setAutoCommitEnable(boolean autoCommitEnable) {
this.autoCommitEnable = autoCommitEnable;
}
public boolean isAllowManualCommit() {
return allowManualCommit;
}
/**
* Whether to allow doing manual commits via {@link KafkaManualCommit}.
* <p/>
* If this option is enabled then an instance of {@link KafkaManualCommit} is stored on the {@link Exchange} message
* header, which allows end users to access this API and perform manual offset commits via the Kafka consumer.
*/
public void setAllowManualCommit(boolean allowManualCommit) {
this.allowManualCommit = allowManualCommit;
}
public int getShutdownTimeout() {
return shutdownTimeout;
}
/**
* Timeout in milliseconds to wait gracefully for the consumer or producer to shut down and terminate its worker
* threads.
*/
public void setShutdownTimeout(int shutdownTimeout) {
this.shutdownTimeout = shutdownTimeout;
}
public StateRepository<String, String> getOffsetRepository() {
return offsetRepository;
}
/**
* The offset repository to use to locally store the offset of each partition of the topic. Defining one will
* disable the autocommit.
*/
public void setOffsetRepository(StateRepository<String, String> offsetRepository) {
this.offsetRepository = offsetRepository;
}
public Integer getAutoCommitIntervalMs() {
return autoCommitIntervalMs;
}
/**
* The frequency in ms that the consumer offsets are committed to zookeeper.
*/
public void setAutoCommitIntervalMs(Integer autoCommitIntervalMs) {
this.autoCommitIntervalMs = autoCommitIntervalMs;
}
public Integer getFetchMinBytes() {
return fetchMinBytes;
}
/**
* The minimum amount of data the server should return for a fetch request. If insufficient data is available, the
* request will wait for that much data to accumulate before answering the request.
*/
public void setFetchMinBytes(Integer fetchMinBytes) {
this.fetchMinBytes = fetchMinBytes;
}
/**
* The maximum amount of data the server should return for a fetch request. This is not an absolute maximum, if the
* first message in the first non-empty partition of the fetch is larger than this value, the message will still be
* returned to ensure that the consumer can make progress. The maximum message size accepted by the broker is
* defined via message.max.bytes (broker config) or max.message.bytes (topic config). Note that the consumer
* performs multiple fetches in parallel.
*/
public Integer getFetchMaxBytes() {
return fetchMaxBytes;
}
public void setFetchMaxBytes(Integer fetchMaxBytes) {
this.fetchMaxBytes = fetchMaxBytes;
}
public Integer getFetchWaitMaxMs() {
return fetchWaitMaxMs;
}
/**
* The maximum amount of time the server will block before answering the fetch request if there isn't enough data to
* immediately satisfy fetch.min.bytes
*/
public void setFetchWaitMaxMs(Integer fetchWaitMaxMs) {
this.fetchWaitMaxMs = fetchWaitMaxMs;
}
public String getAutoOffsetReset() {
return autoOffsetReset;
}
/**
* What to do when there is no initial offset in ZooKeeper or if an offset is out of range: earliest : automatically
* reset the offset to the earliest offset latest: automatically reset the offset to the latest offset fail: throw
* exception to the consumer
*/
public void setAutoOffsetReset(String autoOffsetReset) {
this.autoOffsetReset = autoOffsetReset;
}
public boolean isBreakOnFirstError() {
return breakOnFirstError;
}
/**
* This options controls what happens when a consumer is processing an exchange and it fails. If the option is
* <tt>false</tt> then the consumer continues to the next message and processes it. If the option is <tt>true</tt>
* then the consumer breaks out.
*
* Using the default NoopCommitManager will cause the consumer to not commit the offset so that the message is
* re-attempted. The consumer should use the KafkaManualCommit to determine the best way to handle the message.
*
* Using either the SyncCommitManager or the AsyncCommitManager, the consumer will seek back to the offset of the
* message that caused a failure, and then re-attempt to process this message. However, this can lead to endless
* processing of the same message if it's bound to fail every time, e.g., a poison message. Therefore, it's
* recommended to deal with that, for example, by using Camel's error handler.
*/
public void setBreakOnFirstError(boolean breakOnFirstError) {
this.breakOnFirstError = breakOnFirstError;
}
public String getBrokers() {
return brokers;
}
/**
* URL of the Kafka brokers to use. The format is host1:port1,host2:port2, and the list can be a subset of brokers
* or a VIP pointing to a subset of brokers.
* <p/>
* This option is known as <tt>bootstrap.servers</tt> in the Kafka documentation.
*/
public void setBrokers(String brokers) {
this.brokers = brokers;
}
public String getSchemaRegistryURL() {
return schemaRegistryURL;
}
/**
* URL of the schema registry servers to use. The format is host1:port1,host2:port2. This is known as
* schema.registry.url in multiple Schema registries documentation. This option is only available externally (not
* standard Apache Kafka)
*/
public void setSchemaRegistryURL(String schemaRegistryURL) {
this.schemaRegistryURL = schemaRegistryURL;
}
public boolean isSpecificAvroReader() {
return specificAvroReader;
}
/**
* This enables the use of a specific Avro reader for use with the in multiple Schema registries documentation with
* Avro Deserializers implementation. This option is only available externally (not standard Apache Kafka)
*/
public void setSpecificAvroReader(boolean specificAvroReader) {
this.specificAvroReader = specificAvroReader;
}
public String getCompressionCodec() {
return compressionCodec;
}
/**
* This parameter allows you to specify the compression codec for all data generated by this producer. Valid values
* are "none", "gzip", "snappy", "lz4" and "zstd".
*/
public void setCompressionCodec(String compressionCodec) {
this.compressionCodec = compressionCodec;
}
public Integer getRetryBackoffMs() {
return retryBackoffMs;
}
/**
* The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids
* repeatedly sending requests in a tight loop under some failure scenarios. This value is the initial backoff value
* and will increase exponentially for each failed request, up to the retry.backoff.max.ms value.
*/
public void setRetryBackoffMs(Integer retryBackoffMs) {
this.retryBackoffMs = retryBackoffMs;
}
public Integer getRetryBackoffMaxMs() {
return retryBackoffMaxMs;
}
/**
* The maximum amount of time in milliseconds to wait when retrying a request to the broker that has repeatedly
* failed. If provided, the backoff per client will increase exponentially for each failed request, up to this
* maximum. To prevent all clients from being synchronized upon retry, a randomized jitter with a factor of 0.2 will
* be applied to the backoff, resulting in the backoff falling within a range between 20% below and 20% above the
* computed value. If retry.backoff.ms is set to be higher than retry.backoff.max.ms, then retry.backoff.max.ms will
* be used as a constant backoff from the beginning without any exponential increase
*/
public void setRetryBackoffMaxMs(Integer retryBackoffMaxMs) {
this.retryBackoffMaxMs = retryBackoffMaxMs;
}
public Integer getSendBufferBytes() {
return sendBufferBytes;
}
/**
* Socket write buffer size
*/
public void setSendBufferBytes(Integer sendBufferBytes) {
this.sendBufferBytes = sendBufferBytes;
}
public Integer getRequestTimeoutMs() {
return requestTimeoutMs;
}
/**
* The amount of time the broker will wait trying to meet the request.required.acks requirement before sending back
* an error to the client.
*/
public void setRequestTimeoutMs(Integer requestTimeoutMs) {
this.requestTimeoutMs = requestTimeoutMs;
}
public Integer getDeliveryTimeoutMs() {
return deliveryTimeoutMs;
}
/**
* An upper bound on the time to report success or failure after a call to send() returns. This limits the total
* time that a record will be delayed prior to sending, the time to await acknowledgement from the broker (if
* expected), and the time allowed for retriable send failures.
*/
public void setDeliveryTimeoutMs(Integer deliveryTimeoutMs) {
this.deliveryTimeoutMs = deliveryTimeoutMs;
}
public Integer getQueueBufferingMaxMessages() {
return queueBufferingMaxMessages;
}
/**
* The maximum number of unsent messages that can be queued up the producer when using async mode before either the
* producer must be blocked or data must be dropped.
*/
public void setQueueBufferingMaxMessages(Integer queueBufferingMaxMessages) {
this.queueBufferingMaxMessages = queueBufferingMaxMessages;
}
public String getValueSerializer() {
return valueSerializer;
}
/**
* The serializer
|
for
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/validation/beanvalidation/ValidatorFactoryTests.java
|
{
"start": 13215,
"end": 14154
}
|
class ____ {
@NotNull
private String name;
@Valid
private ValidAddress address = new ValidAddress();
@Valid
private List<ValidAddress> addressList = new ArrayList<>();
@Valid
private Set<ValidAddress> addressSet = new LinkedHashSet<>();
public boolean expectsAutowiredValidator = false;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public ValidAddress getAddress() {
return address;
}
public void setAddress(ValidAddress address) {
this.address = address;
}
public List<ValidAddress> getAddressList() {
return addressList;
}
public void setAddressList(List<ValidAddress> addressList) {
this.addressList = addressList;
}
public Set<ValidAddress> getAddressSet() {
return addressSet;
}
public void setAddressSet(Set<ValidAddress> addressSet) {
this.addressSet = addressSet;
}
}
public static
|
ValidPerson
|
java
|
apache__camel
|
core/camel-api/src/main/java/org/apache/camel/Processor.java
|
{
"start": 1474,
"end": 1731
}
|
interface ____ {
/**
* Processes the message exchange
*
* @param exchange the message exchange
* @throws Exception if an internal processing error has occurred.
*/
void process(Exchange exchange) throws Exception;
}
|
Processor
|
java
|
google__dagger
|
javatests/dagger/functional/basic/NestedTest.java
|
{
"start": 815,
"end": 1199
}
|
class ____ {
@Test public void nestedFoo() {
OuterClassFoo.NestedComponent nestedFoo = DaggerOuterClassFoo_NestedComponent.create();
assertThat(nestedFoo.thing()).isNotNull();
}
@Test public void nestedBar() {
OuterClassBar.NestedComponent nestedBar = DaggerOuterClassBar_NestedComponent.create();
assertThat(nestedBar.injectedThing()).isNotNull();
}
}
|
NestedTest
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/DifferentNameButSameTest.java
|
{
"start": 8611,
"end": 9074
}
|
class ____ {
A.B test(Object B) {
return new A.B();
}
A.B test2() {
return null;
}
}
""")
.doTest();
}
@Test
public void innerClassConstructor() {
BugCheckerRefactoringTestHelper.newInstance(DifferentNameButSame.class, getClass())
.addInputLines(
"A.java",
"""
package pkg;
|
Test
|
java
|
apache__flink
|
flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/test/util/MiniClusterPipelineExecutorServiceLoader.java
|
{
"start": 5256,
"end": 5954
}
|
class ____ implements PipelineExecutorFactory {
private final MiniCluster miniCluster;
public MiniClusterPipelineExecutorFactory(MiniCluster miniCluster) {
this.miniCluster = miniCluster;
}
@Override
public String getName() {
return MiniClusterPipelineExecutorServiceLoader.NAME;
}
@Override
public boolean isCompatibleWith(Configuration configuration) {
return true;
}
@Override
public PipelineExecutor getExecutor(Configuration configuration) {
return new MiniClusterExecutor(miniCluster);
}
}
private static
|
MiniClusterPipelineExecutorFactory
|
java
|
apache__camel
|
components/camel-azure/camel-azure-eventhubs/src/test/java/org/apache/camel/component/azure/eventhubs/EventProcessorTest.java
|
{
"start": 1310,
"end": 3910
}
|
class ____ {
@Test
public void testCreateEventProcessorWithNonValidOptions() {
final EventHubsConfiguration configuration = new EventHubsConfiguration();
final Consumer<EventContext> onEvent = event -> {
};
final Consumer<ErrorContext> onError = error -> {
};
assertThrows(IllegalArgumentException.class,
() -> EventHubsClientFactory.createEventProcessorClient(configuration, onEvent, onError));
configuration.setBlobContainerName("testContainer");
assertThrows(IllegalArgumentException.class,
() -> EventHubsClientFactory.createEventProcessorClient(configuration, onEvent, onError));
configuration.setBlobAccountName("testAccount");
assertThrows(IllegalArgumentException.class,
() -> EventHubsClientFactory.createEventProcessorClient(configuration, onEvent, onError));
configuration.setBlobAccessKey("testAccess");
assertNotNull(EventHubsClientFactory.createEventProcessorClient(configuration, onEvent, onError));
configuration.setBlobContainerName(null);
assertThrows(IllegalArgumentException.class,
() -> EventHubsClientFactory.createEventProcessorClient(configuration, onEvent, onError));
}
@Test
public void testCreateEventProcessorWithTokenCredential() {
final EventHubsConfiguration configuration = new EventHubsConfiguration();
final Consumer<EventContext> onEvent = event -> {
};
final Consumer<ErrorContext> onError = error -> {
};
configuration.setBlobContainerName("testContainer");
configuration.setBlobAccountName("testAccount");
configuration.setBlobAccessKey("testAccess");
assertNotNull(EventHubsClientFactory.createEventProcessorClient(configuration, onEvent, onError));
configuration.setTokenCredential(new IntelliJCredentialBuilder().tenantId("tenantId").build());
configuration.setCredentialType(CredentialType.TOKEN_CREDENTIAL);
assertThrows(IllegalArgumentException.class,
() -> EventHubsClientFactory.createEventProcessorClient(configuration, onEvent, onError));
configuration.setNamespace("namespace");
assertThrows(IllegalArgumentException.class,
() -> EventHubsClientFactory.createEventProcessorClient(configuration, onEvent, onError));
configuration.setEventHubName("eventHubName");
assertNotNull(EventHubsClientFactory.createEventProcessorClient(configuration, onEvent, onError));
}
}
|
EventProcessorTest
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySchedulerMixedModeAbsoluteAndPercentageVector.java
|
{
"start": 2976,
"end": 3912
}
|
class ____
extends JerseyTestBase {
private static final String EXPECTED_FILE_TMPL = "webapp/mixed-%s-%s.json";
public TestRMWebServicesCapacitySchedulerMixedModeAbsoluteAndPercentageVector() {
backupSchedulerConfigFileInTarget();
}
private MockRM rm;
private Configuration conf;
private RMWebServices rmWebServices;
@AfterAll
public static void afterClass() {
restoreSchedulerConfigFileInTarget();
}
@Override
protected Application configure() {
ResourceConfig config = new ResourceConfig();
config.register(RMWebServices.class);
config.register(new JerseyBinder());
config.register(GenericExceptionHandler.class);
config.register(TestRMWebServicesAppsModification.TestRMCustomAuthFilter.class);
config.register(new JettisonFeature()).register(JAXBContextResolver.class);
return config;
}
private
|
TestRMWebServicesCapacitySchedulerMixedModeAbsoluteAndPercentageVector
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/join/TestJoinDatamerge.java
|
{
"start": 5810,
"end": 6055
}
|
class ____
extends SimpleCheckerReduceBase {
public boolean verify(int key, int occ) {
return (key == 0 && occ == 2) ||
(key != 0 && (key % (srcs * srcs) == 0) && occ == 1);
}
}
private static
|
InnerJoinReduceChecker
|
java
|
spring-projects__spring-framework
|
spring-context-indexer/src/test/java/org/springframework/context/index/processor/PropertiesMarshallerTests.java
|
{
"start": 1077,
"end": 2541
}
|
class ____ {
@Test
void readWrite() throws IOException {
CandidateComponentsMetadata metadata = new CandidateComponentsMetadata();
metadata.add(createItem("com.foo", "first", "second"));
metadata.add(createItem("com.bar", "first"));
ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
PropertiesMarshaller.write(metadata, outputStream);
CandidateComponentsMetadata readMetadata = PropertiesMarshaller.read(
new ByteArrayInputStream(outputStream.toByteArray()));
assertThat(readMetadata).has(Metadata.of("com.foo", "first", "second"));
assertThat(readMetadata).has(Metadata.of("com.bar", "first"));
assertThat(readMetadata.getItems()).hasSize(2);
}
@Test
void metadataIsWrittenDeterministically() throws IOException {
CandidateComponentsMetadata metadata = new CandidateComponentsMetadata();
metadata.add(createItem("com.b", "type"));
metadata.add(createItem("com.c", "type"));
metadata.add(createItem("com.a", "type"));
ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
PropertiesMarshaller.write(metadata, outputStream);
String contents = outputStream.toString(StandardCharsets.ISO_8859_1);
assertThat(contents.split(System.lineSeparator())).containsExactly("com.a=type", "com.b=type", "com.c=type");
}
private static ItemMetadata createItem(String type, String... stereotypes) {
return new ItemMetadata(type, new HashSet<>(Arrays.asList(stereotypes)));
}
}
|
PropertiesMarshallerTests
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/resource/basic/SubResourceInterfaceAndClientInterfaceTest.java
|
{
"start": 7199,
"end": 7320
}
|
interface ____ extends UndangerousGoodsResource {
@GET
String get();
}
public
|
DangerousGoodsResource
|
java
|
quarkusio__quarkus
|
core/deployment/src/main/java/io/quarkus/deployment/builditem/StreamingLogHandlerBuildItem.java
|
{
"start": 317,
"end": 952
}
|
class ____ extends SimpleBuildItem {
private final RuntimeValue<Optional<Handler>> handlerValue;
/**
* Construct a new instance.
*
* @param handlerValue the handler value to add to the run time configuration
*/
public StreamingLogHandlerBuildItem(final RuntimeValue<Optional<Handler>> handlerValue) {
this.handlerValue = Assert.checkNotNullParam("handlerValue", handlerValue);
}
/**
* Get the handler value.
*
* @return the handler value
*/
public RuntimeValue<Optional<Handler>> getHandlerValue() {
return handlerValue;
}
}
|
StreamingLogHandlerBuildItem
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/rest/client/reactive/timeout/BuilderReadTimeoutTest.java
|
{
"start": 781,
"end": 2106
}
|
class ____ {
@TestHTTPResource
URI uri;
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(Client.class, Resource.class));
@Test
void shouldTimeoutIfReadTimeoutSetShort() {
Client client = QuarkusRestClientBuilder.newBuilder()
.baseUri(uri)
.readTimeout(1, TimeUnit.SECONDS)
.build(Client.class);
RuntimeException exception = assertThrows(RuntimeException.class, client::slow);
assertThat(exception).hasCauseInstanceOf(TimeoutException.class);
}
@Test
void shouldNotTimeoutOnFastResponse() {
Client client = QuarkusRestClientBuilder.newBuilder()
.baseUri(uri)
.readTimeout(1, TimeUnit.SECONDS)
.build(Client.class);
assertThat(client.fast()).isEqualTo("fast-response");
}
@Test
void shouldNotTimeoutOnDefaultTimeout() {
Client client = QuarkusRestClientBuilder.newBuilder()
.baseUri(uri)
.build(Client.class);
assertThat(client.slow()).isEqualTo("slow-response");
}
@Produces(MediaType.TEXT_PLAIN)
@Consumes(MediaType.TEXT_PLAIN)
public static
|
BuilderReadTimeoutTest
|
java
|
elastic__elasticsearch
|
x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/linear/MinMaxScoreNormalizer.java
|
{
"start": 397,
"end": 2427
}
|
class ____ extends ScoreNormalizer {
public static final MinMaxScoreNormalizer INSTANCE = new MinMaxScoreNormalizer();
public static final NodeFeature LINEAR_RETRIEVER_MINMAX_SINGLE_DOC_FIX = new NodeFeature("linear_retriever.minmax_single_doc_fix");
public static final String NAME = "minmax";
private static final float EPSILON = 1e-6f;
public MinMaxScoreNormalizer() {}
@Override
public String getName() {
return NAME;
}
@Override
public ScoreDoc[] normalizeScores(ScoreDoc[] docs) {
if (docs.length == 0) {
return docs;
}
// create a new array to avoid changing ScoreDocs in place
ScoreDoc[] scoreDocs = new ScoreDoc[docs.length];
float min = Float.MAX_VALUE;
float max = Float.MIN_VALUE;
boolean atLeastOneValidScore = false;
for (ScoreDoc rd : docs) {
if (false == atLeastOneValidScore && false == Float.isNaN(rd.score)) {
atLeastOneValidScore = true;
}
if (rd.score > max) {
max = rd.score;
}
if (rd.score < min) {
min = rd.score;
}
}
if (false == atLeastOneValidScore) {
// we do not have any scores to normalize, so we just return the original array
return docs;
}
boolean minEqualsMax = Math.abs(min - max) < EPSILON;
for (int i = 0; i < docs.length; i++) {
float score;
if (minEqualsMax) {
// This can happen if there is only one doc in the result set or if all docs have nearly equivalent scores
// (i.e. within epsilon). In this case, assign every doc the max normalized score.
score = 1.0f;
} else {
score = (docs[i].score - min) / (max - min);
}
scoreDocs[i] = new ScoreDoc(docs[i].doc, score, docs[i].shardIndex);
}
return scoreDocs;
}
}
|
MinMaxScoreNormalizer
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/type/internal/ImmutableNamedBasicTypeImpl.java
|
{
"start": 405,
"end": 710
}
|
class ____<J> extends NamedBasicTypeImpl<J> {
public ImmutableNamedBasicTypeImpl(
JavaType<J> jtd,
JdbcType std,
String name) {
super( jtd, std, name );
}
@Override
protected MutabilityPlan<J> getMutabilityPlan() {
return ImmutableMutabilityPlan.instance();
}
}
|
ImmutableNamedBasicTypeImpl
|
java
|
quarkusio__quarkus
|
extensions/mongodb-client/runtime/src/test/java/io/quarkus/mongodb/reactive/ReactiveMongoClientTest.java
|
{
"start": 775,
"end": 11434
}
|
class ____ extends MongoTestBase {
private ReactiveMongoClient client;
@BeforeEach
void init() {
client = new ReactiveMongoClientImpl(MongoClients.create(getConnectionString()));
}
@AfterEach
void cleanup() {
dropOurCollection(client);
client.close();
}
@Test
void testFindOneReturnsObjectWithId() {
String collection = randomCollection();
ReactiveMongoDatabase database = client.getDatabase(DATABASE);
ReactiveMongoCollection<Document> myCollection = database.getCollection(collection);
Document document = createDoc();
myCollection.insertOne(document)
.chain(() -> myCollection.find(eq("foo", "bar")).collect().first())
.invoke(found -> {
assertThat(found).isNotNull();
assertThat(found.getObjectId("_id")).isNotNull();
})
.await().indefinitely();
}
@Test
void testFindOneReturnsEmptyWhenNonMatches() {
String collection = randomCollection();
ReactiveMongoDatabase database = client.getDatabase(DATABASE);
ReactiveMongoCollection<Document> myCollection = database.getCollection(collection);
Document document = createDoc();
myCollection.insertOne(document)
.chain(() -> myCollection.find(eq("nothing", "missing")).collect().first())
.invoke(opt -> assertThat(opt).isNull())
.await().indefinitely();
}
@Test
void testInsertPreexistingObjectID() {
String collection = randomCollection();
ReactiveMongoDatabase database = client.getDatabase(DATABASE);
ReactiveMongoCollection<Document> myCollection = database.getCollection(collection);
Document doc = createDoc();
ObjectId value = new ObjectId();
doc.put("_id", value);
myCollection.insertOne(doc).await().indefinitely();
Optional<Document> optional = myCollection.find().collect().first().await().asOptional().indefinitely();
assertThat(optional).isNotEmpty();
assertThat(optional.orElse(new Document()).getObjectId("_id")).isEqualTo(value);
}
@Test
void testInsertFollowedWithRetrieve() {
String collection = randomCollection();
ReactiveMongoDatabase database = client.getDatabase(DATABASE);
ReactiveMongoCollection<Document> myCollection = database.getCollection(collection);
Document doc = createDoc();
ObjectId value = new ObjectId();
doc.put("_id", value);
myCollection.insertOne(doc).await().indefinitely();
Optional<Document> optional = myCollection.find().collect().first().await().asOptional().indefinitely();
assertThat(optional).isNotEmpty();
assertThat(optional.orElse(new Document()).getObjectId("_id")).isEqualTo(value);
assertThat(optional.orElse(new Document())).isEqualTo(doc);
}
@Test
void testInsertionFailedWhenDocumentExist() {
String collection = randomCollection();
ReactiveMongoDatabase database = client.getDatabase(DATABASE);
ReactiveMongoCollection<Document> myCollection = database.getCollection(collection);
Document doc = createDoc();
ObjectId value = new ObjectId();
doc.put("_id", value);
myCollection.insertOne(doc).await().indefinitely();
try {
myCollection.insertOne(doc).await().indefinitely();
fail("Write Exception expected");
} catch (Exception e) {
assertThat(e).isInstanceOf(MongoWriteException.class);
}
}
@Test
void testFindBatch() {
String collectionName = randomCollection();
ReactiveMongoCollection<Document> myCollection = client.getDatabase(DATABASE).getCollection(collectionName);
List<Document> toBeInserted = new ArrayList<>();
for (int i = 0; i < 3000; i++) {
toBeInserted.add(createDoc(i));
}
List<Document> documents = new CopyOnWriteArrayList<>();
myCollection.insertMany(toBeInserted)
.chain(() -> myCollection.find(new FindOptions().sort(eq("foo", 1)))
.onItem().invoke(documents::add)
.onItem().ignoreAsUni())
.await().indefinitely();
assertThat(documents.size()).isEqualTo(3000);
assertThat(documents.get(0).getString("foo")).isEqualTo("bar0");
assertThat(documents.get(3000 - 1).getString("foo")).isEqualTo("bar999");
}
@Test
void testFindBatchWithClass() {
String collectionName = randomCollection();
ReactiveMongoCollection<Document> myCollection = client.getDatabase(DATABASE).getCollection(collectionName);
List<Document> toBeInserted = new ArrayList<>();
for (int i = 0; i < 3000; i++) {
toBeInserted.add(createDoc(i));
}
List<Document> documents = new CopyOnWriteArrayList<>();
myCollection.insertMany(toBeInserted)
.chain(() -> myCollection.find(Document.class, new FindOptions().sort(eq("foo", 1)))
.onItem().invoke(documents::add)
.onItem().ignoreAsUni())
.await().indefinitely();
assertThat(documents.size()).isEqualTo(3000);
assertThat(documents.get(0).getString("foo")).isEqualTo("bar0");
assertThat(documents.get(3000 - 1).getString("foo")).isEqualTo("bar999");
}
@Test
void testFindBatchWithFilter() {
String collectionName = randomCollection();
ReactiveMongoCollection<Document> myCollection = client.getDatabase(DATABASE).getCollection(collectionName);
List<Document> toBeInserted = new ArrayList<>();
for (int i = 0; i < 3000; i++) {
toBeInserted.add(createDoc(i));
}
List<Document> documents = new CopyOnWriteArrayList<>();
myCollection.insertMany(toBeInserted)
.chain(() -> myCollection.find(new FindOptions().filter(eq("num", 123)).sort(eq("foo", 1)))
.onItem().invoke(documents::add)
.onItem().ignoreAsUni())
.await().indefinitely();
assertThat(documents.size()).isEqualTo(3000);
assertThat(documents.get(0).getString("foo")).isEqualTo("bar0");
assertThat(documents.get(3000 - 1).getString("foo")).isEqualTo("bar999");
}
@Test
void testFindBatchWithFilterAndClass() {
String collectionName = randomCollection();
ReactiveMongoCollection<Document> myCollection = client.getDatabase(DATABASE).getCollection(collectionName);
List<Document> toBeInserted = new ArrayList<>();
for (int i = 0; i < 3000; i++) {
toBeInserted.add(createDoc(i));
}
List<Document> documents = new CopyOnWriteArrayList<>();
myCollection.insertMany(toBeInserted)
.chain(() -> myCollection.find(Document.class,
new FindOptions().filter(eq("num", 123)).sort(eq("foo", 1)))
.onItem().invoke(documents::add)
.onItem().ignoreAsUni())
.await().indefinitely();
assertThat(documents.size()).isEqualTo(3000);
assertThat(documents.get(0).getString("foo")).isEqualTo("bar0");
assertThat(documents.get(3000 - 1).getString("foo")).isEqualTo("bar999");
}
@Test
void testUpsertCreatesHexIfRecordDoesNotExist() {
upsertDoc(randomCollection(), createDoc(), null).await().indefinitely();
}
@Test
void testUpsertWithASetOnInsertIsNotOverWritten() throws Exception {
String collection = randomCollection();
Document docToInsert = createDoc();
Document insertStatement = new Document();
insertStatement.put("$set", docToInsert);
Document nested = new Document();
nested.put("a-field", "an-entry");
insertStatement.put("$setOnInsert", nested);
upsertDoc(collection, docToInsert, insertStatement, null).onItem().invoke(saved -> {
assertThat(saved).isNotNull();
assertThat("an-entry").isEqualTo(saved.getString("a-field"));
}).await().indefinitely();
}
private Uni<Document> upsertDoc(String collection, Document docToInsert, String expectedId) {
Document insertStatement = new Document();
insertStatement.put("$set", docToInsert);
return upsertDoc(collection, docToInsert, insertStatement, expectedId);
}
private Uni<Document> upsertDoc(String collection, Document docToInsert,
Document insertStatement,
String expectedId) {
return client.getDatabase(DATABASE).getCollection(collection)
.updateMany(eq("foo", docToInsert.getString("foo")),
insertStatement,
new UpdateOptions().upsert(true))
.chain(result -> {
assertThat(result.getModifiedCount()).isEqualTo(0);
if (expectedId == null) {
assertThat(0).isEqualTo(result.getMatchedCount());
assertThat(result.getUpsertedId()).isNotNull();
} else {
assertThat(1).isEqualTo(result.getMatchedCount());
assertThat(result.getUpsertedId()).isNull();
}
return client.getDatabase(DATABASE).getCollection(collection).find().collect().first();
});
}
@Test
void testAggregate() {
final int numDocs = 100;
final String collection = randomCollection();
List<Document> pipeline = new ArrayList<>();
Document doc1 = new Document();
doc1.put("$regex", "bar1");
Document doc2 = new Document();
doc2.put("foo", doc1);
Document doc3 = new Document();
doc3.put("$match", doc2);
pipeline.add(doc3);
Document doc4 = new Document();
doc4.put("$count", "foo_starting_with_bar1");
pipeline.add(doc4);
Optional<Integer> optional = client.getDatabase(DATABASE).createCollection(collection)
.chain(() -> insertDocs(client, collection, numDocs))
.onItem().transformToMulti(x -> client.getDatabase(DATABASE).getCollection(collection).aggregate(pipeline))
.collect().first()
.onItem().transform(doc -> doc.getInteger("foo_starting_with_bar1"))
.await().asOptional().indefinitely();
assertThat(optional).contains(11);
}
}
|
ReactiveMongoClientTest
|
java
|
mockito__mockito
|
mockito-core/src/test/java/org/mockito/junit/TestableJUnitRunner.java
|
{
"start": 579,
"end": 1753
}
|
class ____ extends MockitoJUnitRunner {
private static final ThreadLocal<SimpleMockitoLogger> LOGGER =
new ThreadLocal<SimpleMockitoLogger>() {
protected SimpleMockitoLogger initialValue() {
return new SimpleMockitoLogger();
}
};
public TestableJUnitRunner(Class<?> klass)
throws InvocationTargetException, InitializationError {
super(
new StrictRunner(
new RunnerFactory()
.create(
klass,
new Supplier<MockitoTestListener>() {
public MockitoTestListener get() {
return new MismatchReportingTestListener(
LOGGER.get());
}
}),
klass));
}
public static SimpleMockitoLogger refreshedLogger() {
return LOGGER.get().clear();
}
}
|
TestableJUnitRunner
|
java
|
google__auto
|
value/src/main/java/com/google/auto/value/AutoBuilder.java
|
{
"start": 842,
"end": 981
}
|
class ____ be implemented as a builder.
*
* <p>A simple example:
*
* <pre>{@code
* @AutoBuilder(ofClass = Person.class)
* abstract
|
should
|
java
|
apache__camel
|
components/camel-thrift/src/main/java/org/apache/camel/component/thrift/ThriftEndpoint.java
|
{
"start": 1562,
"end": 3543
}
|
class ____ extends DefaultEndpoint implements EndpointServiceLocation {
@UriParam
private ThriftConfiguration configuration;
@UriParam(defaultValue = "false", label = "advanced",
description = "Sets whether synchronous processing should be strictly used")
private boolean synchronous;
private String serviceName;
private String servicePackage;
public ThriftEndpoint(String uri, ThriftComponent component, ThriftConfiguration config) {
super(uri, component);
this.configuration = config;
// Extract service and package names from the full service name
serviceName = ThriftUtils.extractServiceName(configuration.getService());
servicePackage = ThriftUtils.extractServicePackage(configuration.getService());
}
@Override
public String getServiceUrl() {
return configuration.getHost() + ":" + configuration.getPort();
}
@Override
public String getServiceProtocol() {
return "thrift";
}
public ThriftConfiguration getConfiguration() {
return configuration;
}
@Override
public Producer createProducer() throws Exception {
ThriftProducer producer = new ThriftProducer(this, configuration);
if (isSynchronous()) {
return new SynchronousDelegateProducer(producer);
} else {
return producer;
}
}
@Override
public Consumer createConsumer(Processor processor) throws Exception {
ThriftConsumer consumer = new ThriftConsumer(this, processor, configuration);
configureConsumer(consumer);
return consumer;
}
public String getServiceName() {
return serviceName;
}
public String getServicePackage() {
return servicePackage;
}
public boolean isSynchronous() {
return synchronous;
}
public void setSynchronous(boolean synchronous) {
this.synchronous = synchronous;
}
}
|
ThriftEndpoint
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/query/hql/TruncConvertedDatetimeAttributeTest.java
|
{
"start": 2980,
"end": 3382
}
|
class ____ {
@Id
private Long id;
@Convert( converter = DateConverter.class )
private Long dateCol;
@Convert( converter = InstantConverter.class )
private Instant instantCol;
public TestEntity() {
}
public TestEntity(Long id, Long dateCol, Instant instantCol) {
this.id = id;
this.dateCol = dateCol;
this.instantCol = instantCol;
}
}
@Converter
public static
|
TestEntity
|
java
|
google__auto
|
value/src/main/java/com/google/auto/value/processor/EclipseHack.java
|
{
"start": 2583,
"end": 3097
}
|
class ____ type arguments that happen to be type
* variables with the same names as the corresponding parameters, we will do the wrong thing on
* Eclipse. But doing the wrong thing in that case is better than doing the wrong thing in the
* usual case.
*/
static TypeMirror getEnclosingType(DeclaredType type) {
TypeMirror enclosing = type.getEnclosingType();
if (!enclosing.getKind().equals(TypeKind.DECLARED)
|| !enclosing.getClass().getName().contains("eclipse")) {
// If the
|
with
|
java
|
apache__flink
|
flink-clients/src/main/java/org/apache/flink/client/deployment/application/FromClasspathEntryClassInformationProvider.java
|
{
"start": 2095,
"end": 2370
}
|
class ____ not present on the passed classpath.
*/
public static FromClasspathEntryClassInformationProvider create(
String jobClassName, Iterable<URL> classpath) throws IOException, FlinkException {
Preconditions.checkNotNull(jobClassName, "No job
|
is
|
java
|
elastic__elasticsearch
|
test/framework/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeUtils.java
|
{
"start": 2389,
"end": 7093
}
|
class ____ {
private final String id;
private String name;
private String ephemeralId = UUIDs.randomBase64UUID();
private String hostName;
private String hostAddress;
private TransportAddress address;
private Map<String, String> attributes = Map.of();
private Set<DiscoveryNodeRole> roles = DiscoveryNodeRole.roles();
private BuildVersion buildVersion;
private Version version;
private IndexVersion minIndexVersion;
private IndexVersion minReadOnlyIndexVersion;
private IndexVersion maxIndexVersion;
private String externalId;
private Builder(String id) {
this.id = Objects.requireNonNull(id);
}
public Builder name(String name) {
this.name = name;
return this;
}
public Builder ephemeralId(String ephemeralId) {
this.ephemeralId = Objects.requireNonNull(ephemeralId);
return this;
}
public Builder address(TransportAddress address) {
return address(null, null, address);
}
public Builder address(String hostName, String hostAddress, TransportAddress address) {
this.hostName = hostName;
this.hostAddress = hostAddress;
this.address = Objects.requireNonNull(address);
return this;
}
public Builder attributes(Map<String, String> attributes) {
this.attributes = Objects.requireNonNull(attributes);
return this;
}
public Builder roles(Set<DiscoveryNodeRole> roles) {
this.roles = Objects.requireNonNull(roles);
return this;
}
@Deprecated
public Builder version(Version version) {
this.version = version;
return this;
}
@Deprecated
public Builder version(Version version, IndexVersion minIndexVersion, IndexVersion maxIndexVersion) {
this.buildVersion = BuildVersion.fromVersionId(version.id());
this.version = version;
this.minIndexVersion = minIndexVersion;
this.minReadOnlyIndexVersion = minIndexVersion;
this.maxIndexVersion = maxIndexVersion;
return this;
}
public Builder version(
BuildVersion version,
IndexVersion minIndexVersion,
IndexVersion minReadOnlyIndexVersion,
IndexVersion maxIndexVersion
) {
// see comment in VersionInformation
assert version.equals(BuildVersion.current());
this.buildVersion = version;
this.version = Version.CURRENT;
this.minIndexVersion = minIndexVersion;
this.minReadOnlyIndexVersion = minReadOnlyIndexVersion;
this.maxIndexVersion = maxIndexVersion;
return this;
}
public Builder version(VersionInformation versions) {
this.buildVersion = versions.buildVersion();
this.version = versions.nodeVersion();
this.minIndexVersion = versions.minIndexVersion();
this.minReadOnlyIndexVersion = versions.minReadOnlyIndexVersion();
this.maxIndexVersion = versions.maxIndexVersion();
return this;
}
public Builder externalId(String externalId) {
this.externalId = externalId;
return this;
}
public Builder applySettings(Settings settings) {
return name(Node.NODE_NAME_SETTING.get(settings)).attributes(Node.NODE_ATTRIBUTES.getAsMap(settings))
.roles(DiscoveryNode.getRolesFromSettings(settings))
.externalId(Node.NODE_EXTERNAL_ID_SETTING.get(settings));
}
public DiscoveryNode build() {
if (address == null) {
address = buildNewFakeTransportAddress();
}
if (hostName == null) {
hostName = address.address().getHostString();
}
if (hostAddress == null) {
hostAddress = address.getAddress();
}
VersionInformation versionInfo;
if (minIndexVersion == null || minReadOnlyIndexVersion == null || maxIndexVersion == null) {
versionInfo = VersionInformation.inferVersions(version);
} else {
versionInfo = new VersionInformation(buildVersion, version, minIndexVersion, minReadOnlyIndexVersion, maxIndexVersion);
}
return new DiscoveryNode(name, id, ephemeralId, hostName, hostAddress, address, attributes, roles, versionInfo, externalId);
}
}
}
|
Builder
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/flush/AutoFlushTest.java
|
{
"start": 5182,
"end": 5638
}
|
class ____ {
@Id
@GeneratedValue
private Long id;
private String name;
//Getters and setters are omitted for brevity
//end::flushing-auto-flush-jpql-entity-example[]
public Person() {}
public Person(String name) {
this.name = name;
}
public Long getId() {
return id;
}
public String getName() {
return name;
}
//tag::flushing-auto-flush-jpql-entity-example[]
}
@Entity(name = "Advertisement")
public static
|
Person
|
java
|
quarkusio__quarkus
|
independent-projects/resteasy-reactive/client/processor/src/main/java/org/jboss/resteasy/reactive/client/processor/beanparam/BeanParamParser.java
|
{
"start": 2131,
"end": 2894
}
|
class ____ {
public static List<Item> parse(ClassInfo beanParamClass, IndexView index) {
Set<ClassInfo> processedBeanParamClasses = Collections.newSetFromMap(new IdentityHashMap<>());
return parseInternal(beanParamClass, index, processedBeanParamClasses);
}
private static List<Item> parseInternal(ClassInfo beanParamClass, IndexView index,
Set<ClassInfo> processedBeanParamClasses) {
if (!processedBeanParamClasses.add(beanParamClass)) {
throw new IllegalArgumentException("Cycle detected in BeanParam annotations; already processed class "
+ beanParamClass.name());
}
try {
List<Item> resultList = new ArrayList<>();
// Parse
|
BeanParamParser
|
java
|
spring-projects__spring-boot
|
core/spring-boot/src/main/java/org/springframework/boot/support/SpringApplicationJsonEnvironmentPostProcessor.java
|
{
"start": 2289,
"end": 6160
}
|
class ____ implements EnvironmentPostProcessor, Ordered {
/**
* Name of the {@code spring.application.json} property.
*/
public static final String SPRING_APPLICATION_JSON_PROPERTY = "spring.application.json";
/**
* Name of the {@code SPRING_APPLICATION_JSON} environment variable.
*/
public static final String SPRING_APPLICATION_JSON_ENVIRONMENT_VARIABLE = "SPRING_APPLICATION_JSON";
private static final String SERVLET_ENVIRONMENT_CLASS = "org.springframework.web."
+ "context.support.StandardServletEnvironment";
private static final Set<String> SERVLET_ENVIRONMENT_PROPERTY_SOURCES = new LinkedHashSet<>(
Arrays.asList(StandardServletEnvironment.JNDI_PROPERTY_SOURCE_NAME,
StandardServletEnvironment.SERVLET_CONTEXT_PROPERTY_SOURCE_NAME,
StandardServletEnvironment.SERVLET_CONFIG_PROPERTY_SOURCE_NAME));
/**
* The default order for the processor.
*/
public static final int DEFAULT_ORDER = Ordered.HIGHEST_PRECEDENCE + 5;
private int order = DEFAULT_ORDER;
@Override
public int getOrder() {
return this.order;
}
public void setOrder(int order) {
this.order = order;
}
@Override
public void postProcessEnvironment(ConfigurableEnvironment environment, SpringApplication application) {
MutablePropertySources propertySources = environment.getPropertySources();
propertySources.stream()
.map(JsonPropertyValue::get)
.filter(Objects::nonNull)
.findFirst()
.ifPresent((v) -> processJson(environment, v));
}
private void processJson(ConfigurableEnvironment environment, JsonPropertyValue propertyValue) {
JsonParser parser = JsonParserFactory.getJsonParser();
Map<String, Object> map = parser.parseMap(propertyValue.getJson());
if (!map.isEmpty()) {
addJsonPropertySource(environment, new JsonPropertySource(propertyValue, flatten(map)));
}
}
/**
* Flatten the map keys using period separator.
* @param map the map that should be flattened
* @return the flattened map
*/
private Map<String, Object> flatten(Map<String, Object> map) {
Map<String, Object> result = new LinkedHashMap<>();
flatten(null, result, map);
return result;
}
private void flatten(@Nullable String prefix, Map<String, Object> result, Map<String, Object> map) {
String namePrefix = (prefix != null) ? prefix + "." : "";
map.forEach((key, value) -> extract(namePrefix + key, result, value));
}
@SuppressWarnings("unchecked")
private void extract(String name, Map<String, Object> result, Object value) {
if (value instanceof Map<?, ?> map) {
if (CollectionUtils.isEmpty(map)) {
result.put(name, value);
return;
}
flatten(name, result, (Map<String, Object>) value);
}
else if (value instanceof Collection<?> collection) {
if (CollectionUtils.isEmpty(collection)) {
result.put(name, value);
return;
}
int index = 0;
for (Object object : collection) {
extract(name + "[" + index + "]", result, object);
index++;
}
}
else {
result.put(name, value);
}
}
private void addJsonPropertySource(ConfigurableEnvironment environment, PropertySource<?> source) {
MutablePropertySources sources = environment.getPropertySources();
String name = findPropertySource(sources);
if (sources.contains(name)) {
sources.addBefore(name, source);
}
else {
sources.addFirst(source);
}
}
private String findPropertySource(MutablePropertySources sources) {
if (ClassUtils.isPresent(SERVLET_ENVIRONMENT_CLASS, null)) {
PropertySource<?> servletPropertySource = sources.stream()
.filter((source) -> SERVLET_ENVIRONMENT_PROPERTY_SOURCES.contains(source.getName()))
.findFirst()
.orElse(null);
if (servletPropertySource != null) {
return servletPropertySource.getName();
}
}
return StandardEnvironment.SYSTEM_PROPERTIES_PROPERTY_SOURCE_NAME;
}
private static
|
SpringApplicationJsonEnvironmentPostProcessor
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/index/query/GeoPolygonQueryBuilder.java
|
{
"start": 1779,
"end": 12806
}
|
class ____ extends AbstractQueryBuilder<GeoPolygonQueryBuilder> {
public static final String NAME = "geo_polygon";
public static final String GEO_POLYGON_DEPRECATION_MSG = "["
+ GeoShapeQueryBuilder.NAME
+ "] query "
+ "where polygons are defined in geojson or wkt";
/**
* The default value for ignore_unmapped.
*/
public static final boolean DEFAULT_IGNORE_UNMAPPED = false;
private static final ParseField VALIDATION_METHOD = new ParseField("validation_method");
private static final ParseField POINTS_FIELD = new ParseField("points");
private static final ParseField IGNORE_UNMAPPED_FIELD = new ParseField("ignore_unmapped");
private final String fieldName;
private final List<GeoPoint> shell;
private GeoValidationMethod validationMethod = GeoValidationMethod.DEFAULT;
private boolean ignoreUnmapped = DEFAULT_IGNORE_UNMAPPED;
@Deprecated
public GeoPolygonQueryBuilder(String fieldName, List<GeoPoint> points) {
if (Strings.isEmpty(fieldName)) {
throw new IllegalArgumentException("fieldName must not be null");
}
if (points == null || points.isEmpty()) {
throw new IllegalArgumentException("polygon must not be null or empty");
} else {
GeoPoint start = points.get(0);
if (start.equals(points.get(points.size() - 1))) {
if (points.size() < 4) {
throw new IllegalArgumentException("too few points defined for geo_polygon query");
}
} else {
if (points.size() < 3) {
throw new IllegalArgumentException("too few points defined for geo_polygon query");
}
}
}
this.fieldName = fieldName;
this.shell = new ArrayList<>(points);
if (shell.get(shell.size() - 1).equals(shell.get(0)) == false) {
shell.add(shell.get(0));
}
}
/**
* Read from a stream.
*/
public GeoPolygonQueryBuilder(StreamInput in) throws IOException {
super(in);
fieldName = in.readString();
int size = in.readVInt();
shell = new ArrayList<>(size);
for (int i = 0; i < size; i++) {
shell.add(in.readGeoPoint());
}
validationMethod = GeoValidationMethod.readFromStream(in);
ignoreUnmapped = in.readBoolean();
}
@Override
protected void doWriteTo(StreamOutput out) throws IOException {
out.writeString(fieldName);
out.writeCollection(shell, StreamOutput::writeGeoPoint);
validationMethod.writeTo(out);
out.writeBoolean(ignoreUnmapped);
}
public String fieldName() {
return fieldName;
}
public List<GeoPoint> points() {
return shell;
}
/** Sets the validation method to use for geo coordinates. */
public GeoPolygonQueryBuilder setValidationMethod(GeoValidationMethod method) {
this.validationMethod = method;
return this;
}
/**
* Sets whether the query builder should ignore unmapped fields (and run a
* {@link MatchNoDocsQuery} in place of this query) or throw an exception if
* the field is unmapped.
*/
public GeoPolygonQueryBuilder ignoreUnmapped(boolean ignoreUnmapped) {
this.ignoreUnmapped = ignoreUnmapped;
return this;
}
@Override
protected Query doToQuery(SearchExecutionContext context) throws IOException {
MappedFieldType fieldType = context.getFieldType(fieldName);
if (fieldType == null) {
if (ignoreUnmapped) {
return new MatchNoDocsQuery();
} else {
throw new QueryShardException(context, "failed to find geo_point field [" + fieldName + "]");
}
}
if ((fieldType instanceof GeoPointFieldType) == false) {
throw new QueryShardException(context, "field [" + fieldName + "] is not a geo_point field");
}
List<GeoPoint> shell = new ArrayList<>(this.shell.size());
for (GeoPoint geoPoint : this.shell) {
shell.add(new GeoPoint(geoPoint));
}
final int shellSize = shell.size();
// validation was not available prior to 2.x, so to support bwc
// percolation queries we only ignore_malformed on 2.x created indexes
if (GeoValidationMethod.isIgnoreMalformed(validationMethod) == false) {
for (GeoPoint point : shell) {
if (GeoUtils.isValidLatitude(point.lat()) == false) {
throw new QueryShardException(
context,
"illegal latitude value [{}] for [{}]",
point.lat(),
GeoPolygonQueryBuilder.NAME
);
}
if (GeoUtils.isValidLongitude(point.lon()) == false) {
throw new QueryShardException(
context,
"illegal longitude value [{}] for [{}]",
point.lon(),
GeoPolygonQueryBuilder.NAME
);
}
}
}
if (GeoValidationMethod.isCoerce(validationMethod)) {
for (GeoPoint point : shell) {
GeoUtils.normalizePoint(point, true, true);
}
}
double[] lats = new double[shellSize];
double[] lons = new double[shellSize];
GeoPoint p;
for (int i = 0; i < shellSize; ++i) {
p = shell.get(i);
lats[i] = p.lat();
lons[i] = p.lon();
}
Polygon polygon = new Polygon(lats, lons);
Query query = LatLonPoint.newPolygonQuery(fieldType.name(), polygon);
if (fieldType.hasDocValues()) {
Query dvQuery = LatLonDocValuesField.newSlowPolygonQuery(fieldType.name(), polygon);
query = new IndexOrDocValuesQuery(query, dvQuery);
}
return query;
}
@Override
protected void doXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(NAME);
builder.startObject(fieldName);
builder.startArray(POINTS_FIELD.getPreferredName());
for (GeoPoint point : shell) {
builder.startArray().value(point.lon()).value(point.lat()).endArray();
}
builder.endArray();
builder.endObject();
builder.field(VALIDATION_METHOD.getPreferredName(), validationMethod);
builder.field(IGNORE_UNMAPPED_FIELD.getPreferredName(), ignoreUnmapped);
printBoostAndQueryName(builder);
builder.endObject();
}
public static GeoPolygonQueryBuilder fromXContent(XContentParser parser) throws IOException {
String fieldName = null;
List<GeoPoint> shell = null;
Float boost = null;
GeoValidationMethod validationMethod = null;
String queryName = null;
String currentFieldName = null;
XContentParser.Token token;
boolean ignoreUnmapped = DEFAULT_IGNORE_UNMAPPED;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
fieldName = currentFieldName;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_ARRAY) {
if (POINTS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
shell = new ArrayList<>();
while ((token = parser.nextToken()) != Token.END_ARRAY) {
shell.add(GeoUtils.parseGeoPoint(parser));
}
} else {
throw new ParsingException(
parser.getTokenLocation(),
"[geo_polygon] query does not support [" + currentFieldName + "]"
);
}
} else {
throw new ParsingException(
parser.getTokenLocation(),
"[geo_polygon] query does not support token type [" + token.name() + "] under [" + currentFieldName + "]"
);
}
}
} else if (token.isValue()) {
if ("_name".equals(currentFieldName)) {
queryName = parser.text();
} else if ("boost".equals(currentFieldName)) {
boost = parser.floatValue();
} else if (IGNORE_UNMAPPED_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
ignoreUnmapped = parser.booleanValue();
} else if (VALIDATION_METHOD.match(currentFieldName, parser.getDeprecationHandler())) {
validationMethod = GeoValidationMethod.fromString(parser.text());
} else {
throw new ParsingException(
parser.getTokenLocation(),
"[geo_polygon] query does not support [" + currentFieldName + "]"
);
}
} else {
throw new ParsingException(parser.getTokenLocation(), "[geo_polygon] unexpected token type [" + token.name() + "]");
}
}
GeoPolygonQueryBuilder builder = new GeoPolygonQueryBuilder(fieldName, shell);
if (validationMethod != null) {
// if GeoValidationMethod was explicitly set ignore deprecated coerce and ignoreMalformed settings
builder.setValidationMethod(validationMethod);
}
if (queryName != null) {
builder.queryName(queryName);
}
if (boost != null) {
builder.boost(boost);
}
builder.ignoreUnmapped(ignoreUnmapped);
return builder;
}
@Override
protected boolean doEquals(GeoPolygonQueryBuilder other) {
return Objects.equals(validationMethod, other.validationMethod)
&& Objects.equals(fieldName, other.fieldName)
&& Objects.equals(shell, other.shell)
&& Objects.equals(ignoreUnmapped, other.ignoreUnmapped);
}
@Override
protected int doHashCode() {
return Objects.hash(validationMethod, fieldName, shell, ignoreUnmapped);
}
@Override
public String getWriteableName() {
return NAME;
}
@Override
public TransportVersion getMinimalSupportedVersion() {
return TransportVersion.zero();
}
}
|
GeoPolygonQueryBuilder
|
java
|
quarkusio__quarkus
|
extensions/swagger-ui/deployment/src/test/java/io/quarkus/swaggerui/deployment/ErroneousConfigTest2.java
|
{
"start": 340,
"end": 874
}
|
class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.setExpectedException(ConfigurationException.class)
.withApplicationRoot((jar) -> jar
.addAsResource(new StringAsset("quarkus.swagger-ui.path=/api\n"
+ "quarkus.smallrye-openapi.path=/api\n"), "application.properties"));
@Test
public void shouldNotStartApplicationIfSwaggerPathIsSameAsOpenAPIPath() {
Assertions.fail();
}
}
|
ErroneousConfigTest2
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/language/XPathIntegerResultTest.java
|
{
"start": 1013,
"end": 1617
}
|
class ____ extends ContextTestSupport {
@Test
public void testXPathInteger() {
Object out = template.requestBody("direct:start", "<hello><id>123</id></hello>");
assertIsInstanceOf(Integer.class, out);
Assertions.assertEquals(123, (Integer) out);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start")
.setBody(xpath("//hello/id", Integer.class));
}
};
}
}
|
XPathIntegerResultTest
|
java
|
micronaut-projects__micronaut-core
|
inject-java-test/src/test/groovy/io/micronaut/inject/visitor/beans/OuterBean.java
|
{
"start": 300,
"end": 361
}
|
interface ____ {
String getName();
}
}
|
InnerInterface
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDataFrameAnalyticsStatsAction.java
|
{
"start": 5772,
"end": 6040
}
|
class ____ extends BaseTasksResponse implements ToXContentObject {
/** Name of the response's REST param which is used to determine whether this response should be verbose. */
public static final String VERBOSE = "verbose";
public static
|
Response
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/cluster/ClusterInfoSimulator.java
|
{
"start": 1545,
"end": 9735
}
|
class ____ {
private static final Logger logger = LogManager.getLogger(ClusterInfoSimulator.class);
private final RoutingAllocation allocation;
private final Map<String, DiskUsage> leastAvailableSpaceUsage;
private final Map<String, DiskUsage> mostAvailableSpaceUsage;
private final CopyOnFirstWriteMap<String, Long> shardSizes;
private final Map<String, EstimatedHeapUsage> estimatedHeapUsages;
private final ShardMovementWriteLoadSimulator shardMovementWriteLoadSimulator;
public ClusterInfoSimulator(RoutingAllocation allocation) {
this.allocation = allocation;
this.leastAvailableSpaceUsage = getAdjustedDiskSpace(allocation, allocation.clusterInfo().getNodeLeastAvailableDiskUsages());
this.mostAvailableSpaceUsage = getAdjustedDiskSpace(allocation, allocation.clusterInfo().getNodeMostAvailableDiskUsages());
this.shardSizes = new CopyOnFirstWriteMap<>(allocation.clusterInfo().shardSizes);
this.estimatedHeapUsages = allocation.clusterInfo().getEstimatedHeapUsages();
this.shardMovementWriteLoadSimulator = new ShardMovementWriteLoadSimulator(allocation);
}
/**
* Cluster info contains a reserved space that is necessary to finish initializing shards (that are currently in progress).
* for all initializing shards sum(expected size) = reserved space + already used space
* This deducts already used space from disk usage as when shard start is simulated it is going to add entire expected shard size.
*/
private static Map<String, DiskUsage> getAdjustedDiskSpace(RoutingAllocation allocation, Map<String, DiskUsage> diskUsage) {
var diskUsageCopy = new HashMap<>(diskUsage);
for (var entry : diskUsageCopy.entrySet()) {
var nodeId = entry.getKey();
var usage = entry.getValue();
var reserved = allocation.clusterInfo().getReservedSpace(nodeId, usage.path());
if (reserved.total() == 0) {
continue;
}
var node = allocation.routingNodes().node(nodeId);
if (node == null) {
continue;
}
long adjustment = 0;
for (ShardId shardId : reserved.shardIds()) {
var shard = node.getByShardId(shardId);
if (shard != null) {
var expectedSize = getExpectedShardSize(shard, 0, allocation);
adjustment += expectedSize;
}
}
adjustment -= reserved.total();
entry.setValue(updateWithFreeBytes(usage, adjustment));
}
return diskUsageCopy;
}
/**
* This method updates disk usage to reflect shard relocations and new replica initialization.
* In case of a single data path both mostAvailableSpaceUsage and leastAvailableSpaceUsage are update to reflect the change.
* In case of multiple data path only mostAvailableSpaceUsage as it is used in calculation in
* {@link org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider} for allocating new shards.
* This assumes the worst case (all shards are placed on a single most used disk) and prevents node overflow.
* Balance is later recalculated with a refreshed cluster info containing actual shards placement.
*/
public void simulateShardStarted(ShardRouting shard) {
assert shard.initializing() : "expected an initializing shard, but got: " + shard;
var project = allocation.metadata().projectFor(shard.index());
var size = getExpectedShardSize(
shard,
shard.getExpectedShardSize(),
(shardId, primary) -> shardSizes.get(shardIdentifierFromRouting(shardId, primary)),
allocation.snapshotShardSizeInfo(),
project,
allocation.routingTable(project.id())
);
if (size != UNAVAILABLE_EXPECTED_SHARD_SIZE) {
if (shard.relocatingNodeId() != null) {
// relocation
modifyDiskUsage(shard.relocatingNodeId(), size);
modifyDiskUsage(shard.currentNodeId(), -size);
} else {
// new shard
if (shouldReserveSpaceForInitializingShard(shard, allocation.metadata())) {
modifyDiskUsage(shard.currentNodeId(), -size);
}
shardSizes.put(shardIdentifierFromRouting(shard), project.getIndexSafe(shard.index()).ignoreDiskWatermarks() ? 0 : size);
}
}
shardMovementWriteLoadSimulator.simulateShardStarted(shard);
}
/**
* This method simulates starting an already started shard with an optional {@code sourceNodeId} in case of a relocation.
* @param startedShard The shard to simulate. Must be started already.
* @param sourceNodeId The source node ID if the shard started as a result of relocation. {@code null} otherwise.
*/
public void simulateAlreadyStartedShard(ShardRouting startedShard, @Nullable String sourceNodeId) {
assert startedShard.started() : "expected an already started shard, but got: " + startedShard;
if (logger.isDebugEnabled()) {
logger.debug(
"simulated started shard {} on node [{}] as a {}",
startedShard.shardId(),
startedShard.currentNodeId(),
sourceNodeId != null ? "relocating shard from node [" + sourceNodeId + "]" : "new shard"
);
}
final long expectedShardSize = startedShard.getExpectedShardSize();
if (sourceNodeId != null) {
final var relocatingShard = startedShard.moveToUnassigned(new UnassignedInfo(REINITIALIZED, "simulation"))
.initialize(sourceNodeId, null, expectedShardSize)
.moveToStarted(expectedShardSize)
.relocate(startedShard.currentNodeId(), expectedShardSize)
.getTargetRelocatingShard();
simulateShardStarted(relocatingShard);
} else {
final var initializingShard = startedShard.moveToUnassigned(new UnassignedInfo(REINITIALIZED, "simulation"))
.initialize(startedShard.currentNodeId(), null, expectedShardSize);
simulateShardStarted(initializingShard);
}
}
private void modifyDiskUsage(String nodeId, long freeDelta) {
if (freeDelta == 0) {
return;
}
var diskUsage = mostAvailableSpaceUsage.get(nodeId);
if (diskUsage == null) {
return;
}
var path = diskUsage.path();
updateDiskUsage(leastAvailableSpaceUsage, nodeId, path, freeDelta);
updateDiskUsage(mostAvailableSpaceUsage, nodeId, path, freeDelta);
}
private void updateDiskUsage(Map<String, DiskUsage> availableSpaceUsage, String nodeId, String path, long freeDelta) {
var usage = availableSpaceUsage.get(nodeId);
if (usage != null && Objects.equals(usage.path(), path)) {
// ensure new value is within bounds
availableSpaceUsage.put(nodeId, updateWithFreeBytes(usage, freeDelta));
}
}
private static DiskUsage updateWithFreeBytes(DiskUsage usage, long delta) {
// free bytes might go out of range in case when multiple data path are used
// we might not know exact disk used to allocate a shard and conservatively update
// most used disk on a target node and least used disk on a source node
var freeBytes = withinRange(0, usage.totalBytes(), usage.freeBytes() + delta);
return usage.copyWithFreeBytes(freeBytes);
}
private static long withinRange(long min, long max, long value) {
return Math.max(min, Math.min(max, value));
}
public ClusterInfo getClusterInfo() {
return allocation.clusterInfo()
.updateWith(
leastAvailableSpaceUsage,
mostAvailableSpaceUsage,
shardSizes.toImmutableMap(),
Map.of(),
estimatedHeapUsages,
shardMovementWriteLoadSimulator.simulatedNodeUsageStatsForThreadPools()
);
}
}
|
ClusterInfoSimulator
|
java
|
quarkusio__quarkus
|
extensions/hibernate-orm/deployment/src/test/java/io/quarkus/hibernate/orm/multiplepersistenceunits/MultiplePersistenceUnitsCdiSessionTest.java
|
{
"start": 981,
"end": 6791
}
|
class ____ {
@RegisterExtension
static QuarkusUnitTest runner = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClass(DefaultEntity.class)
.addClass(User.class)
.addClass(Plane.class)
.addAsResource("application-multiple-persistence-units.properties", "application.properties"));
@Inject
Session defaultSession;
@Inject
@PersistenceUnit("users")
Session usersSession;
@Inject
@PersistenceUnit("inventory")
Session inventorySession;
@Test
@Transactional
public void defaultEntityManagerInTransaction() {
DefaultEntity defaultEntity = new DefaultEntity("default");
defaultSession.persist(defaultEntity);
DefaultEntity savedDefaultEntity = defaultSession.get(DefaultEntity.class, defaultEntity.getId());
assertEquals(defaultEntity.getName(), savedDefaultEntity.getName());
}
@Test
@ActivateRequestContext
public void defaultEntityManagerInRequestNoTransaction() {
// Reads are allowed
assertThatCode(() -> defaultSession.createQuery("select count(*) from DefaultEntity"))
.doesNotThrowAnyException();
// Writes are not
DefaultEntity defaultEntity = new DefaultEntity("default");
assertThatThrownBy(() -> defaultSession.persist(defaultEntity))
.isInstanceOf(TransactionRequiredException.class)
.hasMessageContaining(
"Transaction is not active, consider adding @Transactional to your method to automatically activate one");
}
@Test
public void defaultEntityManagerNoRequestNoTransaction() {
DefaultEntity defaultEntity = new DefaultEntity("default");
assertThatThrownBy(() -> defaultSession.persist(defaultEntity))
.isInstanceOf(ContextNotActiveException.class)
.hasMessageContainingAll(
"Cannot use the EntityManager/Session because neither a transaction nor a CDI request context is active",
"Consider adding @Transactional to your method to automatically activate a transaction",
"@ActivateRequestContext if you have valid reasons not to use transactions");
}
@Test
@Transactional
public void usersEntityManagerInTransaction() {
User user = new User("gsmet");
usersSession.persist(user);
User savedUser = usersSession.get(User.class, user.getId());
assertEquals(user.getName(), savedUser.getName());
}
@Test
@ActivateRequestContext
public void usersEntityManagerInRequestNoTransaction() {
// Reads are allowed
assertThatCode(() -> usersSession.createQuery("select count(*) from User"))
.doesNotThrowAnyException();
// Writes are not
User user = new User("gsmet");
assertThatThrownBy(() -> usersSession.persist(user))
.isInstanceOf(TransactionRequiredException.class)
.hasMessageContaining(
"Transaction is not active, consider adding @Transactional to your method to automatically activate one");
}
@Test
public void usersEntityManagerNoRequestNoTransaction() {
User user = new User("gsmet");
assertThatThrownBy(() -> usersSession.persist(user))
.isInstanceOf(ContextNotActiveException.class)
.hasMessageContainingAll(
"Cannot use the EntityManager/Session because neither a transaction nor a CDI request context is active",
"Consider adding @Transactional to your method to automatically activate a transaction",
"@ActivateRequestContext if you have valid reasons not to use transactions");
}
@Test
@Transactional
public void inventoryEntityManagerInTransaction() {
Plane plane = new Plane("Airbus A380");
inventorySession.persist(plane);
Plane savedPlane = inventorySession.get(Plane.class, plane.getId());
assertEquals(plane.getName(), savedPlane.getName());
}
@Test
@ActivateRequestContext
public void inventoryEntityManagerInRequestNoTransaction() {
// Reads are allowed
assertThatCode(() -> inventorySession.createQuery("select count(*) from Plane"))
.doesNotThrowAnyException();
// Writes are not
Plane plane = new Plane("Airbus A380");
assertThatThrownBy(() -> inventorySession.persist(plane))
.isInstanceOf(TransactionRequiredException.class)
.hasMessageContaining(
"Transaction is not active, consider adding @Transactional to your method to automatically activate one");
}
@Test
public void inventoryEntityManagerNoRequestNoTransaction() {
Plane plane = new Plane("Airbus A380");
assertThatThrownBy(() -> inventorySession.persist(plane))
.isInstanceOf(ContextNotActiveException.class)
.hasMessageContainingAll(
"Cannot use the EntityManager/Session because neither a transaction nor a CDI request context is active",
"Consider adding @Transactional to your method to automatically activate a transaction",
"@ActivateRequestContext if you have valid reasons not to use transactions");
}
@Test
@Transactional
public void testUserInInventorySession() {
User user = new User("gsmet");
assertThatThrownBy(() -> inventorySession.persist(user)).isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("Unknown entity type");
}
}
|
MultiplePersistenceUnitsCdiSessionTest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/bootstrap/registry/classloading/ClassLoaderServiceImplTest.java
|
{
"start": 5138,
"end": 5734
}
|
class ____ extends ClassLoader {
private List<String> names = new ArrayList<>( );
public InternalClassLoader() {
super( null );
}
@Override
public Class<?> loadClass(String name) throws ClassNotFoundException {
if ( name.startsWith( "org.hibernate" ) ) {
names.add( name );
}
return super.loadClass( name );
}
@Override
protected URL findResource(String name) {
if ( name.startsWith( "org.hibernate" ) ) {
names.add( name );
}
return super.findResource( name );
}
public int getAccessCount() {
return names.size();
}
}
}
|
InternalClassLoader
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/LanguageEndpointBuilderFactory.java
|
{
"start": 6107,
"end": 8114
}
|
class ____ the result type (type from output).
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param resultType the value to set
* @return the dsl builder
*/
default LanguageEndpointBuilder resultType(String resultType) {
doSetProperty("resultType", resultType);
return this;
}
/**
* Sets the script to execute.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param script the value to set
* @return the dsl builder
*/
default LanguageEndpointBuilder script(String script) {
doSetProperty("script", script);
return this;
}
/**
* Whether or not the result of the script should be used as message
* body. This options is default true.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: producer
*
* @param transform the value to set
* @return the dsl builder
*/
default LanguageEndpointBuilder transform(boolean transform) {
doSetProperty("transform", transform);
return this;
}
/**
* Whether or not the result of the script should be used as message
* body. This options is default true.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: producer
*
* @param transform the value to set
* @return the dsl builder
*/
default LanguageEndpointBuilder transform(String transform) {
doSetProperty("transform", transform);
return this;
}
}
/**
* Advanced builder for endpoint for the Language component.
*/
public
|
of
|
java
|
apache__flink
|
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStListIterator.java
|
{
"start": 1188,
"end": 1705
}
|
class ____<V> extends AbstractStateIterator<V> {
public ForStListIterator(
State originalState,
StateRequestType requestType,
StateRequestHandler stateHandler,
Collection<V> partialResult) {
super(originalState, requestType, stateHandler, partialResult);
}
@Override
public boolean hasNextLoading() {
return false;
}
@Override
protected Object nextPayloadForContinuousLoading() {
return null;
}
}
|
ForStListIterator
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/query/criteria/CriteriaOrderedSetAggregateTest.java
|
{
"start": 1865,
"end": 12769
}
|
class ____ {
@BeforeEach
public void prepareData(SessionFactoryScope scope) {
scope.inTransaction( em -> {
Date now = new Date();
EntityOfBasics entity1 = new EntityOfBasics();
entity1.setId( 1 );
entity1.setTheString( "5" );
entity1.setTheInt( 5 );
entity1.setTheInteger( -1 );
entity1.setTheDouble( 5.0 );
entity1.setTheDate( now );
entity1.setTheBoolean( true );
em.persist( entity1 );
EntityOfBasics entity2 = new EntityOfBasics();
entity2.setId( 2 );
entity2.setTheString( "6" );
entity2.setTheInt( 6 );
entity2.setTheInteger( -2 );
entity2.setTheDouble( 6.0 );
entity2.setTheBoolean( true );
em.persist( entity2 );
EntityOfBasics entity3 = new EntityOfBasics();
entity3.setId( 3 );
entity3.setTheString( "7" );
entity3.setTheInt( 7 );
entity3.setTheInteger( 3 );
entity3.setTheDouble( 7.0 );
entity3.setTheBoolean( false );
entity3.setTheDate( new Date( now.getTime() + 200000L ) );
em.persist( entity3 );
EntityOfBasics entity4 = new EntityOfBasics();
entity4.setId( 4 );
entity4.setTheString( "13" );
entity4.setTheInt( 13 );
entity4.setTheInteger( 4 );
entity4.setTheDouble( 13.0 );
entity4.setTheBoolean( false );
entity4.setTheDate( new Date( now.getTime() + 300000L ) );
em.persist( entity4 );
EntityOfBasics entity5 = new EntityOfBasics();
entity5.setId( 5 );
entity5.setTheString( "5" );
entity5.setTheInt( 5 );
entity5.setTheInteger( 5 );
entity5.setTheDouble( 9.0 );
entity5.setTheBoolean( false );
em.persist( entity5 );
} );
}
@AfterEach
public void tearDown(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
}
@Test
@RequiresDialectFeature(feature = DialectFeatureChecks.SupportsStringAggregation.class)
public void testListaggWithoutOrder(SessionFactoryScope scope) {
scope.inTransaction( session -> {
HibernateCriteriaBuilder cb = session.getCriteriaBuilder();
CriteriaQuery<String> cr = cb.createQuery( String.class );
Root<EntityOfBasics> root = cr.from( EntityOfBasics.class );
JpaExpression<String> function = cb.listagg( null, root.get( "theString" ), "," );
cr.select( function );
List<String> elements = Arrays.asList( session.createQuery( cr ).getSingleResult().split( "," ) );
List<String> expectedElements = List.of( "13", "5", "5", "6", "7" );
elements.sort( String.CASE_INSENSITIVE_ORDER );
assertEquals( expectedElements, elements );
} );
}
@Test
@RequiresDialectFeature(feature = DialectFeatureChecks.SupportsStringAggregation.class)
public void testListagg(SessionFactoryScope scope) {
scope.inTransaction( session -> {
HibernateCriteriaBuilder cb = session.getCriteriaBuilder();
CriteriaQuery<String> cr = cb.createQuery( String.class );
Root<EntityOfBasics> root = cr.from( EntityOfBasics.class );
JpaExpression<String> function = cb.listagg(
cb.desc( root.get( "id" ) ),
root.get( "theString" ),
","
);
cr.select( function );
String result = session.createQuery( cr ).getSingleResult();
assertEquals( "5,13,7,6,5", result );
} );
}
@Test
@RequiresDialectFeature(feature = DialectFeatureChecks.SupportsStringAggregation.class)
public void testListaggWithFilter(SessionFactoryScope scope) {
// Same aggregation as testListagg, but with a FILTER clause (theInt < 10) that
// excludes the row whose theInt is 13, i.e. the "13" element.
scope.inTransaction( session -> {
HibernateCriteriaBuilder cb = session.getCriteriaBuilder();
CriteriaQuery<String> cr = cb.createQuery( String.class );
Root<EntityOfBasics> root = cr.from( EntityOfBasics.class );
JpaExpression<String> function = cb.listagg(
cb.desc( root.get( "id" ) ),
cb.lt( root.get( "theInt" ), cb.literal( 10 ) ),
root.get( "theString" ),
","
);
cr.select( function );
String result = session.createQuery( cr ).getSingleResult();
assertEquals( "5,7,6,5", result );
} );
}
@Test
@RequiresDialectFeature(feature = DialectFeatureChecks.SupportsStringAggregation.class)
@RequiresDialectFeature(feature = DialectFeatureChecks.SupportsWindowFunctions.class)
@RequiresDialect(H2Dialect.class)
public void testListaggWithFilterAndWindow(SessionFactoryScope scope) {
// note : not many dbs support this for now but the generated sql is correct
// listagg used as a window function, partitioned by theInt and filtered by theInt < 10:
// each row sees the aggregation of its own partition; the filtered-out row yields null.
scope.inTransaction( session -> {
HibernateCriteriaBuilder cb = session.getCriteriaBuilder();
CriteriaQuery<String> cr = cb.createQuery( String.class );
Root<EntityOfBasics> root = cr.from( EntityOfBasics.class );
JpaWindow window = cb.createWindow().partitionBy( root.get( "theInt" ) );
JpaExpression<String> function = cb.listagg(
cb.desc( root.get( "id" ) ),
cb.lt( root.get( "theInt" ), cb.literal( 10 ) ),
window,
root.get( "theString" ),
","
);
cr.select( function );
List<String> resultList = session.createQuery( cr ).getResultList();
// Rows with theInt == 5 form a two-element partition; the theInt == 13 row is filtered to null.
assertEquals( "5,5", resultList.get( 0 ) );
assertEquals( "6", resultList.get( 1 ) );
assertEquals( "7", resultList.get( 2 ) );
assertNull( resultList.get( 3 ) );
assertEquals( "5,5", resultList.get( 4 ) );
} );
}
/*
* Skipped for MySQL 8.0: The test fails due to a regression in MySQL 8.0.x, which no longer supports NULLS FIRST/LAST in ORDER BY within LISTAGG as expected.
* See https://bugs.mysql.com/bug.php?id=117765 for more details.
* This is a MySQL issue, not a problem in the dialect implementation.
*/
@Test
@RequiresDialectFeature(feature = DialectFeatureChecks.SupportsStringAggregation.class)
@SkipForDialect(dialectClass = MySQLDialect.class, majorVersion = 8, reason = "https://bugs.mysql.com/bug.php?id=117765")
@SkipForDialect(dialectClass = MySQLDialect.class, majorVersion = 9, reason = "https://bugs.mysql.com/bug.php?id=117765")
public void testListaggWithNullsClause(SessionFactoryScope scope) {
// Like testListagg, but the order expression uses the two-arg desc() overload
// (second flag presumably requests an explicit NULLS clause — confirm against
// HibernateCriteriaBuilder#desc) and the separator is passed as a literal expression.
scope.inTransaction( session -> {
HibernateCriteriaBuilder cb = session.getCriteriaBuilder();
CriteriaQuery<String> cr = cb.createQuery( String.class );
Root<EntityOfBasics> root = cr.from( EntityOfBasics.class );
JpaExpression<String> function = cb.listagg(
cb.desc( root.get( "id" ), true ),
root.get( "theString" ),
cb.literal( "," )
);
cr.select( function );
String result = session.createQuery( cr ).getSingleResult();
assertEquals( "5,13,7,6,5", result );
} );
}
@Test
@RequiresDialectFeature(feature = DialectFeatureChecks.SupportsInverseDistributionFunctions.class)
public void testInverseDistribution(SessionFactoryScope scope) {
// percentile_disc(0.5) WITHIN GROUP (ORDER BY theInt ASC): the discrete median
// of the theInt values {5, 5, 6, 7, 13} is 6.
scope.inTransaction( session -> {
HibernateCriteriaBuilder cb = session.getCriteriaBuilder();
CriteriaQuery<Integer> cr = cb.createQuery( Integer.class );
Root<EntityOfBasics> root = cr.from( EntityOfBasics.class );
JpaExpression<Integer> function = cb.percentileDisc(
cb.literal( 0.5 ),
root.get( "theInt" ),
SortDirection.ASCENDING,
Nulls.NONE
);
cr.select( function );
Integer result = session.createQuery( cr ).getSingleResult();
assertEquals( 6, result );
} );
}
@Test
@RequiresDialectFeature(feature = DialectFeatureChecks.SupportsInverseDistributionFunctions.class)
@RequiresDialectFeature(feature = DialectFeatureChecks.SupportsWindowFunctions.class)
@SkipForDialect(dialectClass = PostgreSQLDialect.class)
@SkipForDialect(dialectClass = CockroachDialect.class)
@SkipForDialect(dialectClass = PostgresPlusDialect.class)
public void testInverseDistributionWithWindow(SessionFactoryScope scope) {
// note : PostgreSQL, CockroachDB and EDB currently do not support
// ordered-set aggregate functions with OVER clause
// percentile_disc(0.5) partitioned by theInt: every partition contains a single
// distinct theInt value, so each row's median is simply its own theInt.
scope.inTransaction( session -> {
HibernateCriteriaBuilder cb = session.getCriteriaBuilder();
CriteriaQuery<Integer> cr = cb.createQuery( Integer.class );
Root<EntityOfBasics> root = cr.from( EntityOfBasics.class );
JpaWindow window = cb.createWindow().partitionBy( root.get( "theInt" ) );
JpaExpression<Integer> function = cb.percentileDisc(
cb.literal( 0.5 ),
window,
root.get( "theInt" ),
SortDirection.ASCENDING,
Nulls.NONE
);
cr.select( function ).orderBy( cb.asc( cb.literal( 1 ) ) );
List<Integer> resultList = session.createQuery( cr ).getResultList();
assertEquals( 5, resultList.size() );
assertEquals( 5, resultList.get( 0 ) );
assertEquals( 5, resultList.get( 1 ) );
assertEquals( 6, resultList.get( 2 ) );
assertEquals( 7, resultList.get( 3 ) );
assertEquals( 13, resultList.get( 4 ) );
} );
}
@Test
@RequiresDialectFeature(feature = DialectFeatureChecks.SupportsHypotheticalSetFunctions.class)
public void testHypotheticalSetPercentRank(SessionFactoryScope scope) {
// percent_rank(5) WITHIN GROUP (ORDER BY theInt ASC): the hypothetical value 5
// ranks first among the existing values, giving a percent rank of 0.0.
scope.inTransaction( session -> {
HibernateCriteriaBuilder cb = session.getCriteriaBuilder();
CriteriaQuery<Double> cr = cb.createQuery( Double.class );
Root<EntityOfBasics> root = cr.from( EntityOfBasics.class );
JpaExpression<Double> function = cb.percentRank( cb.asc( root.get( "theInt" ) ), cb.literal( 5 ) );
cr.select( function );
Double result = session.createQuery( cr ).getSingleResult();
assertEquals( 0.0D, result );
} );
}
@Test
@RequiresDialectFeature(feature = DialectFeatureChecks.SupportsHypotheticalSetFunctions.class)
public void testHypotheticalSetRank(SessionFactoryScope scope) {
// rank(5) WITHIN GROUP (ORDER BY theInt ASC): 5 ties the minimum theInt, so rank 1.
scope.inTransaction( session -> {
HibernateCriteriaBuilder cb = session.getCriteriaBuilder();
CriteriaQuery<Long> cr = cb.createQuery( Long.class );
Root<EntityOfBasics> root = cr.from( EntityOfBasics.class );
JpaExpression<Long> function = cb.rank( cb.asc( root.get( "theInt" ) ), cb.literal( 5 ) );
cr.select( function );
Long result = session.createQuery( cr ).getSingleResult();
assertEquals( 1L, result );
} );
}
@Test
@RequiresDialectFeature(feature = DialectFeatureChecks.SupportsHypotheticalSetFunctions.class)
@RequiresDialect(H2Dialect.class)
public void testHypotheticalSetRankWithGroupByHavingOrderByLimit(SessionFactoryScope scope) {
// Exercises a hypothetical-set rank combined with GROUP BY, HAVING, ORDER BY and an
// offset (setFirstResult) in one query; HAVING id > 1 keeps four groups and the
// offset of 1 skips the first, leaving three rows.
scope.inTransaction( session -> {
HibernateCriteriaBuilder cb = session.getCriteriaBuilder();
CriteriaQuery<Tuple> cr = cb.createQuery( Tuple.class );
JpaRoot<EntityOfBasics> e1 = (JpaRoot<EntityOfBasics>) cr.from( EntityOfBasics.class );
JpaCrossJoin<EntityOfBasics> e2 = e1.crossJoin( EntityOfBasics.class );
JpaExpression<Long> function = cb.rank( cb.asc( e1.get( "theInt" ) ), cb.literal( 5 ) );
cr.multiselect( e2.get( "id" ), function )
.groupBy( e2.get( "id" ) ).having( cb.gt( e2.get( "id" ), cb.literal( 1 ) ) )
.orderBy( cb.asc( cb.literal( 1 ) ), cb.asc( cb.literal( 2 ) ) );
List<Tuple> resultList = session.createQuery( cr ).setFirstResult( 1 ).getResultList();
assertEquals( 3, resultList.size() );
// Within every group the hypothetical value 5 ties the minimum theInt, so rank is 1.
assertEquals( 1L, resultList.get( 0 ).get( 1, Long.class ) );
assertEquals( 1L, resultList.get( 1 ).get( 1, Long.class ) );
assertEquals( 1L, resultList.get( 2 ).get( 1, Long.class ) );
} );
}
}
|
CriteriaOrderedSetAggregateTest
|
java
|
google__error-prone
|
check_api/src/main/java/com/google/errorprone/matchers/Matchers.java
|
{
"start": 5248,
"end": 13083
}
|
class ____ {
private Matchers() {}
/** A matcher that matches any AST node. */
public static <T extends Tree> Matcher<T> anything() {
return (t, state) -> true;
}
/** A matcher that matches no AST node. */
public static <T extends Tree> Matcher<T> nothing() {
return (t, state) -> false;
}
/** Matches an AST node iff it does not match the given matcher. */
public static <T extends Tree> Matcher<T> not(Matcher<T> matcher) {
return (t, state) -> !matcher.matches(t, state);
}
/**
* Compose several matchers together, such that the composite matches an AST node iff all the
* given matchers do.
*/
@SafeVarargs
public static <T extends Tree> Matcher<T> allOf(Matcher<? super T>... matchers) {
return (t, state) -> {
for (Matcher<? super T> matcher : matchers) {
if (!matcher.matches(t, state)) {
return false;
}
}
return true;
};
}
/**
* Compose several matchers together, such that the composite matches an AST node iff all the
* given matchers do.
*/
public static <T extends Tree> Matcher<T> allOf(Iterable<? extends Matcher<? super T>> matchers) {
return (t, state) -> {
for (Matcher<? super T> matcher : matchers) {
if (!matcher.matches(t, state)) {
return false;
}
}
return true;
};
}
/**
* Compose several matchers together, such that the composite matches an AST node if any of the
* given matchers do.
*/
public static <T extends Tree> Matcher<T> anyOf(Iterable<? extends Matcher<? super T>> matchers) {
return (t, state) -> {
for (Matcher<? super T> matcher : matchers) {
if (matcher.matches(t, state)) {
return true;
}
}
return false;
};
}
@SafeVarargs
public static <T extends Tree> Matcher<T> anyOf(Matcher<? super T>... matchers) {
// IntelliJ claims it can infer <Matcher<? super T>>, but blaze can't (b/132970194).
return anyOf(Arrays.<Matcher<? super T>>asList(matchers));
}
/** Matches if an AST node is an instance of the given class. */
public static <T extends Tree> Matcher<T> isInstance(java.lang.Class<?> klass) {
return (t, state) -> klass.isInstance(t);
}
/** Matches an AST node of a given kind, for example, an Annotation or a switch block. */
public static <T extends Tree> Matcher<T> kindIs(Kind kind) {
return (tree, state) -> tree.getKind() == kind;
}
/** Matches an AST node of a given kind, for example, an Annotation or a switch block. */
public static <T extends Tree> Matcher<T> kindAnyOf(Set<Kind> kinds) {
return (tree, state) -> kinds.contains(tree.getKind());
}
/** Matches an AST node which is the same object reference as the given node. */
public static <T extends Tree> Matcher<T> isSame(Tree t) {
return (tree, state) -> tree == t;
}
/** Matches a static method. */
public static StaticMethodMatcher staticMethod() {
return MethodMatchers.staticMethod();
}
/** Matches an instance method. */
public static InstanceMethodMatcher instanceMethod() {
return MethodMatchers.instanceMethod();
}
/** Matches a static or instance method. */
public static AnyMethodMatcher anyMethod() {
return MethodMatchers.anyMethod();
}
/** Matches a constructor. */
public static ConstructorMatcher constructor() {
return MethodMatchers.constructor();
}
/**
* Match a Tree based solely on the Symbol produced by {@link ASTHelpers#getSymbol(Tree)}.
*
* <p>If {@code getSymbol} returns {@code null}, the matcher returns false instead of calling
* {@code pred}.
*/
public static <T extends Tree> Matcher<T> symbolMatcher(BiPredicate<Symbol, VisitorState> pred) {
return (tree, state) -> {
Symbol sym = getSymbol(tree);
return sym != null && pred.test(sym, state);
};
}
/** Matches an AST node that represents a non-static field. */
public static Matcher<ExpressionTree> isInstanceField() {
return symbolMatcher(
(symbol, state) -> symbol.getKind() == ElementKind.FIELD && !ASTHelpers.isStatic(symbol));
}
/** Matches an AST node that represents a local variable or parameter. */
public static Matcher<ExpressionTree> isVariable() {
return symbolMatcher(
(symbol, state) ->
symbol.getKind() == ElementKind.LOCAL_VARIABLE
|| symbol.getKind() == ElementKind.PARAMETER);
}
/**
* Matches a compound assignment operator AST node which matches a given left-operand matcher, a
* given right-operand matcher, and a specific compound assignment operator.
*
* @param operator Which compound assignment operator to match against.
* @param leftOperandMatcher The matcher to apply to the left operand.
* @param rightOperandMatcher The matcher to apply to the right operand.
*/
public static CompoundAssignment compoundAssignment(
Kind operator,
Matcher<ExpressionTree> leftOperandMatcher,
Matcher<ExpressionTree> rightOperandMatcher) {
Set<Kind> operators = new HashSet<>(1);
operators.add(operator);
return new CompoundAssignment(operators, leftOperandMatcher, rightOperandMatcher);
}
/**
* Matches a compound assignment operator AST node which matches a given left-operand matcher, a
* given right-operand matcher, and is one of a set of compound assignment operators. Does not
* match compound assignment operators.
*
* @param operators Which compound assignment operators to match against.
* @param receiverMatcher The matcher to apply to the receiver.
* @param expressionMatcher The matcher to apply to the expression.
*/
public static CompoundAssignment compoundAssignment(
Set<Kind> operators,
Matcher<ExpressionTree> receiverMatcher,
Matcher<ExpressionTree> expressionMatcher) {
return new CompoundAssignment(operators, receiverMatcher, expressionMatcher);
}
/**
* Matches when the receiver of an instance method is the same reference as a particular argument
* to the method. For example, receiverSameAsArgument(1) would match {@code obj.method("", obj)}
*
* @param argNum The number of the argument to compare against (zero-based.
*/
public static Matcher<? super MethodInvocationTree> receiverSameAsArgument(int argNum) {
return (t, state) -> {
List<? extends ExpressionTree> args = t.getArguments();
if (args.size() <= argNum) {
return false;
}
ExpressionTree arg = args.get(argNum);
JCExpression methodSelect = (JCExpression) t.getMethodSelect();
if (methodSelect instanceof JCFieldAccess fieldAccess) {
return ASTHelpers.sameVariable(fieldAccess.getExpression(), arg);
} else if (methodSelect instanceof JCIdent) {
// A bare method call: "equals(foo)". Receiver is implicitly "this".
return arg.toString().equals("this");
}
return false;
};
}
/**
 * Matches a method invocation whose receiver expression matches the given matcher.
 *
 * <p>Never matches an invocation without an explicit receiver (i.e. when
 * {@code ASTHelpers.getReceiver} returns {@code null}).
 */
public static Matcher<MethodInvocationTree> receiverOfInvocation(
Matcher<ExpressionTree> expressionTreeMatcher) {
return (methodInvocationTree, state) -> {
ExpressionTree receiver = ASTHelpers.getReceiver(methodInvocationTree);
return receiver != null && expressionTreeMatcher.matches(receiver, state);
};
}
/**
* Matches if the given annotation matcher matches all of or any of the annotations on this tree
* node.
*
* @param matchType Whether to match if the matchers match any of or all of the annotations on
* this tree.
* @param annotationMatcher The annotation matcher to use.
*/
public static <T extends Tree> MultiMatcher<T, AnnotationTree> annotations(
MatchType matchType, Matcher<AnnotationTree> annotationMatcher) {
return new AnnotationMatcher<>(matchType, annotationMatcher);
}
/** Matches a
|
Matchers
|
java
|
eclipse-vertx__vert.x
|
vertx-core/src/main/java/io/vertx/core/dns/MxRecord.java
|
{
"start": 663,
"end": 870
}
|
interface ____ {
/**
 * The record time to live, as reported by the DNS server
 * (presumably in seconds, per DNS convention — confirm).
 */
long ttl();
/**
 * The priority of the MX record; by DNS convention, lower values
 * indicate a more preferred mail exchange.
 */
int priority();
/**
 * The name of the MX record, i.e. the mail-exchange host name.
 */
String name();
}
|
MxRecord
|
java
|
lettuce-io__lettuce-core
|
src/main/java/io/lettuce/core/output/SocketAddressOutput.java
|
{
"start": 320,
"end": 880
}
|
class ____<K, V> extends CommandOutput<K, V, SocketAddress> {
// Hostname taken from the first bulk-string element of the reply.
private String hostname;
// True once the hostname element has been consumed; the next element is the port.
private boolean hasHostname;
public SocketAddressOutput(RedisCodec<K, V> codec) {
// No meaningful default; output stays null until both reply elements arrive.
super(codec, null);
}
// Consumes the reply elements in order — hostname first, then port — and builds
// an unresolved InetSocketAddress (no DNS lookup is performed here).
@Override
public void set(ByteBuffer bytes) {
if (!hasHostname) {
hostname = decodeString(bytes);
hasHostname = true;
return;
}
int port = Integer.parseInt(decodeString(bytes));
output = InetSocketAddress.createUnresolved(hostname, port);
}
}
|
SocketAddressOutput
|
java
|
quarkusio__quarkus
|
extensions/agroal/deployment/src/test/java/io/quarkus/agroal/test/MultipleDataSourcesConfigTest.java
|
{
"start": 395,
"end": 1375
}
|
class ____ {
//tag::injection[]
@Inject
AgroalDataSource defaultDataSource;
@Inject
@DataSource("users")
AgroalDataSource dataSource1;
@Inject
@DataSource("inventory")
AgroalDataSource dataSource2;
//end::injection[]
// Boots a minimal Quarkus app whose config declares three datasources:
// the default one plus the named "users" and "inventory" datasources.
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClass(MultipleDataSourcesTestUtil.class))
.withConfigurationResource("application-multiple-datasources.properties");
// Verifies each injected datasource resolves to its own JDBC URL, username and
// pool size (the testDataSource helper is defined outside this class — confirm
// its exact assertions in MultipleDataSourcesTestUtil).
@Test
public void testDataSourceInjection() throws SQLException {
testDataSource("default", defaultDataSource, "jdbc:h2:tcp://localhost/mem:default", "username-default", 13);
testDataSource("users", dataSource1, "jdbc:h2:tcp://localhost/mem:users", "username1", 11);
testDataSource("inventory", dataSource2, "jdbc:h2:tcp://localhost/mem:inventory", "username2", 12);
}
}
|
MultipleDataSourcesConfigTest
|
java
|
google__truth
|
core/src/test/java/com/google/common/truth/TestCorrespondences.java
|
{
"start": 1176,
"end": 3365
}
|
class ____ {
/**
* A correspondence between strings and integers which tests whether the string parses as the
* integer. Parsing is as specified by {@link Integer#decode(String)}. It considers null to
* correspond to null only.
*/
static final Correspondence<@Nullable String, @Nullable Integer>
STRING_PARSES_TO_INTEGER_CORRESPONDENCE =
Correspondence.from(TestCorrespondences::stringParsesToInteger, "parses to");
/**
 * Returns whether {@code actual} parses (via {@link Integer#decode}) to exactly
 * {@code expected}. A null actual corresponds only to a null expected; an
 * unparseable string corresponds to nothing.
 */
private static boolean stringParsesToInteger(
@Nullable String actual, @Nullable Integer expected) {
if (actual == null) {
return expected == null;
}
// These checks can fail under Kotlin/Native, which doesn't catch the problem earlier: KT-68165.
//noinspection ConstantConditions
if (actual.getClass() != String.class) {
throw new ClassCastException("not a String: " + actual.getClass());
}
//noinspection ConstantConditions
if (expected != null && expected.getClass() != Integer.class) {
throw new ClassCastException("not an Integer: " + expected.getClass());
}
try {
return Integer.decode(actual).equals(expected);
} catch (NumberFormatException e) {
// Not a parseable integer, so it cannot correspond to any expected value.
return false;
}
}
/** A formatter for the diffs between integers. */
static final DiffFormatter<@Nullable Integer, @Nullable Integer> INT_DIFF_FORMATTER =
(a, e) -> Integer.toString(requireNonNull(a) - requireNonNull(e));
/**
* A correspondence between integers which tests whether they are within 10 of each other. Smart
* diffing is enabled, with a formatted diff showing the actual value less the expected value.
* Does not support null values.
*/
static final Correspondence<@Nullable Integer, @Nullable Integer> WITHIN_10_OF =
Correspondence.from(
(@Nullable Integer actual, @Nullable Integer expected) -> {
if (actual == null || expected == null) {
throw new NullPointerExceptionFromWithin10Of();
}
return abs(actual - expected) <= 10;
},
"is within 10 of")
.formattingDiffsUsing(INT_DIFF_FORMATTER);
private static final
|
TestCorrespondences
|
java
|
spring-projects__spring-framework
|
spring-web/src/main/java/org/springframework/http/codec/protobuf/KotlinSerializationProtobufEncoder.java
|
{
"start": 2013,
"end": 3399
}
|
/**
 * Encoder writing Protocol Buffers payloads via kotlinx.serialization's
 * {@code ProtoBuf} format, advertising the protobuf MIME types from
 * {@code ProtobufCodecSupport}.
 */
class ____ extends KotlinSerializationBinaryEncoder<ProtoBuf> {
/**
 * Construct a new encoder using {@link ProtoBuf.Default} instance which
 * only encodes types annotated with {@link kotlinx.serialization.Serializable @Serializable}
 * at type or generics level.
 */
public KotlinSerializationProtobufEncoder() {
this(ProtoBuf.Default);
}
/**
 * Construct a new encoder using {@link ProtoBuf.Default} instance which
 * only encodes types for which the specified predicate returns {@code true}.
 * @since 7.0
 */
public KotlinSerializationProtobufEncoder(Predicate<ResolvableType> typePredicate) {
this(ProtoBuf.Default, typePredicate);
}
/**
 * Construct a new encoder using the provided {@link ProtoBuf} instance which
 * only encodes types annotated with {@link kotlinx.serialization.Serializable @Serializable}
 * at type or generics level.
 */
public KotlinSerializationProtobufEncoder(ProtoBuf protobuf) {
super(protobuf, ProtobufCodecSupport.MIME_TYPES);
}
/**
 * Construct a new encoder using the provided {@link ProtoBuf} instance which
 * only encodes types for which the specified predicate returns {@code true}.
 * @since 7.0
 */
public KotlinSerializationProtobufEncoder(ProtoBuf protobuf, Predicate<ResolvableType> typePredicate) {
super(protobuf, typePredicate, ProtobufCodecSupport.MIME_TYPES);
}
}
|
KotlinSerializationProtobufEncoder
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/Web3jEndpointBuilderFactory.java
|
{
"start": 47227,
"end": 47519
}
|
/**
 * Advanced builder for the Web3j endpoint, combining the advanced consumer and
 * producer views. Generated endpoint-DSL code — do not edit by hand.
 */
interface ____
extends
AdvancedWeb3jEndpointConsumerBuilder,
AdvancedWeb3jEndpointProducerBuilder {
// Narrows this advanced builder back to the basic endpoint-builder view.
default Web3jEndpointBuilder basic() {
return (Web3jEndpointBuilder) this;
}
}
public
|
AdvancedWeb3jEndpointBuilder
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/resource/beans/container/internal/JpaCompliantLifecycleStrategy.java
|
{
"start": 2365,
"end": 5220
}
|
/**
 * JPA-compliant contained bean: created through the full CDI lifecycle
 * (produce / inject / postConstruct) when a BeanManager is available, with a
 * fallback to a plain {@code BeanInstanceProducer} otherwise.
 */
class ____<B> implements ContainedBeanImplementor<B> {
private final Class<B> beanType;
private final BeanInstanceProducer fallbackProducer;
// Mutable lifecycle state; beanManager and injectionTarget are nulled out once no longer needed.
private BeanManager beanManager;
private InjectionTarget<B> injectionTarget;
private CreationalContext<B> creationalContext;
private B beanInstance;
public BeanImpl(Class<B> beanType, BeanInstanceProducer fallbackProducer, BeanManager beanManager) {
this.beanType = beanType;
this.fallbackProducer = fallbackProducer;
this.beanManager = beanManager;
}
@Override
public Class<B> getBeanClass() {
return beanType;
}
@Override
public B getBeanInstance() {
// Lazily create the bean on first access.
if ( beanInstance == null ) {
initialize();
}
return beanInstance;
}
/**
 * Creates the bean instance. Prefers CDI; if the BeanManager is absent or the
 * CDI lifecycle fails, falls back to {@code fallbackProducer}. Idempotent.
 */
@Override
public void initialize() {
if ( beanInstance != null ) {
return;
}
if ( beanManager == null ) {
try {
beanInstance = fallbackProducer.produceBeanInstance( beanType );
return;
}
catch (Exception e) {
// the CDI BeanManager is not know to be ready for use and the
// fallback-producer was unable to create the bean...
throw new IllegalStateException(
"CDI BeanManager is not known to be ready for use and the fallback-producer was unable to create the bean",
new NotYetReadyException( e )
);
}
}
final AnnotatedType<B> annotatedType;
try {
annotatedType = beanManager.createAnnotatedType( beanType );
}
catch (Exception e) {
// Failure here indicates the BeanManager itself is not yet usable.
throw new IllegalStateException( new NotYetReadyException( e ) );
}
try {
// Standard CDI lifecycle; the null arguments mean no CDI Bean metadata is
// associated with this injection target / creational context — TODO confirm.
injectionTarget = beanManager.getInjectionTargetFactory( annotatedType ).createInjectionTarget( null );
creationalContext = beanManager.createCreationalContext( null );
beanInstance = injectionTarget.produce( creationalContext );
injectionTarget.inject( beanInstance, creationalContext );
injectionTarget.postConstruct( beanInstance );
}
catch (NotYetReadyException e) {
// Propagate "CDI not ready" unchanged rather than falling back.
throw e;
}
catch (Exception e) {
// Any other CDI failure: log and fall back to the plain producer,
// releasing whatever partial CDI state was created.
BEANS_MSG_LOGGER.errorResolvingCdiBeanUsingFallback( beanType.getName() );
beanInstance = fallbackProducer.produceBeanInstance( beanType );
try {
if ( creationalContext != null ) {
creationalContext.release();
}
}
catch (Exception ignore) {
}
creationalContext = null;
injectionTarget = null;
}
// The BeanManager is no longer needed once the instance exists.
beanManager = null;
}
/**
 * Destroys the bean via preDestroy/dispose when it was CDI-created; always
 * clears the lifecycle state so the bean cannot be used afterwards.
 */
@Override
public void release() {
if ( beanInstance == null ) {
return;
}
try {
if ( injectionTarget == null ) {
// Bean came from the fallback producer; nothing CDI-side to dispose.
// todo : BeanInstanceProducer#release?
return;
}
injectionTarget.preDestroy( beanInstance );
injectionTarget.dispose( beanInstance );
creationalContext.release();
}
catch (Exception ignore) {
// Best-effort cleanup: destruction failures are deliberately swallowed.
}
finally {
beanInstance = null;
creationalContext = null;
injectionTarget = null;
}
}
}
private static
|
BeanImpl
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java
|
{
"start": 632,
"end": 1796
}
|
/**
 * A check run at node startup; implementations report success or a failure message.
 */
interface ____ {
/**
 * Encapsulate the result of a bootstrap check.
 */
record BootstrapCheckResult(String message) {
// Shared success singleton; isSuccess relies on reference identity with this instance.
private static final BootstrapCheckResult SUCCESS = new BootstrapCheckResult(null);
public static BootstrapCheckResult success() {
return SUCCESS;
}
public static BootstrapCheckResult failure(final String message) {
// Failures must carry a non-null message.
Objects.requireNonNull(message);
return new BootstrapCheckResult(message);
}
public boolean isSuccess() {
// Identity comparison: only the shared SUCCESS instance counts as success.
return this == SUCCESS;
}
public boolean isFailure() {
return isSuccess() == false;
}
public String getMessage() {
// Only meaningful for failures; the SUCCESS singleton carries a null message.
assert isFailure();
assert message != null;
return message;
}
}
/**
 * Test if the node fails the check.
 *
 * @param context the bootstrap context
 * @return the result of the bootstrap check
 */
BootstrapCheckResult check(BootstrapContext context);
// Presumably marks checks enforced even when bootstrap checks are otherwise
// relaxed — confirm against the caller of this method.
default boolean alwaysEnforce() {
return false;
}
// Reference documentation describing this check, for inclusion in failure output.
ReferenceDocs referenceDocs();
}
|
BootstrapCheck
|
java
|
apache__camel
|
components/camel-jms/src/test/java/org/apache/camel/component/jms/issues/JmsPassThroughJmsKeyFormatStrategyTest.java
|
{
"start": 1714,
"end": 4549
}
|
/**
 * Verifies that {@code PassThroughJmsKeyFormatStrategy} leaves JMS header keys
 * unchanged — header names containing underscores ({@code HEADER_1},
 * {@code HEADER_2}) survive the JMS round trip as-is.
 */
class ____ extends AbstractJMSTest {
@Order(2)
@RegisterExtension
public static CamelContextExtension camelContextExtension = new DefaultCamelContextExtension();
protected CamelContext context;
protected ProducerTemplate template;
protected ConsumerTemplate consumer;
private final String uri = "activemq:queue:JmsPassThroughJmsKeyFormatStrategyTest";
@Test
public void testSendWithHeaders() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMessageCount(1);
mock.message(0).body().isEqualTo("Hello World");
// HEADER_1 is set by the producer; HEADER_2 is added in the route.
mock.message(0).header("HEADER_1").isEqualTo("VALUE_1");
mock.message(0).header("HEADER_2").isEqualTo("VALUE_2");
template.sendBodyAndHeader(uri, "Hello World", "HEADER_1", "VALUE_1");
MockEndpoint.assertIsSatisfied(context);
// Check both the single-header accessor and the headers map.
assertEquals("VALUE_1", mock.getReceivedExchanges().get(0).getIn().getHeader("HEADER_1"));
assertEquals("VALUE_2", mock.getReceivedExchanges().get(0).getIn().getHeader("HEADER_2"));
assertEquals("VALUE_1", mock.getReceivedExchanges().get(0).getIn().getHeaders().get("HEADER_1"));
assertEquals("VALUE_2", mock.getReceivedExchanges().get(0).getIn().getHeaders().get("HEADER_2"));
}
@Override
protected String getComponentName() {
return "activemq";
}
@Override
protected JmsComponent setupComponent(CamelContext camelContext, ArtemisService service, String componentName) {
final JmsComponent component = super.setupComponent(camelContext, service, componentName);
// configure to use passthrough
component.getConfiguration().setJmsKeyFormatStrategy(new PassThroughJmsKeyFormatStrategy());
return component;
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from(uri)
.process(exchange -> {
// Assert the header arrived untranslated before adding the second one.
Map<String, Object> headers = exchange.getIn().getHeaders();
assertEquals("VALUE_1", headers.get("HEADER_1"));
assertEquals("VALUE_1", exchange.getIn().getHeader("HEADER_1"));
})
.setHeader("HEADER_2", constant("VALUE_2"))
.to("mock:result");
}
};
}
@Override
public CamelContextExtension getCamelContextExtension() {
return camelContextExtension;
}
@BeforeEach
void setUpRequirements() {
context = camelContextExtension.getContext();
template = camelContextExtension.getProducerTemplate();
consumer = camelContextExtension.getConsumerTemplate();
}
}
|
JmsPassThroughJmsKeyFormatStrategyTest
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctIntAggregatorFunction.java
|
{
"start": 1100,
"end": 5972
}
|
/**
 * {@code AggregatorFunction} for count-distinct over int values, backed by an
 * HLL sketch state ({@code HllStates.SingleState}). This file lives under a
 * {@code generated} path — treat it as generated code and do not edit by hand.
 */
class ____ implements AggregatorFunction {
// Intermediate representation: a single serialized "hll" sketch per aggregator.
private static final List<IntermediateStateDesc> INTERMEDIATE_STATE_DESC = List.of(
new IntermediateStateDesc("hll", ElementType.BYTES_REF) );
private final DriverContext driverContext;
private final HllStates.SingleState state;
// Input channel positions; channels.get(0) is the value column.
private final List<Integer> channels;
private final int precision;
public CountDistinctIntAggregatorFunction(DriverContext driverContext, List<Integer> channels,
HllStates.SingleState state, int precision) {
this.driverContext = driverContext;
this.channels = channels;
this.state = state;
this.precision = precision;
}
public static CountDistinctIntAggregatorFunction create(DriverContext driverContext,
List<Integer> channels, int precision) {
return new CountDistinctIntAggregatorFunction(driverContext, channels, CountDistinctIntAggregator.initSingle(driverContext.bigArrays(), precision), precision);
}
public static List<IntermediateStateDesc> intermediateStateDesc() {
return INTERMEDIATE_STATE_DESC;
}
@Override
public int intermediateBlockCount() {
return INTERMEDIATE_STATE_DESC.size();
}
// Dispatches on the mask: skip fully-masked pages, take the fast unmasked path
// when possible, otherwise apply the mask per position.
@Override
public void addRawInput(Page page, BooleanVector mask) {
if (mask.allFalse()) {
// Entire page masked away
} else if (mask.allTrue()) {
addRawInputNotMasked(page);
} else {
addRawInputMasked(page, mask);
}
}
private void addRawInputMasked(Page page, BooleanVector mask) {
IntBlock vBlock = page.getBlock(channels.get(0));
IntVector vVector = vBlock.asVector();
if (vVector == null) {
// Block has nulls or multi-values; use the slower block path.
addRawBlock(vBlock, mask);
return;
}
addRawVector(vVector, mask);
}
private void addRawInputNotMasked(Page page) {
IntBlock vBlock = page.getBlock(channels.get(0));
IntVector vVector = vBlock.asVector();
if (vVector == null) {
addRawBlock(vBlock);
return;
}
addRawVector(vVector);
}
// Fast path: dense vector, every position contributes.
private void addRawVector(IntVector vVector) {
for (int valuesPosition = 0; valuesPosition < vVector.getPositionCount(); valuesPosition++) {
int vValue = vVector.getInt(valuesPosition);
CountDistinctIntAggregator.combine(state, vValue);
}
}
private void addRawVector(IntVector vVector, BooleanVector mask) {
for (int valuesPosition = 0; valuesPosition < vVector.getPositionCount(); valuesPosition++) {
if (mask.getBoolean(valuesPosition) == false) {
continue;
}
int vValue = vVector.getInt(valuesPosition);
CountDistinctIntAggregator.combine(state, vValue);
}
}
// Block path: positions may be null (value count 0) or hold multiple values.
private void addRawBlock(IntBlock vBlock) {
for (int p = 0; p < vBlock.getPositionCount(); p++) {
int vValueCount = vBlock.getValueCount(p);
if (vValueCount == 0) {
continue;
}
int vStart = vBlock.getFirstValueIndex(p);
int vEnd = vStart + vValueCount;
for (int vOffset = vStart; vOffset < vEnd; vOffset++) {
int vValue = vBlock.getInt(vOffset);
CountDistinctIntAggregator.combine(state, vValue);
}
}
}
private void addRawBlock(IntBlock vBlock, BooleanVector mask) {
for (int p = 0; p < vBlock.getPositionCount(); p++) {
if (mask.getBoolean(p) == false) {
continue;
}
int vValueCount = vBlock.getValueCount(p);
if (vValueCount == 0) {
continue;
}
int vStart = vBlock.getFirstValueIndex(p);
int vEnd = vStart + vValueCount;
for (int vOffset = vStart; vOffset < vEnd; vOffset++) {
int vValue = vBlock.getInt(vOffset);
CountDistinctIntAggregator.combine(state, vValue);
}
}
}
// Merges a serialized sketch produced by another aggregator instance into this state.
@Override
public void addIntermediateInput(Page page) {
assert channels.size() == intermediateBlockCount();
assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size();
Block hllUncast = page.getBlock(channels.get(0));
if (hllUncast.areAllValuesNull()) {
return;
}
BytesRefVector hll = ((BytesRefBlock) hllUncast).asVector();
// Intermediate input carries exactly one sketch per page.
assert hll.getPositionCount() == 1;
BytesRef hllScratch = new BytesRef();
CountDistinctIntAggregator.combineIntermediate(state, hll.getBytesRef(0, hllScratch));
}
@Override
public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) {
state.toIntermediate(blocks, offset, driverContext);
}
@Override
public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) {
blocks[offset] = CountDistinctIntAggregator.evaluateFinal(state, driverContext);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(getClass().getSimpleName()).append("[");
sb.append("channels=").append(channels);
sb.append("]");
return sb.toString();
}
@Override
public void close() {
// Releases the BigArrays-backed sketch state.
state.close();
}
}
|
CountDistinctIntAggregatorFunction
|
java
|
elastic__elasticsearch
|
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java
|
{
"start": 197552,
"end": 198132
}
|
/**
 * Test roles provider that resolves role descriptors synchronously from an
 * in-memory function, for use as a custom roles provider in these tests.
 */
class ____ implements BiConsumer<Set<String>, ActionListener<RoleRetrievalResult>> {
private final Function<Set<String>, RoleRetrievalResult> roleDescriptorsFunc;
InMemoryRolesProvider(Function<Set<String>, RoleRetrievalResult> roleDescriptorsFunc) {
this.roleDescriptorsFunc = roleDescriptorsFunc;
}
@Override
public void accept(Set<String> roles, ActionListener<RoleRetrievalResult> listener) {
// Resolve immediately on the calling thread; no asynchronous lookup involved.
listener.onResponse(roleDescriptorsFunc.apply(roles));
}
}
private abstract static
|
InMemoryRolesProvider
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetLocalizationStatusesRequestPBImpl.java
|
{
"start": 1664,
"end": 4712
}
|
class ____ extends
GetLocalizationStatusesRequest {
private GetLocalizationStatusesRequestProto proto =
GetLocalizationStatusesRequestProto.getDefaultInstance();
private GetLocalizationStatusesRequestProto.Builder builder;
private boolean viaProto = false;
private List<ContainerId> containerIds;
public GetLocalizationStatusesRequestPBImpl() {
builder = GetLocalizationStatusesRequestProto.newBuilder();
}
public GetLocalizationStatusesRequestPBImpl(
GetLocalizationStatusesRequestProto proto) {
this.proto = proto;
viaProto = true;
}
public GetLocalizationStatusesRequestProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null) {
return false;
}
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
private void mergeLocalToBuilder() {
if (this.containerIds != null) {
addLocalContainerIdsToProto();
}
}
private void mergeLocalToProto() {
if (viaProto) {
maybeInitBuilder();
}
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = GetLocalizationStatusesRequestProto.newBuilder(proto);
}
viaProto = false;
}
private void addLocalContainerIdsToProto() {
maybeInitBuilder();
builder.clearContainerId();
if (this.containerIds == null) {
return;
}
List<ContainerIdProto> protoList = new ArrayList<ContainerIdProto>();
for (ContainerId id : containerIds) {
protoList.add(convertToProtoFormat(id));
}
builder.addAllContainerId(protoList);
}
private void initLocalContainerIds() {
if (this.containerIds != null) {
return;
}
GetLocalizationStatusesRequestProtoOrBuilder p = viaProto ? proto : builder;
List<ContainerIdProto> toAdd = p.getContainerIdList();
this.containerIds = new ArrayList<>();
for (ContainerIdProto id : toAdd) {
this.containerIds.add(convertFromProtoFormat(id));
}
}
@Override
public List<ContainerId> getContainerIds() {
initLocalContainerIds();
return this.containerIds;
}
@Override
public void setContainerIds(List<ContainerId> containerIds) {
maybeInitBuilder();
if (containerIds == null) {
builder.clearContainerId();
}
this.containerIds = containerIds;
}
private ContainerIdPBImpl convertFromProtoFormat(ContainerIdProto p) {
return new ContainerIdPBImpl(p);
}
private ContainerIdProto convertToProtoFormat(ContainerId t) {
return ((ContainerIdPBImpl) t).getProto();
}
}
|
GetLocalizationStatusesRequestPBImpl
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/sql/oracle/select/OracleSelectTest76_brace.java
|
{
"start": 976,
"end": 2480
}
|
class ____ extends OracleTest {
public void test_0() throws Exception {
String sql = //
"((SELECT *\n" +
"FROM emp\n" +
"WHERE empno = 111));";
OracleStatementParser parser = new OracleStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement stmt = statementList.get(0);
print(statementList);
assertEquals(1, statementList.size());
OracleSchemaStatVisitor visitor = new OracleSchemaStatVisitor();
stmt.accept(visitor);
System.out.println("Tables : " + visitor.getTables());
System.out.println("fields : " + visitor.getColumns());
System.out.println("coditions : " + visitor.getConditions());
System.out.println("relationships : " + visitor.getRelationships());
System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(1, visitor.getTables().size());
// assertEquals(2, visitor.getColumns().size());
{
String text = SQLUtils.toOracleString(stmt);
assertEquals("(SELECT *\n" +
"FROM emp\n" +
"WHERE empno = 111);", text);
}
// assertTrue(visitor.getColumns().contains(new TableStat.Column("acduser.vw_acd_info", "xzqh")));
// assertTrue(visitor.getOrderByColumns().contains(new TableStat.Column("employees", "last_name")));
}
}
|
OracleSelectTest76_brace
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/BoundedBlockingSubpartitionTest.java
|
{
"start": 7749,
"end": 8585
}
|
class ____ implements BoundedData {
private BoundedData.Reader reader;
private TestingBoundedData(BoundedData.Reader reader) {
this.reader = checkNotNull(reader);
}
@Override
public void writeBuffer(Buffer buffer) throws IOException {}
@Override
public void finishWrite() throws IOException {}
@Override
public Reader createReader(ResultSubpartitionView ignored) throws IOException {
return reader;
}
@Override
public long getSize() {
throw new UnsupportedOperationException();
}
@Override
public Path getFilePath() {
throw new UnsupportedOperationException();
}
@Override
public void close() {}
}
private static
|
TestingBoundedData
|
java
|
hibernate__hibernate-orm
|
hibernate-testing/src/main/java/org/hibernate/testing/orm/junit/DialectFeatureChecks.java
|
{
"start": 23079,
"end": 23316
}
|
class ____ implements DialectFeatureCheck {
public boolean apply(Dialect dialect) {
// TiDB db does not support subqueries for ON condition
return !( dialect instanceof TiDBDialect );
}
}
public static
|
SupportsSubqueryInOnClause
|
java
|
google__gson
|
gson/src/test/java/com/google/gson/functional/NamingPolicyTest.java
|
{
"start": 12574,
"end": 12918
}
|
class ____ {
Integer a;
@SerializedName("a")
Double b;
ClassWithDuplicateFields(Integer a) {
this(a, null);
}
ClassWithDuplicateFields(Double b) {
this(null, b);
}
ClassWithDuplicateFields(Integer a, Double b) {
this.a = a;
this.b = b;
}
}
private static
|
ClassWithDuplicateFields
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.