language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractSeek.java
|
{
"start": 1208,
"end": 2008
}
|
class ____ extends AbstractContractSeekTest {
@BeforeAll
public static void createCluster() throws IOException {
RouterWebHDFSContract.createCluster();
}
@AfterAll
public static void teardownCluster() throws IOException {
RouterWebHDFSContract.destroyCluster();
}
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new RouterWebHDFSContract(conf);
}
@Override
public void testNegativeSeek() throws Throwable {
System.out.println("Not supported");
}
@Override
public void testSeekReadClosedFile() throws Throwable {
System.out.println("Not supported");
}
@Override
public void testSeekPastEndOfFileThenReseekAndRead() throws Throwable {
System.out.println("Not supported");
}
}
|
TestRouterWebHDFSContractSeek
|
java
|
quarkusio__quarkus
|
independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/buildextension/beans/SyntheticBeanParamsTest.java
|
{
"start": 4843,
"end": 9835
}
|
class ____ {
static boolean invoked = false;
static void invoke(Map<String, Object> params) {
assertEquals(true, (boolean) params.get("bool"));
assertEquals((byte) 1, (byte) params.get("b"));
assertEquals((short) 2, (short) params.get("s"));
assertEquals(3, (int) params.get("i"));
assertEquals(4L, (long) params.get("l"));
assertEquals(5.0F, (float) params.get("f"));
assertEquals(6.0, (double) params.get("d"));
assertEquals('a', (char) params.get("ch"));
assertEquals("bc", (String) params.get("str"));
assertEquals(SimpleEnum.FOO, (SimpleEnum) params.get("en"));
assertEquals(Object.class, (Class<?>) params.get("cls"));
assertEquals(SimpleEnum.class, (Class<?>) params.get("clsJandex"));
assertEquals("one", ((SimpleAnnotation) params.get("ann")).value());
assertEquals(2, ((boolean[]) params.get("boolArray")).length);
assertEquals(true, ((boolean[]) params.get("boolArray"))[0]);
assertEquals(false, ((boolean[]) params.get("boolArray"))[1]);
assertEquals(2, ((byte[]) params.get("bArray")).length);
assertEquals((byte) 7, ((byte[]) params.get("bArray"))[0]);
assertEquals((byte) 8, ((byte[]) params.get("bArray"))[1]);
assertEquals(2, ((short[]) params.get("sArray")).length);
assertEquals((short) 9, ((short[]) params.get("sArray"))[0]);
assertEquals((short) 10, ((short[]) params.get("sArray"))[1]);
assertEquals(2, ((int[]) params.get("iArray")).length);
assertEquals(11, ((int[]) params.get("iArray"))[0]);
assertEquals(12, ((int[]) params.get("iArray"))[1]);
assertEquals(2, ((long[]) params.get("lArray")).length);
assertEquals(13L, ((long[]) params.get("lArray"))[0]);
assertEquals(14L, ((long[]) params.get("lArray"))[1]);
assertEquals(2, ((float[]) params.get("fArray")).length);
assertEquals(15.0F, ((float[]) params.get("fArray"))[0]);
assertEquals(16.0F, ((float[]) params.get("fArray"))[1]);
assertEquals(2, ((double[]) params.get("dArray")).length);
assertEquals(17.0, ((double[]) params.get("dArray"))[0]);
assertEquals(18.0, ((double[]) params.get("dArray"))[1]);
assertEquals(2, ((char[]) params.get("chArray")).length);
assertEquals('d', ((char[]) params.get("chArray"))[0]);
assertEquals('e', ((char[]) params.get("chArray"))[1]);
assertEquals(2, ((String[]) params.get("strArray")).length);
assertEquals("fg", ((String[]) params.get("strArray"))[0]);
assertEquals("hi", ((String[]) params.get("strArray"))[1]);
assertEquals(2, ((SimpleEnum[]) params.get("enArray")).length);
assertEquals(SimpleEnum.BAR, ((SimpleEnum[]) params.get("enArray"))[0]);
assertEquals(SimpleEnum.BAZ, ((SimpleEnum[]) params.get("enArray"))[1]);
assertEquals(2, ((Enum<?>[]) params.get("enMixedArray")).length);
assertEquals(SimpleEnum.FOO, ((Enum<?>[]) params.get("enMixedArray"))[0]);
assertEquals(AnotherEnum.INSTANCE, ((Enum<?>[]) params.get("enMixedArray"))[1]);
assertEquals(2, ((Class<?>[]) params.get("clsArray")).length);
assertEquals(String.class, ((Class<?>[]) params.get("clsArray"))[0]);
assertEquals(Number.class, ((Class<?>[]) params.get("clsArray"))[1]);
assertEquals(2, ((Class<?>[]) params.get("clsJandexArray")).length);
assertEquals(SimpleEnum.class, ((Class<?>[]) params.get("clsJandexArray"))[0]);
assertEquals(SimpleAnnotation.class, ((Class<?>[]) params.get("clsJandexArray"))[1]);
assertEquals(2, ((SimpleAnnotation[]) params.get("annArray")).length);
assertEquals("two", ((SimpleAnnotation[]) params.get("annArray"))[0].value());
assertEquals("three", ((SimpleAnnotation[]) params.get("annArray"))[1].value());
assertEquals(2, ((Annotation[]) params.get("annMixedArray")).length);
assertEquals("four", ((SimpleAnnotation) ((Annotation[]) params.get("annMixedArray"))[0]).value());
assertEquals(42, ((AnotherAnnotation) ((Annotation[]) params.get("annMixedArray"))[1]).value());
invoked = true;
}
}
private static AnnotationInstance simpleAnnotation(String value) {
return AnnotationInstance.create(DotName.createSimple(SimpleAnnotation.class.getName()), null,
List.of(AnnotationValue.createStringValue("value", value)));
}
private static AnnotationInstance anotherAnnotation(int value) {
return AnnotationInstance.create(DotName.createSimple(AnotherAnnotation.class.getName()), null,
List.of(AnnotationValue.createIntegerValue("value", value)));
}
}
|
Verification
|
java
|
spring-projects__spring-framework
|
spring-beans/src/main/java/org/springframework/beans/factory/annotation/ParameterResolutionDelegate.java
|
{
"start": 1631,
"end": 4525
}
|
class ____ {
private static final Annotation[] EMPTY_ANNOTATION_ARRAY = new Annotation[0];
private static final AnnotatedElement EMPTY_ANNOTATED_ELEMENT = new AnnotatedElement() {
@Override
public <T extends Annotation> @Nullable T getAnnotation(Class<T> annotationClass) {
return null;
}
@Override
public Annotation[] getAnnotations() {
return EMPTY_ANNOTATION_ARRAY;
}
@Override
public Annotation[] getDeclaredAnnotations() {
return EMPTY_ANNOTATION_ARRAY;
}
};
private ParameterResolutionDelegate() {
}
/**
* Determine if the supplied {@link Parameter} can <em>potentially</em> be
* autowired from an {@link AutowireCapableBeanFactory}.
* <p>Returns {@code true} if the supplied parameter is annotated or
* meta-annotated with {@link Autowired @Autowired},
* {@link Qualifier @Qualifier}, or {@link Value @Value}.
* <p>Note that {@link #resolveDependency} may still be able to resolve the
* dependency for the supplied parameter even if this method returns {@code false}.
* @param parameter the parameter whose dependency should be autowired
* (must not be {@code null})
* @param parameterIndex the index of the parameter in the constructor or method
* that declares the parameter
* @see #resolveDependency
*/
public static boolean isAutowirable(Parameter parameter, int parameterIndex) {
Assert.notNull(parameter, "Parameter must not be null");
AnnotatedElement annotatedParameter = getEffectiveAnnotatedParameter(parameter, parameterIndex);
return (AnnotatedElementUtils.hasAnnotation(annotatedParameter, Autowired.class) ||
AnnotatedElementUtils.hasAnnotation(annotatedParameter, Qualifier.class) ||
AnnotatedElementUtils.hasAnnotation(annotatedParameter, Value.class));
}
/**
* Resolve the dependency for the supplied {@link Parameter} from the
* supplied {@link AutowireCapableBeanFactory}.
* <p>Provides comprehensive autowiring support for individual method parameters
* on par with Spring's dependency injection facilities for autowired fields and
* methods, including support for {@link Autowired @Autowired},
* {@link Qualifier @Qualifier}, and {@link Value @Value} with support for property
* placeholders and SpEL expressions in {@code @Value} declarations.
* <p>The dependency is required unless the parameter is annotated or meta-annotated
* with {@link Autowired @Autowired} with the {@link Autowired#required required}
* flag set to {@code false}.
* <p>If an explicit <em>qualifier</em> is not declared, the name of the parameter
* will be used as the qualifier for resolving ambiguities.
* @param parameter the parameter whose dependency should be resolved (must not be
* {@code null})
* @param parameterIndex the index of the parameter in the constructor or method
* that declares the parameter
* @param containingClass the concrete
|
ParameterResolutionDelegate
|
java
|
quarkusio__quarkus
|
extensions/security/deployment/src/test/java/io/quarkus/security/test/cdi/app/denied/unnanotated/BeanWithSecurityAnnotationsSubBean.java
|
{
"start": 212,
"end": 294
}
|
class ____ extends BeanWithSecurityAnnotations {
}
|
BeanWithSecurityAnnotationsSubBean
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/injection/guice/AbstractProcessor.java
|
{
"start": 1290,
"end": 2679
}
|
class ____ implements ElementVisitor<Boolean> {
protected Errors errors;
protected InjectorImpl injector;
protected AbstractProcessor(Errors errors) {
this.errors = errors;
}
public void process(Iterable<InjectorShell> isolatedInjectorBuilders) {
for (InjectorShell injectorShell : isolatedInjectorBuilders) {
process(injectorShell.getInjector(), injectorShell.getElements());
}
}
public void process(InjectorImpl injector, List<Element> elements) {
Errors errorsAnyElement = this.errors;
this.injector = injector;
try {
for (Iterator<Element> i = elements.iterator(); i.hasNext();) {
Element element = i.next();
this.errors = errorsAnyElement.withSource(element.getSource());
Boolean allDone = element.acceptVisitor(this);
if (allDone) {
i.remove();
}
}
} finally {
this.errors = errorsAnyElement;
this.injector = null;
}
}
@Override
public Boolean visit(Message message) {
return false;
}
@Override
public <T> Boolean visit(Binding<T> binding) {
return false;
}
@Override
public <T> Boolean visit(ProviderLookup<T> providerLookup) {
return false;
}
}
|
AbstractProcessor
|
java
|
apache__camel
|
components/camel-ftp/src/test/java/org/apache/camel/component/file/remote/integration/FromFtpRemoteFileSortByNestedExpressionIT.java
|
{
"start": 1064,
"end": 2882
}
|
class ____ extends FtpServerTestSupport {
private String getFtpUrl() {
return "ftp://admin@localhost:{{ftp.server.port}}/sortbynested?password=admin&delay=5000";
}
@Override
public void doPostSetup() throws Exception {
prepareFtpServer();
}
@Test
public void testSortFiles() throws Exception {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
from(getFtpUrl() + "&sortBy=file:ext;file:name").to("mock:result");
}
});
context.start();
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("Hello Dublin", "Hello London", "Hello Paris", "Hello Copenhagen");
MockEndpoint.assertIsSatisfied(context);
}
@Test
public void testSortFilesReverse() throws Exception {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
from(getFtpUrl() + "&sortBy=file:ext;reverse:file:name").to("mock:reverse");
}
});
context.start();
MockEndpoint reverse = getMockEndpoint("mock:reverse");
reverse.expectedBodiesReceived("Hello Paris", "Hello London", "Hello Dublin", "Hello Copenhagen");
MockEndpoint.assertIsSatisfied(context);
}
private void prepareFtpServer() {
// prepares the FTP Server by creating files on the server that we want
// to unit
// test that we can pool
sendFile(getFtpUrl(), "Hello Paris", "paris.txt");
sendFile(getFtpUrl(), "Hello London", "london.txt");
sendFile(getFtpUrl(), "Hello Copenhagen", "copenhagen.xml");
sendFile(getFtpUrl(), "Hello Dublin", "dublin.txt");
}
}
|
FromFtpRemoteFileSortByNestedExpressionIT
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/context/annotation/ImportAwareTests.java
|
{
"start": 11722,
"end": 11870
}
|
class ____ {
}
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.RUNTIME)
@Import(FeatureConfiguration.class)
public @
|
ApplicationConfiguration
|
java
|
FasterXML__jackson-databind
|
src/main/java/tools/jackson/databind/deser/std/StdNodeBasedDeserializer.java
|
{
"start": 531,
"end": 1133
}
|
class ____<T>
extends StdDeserializer<T>
{
protected ValueDeserializer<Object> _treeDeserializer;
/*
/**********************************************************************
/* Life-cycle
/**********************************************************************
*/
protected StdNodeBasedDeserializer(JavaType targetType) {
super(targetType);
}
protected StdNodeBasedDeserializer(Class<T> targetType) {
super(targetType);
}
/**
* "Copy-constructor" used when creating a modified copies, most often
* if sub-
|
StdNodeBasedDeserializer
|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/core/Maybe.java
|
{
"start": 1593,
"end": 1726
}
|
class ____ a deferred computation and emission of a single value, no value at all or an exception.
* <p>
* The {@code Maybe}
|
represents
|
java
|
reactor__reactor-core
|
reactor-core/src/jcstress/java/reactor/core/publisher/FluxBufferTimeoutStressTest.java
|
{
"start": 12016,
"end": 14642
}
|
class ____ {
final VirtualTimeScheduler virtualTimeScheduler = VirtualTimeScheduler.create();
final StressSubscriber<List<Long>> subscriber = new StressSubscriber<>(1);
final FastLogger fastLogger = new FastLogger(getClass().getName());
final FluxBufferTimeout.BufferTimeoutWithBackpressureSubscriber<Long, List<Long>> bufferTimeoutSubscriber =
new FluxBufferTimeout.BufferTimeoutWithBackpressureSubscriber<>(subscriber, 2, 1, TimeUnit.SECONDS, virtualTimeScheduler.createWorker(), bufferSupplier(), fastLogger);
Sinks.Many<Long> proxy = Sinks.unsafe().many().unicast().onBackpressureBuffer();
AtomicLong emits = new AtomicLong();
final AtomicLong requested = new AtomicLong();
{
proxy.asFlux()
.doOnRequest(r -> requested.incrementAndGet())
.subscribe(bufferTimeoutSubscriber);
}
@Actor
public void next() {
for (long i = 0; i < 4; i++) {
if (proxy.tryEmitNext(i) != Sinks.EmitResult.OK) {
return;
}
emits.set(i + 1);
}
proxy.tryEmitComplete();
}
@Actor
public void request() {
subscriber.request(1);
}
@Actor
public void cancel() {
subscriber.cancel();
}
@Arbiter
public void arbiter(LLL_Result result) {
result.r1 = subscriber.onNextCalls.get();
result.r2 = subscriber.onCompleteCalls.get();
result.r3 = requested.get();
if (subscriber.onCompleteCalls.get() > 1) {
fail(fastLogger, "unexpected completion " + subscriber.onCompleteCalls.get());
}
if (subscriber.concurrentOnComplete.get()) {
fail(fastLogger, "subscriber concurrent onComplete");
}
if (subscriber.concurrentOnNext.get()) {
fail(fastLogger, "subscriber concurrent onNext");
}
int emits = (int) this.emits.get();
if (subscriber.onCompleteCalls.get() == 1 && !allValuesHandled(fastLogger, 4,
emptyList(),
subscriber.receivedValues)) {
fail(fastLogger,
"Completed but not all values handled!" + "; result=" + result);
}
if (subscriber.onNextCalls.get() > 0 && !allValuesHandled(fastLogger, emits,
subscriber.discardedValues,
subscriber.receivedValues)) {
fail(fastLogger, "Not all " + emits + " emits handled!" + "; result=" + result);
}
}
}
@JCStressTest
@Outcome(id = "1, 1, 1", expect = Expect.ACCEPTABLE, desc = "")
@Outcome(id = "2, 1, 1", expect = Expect.ACCEPTABLE, desc = "")
@Outcome(id = "0, 0, 1", expect = Expect.ACCEPTABLE, desc = "")
@Outcome(id = "1, 0, 1", expect = Expect.ACCEPTABLE, desc = "")
@Outcome(id = "2, 0, 1", expect = Expect.ACCEPTABLE, desc = "")
@State
public static
|
FluxBufferTimeoutStressTestRaceDeliveryAndCancelWithBackpressure
|
java
|
spring-projects__spring-boot
|
module/spring-boot-health/src/test/java/org/springframework/boot/health/autoconfigure/actuate/endpoint/HealthEndpointAutoConfigurationTests.java
|
{
"start": 17291,
"end": 17717
}
|
class ____ {
@Bean
HealthIndicator simpleHealthIndicator() {
return () -> Health.up().withDetail("counter", 42).build();
}
@Bean
HealthIndicator additionalHealthIndicator() {
return () -> Health.up().build();
}
@Bean
ReactiveHealthIndicator reactiveHealthIndicator() {
return () -> Mono.just(Health.up().build());
}
}
@Configuration(proxyBeanMethods = false)
static
|
HealthIndicatorsConfiguration
|
java
|
spring-projects__spring-security
|
config/src/test/java/org/springframework/security/config/annotation/web/configurers/oauth2/server/authorization/OAuth2DeviceCodeGrantTests.java
|
{
"start": 30769,
"end": 31545
}
|
class ____ extends AuthorizationServerConfiguration {
// @formatter:off
@Bean
SecurityFilterChain authorizationServerSecurityFilterChain(HttpSecurity http) throws Exception {
http
.oauth2AuthorizationServer((authorizationServer) ->
authorizationServer
.deviceAuthorizationEndpoint(Customizer.withDefaults())
.deviceVerificationEndpoint(Customizer.withDefaults())
)
.authorizeHttpRequests((authorize) ->
authorize.anyRequest().authenticated()
);
return http.build();
}
// @formatter:on
@Bean
AuthorizationServerSettings authorizationServerSettings() {
return AuthorizationServerSettings.builder().multipleIssuersAllowed(true).build();
}
}
}
|
AuthorizationServerConfigurationWithMultipleIssuersAllowed
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/sql/mysql/param/MySqlParameterizedOutputVisitorTest_64.java
|
{
"start": 386,
"end": 1220
}
|
class ____ extends TestCase {
public void test_for_parameterize() throws Exception {
String sql = "select * from abc where id = trim(' abc ')";
List<Object> params = new ArrayList<Object>();
String psql = ParameterizedOutputVisitorUtils.parameterize(sql, JdbcConstants.MYSQL, params, VisitorFeature.OutputParameterizedUnMergeShardingTable);
assertEquals("SELECT *\n" +
"FROM abc\n" +
"WHERE id = ?", psql);
assertEquals(1, params.size());
assertEquals("\"abc\"", JSON.toJSONString(params.get(0)));
String rsql = ParameterizedOutputVisitorUtils.restore(psql, JdbcConstants.MYSQL, params);
assertEquals("SELECT *\n" +
"FROM abc\n" +
"WHERE id = 'abc'", rsql);
}
}
|
MySqlParameterizedOutputVisitorTest_64
|
java
|
apache__dubbo
|
dubbo-plugin/dubbo-qos/src/main/java/org/apache/dubbo/qos/command/impl/SetProfilerWarnPercent.java
|
{
"start": 1401,
"end": 2048
}
|
class ____ implements BaseCommand {
private static final ErrorTypeAwareLogger logger =
LoggerFactory.getErrorTypeAwareLogger(SetProfilerWarnPercent.class);
@Override
public String execute(CommandContext commandContext, String[] args) {
if (args == null || args.length != 1) {
return "args error. example: setProfilerWarnPercent 0.75";
}
ProfilerSwitch.setWarnPercent(Double.parseDouble(args[0]));
logger.warn(
QOS_PROFILER_WARN_PERCENT, "", "", "Dubbo Invocation Profiler warn percent has been set to " + args[0]);
return "OK";
}
}
|
SetProfilerWarnPercent
|
java
|
netty__netty
|
transport-native-epoll/src/test/java/io/netty/channel/epoll/EpollSocketChannelTest.java
|
{
"start": 1094,
"end": 4782
}
|
class ____ {
@Test
public void testTcpInfo() throws Exception {
EventLoopGroup group = new MultiThreadIoEventLoopGroup(1, EpollIoHandler.newFactory());
try {
Bootstrap bootstrap = new Bootstrap();
EpollSocketChannel ch = (EpollSocketChannel) bootstrap.group(group)
.channel(EpollSocketChannel.class)
.handler(new ChannelInboundHandlerAdapter())
.bind(new InetSocketAddress(0)).syncUninterruptibly().channel();
EpollTcpInfo info = ch.tcpInfo();
assertTcpInfo0(info);
ch.close().syncUninterruptibly();
} finally {
group.shutdownGracefully();
}
}
@Test
public void testTcpInfoReuse() throws Exception {
EventLoopGroup group = new MultiThreadIoEventLoopGroup(1, EpollIoHandler.newFactory());
try {
Bootstrap bootstrap = new Bootstrap();
EpollSocketChannel ch = (EpollSocketChannel) bootstrap.group(group)
.channel(EpollSocketChannel.class)
.handler(new ChannelInboundHandlerAdapter())
.bind(new InetSocketAddress(0)).syncUninterruptibly().channel();
EpollTcpInfo info = new EpollTcpInfo();
ch.tcpInfo(info);
assertTcpInfo0(info);
ch.close().syncUninterruptibly();
} finally {
group.shutdownGracefully();
}
}
private static void assertTcpInfo0(EpollTcpInfo info) throws Exception {
assertNotNull(info);
assertTrue(info.state() >= 0);
assertTrue(info.caState() >= 0);
assertTrue(info.retransmits() >= 0);
assertTrue(info.probes() >= 0);
assertTrue(info.backoff() >= 0);
assertTrue(info.options() >= 0);
assertTrue(info.sndWscale() >= 0);
assertTrue(info.rcvWscale() >= 0);
assertTrue(info.rto() >= 0);
assertTrue(info.ato() >= 0);
assertTrue(info.sndMss() >= 0);
assertTrue(info.rcvMss() >= 0);
assertTrue(info.unacked() >= 0);
assertTrue(info.sacked() >= 0);
assertTrue(info.lost() >= 0);
assertTrue(info.retrans() >= 0);
assertTrue(info.fackets() >= 0);
assertTrue(info.lastDataSent() >= 0);
assertTrue(info.lastAckSent() >= 0);
assertTrue(info.lastDataRecv() >= 0);
assertTrue(info.lastAckRecv() >= 0);
assertTrue(info.pmtu() >= 0);
assertTrue(info.rcvSsthresh() >= 0);
assertTrue(info.rtt() >= 0);
assertTrue(info.rttvar() >= 0);
assertTrue(info.sndSsthresh() >= 0);
assertTrue(info.sndCwnd() >= 0);
assertTrue(info.advmss() >= 0);
assertTrue(info.reordering() >= 0);
assertTrue(info.rcvRtt() >= 0);
assertTrue(info.rcvSpace() >= 0);
assertTrue(info.totalRetrans() >= 0);
}
// See https://github.com/netty/netty/issues/7159
@Test
public void testSoLingerNoAssertError() throws Exception {
EventLoopGroup group = new MultiThreadIoEventLoopGroup(1, EpollIoHandler.newFactory());
try {
Bootstrap bootstrap = new Bootstrap();
EpollSocketChannel ch = (EpollSocketChannel) bootstrap.group(group)
.channel(EpollSocketChannel.class)
.option(ChannelOption.SO_LINGER, 10)
.handler(new ChannelInboundHandlerAdapter())
.bind(new InetSocketAddress(0)).syncUninterruptibly().channel();
ch.close().syncUninterruptibly();
} finally {
group.shutdownGracefully();
}
}
}
|
EpollSocketChannelTest
|
java
|
apache__spark
|
common/utils-java/src/main/java/org/apache/spark/api/java/function/Function4.java
|
{
"start": 1006,
"end": 1125
}
|
interface ____<T1, T2, T3, T4, R> extends Serializable {
R call(T1 v1, T2 v2, T3 v3, T4 v4) throws Exception;
}
|
Function4
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/jsontype/ext/ExternalTypeIdWithCreatorTest.java
|
{
"start": 584,
"end": 649
}
|
class ____ implements Payload999 { }
public static
|
BarPayload999
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualDoublesEvaluator.java
|
{
"start": 5094,
"end": 5881
}
|
class ____ implements EvalOperator.ExpressionEvaluator.Factory {
private final Source source;
private final EvalOperator.ExpressionEvaluator.Factory lhs;
private final EvalOperator.ExpressionEvaluator.Factory rhs;
public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory lhs,
EvalOperator.ExpressionEvaluator.Factory rhs) {
this.source = source;
this.lhs = lhs;
this.rhs = rhs;
}
@Override
public LessThanOrEqualDoublesEvaluator get(DriverContext context) {
return new LessThanOrEqualDoublesEvaluator(source, lhs.get(context), rhs.get(context), context);
}
@Override
public String toString() {
return "LessThanOrEqualDoublesEvaluator[" + "lhs=" + lhs + ", rhs=" + rhs + "]";
}
}
}
|
Factory
|
java
|
elastic__elasticsearch
|
server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoPointScriptDocValuesIT.java
|
{
"start": 1806,
"end": 2030
}
|
class ____ extends ESSingleNodeTestCase {
@Override
protected Collection<Class<? extends Plugin>> getPlugins() {
return Arrays.asList(CustomScriptPlugin.class);
}
public static
|
GeoPointScriptDocValuesIT
|
java
|
bumptech__glide
|
library/src/main/java/com/bumptech/glide/load/engine/Engine.java
|
{
"start": 14063,
"end": 14852
}
|
class ____ implements DecodeJob.DiskCacheProvider {
private final DiskCache.Factory factory;
private volatile DiskCache diskCache;
LazyDiskCacheProvider(DiskCache.Factory factory) {
this.factory = factory;
}
@VisibleForTesting
synchronized void clearDiskCacheIfCreated() {
if (diskCache == null) {
return;
}
diskCache.clear();
}
@Override
public DiskCache getDiskCache() {
if (diskCache == null) {
synchronized (this) {
if (diskCache == null) {
diskCache = factory.build();
}
if (diskCache == null) {
diskCache = new DiskCacheAdapter();
}
}
}
return diskCache;
}
}
@VisibleForTesting
static
|
LazyDiskCacheProvider
|
java
|
elastic__elasticsearch
|
test/framework/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java
|
{
"start": 16727,
"end": 17671
}
|
class ____ extends NumberSyntheticSourceSupport {
private final boolean preserveSource;
protected NumberSyntheticSourceSupportForKeepTests(
Function<Number, Number> round,
boolean ignoreMalformed,
Mapper.SourceKeepMode sourceKeepMode
) {
super(round, ignoreMalformed);
this.preserveSource = sourceKeepMode == Mapper.SourceKeepMode.ALL;
}
@Override
public boolean preservesExactSource() {
return preserveSource;
}
@Override
public SyntheticSourceExample example(int maxVals) {
var example = super.example(maxVals);
return new SyntheticSourceExample(
example.expectedForSyntheticSource(),
example.expectedForSyntheticSource(),
example.mapping()
);
}
}
protected
|
NumberSyntheticSourceSupportForKeepTests
|
java
|
apache__camel
|
components/camel-caffeine/src/test/java/org/apache/camel/component/caffeine/processor/idempotent/CaffeineIdempotentRepositoryTest.java
|
{
"start": 1259,
"end": 4401
}
|
class ____ extends CamelTestSupport {
private CaffeineIdempotentRepository repo;
private Cache<String, Boolean> cache;
private String key01;
private String key02;
@Override
protected void doPreSetup() throws Exception {
super.doPreSetup();
repo = new CaffeineIdempotentRepository("test");
key01 = generateRandomString();
key02 = generateRandomString();
}
@Test
void testAdd() {
// add first key
assertTrue(repo.add(key01));
assertTrue(repo.getCache().asMap().containsKey(key01));
// try to add the same key again
assertFalse(repo.add(key01));
// try to add another one
assertTrue(repo.add(key02));
assertTrue(repo.getCache().asMap().containsKey(key02));
}
@Test
void testConfirm() {
// add first key and confirm
assertTrue(repo.add(key01));
assertTrue(repo.confirm(key01));
// try to confirm a key that isn't there
assertFalse(repo.confirm(key02));
}
@Test
void testContains() {
assertFalse(repo.contains(key01));
// add key and check again
assertTrue(repo.add(key01));
assertTrue(repo.contains(key01));
}
@Test
void testRemove() {
// add key to remove
assertTrue(repo.add(key01));
assertTrue(repo.add(key02));
assertTrue(repo.getCache().asMap().containsKey(key01));
assertTrue(repo.getCache().asMap().containsKey(key02));
// clear repo
repo.clear();
assertFalse(repo.getCache().asMap().containsKey(key01));
assertFalse(repo.getCache().asMap().containsKey(key02));
}
@Test
void testClear() {
// add key to remove
assertTrue(repo.add(key01));
assertTrue(repo.confirm(key01));
// remove key
assertTrue(repo.remove(key01));
assertFalse(repo.confirm(key01));
// try to remove a key that isn't there
repo.remove(key02);
}
@Test
void testRepositoryInRoute() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:out");
mock.expectedBodiesReceived("a", "b");
// c is a duplicate
// should be started
assertTrue(repo.getStatus().isStarted(), "Should be started");
// send 3 message with one duplicated key (key01)
template.sendBodyAndHeader("direct://in", "a", "messageId", key01);
template.sendBodyAndHeader("direct://in", "b", "messageId", key02);
template.sendBodyAndHeader("direct://in", "c", "messageId", key01);
MockEndpoint.assertIsSatisfied(context);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct://in")
.idempotentConsumer(header("messageId"), repo)
.to("mock://out");
}
};
}
protected static String generateRandomString() {
return UUID.randomUUID().toString();
}
}
|
CaffeineIdempotentRepositoryTest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/query/sqm/ConcurrentQueriesByIdsTest.java
|
{
"start": 825,
"end": 2247
}
|
class ____ {
public static final String QUERY_STRING = "select e from simple e where e.id in (:ids)";
@Test
public void run(SessionFactoryScope scope) throws InterruptedException {
scope.inTransaction( session -> {
for ( int i = 0; i < 100; i++ ) {
SimpleEntity entity = new SimpleEntity();
entity.setId( i );
entity.setName( "Name: " + i );
session.persist( entity );
}
} );
ExecutorService executorService = Executors.newFixedThreadPool( 3 );
CompletableFuture<List<SimpleEntity>>[] results = new CompletableFuture[10];
for ( int i = 0; i < 10; i++ ) {
int index = i;
results[i] = CompletableFuture.supplyAsync( () -> executeQuery( scope, index ), executorService );
}
for ( int i = 0; i < 10; i++ ) {
assertThat( results[i].join() ).hasSize( 10 );
}
executorService.shutdown();
}
private List<SimpleEntity> executeQuery(SessionFactoryScope scope, int index) {
return scope.fromSession(
session -> executeQuery( session, index )
);
}
private List<SimpleEntity> executeQuery(Session session, int index) {
int base = index * 10;
return session.createQuery( QUERY_STRING, SimpleEntity.class )
.setParameter(
"ids",
Arrays.asList( base + 0, base + 1, base + 2, base + 3, base + 4, base + 5,
base + 6, base + 7, base + 8, base + 9
)
)
.list();
}
@Entity(name = "simple")
public static
|
ConcurrentQueriesByIdsTest
|
java
|
spring-projects__spring-security
|
docs/src/test/java/org/springframework/security/docs/servlet/authentication/authorizationmanagerfactory/AuthorizationManagerFactoryTests.java
|
{
"start": 2288,
"end": 4646
}
|
class ____ {
public final SpringTestContext spring = new SpringTestContext(this);
@Autowired
MockMvc mockMvc;
@Test
@WithMockUser(authorities = { FactorGrantedAuthority.PASSWORD_AUTHORITY, FactorGrantedAuthority.OTT_AUTHORITY })
void getWhenAuthenticatedWithPasswordAndOttThenPermits() throws Exception {
this.spring.register(UseAuthorizationManagerFactoryConfiguration.class, Http200Controller.class).autowire();
// @formatter:off
this.mockMvc.perform(get("/"))
.andExpect(status().isOk())
.andExpect(authenticated().withUsername("user"));
// @formatter:on
}
@Test
@WithMockUser(authorities = FactorGrantedAuthority.PASSWORD_AUTHORITY)
void getWhenAuthenticatedWithPasswordThenRedirectsToOtt() throws Exception {
this.spring.register(UseAuthorizationManagerFactoryConfiguration.class, Http200Controller.class).autowire();
// @formatter:off
this.mockMvc.perform(get("/"))
.andExpect(status().is3xxRedirection())
.andExpect(redirectedUrl("/login?factor.type=ott&factor.reason=missing"));
// @formatter:on
}
@Test
@WithMockUser(authorities = FactorGrantedAuthority.OTT_AUTHORITY)
void getWhenAuthenticatedWithOttThenRedirectsToPassword() throws Exception {
this.spring.register(UseAuthorizationManagerFactoryConfiguration.class, Http200Controller.class).autowire();
// @formatter:off
this.mockMvc.perform(get("/"))
.andExpect(status().is3xxRedirection())
.andExpect(redirectedUrl("/login?factor.type=password&factor.reason=missing"));
// @formatter:on
}
@Test
@WithMockUser
void getWhenAuthenticatedThenRedirectsToPassword() throws Exception {
this.spring.register(UseAuthorizationManagerFactoryConfiguration.class, Http200Controller.class).autowire();
// @formatter:off
this.mockMvc.perform(get("/"))
.andExpect(status().is3xxRedirection())
.andExpect(redirectedUrl("/login?factor.type=password&factor.type=ott&factor.reason=missing&factor.reason=missing"));
// @formatter:on
}
@Test
void getWhenUnauthenticatedThenRedirectsToBoth() throws Exception {
this.spring.register(UseAuthorizationManagerFactoryConfiguration.class, Http200Controller.class).autowire();
// @formatter:off
this.mockMvc.perform(get("/"))
.andExpect(status().is3xxRedirection())
.andExpect(redirectedUrl("/login"));
// @formatter:on
}
@RestController
static
|
AuthorizationManagerFactoryTests
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest-csrf/deployment/src/test/java/io/quarkus/csrf/reactive/ProgrammaticCsrfTest.java
|
{
"start": 8311,
"end": 9310
}
|
class ____ {
@Inject
Template csrfToken;
@GET
@Path("/csrfTokenForm")
@Produces(MediaType.TEXT_HTML)
public TemplateInstance getCsrfTokenForm() {
return csrfToken.instance();
}
@POST
@Path("/csrfTokenForm")
@Consumes(MediaType.APPLICATION_FORM_URLENCODED)
@Produces(MediaType.TEXT_PLAIN)
public Uni<String> postCsrfTokenForm(@FormParam("name") String userName) {
return Uni.createFrom().item(userName);
}
@POST
@Path("/csrfTokenPost")
@Produces(MediaType.TEXT_PLAIN)
public Uni<String> postJson() {
return Uni.createFrom().item("no user");
}
@POST
@Path("/csrfTokenPostBody")
@Consumes(MediaType.TEXT_PLAIN)
@Produces(MediaType.TEXT_PLAIN)
public Uni<String> postJson(String body) {
return Uni.createFrom().item(body);
}
}
public static
|
TestResource
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlAsyncStopAction.java
|
{
"start": 415,
"end": 719
}
|
class ____ extends ActionType<EsqlQueryResponse> {
public static final EsqlAsyncStopAction INSTANCE = new EsqlAsyncStopAction();
public static final String NAME = EsqlAsyncActionNames.ESQL_ASYNC_STOP_ACTION_NAME;
private EsqlAsyncStopAction() {
super(NAME);
}
}
|
EsqlAsyncStopAction
|
java
|
jhy__jsoup
|
src/main/java/org/jsoup/select/NodeTraversor.java
|
{
"start": 566,
"end": 6345
}
|
class ____ {
/**
Run a depth-first traverse of the root and all of its descendants.
@param visitor Node visitor.
@param root the initial node point to traverse.
@see NodeVisitor#traverse(Node root)
*/
public static void traverse(NodeVisitor visitor, Node root) {
Validate.notNull(visitor);
Validate.notNull(root);
Node node = root;
int depth = 0;
while (node != null) {
Node parent = node.parentNode(); // remember parent to find nodes that get replaced in .head
int origSize = parent != null ? parent.childNodeSize() : 0;
Node next = node.nextSibling();
visitor.head(node, depth); // visit current node
// check for modifications to the tree
if (parent != null && !node.hasParent()) { // removed or replaced
if (origSize == parent.childNodeSize()) { // replaced
node = parent.childNode(node.siblingIndex()); // replace ditches parent but keeps sibling index
continue;
}
// else, removed
node = next;
if (node == null) {
// was last in parent. need to walk up the tree, tail()ing on the way, until we find a suitable next. Otherwise, would revisit ancestor nodes.
node = parent;
while (true) {
depth--;
visitor.tail(node, depth);
if (node == root) break;
if (node.nextSibling() != null) {
node = node.nextSibling();
break;
}
node = node.parentNode();
if (node == null) break;
}
if (node == root || node == null) break; // done, break outer
}
continue; // don't tail removed
}
if (node.childNodeSize() > 0) { // descend
node = node.childNode(0);
depth++;
} else {
while (true) {
assert node != null; // as depth > 0, will have parent
if (!(node.nextSibling() == null && depth > 0)) break;
visitor.tail(node, depth); // when no more siblings, ascend
node = node.parentNode();
depth--;
}
visitor.tail(node, depth);
if (node == root)
break;
node = node.nextSibling();
}
}
}
/**
Run a depth-first traversal of each Element.
@param visitor Node visitor.
@param elements Elements to traverse.
*/
public static void traverse(NodeVisitor visitor, Elements elements) {
Validate.notNull(visitor);
Validate.notNull(elements);
for (Element el : elements)
traverse(visitor, el);
}
/**
Run a depth-first filtered traversal of the root and all of its descendants.
@param filter NodeFilter visitor.
@param root the root node point to traverse.
@return The filter result of the root node, or {@link FilterResult#STOP}.
@see NodeFilter
*/
public static FilterResult filter(NodeFilter filter, Node root) {
Node node = root;
int depth = 0;
while (node != null) {
FilterResult result = filter.head(node, depth);
if (result == FilterResult.STOP)
return result;
// Descend into child nodes:
if (result == FilterResult.CONTINUE && node.childNodeSize() > 0) {
node = node.childNode(0);
++depth;
continue;
}
// No siblings, move upwards:
while (true) {
assert node != null; // depth > 0, so has parent
if (!(node.nextSibling() == null && depth > 0)) break;
// 'tail' current node:
if (result == FilterResult.CONTINUE || result == FilterResult.SKIP_CHILDREN) {
result = filter.tail(node, depth);
if (result == FilterResult.STOP)
return result;
}
Node prev = node; // In case we need to remove it below.
node = node.parentNode();
depth--;
if (result == FilterResult.REMOVE)
prev.remove(); // Remove AFTER finding parent.
result = FilterResult.CONTINUE; // Parent was not pruned.
}
// 'tail' current node, then proceed with siblings:
if (result == FilterResult.CONTINUE || result == FilterResult.SKIP_CHILDREN) {
result = filter.tail(node, depth);
if (result == FilterResult.STOP)
return result;
}
if (node == root)
return result;
Node prev = node; // In case we need to remove it below.
node = node.nextSibling();
if (result == FilterResult.REMOVE)
prev.remove(); // Remove AFTER finding sibling.
}
// root == null?
return FilterResult.CONTINUE;
}
/**
Run a depth-first filtered traversal of each Element.
@param filter NodeFilter visitor.
@see NodeFilter
*/
public static void filter(NodeFilter filter, Elements elements) {
Validate.notNull(filter);
Validate.notNull(elements);
for (Element el : elements)
if (filter(filter, el) == FilterResult.STOP)
break;
}
}
|
NodeTraversor
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/buffer/BufferBuilderAndConsumerTest.java
|
{
"start": 1653,
"end": 12859
}
|
class ____ {
private static final int BUFFER_INT_SIZE = 10;
private static final int BUFFER_SIZE = BUFFER_INT_SIZE * Integer.BYTES;
@Test
void referenceCounting() {
BufferBuilder bufferBuilder = createEmptyBufferBuilder(BUFFER_SIZE);
BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer();
assertThat(bufferBuilder.appendAndCommit(toByteBuffer(1, 2, 3)))
.isEqualTo(3 * Integer.BYTES);
bufferBuilder.close();
Buffer buffer = bufferConsumer.build();
assertThat(buffer.isRecycled()).isFalse();
buffer.recycleBuffer();
assertThat(buffer.isRecycled()).isFalse();
bufferConsumer.close();
assertThat(buffer.isRecycled()).isTrue();
}
@Test
void append() {
BufferBuilder bufferBuilder = createEmptyBufferBuilder(BUFFER_SIZE);
BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer();
int[] intsToWrite = new int[] {0, 1, 2, 3, 42};
ByteBuffer bytesToWrite = toByteBuffer(intsToWrite);
assertThat(bufferBuilder.appendAndCommit(bytesToWrite)).isEqualTo(bytesToWrite.limit());
assertThat(bytesToWrite.position()).isEqualTo(bytesToWrite.limit());
assertThat(bufferBuilder.isFull()).isFalse();
assertContent(bufferConsumer, intsToWrite);
}
@Test
void multipleAppends() {
BufferBuilder bufferBuilder = createEmptyBufferBuilder(BUFFER_SIZE);
BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer();
bufferBuilder.appendAndCommit(toByteBuffer(0, 1));
bufferBuilder.appendAndCommit(toByteBuffer(2));
bufferBuilder.appendAndCommit(toByteBuffer(3, 42));
assertContent(bufferConsumer, 0, 1, 2, 3, 42);
}
@Test
void multipleNotCommittedAppends() {
BufferBuilder bufferBuilder = createEmptyBufferBuilder(BUFFER_SIZE);
BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer();
bufferBuilder.append(toByteBuffer(0, 1));
bufferBuilder.append(toByteBuffer(2));
bufferBuilder.append(toByteBuffer(3, 42));
assertContent(bufferConsumer);
bufferBuilder.commit();
assertContent(bufferConsumer, 0, 1, 2, 3, 42);
}
@Test
void appendOverSize() {
BufferBuilder bufferBuilder = createEmptyBufferBuilder(BUFFER_SIZE);
BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer();
ByteBuffer bytesToWrite = toByteBuffer(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 42);
assertThat(bufferBuilder.appendAndCommit(bytesToWrite)).isEqualTo(BUFFER_SIZE);
assertThat(bufferBuilder.isFull()).isTrue();
assertContent(bufferConsumer, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9);
bufferBuilder = createEmptyBufferBuilder(BUFFER_SIZE);
bufferConsumer = bufferBuilder.createBufferConsumer();
assertThat(bufferBuilder.appendAndCommit(bytesToWrite)).isEqualTo(Integer.BYTES);
assertThat(bufferBuilder.isFull()).isFalse();
assertContent(bufferConsumer, 42);
}
@Test
void creatingBufferConsumerTwice() {
BufferBuilder bufferBuilder = createEmptyBufferBuilder(BUFFER_SIZE);
bufferBuilder.createBufferConsumer();
assertThatThrownBy(bufferBuilder::createBufferConsumer)
.isInstanceOf(IllegalStateException.class);
}
@Test
void copy() {
BufferBuilder bufferBuilder = createEmptyBufferBuilder(BUFFER_SIZE);
BufferConsumer bufferConsumer1 = bufferBuilder.createBufferConsumer();
bufferBuilder.appendAndCommit(toByteBuffer(0, 1));
BufferConsumer bufferConsumer2 = bufferConsumer1.copy();
bufferBuilder.appendAndCommit(toByteBuffer(2));
assertContent(bufferConsumer1, 0, 1, 2);
assertContent(bufferConsumer2, 0, 1, 2);
BufferConsumer bufferConsumer3 = bufferConsumer1.copy();
bufferBuilder.appendAndCommit(toByteBuffer(3, 42));
BufferConsumer bufferConsumer4 = bufferConsumer1.copy();
assertContent(bufferConsumer1, 3, 42);
assertContent(bufferConsumer2, 3, 42);
assertContent(bufferConsumer3, 3, 42);
assertContent(bufferConsumer4, 3, 42);
}
@Test
void buildEmptyBuffer() {
try (BufferBuilder bufferBuilder = createEmptyBufferBuilder(BUFFER_SIZE)) {
Buffer buffer = buildSingleBuffer(bufferBuilder);
assertThat(buffer.getSize()).isZero();
assertContent(buffer, FreeingBufferRecycler.INSTANCE);
}
}
@Test
void buildingBufferMultipleTimes() {
try (BufferBuilder bufferBuilder = createEmptyBufferBuilder(BUFFER_SIZE)) {
try (BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer()) {
bufferBuilder.appendAndCommit(toByteBuffer(0, 1));
bufferBuilder.appendAndCommit(toByteBuffer(2));
assertContent(bufferConsumer, 0, 1, 2);
bufferBuilder.appendAndCommit(toByteBuffer(3, 42));
bufferBuilder.appendAndCommit(toByteBuffer(44));
assertContent(bufferConsumer, 3, 42, 44);
ArrayList<Integer> originalValues = new ArrayList<>();
while (!bufferBuilder.isFull()) {
bufferBuilder.appendAndCommit(toByteBuffer(1337));
originalValues.add(1337);
}
assertContent(
bufferConsumer,
originalValues.stream().mapToInt(Integer::intValue).toArray());
}
}
}
@Test
void emptyIsFinished() {
testIsFinished(0);
}
@Test
void partiallyFullIsFinished() {
testIsFinished(BUFFER_INT_SIZE / 2);
}
@Test
void fullIsFinished() {
testIsFinished(BUFFER_INT_SIZE);
}
@Test
void testWritableBytes() {
BufferBuilder bufferBuilder = createEmptyBufferBuilder(BUFFER_SIZE);
assertThat(bufferBuilder.getWritableBytes()).isEqualTo(bufferBuilder.getMaxCapacity());
ByteBuffer byteBuffer = toByteBuffer(1, 2, 3);
bufferBuilder.append(byteBuffer);
assertThat(bufferBuilder.getWritableBytes())
.isEqualTo(bufferBuilder.getMaxCapacity() - byteBuffer.position());
assertThat(bufferBuilder.getWritableBytes())
.isEqualTo(bufferBuilder.getMaxCapacity() - byteBuffer.position());
}
@Test
void testWritableBytesWhenFull() {
BufferBuilder bufferBuilder = createEmptyBufferBuilder(BUFFER_SIZE);
bufferBuilder.append(toByteBuffer(new int[bufferBuilder.getMaxCapacity()]));
assertThat(bufferBuilder.getWritableBytes()).isZero();
}
@Test
void recycleWithoutConsumer() {
// given: Recycler with the counter of recycle invocation.
CountedRecycler recycler = new CountedRecycler();
BufferBuilder bufferBuilder =
new BufferBuilder(allocateUnpooledSegment(BUFFER_SIZE), recycler);
// when: Invoke the recycle.
bufferBuilder.close();
// then: Recycling successfully finished.
assertThat(recycler.recycleInvocationCounter).isOne();
}
@Test
void recycleConsumerAndBufferBuilder() {
// given: Recycler with the counter of recycling invocation.
CountedRecycler recycler = new CountedRecycler();
BufferBuilder bufferBuilder =
new BufferBuilder(allocateUnpooledSegment(BUFFER_SIZE), recycler);
// and: One buffer consumer.
BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer();
// when: Invoke the recycle of BufferBuilder.
bufferBuilder.close();
// then: Nothing happened because BufferBuilder has already consumer.
assertThat(recycler.recycleInvocationCounter).isZero();
// when: Close the consumer.
bufferConsumer.close();
// then: Recycling successfully finished.
assertThat(recycler.recycleInvocationCounter).isOne();
}
@Test
void trimToAvailableSize() {
BufferBuilder bufferBuilder = createEmptyBufferBuilder(BUFFER_SIZE);
assertThat(bufferBuilder.getMaxCapacity()).isEqualTo(BUFFER_SIZE);
bufferBuilder.trim(BUFFER_SIZE / 2);
assertThat(bufferBuilder.getMaxCapacity()).isEqualTo(BUFFER_SIZE / 2);
bufferBuilder.trim(0);
assertThat(bufferBuilder.getMaxCapacity()).isZero();
}
@Test
void trimToNegativeSize() {
BufferBuilder bufferBuilder = createEmptyBufferBuilder(BUFFER_SIZE);
assertThat(bufferBuilder.getMaxCapacity()).isEqualTo(BUFFER_SIZE);
bufferBuilder.trim(-1);
assertThat(bufferBuilder.getMaxCapacity()).isZero();
}
@Test
void trimToSizeLessThanWritten() {
BufferBuilder bufferBuilder = createEmptyBufferBuilder(BUFFER_SIZE);
assertThat(bufferBuilder.getMaxCapacity()).isEqualTo(BUFFER_SIZE);
bufferBuilder.append(toByteBuffer(1, 2, 3));
bufferBuilder.trim(4);
// Should be minimum possible size = 3 * int == 12.
assertThat(bufferBuilder.getMaxCapacity()).isEqualTo(12);
}
@Test
void trimToSizeGreaterThanMax() {
BufferBuilder bufferBuilder = createEmptyBufferBuilder(BUFFER_SIZE);
assertThat(bufferBuilder.getMaxCapacity()).isEqualTo(BUFFER_SIZE);
bufferBuilder.trim(BUFFER_SIZE + 1);
assertThat(bufferBuilder.getMaxCapacity()).isEqualTo(BUFFER_SIZE);
}
private static void testIsFinished(int writes) {
BufferBuilder bufferBuilder = createEmptyBufferBuilder(BUFFER_SIZE);
BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer();
for (int i = 0; i < writes; i++) {
assertThat(bufferBuilder.appendAndCommit(toByteBuffer(42))).isEqualTo(Integer.BYTES);
}
int expectedWrittenBytes = writes * Integer.BYTES;
assertThat(bufferBuilder.isFinished()).isFalse();
assertThat(bufferConsumer.isFinished()).isFalse();
assertThat(bufferConsumer.getWrittenBytes()).isZero();
bufferConsumer.build();
assertThat(bufferBuilder.isFinished()).isFalse();
assertThat(bufferConsumer.isFinished()).isFalse();
assertThat(bufferConsumer.getWrittenBytes()).isEqualTo(expectedWrittenBytes);
int actualWrittenBytes = bufferBuilder.finish();
assertThat(actualWrittenBytes).isEqualTo(expectedWrittenBytes);
assertThat(bufferBuilder.isFinished()).isTrue();
assertThat(bufferConsumer.isFinished()).isFalse();
assertThat(bufferConsumer.getWrittenBytes()).isEqualTo(expectedWrittenBytes);
actualWrittenBytes = bufferBuilder.finish();
assertThat(actualWrittenBytes).isEqualTo(expectedWrittenBytes);
assertThat(bufferBuilder.isFinished()).isTrue();
assertThat(bufferConsumer.isFinished()).isFalse();
assertThat(bufferConsumer.getWrittenBytes()).isEqualTo(expectedWrittenBytes);
assertThat(bufferConsumer.build().getSize()).isZero();
assertThat(bufferBuilder.isFinished()).isTrue();
}
private static
|
BufferBuilderAndConsumerTest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/dialect/function/JsonPathHelperTest.java
|
{
"start": 333,
"end": 1628
}
|
class ____ {
@Test
public void testRoot() {
assertEquals(
List.of(),
JsonPathHelper.parseJsonPathElements( "$" )
);
}
@Test
public void testRootArray() {
assertEquals(
List.of( new JsonPathHelper.JsonIndexAccess( 0 ) ),
JsonPathHelper.parseJsonPathElements( "$[0]" )
);
}
@Test
public void testDeReferenceRootArray() {
assertEquals(
List.of( new JsonPathHelper.JsonIndexAccess( 0 ), new JsonPathHelper.JsonAttribute( "attribute" ) ),
JsonPathHelper.parseJsonPathElements( "$[0].attribute" )
);
}
@Test
public void testSimplePath() {
assertEquals(
List.of( new JsonPathHelper.JsonAttribute( "attribute" ) ),
JsonPathHelper.parseJsonPathElements( "$.attribute" )
);
}
@Test
public void testArrayPath() {
assertEquals(
List.of( new JsonPathHelper.JsonAttribute( "attribute" ), new JsonPathHelper.JsonIndexAccess( 0 ) ),
JsonPathHelper.parseJsonPathElements( "$.attribute[0]" )
);
}
@Test
public void testDeepArrayPath() {
assertEquals(
List.of(
new JsonPathHelper.JsonAttribute( "attribute" ),
new JsonPathHelper.JsonIndexAccess( 0 ),
new JsonPathHelper.JsonAttribute( "subAttribute" )
),
JsonPathHelper.parseJsonPathElements( "$.attribute[0].subAttribute" )
);
}
}
|
JsonPathHelperTest
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/ManyShardsIT.java
|
{
"start": 9508,
"end": 13636
}
|
class ____ {
private final int maxAllowed;
private final AtomicInteger current = new AtomicInteger();
SearchContextCounter(int maxAllowed) {
this.maxAllowed = maxAllowed;
}
void onNewContext() {
int total = current.incrementAndGet();
assertThat("opening more shards than the limit", total, Matchers.lessThanOrEqualTo(maxAllowed));
}
void onContextReleased() {
int total = current.decrementAndGet();
assertThat(total, Matchers.greaterThanOrEqualTo(0));
}
}
public void testLimitConcurrentShards() {
Iterable<SearchService> searchServices = internalCluster().getInstances(SearchService.class);
try {
var queries = List.of(
"from test-* | stats count(user) by tags",
"from test-* | stats count(user) by tags | LIMIT 0",
"from test-* | stats count(user) by tags | LIMIT 1",
"from test-* | stats count(user) by tags | LIMIT 1000",
"from test-* | LIMIT 0",
"from test-* | LIMIT 1",
"from test-* | LIMIT 1000",
"from test-* | SORT tags | LIMIT 0",
"from test-* | SORT tags | LIMIT 1",
"from test-* | SORT tags | LIMIT 1000"
);
for (String q : queries) {
var pragmas = randomPragmas();
// For queries involving TopN, the node-reduce driver may hold on to contexts for longer (due to late materialization, which
// is only turned when the NODE_LEVEL_REDUCTION is turned on), so we don't check against the limit.
boolean nodeLevelReduction = QueryPragmas.NODE_LEVEL_REDUCTION.get(pragmas.getSettings());
int maxAllowed = q.contains("SORT tags") && nodeLevelReduction ? Integer.MAX_VALUE : pragmas.maxConcurrentShardsPerNode();
for (SearchService searchService : searchServices) {
SearchContextCounter counter = new SearchContextCounter(maxAllowed);
var mockSearchService = (MockSearchService) searchService;
mockSearchService.setOnCreateSearchContext(r -> counter.onNewContext());
mockSearchService.setOnRemoveContext(r -> counter.onContextReleased());
}
run(syncEsqlQueryRequest(q).pragmas(pragmas)).close();
}
} finally {
for (SearchService searchService : searchServices) {
var mockSearchService = (MockSearchService) searchService;
mockSearchService.setOnCreateSearchContext(r -> {});
mockSearchService.setOnRemoveContext(r -> {});
}
}
}
public void testCancelUnnecessaryRequests() {
assumeTrue("Requires pragmas", canUseQueryPragmas());
internalCluster().ensureAtLeastNumDataNodes(3);
var coordinatingNode = internalCluster().getNodeNames()[0];
var exchanges = new AtomicInteger(0);
var coordinatorNodeTransport = MockTransportService.getInstance(coordinatingNode);
coordinatorNodeTransport.addSendBehavior((connection, requestId, action, request, options) -> {
if (Objects.equals(action, ExchangeService.OPEN_EXCHANGE_ACTION_NAME)) {
logger.info("Opening exchange on node [{}]", connection.getNode().getId());
exchanges.incrementAndGet();
}
connection.sendRequest(requestId, action, request, options);
});
var query = syncEsqlQueryRequest("from test-* | LIMIT 1").pragmas(
new QueryPragmas(Settings.builder().put(QueryPragmas.MAX_CONCURRENT_NODES_PER_CLUSTER.getKey(), 1).build())
);
try (var result = safeGet(client().execute(EsqlQueryAction.INSTANCE, query))) {
assertThat(Iterables.size(result.rows()), equalTo(1L));
assertThat(exchanges.get(), lessThanOrEqualTo(2));
} finally {
coordinatorNodeTransport.clearAllRules();
}
}
}
|
SearchContextCounter
|
java
|
mockito__mockito
|
mockito-core/src/main/java/org/mockito/internal/matchers/ContainsExtraTypeInfo.java
|
{
"start": 533,
"end": 614
}
|
interface ____ {
/**
* @param className - name of the
|
ContainsExtraTypeInfo
|
java
|
spring-projects__spring-boot
|
module/spring-boot-webservices-test/src/test/java/org/springframework/boot/webservices/test/autoconfigure/server/WebServiceServerTypeExcludeFilterTests.java
|
{
"start": 1427,
"end": 3967
}
|
class ____ {
private final MetadataReaderFactory metadataReaderFactory = new SimpleMetadataReaderFactory();
@Test
void matchWhenHasNoEndpoints() throws IOException {
WebServiceServerTypeExcludeFilter filter = new WebServiceServerTypeExcludeFilter(WithNoEndpoints.class);
assertThat(exclude(filter, WebService1.class)).isFalse();
assertThat(exclude(filter, WebService2.class)).isFalse();
assertThat(exclude(filter, ExampleService.class)).isTrue();
assertThat(exclude(filter, ExampleRepository.class)).isTrue();
}
@Test
void matchWhenHasEndpoint() throws IOException {
WebServiceServerTypeExcludeFilter filter = new WebServiceServerTypeExcludeFilter(WithEndpoint.class);
assertThat(exclude(filter, WebService1.class)).isFalse();
assertThat(exclude(filter, WebService2.class)).isTrue();
assertThat(exclude(filter, ExampleService.class)).isTrue();
assertThat(exclude(filter, ExampleRepository.class)).isTrue();
}
@Test
void matchNotUsingDefaultFilters() throws IOException {
WebServiceServerTypeExcludeFilter filter = new WebServiceServerTypeExcludeFilter(NotUsingDefaultFilters.class);
assertThat(exclude(filter, WebService1.class)).isTrue();
assertThat(exclude(filter, WebService2.class)).isTrue();
assertThat(exclude(filter, ExampleService.class)).isTrue();
assertThat(exclude(filter, ExampleRepository.class)).isTrue();
}
@Test
void matchWithIncludeFilter() throws IOException {
WebServiceServerTypeExcludeFilter filter = new WebServiceServerTypeExcludeFilter(WithIncludeFilter.class);
assertThat(exclude(filter, WebService1.class)).isFalse();
assertThat(exclude(filter, WebService2.class)).isFalse();
assertThat(exclude(filter, ExampleService.class)).isTrue();
assertThat(exclude(filter, ExampleRepository.class)).isFalse();
}
@Test
void matchWithExcludeFilter() throws IOException {
WebServiceServerTypeExcludeFilter filter = new WebServiceServerTypeExcludeFilter(WithExcludeFilter.class);
assertThat(exclude(filter, WebService1.class)).isTrue();
assertThat(exclude(filter, WebService2.class)).isFalse();
assertThat(exclude(filter, ExampleService.class)).isTrue();
assertThat(exclude(filter, ExampleRepository.class)).isTrue();
}
private boolean exclude(WebServiceServerTypeExcludeFilter filter, Class<?> type) throws IOException {
MetadataReader metadataReader = this.metadataReaderFactory.getMetadataReader(type.getName());
return filter.match(metadataReader, this.metadataReaderFactory);
}
@WebServiceServerTest
static
|
WebServiceServerTypeExcludeFilterTests
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/InterruptionTest.java
|
{
"start": 3286,
"end": 3828
}
|
class ____ extends AbstractFuture<Object> {
void f(Future<?> f) {
new AbstractFuture<Object>() {
@Override
public boolean cancel(boolean mayInterruptIfRunning) {
return f.cancel(mayInterruptIfRunning);
}
};
}
}
""")
.doTest();
}
@Test
public void negativeInterrupt() {
compilationHelper
.addSourceLines(
"Test.java",
"""
|
Test
|
java
|
elastic__elasticsearch
|
x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/input/chain/ChainIntegrationTests.java
|
{
"start": 2078,
"end": 4304
}
|
class ____ extends AbstractWatcherIntegrationTestCase {
@Override
protected boolean addMockHttpTransport() {
return false; // enable http
}
@Override
protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
return Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings)).build();
}
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return CollectionUtils.appendToCopy(super.nodePlugins(), Netty4Plugin.class); // for http
}
public void testChainedInputsAreWorking() throws Exception {
String index = "the-most-awesome-index-ever";
createIndex(index);
prepareIndex(index).setId("id").setSource("{}", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get();
InetSocketAddress address = internalCluster().httpAddresses()[0];
HttpInput.Builder httpInputBuilder = httpInput(
HttpRequestTemplate.builder(address.getHostString(), address.getPort())
.path("/" + index + "/_search")
.body(Strings.toString(jsonBuilder().startObject().field("size", 1).endObject()))
);
ChainInput.Builder chainedInputBuilder = chainInput().add("first", simpleInput("url", "/" + index + "/_search"))
.add("second", httpInputBuilder);
new PutWatchRequestBuilder(client(), "_name").setSource(
watchBuilder().trigger(schedule(interval(5, SECONDS)))
.input(chainedInputBuilder)
.addAction("indexAction", indexAction("my-index"))
).get();
timeWarp().trigger("_name");
refresh();
assertWatchWithMinimumPerformedActionsCount("_name", 1, false);
}
public void assertWatchExecuted() {
try {
refresh();
assertResponse(prepareSearch("my-index"), searchResponse -> {
assertHitCount(searchResponse, 1);
assertThat(searchResponse.getHits().getAt(0).getSourceAsString(), containsString("the-most-awesome-index-ever"));
});
} catch (IndexNotFoundException e) {
fail("Index not found: [" + e.getIndex() + "]");
}
}
}
|
ChainIntegrationTests
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/api/CheckpointBarrier.java
|
{
"start": 1971,
"end": 4919
}
|
class ____ extends RuntimeEvent {
private final long id;
private final long timestamp;
private final CheckpointOptions checkpointOptions;
public CheckpointBarrier(long id, long timestamp, CheckpointOptions checkpointOptions) {
this.id = id;
this.timestamp = timestamp;
this.checkpointOptions = checkNotNull(checkpointOptions);
}
public long getId() {
return id;
}
public long getTimestamp() {
return timestamp;
}
public CheckpointOptions getCheckpointOptions() {
return checkpointOptions;
}
public CheckpointBarrier withOptions(CheckpointOptions checkpointOptions) {
return this.checkpointOptions == checkpointOptions
? this
: new CheckpointBarrier(id, timestamp, checkpointOptions);
}
// ------------------------------------------------------------------------
// Serialization
// ------------------------------------------------------------------------
//
// These methods are inherited form the generic serialization of AbstractEvent
// but would require the CheckpointBarrier to be mutable. Since all serialization
// for events goes through the EventSerializer class, which has special serialization
// for the CheckpointBarrier, we don't need these methods
//
@Override
public void write(DataOutputView out) throws IOException {
throw new UnsupportedOperationException("This method should never be called");
}
@Override
public void read(DataInputView in) throws IOException {
throw new UnsupportedOperationException("This method should never be called");
}
// ------------------------------------------------------------------------
@Override
public int hashCode() {
return (int) (id ^ (id >>> 32) ^ timestamp ^ (timestamp >>> 32));
}
@Override
public boolean equals(Object other) {
if (other == this) {
return true;
} else if (other == null || other.getClass() != CheckpointBarrier.class) {
return false;
} else {
CheckpointBarrier that = (CheckpointBarrier) other;
return that.id == this.id
&& that.timestamp == this.timestamp
&& this.checkpointOptions.equals(that.checkpointOptions);
}
}
@Override
public String toString() {
return String.format(
"CheckpointBarrier %d @ %d Options: %s", id, timestamp, checkpointOptions);
}
public boolean isCheckpoint() {
return !checkpointOptions.getCheckpointType().isSavepoint();
}
public CheckpointBarrier asUnaligned() {
return checkpointOptions.isUnalignedCheckpoint()
? this
: new CheckpointBarrier(
getId(), getTimestamp(), getCheckpointOptions().toUnaligned());
}
}
|
CheckpointBarrier
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/SpringAiVectorStoreEndpointBuilderFactory.java
|
{
"start": 9457,
"end": 11613
}
|
interface ____ {
/**
* Spring AI Vector Store (camel-spring-ai-vector-store)
* Spring AI Vector Store
*
* Category: ai
* Since: 4.17
* Maven coordinates: org.apache.camel:camel-spring-ai-vector-store
*
* @return the dsl builder for the headers' name.
*/
default SpringAiVectorStoreHeaderNameBuilder springAiVectorStore() {
return SpringAiVectorStoreHeaderNameBuilder.INSTANCE;
}
/**
* Spring AI Vector Store (camel-spring-ai-vector-store)
* Spring AI Vector Store
*
* Category: ai
* Since: 4.17
* Maven coordinates: org.apache.camel:camel-spring-ai-vector-store
*
* Syntax: <code>spring-ai-vector-store:storeId</code>
*
* Path parameter: storeId (required)
* The id
*
* @param path storeId
* @return the dsl builder
*/
default SpringAiVectorStoreEndpointBuilder springAiVectorStore(String path) {
return SpringAiVectorStoreEndpointBuilderFactory.endpointBuilder("spring-ai-vector-store", path);
}
/**
* Spring AI Vector Store (camel-spring-ai-vector-store)
* Spring AI Vector Store
*
* Category: ai
* Since: 4.17
* Maven coordinates: org.apache.camel:camel-spring-ai-vector-store
*
* Syntax: <code>spring-ai-vector-store:storeId</code>
*
* Path parameter: storeId (required)
* The id
*
* @param componentName to use a custom component name for the endpoint
* instead of the default name
* @param path storeId
* @return the dsl builder
*/
default SpringAiVectorStoreEndpointBuilder springAiVectorStore(String componentName, String path) {
return SpringAiVectorStoreEndpointBuilderFactory.endpointBuilder(componentName, path);
}
}
/**
* The builder of headers' name for the Spring AI Vector Store component.
*/
public static
|
SpringAiVectorStoreBuilders
|
java
|
dropwizard__dropwizard
|
dropwizard-validation/src/main/java/io/dropwizard/validation/selfvalidating/ViolationCollector.java
|
{
"start": 521,
"end": 8331
}
|
class ____ {
private final ConstraintValidatorContext constraintValidatorContext;
private boolean violationOccurred = false;
/**
* Constructs a new {@link ViolationCollector} with the given {@link ConstraintValidatorContext}.
*
* @param constraintValidatorContext the wrapped {@link ConstraintValidatorContext}
*/
public ViolationCollector(ConstraintValidatorContext constraintValidatorContext) {
this.constraintValidatorContext = constraintValidatorContext;
}
/**
* Adds a new violation to this collector. This also sets {@code violationOccurred} to {@code true}.
* <br/>
* Prefer the method with explicit message parameters if you want to interpolate the message.
*
* @param message the message of the violation
* @see #addViolation(String, Map)
*/
public void addViolation(String message) {
addViolation(message, Collections.emptyMap());
}
/**
* Adds a new violation to this collector. This also sets {@code violationOccurred} to {@code true}.
*
* @param message the message of the violation
* @param messageParameters a map of message parameters which can be interpolated in the violation message
* @since 2.0.3
*/
public void addViolation(String message, Map<String, Object> messageParameters) {
violationOccurred = true;
getContextWithMessageParameters(messageParameters)
.buildConstraintViolationWithTemplate(message)
.addConstraintViolation();
}
/**
* Adds a new violation to this collector. This also sets {@code violationOccurred} to {@code true}.
* <br/>
* Prefer the method with explicit message parameters if you want to interpolate the message.
*
* @param propertyName the name of the property
* @param message the message of the violation
* @see #addViolation(String, String, Map)
* @since 2.0.2
*/
public void addViolation(String propertyName, String message) {
addViolation(propertyName, message, Collections.emptyMap());
}
/**
* Adds a new violation to this collector. This also sets {@code violationOccurred} to {@code true}.
*
* @param propertyName the name of the property
* @param message the message of the violation
* @param messageParameters a map of message parameters which can be interpolated in the violation message
* @since 2.0.3
*/
public void addViolation(String propertyName, String message, Map<String, Object> messageParameters) {
violationOccurred = true;
getContextWithMessageParameters(messageParameters)
.buildConstraintViolationWithTemplate(message)
.addPropertyNode(propertyName)
.addConstraintViolation();
}
/**
* Adds a new violation to this collector. This also sets {@code violationOccurred} to {@code true}.
* Prefer the method with explicit message parameters if you want to interpolate the message.
*
* @param propertyName the name of the property with the violation
* @param index the index of the element with the violation
* @param message the message of the violation (any EL expression will be escaped and not parsed)
* @see ViolationCollector#addViolation(String, Integer, String, Map)
* @since 2.0.2
*/
public void addViolation(String propertyName, Integer index, String message) {
addViolation(propertyName, index, message, Collections.emptyMap());
}
/**
* Adds a new violation to this collector. This also sets {@code violationOccurred} to {@code true}.
*
* @param propertyName the name of the property with the violation
* @param index the index of the element with the violation
* @param message the message of the violation
* @param messageParameters a map of message parameters which can be interpolated in the violation message
* @since 2.0.3
*/
public void addViolation(String propertyName, Integer index, String message, Map<String, Object> messageParameters) {
violationOccurred = true;
getContextWithMessageParameters(messageParameters)
.buildConstraintViolationWithTemplate(message)
.addPropertyNode(propertyName)
.addBeanNode().inIterable().atIndex(index)
.addConstraintViolation();
}
    /**
     * Adds a new violation to this collector. This also sets {@code violationOccurred} to {@code true}.
     * <br/>
     * Prefer the method with explicit message parameters if you want to interpolate the message.
     *
     * @param propertyName the name of the property with the violation
     * @param key the key of the element with the violation
     * @param message the message of the violation
     * @see #addViolation(String, String, String, Map)
     * @since 2.0.2
     */
    public void addViolation(String propertyName, String key, String message) {
        addViolation(propertyName, key, message, Collections.emptyMap());
    }
/**
* Adds a new violation to this collector. This also sets {@code violationOccurred} to {@code true}.
*
* @param propertyName the name of the property with the violation
* @param key the key of the element with the violation
* @param message the message of the violation
* @param messageParameters a map of message parameters which can be interpolated in the violation message
* @since 2.0.3
*/
public void addViolation(String propertyName, String key, String message, Map<String, Object> messageParameters) {
violationOccurred = true;
final HibernateConstraintValidatorContext context = getContextWithMessageParameters(messageParameters);
context.buildConstraintViolationWithTemplate(message)
.addPropertyNode(propertyName)
.addBeanNode().inIterable().atKey(key)
.addConstraintViolation();
}
/**
* Returns a {@link HibernateConstraintValidatorContext} updated with the given message parameters.
*
* @param messageParameters the message parameters to set to the context
* @return the {@link HibernateConstraintValidatorContext}
*/
private HibernateConstraintValidatorContext getContextWithMessageParameters(Map<String, Object> messageParameters) {
final HibernateConstraintValidatorContext context =
constraintValidatorContext.unwrap(HibernateConstraintValidatorContext.class);
for (Map.Entry<String, Object> messageParameter : messageParameters.entrySet()) {
final Object value = messageParameter.getValue();
final String escapedValue = value == null ? null : escapeMessageParameter(value.toString());
context.addMessageParameter(messageParameter.getKey(), escapedValue);
}
return context;
}
    /**
     * This method returns the wrapped context for raw access to the validation framework.
     * <p>
     * If you use the context to add violations directly, make sure to also call
     * <code>setViolationOccurred(true)</code> so this collector reports them.
     *
     * @return the wrapped Hibernate ConstraintValidatorContext
     */
    public ConstraintValidatorContext getContext() {
        return constraintValidatorContext;
    }
    /**
     * Returns whether a violation has previously been recorded, either via one of
     * the <code>addViolation</code> overloads or via {@link #setViolationOccurred(boolean)}.
     *
     * @return if any violation was collected
     */
    public boolean hasViolationOccurred() {
        return violationOccurred;
    }
    /**
     * Manually sets if a violation occurred. This is automatically set if <code>addViolation</code>
     * is called; use it when adding violations directly through {@link #getContext()}.
     *
     * @param violationOccurred if any violation was collected
     */
    public void setViolationOccurred(boolean violationOccurred) {
        this.violationOccurred = violationOccurred;
    }
}
|
ViolationCollector
|
java
|
google__auto
|
value/src/it/functional/src/test/java/com/google/auto/value/AutoValueTest.java
|
{
"start": 112052,
"end": 112304
}
|
class ____ {
abstract String foo();
static Builder builder() {
return new AutoValue_AutoValueTest_BuilderAnnotationsNotCopied.Builder();
}
@AutoValue.Builder
@MyAnnotation("thing")
abstract static
|
BuilderAnnotationsNotCopied
|
java
|
quarkusio__quarkus
|
extensions/smallrye-graphql-client/deployment/src/test/java/io/quarkus/smallrye/graphql/client/deployment/model/TestingGraphQLClientApiWithNoConfigKey.java
|
{
"start": 225,
"end": 381
}
|
interface ____ {
    // Fetches all people known to the server.
    @Query
    public List<Person> people();
    // NOTE(review): presumably returns the value of the named header from the
    // incoming request — confirm against the server-side resolver.
    @Query
    public String returnHeader(String key);
}
|
TestingGraphQLClientApiWithNoConfigKey
|
java
|
spring-projects__spring-framework
|
spring-webmvc/src/main/java/org/springframework/web/servlet/mvc/method/annotation/JsonViewResponseBodyAdvice.java
|
{
"start": 1823,
"end": 2092
}
|
class ____. Consider the use of a composite interface.
*
* @author Rossen Stoyanchev
* @since 4.1
* @see com.fasterxml.jackson.annotation.JsonView
* @see com.fasterxml.jackson.databind.ObjectMapper#writerWithView(Class)
*/
@SuppressWarnings("removal")
public
|
argument
|
java
|
spring-projects__spring-boot
|
module/spring-boot-data-redis/src/dockerTest/java/org/springframework/boot/data/redis/autoconfigure/DataRedisRepositoriesAutoConfigurationTests.java
|
{
"start": 1889,
"end": 3499
}
|
class ____ {
@Container
public static RedisContainer redis = TestImage.container(RedisContainer.class);
private final AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext();
@BeforeEach
void setUp() {
TestPropertyValues
.of("spring.data.redis.host=" + redis.getHost(), "spring.data.redis.port=" + redis.getFirstMappedPort())
.applyTo(this.context.getEnvironment());
}
@AfterEach
void close() {
this.context.close();
}
@Test
void testDefaultRepositoryConfiguration() {
this.context.register(TestConfiguration.class, DataRedisAutoConfiguration.class,
DataRedisRepositoriesAutoConfiguration.class, PropertyPlaceholderAutoConfiguration.class);
this.context.refresh();
assertThat(this.context.getBean(CityRepository.class)).isNotNull();
}
@Test
void testNoRepositoryConfiguration() {
this.context.register(EmptyConfiguration.class, DataRedisAutoConfiguration.class,
DataRedisRepositoriesAutoConfiguration.class, PropertyPlaceholderAutoConfiguration.class);
this.context.refresh();
assertThat(this.context.getBean("redisTemplate")).isNotNull();
}
@Test
void doesNotTriggerDefaultRepositoryDetectionIfCustomized() {
this.context.register(CustomizedConfiguration.class, DataRedisAutoConfiguration.class,
DataRedisRepositoriesAutoConfiguration.class, PropertyPlaceholderAutoConfiguration.class);
this.context.refresh();
assertThat(this.context.getBean(CityRepository.class)).isNotNull();
}
@Configuration(proxyBeanMethods = false)
@TestAutoConfigurationPackage(City.class)
static
|
DataRedisRepositoriesAutoConfigurationTests
|
java
|
quarkusio__quarkus
|
extensions/flyway/deployment/src/test/java/io/quarkus/flyway/test/FlywayExtensionWithMultipleDatasourcesAndCustomizersTest.java
|
{
"start": 4080,
"end": 4335
}
|
    class ____ implements FlywayConfigurationCustomizer {
        // Registers FlywayExtensionCallback2 on the Flyway configuration this
        // customizer is applied to.
        @Override
        public void customize(FluentConfiguration configuration) {
            configuration.callbacks(new FlywayExtensionCallback2());
        }
    }
}
|
AddCallbacksCustomizerForUsersDS
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/MetricsLists.java
|
{
"start": 902,
"end": 1042
}
|
class ____ {
  // Convenience factory for tests: creates a fresh metrics collector and returns
  // a record builder for a record with the given name.
  static MetricsRecordBuilderImpl builder(String name) {
    return new MetricsCollectorImpl().addRecord(name);
  }
}
|
MetricsLists
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/index/shard/ShardPath.java
|
{
"start": 1190,
"end": 14657
}
|
/**
 * Represents the on-disk location of a shard: the data path holding the Lucene
 * index and translog, and the state path holding the shard state metadata. The
 * two paths differ only when a custom data path is configured for the index.
 */
class ____ {
    public static final String INDEX_FOLDER_NAME = "index";
    public static final String TRANSLOG_FOLDER_NAME = "translog";

    private final Path path;
    private final ShardId shardId;
    private final Path shardStatePath;
    private final boolean isCustomDataPath;

    /**
     * Creates a shard path. Both paths must end with {@code <index-uuid>/<shard-id>};
     * when a custom data path is used the two paths must differ.
     */
    public ShardPath(boolean isCustomDataPath, Path dataPath, Path shardStatePath, ShardId shardId) {
        assert dataPath.getFileName().toString().equals(Integer.toString(shardId.id()))
            : "dataPath must end with the shard ID but didn't: " + dataPath.toString();
        // Fix: the following messages previously printed dataPath even though the
        // violated invariant concerns shardStatePath.
        assert shardStatePath.getFileName().toString().equals(Integer.toString(shardId.id()))
            : "shardStatePath must end with the shard ID but didn't: " + shardStatePath.toString();
        assert dataPath.getParent().getFileName().toString().equals(shardId.getIndex().getUUID())
            : "dataPath must end with index path id but didn't: " + dataPath.toString();
        assert shardStatePath.getParent().getFileName().toString().equals(shardId.getIndex().getUUID())
            : "shardStatePath must end with index path id but didn't: " + shardStatePath.toString();
        if (isCustomDataPath && dataPath.equals(shardStatePath)) {
            throw new IllegalArgumentException("shard state path must be different to the data path when using custom data paths");
        }
        this.isCustomDataPath = isCustomDataPath;
        this.path = dataPath;
        this.shardId = shardId;
        this.shardStatePath = shardStatePath;
    }

    /** Returns the translog directory under the data path. */
    public Path resolveTranslog() {
        return path.resolve(TRANSLOG_FOLDER_NAME);
    }

    /** Returns the Lucene index directory under the data path. */
    public Path resolveIndex() {
        return path.resolve(INDEX_FOLDER_NAME);
    }

    public Path getDataPath() {
        return path;
    }

    /** Returns whether the data path currently exists on disk. */
    public boolean exists() {
        return Files.exists(path);
    }

    public ShardId getShardId() {
        return shardId;
    }

    public Path getShardStatePath() {
        return shardStatePath;
    }

    /**
     * Returns the data-path root for this shard. The root is a parent of {@link #getDataPath()} without the index name
     * and the shard ID.
     */
    public Path getRootDataPath() {
        Path noIndexShardId = getDataPath().getParent().getParent();
        return isCustomDataPath ? noIndexShardId : noIndexShardId.getParent(); // also strip the indices folder
    }

    /**
     * Returns the state-path root for this shard. The root is a parent of {@link #getShardStatePath()} without the index name
     * and the shard ID.
     */
    public Path getRootStatePath() {
        return getShardStatePath().getParent().getParent().getParent(); // also strip the indices folder
    }

    /**
     * Returns <code>true</code> iff the data location is a custom data location and therefore outside of the nodes configured data paths.
     */
    public boolean isCustomDataPath() {
        return isCustomDataPath;
    }

    /**
     * This method walks through the nodes shard paths to find the data and state path for the given shard. If multiple
     * directories with a valid shard state exist the one with the highest version will be used.
     * <b>Note:</b> this method resolves custom data locations for the shard if such a custom data path is provided.
     */
    public static ShardPath loadShardPath(Logger logger, NodeEnvironment env, ShardId shardId, String customDataPath) throws IOException {
        final Path[] paths = env.availableShardPaths(shardId);
        final Path sharedDataPath = env.sharedDataPath();
        return loadShardPath(logger, shardId, customDataPath, paths, sharedDataPath);
    }

    /**
     * This method walks through the nodes shard paths to find the data and state path for the given shard. If multiple
     * directories with a valid shard state exist the one with the highest version will be used.
     * <b>Note:</b> this method resolves custom data locations for the shard.
     */
    public static ShardPath loadShardPath(
        Logger logger,
        ShardId shardId,
        String customDataPath,
        Path[] availableShardPaths,
        Path sharedDataPath
    ) throws IOException {
        final String indexUUID = shardId.getIndex().getUUID();
        Path loadedPath = null;
        for (Path path : availableShardPaths) {
            // EMPTY is safe here because we never call namedObject
            ShardStateMetadata load = ShardStateMetadata.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, path);
            if (load != null) {
                if (load.indexUUID.equals(indexUUID) == false && IndexMetadata.INDEX_UUID_NA_VALUE.equals(load.indexUUID) == false) {
                    logger.warn(
                        "{} found shard on path: [{}] with a different index UUID - this "
                            + "shard seems to be leftover from a different index with the same name. "
                            + "Remove the leftover shard in order to reuse the path with the current index",
                        shardId,
                        path
                    );
                    throw new IllegalStateException(
                        shardId
                            + " index UUID in shard state was: "
                            + load.indexUUID
                            + " expected: "
                            + indexUUID
                            + " on shard path: "
                            + path
                    );
                }
                if (loadedPath == null) {
                    loadedPath = path;
                } else {
                    throw new IllegalStateException(shardId + " more than one shard state found");
                }
            }
        }
        if (loadedPath == null) {
            return null;
        } else {
            final Path dataPath;
            final Path statePath = loadedPath;
            final boolean hasCustomDataPath = Strings.isNotEmpty(customDataPath);
            if (hasCustomDataPath) {
                dataPath = NodeEnvironment.resolveCustomLocation(customDataPath, shardId, sharedDataPath);
            } else {
                dataPath = statePath;
            }
            logger.debug("{} loaded data path [{}], state path [{}]", shardId, dataPath, statePath);
            return new ShardPath(hasCustomDataPath, dataPath, statePath, shardId);
        }
    }

    /**
     * This method tries to delete left-over shards where the index name has been reused but the UUID is different
     * to allow the new shard to be allocated.
     */
    public static void deleteLeftoverShardDirectory(
        final Logger logger,
        final NodeEnvironment env,
        final ShardLock lock,
        final IndexSettings indexSettings,
        final Consumer<Path[]> listener
    ) throws IOException {
        final String indexUUID = indexSettings.getUUID();
        final Path[] paths = env.availableShardPaths(lock.getShardId());
        for (Path path : paths) {
            // EMPTY is safe here because we never call namedObject
            ShardStateMetadata load = ShardStateMetadata.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, path);
            if (load != null) {
                if (load.indexUUID.equals(indexUUID) == false && IndexMetadata.INDEX_UUID_NA_VALUE.equals(load.indexUUID) == false) {
                    logger.warn("{} deleting leftover shard on path: [{}] with a different index UUID", lock.getShardId(), path);
                    assert Files.isDirectory(path) : path + " is not a directory";
                    NodeEnvironment.acquireFSLockForPaths(indexSettings, path);
                    listener.accept(new Path[] { path });
                    IOUtils.rm(path);
                }
            }
        }
    }

    /**
     * Picks a path for a new shard, preferring paths with enough free space for the
     * estimated shard size and the fewest shards of the same index.
     */
    public static ShardPath selectNewPathForShard(
        NodeEnvironment env,
        ShardId shardId,
        IndexSettings indexSettings,
        long avgShardSizeInBytes,
        Map<Path, Integer> dataPathToShardCount
    ) throws IOException {
        final Path dataPath;
        final Path statePath;
        if (indexSettings.hasCustomDataPath()) {
            dataPath = env.resolveCustomLocation(indexSettings.customDataPath(), shardId);
            statePath = env.dataPaths()[0].resolve(shardId);
        } else {
            BigInteger totFreeSpace = BigInteger.ZERO;
            for (NodeEnvironment.DataPath nodeDataPath : env.dataPaths()) {
                totFreeSpace = totFreeSpace.add(BigInteger.valueOf(nodeDataPath.fileStore.getUsableSpace()));
            }
            // TODO: this is a hack!! We should instead keep track of incoming (relocated) shards since we know
            // how large they will be once they're done copying, instead of a silly guess for such cases:
            // Very rough heuristic of how much disk space we expect the shard will use over its lifetime, the max of current average
            // shard size across the cluster and 5% of the total available free space on this node:
            BigInteger estShardSizeInBytes = BigInteger.valueOf(avgShardSizeInBytes).max(totFreeSpace.divide(BigInteger.valueOf(20)));
            // TODO - do we need something more extensible? Yet, this does the job for now...
            final NodeEnvironment.DataPath[] paths = env.dataPaths();
            // If no better path is chosen, use the one with the most space by default
            NodeEnvironment.DataPath bestPath = getPathWithMostFreeSpace(env);
            if (paths.length != 1) {
                Map<NodeEnvironment.DataPath, Long> pathToShardCount = env.shardCountPerPath(shardId.getIndex());
                // Compute how much space there is on each path
                final Map<NodeEnvironment.DataPath, BigInteger> pathsToSpace = Maps.newMapWithExpectedSize(paths.length);
                for (NodeEnvironment.DataPath nodeDataPath : paths) {
                    FileStore fileStore = nodeDataPath.fileStore;
                    BigInteger usableBytes = BigInteger.valueOf(fileStore.getUsableSpace());
                    pathsToSpace.put(nodeDataPath, usableBytes);
                }
                bestPath = Arrays.stream(paths)
                    // Filter out paths that have enough space
                    .filter((path) -> pathsToSpace.get(path).subtract(estShardSizeInBytes).compareTo(BigInteger.ZERO) > 0)
                    // Sort by the number of shards for this index
                    .sorted((p1, p2) -> {
                        int cmp = Long.compare(pathToShardCount.getOrDefault(p1, 0L), pathToShardCount.getOrDefault(p2, 0L));
                        if (cmp == 0) {
                            // if the number of shards is equal, tie-break with the number of total shards
                            cmp = Integer.compare(
                                dataPathToShardCount.getOrDefault(p1.path, 0),
                                dataPathToShardCount.getOrDefault(p2.path, 0)
                            );
                            if (cmp == 0) {
                                // if the number of shards is equal, tie-break with the usable bytes
                                cmp = pathsToSpace.get(p2).compareTo(pathsToSpace.get(p1));
                            }
                        }
                        return cmp;
                    })
                    // Return the first result
                    .findFirst()
                    // Or the existing best path if there aren't any that fit the criteria
                    .orElse(bestPath);
            }
            statePath = bestPath.resolve(shardId);
            dataPath = statePath;
        }
        return new ShardPath(indexSettings.hasCustomDataPath(), dataPath, statePath, shardId);
    }

    /** Returns the data path with the most usable bytes, or null if the environment has no paths. */
    static NodeEnvironment.DataPath getPathWithMostFreeSpace(NodeEnvironment env) throws IOException {
        final NodeEnvironment.DataPath[] paths = env.dataPaths();
        NodeEnvironment.DataPath bestPath = null;
        long maxUsableBytes = Long.MIN_VALUE;
        for (NodeEnvironment.DataPath dataPath : paths) {
            FileStore fileStore = dataPath.fileStore;
            long usableBytes = fileStore.getUsableSpace(); // NB usable bytes doesn't account for reserved space (e.g. incoming recoveries)
            assert usableBytes >= 0 : "usable bytes must be >= 0, got: " + usableBytes;
            if (bestPath == null || usableBytes > maxUsableBytes) {
                // This path has been determined to be "better" based on the usable bytes
                maxUsableBytes = usableBytes;
                bestPath = dataPath;
            }
        }
        return bestPath;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        final ShardPath shardPath = (ShardPath) o;
        if (Objects.equals(shardId, shardPath.shardId) == false) {
            return false;
        }
        if (Objects.equals(path, shardPath.path) == false) {
            return false;
        }
        return true;
    }

    @Override
    public int hashCode() {
        int result = path != null ? path.hashCode() : 0;
        result = 31 * result + (shardId != null ? shardId.hashCode() : 0);
        return result;
    }

    @Override
    public String toString() {
        return "ShardPath{path=" + path + ", shard=" + shardId + '}';
    }
}
|
ShardPath
|
java
|
apache__camel
|
components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/spring/config/SpringCamelContextSimpleCustomDefaultThreadPoolProfileTest.java
|
{
"start": 1299,
"end": 2306
}
|
class ____ extends SpringTestSupport {
    // Loads the Spring XML that defines the Camel context under test and its
    // thread pool profile configuration.
    @Override
    protected AbstractXmlApplicationContext createApplicationContext() {
        return new ClassPathXmlApplicationContext(
                "org/apache/camel/spring/config/SpringCamelContextSimpleCustomDefaultThreadPoolProfileTest.xml");
    }
    // Verifies the default thread pool profile of context "camel-B": maxPoolSize
    // is 25, while pool size, keep-alive, queue size and rejected policy
    // presumably come from inherited defaults — confirm against the XML file.
    @Test
    public void testDefaultThreadPoolProfile() throws Exception {
        CamelContext context = getMandatoryBean(CamelContext.class, "camel-B");
        ThreadPoolProfile profile = context.getExecutorServiceManager().getDefaultThreadPoolProfile();
        assertEquals(25, profile.getMaxPoolSize().intValue());
        // should inherit default values
        assertEquals(10, profile.getPoolSize().intValue());
        assertEquals(60, profile.getKeepAliveTime().longValue());
        assertEquals(1000, profile.getMaxQueueSize().intValue());
        assertEquals(ThreadPoolRejectedPolicy.CallerRuns, profile.getRejectedPolicy());
    }
}
|
SpringCamelContextSimpleCustomDefaultThreadPoolProfileTest
|
java
|
apache__maven
|
api/maven-api-di/src/main/java/org/apache/maven/api/di/Typed.java
|
{
"start": 1733,
"end": 1946
}
|
interface ____ {
/**
* Specifies the types that should be considered for dependency injection.
* <p>
* When specified, only the listed types will be available for injection,
* even if the
|
Typed
|
java
|
apache__avro
|
lang/java/thrift/src/test/java/org/apache/avro/thrift/test/Foo.java
|
{
"start": 71438,
"end": 72490
}
|
class ____ extends org.apache.thrift.scheme.StandardScheme<zip_args> {
    // Deserializes a zip_args struct from the protocol. zip_args declares no
    // fields (the switch has only a default arm), so every field encountered on
    // the wire is unknown and skipped until the STOP marker.
    public void read(org.apache.thrift.protocol.TProtocol iprot, zip_args struct)
        throws org.apache.thrift.TException {
      org.apache.thrift.protocol.TField schemeField;
      iprot.readStructBegin();
      while (true) {
        schemeField = iprot.readFieldBegin();
        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
          break;
        }
        switch (schemeField.id) {
          default:
            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
        }
        iprot.readFieldEnd();
      }
      iprot.readStructEnd();
      struct.validate();
    }
    // Serializes a zip_args struct: an empty field list terminated by a STOP marker.
    public void write(org.apache.thrift.protocol.TProtocol oprot, zip_args struct)
        throws org.apache.thrift.TException {
      struct.validate();
      oprot.writeStructBegin(STRUCT_DESC);
      oprot.writeFieldStop();
      oprot.writeStructEnd();
    }
}
private static
|
zip_argsStandardScheme
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketInputStream.java
|
{
"start": 1656,
"end": 1808
}
|
class ____ extends InputStream
implements ReadableByteChannel {
private Reader reader;
private static
|
SocketInputStream
|
java
|
google__guava
|
android/guava-tests/test/com/google/common/collect/TreeBasedTableColumnMapTest.java
|
{
"start": 838,
"end": 1084
}
|
class ____ extends ColumnMapTests {
  public TreeBasedTableColumnMapTest() {
    // NOTE(review): flag meanings are defined by the ColumnMapTests constructor —
    // confirm against that superclass before relying on them.
    super(false, true, true, false);
  }
  @Override
  Table<Integer, String, Character> makeTable() {
    // Column-map tests run against a sorted, tree-based table implementation.
    return TreeBasedTable.create();
  }
}
|
TreeBasedTableColumnMapTest
|
java
|
spring-projects__spring-framework
|
spring-jms/src/main/java/org/springframework/jms/listener/AbstractJmsListeningContainer.java
|
{
"start": 2690,
"end": 19923
}
|
class ____ extends JmsDestinationAccessor
implements BeanNameAware, DisposableBean, SmartLifecycle {
private @Nullable String clientId;
private boolean autoStartup = true;
private int phase = DEFAULT_PHASE;
private @Nullable String beanName;
private @Nullable Connection sharedConnection;
private boolean sharedConnectionStarted = false;
protected final Lock sharedConnectionLock = new ReentrantLock();
private boolean active = false;
private volatile boolean running;
private final List<Object> pausedTasks = new ArrayList<>();
protected final Lock lifecycleLock = new ReentrantLock();
protected final Condition lifecycleCondition = this.lifecycleLock.newCondition();
/**
* Specify the JMS client ID for a shared Connection created and used
* by this container.
* <p>Note that client IDs need to be unique among all active Connections
* of the underlying JMS provider. Furthermore, a client ID can only be
* assigned if the original ConnectionFactory hasn't already assigned one.
* @see jakarta.jms.Connection#setClientID
* @see #setConnectionFactory
*/
public void setClientId(@Nullable String clientId) {
this.clientId = clientId;
}
/**
* Return the JMS client ID for the shared Connection created and used
* by this container, if any.
*/
public @Nullable String getClientId() {
return this.clientId;
}
/**
* Set whether to automatically start the container after initialization.
* <p>Default is "true"; set this to "false" to allow for manual startup
* through the {@link #start()} method.
*/
public void setAutoStartup(boolean autoStartup) {
this.autoStartup = autoStartup;
}
@Override
public boolean isAutoStartup() {
return this.autoStartup;
}
/**
* Specify the lifecycle phase in which this container should be started and stopped.
* <p>The startup order proceeds from lowest to highest, and the shutdown order
* is the reverse of that. The default is {@link #DEFAULT_PHASE} meaning that
* this container starts as late as possible and stops as soon as possible.
* @see SmartLifecycle#getPhase()
*/
public void setPhase(int phase) {
this.phase = phase;
}
/**
* Return the lifecycle phase in which this container will be started and stopped.
* @see #setPhase
*/
@Override
public int getPhase() {
return this.phase;
}
@Override
public void setBeanName(@Nullable String beanName) {
this.beanName = beanName;
}
/**
* Return the bean name that this listener container has been assigned
* in its containing bean factory, if any.
*/
protected final @Nullable String getBeanName() {
return this.beanName;
}
/**
* Delegates to {@link #validateConfiguration()} and {@link #initialize()}.
*/
@Override
public void afterPropertiesSet() {
super.afterPropertiesSet();
validateConfiguration();
initialize();
}
/**
* Validate the configuration of this container.
* <p>The default implementation is empty. To be overridden in subclasses.
*/
protected void validateConfiguration() {
}
/**
* Calls {@link #shutdown()} when the BeanFactory destroys the container instance.
* @see #shutdown()
*/
@Override
public void destroy() {
shutdown();
}
//-------------------------------------------------------------------------
// Lifecycle methods for starting and stopping the container
//-------------------------------------------------------------------------
/**
* Initialize this container.
* <p>Creates a JMS Connection, starts the {@link jakarta.jms.Connection}
* (if {@link #setAutoStartup(boolean) "autoStartup"} hasn't been turned off),
* and calls {@link #doInitialize()}.
* @throws org.springframework.jms.JmsException if startup failed
*/
public void initialize() throws JmsException {
try {
this.lifecycleLock.lock();
try {
this.active = true;
this.lifecycleCondition.signalAll();
}
finally {
this.lifecycleLock.unlock();
}
doInitialize();
}
catch (JMSException ex) {
releaseSharedConnection();
throw convertJmsAccessException(ex);
}
}
/**
* Stop the shared Connection, call {@link #doShutdown()},
* and close this container.
* @throws JmsException if shutdown failed
*/
public void shutdown() throws JmsException {
logger.debug("Shutting down JMS listener container");
boolean wasRunning;
this.lifecycleLock.lock();
try {
wasRunning = this.running;
this.running = false;
this.active = false;
this.pausedTasks.clear();
this.lifecycleCondition.signalAll();
}
finally {
this.lifecycleLock.unlock();
}
// Stop shared Connection early, if necessary.
if (wasRunning && sharedConnectionEnabled()) {
try {
stopSharedConnection();
}
catch (Throwable ex) {
logger.debug("Could not stop JMS Connection on shutdown", ex);
}
}
// Shut down the invokers.
try {
doShutdown();
}
catch (JMSException ex) {
throw convertJmsAccessException(ex);
}
finally {
if (sharedConnectionEnabled()) {
releaseSharedConnection();
}
}
}
/**
* Return whether this container is currently active,
* that is, whether it has been set up but not shut down yet.
*/
public final boolean isActive() {
this.lifecycleLock.lock();
try {
return this.active;
}
finally {
this.lifecycleLock.unlock();
}
}
/**
* Start this container.
* @throws JmsException if starting failed
* @see #doStart
*/
@Override
public void start() throws JmsException {
try {
doStart();
}
catch (JMSException ex) {
throw convertJmsAccessException(ex);
}
}
/**
* Start the shared Connection, if any, and notify all invoker tasks.
* @throws JMSException if thrown by JMS API methods
* @see #startSharedConnection
*/
protected void doStart() throws JMSException {
// Lazily establish a shared Connection, if necessary.
if (sharedConnectionEnabled()) {
establishSharedConnection();
}
// Reschedule paused tasks, if any.
this.lifecycleLock.lock();
try {
this.running = true;
this.lifecycleCondition.signalAll();
resumePausedTasks();
}
finally {
this.lifecycleLock.unlock();
}
// Start the shared Connection, if any.
if (sharedConnectionEnabled()) {
startSharedConnection();
}
}
/**
* Stop this container.
* @throws JmsException if stopping failed
* @see #doStop
*/
@Override
public void stop() throws JmsException {
try {
doStop();
}
catch (JMSException ex) {
throw convertJmsAccessException(ex);
}
}
/**
* Notify all invoker tasks and stop the shared Connection, if any.
* @throws JMSException if thrown by JMS API methods
* @see #stopSharedConnection
*/
protected void doStop() throws JMSException {
this.lifecycleLock.lock();
try {
this.running = false;
this.lifecycleCondition.signalAll();
}
finally {
this.lifecycleLock.unlock();
}
if (sharedConnectionEnabled()) {
stopSharedConnection();
}
}
/**
* Determine whether this container is currently running,
* that is, whether it has been started and not stopped yet.
* @see #start()
* @see #stop()
* @see #runningAllowed()
*/
@Override
public final boolean isRunning() {
return (this.running && runningAllowed());
}
/**
* Check whether this container's listeners are generally allowed to run.
* <p>This implementation always returns {@code true}; the default 'running'
* state is purely determined by {@link #start()} / {@link #stop()}.
* <p>Subclasses may override this method to check against temporary
* conditions that prevent listeners from actually running. In other words,
* they may apply further restrictions to the 'running' state, returning
* {@code false} if such a restriction prevents listeners from running.
*/
protected boolean runningAllowed() {
return true;
}
//-------------------------------------------------------------------------
// Management of a shared JMS Connection
//-------------------------------------------------------------------------
/**
* Establish a shared Connection for this container.
* <p>The default implementation delegates to {@link #createSharedConnection()},
* which does one immediate attempt and throws an exception if it fails.
* Can be overridden to have a recovery process in place, retrying
* until a Connection can be successfully established.
* @throws JMSException if thrown by JMS API methods
*/
protected void establishSharedConnection() throws JMSException {
this.sharedConnectionLock.lock();
try {
if (this.sharedConnection == null) {
this.sharedConnection = createSharedConnection();
logger.debug("Established shared JMS Connection");
}
}
finally {
this.sharedConnectionLock.unlock();
}
}
/**
* Refresh the shared Connection that this container holds.
* <p>Called on startup and also after an infrastructure exception
* that occurred during invoker setup and/or execution.
* @throws JMSException if thrown by JMS API methods
*/
protected final void refreshSharedConnection() throws JMSException {
this.sharedConnectionLock.lock();
try {
releaseSharedConnection();
this.sharedConnection = createSharedConnection();
if (this.sharedConnectionStarted) {
this.sharedConnection.start();
}
}
finally {
this.sharedConnectionLock.unlock();
}
}
/**
* Create a shared Connection for this container.
* <p>The default implementation creates a standard Connection
* and prepares it through {@link #prepareSharedConnection}.
* @return the prepared Connection
* @throws JMSException if the creation failed
*/
protected Connection createSharedConnection() throws JMSException {
Connection con = createConnection();
try {
prepareSharedConnection(con);
return con;
}
catch (JMSException ex) {
JmsUtils.closeConnection(con);
throw ex;
}
}
/**
* Prepare the given Connection, which is about to be registered
* as shared Connection for this container.
* <p>The default implementation sets the specified client id, if any.
* Subclasses can override this to apply further settings.
* @param connection the Connection to prepare
* @throws JMSException if the preparation efforts failed
* @see #getClientId()
*/
protected void prepareSharedConnection(Connection connection) throws JMSException {
String clientId = getClientId();
if (clientId != null) {
connection.setClientID(clientId);
}
}
/**
* Start the shared Connection.
* @throws JMSException if thrown by JMS API methods
* @see jakarta.jms.Connection#start()
*/
protected void startSharedConnection() throws JMSException {
this.sharedConnectionLock.lock();
try {
this.sharedConnectionStarted = true;
if (this.sharedConnection != null) {
try {
this.sharedConnection.start();
}
catch (jakarta.jms.IllegalStateException ex) {
logger.debug("Ignoring Connection start exception - assuming already started: " + ex);
}
}
}
finally {
this.sharedConnectionLock.unlock();
}
}
/**
* Stop the shared Connection.
* @throws JMSException if thrown by JMS API methods
* @see jakarta.jms.Connection#start()
*/
protected void stopSharedConnection() throws JMSException {
this.sharedConnectionLock.lock();
try {
this.sharedConnectionStarted = false;
if (this.sharedConnection != null) {
try {
this.sharedConnection.stop();
}
catch (jakarta.jms.IllegalStateException ex) {
logger.debug("Ignoring Connection stop exception - assuming already stopped: " + ex);
}
}
}
finally {
this.sharedConnectionLock.unlock();
}
}
/**
* Release the shared Connection, if any.
* @since 6.1
* @see ConnectionFactoryUtils#releaseConnection
*/
protected final void releaseSharedConnection() {
this.sharedConnectionLock.lock();
try {
ConnectionFactoryUtils.releaseConnection(
this.sharedConnection, getConnectionFactory(), this.sharedConnectionStarted);
this.sharedConnection = null;
}
finally {
this.sharedConnectionLock.unlock();
}
}
/**
* Return the shared JMS Connection maintained by this container.
* Available after initialization.
* @return the shared Connection (never {@code null})
* @throws IllegalStateException if this container does not maintain a
* shared Connection, or if the Connection hasn't been initialized yet
* @see #sharedConnectionEnabled()
*/
protected final Connection getSharedConnection() {
if (!sharedConnectionEnabled()) {
throw new IllegalStateException(
"This listener container does not maintain a shared Connection");
}
this.sharedConnectionLock.lock();
try {
if (this.sharedConnection == null) {
throw new SharedConnectionNotInitializedException(
"This listener container's shared Connection has not been initialized yet");
}
return this.sharedConnection;
}
finally {
this.sharedConnectionLock.unlock();
}
}
//-------------------------------------------------------------------------
// Management of paused tasks
//-------------------------------------------------------------------------
/**
* Take the given task object and reschedule it, either immediately if
* this container is currently running, or later once this container
* has been restarted.
* <p>If this container has already been shut down, the task will not
* get rescheduled at all.
* @param task the task object to reschedule
* @return whether the task has been rescheduled
* (either immediately or for a restart of this container)
* @see #doRescheduleTask
*/
protected final boolean rescheduleTaskIfNecessary(Object task) {
if (this.running) {
try {
doRescheduleTask(task);
}
catch (RuntimeException ex) {
logRejectedTask(task, ex);
this.pausedTasks.add(task);
}
return true;
}
else if (this.active) {
this.pausedTasks.add(task);
return true;
}
else {
return false;
}
}
/**
* Try to resume all paused tasks.
* Tasks for which rescheduling failed simply remain in paused mode.
*/
protected void resumePausedTasks() {
this.lifecycleLock.lock();
try {
if (!this.pausedTasks.isEmpty()) {
for (Iterator<?> it = this.pausedTasks.iterator(); it.hasNext();) {
Object task = it.next();
try {
doRescheduleTask(task);
it.remove();
if (logger.isDebugEnabled()) {
logger.debug("Resumed paused task: " + task);
}
}
catch (RuntimeException ex) {
logRejectedTask(task, ex);
// Keep the task in paused mode...
}
}
}
}
finally {
this.lifecycleLock.unlock();
}
}
/**
* Determine the number of currently paused tasks, if any.
*/
public int getPausedTaskCount() {
this.lifecycleLock.lock();
try {
return this.pausedTasks.size();
}
finally {
this.lifecycleLock.unlock();
}
}
/**
* Reschedule the given task object immediately.
* <p>To be implemented by subclasses if they ever call
* {@code rescheduleTaskIfNecessary}.
* This implementation throws an UnsupportedOperationException.
* @param task the task object to reschedule
* @see #rescheduleTaskIfNecessary
*/
protected void doRescheduleTask(Object task) {
throw new UnsupportedOperationException(
ClassUtils.getShortName(getClass()) + " does not support rescheduling of tasks");
}
/**
* Log a task that has been rejected by {@link #doRescheduleTask}.
* <p>The default implementation simply logs a corresponding message
* at warn level.
* @param task the rejected task object
* @param ex the exception thrown from {@link #doRescheduleTask}
*/
protected void logRejectedTask(Object task, RuntimeException ex) {
if (logger.isWarnEnabled()) {
logger.warn("Listener container task [" + task + "] has been rejected and paused: " + ex);
}
}
//-------------------------------------------------------------------------
// Template methods to be implemented by subclasses
//-------------------------------------------------------------------------
/**
* Return whether a shared JMS Connection should be maintained
* by this container base class.
* @see #getSharedConnection()
*/
protected abstract boolean sharedConnectionEnabled();
/**
* Register any invokers within this container.
* <p>Subclasses need to implement this method for their specific
* invoker management process.
* <p>A shared JMS Connection, if any, will already have been
* started at this point.
* @throws JMSException if registration failed
* @see #getSharedConnection()
*/
protected abstract void doInitialize() throws JMSException;
/**
* Close the registered invokers.
* <p>Subclasses need to implement this method for their specific
* invoker management process.
* <p>A shared JMS Connection, if any, will automatically be closed
* <i>afterwards</i>.
* @throws JMSException if shutdown failed
* @see #shutdown()
*/
protected abstract void doShutdown() throws JMSException;
/**
* Exception that indicates that the initial setup of this container's
* shared JMS Connection failed. This is indicating to invokers that they need
* to establish the shared Connection themselves on first access.
*/
@SuppressWarnings("serial")
public static
|
AbstractJmsListeningContainer
|
java
|
apache__camel
|
components/camel-bindy/src/test/java/org/apache/camel/dataformat/bindy/fixed/unmarshall/simple/trim/BindySimpleFixedLengthUnmarshallTest.java
|
{
"start": 1677,
"end": 2810
}
|
class ____ {
private static final String URI_MOCK_RESULT = "mock:result";
private static final String URI_DIRECT_START = "direct:start";
@Produce(URI_DIRECT_START)
private ProducerTemplate template;
@EndpointInject(URI_MOCK_RESULT)
private MockEndpoint result;
private String expected;
@Test
@DirtiesContext
public void testUnMarshallMessage() throws Exception {
expected = "10A9 PaulineM ISINXD12345678BUYShare000002500.45USD01-08-2009Hello ";
template.sendBody(expected);
result.expectedMessageCount(1);
result.assertIsSatisfied();
// check the model
BindySimpleFixedLengthUnmarshallTest.Order order
= result.getReceivedExchanges().get(0).getIn().getBody(BindySimpleFixedLengthUnmarshallTest.Order.class);
assertEquals(10, order.getOrderNr());
// the field is not trimmed
assertEquals(" Pauline", order.getFirstName());
assertEquals("M ", order.getLastName());
assertEquals("Hello ", order.getComment());
}
public static
|
BindySimpleFixedLengthUnmarshallTest
|
java
|
hibernate__hibernate-orm
|
tooling/metamodel-generator/src/test/java/org/hibernate/processor/test/constructor/MapperSuperClassExtendingNonEntityWithInstanceGetEntityManager.java
|
{
"start": 256,
"end": 492
}
|
class ____
extends NonEntityWithInstanceGetEntityManager {
@Id
private Long id;
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
}
|
MapperSuperClassExtendingNonEntityWithInstanceGetEntityManager
|
java
|
apache__spark
|
common/unsafe/src/main/java/org/apache/spark/sql/catalyst/util/CollationSupport.java
|
{
"start": 21951,
"end": 24441
}
|
class ____ {
public static UTF8String exec(final UTF8String srcString) {
return execBinary(srcString);
}
public static UTF8String exec(
final UTF8String srcString,
final UTF8String trimString,
final int collationId) {
CollationFactory.Collation collation = CollationFactory.fetchCollation(collationId);
if (collation.isUtf8BinaryType && !collation.supportsSpaceTrimming) {
return execBinary(srcString, trimString);
}
if (collation.isUtf8BinaryType) {
// special handling needed for utf8_binary_rtrim collation.
return execBinaryTrim(srcString, trimString, collationId);
} else if (collation.isUtf8LcaseType) {
return execLowercase(srcString, trimString, collationId);
} else {
return execICU(srcString, trimString, collationId);
}
}
public static String genCode(final String srcString) {
return String.format("CollationSupport.StringTrim.execBinary(%s)", srcString);
}
public static String genCode(
final String srcString,
final String trimString,
final int collationId) {
String expr = "CollationSupport.StringTrim.exec";
if (collationId == CollationFactory.UTF8_BINARY_COLLATION_ID) {
return String.format(expr + "Binary(%s, %s)", srcString, trimString);
} else {
return String.format(expr + "(%s, %s, %d)", srcString, trimString, collationId);
}
}
public static UTF8String execBinary(
final UTF8String srcString) {
return srcString.trim();
}
public static UTF8String execBinary(
final UTF8String srcString,
final UTF8String trimString) {
return srcString.trim(trimString);
}
public static UTF8String execLowercase(
final UTF8String srcString,
final UTF8String trimString,
final int collationId) {
return CollationAwareUTF8String.lowercaseTrim(srcString, trimString, collationId);
}
public static UTF8String execICU(
final UTF8String srcString,
final UTF8String trimString,
final int collationId) {
return CollationAwareUTF8String.trim(srcString, trimString, collationId);
}
public static UTF8String execBinaryTrim(
final UTF8String srcString,
final UTF8String trimString,
final int collationId) {
return CollationAwareUTF8String.binaryTrim(srcString, trimString, collationId);
}
}
public static
|
StringTrim
|
java
|
grpc__grpc-java
|
xds/src/main/java/io/grpc/xds/GrpcXdsTransportFactory.java
|
{
"start": 3291,
"end": 4825
}
|
class ____<ReqT, RespT> implements
XdsTransportFactory.StreamingCall<ReqT, RespT> {
private final ClientCall<ReqT, RespT> call;
public XdsStreamingCall(
String methodName,
MethodDescriptor.Marshaller<ReqT> reqMarshaller,
MethodDescriptor.Marshaller<RespT> respMarshaller,
CallCredentials callCredentials) {
this.call =
channel.newCall(
MethodDescriptor.<ReqT, RespT>newBuilder()
.setFullMethodName(methodName)
.setType(MethodDescriptor.MethodType.BIDI_STREAMING)
.setRequestMarshaller(reqMarshaller)
.setResponseMarshaller(respMarshaller)
.build(),
CallOptions.DEFAULT.withCallCredentials(
callCredentials)); // TODO(zivy): support waitForReady
}
@Override
public void start(EventHandler<RespT> eventHandler) {
call.start(new EventHandlerToCallListenerAdapter<>(eventHandler), new Metadata());
call.request(1);
}
@Override
public void sendMessage(ReqT message) {
call.sendMessage(message);
}
@Override
public void startRecvMessage() {
call.request(1);
}
@Override
public void sendError(Exception e) {
call.cancel("Cancelled by XdsClientImpl", e);
}
@Override
public boolean isReady() {
return call.isReady();
}
}
}
private static
|
XdsStreamingCall
|
java
|
spring-projects__spring-boot
|
smoke-test/spring-boot-smoke-test-test/src/test/java/smoketest/test/domain/VehicleIdentificationNumberTests.java
|
{
"start": 1124,
"end": 2553
}
|
class ____ {
private static final String SAMPLE_VIN = "41549485710496749";
@Test
@SuppressWarnings("NullAway") // Test null check
void createWhenVinIsNullShouldThrowException() {
assertThatIllegalArgumentException().isThrownBy(() -> new VehicleIdentificationNumber(null))
.withMessage("'vin' must not be null");
}
@Test
void createWhenVinIsMoreThan17CharsShouldThrowException() {
assertThatIllegalArgumentException().isThrownBy(() -> new VehicleIdentificationNumber("012345678901234567"))
.withMessage("'vin' must be exactly 17 characters");
}
@Test
void createWhenVinIsLessThan17CharsShouldThrowException() {
assertThatIllegalArgumentException().isThrownBy(() -> new VehicleIdentificationNumber("0123456789012345"))
.withMessage("'vin' must be exactly 17 characters");
}
@Test
void toStringShouldReturnVin() {
VehicleIdentificationNumber vin = new VehicleIdentificationNumber(SAMPLE_VIN);
assertThat(vin).hasToString(SAMPLE_VIN);
}
@Test
void equalsAndHashCodeShouldBeBasedOnVin() {
VehicleIdentificationNumber vin1 = new VehicleIdentificationNumber(SAMPLE_VIN);
VehicleIdentificationNumber vin2 = new VehicleIdentificationNumber(SAMPLE_VIN);
VehicleIdentificationNumber vin3 = new VehicleIdentificationNumber("00000000000000000");
assertThat(vin1).hasSameHashCodeAs(vin2);
assertThat(vin1).isEqualTo(vin1).isEqualTo(vin2).isNotEqualTo(vin3);
}
}
|
VehicleIdentificationNumberTests
|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/internal/jdk8/CompletableFromCompletionStage.java
|
{
"start": 1062,
"end": 1831
}
|
class ____<T> extends Completable {
final CompletionStage<T> stage;
public CompletableFromCompletionStage(CompletionStage<T> stage) {
this.stage = stage;
}
@Override
protected void subscribeActual(CompletableObserver observer) {
// We need an indirection because one can't detach from a whenComplete
// and cancellation should not hold onto the stage.
BiConsumerAtomicReference<Object> whenReference = new BiConsumerAtomicReference<>();
CompletionStageHandler<Object> handler = new CompletionStageHandler<>(observer, whenReference);
whenReference.lazySet(handler);
observer.onSubscribe(handler);
stage.whenComplete(whenReference);
}
static final
|
CompletableFromCompletionStage
|
java
|
apache__camel
|
core/camel-core-reifier/src/main/java/org/apache/camel/reifier/validator/EndpointValidatorReifier.java
|
{
"start": 1251,
"end": 2186
}
|
class ____ extends ValidatorReifier<EndpointValidatorDefinition> {
public EndpointValidatorReifier(CamelContext camelContext, ValidatorDefinition definition) {
super(camelContext, (EndpointValidatorDefinition) definition);
}
@Override
// Validator implements AutoCloseable and must be closed by this method client.
protected Validator doCreateValidator() {
Endpoint endpoint = definition.getUri() != null
? camelContext.getEndpoint(definition.getUri()) : lookupByNameAndType(definition.getRef(), Endpoint.class);
SendProcessor processor = new SendProcessor(endpoint, ExchangePattern.InOut);
@SuppressWarnings("resource")
// NOTE: the client must take care of closing this resource.
Validator v = new ProcessorValidator(camelContext).setProcessor(processor).setType(parseString(definition.getType()));
return v;
}
}
|
EndpointValidatorReifier
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/GoogleDriveEndpointBuilderFactory.java
|
{
"start": 40218,
"end": 42817
}
|
interface ____ extends EndpointProducerBuilder {
default GoogleDriveEndpointProducerBuilder basic() {
return (GoogleDriveEndpointProducerBuilder) this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedGoogleDriveEndpointProducerBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedGoogleDriveEndpointProducerBuilder lazyStartProducer(String lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
}
/**
* Builder for endpoint for the Google Drive component.
*/
public
|
AdvancedGoogleDriveEndpointProducerBuilder
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/sql/postgresql/datatypes/BitStringTest.java
|
{
"start": 308,
"end": 635
}
|
class ____ extends PGTest {
public void test_timestamp() throws Exception {
String sql = "B'101'";
PGExprParser parser = new PGExprParser(sql);
SQLBinaryExpr expr = (SQLBinaryExpr) parser.expr();
assertEquals("B'101'", SQLUtils.toSQLString(expr, JdbcConstants.POSTGRESQL));
}
}
|
BitStringTest
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/StateDescriptorPassingTest.java
|
{
"start": 2868,
"end": 15893
}
|
class ____ {
@Test
void testReduceWindowState() {
Configuration configuration = new Configuration();
String serializerConfigStr =
"{java.io.File: {type: kryo, kryo-type: registered, class: com.esotericsoftware.kryo.serializers.JavaSerializer}}";
configuration.setString(PipelineOptions.SERIALIZATION_CONFIG.key(), serializerConfigStr);
final StreamExecutionEnvironment env =
StreamExecutionEnvironment.getExecutionEnvironment(configuration);
DataStream<File> src =
env.fromData(new File("/"))
.assignTimestampsAndWatermarks(
WatermarkStrategy.<File>forMonotonousTimestamps()
.withTimestampAssigner(
(file, ts) -> System.currentTimeMillis()));
SingleOutputStreamOperator<?> result =
src.keyBy(
new KeySelector<File, String>() {
@Override
public String getKey(File value) {
return null;
}
})
.window(TumblingEventTimeWindows.of(Duration.ofMillis(1000)))
.reduce(
new ReduceFunction<File>() {
@Override
public File reduce(File value1, File value2) {
return null;
}
});
validateStateDescriptorConfigured(result);
}
@Test
void testApplyWindowState() {
Configuration configuration = new Configuration();
String serializerConfigStr =
"{java.io.File: {type: kryo, kryo-type: registered, class: com.esotericsoftware.kryo.serializers.JavaSerializer}}";
configuration.setString(PipelineOptions.SERIALIZATION_CONFIG.key(), serializerConfigStr);
final StreamExecutionEnvironment env =
StreamExecutionEnvironment.getExecutionEnvironment(configuration);
DataStream<File> src =
env.fromData(new File("/"))
.assignTimestampsAndWatermarks(
WatermarkStrategy.<File>forMonotonousTimestamps()
.withTimestampAssigner(
(file, ts) -> System.currentTimeMillis()));
SingleOutputStreamOperator<?> result =
src.keyBy(
new KeySelector<File, String>() {
@Override
public String getKey(File value) {
return null;
}
})
.window(TumblingEventTimeWindows.of(Duration.ofMillis(1000)))
.apply(
new WindowFunction<File, String, String, TimeWindow>() {
@Override
public void apply(
String s,
TimeWindow window,
Iterable<File> input,
Collector<String> out) {}
});
validateListStateDescriptorConfigured(result);
}
@Test
void testProcessWindowState() {
Configuration configuration = new Configuration();
String serializerConfigStr =
"{java.io.File: {type: kryo, kryo-type: registered, class: com.esotericsoftware.kryo.serializers.JavaSerializer}}";
configuration.setString(PipelineOptions.SERIALIZATION_CONFIG.key(), serializerConfigStr);
final StreamExecutionEnvironment env =
StreamExecutionEnvironment.getExecutionEnvironment(configuration);
DataStream<File> src =
env.fromData(new File("/"))
.assignTimestampsAndWatermarks(
WatermarkStrategy.<File>forMonotonousTimestamps()
.withTimestampAssigner(
(file, ts) -> System.currentTimeMillis()));
SingleOutputStreamOperator<?> result =
src.keyBy(
new KeySelector<File, String>() {
@Override
public String getKey(File value) {
return null;
}
})
.window(TumblingEventTimeWindows.of(Duration.ofMillis(1000)))
.process(
new ProcessWindowFunction<File, String, String, TimeWindow>() {
@Override
public void process(
String s,
Context ctx,
Iterable<File> input,
Collector<String> out) {}
});
validateListStateDescriptorConfigured(result);
}
@Test
void testProcessAllWindowState() {
Configuration configuration = new Configuration();
String serializerConfigStr =
"{java.io.File: {type: kryo, kryo-type: registered, class: com.esotericsoftware.kryo.serializers.JavaSerializer}}";
configuration.setString(PipelineOptions.SERIALIZATION_CONFIG.key(), serializerConfigStr);
final StreamExecutionEnvironment env =
StreamExecutionEnvironment.getExecutionEnvironment(configuration);
// simulate ingestion time
DataStream<File> src =
env.fromData(new File("/"))
.assignTimestampsAndWatermarks(
WatermarkStrategy.<File>forMonotonousTimestamps()
.withTimestampAssigner(
(file, ts) -> System.currentTimeMillis()));
SingleOutputStreamOperator<?> result =
src.windowAll(TumblingEventTimeWindows.of(Duration.ofMillis(1000)))
.process(
new ProcessAllWindowFunction<File, String, TimeWindow>() {
@Override
public void process(
Context ctx,
Iterable<File> input,
Collector<String> out) {}
});
validateListStateDescriptorConfigured(result);
}
@Test
void testReduceWindowAllState() {
Configuration configuration = new Configuration();
String serializerConfigStr =
"{java.io.File: {type: kryo, kryo-type: registered, class: com.esotericsoftware.kryo.serializers.JavaSerializer}}";
configuration.setString(PipelineOptions.SERIALIZATION_CONFIG.key(), serializerConfigStr);
final StreamExecutionEnvironment env =
StreamExecutionEnvironment.getExecutionEnvironment(configuration);
// simulate ingestion time
DataStream<File> src =
env.fromData(new File("/"))
.assignTimestampsAndWatermarks(
WatermarkStrategy.<File>forMonotonousTimestamps()
.withTimestampAssigner(
(file, ts) -> System.currentTimeMillis()));
SingleOutputStreamOperator<?> result =
src.windowAll(TumblingEventTimeWindows.of(Duration.ofMillis(1000)))
.reduce(
new ReduceFunction<File>() {
@Override
public File reduce(File value1, File value2) {
return null;
}
});
validateStateDescriptorConfigured(result);
}
@Test
void testApplyWindowAllState() {
Configuration configuration = new Configuration();
String serializerConfigStr =
"{java.io.File: {type: kryo, kryo-type: registered, class: com.esotericsoftware.kryo.serializers.JavaSerializer}}";
configuration.setString(PipelineOptions.SERIALIZATION_CONFIG.key(), serializerConfigStr);
final StreamExecutionEnvironment env =
StreamExecutionEnvironment.getExecutionEnvironment(configuration);
// simulate ingestion time
DataStream<File> src =
env.fromData(new File("/"))
.assignTimestampsAndWatermarks(
WatermarkStrategy.<File>forMonotonousTimestamps()
.withTimestampAssigner(
(file, ts) -> System.currentTimeMillis()));
SingleOutputStreamOperator<?> result =
src.windowAll(TumblingEventTimeWindows.of(Duration.ofMillis(1000)))
.apply(
new AllWindowFunction<File, String, TimeWindow>() {
@Override
public void apply(
TimeWindow window,
Iterable<File> input,
Collector<String> out) {}
});
validateListStateDescriptorConfigured(result);
}
// ------------------------------------------------------------------------
// generic validation
// ------------------------------------------------------------------------
private void validateStateDescriptorConfigured(SingleOutputStreamOperator<?> result) {
OneInputTransformation<?, ?> transform =
(OneInputTransformation<?, ?>) result.getTransformation();
StreamOperatorFactory<?> factory = transform.getOperatorFactory();
StateDescriptor<?, ?> descr;
if (factory instanceof WindowOperatorFactory) {
descr = ((WindowOperatorFactory<?, ?, ?, ?, ?>) factory).getStateDescriptor();
} else {
WindowOperator<?, ?, ?, ?, ?> op =
(WindowOperator<?, ?, ?, ?, ?>) transform.getOperator();
descr = op.getStateDescriptor();
}
// this would be the first statement to fail if state descriptors were not properly
// initialized
TypeSerializer<?> serializer = descr.getSerializer();
assertThat(serializer).isInstanceOf(KryoSerializer.class);
Kryo kryo = ((KryoSerializer<?>) serializer).getKryo();
assertThat(kryo.getSerializer(File.class))
.as("serializer registration was not properly passed on")
.isInstanceOf(JavaSerializer.class);
}
private void validateListStateDescriptorConfigured(SingleOutputStreamOperator<?> result) {
OneInputTransformation<?, ?> transform =
(OneInputTransformation<?, ?>) result.getTransformation();
StreamOperatorFactory<?> factory = transform.getOperatorFactory();
StateDescriptor<?, ?> descr;
if (factory instanceof WindowOperatorFactory) {
descr = ((WindowOperatorFactory<?, ?, ?, ?, ?>) factory).getStateDescriptor();
} else {
WindowOperator<?, ?, ?, ?, ?> op =
(WindowOperator<?, ?, ?, ?, ?>) transform.getOperator();
descr = op.getStateDescriptor();
}
assertThat(descr).isInstanceOf(ListStateDescriptor.class);
ListStateDescriptor<?> listDescr = (ListStateDescriptor<?>) descr;
// this would be the first statement to fail if state descriptors were not properly
// initialized
TypeSerializer<?> serializer = listDescr.getSerializer();
assertThat(serializer).isInstanceOf(ListSerializer.class);
TypeSerializer<?> elementSerializer = listDescr.getElementSerializer();
assertThat(elementSerializer).isInstanceOf(KryoSerializer.class);
Kryo kryo = ((KryoSerializer<?>) elementSerializer).getKryo();
assertThat(kryo.getSerializer(File.class))
.as("serializer registration was not properly passed on")
.isInstanceOf(JavaSerializer.class);
}
}
|
StateDescriptorPassingTest
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/context/support/ContextLoaderUtilsContextHierarchyTests.java
|
{
"start": 24785,
"end": 24898
}
|
class ____ {
}
@ContextHierarchyA
private static
|
SingleTestClassWithSingleLevelContextHierarchyFromMetaAnnotation
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/UndesiredAllocationsTracker.java
|
{
"start": 1903,
"end": 12593
}
|
class ____ {
private static final Logger logger = LogManager.getLogger(UndesiredAllocationsTracker.class);
private static final TimeValue FIVE_MINUTES = timeValueMinutes(5);
private static final FeatureFlag UNDESIRED_ALLOCATION_TRACKER_ENABLED = new FeatureFlag("undesired_allocation_tracker");
/**
* Warning logs will be periodically written if we see a shard that's been in an undesired allocation for this long
*/
public static final Setting<TimeValue> UNDESIRED_ALLOCATION_DURATION_LOG_THRESHOLD_SETTING = Setting.timeSetting(
"cluster.routing.allocation.desired_balance.undesired_duration_logging.threshold",
FIVE_MINUTES,
ONE_MINUTE,
Setting.Property.Dynamic,
Setting.Property.NodeScope
);
/**
* The minimum amount of time between warnings about persistent undesired allocations
*/
public static final Setting<TimeValue> UNDESIRED_ALLOCATION_DURATION_LOG_INTERVAL_SETTING = Setting.timeSetting(
"cluster.routing.allocation.desired_balance.undesired_duration_logging.interval",
FIVE_MINUTES,
ONE_MINUTE,
Setting.Property.Dynamic,
Setting.Property.NodeScope
);
/**
* The maximum number of undesired allocations to track. We expect this to be relatively small.
*/
public static final Setting<Integer> MAX_UNDESIRED_ALLOCATIONS_TO_TRACK = Setting.intSetting(
"cluster.routing.allocation.desired_balance.undesired_duration_logging.max_to_track",
UNDESIRED_ALLOCATION_TRACKER_ENABLED.isEnabled() ? 10 : 0,
0,
100,
Setting.Property.Dynamic,
Setting.Property.NodeScope
);
private final TimeProvider timeProvider;
private final LinkedHashMap<String, UndesiredAllocation> undesiredAllocations = new LinkedHashMap<>();
private final FrequencyCappedAction undesiredAllocationDurationLogInterval;
private volatile TimeValue undesiredAllocationDurationLoggingThreshold;
private volatile int maxUndesiredAllocationsToTrack;
private boolean missingAllocationAssertionsEnabled = true;
UndesiredAllocationsTracker(ClusterSettings clusterSettings, TimeProvider timeProvider) {
this.timeProvider = timeProvider;
this.undesiredAllocationDurationLogInterval = new FrequencyCappedAction(timeProvider::relativeTimeInMillis, ZERO);
clusterSettings.initializeAndWatch(
UNDESIRED_ALLOCATION_DURATION_LOG_INTERVAL_SETTING,
undesiredAllocationDurationLogInterval::setMinInterval
);
clusterSettings.initializeAndWatch(
UNDESIRED_ALLOCATION_DURATION_LOG_THRESHOLD_SETTING,
value -> undesiredAllocationDurationLoggingThreshold = value
);
clusterSettings.initializeAndWatch(MAX_UNDESIRED_ALLOCATIONS_TO_TRACK, value -> this.maxUndesiredAllocationsToTrack = value);
}
/**
* Track an allocation as being undesired
*/
public void trackUndesiredAllocation(ShardRouting shardRouting) {
assert shardRouting.unassigned() == false : "Shouldn't record unassigned shards as undesired allocations";
if (undesiredAllocations.size() < maxUndesiredAllocationsToTrack) {
final var allocationId = shardRouting.allocationId().getId();
if (undesiredAllocations.containsKey(allocationId) == false) {
undesiredAllocations.put(
allocationId,
new UndesiredAllocation(shardRouting.shardId(), timeProvider.relativeTimeInMillis())
);
}
}
}
/**
* Remove any tracking of the specified allocation (a no-op if the allocation isn't being tracked)
*/
public void removeTracking(ShardRouting shardRouting) {
assert shardRouting.unassigned() == false : "Shouldn't remove tracking of unassigned shards";
undesiredAllocations.remove(shardRouting.allocationId().getId());
}
/**
* Clear any {@link ShardRouting} that are no longer present in the routing nodes
*/
public void cleanup(RoutingNodes routingNodes) {
undesiredAllocations.entrySet().removeIf(e -> {
final var undesiredAllocation = e.getValue();
final var allocationId = e.getKey();
return routingNodes.getByAllocationId(undesiredAllocation.shardId(), allocationId) == null;
});
shrinkIfOversized();
}
/**
* Clear all tracked allocations
*/
public void clear() {
undesiredAllocations.clear();
}
/**
* If there are shards that have been in undesired allocations for longer than the configured
* threshold, log a warning
*/
public void maybeLogUndesiredShardsWarning(
RoutingNodes routingNodes,
RoutingAllocation routingAllocation,
DesiredBalance desiredBalance
) {
final long currentTimeMillis = timeProvider.relativeTimeInMillis();
if (undesiredAllocations.isEmpty() == false) {
final long earliestUndesiredTimestamp = undesiredAllocations.firstEntry().getValue().undesiredSince();
if (earliestUndesiredTimestamp < currentTimeMillis
&& currentTimeMillis - earliestUndesiredTimestamp > undesiredAllocationDurationLoggingThreshold.millis()) {
undesiredAllocationDurationLogInterval.maybeExecute(
() -> logDecisionsForUndesiredShardsOverThreshold(routingNodes, routingAllocation, desiredBalance)
);
}
}
}
private boolean shardTierMatchesNodeTier(ShardRouting shardRouting, DiscoveryNode discoveryNode) {
return switch (shardRouting.role()) {
case INDEX_ONLY -> discoveryNode.getRoles().contains(DiscoveryNodeRole.INDEX_ROLE);
case SEARCH_ONLY -> discoveryNode.getRoles().contains(DiscoveryNodeRole.SEARCH_ROLE);
default -> true;
};
}
    /**
     * Walks all tracked allocations and, for each one that has been undesired for longer than the
     * logging threshold, logs the allocation decisions explaining why it has not moved.
     */
    private void logDecisionsForUndesiredShardsOverThreshold(
        RoutingNodes routingNodes,
        RoutingAllocation routingAllocation,
        DesiredBalance desiredBalance
    ) {
        final long currentTimeMillis = timeProvider.relativeTimeInMillis();
        final long loggingThresholdTimestamp = currentTimeMillis - undesiredAllocationDurationLoggingThreshold.millis();
        for (var allocation : undesiredAllocations.entrySet()) {
            final var undesiredAllocation = allocation.getValue();
            final var allocationId = allocation.getKey();
            if (undesiredAllocation.undesiredSince() < loggingThresholdTimestamp) {
                final var shardRouting = routingNodes.getByAllocationId(undesiredAllocation.shardId(), allocationId);
                if (shardRouting != null) {
                    logUndesiredShardDetails(
                        shardRouting,
                        timeValueMillis(currentTimeMillis - undesiredAllocation.undesiredSince()),
                        routingNodes,
                        routingAllocation,
                        desiredBalance
                    );
                } else {
                    // cleanup() is expected to remove entries whose allocation id no longer resolves to a shard.
                    assert false : undesiredAllocation + " for allocationID " + allocationId + " was not cleaned up";
                }
            }
        }
    }
    /**
     * Logs, for a single shard that has been undesired for {@code undesiredDuration}, the allocation
     * decision for each of its desired nodes (or a note when a desired node has left the cluster).
     */
    private void logUndesiredShardDetails(
        ShardRouting shardRouting,
        TimeValue undesiredDuration,
        RoutingNodes routingNodes,
        RoutingAllocation allocation,
        DesiredBalance desiredBalance
    ) {
        final RoutingAllocation.DebugMode originalDebugMode = allocation.getDebugMode();
        // Collect decision details while excluding YES decisions (DebugMode.EXCLUDE_YES_DECISIONS).
        allocation.setDebugMode(RoutingAllocation.DebugMode.EXCLUDE_YES_DECISIONS);
        try {
            final var assignment = desiredBalance.getAssignment(shardRouting.shardId());
            if (assignment != null) {
                logger.warn("Shard {} has been in an undesired allocation for {}", shardRouting.shardId(), undesiredDuration);
                for (final var nodeId : assignment.nodeIds()) {
                    if (allocation.nodes().nodeExists(nodeId)) {
                        // Only ask the deciders about nodes whose roles match the shard copy's role.
                        if (shardTierMatchesNodeTier(shardRouting, allocation.nodes().get(nodeId))) {
                            final var decision = allocation.deciders().canAllocate(shardRouting, routingNodes.node(nodeId), allocation);
                            logger.warn("Shard {} allocation decision for node [{}]: {}", shardRouting.shardId(), nodeId, decision);
                        }
                    } else {
                        logger.warn("Shard {} desired node [{}] has left the cluster", shardRouting.shardId(), nodeId);
                    }
                }
            } else {
                // Assertion can be disabled via disableMissingAllocationAssertions() for tests.
                assert missingAllocationAssertionsEnabled == false
                    : "Shard " + shardRouting + " was missing an assignment, this shouldn't be possible. " + desiredBalance;
            }
        } finally {
            // Always restore the caller's debug mode.
            allocation.setDebugMode(originalDebugMode);
        }
    }
/**
* If the maximum to track was reduced, and we are tracking more than the new maximum, purge the most recent entries
* to bring us under the new limit
*/
private void shrinkIfOversized() {
if (undesiredAllocations.size() > maxUndesiredAllocationsToTrack) {
final var newestExcessAllocationIds = undesiredAllocations.entrySet()
.stream()
.sorted((a, b) -> Long.compare(b.getValue().undesiredSince(), a.getValue().undesiredSince()))
.limit(undesiredAllocations.size() - maxUndesiredAllocationsToTrack)
.map(Map.Entry::getKey)
.collect(Collectors.toSet());
undesiredAllocations.keySet().removeAll(newestExcessAllocationIds);
}
}
    // visible for testing
    // Returns an immutable snapshot (Map.copyOf) of allocationId -> UndesiredAllocation.
    Map<String, UndesiredAllocation> getUndesiredAllocations() {
        return Map.copyOf(undesiredAllocations);
    }
    /**
     * Rather than storing the {@link ShardRouting}, we store a map of allocationId -> {@link UndesiredAllocation};
     * this is because the allocation ID will persist as long as a shard stays on the same node, but the
     * {@link ShardRouting} changes for a variety of reasons even when the shard doesn't move.
     *
     * @param shardId The shard ID
     * @param undesiredSince The timestamp (relative time in millis, from the time provider) when the
     *                       shard was first observed in an undesired allocation
     */
    record UndesiredAllocation(ShardId shardId, long undesiredSince) {}
    // Exposed for testing
    // Disables the "missing assignment" assertion; closing the returned Releasable re-enables it.
    public Releasable disableMissingAllocationAssertions() {
        missingAllocationAssertionsEnabled = false;
        return () -> missingAllocationAssertionsEnabled = true;
    }
}
|
UndesiredAllocationsTracker
|
java
|
spring-projects__spring-framework
|
spring-context/src/main/java/org/springframework/context/ApplicationEventPublisher.java
|
{
"start": 1244,
"end": 4073
}
|
interface ApplicationEventPublisher {
    /**
     * Notify all <strong>matching</strong> listeners registered with this
     * application of an application event. Events may be framework events
     * (such as ContextRefreshedEvent) or application-specific events.
     * <p>Such an event publication step is effectively a hand-off to the
     * multicaster and does not imply synchronous/asynchronous execution
     * or even immediate execution at all. Event listeners are encouraged
     * to be as efficient as possible, individually using asynchronous
     * execution for longer-running and potentially blocking operations.
     * <p>For usage in a reactive call stack, include event publication
     * as a simple hand-off:
     * {@code Mono.fromRunnable(() -> eventPublisher.publishEvent(...))}.
     * As with any asynchronous execution, thread-local data is not going
     * to be available for reactive listener methods. All state which is
     * necessary to process the event needs to be included in the event
     * instance itself.
     * <p>For the convenient inclusion of the current transaction context
     * in a reactive hand-off, consider using
     * {@link org.springframework.transaction.reactive.TransactionalEventPublisher#publishEvent(java.util.function.Function)}.
     * For thread-bound transactions, this is not necessary since the
     * state will be implicitly available through thread-local storage.
     * @param event the event to publish
     * @see #publishEvent(Object)
     * @see ApplicationListener#supportsAsyncExecution()
     * @see org.springframework.context.event.ContextRefreshedEvent
     * @see org.springframework.context.event.ContextClosedEvent
     */
    default void publishEvent(ApplicationEvent event) {
        // Delegate to the Object overload; the event is already an ApplicationEvent so no wrapping occurs.
        publishEvent((Object) event);
    }
    /**
     * Notify all <strong>matching</strong> listeners registered with this
     * application of an event.
     * <p>If the specified {@code event} is not an {@link ApplicationEvent},
     * it is wrapped in a {@link PayloadApplicationEvent}.
     * <p>Such an event publication step is effectively a hand-off to the
     * multicaster and does not imply synchronous/asynchronous execution
     * or even immediate execution at all. Event listeners are encouraged
     * to be as efficient as possible, individually using asynchronous
     * execution for longer-running and potentially blocking operations.
     * <p>For the convenient inclusion of the current transaction context
     * in a reactive hand-off, consider using
     * {@link org.springframework.transaction.reactive.TransactionalEventPublisher#publishEvent(Object)}.
     * For thread-bound transactions, this is not necessary since the
     * state will be implicitly available through thread-local storage.
     * @param event the event to publish
     * @since 4.2
     * @see #publishEvent(ApplicationEvent)
     * @see PayloadApplicationEvent
     */
    void publishEvent(Object event);
}
|
ApplicationEventPublisher
|
java
|
quarkusio__quarkus
|
core/deployment/src/main/java/io/quarkus/deployment/builditem/ArchiveRootBuildItem.java
|
{
"start": 1040,
"end": 8817
}
|
class ____ {
private List<Path> archiveRoots = new ArrayList<>();
private Collection<Path> excludedFromIndexing;
private Builder() {
}
/**
* Adds a single archive root path to the builder.
*
* @param root the archive root path to add
* @return this builder instance
*/
public Builder addArchiveRoot(Path root) {
this.archiveRoots.add(root);
return this;
}
/**
* Adds multiple archive root paths to the builder.
*
* @param paths a {@link PathCollection} of archive root paths to add
* @return this builder instance
*/
public Builder addArchiveRoots(PathCollection paths) {
paths.forEach(archiveRoots::add);
return this;
}
/**
* Sets the collection of paths to exclude from indexing.
*
* @param excludedFromIndexing a collection of paths to be excluded
* @return this builder instance
*/
public Builder setExcludedFromIndexing(Collection<Path> excludedFromIndexing) {
this.excludedFromIndexing = excludedFromIndexing;
return this;
}
/**
* @deprecated Use {@link #addArchiveRoot(Path)} instead to add archive roots.
* This method clears previous archive roots before setting the new one.
*
* @param archiveLocation the archive location to set
* @return this builder instance
*/
@Deprecated
public Builder setArchiveLocation(Path archiveLocation) {
this.archiveRoots.clear();
this.archiveRoots.add(archiveLocation);
return this;
}
/**
* Builds the {@link ArchiveRootBuildItem} using the configured properties.
*
* @param buildCloseables a {@link QuarkusBuildCloseablesBuildItem} to manage opened resources (e.g., zip file systems)
* @return a new {@link ArchiveRootBuildItem} instance
* @throws IOException if an I/O error occurs when accessing the archive roots
*/
public ArchiveRootBuildItem build(QuarkusBuildCloseablesBuildItem buildCloseables) throws IOException {
return new ArchiveRootBuildItem(this, buildCloseables);
}
}
    /**
     * Creates a new {@link Builder} instance for building an {@link ArchiveRootBuildItem}.
     * Each call returns a fresh, independent builder.
     *
     * @return a new {@link Builder} instance
     */
    public static Builder builder() {
        return new Builder();
    }
    // First root directory, or null when the builder supplied no archive roots.
    private final Path archiveRoot;
    // Paths excluded from Jandex indexing; may be null when built via the Builder — TODO confirm callers always set it.
    private final Collection<Path> excludedFromIndexing;
    // Root directories of the archive (zip filesystem roots for jar archives).
    private final PathCollection rootDirs;
    // The paths that collectively make up the archive content (jar files and/or directories).
    private final PathCollection paths;
    /**
     * Constructs an {@link ArchiveRootBuildItem} with a single application classes directory.
     * The directory serves both as the archive location and the archive root.
     *
     * @param appClassesDir the path to the application classes directory
     */
    public ArchiveRootBuildItem(Path appClassesDir) {
        this(appClassesDir, appClassesDir);
    }
    /**
     * @deprecated Use {@link Builder} instead.
     * Constructs an {@link ArchiveRootBuildItem} with a given archive location and root directory,
     * with no paths excluded from indexing.
     *
     * @param archiveLocation the archive location (e.g., JAR file path)
     * @param archiveRoot the root directory of the archive
     */
    @Deprecated
    public ArchiveRootBuildItem(Path archiveLocation, Path archiveRoot) {
        this(archiveLocation, archiveRoot, Collections.emptySet());
    }
    /**
     * Canonical constructor for the single-root case: validates that {@code archiveRoot} is a
     * directory and records both the location and the root as singleton path collections.
     */
    private ArchiveRootBuildItem(Path archiveLocation, Path archiveRoot, Collection<Path> excludedFromIndexing) {
        if (!Files.isDirectory(archiveRoot)) {
            throw new IllegalArgumentException(archiveRoot + " does not point to the application output directory");
        }
        this.rootDirs = PathList.of(archiveRoot);
        this.paths = PathList.of(archiveLocation);
        this.archiveRoot = archiveRoot;
        this.excludedFromIndexing = excludedFromIndexing;
    }
    /**
     * Builder-based constructor: each configured root that is a directory is used as-is, while
     * jar roots are mounted as zip file systems (registered with {@code buildCloseables} so they
     * are closed at the end of the build). When no roots were configured, all collections are
     * empty and {@code archiveRoot} is null.
     */
    private ArchiveRootBuildItem(Builder builder, QuarkusBuildCloseablesBuildItem buildCloseables) throws IOException {
        this.excludedFromIndexing = builder.excludedFromIndexing;
        if (!builder.archiveRoots.isEmpty()) {
            final PathList.Builder rootDirs = PathList.builder();
            final PathList.Builder paths = PathList.builder();
            for (Path root : builder.archiveRoots) {
                paths.add(root);
                if (Files.isDirectory(root)) {
                    rootDirs.add(root);
                } else {
                    // Not a directory: treat as an archive and mount it; closing is deferred to the build.
                    final FileSystem fs = buildCloseables.add(ZipUtils.newFileSystem(root));
                    fs.getRootDirectories().forEach(rootDirs::add);
                }
            }
            this.rootDirs = rootDirs.build();
            this.paths = paths.build();
            // By convention the first root directory is exposed as the (deprecated) archiveRoot.
            this.archiveRoot = this.rootDirs.iterator().next();
        } else {
            this.paths = this.rootDirs = PathsCollection.of();
            this.archiveRoot = null;
        }
    }
/**
* If this archive is a jar file it will return the path to the jar file on the file system,
* otherwise it will return the directory that this corresponds to.
*
* @deprecated in favor of {@link #getResolvedPaths()}
*/
@Deprecated
public Path getArchiveLocation() {
final Iterator<Path> i = paths.iterator();
Path last = i.next();
while (i.hasNext()) {
last = i.next();
}
return last;
}
    /**
     * Returns a path representing the archive root. Note that if this is a jar archive this is not the path to the
     * jar, but rather a path to the root of the mounted {@link com.sun.nio.zipfs.ZipFileSystem}.
     * May be {@code null} when the item was built from a {@link Builder} with no archive roots.
     *
     * @return The archive root.
     * @deprecated in favor of {@link #getRootDirectories()}
     */
    @Deprecated
    public Path getArchiveRoot() {
        return archiveRoot;
    }
    /**
     * Collection of paths representing the archive's root directories. If there is a JAR among the paths
     * (returned by {@link #getResolvedPaths()}) this method will return the path to the root of the mounted
     * {@link java.nio.file.ZipFileSystem}
     * instead.
     *
     * @deprecated in favor of {@link #getRootDirectories()}
     * @return Collection of paths representing the archive's root directories
     */
    @Deprecated
    public PathsCollection getRootDirs() {
        // Converts the internal PathCollection into the legacy PathsCollection type on every call.
        return PathsCollection.from(rootDirs);
    }
    /**
     * Collection of paths representing the archive's root directories. If there is a JAR among the paths
     * (returned by {@link #getResolvedPaths()}) this method will return the path to the root of the mounted
     * {@link java.nio.file.ZipFileSystem}
     * instead.
     *
     * @return Collection of paths representing the archive's root directories
     */
    public PathCollection getRootDirectories() {
        return rootDirs;
    }
    /**
     * Collection of paths that collectively constitute the application archive's content.
     *
     * @deprecated in favor of {@link #getResolvedPaths()}
     * @return collection of paths that collectively constitute the application archive content
     */
    @Deprecated
    public PathsCollection getPaths() {
        // Converts the internal PathCollection into the legacy PathsCollection type on every call.
        return PathsCollection.from(paths);
    }
    /**
     * Collection of paths that collectively constitute the application archive's content.
     * Jar archives appear here as the jar file path itself (not a mounted zip root).
     *
     * @return collection of paths that collectively constitute the application archive content
     */
    public PathCollection getResolvedPaths() {
        return paths;
    }
public boolean isExcludedFromIndexing(Path p) {
return excludedFromIndexing.contains(p);
}
}
|
Builder
|
java
|
apache__avro
|
lang/java/avro/src/main/java/org/apache/avro/SystemLimitException.java
|
{
"start": 1649,
"end": 9742
}
|
class ____ extends AvroRuntimeException {
/**
* The maximum length of array to allocate (unless necessary). Some VMs reserve
* some header words in an array. Attempts to allocate larger arrays may result
* in {@code OutOfMemoryError: Requested array size exceeds VM limit}
*
* @see <a href="https://bugs.openjdk.org/browse/JDK-8246725">JDK-8246725</a>
*/
// VisibleForTesting
static final int MAX_ARRAY_VM_LIMIT = Integer.MAX_VALUE - 8;
public static final String MAX_BYTES_LENGTH_PROPERTY = "org.apache.avro.limits.bytes.maxLength";
public static final String MAX_COLLECTION_LENGTH_PROPERTY = "org.apache.avro.limits.collectionItems.maxLength";
public static final String MAX_STRING_LENGTH_PROPERTY = "org.apache.avro.limits.string.maxLength";
private static int maxBytesLength = MAX_ARRAY_VM_LIMIT;
private static int maxCollectionLength = MAX_ARRAY_VM_LIMIT;
private static int maxStringLength = MAX_ARRAY_VM_LIMIT;
static {
resetLimits();
}
public SystemLimitException(String message) {
super(message);
}
/**
* Get an integer value stored in a system property, used to configure the
* system behaviour of decoders
*
* @param property The system property to fetch
* @param defaultValue The value to use if the system property is not present or
* parsable as an int
* @return The value from the system property
*/
private static int getLimitFromProperty(String property, int defaultValue) {
String o = System.getProperty(property);
int i = defaultValue;
if (o != null) {
try {
i = Integer.parseUnsignedInt(o);
} catch (NumberFormatException nfe) {
LoggerFactory.getLogger(SystemLimitException.class).warn("Could not parse property " + property + ": " + o,
nfe);
}
}
return i;
}
/**
* Check to ensure that reading the bytes is within the specified limits.
*
* @param length The proposed size of the bytes to read
* @return The size of the bytes if and only if it is within the limit and
* non-negative.
* @throws UnsupportedOperationException if reading the datum would allocate a
* collection that the Java VM would be
* unable to handle
* @throws SystemLimitException if the decoding should fail because it
* would otherwise result in an allocation
* exceeding the set limit
* @throws AvroRuntimeException if the length is negative
*/
public static int checkMaxBytesLength(long length) {
if (length < 0) {
throw new AvroRuntimeException("Malformed data. Length is negative: " + length);
}
if (length > MAX_ARRAY_VM_LIMIT) {
throw new UnsupportedOperationException(
"Cannot read arrays longer than " + MAX_ARRAY_VM_LIMIT + " bytes in Java library");
}
if (length > maxBytesLength) {
throw new SystemLimitException("Bytes length " + length + " exceeds maximum allowed");
}
return (int) length;
}
/**
* Check to ensure that reading the specified number of items remains within the
* specified limits.
*
* @param existing The number of elements items read in the collection
* @param items The next number of items to read. In normal usage, this is
* always a positive, permitted value. Negative and zero values
* have a special meaning in Avro decoding.
* @return The total number of items in the collection if and only if it is
* within the limit and non-negative.
* @throws UnsupportedOperationException if reading the items would allocate a
* collection that the Java VM would be
* unable to handle
* @throws SystemLimitException if the decoding should fail because it
* would otherwise result in an allocation
* exceeding the set limit
* @throws AvroRuntimeException if the length is negative
*/
public static int checkMaxCollectionLength(long existing, long items) {
long length = existing + items;
if (existing < 0) {
throw new AvroRuntimeException("Malformed data. Length is negative: " + existing);
}
if (items < 0) {
throw new AvroRuntimeException("Malformed data. Length is negative: " + items);
}
if (length > MAX_ARRAY_VM_LIMIT || length < existing) {
throw new UnsupportedOperationException(
"Cannot read collections larger than " + MAX_ARRAY_VM_LIMIT + " items in Java library");
}
if (length > maxCollectionLength) {
throw new SystemLimitException("Collection length " + length + " exceeds maximum allowed");
}
return (int) length;
}
/**
* Check to ensure that reading the specified number of items remains within the
* specified limits.
*
* @param items The next number of items to read. In normal usage, this is
* always a positive, permitted value. Negative and zero values
* have a special meaning in Avro decoding.
* @return The total number of items in the collection if and only if it is
* within the limit and non-negative.
* @throws UnsupportedOperationException if reading the items would allocate a
* collection that the Java VM would be
* unable to handle
* @throws SystemLimitException if the decoding should fail because it
* would otherwise result in an allocation
* exceeding the set limit
* @throws AvroRuntimeException if the length is negative
*/
public static int checkMaxCollectionLength(long items) {
if (items > MAX_ARRAY_VM_LIMIT) {
throw new UnsupportedOperationException(
"Cannot read collections larger than " + MAX_ARRAY_VM_LIMIT + " items in Java library");
}
if (items > maxCollectionLength) {
throw new SystemLimitException(
"Collection length " + items + " exceeds the maximum allowed of " + maxCollectionLength);
}
return (int) items;
}
/**
* Check to ensure that reading the string size is within the specified limits.
*
* @param length The proposed size of the string to read
* @return The size of the string if and only if it is within the limit and
* non-negative.
* @throws UnsupportedOperationException if reading the items would allocate a
* collection that the Java VM would be
* unable to handle
* @throws SystemLimitException if the decoding should fail because it
* would otherwise result in an allocation
* exceeding the set limit
* @throws AvroRuntimeException if the length is negative
*/
public static int checkMaxStringLength(long length) {
if (length < 0) {
throw new AvroRuntimeException("Malformed data. Length is negative: " + length);
}
if (length > MAX_ARRAY_VM_LIMIT) {
throw new UnsupportedOperationException("Cannot read strings longer than " + MAX_ARRAY_VM_LIMIT + " bytes");
}
if (length > maxStringLength) {
throw new SystemLimitException("String length " + length + " exceeds maximum allowed");
}
return (int) length;
}
/** Reread the limits from the system properties. */
// VisibleForTesting
static void resetLimits() {
maxBytesLength = getLimitFromProperty(MAX_BYTES_LENGTH_PROPERTY, MAX_ARRAY_VM_LIMIT);
maxCollectionLength = getLimitFromProperty(MAX_COLLECTION_LENGTH_PROPERTY, MAX_ARRAY_VM_LIMIT);
maxStringLength = getLimitFromProperty(MAX_STRING_LENGTH_PROPERTY, MAX_ARRAY_VM_LIMIT);
}
}
|
SystemLimitException
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/impl/event/EventNotifierServiceStoppingFailedEventTest.java
|
{
"start": 3039,
"end": 3638
}
|
class ____ implements Service {
private final String name;
private final boolean fail;
private MyService(String name, boolean fail) {
this.name = name;
this.fail = fail;
}
@Override
public void start() {
}
@Override
public void stop() {
stopOrder = stopOrder + name;
if (fail) {
throw new IllegalArgumentException("Fail " + name);
}
}
@Override
public String toString() {
return name;
}
}
}
|
MyService
|
java
|
FasterXML__jackson-databind
|
src/main/java/tools/jackson/databind/cfg/GeneratorSettings.java
|
{
"start": 374,
"end": 4976
}
|
class ____
implements java.io.Serializable
{
private static final long serialVersionUID = 1L;
protected final static GeneratorSettings EMPTY = new GeneratorSettings(null, null, null, null);
/**
* Also need to use a null marker for root value separator
*/
protected final static SerializedString NULL_ROOT_VALUE_SEPARATOR = new SerializedString("");
public final static GeneratorSettings empty = new GeneratorSettings(null, null, null,
null);
/**
* To allow for dynamic enabling/disabling of pretty printing,
* pretty printer can be optionally configured for writer
* as well
*/
public final PrettyPrinter prettyPrinter;
/**
* When using data format that uses a schema, schema is passed
* to generator.
*/
public final FormatSchema schema;
/**
* Caller may want to specify character escaping details, either as
* defaults, or on call-by-call basis.
*/
public final CharacterEscapes characterEscapes;
/**
* Caller may want to override so-called "root value separator",
* String added (verbatim, with no quoting or escaping) between
* values in root context. Default value is a single space character,
* but this is often changed to linefeed.
*/
public final SerializableString rootValueSeparator;
public GeneratorSettings(PrettyPrinter pp, FormatSchema sch,
CharacterEscapes esc, SerializableString rootSep) {
prettyPrinter = pp;
schema = sch;
characterEscapes = esc;
rootValueSeparator = rootSep;
}
public static GeneratorSettings empty() {
return EMPTY;
}
public GeneratorSettings with(PrettyPrinter pp) {
return (pp == prettyPrinter) ? this
: new GeneratorSettings(pp, schema, characterEscapes, rootValueSeparator);
}
public GeneratorSettings with(FormatSchema sch) {
return (schema == sch) ? this
: new GeneratorSettings(prettyPrinter, sch, characterEscapes, rootValueSeparator);
}
public GeneratorSettings with(CharacterEscapes esc) {
return (characterEscapes == esc) ? this
: new GeneratorSettings(prettyPrinter, schema, esc, rootValueSeparator);
}
public GeneratorSettings withRootValueSeparator(String sep) {
if (sep == null) {
if (rootValueSeparator == NULL_ROOT_VALUE_SEPARATOR) {
return this;
}
return new GeneratorSettings(prettyPrinter, schema, characterEscapes, NULL_ROOT_VALUE_SEPARATOR);
}
if (sep.equals(_rootValueSeparatorAsString())) {
return this;
}
return new GeneratorSettings(prettyPrinter, schema, characterEscapes,
new SerializedString(sep));
}
public GeneratorSettings withRootValueSeparator(SerializableString sep) {
if (sep == null) {
if (rootValueSeparator == null) {
return this;
}
return new GeneratorSettings(prettyPrinter, schema, characterEscapes, null);
}
if (sep.equals(rootValueSeparator)) {
return this;
}
return new GeneratorSettings(prettyPrinter, schema, characterEscapes, sep);
}
private final String _rootValueSeparatorAsString() {
return (rootValueSeparator == null) ? null : rootValueSeparator.getValue();
}
/*
/**********************************************************
/* ObjectWriteContext support methods
/**********************************************************
*/
public FormatSchema getSchema() {
return schema;
}
public CharacterEscapes getCharacterEscapes() {
return characterEscapes;
}
public PrettyPrinter getPrettyPrinter() {
PrettyPrinter pp = prettyPrinter;
if (pp != null) {
// Important! Must return actual instance to use, NOT just blueprint
if (pp instanceof Instantiatable<?> pInstantiatable) {
pp = (PrettyPrinter) pInstantiatable.createInstance();
}
return pp;
}
return null;
}
public boolean hasPrettyPrinter() {
return (prettyPrinter != null);
}
public SerializableString getRootValueSeparator(SerializableString defaultSep) {
if (rootValueSeparator == null) {
return defaultSep;
}
if (rootValueSeparator == NULL_ROOT_VALUE_SEPARATOR) {
return null;
}
return rootValueSeparator;
}
}
|
GeneratorSettings
|
java
|
reactor__reactor-core
|
reactor-core/src/main/java/reactor/core/publisher/FluxDoFinally.java
|
{
"start": 4379,
"end": 4793
}
|
class ____<T> extends DoFinallySubscriber<T>
implements ConditionalSubscriber<T> {
DoFinallyConditionalSubscriber(ConditionalSubscriber<? super T> actual,
Consumer<SignalType> onFinally) {
super(actual, onFinally);
}
@Override
@SuppressWarnings("unchecked")
public boolean tryOnNext(T t) {
return ((ConditionalSubscriber<? super T>)actual).tryOnNext(t);
}
}
}
|
DoFinallyConditionalSubscriber
|
java
|
apache__camel
|
components/camel-hazelcast/src/test/java/org/apache/camel/component/hazelcast/testutil/Dummy.java
|
{
"start": 896,
"end": 1374
}
|
/**
 * Simple serializable bean with a String and an int property, used as a test payload.
 */
class Dummy implements Serializable {
    private static final long serialVersionUID = 1L;
    private String foo;
    private int bar;
    public Dummy(String foo, int bar) {
        this.foo = foo;
        this.bar = bar;
    }
    /** @return the current foo value */
    public String getFoo() {
        return foo;
    }
    public void setFoo(String foo) {
        this.foo = foo;
    }
    /** @return the current bar value */
    public int getBar() {
        return bar;
    }
    public void setBar(int bar) {
        this.bar = bar;
    }
}
|
Dummy
|
java
|
alibaba__fastjson
|
src/main/java/com/alibaba/fastjson/serializer/MiscCodec.java
|
{
"start": 1887,
"end": 14097
}
|
class MiscCodec implements ObjectSerializer, ObjectDeserializer {
    // Whether relative paths containing ".." may be deserialized into File (off by default for safety).
    private static boolean FILE_RELATIVE_PATH_SUPPORT = false;
    public final static MiscCodec instance = new MiscCodec();
    // Lazily resolved java.nio.file.Paths#get(String, String...) for JDK7+ Path support.
    private static Method method_paths_get;
    private static boolean method_paths_get_error = false;
    static {
        FILE_RELATIVE_PATH_SUPPORT = "true".equals(IOUtils.getStringProperty("fastjson.deserializer.fileRelativePathSupport"));
    }
    /**
     * Serializes miscellaneous JDK types (SimpleDateFormat, Class, InetSocketAddress, File,
     * InetAddress, TimeZone, Currency, iterators/iterables, Map.Entry, DOM nodes, ...).
     */
    public void write(JSONSerializer serializer, Object object, Object fieldName, Type fieldType,
                      int features) throws IOException {
        SerializeWriter out = serializer.out;
        if (object == null) {
            out.writeNull();
            return;
        }
        Class<?> objClass = object.getClass();
        String strVal;
        if (objClass == SimpleDateFormat.class) {
            String pattern = ((SimpleDateFormat) object).toPattern();
            if (out.isEnabled(SerializerFeature.WriteClassName)) {
                if (object.getClass() != fieldType) {
                    // Emit type info plus pattern so the exact subtype can be restored.
                    out.write('{');
                    out.writeFieldName(JSON.DEFAULT_TYPE_KEY);
                    serializer.write(object.getClass().getName());
                    out.writeFieldValue(',', "val", pattern);
                    out.write('}');
                    return;
                }
            }
            strVal = pattern;
        } else if (objClass == Class.class) {
            Class<?> clazz = (Class<?>) object;
            strVal = clazz.getName();
        } else if (objClass == InetSocketAddress.class) {
            InetSocketAddress address = (InetSocketAddress) object;
            InetAddress inetAddress = address.getAddress();
            out.write('{');
            if (inetAddress != null) {
                out.writeFieldName("address");
                serializer.write(inetAddress);
                out.write(',');
            }
            out.writeFieldName("port");
            out.writeInt(address.getPort());
            out.write('}');
            return;
        } else if (object instanceof File) {
            strVal = ((File) object).getPath();
        } else if (object instanceof InetAddress) {
            strVal = ((InetAddress) object).getHostAddress();
        } else if (object instanceof TimeZone) {
            TimeZone timeZone = (TimeZone) object;
            strVal = timeZone.getID();
        } else if (object instanceof Currency) {
            Currency currency = (Currency) object;
            strVal = currency.getCurrencyCode();
        } else if (object instanceof JSONStreamAware) {
            JSONStreamAware aware = (JSONStreamAware) object;
            aware.writeJSONString(out);
            return;
        } else if (object instanceof Iterator) {
            Iterator<?> it = ((Iterator<?>) object);
            writeIterator(serializer, out, it);
            return;
        } else if (object instanceof Iterable) {
            Iterator<?> it = ((Iterable<?>) object).iterator();
            writeIterator(serializer, out, it);
            return;
        } else if (object instanceof Map.Entry) {
            Map.Entry entry = (Map.Entry) object;
            Object objKey = entry.getKey();
            Object objVal = entry.getValue();
            if (objKey instanceof String) {
                String key = (String) objKey;
                if (objVal instanceof String) {
                    String value = (String) objVal;
                    out.writeFieldValueStringWithDoubleQuoteCheck('{', key, value);
                } else {
                    out.write('{');
                    out.writeFieldName(key);
                    serializer.write(objVal);
                }
            } else {
                out.write('{');
                serializer.write(objKey);
                out.write(':');
                serializer.write(objVal);
            }
            out.write('}');
            return;
        } else if (object.getClass().getName().equals("net.sf.json.JSONNull")) {
            out.writeNull();
            return;
        } else if (object instanceof org.w3c.dom.Node) {
            strVal = toString((Node) object);
        } else {
            throw new JSONException("not support class : " + objClass);
        }
        out.writeString(strVal);
    }
    /** Serializes a DOM node to its XML text via an identity transform. */
    private static String toString(org.w3c.dom.Node node) {
        try {
            TransformerFactory transFactory = TransformerFactory.newInstance();
            Transformer transformer = transFactory.newTransformer();
            DOMSource domSource = new DOMSource(node);
            StringWriter out = new StringWriter();
            transformer.transform(domSource, new StreamResult(out));
            return out.toString();
        } catch (TransformerException e) {
            throw new JSONException("xml node to string error", e);
        }
    }
    /** Writes the iterator's elements as a JSON array. */
    protected void writeIterator(JSONSerializer serializer, SerializeWriter out, Iterator<?> it) {
        int i = 0;
        out.write('[');
        while (it.hasNext()) {
            if (i != 0) {
                out.write(',');
            }
            Object item = it.next();
            serializer.write(item);
            ++i;
        }
        out.write(']');
        return;
    }
    // Note: "deserialze" (sic) is the spelling mandated by the ObjectDeserializer interface.
    @SuppressWarnings("unchecked")
    public <T> T deserialze(DefaultJSONParser parser, Type clazz, Object fieldName) {
        JSONLexer lexer = parser.lexer;
        if (clazz == InetSocketAddress.class) {
            if (lexer.token() == JSONToken.NULL) {
                lexer.nextToken();
                return null;
            }
            parser.accept(JSONToken.LBRACE);
            InetAddress address = null;
            int port = 0;
            for (;;) {
                String key = lexer.stringVal();
                lexer.nextToken(JSONToken.COLON);
                if (key.equals("address")) {
                    parser.accept(JSONToken.COLON);
                    address = parser.parseObject(InetAddress.class);
                } else if (key.equals("port")) {
                    parser.accept(JSONToken.COLON);
                    if (lexer.token() != JSONToken.LITERAL_INT) {
                        throw new JSONException("port is not int");
                    }
                    port = lexer.intValue();
                    lexer.nextToken();
                } else {
                    // Unknown keys are parsed and discarded.
                    parser.accept(JSONToken.COLON);
                    parser.parse();
                }
                if (lexer.token() == JSONToken.COMMA) {
                    lexer.nextToken();
                    continue;
                }
                break;
            }
            parser.accept(JSONToken.RBRACE);
            return (T) new InetSocketAddress(address, port);
        }
        Object objVal;
        if (parser.resolveStatus == DefaultJSONParser.TypeNameRedirect) {
            // Typed form: {"@type": ..., "val": <string>}
            parser.resolveStatus = DefaultJSONParser.NONE;
            parser.accept(JSONToken.COMMA);
            if (lexer.token() == JSONToken.LITERAL_STRING) {
                if (!"val".equals(lexer.stringVal())) {
                    throw new JSONException("syntax error");
                }
                lexer.nextToken();
            } else {
                throw new JSONException("syntax error");
            }
            parser.accept(JSONToken.COLON);
            objVal = parser.parse();
            parser.accept(JSONToken.RBRACE);
        } else {
            objVal = parser.parse();
        }
        String strVal;
        if (objVal == null) {
            strVal = null;
        } else if (objVal instanceof String) {
            strVal = (String) objVal;
        } else {
            if (objVal instanceof JSONObject) {
                JSONObject jsonObject = (JSONObject) objVal;
                if (clazz == Currency.class) {
                    String currency = jsonObject.getString("currency");
                    if (currency != null) {
                        return (T) Currency.getInstance(currency);
                    }
                    String symbol = jsonObject.getString("currencyCode");
                    if (symbol != null) {
                        return (T) Currency.getInstance(symbol);
                    }
                }
                if (clazz == Map.Entry.class) {
                    return (T) jsonObject.entrySet().iterator().next();
                }
                return jsonObject.toJavaObject(clazz);
            }
            throw new JSONException("expect string");
        }
        if (strVal == null || strVal.length() == 0) {
            return null;
        }
        if (clazz == UUID.class) {
            return (T) UUID.fromString(strVal);
        }
        if (clazz == URI.class) {
            return (T) URI.create(strVal);
        }
        if (clazz == URL.class) {
            try {
                return (T) new URL(strVal);
            } catch (MalformedURLException e) {
                throw new JSONException("create url error", e);
            }
        }
        if (clazz == Pattern.class) {
            return (T) Pattern.compile(strVal);
        }
        if (clazz == Locale.class) {
            return (T) TypeUtils.toLocale(strVal);
        }
        if (clazz == SimpleDateFormat.class) {
            SimpleDateFormat dateFormat = new SimpleDateFormat(strVal, lexer.getLocale());
            dateFormat.setTimeZone(lexer.getTimeZone());
            return (T) dateFormat;
        }
        if (clazz == InetAddress.class || clazz == Inet4Address.class || clazz == Inet6Address.class) {
            try {
                return (T) InetAddress.getByName(strVal);
            } catch (UnknownHostException e) {
                throw new JSONException("deserialize inet address error", e);
            }
        }
        if (clazz == File.class) {
            // Reject traversal-style paths unless explicitly enabled via system property.
            if (strVal.indexOf("..") >= 0 && !FILE_RELATIVE_PATH_SUPPORT) {
                throw new JSONException("file relative path not support.");
            }
            return (T) new File(strVal);
        }
        if (clazz == TimeZone.class) {
            return (T) TimeZone.getTimeZone(strVal);
        }
        if (clazz instanceof ParameterizedType) {
            ParameterizedType parmeterizedType = (ParameterizedType) clazz;
            clazz = parmeterizedType.getRawType();
        }
        if (clazz == Class.class) {
            return (T) TypeUtils.loadClass(strVal, parser.getConfig().getDefaultClassLoader(), false);
        }
        if (clazz == Charset.class) {
            return (T) Charset.forName(strVal);
        }
        if (clazz == Currency.class) {
            return (T) Currency.getInstance(strVal);
        }
        if (clazz == JSONPath.class) {
            return (T) new JSONPath(strVal);
        }
        if (clazz instanceof Class) {
            String className = ((Class) clazz).getName();
            if (className.equals("java.nio.file.Path")) {
                try {
                    if (method_paths_get == null && !method_paths_get_error) {
                        Class<?> paths = TypeUtils.loadClass("java.nio.file.Paths");
                        method_paths_get = paths.getMethod("get", String.class, String[].class);
                    }
                    if (method_paths_get != null) {
                        return (T) method_paths_get.invoke(null, strVal, new String[0]);
                    }
                    throw new JSONException("Path deserialize error");
                } catch (NoSuchMethodException ex) {
                    // Running on a JDK without java.nio.file.Paths; remember and fall through.
                    method_paths_get_error = true;
                } catch (IllegalAccessException ex) {
                    throw new JSONException("Path deserialize error", ex);
                } catch (InvocationTargetException ex) {
                    throw new JSONException("Path deserialize error", ex);
                }
            }
            throw new JSONException("MiscCodec not support " + className);
        }
        throw new JSONException("MiscCodec not support " + clazz.toString());
    }
    public int getFastMatchToken() {
        return JSONToken.LITERAL_STRING;
    }
}
|
MiscCodec
|
java
|
quarkusio__quarkus
|
extensions/hibernate-orm/deployment/src/test/java/io/quarkus/hibernate/orm/specialmappings/NormalPointEntity.java
|
{
"start": 102,
"end": 313
}
|
class ____ extends PointEntity {
private String place;
public String getPlace() {
return place;
}
public void setPlace(String place) {
this.place = place;
}
}
|
NormalPointEntity
|
java
|
spring-projects__spring-framework
|
spring-aop/src/main/java/org/springframework/aop/support/StaticMethodMatcherPointcut.java
|
{
"start": 1187,
"end": 1692
}
|
class ____ extends StaticMethodMatcher implements Pointcut {
private ClassFilter classFilter = ClassFilter.TRUE;
/**
* Set the {@link ClassFilter} to use for this pointcut.
* Default is {@link ClassFilter#TRUE}.
*/
public void setClassFilter(ClassFilter classFilter) {
this.classFilter = classFilter;
}
@Override
public ClassFilter getClassFilter() {
return this.classFilter;
}
@Override
public final MethodMatcher getMethodMatcher() {
return this;
}
}
|
StaticMethodMatcherPointcut
|
java
|
apache__flink
|
flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/StreamingDeltaJoinOperatorFactory.java
|
{
"start": 1705,
"end": 4647
}
|
class ____ extends AbstractStreamOperatorFactory<RowData>
implements TwoInputStreamOperatorFactory<RowData, RowData, RowData>,
YieldingOperatorFactory<RowData> {
private final AsyncDeltaJoinRunner rightLookupTableAsyncFunction;
private final AsyncDeltaJoinRunner leftLookupTableAsyncFunction;
private final RowDataKeySelector leftJoinKeySelector;
private final RowDataKeySelector rightJoinKeySelector;
private final long timeout;
private final int capacity;
private final long leftSideCacheSize;
private final long rightSideCacheSize;
private final RowType leftStreamType;
private final RowType rightStreamType;
public StreamingDeltaJoinOperatorFactory(
AsyncDeltaJoinRunner rightLookupTableAsyncFunction,
AsyncDeltaJoinRunner leftLookupTableAsyncFunction,
RowDataKeySelector leftJoinKeySelector,
RowDataKeySelector rightJoinKeySelector,
long timeout,
int capacity,
long leftSideCacheSize,
long rightSideCacheSize,
RowType leftStreamType,
RowType rightStreamType) {
this.rightLookupTableAsyncFunction = rightLookupTableAsyncFunction;
this.leftLookupTableAsyncFunction = leftLookupTableAsyncFunction;
this.leftJoinKeySelector = leftJoinKeySelector;
this.rightJoinKeySelector = rightJoinKeySelector;
this.timeout = timeout;
this.capacity = capacity;
this.leftSideCacheSize = leftSideCacheSize;
this.rightSideCacheSize = rightSideCacheSize;
this.leftStreamType = leftStreamType;
this.rightStreamType = rightStreamType;
}
@Override
public <T extends StreamOperator<RowData>> T createStreamOperator(
StreamOperatorParameters<RowData> parameters) {
MailboxExecutor mailboxExecutor = getMailboxExecutor();
StreamingDeltaJoinOperator deltaJoinOperator =
new StreamingDeltaJoinOperator(
rightLookupTableAsyncFunction,
leftLookupTableAsyncFunction,
leftJoinKeySelector,
rightJoinKeySelector,
timeout,
capacity,
processingTimeService,
mailboxExecutor,
leftSideCacheSize,
rightSideCacheSize,
leftStreamType,
rightStreamType);
deltaJoinOperator.setup(
parameters.getContainingTask(),
parameters.getStreamConfig(),
parameters.getOutput());
return (T) deltaJoinOperator;
}
@Override
public Class<? extends StreamOperator<?>> getStreamOperatorClass(ClassLoader classLoader) {
return StreamingDeltaJoinOperator.class;
}
}
|
StreamingDeltaJoinOperatorFactory
|
java
|
quarkusio__quarkus
|
extensions/kafka-client/runtime/src/test/java/io/quarkus/kafka/client/serialization/ObjectMapperDeserializerTest.java
|
{
"start": 346,
"end": 1808
}
|
class ____ {
@Test
void shouldDeserializeEntity() {
MyEntity expected = new MyEntity(1, "entity1");
ObjectMapperDeserializer<MyEntity> deserializer = new ObjectMapperDeserializer<>(MyEntity.class);
MyEntity actual = deserializer.deserialize("topic", "{\"id\":1,\"name\":\"entity1\"}".getBytes());
assertNotNull(actual);
assertEquals(expected, actual);
}
@Test
void shouldDeserializeListOfEntities() {
TypeReference<List<MyEntity>> listType = new TypeReference<>() {
};
ObjectMapperDeserializer<List<MyEntity>> deserializer = new ObjectMapperDeserializer<>(listType);
List<MyEntity> actuals = deserializer.deserialize("topic",
"[{\"id\":1,\"name\":\"entity1\"},{\"id\":2,\"name\":\"entity2\"}]".getBytes());
assertNotNull(actuals);
assertEquals(2, actuals.size());
}
@Test
void shouldDeserializeNullAsNullString() {
ObjectMapperDeserializer<MyEntity> deserializer = new ObjectMapperDeserializer<>(MyEntity.class);
MyEntity results = deserializer.deserialize("topic", "null".getBytes());
assertNull(results);
}
@Test
void shouldDeserializeNullAsNull() {
ObjectMapperDeserializer<MyEntity> deserializer = new ObjectMapperDeserializer<>(MyEntity.class);
MyEntity results = deserializer.deserialize("topic", null);
assertNull(results);
}
}
|
ObjectMapperDeserializerTest
|
java
|
apache__camel
|
components/camel-kubernetes/src/generated/java/org/apache/camel/component/kubernetes/hpa/KubernetesHPAEndpointConfigurer.java
|
{
"start": 741,
"end": 13257
}
|
class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
KubernetesHPAEndpoint target = (KubernetesHPAEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "apiversion":
case "apiVersion": target.getConfiguration().setApiVersion(property(camelContext, java.lang.String.class, value)); return true;
case "bridgeerrorhandler":
case "bridgeErrorHandler": target.setBridgeErrorHandler(property(camelContext, boolean.class, value)); return true;
case "cacertdata":
case "caCertData": target.getConfiguration().setCaCertData(property(camelContext, java.lang.String.class, value)); return true;
case "cacertfile":
case "caCertFile": target.getConfiguration().setCaCertFile(property(camelContext, java.lang.String.class, value)); return true;
case "clientcertdata":
case "clientCertData": target.getConfiguration().setClientCertData(property(camelContext, java.lang.String.class, value)); return true;
case "clientcertfile":
case "clientCertFile": target.getConfiguration().setClientCertFile(property(camelContext, java.lang.String.class, value)); return true;
case "clientkeyalgo":
case "clientKeyAlgo": target.getConfiguration().setClientKeyAlgo(property(camelContext, java.lang.String.class, value)); return true;
case "clientkeydata":
case "clientKeyData": target.getConfiguration().setClientKeyData(property(camelContext, java.lang.String.class, value)); return true;
case "clientkeyfile":
case "clientKeyFile": target.getConfiguration().setClientKeyFile(property(camelContext, java.lang.String.class, value)); return true;
case "clientkeypassphrase":
case "clientKeyPassphrase": target.getConfiguration().setClientKeyPassphrase(property(camelContext, java.lang.String.class, value)); return true;
case "connectiontimeout":
case "connectionTimeout": target.getConfiguration().setConnectionTimeout(property(camelContext, java.lang.Integer.class, value)); return true;
case "crdgroup":
case "crdGroup": target.getConfiguration().setCrdGroup(property(camelContext, java.lang.String.class, value)); return true;
case "crdname":
case "crdName": target.getConfiguration().setCrdName(property(camelContext, java.lang.String.class, value)); return true;
case "crdplural":
case "crdPlural": target.getConfiguration().setCrdPlural(property(camelContext, java.lang.String.class, value)); return true;
case "crdscope":
case "crdScope": target.getConfiguration().setCrdScope(property(camelContext, java.lang.String.class, value)); return true;
case "crdversion":
case "crdVersion": target.getConfiguration().setCrdVersion(property(camelContext, java.lang.String.class, value)); return true;
case "dnsdomain":
case "dnsDomain": target.getConfiguration().setDnsDomain(property(camelContext, java.lang.String.class, value)); return true;
case "exceptionhandler":
case "exceptionHandler": target.setExceptionHandler(property(camelContext, org.apache.camel.spi.ExceptionHandler.class, value)); return true;
case "exchangepattern":
case "exchangePattern": target.setExchangePattern(property(camelContext, org.apache.camel.ExchangePattern.class, value)); return true;
case "kubernetesclient":
case "kubernetesClient": target.getConfiguration().setKubernetesClient(property(camelContext, io.fabric8.kubernetes.client.KubernetesClient.class, value)); return true;
case "labelkey":
case "labelKey": target.getConfiguration().setLabelKey(property(camelContext, java.lang.String.class, value)); return true;
case "labelvalue":
case "labelValue": target.getConfiguration().setLabelValue(property(camelContext, java.lang.String.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
case "namespace": target.getConfiguration().setNamespace(property(camelContext, java.lang.String.class, value)); return true;
case "oauthtoken":
case "oauthToken": target.getConfiguration().setOauthToken(property(camelContext, java.lang.String.class, value)); return true;
case "operation": target.getConfiguration().setOperation(property(camelContext, java.lang.String.class, value)); return true;
case "password": target.getConfiguration().setPassword(property(camelContext, java.lang.String.class, value)); return true;
case "poolsize":
case "poolSize": target.getConfiguration().setPoolSize(property(camelContext, int.class, value)); return true;
case "portname":
case "portName": target.getConfiguration().setPortName(property(camelContext, java.lang.String.class, value)); return true;
case "portprotocol":
case "portProtocol": target.getConfiguration().setPortProtocol(property(camelContext, java.lang.String.class, value)); return true;
case "resourcename":
case "resourceName": target.getConfiguration().setResourceName(property(camelContext, java.lang.String.class, value)); return true;
case "trustcerts":
case "trustCerts": target.getConfiguration().setTrustCerts(property(camelContext, java.lang.Boolean.class, value)); return true;
case "username": target.getConfiguration().setUsername(property(camelContext, java.lang.String.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "apiversion":
case "apiVersion": return java.lang.String.class;
case "bridgeerrorhandler":
case "bridgeErrorHandler": return boolean.class;
case "cacertdata":
case "caCertData": return java.lang.String.class;
case "cacertfile":
case "caCertFile": return java.lang.String.class;
case "clientcertdata":
case "clientCertData": return java.lang.String.class;
case "clientcertfile":
case "clientCertFile": return java.lang.String.class;
case "clientkeyalgo":
case "clientKeyAlgo": return java.lang.String.class;
case "clientkeydata":
case "clientKeyData": return java.lang.String.class;
case "clientkeyfile":
case "clientKeyFile": return java.lang.String.class;
case "clientkeypassphrase":
case "clientKeyPassphrase": return java.lang.String.class;
case "connectiontimeout":
case "connectionTimeout": return java.lang.Integer.class;
case "crdgroup":
case "crdGroup": return java.lang.String.class;
case "crdname":
case "crdName": return java.lang.String.class;
case "crdplural":
case "crdPlural": return java.lang.String.class;
case "crdscope":
case "crdScope": return java.lang.String.class;
case "crdversion":
case "crdVersion": return java.lang.String.class;
case "dnsdomain":
case "dnsDomain": return java.lang.String.class;
case "exceptionhandler":
case "exceptionHandler": return org.apache.camel.spi.ExceptionHandler.class;
case "exchangepattern":
case "exchangePattern": return org.apache.camel.ExchangePattern.class;
case "kubernetesclient":
case "kubernetesClient": return io.fabric8.kubernetes.client.KubernetesClient.class;
case "labelkey":
case "labelKey": return java.lang.String.class;
case "labelvalue":
case "labelValue": return java.lang.String.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
case "namespace": return java.lang.String.class;
case "oauthtoken":
case "oauthToken": return java.lang.String.class;
case "operation": return java.lang.String.class;
case "password": return java.lang.String.class;
case "poolsize":
case "poolSize": return int.class;
case "portname":
case "portName": return java.lang.String.class;
case "portprotocol":
case "portProtocol": return java.lang.String.class;
case "resourcename":
case "resourceName": return java.lang.String.class;
case "trustcerts":
case "trustCerts": return java.lang.Boolean.class;
case "username": return java.lang.String.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
KubernetesHPAEndpoint target = (KubernetesHPAEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "apiversion":
case "apiVersion": return target.getConfiguration().getApiVersion();
case "bridgeerrorhandler":
case "bridgeErrorHandler": return target.isBridgeErrorHandler();
case "cacertdata":
case "caCertData": return target.getConfiguration().getCaCertData();
case "cacertfile":
case "caCertFile": return target.getConfiguration().getCaCertFile();
case "clientcertdata":
case "clientCertData": return target.getConfiguration().getClientCertData();
case "clientcertfile":
case "clientCertFile": return target.getConfiguration().getClientCertFile();
case "clientkeyalgo":
case "clientKeyAlgo": return target.getConfiguration().getClientKeyAlgo();
case "clientkeydata":
case "clientKeyData": return target.getConfiguration().getClientKeyData();
case "clientkeyfile":
case "clientKeyFile": return target.getConfiguration().getClientKeyFile();
case "clientkeypassphrase":
case "clientKeyPassphrase": return target.getConfiguration().getClientKeyPassphrase();
case "connectiontimeout":
case "connectionTimeout": return target.getConfiguration().getConnectionTimeout();
case "crdgroup":
case "crdGroup": return target.getConfiguration().getCrdGroup();
case "crdname":
case "crdName": return target.getConfiguration().getCrdName();
case "crdplural":
case "crdPlural": return target.getConfiguration().getCrdPlural();
case "crdscope":
case "crdScope": return target.getConfiguration().getCrdScope();
case "crdversion":
case "crdVersion": return target.getConfiguration().getCrdVersion();
case "dnsdomain":
case "dnsDomain": return target.getConfiguration().getDnsDomain();
case "exceptionhandler":
case "exceptionHandler": return target.getExceptionHandler();
case "exchangepattern":
case "exchangePattern": return target.getExchangePattern();
case "kubernetesclient":
case "kubernetesClient": return target.getConfiguration().getKubernetesClient();
case "labelkey":
case "labelKey": return target.getConfiguration().getLabelKey();
case "labelvalue":
case "labelValue": return target.getConfiguration().getLabelValue();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
case "namespace": return target.getConfiguration().getNamespace();
case "oauthtoken":
case "oauthToken": return target.getConfiguration().getOauthToken();
case "operation": return target.getConfiguration().getOperation();
case "password": return target.getConfiguration().getPassword();
case "poolsize":
case "poolSize": return target.getConfiguration().getPoolSize();
case "portname":
case "portName": return target.getConfiguration().getPortName();
case "portprotocol":
case "portProtocol": return target.getConfiguration().getPortProtocol();
case "resourcename":
case "resourceName": return target.getConfiguration().getResourceName();
case "trustcerts":
case "trustCerts": return target.getConfiguration().getTrustCerts();
case "username": return target.getConfiguration().getUsername();
default: return null;
}
}
}
|
KubernetesHPAEndpointConfigurer
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/loader/internal/AliasConstantsHelper.java
|
{
"start": 182,
"end": 947
}
|
class ____ {
private static final int MAX_POOL_SIZE = 40;
private static final String[] pool = initPool( MAX_POOL_SIZE );
/**
* Returns the same as Integer.toString( i ) + '_'
* Strings might be returned from a pool of constants, when `i`
* is within the range of expected most commonly requested elements.
*/
public static String get(final int i) {
return i < MAX_POOL_SIZE && i >= 0
? pool[i]
: internalAlias( i );
}
private static String[] initPool(final int maxPoolSize) {
final String[] pool = new String[maxPoolSize];
for ( int i = 0; i < maxPoolSize; i++ ) {
pool[i] = internalAlias( i );
}
return pool;
}
private static String internalAlias(final int i) {
return Integer.toString( i ) + '_';
}
}
|
AliasConstantsHelper
|
java
|
alibaba__druid
|
core/src/main/java/com/alibaba/druid/sql/visitor/ExportParameterizedOutputVisitor.java
|
{
"start": 715,
"end": 1762
}
|
class ____ extends SQLASTOutputVisitor implements ExportParameterVisitor {
/**
* true= if require parameterized sql output
*/
private final boolean requireParameterizedOutput;
public ExportParameterizedOutputVisitor(final List<Object> parameters,
final StringBuilder appender,
final boolean wantParameterizedOutput) {
super(appender, true);
this.parameters = parameters;
this.requireParameterizedOutput = wantParameterizedOutput;
}
public ExportParameterizedOutputVisitor() {
this(new ArrayList<Object>());
}
public ExportParameterizedOutputVisitor(final List<Object> parameters) {
this(parameters, new StringBuilder(), false);
}
public ExportParameterizedOutputVisitor(final StringBuilder appender) {
this(new ArrayList<Object>(), appender, true);
}
public List<Object> getParameters() {
return parameters;
}
}
|
ExportParameterizedOutputVisitor
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java
|
{
"start": 39346,
"end": 39421
}
|
class ____ extends Plugin {
public PluginOther() {}
}
}
|
PluginOther
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java
|
{
"start": 89852,
"end": 91839
}
|
class ____ implements Iterable<ShardRouting> {
private final Set<ShardRouting> shards = Sets.newHashSetWithExpectedSize(4); // expect few shards of same index to be allocated on
// same node
private int highestPrimary = -1;
ModelIndex() {}
public int highestPrimary() {
if (highestPrimary == -1) {
int maxId = -1;
for (ShardRouting shard : shards) {
if (shard.primary()) {
maxId = Math.max(maxId, shard.id());
}
}
return highestPrimary = maxId;
}
return highestPrimary;
}
public int numShards() {
return shards.size();
}
@Override
public Iterator<ShardRouting> iterator() {
return shards.iterator();
}
public void removeShard(ShardRouting shard) {
highestPrimary = -1;
assert shards.contains(shard) : "Shard not allocated on current node: " + shard;
shards.remove(shard);
}
public void addShard(ShardRouting shard) {
highestPrimary = -1;
assert shards.contains(shard) == false : "Shard already allocated on current node: " + shard;
shards.add(shard);
}
public boolean containsShard(ShardRouting shard) {
return shards.contains(shard);
}
}
/**
* A NodeSorter sorts the set of nodes for a single partition using the {@link WeightFunction}
* for that partition. In partitioned cluster topologies there will be one for each partition
* (e.g. search/indexing in stateless). By default, there is a single partition containing
* a single weight function that applies to all nodes and shards.
*
* @see BalancingWeightsFactory
*/
public static final
|
ModelIndex
|
java
|
spring-projects__spring-framework
|
spring-webmvc/src/test/java/org/springframework/web/servlet/mvc/method/annotation/UriTemplateServletAnnotationControllerHandlerMethodTests.java
|
{
"start": 19459,
"end": 19676
}
|
class ____ {
@RequestMapping("{hotel}")
void handleHotel(@PathVariable String hotel, Writer writer) throws IOException {
writer.write("test-" + hotel);
}
}
@Controller
public static
|
ImplicitSubPathController
|
java
|
elastic__elasticsearch
|
x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/expression/predicate/operator/comparison/InsensitiveBinaryComparisonPipe.java
|
{
"start": 805,
"end": 2221
}
|
class ____ extends BinaryPipe {
private final InsensitiveBinaryComparisonOperation operation;
public InsensitiveBinaryComparisonPipe(
Source source,
Expression expression,
Pipe left,
Pipe right,
InsensitiveBinaryComparisonOperation operation
) {
super(source, expression, left, right);
this.operation = operation;
}
@Override
protected NodeInfo<InsensitiveBinaryComparisonPipe> info() {
return NodeInfo.create(this, InsensitiveBinaryComparisonPipe::new, expression(), left(), right(), operation);
}
@Override
protected InsensitiveBinaryComparisonPipe replaceChildren(Pipe left, Pipe right) {
return new InsensitiveBinaryComparisonPipe(source(), expression(), left, right, operation);
}
@Override
public InsensitiveBinaryComparisonProcessor asProcessor() {
return new InsensitiveBinaryComparisonProcessor(left().asProcessor(), right().asProcessor(), operation);
}
@Override
public int hashCode() {
return Objects.hash(super.hashCode(), operation);
}
@Override
public boolean equals(Object obj) {
if (super.equals(obj)) {
InsensitiveBinaryComparisonPipe other = (InsensitiveBinaryComparisonPipe) obj;
return Objects.equals(operation, other.operation);
}
return false;
}
}
|
InsensitiveBinaryComparisonPipe
|
java
|
spring-projects__spring-framework
|
spring-core/src/test/java/org/springframework/core/annotation/AnnotationUtilsTests.java
|
{
"start": 65829,
"end": 66231
}
|
interface ____ {
@AliasFor(annotation = ImplicitAliasesForAliasPairContextConfig.class, attribute = "xmlFile")
String xml() default "";
@AliasFor(annotation = ImplicitAliasesForAliasPairContextConfig.class, attribute = "groovyScript")
String groovy() default "";
}
@TransitiveImplicitAliasesForAliasPairContextConfig(xml = "test.xml")
static
|
TransitiveImplicitAliasesForAliasPairContextConfig
|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/internal/schedulers/IoScheduler.java
|
{
"start": 3105,
"end": 7586
}
|
class ____ implements Runnable {
private final long keepAliveTime;
private final ConcurrentLinkedQueue<ThreadWorker> expiringWorkerQueue;
final CompositeDisposable allWorkers;
private final ScheduledExecutorService evictorService;
private final Future<?> evictorTask;
private final ThreadFactory threadFactory;
CachedWorkerPool(long keepAliveTime, TimeUnit unit, ThreadFactory threadFactory) {
this.keepAliveTime = unit != null ? unit.toNanos(keepAliveTime) : 0L;
this.expiringWorkerQueue = new ConcurrentLinkedQueue<>();
this.allWorkers = new CompositeDisposable();
this.threadFactory = threadFactory;
ScheduledExecutorService evictor = null;
Future<?> task = null;
if (unit != null) {
evictor = Executors.newScheduledThreadPool(1, EVICTOR_THREAD_FACTORY);
task = evictor.scheduleWithFixedDelay(this, this.keepAliveTime, this.keepAliveTime, TimeUnit.NANOSECONDS);
}
evictorService = evictor;
evictorTask = task;
}
@Override
public void run() {
evictExpiredWorkers(expiringWorkerQueue, allWorkers);
}
ThreadWorker get() {
if (allWorkers.isDisposed()) {
return SHUTDOWN_THREAD_WORKER;
}
while (!expiringWorkerQueue.isEmpty()) {
ThreadWorker threadWorker = expiringWorkerQueue.poll();
if (threadWorker != null) {
return threadWorker;
}
}
// No cached worker found, so create a new one.
ThreadWorker w = new ThreadWorker(threadFactory);
allWorkers.add(w);
return w;
}
void release(ThreadWorker threadWorker) {
// Refresh expire time before putting worker back in pool
threadWorker.setExpirationTime(now() + keepAliveTime);
expiringWorkerQueue.offer(threadWorker);
}
static void evictExpiredWorkers(ConcurrentLinkedQueue<ThreadWorker> expiringWorkerQueue, CompositeDisposable allWorkers) {
if (!expiringWorkerQueue.isEmpty()) {
long currentTimestamp = now();
for (ThreadWorker threadWorker : expiringWorkerQueue) {
if (threadWorker.getExpirationTime() <= currentTimestamp) {
if (expiringWorkerQueue.remove(threadWorker)) {
allWorkers.remove(threadWorker);
}
} else {
// Queue is ordered with the worker that will expire first in the beginning, so when we
// find a non-expired worker we can stop evicting.
break;
}
}
}
}
static long now() {
return System.nanoTime();
}
void shutdown() {
allWorkers.dispose();
if (evictorTask != null) {
evictorTask.cancel(true);
}
if (evictorService != null) {
evictorService.shutdownNow();
}
}
}
public IoScheduler() {
this(WORKER_THREAD_FACTORY);
}
/**
* Constructs an IoScheduler with the given thread factory and starts the pool of workers.
* @param threadFactory thread factory to use for creating worker threads. Note that this takes precedence over any
* system properties for configuring new thread creation. Cannot be null.
*/
public IoScheduler(ThreadFactory threadFactory) {
this.threadFactory = threadFactory;
this.pool = new AtomicReference<>(NONE);
start();
}
@Override
public void start() {
CachedWorkerPool update = new CachedWorkerPool(KEEP_ALIVE_TIME, KEEP_ALIVE_UNIT, threadFactory);
if (!pool.compareAndSet(NONE, update)) {
update.shutdown();
}
}
@Override
public void shutdown() {
CachedWorkerPool curr = pool.getAndSet(NONE);
if (curr != NONE) {
curr.shutdown();
}
}
@NonNull
@Override
public Worker createWorker() {
return new EventLoopWorker(pool.get());
}
public int size() {
return pool.get().allWorkers.size();
}
static final
|
CachedWorkerPool
|
java
|
alibaba__druid
|
core/src/main/java/com/alibaba/druid/sql/dialect/oracle/ast/OracleSQLObject.java
|
{
"start": 789,
"end": 875
}
|
interface ____ extends SQLObject {
void accept0(OracleASTVisitor v);
}
|
OracleSQLObject
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/matchers/AnnotationMatcherTest.java
|
{
"start": 4894,
"end": 5511
}
|
class ____ {}
""");
assertCompiles(
nodeWithAnnotationMatches(
/* shouldMatch= */ false,
new AnnotationMatcher<Tree>(AT_LEAST_ONE, isType("com.google.WrongAnnotation"))));
assertCompiles(
nodeWithAnnotationMatches(
/* shouldMatch= */ false,
new AnnotationMatcher<Tree>(ALL, isType("com.google.WrongAnnotation"))));
}
@Test
public void shouldNotMatchNonmatchingSingleFullyQualifiedAnnotationOnClass() {
writeFile(
"A.java",
"""
package com.google.foo;
@com.google.SampleAnnotation1
public
|
A
|
java
|
spring-projects__spring-framework
|
spring-core/src/test/java/org/springframework/util/xml/StaxEventHandlerTests.java
|
{
"start": 860,
"end": 1218
}
|
class ____ extends AbstractStaxHandlerTests {
@Override
protected AbstractStaxHandler createStaxHandler(Result result) throws XMLStreamException {
XMLOutputFactory outputFactory = XMLOutputFactory.newFactory();
XMLEventWriter eventWriter = outputFactory.createXMLEventWriter(result);
return new StaxEventHandler(eventWriter);
}
}
|
StaxEventHandlerTests
|
java
|
elastic__elasticsearch
|
server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDeciderIT.java
|
{
"start": 791,
"end": 1787
}
|
class ____ extends ESIntegTestCase {
public void testDefault() {
internalCluster().startNode();
assertEquals(
ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS,
ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.get(
internalCluster().getInstance(ClusterService.class).getSettings()
)
);
}
public void testDefaultLegacyAllocator() {
internalCluster().startNode(
Settings.builder().put(ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING.getKey(), ClusterModule.BALANCED_ALLOCATOR)
);
assertEquals(
ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_ALL_ACTIVE,
ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.get(
internalCluster().getInstance(ClusterService.class).getSettings()
)
);
}
}
|
ClusterRebalanceAllocationDeciderIT
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/sql/oracle/OracleListAggTest.java
|
{
"start": 439,
"end": 2855
}
|
class ____ extends OracleTest {
public void test_0() throws Exception {
String sql = "SELECT prod_id, LISTAGG(cust_first_name||' '||cust_last_name, '; ') \n"
+ " WITHIN GROUP (ORDER BY amount_sold DESC) cust_list\n"
+ "FROM sales, customers\n"
+ "WHERE sales.cust_id = customers.cust_id AND cust_gender = 'M' \n"
+ " AND cust_credit_limit = 15000 AND prod_id BETWEEN 15 AND 18 \n"
+ " AND channel_id = 2 AND time_id > '01-JAN-01'\n"
+ "GROUP BY prod_id;";
OracleStatementParser parser = new OracleStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement stmt = statementList.get(0);
print(statementList);
assertEquals(1, statementList.size());
assertEquals("SELECT prod_id\n" +
"\t, LISTAGG(cust_first_name || ' ' || cust_last_name, '; ') WITHIN GROUP (ORDER BY amount_sold DESC) AS cust_list\n" +
"FROM sales, customers\n" +
"WHERE sales.cust_id = customers.cust_id\n" +
"\tAND cust_gender = 'M'\n" +
"\tAND cust_credit_limit = 15000\n" +
"\tAND prod_id BETWEEN 15 AND 18\n" +
"\tAND channel_id = 2\n" +
"\tAND time_id > '01-JAN-01'\n" +
"GROUP BY prod_id;",
SQLUtils.toSQLString(stmt, JdbcConstants.ORACLE));
OracleSchemaStatVisitor visitor = new OracleSchemaStatVisitor();
stmt.accept(visitor);
System.out.println("Tables : " + visitor.getTables());
System.out.println("fields : " + visitor.getColumns());
System.out.println("coditions : " + visitor.getConditions());
System.out.println("relationships : " + visitor.getRelationships());
assertEquals(2, visitor.getTables().size());
assertEquals(10, visitor.getColumns().size());
assertTrue(visitor.getTables().containsKey(new TableStat.Name("sales")));
assertTrue(visitor.getTables().containsKey(new TableStat.Name("customers")));
assertTrue(visitor.getColumns().contains(new TableStat.Column("sales", "cust_id")));
assertTrue(visitor.getColumns().contains(new TableStat.Column("customers", "cust_id")));
}
}
|
OracleListAggTest
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/resourcemanager/slotmanager/ClusterResourceStatisticsProvider.java
|
{
"start": 1052,
"end": 2150
}
|
interface ____ {
/** Get total number of registered slots. */
int getNumberRegisteredSlots();
/** Get number of registered slots from the TaskManager with the given instance id. */
int getNumberRegisteredSlotsOf(InstanceID instanceId);
/** Get total number of free slots. */
int getNumberFreeSlots();
/** Get number of free slots from the TaskManager with the given instance id. */
int getNumberFreeSlotsOf(InstanceID instanceId);
/** Get profile of total registered resources. */
ResourceProfile getRegisteredResource();
/** Get profile of registered resources from the TaskManager with the given instance id. */
ResourceProfile getRegisteredResourceOf(InstanceID instanceId);
/** Get profile of total free resources. */
ResourceProfile getFreeResource();
/** Get profile of free resources from the TaskManager with the given instance id. */
ResourceProfile getFreeResourceOf(InstanceID instanceId);
/** Get profile of total pending resources. */
ResourceProfile getPendingResource();
}
|
ClusterResourceStatisticsProvider
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/webmonitor/history/ArchivedJsonTest.java
|
{
"start": 999,
"end": 1874
}
|
class ____ {
@Test
void testEquals() {
ArchivedJson original = new ArchivedJson("path", "json");
ArchivedJson twin = new ArchivedJson("path", "json");
ArchivedJson identicalPath = new ArchivedJson("path", "hello");
ArchivedJson identicalJson = new ArchivedJson("hello", "json");
assertThat(original).isEqualTo(original);
assertThat(twin).isEqualTo(original);
assertThat(identicalPath).isNotEqualTo(original);
assertThat(identicalJson).isNotEqualTo(original);
}
@Test
void testHashCode() {
ArchivedJson original = new ArchivedJson("path", "json");
ArchivedJson twin = new ArchivedJson("path", "json");
assertThat(original).isEqualTo(original);
assertThat(twin).isEqualTo(original);
assertThat(twin).hasSameHashCodeAs(original);
}
}
|
ArchivedJsonTest
|
java
|
apache__avro
|
lang/java/ipc/src/test/java/org/apache/avro/io/Perf.java
|
{
"start": 40780,
"end": 41900
}
|
class ____ extends GenericTest {
public GenericNested() throws IOException {
super("GenericNested_", NESTED_RECORD_SCHEMA);
}
@Override
void genSourceData() {
sourceData = generateGenericNested(schema, count);
}
}
static GenericRecord[] generateGenericNested(Schema schema, int count) {
Random r = newRandom();
GenericRecord[] sourceData = new GenericRecord[count];
Schema doubleSchema = schema.getFields().get(0).schema();
for (int i = 0; i < sourceData.length; i++) {
GenericRecord rec = new GenericData.Record(schema);
GenericRecord inner;
inner = new GenericData.Record(doubleSchema);
inner.put(0, r.nextDouble());
rec.put(0, inner);
inner = new GenericData.Record(doubleSchema);
inner.put(0, r.nextDouble());
rec.put(1, inner);
inner = new GenericData.Record(doubleSchema);
inner.put(0, r.nextDouble());
rec.put(2, inner);
rec.put(3, r.nextInt());
rec.put(4, r.nextInt());
rec.put(5, r.nextInt());
sourceData[i] = rec;
}
return sourceData;
}
static
|
GenericNested
|
java
|
apache__camel
|
dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/GoogleMailStreamComponentBuilderFactory.java
|
{
"start": 1392,
"end": 1916
}
|
interface ____ {
/**
* Google Mail Stream (camel-google-mail)
* Poll for incoming messages in Google Mail.
*
* Category: cloud,mail
* Since: 2.22
* Maven coordinates: org.apache.camel:camel-google-mail
*
* @return the dsl builder
*/
static GoogleMailStreamComponentBuilder googleMailStream() {
return new GoogleMailStreamComponentBuilderImpl();
}
/**
* Builder for the Google Mail Stream component.
*/
|
GoogleMailStreamComponentBuilderFactory
|
java
|
quarkusio__quarkus
|
extensions/smallrye-reactive-messaging-rabbitmq/deployment/src/main/java/io/quarkus/smallrye/reactivemessaging/rabbitmq/deployment/devui/RabbitDevUIProcessor.java
|
{
"start": 588,
"end": 1300
}
|
class ____ {
@BuildStep(onlyIf = IsLocalDevelopment.class)
AdditionalBeanBuildItem beans() {
return AdditionalBeanBuildItem.unremovableOf(RabbitHttpPortFinder.class);
}
@BuildStep(onlyIf = IsLocalDevelopment.class)
void createCard(BuildProducer<CardPageBuildItem> cardPageBuildItemBuildProducer) {
final CardPageBuildItem card = new CardPageBuildItem();
card.setCustomCard("qwc-rabbitmq-card.js");
cardPageBuildItemBuildProducer.produce(card);
}
@BuildStep(onlyIf = IsLocalDevelopment.class)
JsonRPCProvidersBuildItem registerJsonRpcBackend() {
return new JsonRPCProvidersBuildItem(RabbitMqJsonRpcService.class);
}
}
|
RabbitDevUIProcessor
|
java
|
alibaba__nacos
|
plugin/datasource/src/main/java/com/alibaba/nacos/plugin/datasource/constants/TableConstant.java
|
{
"start": 736,
"end": 1484
}
|
class ____ {
public static final String CONFIG_INFO = "config_info";
public static final String CONFIG_INFO_BETA = "config_info_beta";
public static final String CONFIG_INFO_TAG = "config_info_tag";
public static final String CONFIG_INFO_GRAY = "config_info_gray";
public static final String CONFIG_TAGS_RELATION = "config_tags_relation";
public static final String GROUP_CAPACITY = "group_capacity";
public static final String HIS_CONFIG_INFO = "his_config_info";
public static final String TENANT_CAPACITY = "tenant_capacity";
public static final String TENANT_INFO = "tenant_info";
public static final String MIGRATE_CONFIG = "migrate_config";
}
|
TableConstant
|
java
|
grpc__grpc-java
|
grpclb/src/main/java/io/grpc/grpclb/SecretGrpclbNameResolverProvider.java
|
{
"start": 2011,
"end": 3461
}
|
class ____ extends NameResolverProvider {
private static final String SCHEME = "dns";
private static final boolean IS_ANDROID = InternalServiceProviders
.isAndroid(SecretGrpclbNameResolverProvider.class.getClassLoader());
@Override
public GrpclbNameResolver newNameResolver(URI targetUri, Args args) {
if (SCHEME.equals(targetUri.getScheme())) {
String targetPath = Preconditions.checkNotNull(targetUri.getPath(), "targetPath");
Preconditions.checkArgument(
targetPath.startsWith("/"),
"the path component (%s) of the target (%s) must start with '/'",
targetPath, targetUri);
String name = targetPath.substring(1);
return new GrpclbNameResolver(
targetUri.getAuthority(),
name,
args,
GrpcUtil.SHARED_CHANNEL_EXECUTOR,
Stopwatch.createUnstarted(),
IS_ANDROID);
} else {
return null;
}
}
@Override
public String getDefaultScheme() {
return SCHEME;
}
@Override
protected boolean isAvailable() {
return true;
}
@Override
public int priority() {
// Must be higher than DnsNameResolverProvider#priority.
return 6;
}
@Override
public Collection<Class<? extends SocketAddress>> getProducedSocketAddressTypes() {
return Collections.singleton(InetSocketAddress.class);
}
}
}
|
Provider
|
java
|
ReactiveX__RxJava
|
src/test/java/io/reactivex/rxjava3/validators/InternalWrongNaming.java
|
{
"start": 816,
"end": 6387
}
|
class ____ {
static void checkInternalOperatorNaming(String baseClassName, String consumerClassName, String... ignore) throws Exception {
File f = TestHelper.findSource(baseClassName);
if (f == null) {
return;
}
String rxdir = f.getParentFile().getParentFile().getAbsolutePath().replace('\\', '/');
if (!rxdir.endsWith("/")) {
rxdir += "/";
}
rxdir += "internal/operators/" + baseClassName.toLowerCase() + "/";
File[] list = new File(rxdir).listFiles();
if (list != null && list.length != 0) {
StringBuilder fail = new StringBuilder();
int count = 0;
outer:
for (File g : list) {
for (String s : ignore) {
if (g.getName().equals(s + ".java")) {
continue outer;
}
}
List<String> lines = readFile(g);
for (int i = 0; i < lines.size(); i++) {
String line = lines.get(i);
if (line.contains(consumerClassName)) {
fail.append("java.lang.RuntimeException: " + g.getName() + " mentions " + consumerClassName)
.append("\r\n at io.reactivex.internal.operators.")
.append(baseClassName.toLowerCase()).append(".").append(g.getName().replace(".java", ""))
.append(".method(").append(g.getName()).append(":").append(i + 1).append(")\r\n\r\n");
count++;
}
}
}
if (fail.length() != 0) {
System.out.println(fail);
System.out.println();
System.out.println("Total: " + count);
throw new AssertionError(fail.toString());
}
}
}
static List<String> readFile(File u) throws Exception {
List<String> lines = new ArrayList<>();
BufferedReader in = new BufferedReader(new FileReader(u));
try {
for (;;) {
String line = in.readLine();
if (line == null) {
break;
}
lines.add(line);
}
} finally {
in.close();
}
return lines;
}
@Test
public void observableNoSubscriber() throws Exception {
checkInternalOperatorNaming("Observable", "Subscriber",
"ObservableFromPublisher"
);
}
@Test
public void observableNoSubscribers() throws Exception {
checkInternalOperatorNaming("Observable", "subscribers");
}
@Test
public void observableNoSubscription() throws Exception {
checkInternalOperatorNaming("Observable", "Subscription",
"ObservableFromPublisher", "ObservableDelaySubscriptionOther");
}
@Test
public void observableNoPublisher() throws Exception {
checkInternalOperatorNaming("Observable", "Publisher",
"ObservableFromPublisher");
}
@Test
public void observableNoFlowable() throws Exception {
checkInternalOperatorNaming("Observable", "Flowable", "ObservableFromPublisher");
}
@Test
public void observableProducer() throws Exception {
checkInternalOperatorNaming("Observable", "Producer");
}
@Test
public void observableProducers() throws Exception {
checkInternalOperatorNaming("Observable", "producers");
}
@Test
public void flowableNoProducer() throws Exception {
checkInternalOperatorNaming("Flowable", "Producer");
}
@Test
public void flowableNoProducers() throws Exception {
checkInternalOperatorNaming("Flowable", "producers");
}
@Test
public void flowableNoUnsubscrib() throws Exception {
checkInternalOperatorNaming("Flowable", "unsubscrib");
}
@Test
public void observableNoUnsubscrib() throws Exception {
checkInternalOperatorNaming("Observable", "unsubscrib");
}
@Test
public void flowableNoObserver() throws Exception {
checkInternalOperatorNaming("Flowable", "Observer",
"FlowableFromObservable",
"FlowableLastSingle",
"FlowableAnySingle",
"FlowableAllSingle",
"FlowableToListSingle",
"FlowableCollectSingle",
"FlowableCountSingle",
"FlowableElementAtMaybe",
"FlowableElementAtSingle",
"FlowableElementAtMaybePublisher",
"FlowableElementAtSinglePublisher",
"FlowableFromCompletable",
"FlowableSingleSingle",
"FlowableSingleMaybe",
"FlowableLastMaybe",
"FlowableIgnoreElementsCompletable",
"FlowableReduceMaybe",
"FlowableReduceWithSingle",
"FlowableReduceSeedSingle",
"FlowableFlatMapCompletable",
"FlowableFlatMapCompletableCompletable",
"FlowableFlatMapSingle",
"FlowableFlatMapMaybe",
"FlowableSequenceEqualSingle",
"FlowableConcatWithSingle",
"FlowableConcatWithMaybe",
"FlowableConcatWithCompletable",
"FlowableMergeWithSingle",
"FlowableMergeWithMaybe",
"FlowableMergeWithCompletable"
);
}
}
|
InternalWrongNaming
|
java
|
quarkusio__quarkus
|
core/deployment/src/test/java/io/quarkus/deployment/runnerjar/PackageAppTestBase.java
|
{
"start": 8146,
"end": 10923
}
|
class
____(MAIN_CLS, mainAttrs.getValue("Main-Class"));
// assert the Class-Path contains all the entries in the lib dir
final String cp = mainAttrs.getValue("Class-Path");
assertNotNull(cp);
String[] cpEntries = Arrays.stream(cp.trim().split("\\s+"))
.filter(s -> !s.trim().isEmpty())
.toArray(String[]::new);
assertEquals(actualBootLib.size(), cpEntries.length);
for (String entry : cpEntries) {
assertThat(entry).startsWith(LIB_BOOT_PREFIX);
String entryFile = entry.substring(LIB_BOOT_PREFIX.length());
assertThat(actualBootLib).contains(entryFile);
}
}
assertLibDirectoryContent(actualMainLib);
}
protected void assertLibDirectoryContent(Set<String> actualMainLib) {
assertThat(actualMainLib).containsExactlyInAnyOrderElementsOf(expectedLib);
}
protected Set<String> getDirContent(final Path dir) throws IOException {
final Set<String> actualBootLib = new HashSet<>();
try (Stream<Path> stream = Files.list(dir)) {
final Iterator<Path> i = stream.iterator();
while (i.hasNext()) {
actualBootLib.add(i.next().getFileName().toString());
}
}
return actualBootLib;
}
private static void assertExtensionDependencies(ApplicationModel appModel, String[] expectedExtensions) {
final Set<String> expectedRuntime = new HashSet<>(expectedExtensions.length);
final Set<String> expectedDeployment = new HashSet<>(expectedExtensions.length);
for (String rtId : expectedExtensions) {
expectedRuntime.add(TsArtifact.DEFAULT_GROUP_ID + ":" + rtId + "::jar:" + TsArtifact.DEFAULT_VERSION);
expectedDeployment
.add(TsArtifact.DEFAULT_GROUP_ID + ":" + rtId + "-deployment" + "::jar:" + TsArtifact.DEFAULT_VERSION);
}
final Collection<ResolvedDependency> rtDeps = appModel.getRuntimeDependencies();
for (Dependency dep : rtDeps) {
final String coords = dep.toGACTVString();
assertTrue(expectedRuntime.contains(coords), coords);
}
assertEquals(expectedExtensions.length, rtDeps.size());
final List<Dependency> deploymentOnly = appModel.getDependencies().stream()
.filter(d -> d.isDeploymentCp() && !d.isRuntimeCp()).collect(Collectors.toList());
for (Dependency dep : deploymentOnly) {
final String coords = dep.toGACTVString();
assertTrue(expectedDeployment.contains(coords), coords);
}
assertEquals(expectedExtensions.length, deploymentOnly.size());
}
}
|
assertEquals
|
java
|
apache__avro
|
lang/java/thrift/src/test/java/org/apache/avro/thrift/test/Foo.java
|
{
"start": 38906,
"end": 48969
}
|
enum ____ implements org.apache.thrift.TFieldIdEnum {
NUM1((short) 1, "num1"), NUM2((short) 2, "num2");
private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
static {
for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
byName.put(field.getFieldName(), field);
}
}
/**
* Find the _Fields constant that matches fieldId, or null if its not found.
*/
@org.apache.thrift.annotation.Nullable
public static _Fields findByThriftId(int fieldId) {
switch (fieldId) {
case 1: // NUM1
return NUM1;
case 2: // NUM2
return NUM2;
default:
return null;
}
}
/**
* Find the _Fields constant that matches fieldId, throwing an exception if it
* is not found.
*/
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null)
throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
return fields;
}
/**
* Find the _Fields constant that matches name, or null if its not found.
*/
@org.apache.thrift.annotation.Nullable
public static _Fields findByName(java.lang.String name) {
return byName.get(name);
}
private final short _thriftId;
private final java.lang.String _fieldName;
_Fields(short thriftId, java.lang.String fieldName) {
_thriftId = thriftId;
_fieldName = fieldName;
}
public short getThriftFieldId() {
return _thriftId;
}
public java.lang.String getFieldName() {
return _fieldName;
}
}
// isset id assignments
private static final int __NUM1_ISSET_ID = 0;
private static final int __NUM2_ISSET_ID = 1;
private byte __isset_bitfield = 0;
public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(
_Fields.class);
tmpMap.put(_Fields.NUM1,
new org.apache.thrift.meta_data.FieldMetaData("num1", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
tmpMap.put(_Fields.NUM2,
new org.apache.thrift.meta_data.FieldMetaData("num2", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(add_args.class, metaDataMap);
}
public add_args() {
}
public add_args(int num1, int num2) {
this();
this.num1 = num1;
setNum1IsSet(true);
this.num2 = num2;
setNum2IsSet(true);
}
/**
* Performs a deep copy on <i>other</i>.
*/
public add_args(add_args other) {
__isset_bitfield = other.__isset_bitfield;
this.num1 = other.num1;
this.num2 = other.num2;
}
public add_args deepCopy() {
return new add_args(this);
}
@Override
public void clear() {
setNum1IsSet(false);
this.num1 = 0;
setNum2IsSet(false);
this.num2 = 0;
}
public int getNum1() {
return this.num1;
}
public void setNum1(int num1) {
this.num1 = num1;
setNum1IsSet(true);
}
public void unsetNum1() {
__isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __NUM1_ISSET_ID);
}
/**
* Returns true if field num1 is set (has been assigned a value) and false
* otherwise
*/
public boolean isSetNum1() {
return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __NUM1_ISSET_ID);
}
public void setNum1IsSet(boolean value) {
__isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __NUM1_ISSET_ID, value);
}
public int getNum2() {
return this.num2;
}
public void setNum2(int num2) {
this.num2 = num2;
setNum2IsSet(true);
}
public void unsetNum2() {
__isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __NUM2_ISSET_ID);
}
/**
* Returns true if field num2 is set (has been assigned a value) and false
* otherwise
*/
public boolean isSetNum2() {
return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __NUM2_ISSET_ID);
}
public void setNum2IsSet(boolean value) {
__isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __NUM2_ISSET_ID, value);
}
public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
switch (field) {
case NUM1:
if (value == null) {
unsetNum1();
} else {
setNum1((java.lang.Integer) value);
}
break;
case NUM2:
if (value == null) {
unsetNum2();
} else {
setNum2((java.lang.Integer) value);
}
break;
}
}
@org.apache.thrift.annotation.Nullable
public java.lang.Object getFieldValue(_Fields field) {
switch (field) {
case NUM1:
return getNum1();
case NUM2:
return getNum2();
}
throw new java.lang.IllegalStateException();
}
/**
* Returns true if field corresponding to fieldID is set (has been assigned a
* value) and false otherwise
*/
public boolean isSet(_Fields field) {
if (field == null) {
throw new java.lang.IllegalArgumentException();
}
switch (field) {
case NUM1:
return isSetNum1();
case NUM2:
return isSetNum2();
}
throw new java.lang.IllegalStateException();
}
@Override
public boolean equals(java.lang.Object that) {
if (that instanceof add_args)
return this.equals((add_args) that);
return false;
}
public boolean equals(add_args that) {
if (that == null)
return false;
if (this == that)
return true;
boolean this_present_num1 = true;
boolean that_present_num1 = true;
if (this_present_num1 || that_present_num1) {
if (!(this_present_num1 && that_present_num1))
return false;
if (this.num1 != that.num1)
return false;
}
boolean this_present_num2 = true;
boolean that_present_num2 = true;
if (this_present_num2 || that_present_num2) {
if (!(this_present_num2 && that_present_num2))
return false;
if (this.num2 != that.num2)
return false;
}
return true;
}
@Override
public int hashCode() {
int hashCode = 1;
hashCode = hashCode * 8191 + num1;
hashCode = hashCode * 8191 + num2;
return hashCode;
}
@Override
public int compareTo(add_args other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
lastComparison = java.lang.Boolean.compare(isSetNum1(), other.isSetNum1());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetNum1()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.num1, other.num1);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison = java.lang.Boolean.compare(isSetNum2(), other.isSetNum2());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetNum2()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.num2, other.num2);
if (lastComparison != 0) {
return lastComparison;
}
}
return 0;
}
@org.apache.thrift.annotation.Nullable
public _Fields fieldForId(int fieldId) {
return _Fields.findByThriftId(fieldId);
}
public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
scheme(iprot).read(iprot, this);
}
public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
scheme(oprot).write(oprot, this);
}
@Override
public java.lang.String toString() {
java.lang.StringBuilder sb = new java.lang.StringBuilder("add_args(");
boolean first = true;
sb.append("num1:");
sb.append(this.num1);
first = false;
if (!first)
sb.append(", ");
sb.append("num2:");
sb.append(this.num2);
first = false;
sb.append(")");
return sb.toString();
}
public void validate() throws org.apache.thrift.TException {
// check for required fields
// check for sub-struct validity
}
private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
try {
write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
try {
// it doesn't seem like you should have to do this, but java serialization is
// wacky, and doesn't call the default constructor.
__isset_bitfield = 0;
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
private static
|
_Fields
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateDetectorAction.java
|
{
"start": 1256,
"end": 2894
}
|
class ____ extends LegacyActionRequest implements ToXContentObject {
private Detector detector;
public static Request parseRequest(XContentParser parser) {
Detector detector = Detector.STRICT_PARSER.apply(parser, null).build();
return new Request(detector);
}
public Request() {
this.detector = null;
}
public Request(Detector detector) {
this.detector = detector;
}
public Request(StreamInput in) throws IOException {
super(in);
detector = new Detector(in);
}
public Detector getDetector() {
return detector;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
detector.writeTo(out);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
detector.toXContent(builder, params);
return builder;
}
@Override
public int hashCode() {
return Objects.hash(detector);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Request other = (Request) obj;
return Objects.equals(detector, other.detector);
}
}
}
|
Request
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/LiteProtoToStringTest.java
|
{
"start": 3444,
"end": 4041
}
|
class ____ {
private void test(GeneratedMessageLite message) {
atVerbose().log(message.toString());
}
public Test atVerbose() {
return this;
}
public Test log(String s) {
return this;
}
}
""")
.doTest();
}
@Test
public void androidLogAtInfoOrFiner_noWarning() {
compilationHelper
.addSourceLines(
"Test.java",
"""
import com.google.protobuf.GeneratedMessageLite;
|
Test
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.