language stringclasses 1
value | repo stringclasses 60
values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | spring-projects__spring-framework | spring-beans/src/main/java/org/springframework/beans/factory/config/SetFactoryBean.java | {
"start": 1173,
"end": 1540
} | class ____ extends AbstractFactoryBean<Set<Object>> {
private @Nullable Set<?> sourceSet;
@SuppressWarnings("rawtypes")
private @Nullable Class<? extends Set> targetSetClass;
/**
* Set the source Set, typically populated via XML "set" elements.
*/
public void setSourceSet(Set<?> sourceSet) {
this.sourceSet = sourceSet;
}
/**
* Set the | SetFactoryBean |
java | quarkusio__quarkus | extensions/redis-client/runtime/src/main/java/io/quarkus/redis/runtime/client/config/ProxyConfig.java | {
"start": 224,
"end": 1684
} | interface ____ {
/**
* Set proxy username.
* Honored only when {@code quarkus.redis.tcp.proxy-options.host} is set.
*
* @deprecated use {@code quarkus.redis.tcp.proxy-configuration-name} and {@code quarkus.proxy.*}
*/
@Deprecated
Optional<String> username();
/**
* Set proxy password.
* Honored only when {@code quarkus.redis.tcp.proxy-options.host} is set.
*
* @deprecated use {@code quarkus.redis.tcp.proxy-configuration-name} and {@code quarkus.proxy.*}
*/
@Deprecated
Optional<String> password();
/**
* Set proxy port. Defaults to 3128.
* Honored only when {@code quarkus.redis.tcp.proxy-options.host} is set.
*
* @deprecated use {@code quarkus.redis.tcp.proxy-configuration-name} and {@code quarkus.proxy.*}
*/
@WithDefault("3128")
@Deprecated
int port();
/**
* Set proxy host.
*
* @deprecated use {@code quarkus.redis.tcp.proxy-configuration-name} and {@code quarkus.proxy.*}
*/
@Deprecated
Optional<String> host();
/**
* Set proxy type.
* Accepted values are: {@code HTTP} (default), {@code SOCKS4} and {@code SOCKS5}.
* Honored only when {@code quarkus.redis.tcp.proxy-options.host} is set.
*
* @deprecated use {@code quarkus.redis.tcp.proxy-configuration-name} and {@code quarkus.proxy.*}
*/
@WithDefault("http")
@Deprecated
ProxyType type();
}
| ProxyConfig |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/deser/generic/GenericTest2.java | {
"start": 221,
"end": 982
} | class ____ extends TestCase {
public void test_for_bingyang() throws Exception {
String text = "{\"count\":123,\"index\":7,\"items\":[{\"id\":234,\"latitude\":2.5,\"longtitude\":3.7}]}";
PageBean<ActiveBase> pageBean = JSON.parseObject(text, new TypeReference<PageBean<ActiveBase>>() {});
Assert.assertNotNull(pageBean);
Assert.assertEquals(123, pageBean.getCount());
Assert.assertEquals(7, pageBean.getIndex());
Assert.assertNotNull(pageBean.getItems());
Assert.assertEquals(1, pageBean.getItems().size());
ActiveBase active = pageBean.getItems().get(0);
Assert.assertEquals(new Integer(234), active.getId());
Assert.assertTrue(3.7D == active.getLongtitude());
Assert.assertTrue(2.5D == active.getLatitude());
}
public static | GenericTest2 |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/method/configuration/PrePostMethodSecurityConfigurationTests.java | {
"start": 68824,
"end": 69122
} | class ____ {
@Bean
@Role(BeanDefinition.ROLE_INFRASTRUCTURE)
Advisor returnBeforePreFilter() {
return returnAdvisor(AuthorizationInterceptorsOrder.PRE_FILTER.getOrder() + OffsetConfig.OFFSET - 1);
}
}
@Configuration
@Import(OffsetConfig.class)
static | ReturnBeforeOffsetPreFilterConfig |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/pattern/ThrowableExtendedStackTraceRenderer.java | {
"start": 1373,
"end": 3131
} | class ____
extends ThrowableStackTraceRenderer<ThrowableExtendedStackTraceRenderer.ExtendedContext> {
ThrowableExtendedStackTraceRenderer(final List<String> ignoredPackageNames, final int maxLineCount) {
super(ignoredPackageNames, maxLineCount);
}
@Override
ExtendedContext createContext(final Throwable throwable) {
return ExtendedContext.ofThrowable(throwable);
}
@Override
void renderStackTraceElement(
final StringBuilder buffer,
final StackTraceElement stackTraceElement,
final ExtendedContext context,
final String prefix,
final String lineSeparator) {
// Short-circuit on ignored stack trace elements
final boolean stackTraceElementIgnored = isStackTraceElementIgnored(stackTraceElement);
if (stackTraceElementIgnored) {
context.ignoredStackTraceElementCount += 1;
return;
}
// Render the suppressed stack trace element count
if (context.ignoredStackTraceElementCount > 0) {
renderSuppressedCount(buffer, context, prefix, lineSeparator);
context.ignoredStackTraceElementCount = 0;
}
// Render the stack trace element
acquireLineCapacity(context);
buffer.append(prefix);
buffer.append("\tat ");
buffer.append(stackTraceElement);
final ClassResourceInfo classResourceInfo =
context.classResourceInfoByName.get(stackTraceElement.getClassName());
if (classResourceInfo != null) {
buffer.append(' ');
classResourceInfo.render(buffer);
}
buffer.append(lineSeparator);
}
static final | ThrowableExtendedStackTraceRenderer |
java | mockito__mockito | mockito-core/src/test/java/org/mockitousage/stubbing/StrictStubbingEndToEndTest.java | {
"start": 3632,
"end": 4202
} | class ____ {
@Mock IMethods mock;
MockitoSession mockito =
Mockito.mockitoSession()
.initMocks(this)
.strictness(Strictness.STRICT_STUBS)
.startMocking();
@After
public void after() {
mockito.finishMocking();
}
@Test
public void unnecessary_stubbing() {
given(mock.simpleMethod("1")).willReturn("one");
throw new IllegalStateException();
}
}
public static | UnnecessaryStubbing |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/jsontype/impl/TypeDeserializerBase.java | {
"start": 6238,
"end": 6571
} | class ____ will not work as expected)
*/
if ((_baseType != null)
&& _baseType.getClass() == type.getClass()) {
/* 09-Aug-2015, tatu: Not sure if the second part of the check makes sense;
* but it appears to check that JavaType impl | which |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/annotations/ConverterRegistration.java | {
"start": 1036,
"end": 1956
} | class ____ register
*/
Class<? extends AttributeConverter<?,?>> converter();
/**
* The domain type to which this converter should be applied. This allows
* refining the domain type associated with the converter e.g. to apply to
* a subtype.
* <p>
* With {@link #autoApply()} set to true, this will effectively override converters
* defined with {@link Converter#autoApply()} set to {@code false} and auto-apply them.
* <p>
* With {@link #autoApply()} set to false, this will effectively override converters
* defined with {@link Converter#autoApply()} set to {@code true} and disable auto-apply
* for them.
*/
Class<?> domainType() default void.class;
/**
* Should the registered converter be auto applied for
* converting values of its reported domain type?
* <p>
* Defaults to true as that is the more common use case for this annotation.
*/
boolean autoApply() default true;
}
| to |
java | micronaut-projects__micronaut-core | http-client-core/src/main/java/io/micronaut/http/client/multipart/MultipartBody.java | {
"start": 1043,
"end": 2015
} | class ____ {
private final List<Part<?>> parts;
/**
* Initialize with all the parts.
*
* @param parts The List of all parts to be sent in the body of Netty multipart request, such a File, String, Bytes etc.
*/
private MultipartBody(List<Part<?>> parts) {
this.parts = parts;
}
/**
* Create a list of data objects using the given factory.
*
* @param factory The factory used to create the data objects.
* @return List of data objects
* @param <T> The data type
*/
@Internal
public <T> List<T> getData(MultipartDataFactory<T> factory) {
List<T> data = new ArrayList<>(parts.size());
for (Part<?> part : parts) {
data.add(part.getData(factory));
}
return data;
}
/**
* @return A Builder to build MultipartBody.
*/
public static Builder builder() {
return new Builder();
}
/**
* A builder | MultipartBody |
java | quarkusio__quarkus | extensions/funqy/funqy-knative-events/deployment/src/test/java/io/quarkus/funqy/test/PrimitiveFunctions.java | {
"start": 128,
"end": 551
} | class ____ {
@Funq
public String toLowerCase(String val) {
return val.toLowerCase();
}
@Funq
public int doubleIt(int val) {
return val * 2;
}
@Funq
public void noop() {
}
@Funq
@CloudEventMapping(trigger = "echo", responseType = "echo.output", responseSource = "echo")
public String annotatedEcho(String echo) {
return echo;
}
}
| PrimitiveFunctions |
java | apache__kafka | group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupCoordinatorConfigTest.java | {
"start": 15571,
"end": 15917
} | class ____.lang.Object for configuration group.consumer.assignors: Expected a comma separated list.",
assertThrows(ConfigException.class, () -> createConfig(configs)).getMessage());
configs.clear();
configs.put(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(Object.class));
assertEquals(" | java |
java | quarkusio__quarkus | extensions/agroal/deployment/src/main/java/io/quarkus/agroal/deployment/AggregatedDataSourceBuildTimeConfigBuildItem.java | {
"start": 281,
"end": 1521
} | class ____ extends MultiBuildItem {
private final String name;
private final DataSourceBuildTimeConfig dataSourceConfig;
private final DataSourceJdbcBuildTimeConfig jdbcConfig;
private final String dbKind;
private final String resolvedDriverClass;
AggregatedDataSourceBuildTimeConfigBuildItem(String name, DataSourceBuildTimeConfig dataSourceConfig,
DataSourceJdbcBuildTimeConfig jdbcConfig,
String dbKind,
String resolvedDriverClass) {
this.name = name;
this.dataSourceConfig = dataSourceConfig;
this.jdbcConfig = jdbcConfig;
this.dbKind = dbKind;
this.resolvedDriverClass = resolvedDriverClass;
}
public String getName() {
return name;
}
public DataSourceBuildTimeConfig getDataSourceConfig() {
return dataSourceConfig;
}
public DataSourceJdbcBuildTimeConfig getJdbcConfig() {
return jdbcConfig;
}
public boolean isDefault() {
return DataSourceUtil.isDefault(name);
}
public String getDbKind() {
return dbKind;
}
public String getResolvedDriverClass() {
return resolvedDriverClass;
}
}
| AggregatedDataSourceBuildTimeConfigBuildItem |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/engine/extension/OrderedClassTests.java | {
"start": 10563,
"end": 10823
} | class ____ {
@BeforeEach
void trackInvocations(TestInfo testInfo) {
var testClass = testInfo.getTestClass().orElseThrow();
callSequence.add(testClass.getSimpleName());
}
@Test
void a() {
}
}
@Order(2)
@DisplayName("Z")
static | BaseTestCase |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/web/server/handler/ResponseStatusExceptionHandler.java | {
"start": 1596,
"end": 4233
} | class ____ implements WebExceptionHandler {
private static final Log logger = LogFactory.getLog(ResponseStatusExceptionHandler.class);
private @Nullable Log warnLogger;
/**
* Set the log category for warn logging.
* <p>Default is no warn logging. Specify this setting to activate warn
* logging into a specific category.
* @since 5.1
* @see org.apache.commons.logging.LogFactory#getLog(String)
* @see java.util.logging.Logger#getLogger(String)
*/
public void setWarnLogCategory(String loggerName) {
this.warnLogger = LogFactory.getLog(loggerName);
}
@Override
public Mono<Void> handle(ServerWebExchange exchange, Throwable ex) {
if (!updateResponse(exchange.getResponse(), ex)) {
return Mono.error(ex);
}
// Mirrors AbstractHandlerExceptionResolver in spring-webmvc...
String logPrefix = exchange.getLogPrefix();
if (this.warnLogger != null && this.warnLogger.isWarnEnabled()) {
this.warnLogger.warn(logPrefix + formatError(ex, exchange.getRequest()));
}
else if (logger.isDebugEnabled()) {
logger.debug(logPrefix + formatError(ex, exchange.getRequest()));
}
return exchange.getResponse().setComplete();
}
private String formatError(Throwable ex, ServerHttpRequest request) {
String className = ex.getClass().getSimpleName();
String message = LogFormatUtils.formatValue(ex.getMessage(), -1, true);
String path = request.getURI().getRawPath();
return "Resolved [" + className + ": " + message + "] for HTTP " + request.getMethod() + " " + path;
}
private boolean updateResponse(ServerHttpResponse response, Throwable ex) {
boolean result = false;
HttpStatusCode statusCode = determineStatus(ex);
int code = (statusCode != null ? statusCode.value() : -1);
if (code != -1) {
if (response.setStatusCode(statusCode)) {
if (ex instanceof ResponseStatusException responseStatusException) {
responseStatusException.getHeaders().forEach((name, values) ->
values.forEach(value -> response.getHeaders().add(name, value)));
}
result = true;
}
}
else {
Throwable cause = ex.getCause();
if (cause != null) {
result = updateResponse(response, cause);
}
}
return result;
}
/**
* Determine the HTTP status for the given exception.
* @param ex the exception to check
* @return the associated HTTP status code, or {@code null} if it can't be
* derived
*/
protected @Nullable HttpStatusCode determineStatus(Throwable ex) {
if (ex instanceof ResponseStatusException responseStatusException) {
return responseStatusException.getStatusCode();
}
else {
return null;
}
}
}
| ResponseStatusExceptionHandler |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitReadUnCached.java | {
"start": 1364,
"end": 3339
} | class ____ extends TestParallelReadUtil {
private static TemporarySocketDirectory sockDir;
@BeforeAll
static public void setupCluster() throws Exception {
if (DomainSocket.getLoadingFailureReason() != null) return;
sockDir = new TemporarySocketDirectory();
HdfsConfiguration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
new File(sockDir.getDir(),
"TestParallelShortCircuitReadUnCached._PORT.sock").getAbsolutePath());
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
// Enabling data transfer encryption should have no effect when using
// short-circuit local reads. This is a regression test for HDFS-5353.
conf.setBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY, true);
conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
false);
conf.setBoolean(HdfsClientConfigKeys.
DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, true);
// We want to test reading from stale sockets.
conf.setInt(DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY, 1);
conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY,
5 * 60 * 1000);
conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 32);
// Avoid using the FileInputStreamCache.
conf.setInt(HdfsClientConfigKeys.Read.ShortCircuit.STREAMS_CACHE_SIZE_KEY,
0);
DomainSocket.disableBindPathValidation();
DFSInputStream.tcpReadsDisabledForTesting = true;
setupCluster(1, conf);
}
@BeforeEach
public void before() {
assumeThat(DomainSocket.getLoadingFailureReason()).isNull();
}
@AfterAll
static public void teardownCluster() throws Exception {
if (DomainSocket.getLoadingFailureReason() != null) return;
sockDir.close();
TestParallelReadUtil.teardownCluster();
}
} | TestParallelShortCircuitReadUnCached |
java | google__guava | android/guava/src/com/google/common/collect/CompactLinkedHashSet.java | {
"start": 2303,
"end": 9574
} | class ____<E extends @Nullable Object> extends CompactHashSet<E> {
/** Creates an empty {@code CompactLinkedHashSet} instance. */
public static <E extends @Nullable Object> CompactLinkedHashSet<E> create() {
return new CompactLinkedHashSet<>();
}
/**
* Creates a <i>mutable</i> {@code CompactLinkedHashSet} instance containing the elements of the
* given collection in the order returned by the collection's iterator.
*
* @param collection the elements that the set should contain
* @return a new {@code CompactLinkedHashSet} containing those elements (minus duplicates)
*/
public static <E extends @Nullable Object> CompactLinkedHashSet<E> create(
Collection<? extends E> collection) {
CompactLinkedHashSet<E> set = createWithExpectedSize(collection.size());
set.addAll(collection);
return set;
}
/**
* Creates a {@code CompactLinkedHashSet} instance containing the given elements in unspecified
* order.
*
* @param elements the elements that the set should contain
* @return a new {@code CompactLinkedHashSet} containing those elements (minus duplicates)
*/
@SafeVarargs
public static <E extends @Nullable Object> CompactLinkedHashSet<E> create(E... elements) {
CompactLinkedHashSet<E> set = createWithExpectedSize(elements.length);
Collections.addAll(set, elements);
return set;
}
/**
* Creates a {@code CompactLinkedHashSet} instance, with a high enough "initial capacity" that it
* <i>should</i> hold {@code expectedSize} elements without rebuilding internal data structures.
*
* @param expectedSize the number of elements you expect to add to the returned set
* @return a new, empty {@code CompactLinkedHashSet} with enough capacity to hold {@code
* expectedSize} elements without resizing
* @throws IllegalArgumentException if {@code expectedSize} is negative
*/
public static <E extends @Nullable Object> CompactLinkedHashSet<E> createWithExpectedSize(
int expectedSize) {
return new CompactLinkedHashSet<>(expectedSize);
}
private static final int ENDPOINT = -2;
// TODO(user): predecessors and successors should be collocated (reducing cache misses).
// Might also explore collocating all of [hash, next, predecessor, successor] fields of an
// entry in a *single* long[], though that reduces the maximum size of the set by a factor of 2
/**
* Pointer to the predecessor of an entry in insertion order. ENDPOINT indicates a node is the
* first node in insertion order; all values at indices ≥ {@link #size()} are UNSET.
*/
private transient int @Nullable [] predecessor;
/**
* Pointer to the successor of an entry in insertion order. ENDPOINT indicates a node is the last
* node in insertion order; all values at indices ≥ {@link #size()} are UNSET.
*/
private transient int @Nullable [] successor;
/** Pointer to the first node in the linked list, or {@code ENDPOINT} if there are no entries. */
private transient int firstEntry;
/** Pointer to the last node in the linked list, or {@code ENDPOINT} if there are no entries. */
private transient int lastEntry;
CompactLinkedHashSet() {}
CompactLinkedHashSet(int expectedSize) {
super(expectedSize);
}
@Override
void init(int expectedSize) {
super.init(expectedSize);
this.firstEntry = ENDPOINT;
this.lastEntry = ENDPOINT;
}
@Override
int allocArrays() {
int expectedSize = super.allocArrays();
this.predecessor = new int[expectedSize];
this.successor = new int[expectedSize];
return expectedSize;
}
@Override
@CanIgnoreReturnValue
Set<E> convertToHashFloodingResistantImplementation() {
Set<E> result = super.convertToHashFloodingResistantImplementation();
this.predecessor = null;
this.successor = null;
return result;
}
/*
* For discussion of the safety of the following methods for operating on predecessors and
* successors, see the comments near the end of CompactHashMap, noting that the methods here call
* requirePredecessors() and requireSuccessors(), which are defined at the end of this file.
*/
private int getPredecessor(int entry) {
return requirePredecessors()[entry] - 1;
}
@Override
int getSuccessor(int entry) {
return requireSuccessors()[entry] - 1;
}
private void setSuccessor(int entry, int succ) {
requireSuccessors()[entry] = succ + 1;
}
private void setPredecessor(int entry, int pred) {
requirePredecessors()[entry] = pred + 1;
}
private void setSucceeds(int pred, int succ) {
if (pred == ENDPOINT) {
firstEntry = succ;
} else {
setSuccessor(pred, succ);
}
if (succ == ENDPOINT) {
lastEntry = pred;
} else {
setPredecessor(succ, pred);
}
}
@Override
void insertEntry(int entryIndex, @ParametricNullness E object, int hash, int mask) {
super.insertEntry(entryIndex, object, hash, mask);
setSucceeds(lastEntry, entryIndex);
setSucceeds(entryIndex, ENDPOINT);
}
@Override
void moveLastEntry(int dstIndex, int mask) {
int srcIndex = size() - 1;
super.moveLastEntry(dstIndex, mask);
setSucceeds(getPredecessor(dstIndex), getSuccessor(dstIndex));
if (dstIndex < srcIndex) {
setSucceeds(getPredecessor(srcIndex), dstIndex);
setSucceeds(dstIndex, getSuccessor(srcIndex));
}
requirePredecessors()[srcIndex] = 0;
requireSuccessors()[srcIndex] = 0;
}
@Override
void resizeEntries(int newCapacity) {
super.resizeEntries(newCapacity);
predecessor = Arrays.copyOf(requirePredecessors(), newCapacity);
successor = Arrays.copyOf(requireSuccessors(), newCapacity);
}
@Override
int firstEntryIndex() {
return firstEntry;
}
@Override
int adjustAfterRemove(int indexBeforeRemove, int indexRemoved) {
return (indexBeforeRemove >= size()) ? indexRemoved : indexBeforeRemove;
}
@Override
public @Nullable Object[] toArray() {
return ObjectArrays.toArrayImpl(this);
}
@Override
@SuppressWarnings("nullness") // b/192354773 in our checker affects toArray declarations
public <T extends @Nullable Object> T[] toArray(T[] a) {
return ObjectArrays.toArrayImpl(this, a);
}
@Override
public void clear() {
if (needsAllocArrays()) {
return;
}
this.firstEntry = ENDPOINT;
this.lastEntry = ENDPOINT;
// Either both arrays are null or neither is, but we check both to satisfy the nullness checker.
if (predecessor != null && successor != null) {
Arrays.fill(predecessor, 0, size(), 0);
Arrays.fill(successor, 0, size(), 0);
}
super.clear();
}
/*
* For discussion of the safety of the following methods, see the comments near the end of
* CompactHashMap.
*/
private int[] requirePredecessors() {
return requireNonNull(predecessor);
}
private int[] requireSuccessors() {
return requireNonNull(successor);
}
/*
* We don't define getPredecessor+getSuccessor and setPredecessor+setSuccessor here because
* they're defined above -- including logic to add and subtract 1 to map between the values stored
* in the predecessor/successor arrays and the indexes in the elements array that they identify.
*/
}
| CompactLinkedHashSet |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/RestEndpointBuilderFactory.java | {
"start": 23303,
"end": 23590
} | interface ____
extends
AdvancedRestEndpointConsumerBuilder,
AdvancedRestEndpointProducerBuilder {
default RestEndpointBuilder basic() {
return (RestEndpointBuilder) this;
}
}
public | AdvancedRestEndpointBuilder |
java | spring-projects__spring-framework | spring-jdbc/src/main/java/org/springframework/jdbc/datasource/embedded/ConnectionProperties.java | {
"start": 1111,
"end": 1657
} | class ____ use to connect to the database.
* @param driverClass the jdbc driver class
*/
void setDriverClass(Class<? extends Driver> driverClass);
/**
* Set the JDBC connection URL for the database.
* @param url the connection url
*/
void setUrl(String url);
/**
* Set the username to use to connect to the database.
* @param username the username
*/
void setUsername(String username);
/**
* Set the password to use to connect to the database.
* @param password the password
*/
void setPassword(String password);
}
| to |
java | hibernate__hibernate-orm | tooling/hibernate-gradle-plugin/src/test/java/org/hibernate/orm/tooling/gradle/TestsBase.java | {
"start": 2886,
"end": 3738
} | class ____ enhanced
final ClassLoader classLoader = Helper.toClassLoader( classesDir );
TestHelper.verifyEnhanced( classLoader, "TheEntity" );
}
{
System.out.println( "Starting second execution ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" );
final GradleRunner gradleRunner = TestHelper.usingGradleRunner()
.withProjectDir( projectDir.toFile() )
.withArguments( compileTaskName, "--stacktrace", "--no-build-cache", "--configuration-cache" );
final BuildResult result = gradleRunner.build();
final BuildTask task = result.task( ":" + compileTaskName );
assertThat( task ).isNotNull();
assertThat( task.getOutcome() ).isEqualTo( TaskOutcome.UP_TO_DATE );
// and again
final ClassLoader classLoader = Helper.toClassLoader( classesDir );
TestHelper.verifyEnhanced( classLoader, "TheEntity" );
}
}
}
| is |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/completable/CompletableTimerTest.java | {
"start": 948,
"end": 1642
} | class ____ extends RxJavaTest {
@Test
public void timer() {
final TestScheduler testScheduler = new TestScheduler();
final AtomicLong atomicLong = new AtomicLong();
Completable.timer(2, TimeUnit.SECONDS, testScheduler).subscribe(new Action() {
@Override
public void run() throws Exception {
atomicLong.incrementAndGet();
}
});
assertEquals(0, atomicLong.get());
testScheduler.advanceTimeBy(1, TimeUnit.SECONDS);
assertEquals(0, atomicLong.get());
testScheduler.advanceTimeBy(1, TimeUnit.SECONDS);
assertEquals(1, atomicLong.get());
}
}
| CompletableTimerTest |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/jsontype/TestPolymorphicWithDefaultImpl.java | {
"start": 4162,
"end": 10078
} | class ____ {
public AsProperty value;
}
/*
/**********************************************************
/* Unit tests, deserialization
/**********************************************************
*/
private final ObjectMapper MAPPER = new ObjectMapper();
@Test
public void testDeserializationWithObject() throws Exception
{
Inter inter = MAPPER.readerFor(Inter.class).readValue("{\"type\": \"mine\", \"blah\": [\"a\", \"b\", \"c\"]}");
assertTrue(inter instanceof MyInter);
assertFalse(inter instanceof LegacyInter);
assertEquals(Arrays.asList("a", "b", "c"), ((MyInter) inter).blah);
}
@Test
public void testDeserializationWithString() throws Exception
{
Inter inter = MAPPER.readerFor(Inter.class).readValue("\"a,b,c,d\"");
assertTrue(inter instanceof LegacyInter);
assertEquals(Arrays.asList("a", "b", "c", "d"), ((MyInter) inter).blah);
}
@Test
public void testDeserializationWithArray() throws Exception
{
Inter inter = MAPPER.readerFor(Inter.class).readValue("[\"a\", \"b\", \"c\", \"d\"]");
assertTrue(inter instanceof LegacyInter);
assertEquals(Arrays.asList("a", "b", "c", "d"), ((MyInter) inter).blah);
}
@Test
public void testDeserializationWithArrayOfSize2() throws Exception
{
Inter inter = MAPPER.readerFor(Inter.class).readValue("[\"a\", \"b\"]");
assertTrue(inter instanceof LegacyInter);
assertEquals(Arrays.asList("a", "b"), ((MyInter) inter).blah);
}
// [databind#148]
@Test
public void testDefaultAsVoid() throws Exception
{
// 07-Mar-2018, tatu: Specifically, use of `Void` should infer that unknown type
// values should become `null`s
Object ob = MAPPER.readerFor(DefaultWithVoidAsDefault.class).readValue("{ }");
assertNull(ob);
ob = MAPPER.readerFor(DefaultWithVoidAsDefault.class).readValue("{ \"bogus\":3 }");
assertNull(ob);
}
// [databind#148]
@Test
public void testBadTypeAsNull() throws Exception
{
ObjectReader r = MAPPER.readerFor(MysteryPolymorphic.class)
.without(DeserializationFeature.FAIL_ON_INVALID_SUBTYPE);
Object ob = r.readValue("{}");
assertNull(ob);
ob = r.readValue("{ \"whatever\":13}");
assertNull(ob);
}
// [databind#511]
@Test
public void testInvalidTypeId511() throws Exception {
ObjectReader reader = MAPPER.reader().without(
DeserializationFeature.FAIL_ON_INVALID_SUBTYPE,
DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES,
DeserializationFeature.FAIL_ON_IGNORED_PROPERTIES
);
String json = "{\"many\":[{\"sub1\":{\"a\":\"foo\"}},{\"sub2\":{\"b\":\"bar\"}}]}" ;
Good goodResult = reader.forType(Good.class).readValue(json) ;
assertNotNull(goodResult) ;
Bad badResult = reader.forType(Bad.class).readValue(json);
assertNotNull(badResult);
}
// [databind#656]
@Test
public void testDefaultImplWithObjectWrapper() throws Exception
{
BaseFor656 value = MAPPER.readValue(a2q("{'foobar':{'a':3}}"), BaseFor656.class);
assertNotNull(value);
assertEquals(ImplFor656.class, value.getClass());
assertEquals(3, ((ImplFor656) value).a);
}
@Test
public void testUnknownTypeIDRecovery() throws Exception
{
ObjectReader reader = MAPPER.readerFor(CallRecord.class).without(
DeserializationFeature.FAIL_ON_INVALID_SUBTYPE);
String json = a2q("{'version':0.0,'application':'123',"
+"'item':{'type':'xevent','location':'location1'},"
+"'item2':{'type':'event','location':'location1'}}");
// can't read item2 - which is valid
CallRecord r = reader.readValue(json);
assertNull(r.item);
assertNotNull(r.item2);
json = a2q("{'item':{'type':'xevent','location':'location1'}, 'version':0.0,'application':'123'}");
CallRecord r3 = reader.readValue(json);
assertNull(r3.item);
assertEquals("123", r3.application);
}
@Test
public void testUnknownClassAsSubtype() throws Exception
{
ObjectMapper mapper = jsonMapperBuilder()
.configure(DeserializationFeature.FAIL_ON_INVALID_SUBTYPE, false)
.build();
BaseWrapper w = mapper.readValue(a2q
("{'value':{'clazz':'com.foobar.Nothing'}}"),
BaseWrapper.class);
assertNotNull(w);
assertNull(w.value);
}
@Test
public void testWithoutEmptyStringAsNullObject1533() throws Exception
{
ObjectReader r = MAPPER.readerFor(AsPropertyWrapper.class)
.without(DeserializationFeature.ACCEPT_EMPTY_STRING_AS_NULL_OBJECT);
try {
r.readValue("{ \"value\": \"\" }");
fail("Expected InvalidTypeIdException");
} catch (InvalidTypeIdException e) {
verifyException(e, "missing type id property 'type'");
}
}
// [databind#1533]
@Test
public void testWithEmptyStringAsNullObject1533() throws Exception
{
ObjectReader r = MAPPER.readerFor(AsPropertyWrapper.class)
.with(DeserializationFeature.ACCEPT_EMPTY_STRING_AS_NULL_OBJECT);
AsPropertyWrapper wrapper = r.readValue("{ \"value\": \"\" }");
assertNull(wrapper.value);
}
/*
/**********************************************************
/* Unit tests, serialization
/**********************************************************
*/
/*
@Test
public void testDontWriteIfDefaultImpl() throws Exception {
String json = MAPPER.writeValueAsString(new MyInter());
assertEquals("{\"blah\":null}", json);
}
*/
}
| AsPropertyWrapper |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialContainsCartesianPointDocValuesAndSourceEvaluator.java | {
"start": 3948,
"end": 4831
} | class ____ implements EvalOperator.ExpressionEvaluator.Factory {
private final Source source;
private final EvalOperator.ExpressionEvaluator.Factory left;
private final EvalOperator.ExpressionEvaluator.Factory right;
public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory left,
EvalOperator.ExpressionEvaluator.Factory right) {
this.source = source;
this.left = left;
this.right = right;
}
@Override
public SpatialContainsCartesianPointDocValuesAndSourceEvaluator get(DriverContext context) {
return new SpatialContainsCartesianPointDocValuesAndSourceEvaluator(source, left.get(context), right.get(context), context);
}
@Override
public String toString() {
return "SpatialContainsCartesianPointDocValuesAndSourceEvaluator[" + "left=" + left + ", right=" + right + "]";
}
}
}
| Factory |
java | grpc__grpc-java | stub/src/main/java/io/grpc/stub/ClientCalls.java | {
"start": 23909,
"end": 24666
} | class
____(ClientCall<?, RespT> call) {
this.call = call;
}
@Override
protected void interruptTask() {
call.cancel("GrpcFuture was cancelled", null);
}
@Override
protected boolean set(@Nullable RespT resp) {
return super.set(resp);
}
@Override
protected boolean setException(Throwable throwable) {
return super.setException(throwable);
}
@SuppressWarnings("MissingOverride") // Add @Override once Java 6 support is dropped
protected String pendingToString() {
return MoreObjects.toStringHelper(this).add("clientCall", call).toString();
}
}
/**
* Convert events on a {@link io.grpc.ClientCall.Listener} into a blocking {@link Iterator}.
*
* <p>The | GrpcFuture |
java | spring-projects__spring-security | saml2/saml2-service-provider/src/test/java/org/springframework/security/saml2/provider/service/registration/InMemoryRelyingPartyRegistrationRepositoryTests.java | {
"start": 875,
"end": 2896
} | class ____ {
@Test
void findByRegistrationIdWhenGivenIdThenReturnsMatchingRegistration() {
RelyingPartyRegistration registration = TestRelyingPartyRegistrations.relyingPartyRegistration().build();
InMemoryRelyingPartyRegistrationRepository registrations = new InMemoryRelyingPartyRegistrationRepository(
registration);
assertThat(registrations.findByRegistrationId(registration.getRegistrationId())).isSameAs(registration);
}
@Test
void findByRegistrationIdWhenGivenWrongIdThenReturnsNull() {
RelyingPartyRegistration registration = TestRelyingPartyRegistrations.relyingPartyRegistration().build();
InMemoryRelyingPartyRegistrationRepository registrations = new InMemoryRelyingPartyRegistrationRepository(
registration);
assertThat(registrations.findByRegistrationId(registration.getRegistrationId() + "wrong")).isNull();
assertThat(registrations.findByRegistrationId(null)).isNull();
}
@Test
void findByAssertingPartyEntityIdWhenGivenEntityIdThenReturnsMatchingRegistrations() {
RelyingPartyRegistration registration = TestRelyingPartyRegistrations.relyingPartyRegistration().build();
InMemoryRelyingPartyRegistrationRepository registrations = new InMemoryRelyingPartyRegistrationRepository(
registration);
String assertingPartyEntityId = registration.getAssertingPartyMetadata().getEntityId();
assertThat(registrations.findUniqueByAssertingPartyEntityId(assertingPartyEntityId)).isEqualTo(registration);
}
@Test
void findByAssertingPartyEntityIdWhenGivenWrongEntityIdThenReturnsEmpty() {
RelyingPartyRegistration registration = TestRelyingPartyRegistrations.relyingPartyRegistration().build();
InMemoryRelyingPartyRegistrationRepository registrations = new InMemoryRelyingPartyRegistrationRepository(
registration);
String assertingPartyEntityId = registration.getAssertingPartyMetadata().getEntityId();
assertThat(registrations.findUniqueByAssertingPartyEntityId(assertingPartyEntityId + "wrong")).isNull();
}
}
| InMemoryRelyingPartyRegistrationRepositoryTests |
java | grpc__grpc-java | interop-testing/src/test/java/io/grpc/testing/integration/RetryTest.java | {
"start": 17870,
"end": 18051
} | class ____ extends ClientStreamTracer {
@Override
public void streamClosed(Status status) {
fakeClock.forwardTime(10, SECONDS);
}
}
| CloseDelayedTracer |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/insertordering/InsertOrderingDuplicateTest.java | {
"start": 5739,
"end": 6658
} | class ____ {
@Id
@GeneratedValue
private Long id;
@Column(name = "sale_number")
private String number;
@OneToMany(fetch = FetchType.LAZY, mappedBy = "summary")
private Set<SaleDocumentItem> items = new HashSet<>();
private BigDecimal totalPrice;
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getNumber() {
return number;
}
public void setNumber(String number) {
this.number = number;
}
public Set<SaleDocumentItem> getItems() {
return items;
}
public void setItems(Set<SaleDocumentItem> items) {
this.items = items;
}
public BigDecimal getTotalPrice() {
return totalPrice;
}
public void setTotalPrice(BigDecimal totalPrice) {
this.totalPrice = totalPrice;
}
public void addItem(SaleDocumentItem sdi) {
this.getItems().add( sdi );
sdi.setSummary( this );
}
}
}
| SaleDocumentSummary |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PipelinedResultPartition.java | {
"start": 2790,
"end": 11469
} | class ____ extends BufferWritingResultPartition
implements CheckpointedResultPartition, ChannelStateHolder {
private static final int PIPELINED_RESULT_PARTITION_ITSELF = -42;
/**
* The lock that guard operations which can be asynchronously propagated from the networks
* threads.
*/
private final Object lock = new Object();
/**
* A flag for each subpartition indicating whether the downstream task has processed all the
* user records.
*/
@GuardedBy("lock")
private final boolean[] allRecordsProcessedSubpartitions;
/**
* The total number of subpartitions whose user records have not been fully processed by the
* downstream tasks yet.
*/
@GuardedBy("lock")
private int numNotAllRecordsProcessedSubpartitions;
@GuardedBy("lock")
private boolean hasNotifiedEndOfUserRecords;
/**
* The future represents whether all the records has been processed by all the downstream tasks.
*/
@GuardedBy("lock")
private final CompletableFuture<Void> allRecordsProcessedFuture = new CompletableFuture<>();
/**
* A flag for each subpartition indicating whether it was already consumed or not, to make
* releases idempotent.
*/
@GuardedBy("lock")
private final boolean[] consumedSubpartitions;
/**
* The total number of references to subpartitions of this result. The result partition can be
* safely released, iff the reference count is zero. Every subpartition is a user of the result
* as well the {@link PipelinedResultPartition} is a user itself, as it's writing to those
* results. Even if all consumers are released, partition can not be released until writer
* releases the partition as well.
*/
@GuardedBy("lock")
private int numberOfUsers;
public PipelinedResultPartition(
String owningTaskName,
int partitionIndex,
ResultPartitionID partitionId,
ResultPartitionType partitionType,
ResultSubpartition[] subpartitions,
int numTargetKeyGroups,
ResultPartitionManager partitionManager,
@Nullable BufferCompressor bufferCompressor,
SupplierWithException<BufferPool, IOException> bufferPoolFactory) {
super(
owningTaskName,
partitionIndex,
partitionId,
checkResultPartitionType(partitionType),
subpartitions,
numTargetKeyGroups,
partitionManager,
bufferCompressor,
bufferPoolFactory);
this.allRecordsProcessedSubpartitions = new boolean[subpartitions.length];
this.numNotAllRecordsProcessedSubpartitions = subpartitions.length;
this.consumedSubpartitions = new boolean[subpartitions.length];
this.numberOfUsers = subpartitions.length + 1;
}
@Override
public void setChannelStateWriter(ChannelStateWriter channelStateWriter) {
for (final ResultSubpartition subpartition : subpartitions) {
if (subpartition instanceof ChannelStateHolder) {
((PipelinedSubpartition) subpartition).setChannelStateWriter(channelStateWriter);
}
}
}
/**
* The pipelined partition releases automatically once all subpartition readers are released.
* That is because pipelined partitions cannot be consumed multiple times, or reconnect.
*/
@Override
void onConsumedSubpartition(int subpartitionIndex) {
decrementNumberOfUsers(subpartitionIndex);
}
private void decrementNumberOfUsers(int subpartitionIndex) {
if (isReleased()) {
return;
}
final int remainingUnconsumed;
// we synchronize only the bookkeeping section, to avoid holding the lock during any
// calls into other components
synchronized (lock) {
if (subpartitionIndex != PIPELINED_RESULT_PARTITION_ITSELF) {
if (consumedSubpartitions[subpartitionIndex]) {
// repeated call - ignore
return;
}
consumedSubpartitions[subpartitionIndex] = true;
}
remainingUnconsumed = (--numberOfUsers);
}
LOG.debug(
"{}: Received consumed notification for subpartition {}.", this, subpartitionIndex);
if (remainingUnconsumed == 0) {
partitionManager.onConsumedPartition(this);
} else if (remainingUnconsumed < 0) {
throw new IllegalStateException(
"Received consume notification even though all subpartitions are already consumed.");
}
}
@Override
public ResultSubpartitionInfo getCheckpointedSubpartitionInfo(int subpartitionIndex) {
return subpartitions[subpartitionIndex].getSubpartitionInfo();
}
@Override
public void flushAll() {
flushAllSubpartitions(false);
}
@Override
public void flush(int targetSubpartition) {
flushSubpartition(targetSubpartition, false);
}
@Override
public void notifyEndOfData(StopMode mode) throws IOException {
synchronized (lock) {
if (!hasNotifiedEndOfUserRecords) {
broadcastEvent(new EndOfData(mode), false);
hasNotifiedEndOfUserRecords = true;
}
}
}
@Override
public CompletableFuture<Void> getAllDataProcessedFuture() {
return allRecordsProcessedFuture;
}
@Override
public void onSubpartitionAllDataProcessed(int subpartition) {
synchronized (lock) {
if (allRecordsProcessedSubpartitions[subpartition]) {
return;
}
allRecordsProcessedSubpartitions[subpartition] = true;
numNotAllRecordsProcessedSubpartitions--;
if (numNotAllRecordsProcessedSubpartitions == 0) {
allRecordsProcessedFuture.complete(null);
}
}
}
@Override
@SuppressWarnings("FieldAccessNotGuarded")
public String toString() {
return "PipelinedResultPartition "
+ partitionId.toString()
+ " ["
+ partitionType
+ ", "
+ subpartitions.length
+ " subpartitions, "
+ numberOfUsers
+ " pending consumptions]";
}
// ------------------------------------------------------------------------
// miscellaneous utils
// ------------------------------------------------------------------------
private static ResultPartitionType checkResultPartitionType(ResultPartitionType type) {
checkArgument(
type == ResultPartitionType.PIPELINED
|| type == ResultPartitionType.PIPELINED_BOUNDED
|| type == ResultPartitionType.PIPELINED_APPROXIMATE);
return type;
}
@Override
public void finishReadRecoveredState(boolean notifyAndBlockOnCompletion) throws IOException {
if (!notifyAndBlockOnCompletion) {
return;
}
try (BufferConsumer eventBufferConsumer =
EventSerializer.toBufferConsumer(EndOfOutputChannelStateEvent.INSTANCE, false)) {
for (int i = 0; i < subpartitions.length; i++) {
if (((PipelinedSubpartition) subpartitions[i]).isSupportChannelStateRecover()) {
addToSubpartition(i, eventBufferConsumer.copy(), 0);
}
}
}
}
@Override
public BufferBuilder requestBufferBuilderBlocking()
throws IOException, RuntimeException, InterruptedException {
return getBufferPool().requestBufferBuilderBlocking();
}
@Override
public void addRecovered(int subpartitionIndex, BufferConsumer bufferConsumer)
throws IOException {
ResultSubpartition subpartition = subpartitions[subpartitionIndex];
NetworkActionsLogger.traceRecover(
"PipelinedSubpartition#addRecovered",
bufferConsumer,
getOwningTaskName(),
subpartition.subpartitionInfo);
if (addToSubpartition(subpartitionIndex, bufferConsumer, Integer.MIN_VALUE)
== ResultSubpartition.ADD_BUFFER_ERROR_CODE) {
throw new IOException("Buffer consumer couldn't be added to ResultSubpartition");
}
}
@Override
public void close() {
decrementNumberOfUsers(PIPELINED_RESULT_PARTITION_ITSELF);
super.close();
}
}
| PipelinedResultPartition |
java | micronaut-projects__micronaut-core | inject-java/src/main/java/io/micronaut/annotation/processing/visitor/JavaParameterElement.java | {
"start": 1483,
"end": 1564
} | interface ____ Java.
*
* @author graemerocher
* @since 1.0
*/
@Internal
final | for |
java | google__guava | android/guava-tests/test/com/google/common/collect/UnmodifiableRowSortedTableColumnMapTest.java | {
"start": 936,
"end": 1619
} | class ____ extends ColumnMapTests {
public UnmodifiableRowSortedTableColumnMapTest() {
super(false, false, false, false);
}
@Override
Table<Integer, String, Character> makeTable() {
RowSortedTable<Integer, String, Character> original = TreeBasedTable.create();
return unmodifiableRowSortedTable(original);
}
@Override
protected Map<String, Map<Integer, Character>> makePopulatedMap() {
RowSortedTable<Integer, String, Character> table = TreeBasedTable.create();
table.put(1, "foo", 'a');
table.put(1, "bar", 'b');
table.put(3, "foo", 'c');
return unmodifiableRowSortedTable(table).columnMap();
}
}
| UnmodifiableRowSortedTableColumnMapTest |
java | apache__camel | components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/spring/processor/SpringTokenizerTest.java | {
"start": 1043,
"end": 1301
} | class ____ extends SplitterMethodCallTest {
@Override
protected CamelContext createCamelContext() throws Exception {
return createSpringCamelContext(this, "org/apache/camel/spring/processor/splitterTokenizerTest.xml");
}
}
| SpringTokenizerTest |
java | apache__kafka | trogdor/src/main/java/org/apache/kafka/trogdor/workload/PayloadIterator.java | {
"start": 940,
"end": 1622
} | class ____ implements Iterator<byte[]> {
private final PayloadGenerator generator;
private long position = 0;
public PayloadIterator(PayloadGenerator generator) {
this.generator = generator;
}
@Override
public boolean hasNext() {
return true;
}
@Override
public synchronized byte[] next() {
return generator.generate(position++);
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
public synchronized void seek(long position) {
this.position = position;
}
public synchronized long position() {
return this.position;
}
}
| PayloadIterator |
java | google__auto | value/src/it/functional/src/test/java/com/google/auto/value/AutoValueTest.java | {
"start": 61807,
"end": 62334
} | class ____<T, U> {
public abstract Builder<T, U> setT(T t);
public abstract Builder<T, U> setU(U u);
public abstract InheritedToBuilder<T, U> build();
}
}
@Test
public void testInheritedToBuilder() {
InheritedToBuilder<Integer, String> x =
InheritedToBuilder.<Integer, String>builder().setT(17).setU("wibble").build();
InheritedToBuilder<Integer, String> y = x.toBuilder().setT(23).build();
assertThat(y.u()).isEqualTo("wibble");
}
@AutoValue
public abstract static | Builder |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java | {
"start": 14844,
"end": 15378
} | class ____ {
private final boolean result;
private final FileStatus stat;
public TruncateResult(boolean result, FileStatus stat) {
this.result = result;
this.stat = stat;
}
/**
* @return true if client does not need to wait for block recovery,
* false if client needs to wait for block recovery.
*/
boolean getResult() {
return result;
}
/**
* @return file information.
*/
FileStatus getFileStatus() {
return stat;
}
}
}
| TruncateResult |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithHA.java | {
"start": 1798,
"end": 4908
} | class ____ {
private Configuration conf;
private MiniDFSCluster cluster;
private NameNode nn0;
private NameNode nn1;
private DistributedFileSystem fs;
private HdfsAdmin dfsAdmin0;
private HdfsAdmin dfsAdmin1;
private FileSystemTestHelper fsHelper;
private File testRootDir;
private final String TEST_KEY = "test_key";
protected static final EnumSet< CreateEncryptionZoneFlag > NO_TRASH =
EnumSet.of(CreateEncryptionZoneFlag.NO_TRASH);
@BeforeEach
public void setupCluster() throws Exception {
conf = new Configuration();
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
HAUtil.setAllowStandbyReads(conf, true);
fsHelper = new FileSystemTestHelper();
String testRoot = fsHelper.getTestRootDir();
testRootDir = new File(testRoot).getAbsoluteFile();
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
JavaKeyStoreProvider.SCHEME_NAME + "://file" +
new Path(testRootDir.toString(), "test.jks").toUri()
);
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleHATopology())
.numDataNodes(1)
.build();
cluster.waitActive();
cluster.transitionToActive(0);
fs = (DistributedFileSystem)HATestUtil.configureFailoverFs(cluster, conf);
DFSTestUtil.createKey(TEST_KEY, cluster, 0, conf);
DFSTestUtil.createKey(TEST_KEY, cluster, 1, conf);
nn0 = cluster.getNameNode(0);
nn1 = cluster.getNameNode(1);
dfsAdmin0 = new HdfsAdmin(cluster.getURI(0), conf);
dfsAdmin1 = new HdfsAdmin(cluster.getURI(1), conf);
KeyProviderCryptoExtension nn0Provider =
cluster.getNameNode(0).getNamesystem().getProvider();
fs.getClient().setKeyProvider(nn0Provider);
}
@AfterEach
public void shutdownCluster() throws IOException {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
/**
* Test that encryption zones are properly tracked by the standby.
*/
@Test
@Timeout(value = 60)
public void testEncryptionZonesTrackedOnStandby() throws Exception {
final int len = 8196;
final Path dir = new Path("/enc");
final Path dirChild = new Path(dir, "child");
final Path dirFile = new Path(dir, "file");
fs.mkdir(dir, FsPermission.getDirDefault());
dfsAdmin0.createEncryptionZone(dir, TEST_KEY, NO_TRASH);
fs.mkdir(dirChild, FsPermission.getDirDefault());
DFSTestUtil.createFile(fs, dirFile, len, (short) 1, 0xFEED);
String contents = DFSTestUtil.readFile(fs, dirFile);
// Failover the current standby to active.
HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
cluster.shutdownNameNode(0);
cluster.transitionToActive(1);
assertEquals(dir.toString(), dfsAdmin1.getEncryptionZoneForPath(dir).getPath().toString(),
"Got unexpected ez path");
assertEquals(dir.toString(), dfsAdmin1.getEncryptionZoneForPath(dirChild).getPath().toString(),
"Got unexpected ez path");
assertEquals(contents, DFSTestUtil.readFile(fs, dirFile),
"File contents after failover were changed");
}
}
| TestEncryptionZonesWithHA |
java | micronaut-projects__micronaut-core | core/src/main/java/io/micronaut/core/naming/Described.java | {
"start": 798,
"end": 1252
} | interface ____ {
/**
* A description that describes this object.
*
* @return The description
*/
@NonNull String getDescription();
/**
* Allows returning a more simple description if requested.
*
* @param simple True if a simple description is required
* @return The simple description
*/
default @NonNull String getDescription(boolean simple) {
return getDescription();
}
}
| Described |
java | netty__netty | buffer/src/main/java/io/netty/buffer/UnpooledByteBufAllocator.java | {
"start": 9849,
"end": 11106
} | class ____ implements CleanableDirectBuffer {
private final UnpooledByteBufAllocator alloc;
private final CleanableDirectBuffer delegate;
private DecrementingCleanableDirectBuffer(
ByteBufAllocator alloc, CleanableDirectBuffer delegate) {
this(alloc, delegate, delegate.buffer().capacity());
}
private DecrementingCleanableDirectBuffer(
ByteBufAllocator alloc, CleanableDirectBuffer delegate, int capacityConsumed) {
this.alloc = (UnpooledByteBufAllocator) alloc;
this.alloc.incrementDirect(capacityConsumed);
this.delegate = delegate;
}
@Override
public ByteBuffer buffer() {
return delegate.buffer();
}
@Override
public void clean() {
int capacity = delegate.buffer().capacity();
delegate.clean();
alloc.decrementDirect(capacity);
}
@Override
public boolean hasMemoryAddress() {
return delegate.hasMemoryAddress();
}
@Override
public long memoryAddress() {
return delegate.memoryAddress();
}
}
private static final | DecrementingCleanableDirectBuffer |
java | elastic__elasticsearch | server/src/internalClusterTest/java/org/elasticsearch/search/basic/QueryRewriteContextIT.java | {
"start": 3297,
"end": 4600
} | class ____ extends AbstractQueryBuilder<TestQueryBuilder> {
private static final String NAME = "test";
private static TestQueryBuilder fromXContent(XContentParser parser) {
return new TestQueryBuilder();
}
TestQueryBuilder() {}
TestQueryBuilder(StreamInput in) throws IOException {
super(in);
}
@Override
public String getWriteableName() {
return NAME;
}
@Override
public TransportVersion getMinimalSupportedVersion() {
return TransportVersion.current();
}
@Override
protected void doWriteTo(StreamOutput out) throws IOException {
}
@Override
protected void doXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(NAME);
builder.endObject();
}
@Override
protected Query doToQuery(SearchExecutionContext context) throws IOException {
return new MatchNoDocsQuery();
}
@Override
protected boolean doEquals(TestQueryBuilder other) {
return true;
}
@Override
protected int doHashCode() {
return 0;
}
}
public static | TestQueryBuilder |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/introspect/POJOPropertiesCollectorTest.java | {
"start": 6020,
"end": 6194
} | class ____
{
@A
public boolean isBloop() { return true; }
@B
public boolean getBloop() { return true; }
}
static | DuplicateGetterBean |
java | apache__camel | tooling/maven/camel-package-maven-plugin/src/main/java/org/apache/camel/maven/packaging/PrepareKameletMainMojo.java | {
"start": 3030,
"end": 6362
} | class ____ one of the threads it generated failed.
* @throws MojoFailureException something bad happened...
*/
@Override
public void execute() throws MojoExecutionException, MojoFailureException {
try {
updateKnownDependencies();
} catch (Exception e) {
throw new MojoFailureException("Error updating camel-component-known-dependencies.properties", e);
}
}
protected void updateKnownDependencies() throws Exception {
Collection<Path> allJsonFiles = new TreeSet<>();
File path = new File(catalogDir, "src/generated/resources/org/apache/camel/catalog/components");
for (File p : path.listFiles()) {
String f = p.getName();
if (f.endsWith(PackageHelper.JSON_SUFIX)) {
allJsonFiles.add(p.toPath());
}
}
for (Path p : allJsonFiles) {
var m = JsonMapper.generateModel(p);
if (m != null) {
allModels.put(p, m);
}
}
List<String> lines = new ArrayList<>();
for (BaseModel<?> model : allModels.values()) {
String fqn = model.getJavaType();
if (model instanceof ArtifactModel) {
String aid = ((ArtifactModel<?>) model).getArtifactId();
if (aid.startsWith("camel-")) {
aid = aid.substring(6);
}
String line = fqn + "=camel:" + aid;
lines.add(line);
}
}
// remove duplicate
lines = lines.stream().distinct().collect(Collectors.toList());
// and sort
Collections.sort(lines);
// load license header
try (InputStream is = getClass().getClassLoader().getResourceAsStream("license-header.txt")) {
this.licenseHeader = loadText(is);
} catch (Exception e) {
throw new MojoFailureException("Error loading license-header.txt file", e);
}
String source = String.join("\n", lines) + "\n";
writeSourceIfChanged(source, "resources", "camel-component-known-dependencies.properties", genDir);
}
protected boolean writeSourceIfChanged(String source, String filePath, String fileName, File outputDir)
throws MojoFailureException {
Path target = outputDir.toPath().resolve(filePath).resolve(fileName);
try {
final String code = joinHeaderAndSource(licenseHeader, source);
if (getLog().isDebugEnabled()) {
getLog().debug("Source code generated:\n" + code);
}
return updateResource(buildContext, target, code);
} catch (Exception e) {
throw new MojoFailureException("IOError with file " + target, e);
}
}
public static boolean updateResource(BuildContext buildContext, Path out, String data) {
try {
if (FileUtil.updateFile(out, data)) {
refresh(buildContext, out);
return true;
}
} catch (IOException e) {
throw new IOError(e);
}
return false;
}
public static void refresh(BuildContext buildContext, Path file) {
if (buildContext != null) {
buildContext.refresh(file.toFile());
}
}
}
| or |
java | FasterXML__jackson-core | src/test/java/tools/jackson/core/unittest/testutil/AsyncReaderWrapperForByteArray.java | {
"start": 237,
"end": 277
} | class ____ with async parser
*/
public | used |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/context/annotation/ConfigurationClassEnhancer.java | {
"start": 13401,
"end": 23150
} | class ____ implements MethodInterceptor, ConditionalCallback {
/**
* Enhance a {@link Bean @Bean} method to check the supplied BeanFactory for the
* existence of this bean object.
* @throws Throwable as a catch-all for any exception that may be thrown when invoking the
* super implementation of the proxied method i.e., the actual {@code @Bean} method
*/
@Override
public @Nullable Object intercept(Object enhancedConfigInstance, Method beanMethod, Object[] beanMethodArgs,
MethodProxy cglibMethodProxy) throws Throwable {
ConfigurableBeanFactory beanFactory = getBeanFactory(enhancedConfigInstance);
String beanName = BeanAnnotationHelper.determineBeanNameFor(beanMethod, beanFactory);
// Determine whether this bean is a scoped-proxy
if (BeanAnnotationHelper.isScopedProxy(beanMethod)) {
String scopedBeanName = ScopedProxyCreator.getTargetBeanName(beanName);
if (beanFactory.isCurrentlyInCreation(scopedBeanName)) {
beanName = scopedBeanName;
}
}
// To handle the case of an inter-bean method reference, we must explicitly check the
// container for already cached instances.
// First, check to see if the requested bean is a FactoryBean. If so, create a subclass
// proxy that intercepts calls to getObject() and returns any cached bean instance.
// This ensures that the semantics of calling a FactoryBean from within @Bean methods
// is the same as that of referring to a FactoryBean within XML. See SPR-6602.
String factoryBeanName = BeanFactory.FACTORY_BEAN_PREFIX + beanName;
if (factoryContainsBean(beanFactory, factoryBeanName) && factoryContainsBean(beanFactory, beanName)) {
Object factoryBean = beanFactory.getBean(factoryBeanName);
if (factoryBean instanceof ScopedProxyFactoryBean) {
// Scoped proxy factory beans are a special case and should not be further proxied
}
else {
// It is a candidate FactoryBean - go ahead with enhancement
return enhanceFactoryBean(factoryBean, beanMethod.getReturnType(), beanFactory, beanName);
}
}
if (isCurrentlyInvokedFactoryMethod(beanMethod)) {
// The factory is calling the bean method in order to instantiate and register the bean
// (i.e. via a getBean() call) -> invoke the super implementation of the method to actually
// create the bean instance.
if (logger.isInfoEnabled() &&
BeanFactoryPostProcessor.class.isAssignableFrom(beanMethod.getReturnType())) {
logger.info(String.format("@Bean method %s.%s is non-static and returns an object " +
"assignable to Spring's BeanFactoryPostProcessor interface. This will " +
"result in a failure to process annotations such as @Autowired, " +
"@Resource and @PostConstruct within the method's declaring " +
"@Configuration class. Add the 'static' modifier to this method to avoid " +
"these container lifecycle issues; see @Bean javadoc for complete details.",
beanMethod.getDeclaringClass().getSimpleName(), beanMethod.getName()));
}
return cglibMethodProxy.invokeSuper(enhancedConfigInstance, beanMethodArgs);
}
return resolveBeanReference(beanMethod, beanMethodArgs, beanFactory, beanName);
}
private @Nullable Object resolveBeanReference(Method beanMethod, Object[] beanMethodArgs,
ConfigurableBeanFactory beanFactory, String beanName) {
// The user (i.e. not the factory) is requesting this bean through a call to
// the bean method, direct or indirect. The bean may have already been marked
// as 'in creation' in certain autowiring scenarios; if so, temporarily set
// the in-creation status to false in order to avoid an exception.
boolean alreadyInCreation = beanFactory.isCurrentlyInCreation(beanName);
try {
if (alreadyInCreation) {
beanFactory.setCurrentlyInCreation(beanName, false);
}
boolean useArgs = !ObjectUtils.isEmpty(beanMethodArgs);
if (useArgs && beanFactory.isSingleton(beanName)) {
// Stubbed null arguments just for reference purposes,
// expecting them to be autowired for regular singleton references?
// A safe assumption since @Bean singleton arguments cannot be optional...
for (Object arg : beanMethodArgs) {
if (arg == null) {
useArgs = false;
break;
}
}
}
Object beanInstance = (useArgs ? beanFactory.getBean(beanName, beanMethodArgs) :
beanFactory.getBean(beanName));
if (!ClassUtils.isAssignableValue(beanMethod.getReturnType(), beanInstance)) {
// Detect package-protected NullBean instance through equals(null) check
if (beanInstance.equals(null)) {
if (logger.isDebugEnabled()) {
logger.debug(String.format("@Bean method %s.%s called as bean reference " +
"for type [%s] returned null bean; resolving to null value.",
beanMethod.getDeclaringClass().getSimpleName(), beanMethod.getName(),
beanMethod.getReturnType().getName()));
}
beanInstance = null;
}
else {
String msg = String.format("@Bean method %s.%s called as bean reference " +
"for type [%s] but overridden by non-compatible bean instance of type [%s].",
beanMethod.getDeclaringClass().getSimpleName(), beanMethod.getName(),
beanMethod.getReturnType().getName(), beanInstance.getClass().getName());
try {
BeanDefinition beanDefinition = beanFactory.getMergedBeanDefinition(beanName);
msg += " Overriding bean of same name declared in: " + beanDefinition.getResourceDescription();
}
catch (NoSuchBeanDefinitionException ex) {
// Ignore - simply no detailed message then.
}
throw new IllegalStateException(msg);
}
}
Method currentlyInvoked = SimpleInstantiationStrategy.getCurrentlyInvokedFactoryMethod();
if (currentlyInvoked != null) {
String outerBeanName = BeanAnnotationHelper.determineBeanNameFor(currentlyInvoked, beanFactory);
beanFactory.registerDependentBean(beanName, outerBeanName);
}
return beanInstance;
}
finally {
if (alreadyInCreation) {
beanFactory.setCurrentlyInCreation(beanName, true);
}
}
}
@Override
public boolean isMatch(Method candidateMethod) {
return (candidateMethod.getDeclaringClass() != Object.class &&
!BeanFactoryAwareMethodInterceptor.isSetBeanFactory(candidateMethod) &&
BeanAnnotationHelper.isBeanAnnotated(candidateMethod));
}
private ConfigurableBeanFactory getBeanFactory(Object enhancedConfigInstance) {
Field field = ReflectionUtils.findField(enhancedConfigInstance.getClass(), BEAN_FACTORY_FIELD);
Assert.state(field != null, "Unable to find generated bean factory field");
Object beanFactory = ReflectionUtils.getField(field, enhancedConfigInstance);
Assert.state(beanFactory != null, "BeanFactory has not been injected into @Configuration class");
Assert.state(beanFactory instanceof ConfigurableBeanFactory,
"Injected BeanFactory is not a ConfigurableBeanFactory");
return (ConfigurableBeanFactory) beanFactory;
}
/**
* Check the BeanFactory to see whether the bean named <var>beanName</var> already
* exists. Accounts for the fact that the requested bean may be "in creation", i.e.:
* we're in the middle of servicing the initial request for this bean. From an enhanced
* factory method's perspective, this means that the bean does not actually yet exist,
* and that it is now our job to create it for the first time by executing the logic
* in the corresponding factory method.
* <p>Said another way, this check repurposes
* {@link ConfigurableBeanFactory#isCurrentlyInCreation(String)} to determine whether
* the container is calling this method or the user is calling this method.
* @param beanName name of bean to check for
* @return whether <var>beanName</var> already exists in the factory
*/
private boolean factoryContainsBean(ConfigurableBeanFactory beanFactory, String beanName) {
return (beanFactory.containsBean(beanName) && !beanFactory.isCurrentlyInCreation(beanName));
}
/**
* Check whether the given method corresponds to the container's currently invoked
* factory method. Compares method name and parameter types only in order to work
* around a potential problem with covariant return types (currently only known
* to happen on Groovy classes).
*/
private boolean isCurrentlyInvokedFactoryMethod(Method method) {
Method currentlyInvoked = SimpleInstantiationStrategy.getCurrentlyInvokedFactoryMethod();
return (currentlyInvoked != null && method.getName().equals(currentlyInvoked.getName()) &&
Arrays.equals(method.getParameterTypes(), currentlyInvoked.getParameterTypes()));
}
/**
* Create a subclass proxy that intercepts calls to getObject(), delegating to the current BeanFactory
* instead of creating a new instance. These proxies are created only when calling a FactoryBean from
* within a Bean method, allowing for proper scoping semantics even when working against the FactoryBean
* instance directly. If a FactoryBean instance is fetched through the container via &-dereferencing,
* it will not be proxied. This too is aligned with the way XML configuration works.
*/
private Object enhanceFactoryBean(Object factoryBean, Class<?> exposedType,
ConfigurableBeanFactory beanFactory, String beanName) {
try {
Class<?> clazz = factoryBean.getClass();
boolean finalClass = Modifier.isFinal(clazz.getModifiers());
boolean finalMethod = Modifier.isFinal(clazz.getMethod("getObject").getModifiers());
if (finalClass || finalMethod) {
if (exposedType.isInterface()) {
if (logger.isTraceEnabled()) {
logger.trace("Creating | BeanMethodInterceptor |
java | apache__flink | flink-kubernetes/src/main/java/org/apache/flink/kubernetes/executors/KubernetesSessionClusterExecutorFactory.java | {
"start": 1278,
"end": 1896
} | class ____ implements PipelineExecutorFactory {
@Override
public String getName() {
return KubernetesSessionClusterExecutor.NAME;
}
@Override
public boolean isCompatibleWith(@Nonnull final Configuration configuration) {
return configuration
.get(DeploymentOptions.TARGET)
.equalsIgnoreCase(KubernetesSessionClusterExecutor.NAME);
}
@Override
public PipelineExecutor getExecutor(@Nonnull final Configuration configuration) {
return new KubernetesSessionClusterExecutor(configuration);
}
}
| KubernetesSessionClusterExecutorFactory |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java | {
"start": 2301,
"end": 6201
} | class ____ extends LocalClusterStateRequest implements IndicesRequest.Replaceable {
private String[] names;
private IndicesOptions indicesOptions = IndicesOptions.builder()
.concreteTargetOptions(IndicesOptions.ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS)
.wildcardOptions(
IndicesOptions.WildcardOptions.builder()
.matchOpen(true)
.matchClosed(true)
.includeHidden(false)
.resolveAliases(false)
.allowEmptyExpressions(true)
.build()
)
.gatekeeperOptions(
IndicesOptions.GatekeeperOptions.builder()
.allowAliasToMultipleIndices(false)
.allowClosedIndices(true)
.ignoreThrottled(false)
.allowSelectors(false)
.build()
)
.build();
private boolean includeDefaults = false;
public Request(TimeValue masterNodeTimeout, String[] names) {
super(masterNodeTimeout);
this.names = names;
}
public Request(TimeValue masterNodeTimeout, String[] names, boolean includeDefaults) {
super(masterNodeTimeout);
this.names = names;
this.includeDefaults = includeDefaults;
}
public String[] getNames() {
return names;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public Task createTask(long id, String type, String action, TaskId parentTaskId, Map<String, String> headers) {
return new CancellableTask(id, type, action, "", parentTaskId, headers);
}
/**
* NB prior to 9.0 this was a TransportMasterNodeReadAction so for BwC we must remain able to read these requests until
* we no longer need to support calling this action remotely.
*/
@UpdateForV10(owner = UpdateForV10.Owner.DATA_MANAGEMENT)
public Request(StreamInput in) throws IOException {
super(in);
this.names = in.readOptionalStringArray();
this.indicesOptions = IndicesOptions.readIndicesOptions(in);
this.includeDefaults = in.readBoolean();
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Request request = (Request) o;
return Arrays.equals(names, request.names)
&& indicesOptions.equals(request.indicesOptions)
&& includeDefaults == request.includeDefaults;
}
@Override
public int hashCode() {
int result = Objects.hash(indicesOptions, includeDefaults);
result = 31 * result + Arrays.hashCode(names);
return result;
}
@Override
public String[] indices() {
return names;
}
@Override
public IndicesOptions indicesOptions() {
return indicesOptions;
}
public boolean includeDefaults() {
return includeDefaults;
}
public Request indicesOptions(IndicesOptions indicesOptions) {
this.indicesOptions = indicesOptions;
return this;
}
@Override
public boolean includeDataStreams() {
return true;
}
@Override
public IndicesRequest indices(String... indices) {
this.names = indices;
return this;
}
public Request includeDefaults(boolean includeDefaults) {
this.includeDefaults = includeDefaults;
return this;
}
}
public static | Request |
java | apache__maven | its/core-it-suite/src/test/resources/mng-3703/maven-mng3703-plugin/src/main/java/jar/CheckMojo.java | {
"start": 1549,
"end": 1799
} | class ____ extends AbstractCheckMojo {
@Override
protected MavenProject getTestProject() {
return getExecutionProject();
}
@Override
protected String getTestProjectLabel() {
return "forked project";
}
}
| CheckMojo |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/internal/observers/ConsumerSingleObserverTest.java | {
"start": 831,
"end": 1448
} | class ____ extends RxJavaTest {
@Test
public void onErrorMissingShouldReportNoCustomOnError() {
ConsumerSingleObserver<Integer> o = new ConsumerSingleObserver<>(Functions.<Integer>emptyConsumer(),
Functions.ON_ERROR_MISSING);
assertFalse(o.hasCustomOnError());
}
@Test
public void customOnErrorShouldReportCustomOnError() {
ConsumerSingleObserver<Integer> o = new ConsumerSingleObserver<>(Functions.<Integer>emptyConsumer(),
Functions.<Throwable>emptyConsumer());
assertTrue(o.hasCustomOnError());
}
}
| ConsumerSingleObserverTest |
java | quarkusio__quarkus | core/deployment/src/main/java/io/quarkus/deployment/shutdown/ShutdownBuildTimeConfig.java | {
"start": 376,
"end": 848
} | interface ____ {
/**
* Whether Quarkus should wait between shutdown being requested and actually initiated.
* This delay gives the infrastructure time to detect that the application instance is shutting down and
* stop routing traffic to it.
*/
@WithDefault("false")
boolean delayEnabled();
default boolean isDelayEnabled() {
return delayEnabled() && LaunchMode.current() != LaunchMode.DEVELOPMENT;
}
}
| ShutdownBuildTimeConfig |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java | {
"start": 22516,
"end": 31273
} | class ____ {
private final ArrayList<MemorySegment> segments = new ArrayList<>();
private final RecordAreaOutputView outView;
private final RandomAccessInputView inView;
private final int segmentSizeBits;
private final int segmentSizeMask;
private long appendPosition = 0;
public RecordArea(int segmentSize) {
int segmentSizeBits = MathUtils.log2strict(segmentSize);
if ((segmentSize & (segmentSize - 1)) != 0) {
throw new IllegalArgumentException("Segment size must be a power of 2!");
}
this.segmentSizeBits = segmentSizeBits;
this.segmentSizeMask = segmentSize - 1;
outView = new RecordAreaOutputView(segmentSize);
try {
addSegment();
} catch (EOFException ex) {
throw new RuntimeException(
"Bug in InPlaceMutableHashTable: we should have caught it earlier "
+ "that we don't have enough segments.");
}
inView = new RandomAccessInputView(segments, segmentSize);
}
private void addSegment() throws EOFException {
MemorySegment m = allocateSegment();
if (m == null) {
throw new EOFException();
}
segments.add(m);
}
/**
* Moves all its memory segments to freeMemorySegments. Warning: this will leave the
* RecordArea in an unwritable state: you have to call setWritePosition before writing
* again.
*/
public void giveBackSegments() {
freeMemorySegments.addAll(segments);
segments.clear();
resetAppendPosition();
}
public long getTotalSize() {
return segments.size() * (long) segmentSize;
}
// ----------------------- Output -----------------------
private void setWritePosition(long position) throws EOFException {
if (position > appendPosition) {
throw new IndexOutOfBoundsException();
}
final int segmentIndex = (int) (position >>> segmentSizeBits);
final int offset = (int) (position & segmentSizeMask);
// If position == appendPosition and the last buffer is full,
// then we will be seeking to the beginning of a new segment
if (segmentIndex == segments.size()) {
addSegment();
}
outView.currentSegmentIndex = segmentIndex;
outView.seekOutput(segments.get(segmentIndex), offset);
}
/**
* Sets appendPosition and the write position to 0, so that appending starts overwriting
* elements from the beginning. (This is used in rebuild.)
*
* <p>Note: if data was written to the area after the current appendPosition before a call
* to resetAppendPosition, it should still be readable. To release the segments after the
* current append position, call freeSegmentsAfterAppendPosition()
*/
public void resetAppendPosition() {
appendPosition = 0;
// this is just for safety (making sure that we fail immediately
// if a write happens without calling setWritePosition)
outView.currentSegmentIndex = -1;
outView.seekOutput(null, -1);
}
/**
* Releases the memory segments that are after the current append position. Note: The
* situation that there are segments after the current append position can arise from a call
* to resetAppendPosition().
*/
public void freeSegmentsAfterAppendPosition() {
final int appendSegmentIndex = (int) (appendPosition >>> segmentSizeBits);
while (segments.size() > appendSegmentIndex + 1 && !closed) {
freeMemorySegments.add(segments.get(segments.size() - 1));
segments.remove(segments.size() - 1);
}
}
/**
* Overwrites the long value at the specified position.
*
* @param pointer Points to the position to overwrite.
* @param value The value to write.
* @throws IOException
*/
public void overwritePointerAt(long pointer, long value) throws IOException {
setWritePosition(pointer);
outView.writeLong(value);
}
/**
* Overwrites a record at the specified position. The record is read from a DataInputView
* (this will be the staging area). WARNING: The record must not be larger than the original
* record.
*
* @param pointer Points to the position to overwrite.
* @param input The DataInputView to read the record from
* @param size The size of the record
* @throws IOException
*/
public void overwriteRecordAt(long pointer, DataInputView input, int size)
throws IOException {
setWritePosition(pointer);
outView.write(input, size);
}
/**
* Appends a pointer and a record. The record is read from a DataInputView (this will be the
* staging area).
*
* @param pointer The pointer to write (Note: this is NOT the position to write to!)
* @param input The DataInputView to read the record from
* @param recordSize The size of the record
* @return A pointer to the written data
* @throws IOException (EOFException specifically, if memory ran out)
*/
public long appendPointerAndCopyRecord(long pointer, DataInputView input, int recordSize)
throws IOException {
setWritePosition(appendPosition);
final long oldLastPosition = appendPosition;
outView.writeLong(pointer);
outView.write(input, recordSize);
appendPosition += 8 + recordSize;
return oldLastPosition;
}
/**
* Appends a pointer and a record.
*
* @param pointer The pointer to write (Note: this is NOT the position to write to!)
* @param record The record to write
* @return A pointer to the written data
* @throws IOException (EOFException specifically, if memory ran out)
*/
public long appendPointerAndRecord(long pointer, T record) throws IOException {
setWritePosition(appendPosition);
return noSeekAppendPointerAndRecord(pointer, record);
}
/**
* Appends a pointer and a record. Call this function only if the write position is at the
* end!
*
* @param pointer The pointer to write (Note: this is NOT the position to write to!)
* @param record The record to write
* @return A pointer to the written data
* @throws IOException (EOFException specifically, if memory ran out)
*/
public long noSeekAppendPointerAndRecord(long pointer, T record) throws IOException {
final long oldLastPosition = appendPosition;
final long oldPositionInSegment = outView.getCurrentPositionInSegment();
final long oldSegmentIndex = outView.currentSegmentIndex;
outView.writeLong(pointer);
buildSideSerializer.serialize(record, outView);
appendPosition +=
outView.getCurrentPositionInSegment()
- oldPositionInSegment
+ outView.getSegmentSize()
* (outView.currentSegmentIndex - oldSegmentIndex);
return oldLastPosition;
}
public long getAppendPosition() {
return appendPosition;
}
// ----------------------- Input -----------------------
public void setReadPosition(long position) {
inView.setReadPosition(position);
}
public long getReadPosition() {
return inView.getReadPosition();
}
/**
* Note: this is sometimes a negated length instead of a pointer (see
* HashTableProber.updateMatch).
*/
public long readPointer() throws IOException {
return inView.readLong();
}
public T readRecord(T reuse) throws IOException {
return buildSideSerializer.deserialize(reuse, inView);
}
public void skipBytesToRead(int numBytes) throws IOException {
inView.skipBytesToRead(numBytes);
}
// -----------------------------------------------------
private final | RecordArea |
java | alibaba__nacos | test/core-test/src/test/java/com/alibaba/nacos/test/ability/AbilityDiscovery.java | {
"start": 11041,
"end": 11884
} | class ____ extends RequestHandler<ConfigQueryRequest, ConfigQueryResponse> {
public ClientRequestHandler(RequestFilters requestFilters) throws NoSuchFieldException, IllegalAccessException {
Field declaredField = RequestHandler.class.getDeclaredField("requestFilters");
declaredField.setAccessible(true);
declaredField.set(this, requestFilters);
}
@Override
public ConfigQueryResponse handle(ConfigQueryRequest request, RequestMeta meta) throws NacosException {
if (meta.getConnectionAbility(AbilityKey.SDK_CLIENT_FUZZY_WATCH).equals(AbilityStatus.SUPPORTED)) {
serverSuccess = true;
}
return new ConfigQueryResponse();
}
}
/**
* just to test ability.
*/
| ClientRequestHandler |
java | quarkusio__quarkus | extensions/cache/deployment/src/main/java/io/quarkus/cache/deployment/RestClientMethodEnhancer.java | {
"start": 792,
"end": 1005
} | class ____ a similar job as CacheAnnotationsTransformer and since it relies on bytecode
* transformation, the @CacheKeyParameterPositions annotation is available during the MP REST Client cache interception.
*/
| does |
java | junit-team__junit5 | documentation/src/test/java/example/testkit/EngineTestKitSkippedMethodDemo.java | {
"start": 863,
"end": 1430
} | class ____ {
@Test
void verifyJupiterMethodWasSkipped() {
String methodName = "skippedTest";
Events testEvents = EngineTestKit // <5>
.engine("junit-jupiter") // <1>
.selectors(selectMethod(ExampleTestCase.class, methodName)) // <2>
.execute() // <3>
.testEvents(); // <4>
testEvents.assertStatistics(stats -> stats.skipped(1)); // <6>
testEvents.assertThatEvents() // <7>
.haveExactly(1, event(test(methodName),
skippedWithReason("for demonstration purposes")));
}
}
// end::user_guide[]
// @formatter:on
| EngineTestKitSkippedMethodDemo |
java | spring-projects__spring-boot | documentation/spring-boot-docs/src/main/java/org/springframework/boot/docs/testing/springbootapplications/autoconfiguredrestclient/MyRestClientServiceTests.java | {
"start": 1307,
"end": 1755
} | class ____ {
@Autowired
private RemoteVehicleDetailsService service;
@Autowired
private MockRestServiceServer server;
@Test
void getVehicleDetailsWhenResultIsSuccessShouldReturnDetails() {
this.server.expect(requestTo("https://example.com/greet/details"))
.andRespond(withSuccess("hello", MediaType.TEXT_PLAIN));
String greeting = this.service.callRestService();
assertThat(greeting).isEqualTo("hello");
}
}
| MyRestClientServiceTests |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/plugins/SearchPlugin.java | {
"start": 7087,
"end": 7658
} | class ____<T extends SignificanceHeuristic> extends SearchExtensionSpec<T, BiFunction<XContentParser, Void, T>> {
public SignificanceHeuristicSpec(ParseField name, Writeable.Reader<T> reader, BiFunction<XContentParser, Void, T> parser) {
super(name, reader, parser);
}
public SignificanceHeuristicSpec(String name, Writeable.Reader<T> reader, BiFunction<XContentParser, Void, T> parser) {
super(name, reader, parser);
}
}
/**
* Specification for a {@link Suggester}.
*/
| SignificanceHeuristicSpec |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/interop/ImmutablesTypeSerializationTest.java | {
"start": 1344,
"end": 1487
} | interface ____<T> {
T getId();
}
@JsonDeserialize(as = ImmutableEntry.class)
@JsonSerialize(as = ImmutableEntry.class)
| Key |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/common/typeutils/base/TypeSerializerSingleton.java | {
"start": 993,
"end": 1531
} | class ____<T> extends TypeSerializer<T> {
private static final long serialVersionUID = 8766687317209282373L;
// --------------------------------------------------------------------------------------------
@Override
public TypeSerializerSingleton<T> duplicate() {
return this;
}
@Override
public int hashCode() {
return this.getClass().hashCode();
}
@Override
public boolean equals(Object obj) {
return obj.getClass().equals(this.getClass());
}
}
| TypeSerializerSingleton |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java | {
"start": 16544,
"end": 17241
} | enum ____ we can directly
// compare with "!=".
if (m.getRealtimeAggregationOp()
!= aggrMetric.getRealtimeAggregationOp()) {
aggrMetric.setRealtimeAggregationOp(m.getRealtimeAggregationOp());
}
}
aggrRow.clear();
}
Set<TimelineMetric> metrics = e.getMetrics();
metrics.remove(aggrMetric);
metrics.add(aggrMetric);
}
return e;
}
public TimelineEntity aggregateAllTo(TimelineEntity e,
String aggregationGroupId) {
for (TimelineMetric m : aggregateTable.keySet()) {
aggregateTo(m, e, aggregationGroupId);
}
return e;
}
}
}
| so |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/node/TreeFromIncompleteJsonTest.java | {
"start": 291,
"end": 1111
} | class ____ extends DatabindTestUtil
{
final private ObjectMapper MAPPER = objectMapper(); // shared is fine
@Test
public void testErrorHandling() throws IOException {
String json = "{\"A\":{\"B\":\n";
JsonParser parser = MAPPER.createParser(json);
try {
parser.readValueAsTree();
} catch (UnexpectedEndOfInputException e) {
verifyException(e, "Unexpected end-of-input");
}
parser.close();
try {
MAPPER.readTree(json);
} catch (UnexpectedEndOfInputException e) {
verifyException(e, "Unexpected end-of-input");
}
try {
MAPPER.reader().readTree(json);
} catch (UnexpectedEndOfInputException e) {
verifyException(e, "Unexpected end-of-input");
}
}
}
| TreeFromIncompleteJsonTest |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/GoogleCloudStorageEndpointBuilderFactory.java | {
"start": 49914,
"end": 52023
} | interface ____
extends
GoogleCloudStorageEndpointConsumerBuilder,
GoogleCloudStorageEndpointProducerBuilder {
default AdvancedGoogleCloudStorageEndpointBuilder advanced() {
return (AdvancedGoogleCloudStorageEndpointBuilder) this;
}
/**
* Setting the autocreation of the bucket bucketName.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param autoCreateBucket the value to set
* @return the dsl builder
*/
default GoogleCloudStorageEndpointBuilder autoCreateBucket(boolean autoCreateBucket) {
doSetProperty("autoCreateBucket", autoCreateBucket);
return this;
}
/**
* Setting the autocreation of the bucket bucketName.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param autoCreateBucket the value to set
* @return the dsl builder
*/
default GoogleCloudStorageEndpointBuilder autoCreateBucket(String autoCreateBucket) {
doSetProperty("autoCreateBucket", autoCreateBucket);
return this;
}
/**
* The Service account key that can be used as credentials for the
* Storage client. It can be loaded by default from classpath, but you
* can prefix with classpath:, file:, or http: to load the resource from
* different systems.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param serviceAccountKey the value to set
* @return the dsl builder
*/
default GoogleCloudStorageEndpointBuilder serviceAccountKey(String serviceAccountKey) {
doSetProperty("serviceAccountKey", serviceAccountKey);
return this;
}
/**
* The Cloud Storage | GoogleCloudStorageEndpointBuilder |
java | apache__flink | flink-libraries/flink-state-processing-api/src/test/java/org/apache/flink/state/api/SavepointWriterITCase.java | {
"start": 3213,
"end": 10091
} | class ____ extends AbstractTestBaseJUnit4 {
private static final long CHECKPOINT_ID = 42;
private static final String ACCOUNT_UID = "accounts";
private static final String CURRENCY_UID = "currency";
private static final String MODIFY_UID = "numbers";
private static final MapStateDescriptor<String, Double> descriptor =
new MapStateDescriptor<>("currency-rate", Types.STRING, Types.DOUBLE);
private static final Collection<Account> accounts =
Arrays.asList(new Account(1, 100.0), new Account(2, 100.0), new Account(3, 100.0));
private static final Collection<CurrencyRate> currencyRates =
Arrays.asList(new CurrencyRate("USD", 1.0), new CurrencyRate("EUR", 1.3));
@Test
public void testDefaultStateBackend() throws Exception {
testStateBootstrapAndModification(new Configuration(), null);
}
@Test
public void testHashMapStateBackend() throws Exception {
testStateBootstrapAndModification(
new Configuration().set(StateBackendOptions.STATE_BACKEND, "hashmap"),
new HashMapStateBackend());
}
@Test
public void testEmbeddedRocksDBStateBackend() throws Exception {
testStateBootstrapAndModification(
new Configuration().set(StateBackendOptions.STATE_BACKEND, "rocksdb"),
new EmbeddedRocksDBStateBackend());
}
public void testStateBootstrapAndModification(Configuration config, StateBackend backend)
throws Exception {
final String savepointPath = getTempDirPath(new AbstractID().toHexString());
bootstrapState(backend, savepointPath);
validateBootstrap(config, savepointPath);
final String modifyPath = getTempDirPath(new AbstractID().toHexString());
modifySavepoint(backend, savepointPath, modifyPath);
validateModification(config, modifyPath);
}
private void bootstrapState(StateBackend backend, String savepointPath) throws Exception {
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.setRuntimeMode(RuntimeExecutionMode.AUTOMATIC);
StateBootstrapTransformation<Account> transformation =
OperatorTransformation.bootstrapWith(env.fromData(accounts), CHECKPOINT_ID)
.keyBy(acc -> acc.id)
.transform(new AccountBootstrapper());
StateBootstrapTransformation<CurrencyRate> broadcastTransformation =
OperatorTransformation.bootstrapWith(env.fromData(currencyRates), CHECKPOINT_ID)
.transform(new CurrencyBootstrapFunction());
SavepointWriter writer =
backend == null
? SavepointWriter.newSavepoint(env, CHECKPOINT_ID, 128)
: SavepointWriter.newSavepoint(env, backend, CHECKPOINT_ID, 128);
writer.withOperator(OperatorIdentifier.forUid(ACCOUNT_UID), transformation)
.withOperator(getUidHashFromUid(CURRENCY_UID), broadcastTransformation)
.write(savepointPath);
env.execute("Bootstrap");
}
private void validateBootstrap(Configuration configuration, String savepointPath)
throws Exception {
CheckpointMetadata metadata = SavepointLoader.loadSavepointMetadata(savepointPath);
assertThat(metadata.getCheckpointId()).isEqualTo(CHECKPOINT_ID);
StreamExecutionEnvironment env =
StreamExecutionEnvironment.getExecutionEnvironment(configuration);
DataStream<Account> stream =
env.fromData(accounts)
.keyBy(acc -> acc.id)
.flatMap(new UpdateAndGetAccount())
.uid(ACCOUNT_UID);
final CloseableIterator<Account> results = stream.collectAsync();
env.fromData(currencyRates)
.connect(env.fromData(currencyRates).broadcast(descriptor))
.process(new CurrencyValidationFunction())
.uid(CURRENCY_UID)
.sinkTo(new DiscardingSink<>());
final StreamGraph streamGraph = env.getStreamGraph();
streamGraph.setSavepointRestoreSettings(
SavepointRestoreSettings.forPath(savepointPath, false));
env.execute(streamGraph);
assertThat(results).toIterable().hasSize(3);
results.close();
}
private void modifySavepoint(StateBackend backend, String savepointPath, String modifyPath)
throws Exception {
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.setRuntimeMode(RuntimeExecutionMode.AUTOMATIC);
StateBootstrapTransformation<Integer> transformation =
OperatorTransformation.bootstrapWith(env.fromData(1, 2, 3))
.transform(new ModifyProcessFunction());
SavepointWriter writer =
backend == null
? SavepointWriter.fromExistingSavepoint(env, savepointPath)
: SavepointWriter.fromExistingSavepoint(env, savepointPath, backend);
writer.removeOperator(OperatorIdentifier.forUid(CURRENCY_UID))
.withOperator(getUidHashFromUid(MODIFY_UID), transformation)
.write(modifyPath);
env.execute("Modifying");
}
private void validateModification(Configuration configuration, String savepointPath)
throws Exception {
CheckpointMetadata metadata = SavepointLoader.loadSavepointMetadata(savepointPath);
assertThat(metadata.getCheckpointId()).isEqualTo(CHECKPOINT_ID);
StreamExecutionEnvironment sEnv =
StreamExecutionEnvironment.getExecutionEnvironment(configuration);
DataStream<Account> stream =
sEnv.fromData(accounts)
.keyBy(acc -> acc.id)
.flatMap(new UpdateAndGetAccount())
.uid(ACCOUNT_UID);
final CloseableIterator<Account> results = stream.collectAsync();
stream.map(acc -> acc.id)
.map(new StatefulOperator())
.uid(MODIFY_UID)
.sinkTo(new DiscardingSink<>());
final StreamGraph streamGraph = sEnv.getStreamGraph();
streamGraph.setSavepointRestoreSettings(
SavepointRestoreSettings.forPath(savepointPath, false));
sEnv.execute(streamGraph);
assertThat(results).toIterable().hasSize(3);
results.close();
}
private static OperatorIdentifier getUidHashFromUid(String uid) {
return OperatorIdentifier.forUidHash(
OperatorIdentifier.forUid(uid).getOperatorId().toHexString());
}
/** A simple pojo. */
@SuppressWarnings("WeakerAccess")
public static | SavepointWriterITCase |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/async/utils/AsyncUtil.java | {
"start": 2626,
"end": 10165
} | class ____ {
private static final Boolean BOOLEAN_RESULT = false;
private static final Long LONG_RESULT = -1L;
private static final Integer INT_RESULT = -1;
private static final Object NULL_RESULT = null;
private AsyncUtil(){}
/**
* Provides a default value based on the type specified.
*
* @param clazz The {@link Class} object representing the type of the value
* to be returned.
* @param <R> The type of the value to be returned.
* @return An object with a value determined by the type:
* <ul>
* <li>{@code false} if {@code clazz} is {@link Boolean},
* <li>-1 if {@code clazz} is {@link Long},
* <li>-1 if {@code clazz} is {@link Integer},
* <li>{@code null} for any other type.
* </ul>
*/
public static <R> R asyncReturn(Class<R> clazz) {
if (clazz == null) {
return null;
}
if (clazz.equals(Boolean.class)
|| clazz.equals(boolean.class)) {
return (R) BOOLEAN_RESULT;
} else if (clazz.equals(Long.class)
|| clazz.equals(long.class)) {
return (R) LONG_RESULT;
} else if (clazz.equals(Integer.class)
|| clazz.equals(int.class)) {
return (R) INT_RESULT;
}
return (R) NULL_RESULT;
}
/**
* Synchronously returns the result of the current asynchronous operation.
* This method is designed to be used in scenarios where the result of an
* asynchronous operation is needed synchronously, and it is known that
* the operation has completed.
*
* <p>The method retrieves the current thread's {@link CompletableFuture} and
* attempts to get the result. If the future is not yet complete, this
* method will block until the result is available. If the future completed
* exceptionally, the cause of the exception is thrown as a runtime
* exception wrapped in an {@link ExecutionException}.</p>
*
* <p>This method is typically used after an asynchronous operation has been
* initiated and the caller needs to obtain the result in a synchronous
* manner, for example, when bridging between asynchronous and synchronous
* code paths.</p>
*
* @param <R> the type of the result to be returned
* @param clazz the {@link Class} object representing the type of the value
* to be returned, used to cast the result to the correct type
* @return the result of the asynchronous operation as an object of the
* specified class
* @throws Exception if an error occurs during the synchronous retrieval of
* the result, including the original exception if the
* future completed exceptionally
*/
public static <R> R syncReturn(Class<R> clazz)
throws Exception {
CompletableFuture<Object> completableFuture = CUR_COMPLETABLE_FUTURE.get();
assert completableFuture != null;
try {
return (R) completableFuture.get();
} catch (ExecutionException e) {
throw (Exception) e.getCause();
}
}
/**
* Completes the current asynchronous operation with the specified value.
* This method sets the result of the current thread's {@link CompletableFuture}
* to the provided value, effectively completing the asynchronous operation.
*
* @param value The value to complete the future with.
* @param <R> The type of the value to be completed.
*/
public static <R> void asyncComplete(R value) {
CUR_COMPLETABLE_FUTURE.set(
CompletableFuture.completedFuture(value));
}
/**
* Completes the current asynchronous operation with the specified completableFuture.
*
* @param completableFuture The completableFuture to complete the future with.
* @param <R> The type of the value to be completed.
*/
public static <R> void asyncCompleteWith(CompletableFuture<R> completableFuture) {
CUR_COMPLETABLE_FUTURE.set((CompletableFuture<Object>) completableFuture);
}
public static CompletableFuture<Object> getAsyncUtilCompletableFuture() {
assert CUR_COMPLETABLE_FUTURE.get() != null;
return CUR_COMPLETABLE_FUTURE.get();
}
/**
* Completes the current asynchronous operation with an exception.
* This method sets the result of the current thread's {@link CompletableFuture}
* to an exceptional completion, using the provided {@link Throwable} as the cause.
* This is typically used to handle errors in asynchronous operations.
*
* @param e The exception to complete the future exceptionally with.
*/
public static void asyncThrowException(Throwable e) {
CompletableFuture<Object> result = new CompletableFuture<>();
result.completeExceptionally(warpCompletionException(e));
CUR_COMPLETABLE_FUTURE.set(result);
}
/**
* Applies an asynchronous function to the current {@link CompletableFuture}.
* This method retrieves the current thread's {@link CompletableFuture} and applies
* the provided {@link ApplyFunction} to it. It is used to chain asynchronous
* operations, where the result of one operation is used as the input for the next.
*
* @param function The asynchronous function to apply, which takes a type T and
* produces a type R.
* @param <T> The type of the input to the function.
* @param <R> The type of the result of the function.
* @see CompletableFuture
* @see ApplyFunction
*/
public static <T, R> void asyncApply(ApplyFunction<T, R> function) {
CompletableFuture<T> completableFuture =
(CompletableFuture<T>) CUR_COMPLETABLE_FUTURE.get();
assert completableFuture != null;
CompletableFuture<R> result = function.apply(completableFuture);
CUR_COMPLETABLE_FUTURE.set((CompletableFuture<Object>) result);
}
/**
* Applies an asynchronous function to the current {@link CompletableFuture}
* using the specified executor. This method retrieves the current thread's
* {@link CompletableFuture} and applies the provided{@link ApplyFunction} to
* it with the given executor service. It allows for more control over the
* execution context, such as running the operation in a separate thread or
* thread pool.
*
* <p>This is particularly useful when you need to perform blocking I/O operations
* or other long-running tasks without blocking the main thread or
* when you want to manage the thread resources more efficiently.</p>
*
* @param function The asynchronous function to apply, which takes a type T and
* produces a type R.
* @param executor The executor service used to run the asynchronous function.
* @param <T> The type of the input to the function.
* @param <R> The type of the result of the function.
* @see CompletableFuture
* @see ApplyFunction
*/
public static <T, R> void asyncApplyUseExecutor(
ApplyFunction<T, R> function, Executor executor) {
CompletableFuture<T> completableFuture =
(CompletableFuture<T>) CUR_COMPLETABLE_FUTURE.get();
assert completableFuture != null;
CompletableFuture<R> result = function.apply(completableFuture, executor);
CUR_COMPLETABLE_FUTURE.set((CompletableFuture<Object>) result);
}
/**
* Attempts to execute an asynchronous task defined by the provided
* {@link AsyncRun} and associates it with the current thread's
* {@link CompletableFuture}. This method is useful for trying operations
* that may throw exceptions and handling them asynchronously.
*
* <p>The provided {@code asyncRun} is a functional | AsyncUtil |
java | apache__flink | flink-core/src/main/java/org/apache/flink/configuration/ConfigOptions.java | {
"start": 7585,
"end": 8779
} | class ____<E> {
private final String key;
private final Class<E> clazz;
ListConfigOptionBuilder(String key, Class<E> clazz) {
this.key = key;
this.clazz = clazz;
}
/**
* Creates a ConfigOption with the given default value.
*
* @param values The list of default values for the config option
* @return The config option with the default value.
*/
@SafeVarargs
public final ConfigOption<List<E>> defaultValues(E... values) {
return new ConfigOption<>(
key, clazz, ConfigOption.EMPTY_DESCRIPTION, Arrays.asList(values), true);
}
/**
* Creates a ConfigOption without a default value.
*
* @return The config option without a default value.
*/
public ConfigOption<List<E>> noDefaultValue() {
return new ConfigOption<>(key, clazz, ConfigOption.EMPTY_DESCRIPTION, null, true);
}
}
// ------------------------------------------------------------------------
/** Not intended to be instantiated. */
private ConfigOptions() {}
}
| ListConfigOptionBuilder |
java | elastic__elasticsearch | x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/client/AmazonBedrockMockClientCache.java | {
"start": 739,
"end": 2308
} | class ____ implements AmazonBedrockClientCache {
private ConverseResponse converseResponse = null;
private InvokeModelResponse invokeModelResponse = null;
private ElasticsearchException exceptionToThrow = null;
public AmazonBedrockMockClientCache() {}
public AmazonBedrockMockClientCache(
@Nullable ConverseResponse converseResponse,
@Nullable InvokeModelResponse invokeModelResponse,
@Nullable ElasticsearchException exceptionToThrow
) {
this.converseResponse = converseResponse;
this.invokeModelResponse = invokeModelResponse;
this.exceptionToThrow = exceptionToThrow;
}
@Override
public AmazonBedrockBaseClient getOrCreateClient(AmazonBedrockModel model, TimeValue timeout) {
var client = AmazonBedrockMockInferenceClient.create(model, timeout);
client.setConverseResponse(converseResponse);
client.setInvokeModelResponse(invokeModelResponse);
client.setExceptionToThrow(exceptionToThrow);
return client;
}
@Override
public void close() throws IOException {
// nothing to do
}
public void setConverseResponse(ConverseResponse converseResponse) {
this.converseResponse = converseResponse;
}
public void setInvokeModelResponse(InvokeModelResponse invokeModelResponse) {
this.invokeModelResponse = invokeModelResponse;
}
public void setExceptionToThrow(ElasticsearchException exceptionToThrow) {
this.exceptionToThrow = exceptionToThrow;
}
}
| AmazonBedrockMockClientCache |
java | alibaba__nacos | api/src/main/java/com/alibaba/nacos/api/ability/constant/AbilityMode.java | {
"start": 749,
"end": 946
} | enum ____ {
/**
* for server ability.
*/
SERVER,
/**
* for sdk client.
*/
SDK_CLIENT,
/**
* for cluster client.
*/
CLUSTER_CLIENT;
}
| AbilityMode |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/context/annotation/configuration/ConfigurationClassProcessingTests.java | {
"start": 16325,
"end": 16588
} | class ____ {
static TestBean testBean = new TestBean(ConfigWithBeanWithAliases.class.getSimpleName());
@Bean({"name1", "alias1", "alias2", "alias3"})
public TestBean methodName() {
return testBean;
}
}
@Configuration
static | ConfigWithBeanWithAliases |
java | spring-projects__spring-boot | module/spring-boot-artemis/src/main/java/org/springframework/boot/artemis/testcontainers/ArtemisContainerConnectionDetailsFactory.java | {
"start": 1422,
"end": 1777
} | class ____
extends ContainerConnectionDetailsFactory<ArtemisContainer, ArtemisConnectionDetails> {
@Override
protected ArtemisConnectionDetails getContainerConnectionDetails(
ContainerConnectionSource<ArtemisContainer> source) {
return new ArtemisContainerConnectionDetails(source);
}
private static final | ArtemisContainerConnectionDetailsFactory |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/codec/Elasticsearch900Lucene101Codec.java | {
"start": 1470,
"end": 5476
} | class ____ extends CodecService.DeduplicateFieldInfosCodec {
static final PostingsFormat DEFAULT_POSTINGS_FORMAT = new Lucene101PostingsFormat();
private final StoredFieldsFormat storedFieldsFormat;
private final PostingsFormat defaultPostingsFormat;
private final PostingsFormat postingsFormat = new PerFieldPostingsFormat() {
@Override
public PostingsFormat getPostingsFormatForField(String field) {
return Elasticsearch900Lucene101Codec.this.getPostingsFormatForField(field);
}
};
private final DocValuesFormat defaultDVFormat;
private final DocValuesFormat docValuesFormat = new XPerFieldDocValuesFormat() {
@Override
public DocValuesFormat getDocValuesFormatForField(String field) {
return Elasticsearch900Lucene101Codec.this.getDocValuesFormatForField(field);
}
};
private final KnnVectorsFormat defaultKnnVectorsFormat;
private final KnnVectorsFormat knnVectorsFormat = new PerFieldKnnVectorsFormat() {
@Override
public KnnVectorsFormat getKnnVectorsFormatForField(String field) {
return Elasticsearch900Lucene101Codec.this.getKnnVectorsFormatForField(field);
}
};
/** Public no-arg constructor, needed for SPI loading at read-time. */
public Elasticsearch900Lucene101Codec() {
this(Zstd814StoredFieldsFormat.Mode.BEST_SPEED);
}
/**
* Constructor. Takes a {@link Zstd814StoredFieldsFormat.Mode} that describes whether to optimize for retrieval speed at the expense of
* worse space-efficiency or vice-versa.
*/
public Elasticsearch900Lucene101Codec(Zstd814StoredFieldsFormat.Mode mode) {
super("Elasticsearch900Lucene101", new Lucene101Codec());
this.storedFieldsFormat = mode.getFormat();
this.defaultPostingsFormat = DEFAULT_POSTINGS_FORMAT;
this.defaultDVFormat = new Lucene90DocValuesFormat();
this.defaultKnnVectorsFormat = new Lucene99HnswVectorsFormat();
}
@Override
public StoredFieldsFormat storedFieldsFormat() {
return storedFieldsFormat;
}
@Override
public final PostingsFormat postingsFormat() {
return postingsFormat;
}
@Override
public final DocValuesFormat docValuesFormat() {
return docValuesFormat;
}
@Override
public final KnnVectorsFormat knnVectorsFormat() {
return knnVectorsFormat;
}
/**
* Returns the postings format that should be used for writing new segments of <code>field</code>.
*
* <p>The default implementation always returns "Lucene912".
*
* <p><b>WARNING:</b> if you subclass, you are responsible for index backwards compatibility:
* future version of Lucene are only guaranteed to be able to read the default implementation,
*/
public PostingsFormat getPostingsFormatForField(String field) {
return defaultPostingsFormat;
}
/**
* Returns the docvalues format that should be used for writing new segments of <code>field</code>
* .
*
* <p>The default implementation always returns "Lucene912".
*
* <p><b>WARNING:</b> if you subclass, you are responsible for index backwards compatibility:
* future version of Lucene are only guaranteed to be able to read the default implementation.
*/
public DocValuesFormat getDocValuesFormatForField(String field) {
return defaultDVFormat;
}
/**
* Returns the vectors format that should be used for writing new segments of <code>field</code>
*
* <p>The default implementation always returns "Lucene912".
*
* <p><b>WARNING:</b> if you subclass, you are responsible for index backwards compatibility:
* future version of Lucene are only guaranteed to be able to read the default implementation.
*/
public KnnVectorsFormat getKnnVectorsFormatForField(String field) {
return defaultKnnVectorsFormat;
}
}
| Elasticsearch900Lucene101Codec |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/common/typeinfo/NumericTypeInfo.java | {
"start": 1260,
"end": 2276
} | class ____<T> extends BasicTypeInfo<T> {
private static final long serialVersionUID = -5937777910658986986L;
private static final HashSet<Class<?>> numericalTypes =
new HashSet<>(
Arrays.asList(
Integer.class,
Long.class,
Double.class,
Byte.class,
Short.class,
Float.class,
Character.class));
protected NumericTypeInfo(
Class<T> clazz,
Class<?>[] possibleCastTargetTypes,
TypeSerializer<T> serializer,
Class<? extends TypeComparator<T>> comparatorClass) {
super(clazz, possibleCastTargetTypes, serializer, comparatorClass);
checkArgument(
numericalTypes.contains(clazz),
"The given class %s is not a numerical type",
clazz.getSimpleName());
}
}
| NumericTypeInfo |
java | apache__logging-log4j2 | log4j-api/src/main/java/org/apache/logging/log4j/util/StackLocatorUtil.java | {
"start": 3289,
"end": 3737
} | class ____ null.
* @throws IndexOutOfBoundsException if depth is negative.
* @since 2.17.2
*/
@PerformanceSensitive
public static ClassLoader getCallerClassLoader(final int depth) {
final Class<?> callerClass = stackLocator.getCallerClass(depth + 1);
return callerClass != null ? callerClass.getClassLoader() : null;
}
/**
* Search for a calling class.
*
* @param sentinelClass Sentinel | or |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/dialect/function/json/H2JsonTableFunction.java | {
"start": 3948,
"end": 5733
} | class ____ extends JsonTableFunction {
private final int maximumArraySize;
public H2JsonTableFunction(int maximumArraySize, TypeConfiguration typeConfiguration) {
super( new H2JsonTableSetReturningFunctionTypeResolver(), typeConfiguration );
this.maximumArraySize = maximumArraySize;
}
@Override
protected <T> SelfRenderingSqmSetReturningFunction<T> generateSqmSetReturningFunctionExpression(
List<? extends SqmTypedNode<?>> sqmArguments,
QueryEngine queryEngine) {
//noinspection unchecked
return new SqmJsonTableFunction<>(
this,
this,
getArgumentsValidator(),
getSetReturningTypeResolver(),
queryEngine.getCriteriaBuilder(),
(SqmExpression<?>) sqmArguments.get( 0 ),
sqmArguments.size() > 1 ? (SqmExpression<String>) sqmArguments.get( 1 ) : null
) {
@Override
public TableGroup convertToSqlAst(
NavigablePath navigablePath,
String identifierVariable,
boolean lateral,
boolean canUseInnerJoins,
boolean withOrdinality,
SqmToSqlAstConverter walker) {
// Register a transformer that adds a join predicate "array_length(array) <= index"
final FunctionTableGroup functionTableGroup = (FunctionTableGroup) super.convertToSqlAst(
navigablePath,
identifierVariable,
lateral,
canUseInnerJoins,
withOrdinality,
walker
);
final JsonTableArguments arguments = JsonTableArguments.extract(
functionTableGroup.getPrimaryTableReference().getFunctionExpression().getArguments()
);
// Register a query transformer to register a join predicate
walker.registerQueryTransformer(
new JsonTableQueryTransformer( functionTableGroup, arguments, maximumArraySize ) );
return functionTableGroup;
}
};
}
private static | H2JsonTableFunction |
java | apache__dubbo | dubbo-common/src/main/java/org/apache/dubbo/common/utils/ClassUtils.java | {
"start": 6517,
"end": 7224
} | class ____ of this class.
cl = clazz.getClassLoader();
if (cl == null) {
// getClassLoader() returning null indicates the bootstrap ClassLoader
try {
cl = ClassLoader.getSystemClassLoader();
} catch (Exception ignored) {
// Cannot access system ClassLoader - oh well, maybe the caller can live with null...
}
}
}
}
return cl;
}
/**
* Return the default ClassLoader to use: typically the thread context
* ClassLoader, if available; the ClassLoader that loaded the ClassUtils
* | loader |
java | quarkusio__quarkus | extensions/oidc/runtime/src/test/java/io/quarkus/oidc/runtime/TokenIntrospectionTest.java | {
"start": 450,
"end": 3144
} | class ____ {
TokenIntrospection introspection = new TokenIntrospection(
"{"
+ "\"active\": true,"
+ "\"username\": \"alice\","
+ "\"sub\": \"1234567\","
+ "\"aud\": \"http://localhost:8080\","
+ "\"iss\": \"http://keycloak/realm\","
+ "\"client_id\": \"quarkus\","
+ "\"custom\": null,"
+ "\"id\": 1234,"
+ "\"permissions\": [\"read\", \"write\"],"
+ "\"scope\": \"add divide\","
+ "\"scopes\": {\"scope\": \"see\"}"
+ "}");
@Test
public void testActive() {
assertTrue(introspection.isActive());
}
@Test
public void testGetUsername() {
assertEquals("alice", introspection.getUsername());
}
@Test
public void testGetSubject() {
assertEquals("1234567", introspection.getSubject());
}
@Test
public void testGetAudience() {
assertEquals("http://localhost:8080", introspection.getAudience());
}
@Test
public void testGetIssuer() {
assertEquals("http://keycloak/realm", introspection.getIssuer());
}
@Test
public void testGetScopes() {
assertEquals(Set.of("add", "divide"), introspection.getScopes());
}
@Test
public void testGetClientId() {
assertEquals("quarkus", introspection.getClientId());
}
@Test
public void testGetString() {
assertEquals("alice", introspection.getString("username"));
assertNull(introspection.getString("usernames"));
}
@Test
public void testGetBoolean() {
assertTrue(introspection.getBoolean("active"));
assertNull(introspection.getBoolean("activate"));
}
@Test
public void testGetLong() {
assertEquals(1234, introspection.getLong("id"));
assertNull(introspection.getLong("ids"));
}
@Test
public void testGetArray() {
JsonArray array = introspection.getArray("permissions");
assertNotNull(array);
assertEquals(2, array.size());
assertEquals("read", array.getString(0));
assertEquals("write", array.getString(1));
assertNull(introspection.getArray("permit"));
}
@Test
public void testGetObject() {
JsonObject map = introspection.getObject("scopes");
assertNotNull(map);
assertEquals(1, map.size());
assertEquals("see", map.getString("scope"));
}
@Test
public void testGetNullProperty() {
assertNull(introspection.getString("custom"));
}
}
| TokenIntrospectionTest |
java | google__dagger | javatests/dagger/internal/codegen/ComponentCreatorTest.java | {
"start": 30131,
"end": 30559
} | interface ____ {",
" SimpleComponent create(AbstractModule abstractModule, String s);",
" }")
.addLines("}")
.buildSource();
Source abstractModule =
CompilerTests.javaSource(
"test.AbstractModule",
"package test;",
"",
"import dagger.Module;",
"",
"@Module",
"abstract | Factory |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/web/servlet/assertj/AbstractHttpServletResponseAssertTests.java | {
"start": 5243,
"end": 5553
} | class ____ extends AbstractHttpServletResponseAssert<HttpServletResponse, ResponseAssert, HttpServletResponse> {
ResponseAssert(HttpServletResponse actual) {
super(actual, ResponseAssert.class);
}
@Override
protected HttpServletResponse getResponse() {
return this.actual;
}
}
}
| ResponseAssert |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleIndexerAction.java | {
"start": 10426,
"end": 11090
} | class ____ extends BroadcastShardResponse {
private final long numIndexed;
public ShardDownsampleResponse(ShardId shardId, long numIndexed) {
super(shardId);
this.numIndexed = numIndexed;
}
public ShardDownsampleResponse(StreamInput in) throws IOException {
super(in);
numIndexed = in.readLong();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeLong(numIndexed);
}
public long getNumIndexed() {
return numIndexed;
}
}
}
| ShardDownsampleResponse |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchResponse.java | {
"start": 576,
"end": 721
} | class ____ the status of the watch. If the watch was successfully de/activates
* this will reflected the new state of the watch.
*/
public | contains |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/api/operators/collect/CollectCoordinationRequest.java | {
"start": 1496,
"end": 2528
} | class ____ implements CoordinationRequest {
private static final long serialVersionUID = 1L;
private static final TypeSerializer<String> versionSerializer = StringSerializer.INSTANCE;
private static final TypeSerializer<Long> offsetSerializer = LongSerializer.INSTANCE;
private final String version;
private final long offset;
public CollectCoordinationRequest(String version, long offset) {
this.version = version;
this.offset = offset;
}
public CollectCoordinationRequest(DataInputView inView) throws IOException {
this.version = versionSerializer.deserialize(inView);
this.offset = offsetSerializer.deserialize(inView);
}
public String getVersion() {
return version;
}
public long getOffset() {
return offset;
}
public void serialize(DataOutputView outView) throws IOException {
versionSerializer.serialize(version, outView);
offsetSerializer.serialize(offset, outView);
}
}
| CollectCoordinationRequest |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/output/MapOutput.java | {
"start": 1097,
"end": 2837
} | class ____<K, V> extends CommandOutput<K, V, Map<K, V>> {
private boolean initialized;
private K key;
private boolean hasKey;
public MapOutput(RedisCodec<K, V> codec) {
super(codec, Collections.emptyMap());
}
@Override
public void set(ByteBuffer bytes) {
if (!hasKey) {
key = (bytes == null) ? null : codec.decodeKey(bytes);
hasKey = true;
return;
}
V value = (bytes == null) ? null : codec.decodeValue(bytes);
output.put(key, value);
key = null;
hasKey = false;
}
@Override
@SuppressWarnings("unchecked")
public void set(long integer) {
if (!hasKey) {
key = (K) Long.valueOf(integer);
hasKey = true;
return;
}
V value = (V) Long.valueOf(integer);
output.put(key, value);
key = null;
hasKey = false;
}
@Override
public void set(double number) {
if (!hasKey) {
key = (K) Double.valueOf(number);
hasKey = true;
return;
}
V value = (V) Double.valueOf(number);
output.put(key, value);
key = null;
hasKey = false;
}
@Override
public void set(boolean flag) {
if (!hasKey) {
key = (K) Boolean.valueOf(flag);
hasKey = true;
return;
}
V value = (V) Boolean.valueOf(flag);
output.put(key, value);
key = null;
hasKey = false;
}
@Override
public void multi(int count) {
if (!initialized) {
output = new LinkedHashMap<>(count / 2, 1);
initialized = true;
}
}
}
| MapOutput |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/STS2EndpointBuilderFactory.java | {
"start": 21881,
"end": 22188
} | class ____ extends AbstractEndpointBuilder implements STS2EndpointBuilder, AdvancedSTS2EndpointBuilder {
public STS2EndpointBuilderImpl(String path) {
super(componentName, path);
}
}
return new STS2EndpointBuilderImpl(path);
}
} | STS2EndpointBuilderImpl |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/creators/TestCreators.java | {
"start": 2465,
"end": 3102
} | class ____ { // static just to be able to use static methods
/**
* Note: signature (name and parameter types) must match; but
* only annotations will be used, not code or such. And use
* is by augmentation, so we only need to add things to add
* or override.
*/
static FactoryBean createIt(@JsonProperty("mixed") double xyz) {
return null;
}
}
/**
* Bean that defines both creator and factory method as
* creators. Constructors have priority; but it is possible
* to hide it using mix-in annotations.
*/
static | FactoryBeanMixIn |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/QueueProperties.java | {
"start": 5731,
"end": 10705
} | class ____ {
private Map<String, Resource> minQueueResources = new HashMap<>();
private Map<String, ConfigurableResource> maxQueueResources =
new HashMap<>();
private Map<String, ConfigurableResource> maxChildQueueResources =
new HashMap<>();
private Map<String, Integer> queueMaxApps = new HashMap<>();
private Map<String, Float> queueMaxAMShares = new HashMap<>();
private Map<String, Resource> queueMaxContainerAllocation = new HashMap<>();
private Map<String, Float> queueWeights = new HashMap<>();
private Map<String, SchedulingPolicy> queuePolicies = new HashMap<>();
private Map<String, Long> minSharePreemptionTimeouts = new HashMap<>();
private Map<String, Long> fairSharePreemptionTimeouts = new HashMap<>();
private Map<String, Float> fairSharePreemptionThresholds = new HashMap<>();
private Map<String, Map<AccessType, AccessControlList>> queueAcls =
new HashMap<>();
private Map<String, Map<ReservationACL, AccessControlList>>
reservationAcls = new HashMap<>();
private Set<String> reservableQueues = new HashSet<>();
private Set<String> nonPreemptableQueues = new HashSet<>();
// Remember all queue names so we can display them on web UI, etc.
// configuredQueues is segregated based on whether it is a leaf queue
// or a parent queue. This information is used for creating queues.
private Map<FSQueueType, Set<String>> configuredQueues = new HashMap<>();
Builder() {
for (FSQueueType queueType : FSQueueType.values()) {
configuredQueues.put(queueType, new HashSet<>());
}
}
public static Builder create() {
return new Builder();
}
public Builder minQueueResources(String queueName, Resource resource) {
this.minQueueResources.put(queueName, resource);
return this;
}
public Builder maxQueueResources(String queueName,
ConfigurableResource resource) {
this.maxQueueResources.put(queueName, resource);
return this;
}
public Builder maxChildQueueResources(String queueName,
ConfigurableResource resource) {
this.maxChildQueueResources.put(queueName, resource);
return this;
}
public Builder queueMaxApps(String queueName, int value) {
this.queueMaxApps.put(queueName, value);
return this;
}
public Builder queueMaxAMShares(String queueName, float value) {
this.queueMaxAMShares.put(queueName, value);
return this;
}
public Builder queueWeights(String queueName, float value) {
this.queueWeights.put(queueName, value);
return this;
}
public Builder queuePolicies(String queueName, SchedulingPolicy policy) {
this.queuePolicies.put(queueName, policy);
return this;
}
public Builder minSharePreemptionTimeouts(String queueName, long value) {
this.minSharePreemptionTimeouts.put(queueName, value);
return this;
}
public Builder fairSharePreemptionTimeouts(String queueName, long value) {
this.fairSharePreemptionTimeouts.put(queueName, value);
return this;
}
public Builder fairSharePreemptionThresholds(String queueName,
float value) {
this.fairSharePreemptionThresholds.put(queueName, value);
return this;
}
public Builder queueAcls(String queueName, AccessType accessType,
AccessControlList acls) {
this.queueAcls.putIfAbsent(queueName, new HashMap<>());
this.queueAcls.get(queueName).put(accessType, acls);
return this;
}
public Builder reservationAcls(String queueName,
ReservationACL reservationACL, AccessControlList acls) {
this.reservationAcls.putIfAbsent(queueName, new HashMap<>());
this.reservationAcls.get(queueName).put(reservationACL, acls);
return this;
}
public Builder reservableQueues(String queue) {
this.reservableQueues.add(queue);
return this;
}
public Builder nonPreemptableQueues(String queue) {
this.nonPreemptableQueues.add(queue);
return this;
}
public Builder queueMaxContainerAllocation(String queueName,
Resource value) {
queueMaxContainerAllocation.put(queueName, value);
return this;
}
public void configuredQueues(FSQueueType queueType, String queueName) {
this.configuredQueues.get(queueType).add(queueName);
}
public boolean isAclDefinedForAccessType(String queueName,
AccessType accessType) {
Map<AccessType, AccessControlList> aclsForQueue =
this.queueAcls.get(queueName);
return aclsForQueue != null && aclsForQueue.get(accessType) != null;
}
public Map<String, Resource> getMinQueueResources() {
return minQueueResources;
}
public Map<String, ConfigurableResource> getMaxQueueResources() {
return maxQueueResources;
}
public QueueProperties build() {
return new QueueProperties(this);
}
}
}
| Builder |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/aot/AotContextLoaderTests.java | {
"start": 3686,
"end": 3959
} | class ____ extends AbstractAotContextLoader {
@Override
public GenericApplicationContext loadContextForAotProcessing(MergedContextConfiguration mergedConfig,
RuntimeHints runtimeHints) {
return loadContext(mergedConfig);
}
}
}
| RuntimeHintsAwareAotContextLoader |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/component/file/FileProducerTempFileExistsIssueTest.java | {
"start": 1221,
"end": 4472
} | class ____ extends ContextTestSupport {
public static final String TEST_FILE_NAME = "hello." + UUID.randomUUID() + ".txt";
@Test
public void testIllegalConfigurationPrefix() {
IllegalArgumentException e = assertThrows(IllegalArgumentException.class,
() -> context.getEndpoint(fileUri("?fileExist=Append&tempPrefix=foo")).createProducer());
assertEquals("You cannot set both fileExist=Append and tempPrefix/tempFileName options", e.getMessage());
}
@Test
public void testIllegalConfigurationFileName() {
IllegalArgumentException e = assertThrows(IllegalArgumentException.class,
() -> context.getEndpoint(fileUri("?fileExist=Append&tempFileName=foo")).createProducer());
assertEquals("You cannot set both fileExist=Append and tempPrefix/tempFileName options", e.getMessage());
}
@Test
public void testWriteUsingTempPrefixButFileExist() throws Exception {
template.sendBodyAndHeader(fileUri(), "Hello World", Exchange.FILE_NAME, TEST_FILE_NAME);
template.sendBodyAndHeader(fileUri("?tempPrefix=foo"), "Bye World", Exchange.FILE_NAME, TEST_FILE_NAME);
assertFileExists(testFile(TEST_FILE_NAME), "Bye World");
}
@Test
public void testWriteUsingTempPrefixButBothFileExist() throws Exception {
template.sendBodyAndHeader(fileUri(), "Hello World", Exchange.FILE_NAME, TEST_FILE_NAME);
template.sendBodyAndHeader(fileUri(), "Hello World", Exchange.FILE_NAME, "foohello.txt");
template.sendBodyAndHeader(fileUri("?tempPrefix=foo"), "Bye World", Exchange.FILE_NAME, TEST_FILE_NAME);
assertFileExists(testFile(TEST_FILE_NAME), "Bye World");
}
@Test
public void testWriteUsingTempPrefixButFileExistOverride() throws Exception {
template.sendBodyAndHeader(fileUri(), "Hello World", Exchange.FILE_NAME, TEST_FILE_NAME);
template.sendBodyAndHeader(fileUri("?tempPrefix=foo&fileExist=Override"), "Bye World", Exchange.FILE_NAME,
TEST_FILE_NAME);
assertFileExists(testFile(TEST_FILE_NAME), "Bye World");
}
@Test
public void testWriteUsingTempPrefixButFileExistIgnore() throws Exception {
template.sendBodyAndHeader(fileUri(), "Hello World", Exchange.FILE_NAME, TEST_FILE_NAME);
template.sendBodyAndHeader(fileUri("?tempPrefix=foo&fileExist=Ignore"), "Bye World", Exchange.FILE_NAME,
TEST_FILE_NAME);
assertFileExists(testFile(TEST_FILE_NAME), "Hello World");
}
@Test
public void testWriteUsingTempPrefixButFileExistFail() throws Exception {
template.sendBodyAndHeader(fileUri(), "Hello World", Exchange.FILE_NAME, TEST_FILE_NAME);
CamelExecutionException e = assertThrows(CamelExecutionException.class, () -> template
.sendBodyAndHeader(fileUri("?tempPrefix=foo&fileExist=Fail"), "Bye World", Exchange.FILE_NAME, TEST_FILE_NAME));
GenericFileOperationFailedException cause = assertIsInstanceOf(GenericFileOperationFailedException.class, e.getCause());
assertTrue(cause.getMessage().startsWith("File already exist"));
assertFileExists(testFile(TEST_FILE_NAME), "Hello World");
}
}
| FileProducerTempFileExistsIssueTest |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/models/annotations/internal/OneToManyJpaAnnotation.java | {
"start": 528,
"end": 2982
} | class ____ implements OneToMany, AttributeMarker.Fetchable, AttributeMarker.Cascadeable {
private java.lang.Class<?> targetEntity;
private jakarta.persistence.CascadeType[] cascade;
private jakarta.persistence.FetchType fetch;
private String mappedBy;
private boolean orphanRemoval;
/**
* Used in creating dynamic annotation instances (e.g. from XML)
*/
public OneToManyJpaAnnotation(ModelsContext modelContext) {
this.targetEntity = void.class;
this.cascade = new jakarta.persistence.CascadeType[0];
this.fetch = jakarta.persistence.FetchType.LAZY;
this.mappedBy = "";
this.orphanRemoval = false;
}
/**
* Used in creating annotation instances from JDK variant
*/
public OneToManyJpaAnnotation(OneToMany annotation, ModelsContext modelContext) {
this.targetEntity = annotation.targetEntity();
this.cascade = annotation.cascade();
this.fetch = annotation.fetch();
this.mappedBy = annotation.mappedBy();
this.orphanRemoval = annotation.orphanRemoval();
}
/**
* Used in creating annotation instances from Jandex variant
*/
public OneToManyJpaAnnotation(Map<String, Object> attributeValues, ModelsContext modelContext) {
this.targetEntity = (Class<?>) attributeValues.get( "targetEntity" );
this.cascade = (jakarta.persistence.CascadeType[]) attributeValues.get( "cascade" );
this.fetch = (jakarta.persistence.FetchType) attributeValues.get( "fetch" );
this.mappedBy = (String) attributeValues.get( "mappedBy" );
this.orphanRemoval = (boolean) attributeValues.get( "orphanRemoval" );
}
@Override
public Class<? extends Annotation> annotationType() {
return OneToMany.class;
}
@Override
public java.lang.Class<?> targetEntity() {
return targetEntity;
}
public void targetEntity(java.lang.Class<?> value) {
this.targetEntity = value;
}
@Override
public jakarta.persistence.CascadeType[] cascade() {
return cascade;
}
public void cascade(jakarta.persistence.CascadeType[] value) {
this.cascade = value;
}
@Override
public jakarta.persistence.FetchType fetch() {
return fetch;
}
public void fetch(jakarta.persistence.FetchType value) {
this.fetch = value;
}
@Override
public String mappedBy() {
return mappedBy;
}
public void mappedBy(String value) {
this.mappedBy = value;
}
@Override
public boolean orphanRemoval() {
return orphanRemoval;
}
public void orphanRemoval(boolean value) {
this.orphanRemoval = value;
}
}
| OneToManyJpaAnnotation |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhancement/lazy/proxy/DeepInheritanceProxyTest.java | {
"start": 20404,
"end": 20777
} | class ____ extends AMappedSuperclass {
private Boolean fieldInAEntity;
public AEntity(String id) {
super(id);
}
protected AEntity() {
}
public Boolean getFieldInAEntity() {
return fieldInAEntity;
}
public void setFieldInAEntity(Boolean fieldInAEntity) {
this.fieldInAEntity = fieldInAEntity;
}
}
@Entity(name="AAEntity")
public static | AEntity |
java | quarkusio__quarkus | extensions/reactive-routes/deployment/src/test/java/io/quarkus/vertx/web/SimpleRouteTest.java | {
"start": 3072,
"end": 4957
} | class ____ {
@Inject
Transformer transformer;
@Inject
SecurityIdentity securityIdentity;
@Route(path = "/hello")
@Route(path = "/foo")
@Route(path = "no-slash")
void hello(RoutingContext context) {
String name = context.request().getParam("name");
context.response().setStatusCode(200).end("Hello " + (name != null ? name : "world") + "!");
}
@Route(path = "/secured")
@RolesAllowed("admin") //we are just testing that this is actually denied
void secure(RoutingContext context) {
context.response().setStatusCode(200).end();
}
@Route(path = "/security-identity")
//we are just testing that this does not throw an exception, see https://github.com/quarkusio/quarkus/issues/13835
void secIdentity(RoutingContext context) {
context.response().setStatusCode(200).end(securityIdentity.getPrincipal().getName());
}
@Route // path is derived from the method name
void bzuk(RoutingExchange exchange) {
exchange.ok("Hello " + exchange.getParam("name").orElse("world") + "!");
}
@Route(path = "/delete", methods = DELETE)
void deleteHttpMethod(RoutingExchange exchange) {
exchange.ok("deleted");
}
@Route(path = "/body", methods = POST, consumes = "text/plain")
void post(RoutingContext context) {
context.response().setStatusCode(200).end("Hello " + context.getBodyAsString() + "!");
}
@Route
void request(RoutingContext context) {
context.response().setStatusCode(200).end(transformer.transform("Hello!"));
}
@Route
void inject(RoutingExchange exchange) {
exchange.ok(transformer.getFoo());
}
}
static | SimpleBean |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/validation/NormalScopedConstructorTest.java | {
"start": 910,
"end": 1023
} | class ____ {
@Inject
public Unproxyable(Instance<String> instance) {
}
}
}
| Unproxyable |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/mysql/select/MySqlSelectTest_242.java | {
"start": 890,
"end": 1652
} | class ____ extends MysqlTest {
public void test_0() throws Exception {
String sql = "SELECT ip.*\n" +
"FROM ba_customer_connect connect, ba_customer_connect_ip ip\n" +
"WHERE connect.siteId = 2844001\n" +
" AND connect.cnId = ip.cnId;";
SQLSelectStatement stmt = (SQLSelectStatement) SQLUtils.parseSingleMysqlStatement(sql);
assertEquals("SELECT ip.*\n" +
"FROM ba_customer_connect connect, ba_customer_connect_ip ip\n" +
"WHERE connect.siteId = 2844001\n" +
"\tAND connect.cnId = ip.cnId;", stmt.toString());
MySqlSchemaStatVisitor visitor = new MySqlSchemaStatVisitor();
stmt.accept(visitor);
}
}
| MySqlSelectTest_242 |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/api/filtering/ApiFilteringActionFilterTests.java | {
"start": 5736,
"end": 6181
} | class ____ extends ApiFilteringActionFilter<TestResponse> {
TestFilter(ThreadContext threadContext, boolean filterOperatorRequests) {
super(threadContext, "test.action", TestResponse.class, filterOperatorRequests);
}
@Override
protected TestResponse filterResponse(TestResponse response) {
response.modified = true;
return response;
}
}
private static | TestFilter |
java | apache__hadoop | hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestCreateFileCost.java | {
"start": 3316,
"end": 8740
} | class ____ extends AbstractS3ACostTest {
/**
* This test suite is parameterized for the different create file
* options.
* @return a list of test parameters.
*/
public static Collection<Object[]> params() {
return Arrays.asList(new Object[][]{
{false},
{true}
});
}
/**
* Flag for performance creation; all cost asserts need changing.
*/
private final boolean createPerformance;
/**
* Create.
* @param createPerformance use the performance flag
*/
public ITestCreateFileCost(final boolean createPerformance) {
this.createPerformance = createPerformance;
}
/**
* Determine the expected cost of a create operation;
* if {@link #createPerformance} is true, then the cost is always "no IO".
* @param source source cost
* @return cost to assert
*/
private OperationCost expected(OperationCost source) {
return createPerformance ? NO_HEAD_OR_LIST : source;
}
@Override
public Configuration createConfiguration() {
final Configuration conf = setPerformanceFlags(
super.createConfiguration(),
createPerformance ? "create" : "");
S3ATestUtils.disableFilesystemCaching(conf);
return conf;
}
@Test
public void testCreateNoOverwrite() throws Throwable {
describe("Test file creation without overwrite");
Path testFile = methodPath();
// when overwrite is false, the path is checked for existence.
create(testFile, false,
expected(CREATE_FILE_NO_OVERWRITE));
}
@Test
public void testCreateOverwrite() throws Throwable {
describe("Test file creation with overwrite");
Path testFile = methodPath();
// when overwrite is true: only the directory checks take place.
create(testFile, true, expected(CREATE_FILE_OVERWRITE));
}
@Test
public void testCreateNoOverwriteFileExists() throws Throwable {
describe("Test cost of create file failing with existing file");
Path testFile = file(methodPath());
// now there is a file there, an attempt with overwrite == false will
// fail on the first HEAD.
if (!createPerformance) {
interceptOperation(FileAlreadyExistsException.class, "",
FILE_STATUS_FILE_PROBE,
() -> file(testFile, false));
} else {
create(testFile, false, NO_HEAD_OR_LIST);
}
}
@Test
public void testCreateFileOverDirNoOverwrite() throws Throwable {
describe("Test cost of create file overwrite=false failing with existing dir");
Path testFile = dir(methodPath());
// now there is a dir marker there, an attempt with overwrite == true will
// fail on the first HEAD.
if (!createPerformance) {
interceptOperation(FileAlreadyExistsException.class, "",
GET_FILE_STATUS_ON_DIR_MARKER,
() -> file(testFile, false));
} else {
create(testFile, false, NO_HEAD_OR_LIST);
}
}
@Test
public void testCreateFileOverDirWithOverwrite() throws Throwable {
describe("Test cost of create file overwrite=false failing with existing dir");
Path testFile = dir(methodPath());
// now there is a dir marker there, an attempt with overwrite == true will
// fail on the LIST; no HEAD is issued.
if (!createPerformance) {
interceptOperation(FileAlreadyExistsException.class, "",
LIST_OPERATION,
() -> file(testFile, true));
} else {
create(testFile, true, NO_HEAD_OR_LIST);
}
}
/**
* Use the builder API.
* on s3a this skips parent checks, always.
*/
@Test
public void testCreateBuilderSequence() throws Throwable {
describe("Test builder file creation cost");
Path testFile = methodPath();
dir(testFile.getParent());
// s3a fs skips the recursive checks to avoid race
// conditions with other processes/threads deleting
// files and so briefly the path not being present
// only make sure the dest path isn't a directory.
buildFile(testFile, true, false,
expected(FILE_STATUS_DIR_PROBE));
// now there is a file there, an attempt with overwrite == false will
// fail on the first HEAD.
if (!createPerformance) {
interceptOperation(FileAlreadyExistsException.class, "",
GET_FILE_STATUS_ON_FILE,
() -> buildFile(testFile, false, true,
GET_FILE_STATUS_ON_FILE));
} else {
if (getFileSystem().getConf().getBoolean(FS_S3A_CONDITIONAL_CREATE_ENABLED, true)) {
// will trigger conditional create and throw RemoteFileChangedException
intercept(RemoteFileChangedException.class,
() -> buildFile(testFile, false, true, NO_HEAD_OR_LIST));
} else {
// third party store w/out conditional overwrite support
buildFile(testFile, false, true, NO_HEAD_OR_LIST);
}
}
}
@Test
public void testCreateFilePerformanceFlag() throws Throwable {
describe("createFile with performance flag skips safety checks");
S3AFileSystem fs = getFileSystem();
Path path = methodPath();
// increment progress events
AtomicLong progressEvents = new AtomicLong(0);
FSDataOutputStreamBuilder builder = fs.createFile(path)
.overwrite(false)
.progress(progressEvents::incrementAndGet)
.recursive();
// this has a broken return type; something to do with the return value of
// the createFile() call. only fixable via risky changes to the FileSystem | ITestCreateFileCost |
java | dropwizard__dropwizard | dropwizard-health/src/test/java/io/dropwizard/health/HealthCheckManagerTest.java | {
"start": 1331,
"end": 19714
} | class ____ {
private static final String NAME = "test";
private static final String NAME_2 = "test2";
private static final HealthCheckType READY = HealthCheckType.READY;
private static final Duration SHUTDOWN_WAIT = Duration.seconds(5);
@Mock
private HealthCheckScheduler scheduler;
@Test
void shouldIgnoreUnconfiguredAddedHealthChecks() {
// given
final HealthCheckManager manager = new HealthCheckManager(Collections.emptyList(), scheduler,
new MetricRegistry(), SHUTDOWN_WAIT, true, Collections.emptyList());
// when
manager.onHealthCheckAdded(NAME, mock(HealthCheck.class));
// then
verifyNoInteractions(scheduler);
assertThat(manager.healthStateView(NAME))
.isEmpty();
assertThat(manager.healthStateViews())
.isEmpty();
}
@Test
void shouldScheduleHealthCheckWhenConfiguredHealthCheckAdded() {
// given
final HealthCheckConfiguration config = new HealthCheckConfiguration();
config.setName(NAME);
config.setCritical(true);
config.setSchedule(new Schedule());
final HealthCheckManager manager = new HealthCheckManager(singletonList(config), scheduler,
new MetricRegistry(), SHUTDOWN_WAIT, true, Collections.emptyList());
// when
manager.onHealthCheckAdded(NAME, mock(HealthCheck.class));
// then
verifyCheckWasScheduled(scheduler, true);
assertThat(manager.healthStateViews())
.singleElement()
.isEqualTo(manager.healthStateView(NAME).orElseThrow(IllegalStateException::new))
.satisfies(view -> assertThat(view.getName()).isEqualTo(NAME));
}
@Test
void shouldUnscheduleTaskWhenHealthCheckRemoved() {
// given
final ScheduledHealthCheck healthCheck = mock(ScheduledHealthCheck.class);
final HealthCheckManager manager = new HealthCheckManager(Collections.emptyList(), scheduler,
new MetricRegistry(), SHUTDOWN_WAIT, true, Collections.emptyList());
manager.setChecks(singletonMap(NAME, healthCheck));
// when
manager.onHealthCheckRemoved(NAME, mock(HealthCheck.class));
// then
verify(scheduler).unschedule(NAME);
assertThat(manager.healthStateView(NAME))
.isEmpty();
assertThat(manager.healthStateViews())
.singleElement()
.isNull();
}
@Test
void shouldDoNothingWhenStateChangesForUnconfiguredHealthCheck() {
// given
final HealthCheckManager manager = new HealthCheckManager(Collections.emptyList(), scheduler,
new MetricRegistry(), SHUTDOWN_WAIT, true, Collections.emptyList());
// when
manager.onStateChanged(NAME, false);
// then
verifyNoInteractions(scheduler);
}
@Test
void shouldReportUnhealthyWhenInitialOverallStateIsFalse() {
final HealthCheckConfiguration config = new HealthCheckConfiguration();
config.setName(NAME);
config.setCritical(true);
config.setInitialState(false);
config.setSchedule(new Schedule());
final HealthCheckManager manager = new HealthCheckManager(singletonList(config), scheduler,
new MetricRegistry(), SHUTDOWN_WAIT, false, Collections.emptyList());
manager.initializeAppHealth();
final HealthCheck check = mock(HealthCheck.class);
manager.onHealthCheckAdded(NAME, check);
assertThat(manager)
.satisfies(m -> assertThat(m.isHealthy()).isFalse())
.satisfies(m -> assertThat(m.isHealthy("alive")).isTrue());
manager.onStateChanged(NAME, true);
assertThat(manager)
.satisfies(m -> assertThat(m.isHealthy()).isTrue())
.satisfies(m -> assertThat(m.isHealthy("alive")).isTrue());
verifyCheckWasScheduled(scheduler, true);
}
@Test
void shouldReportHealthyWhenInitialOverallStateIsFalseAndReadyCheckIsHealthy() {
final HealthCheckConfiguration config = new HealthCheckConfiguration();
config.setName(NAME);
config.setType(HealthCheckType.READY);
config.setCritical(false);
config.setInitialState(false);
config.setSchedule(new Schedule());
final HealthCheckManager manager = new HealthCheckManager(singletonList(config), scheduler,
new MetricRegistry(), SHUTDOWN_WAIT, false, Collections.emptyList());
manager.initializeAppHealth();
final HealthCheck check = mock(HealthCheck.class);
manager.onHealthCheckAdded(NAME, check);
assertThat(manager)
.satisfies(m -> assertThat(m.isHealthy()).isFalse())
.satisfies(m -> assertThat(m.isHealthy("alive")).isTrue());
manager.onStateChanged(NAME, true);
assertThat(manager)
.satisfies(m -> assertThat(m.isHealthy()).isTrue())
.satisfies(m -> assertThat(m.isHealthy("alive")).isTrue())
.satisfies(m -> assertThat(m.isHealthy("ready")).isTrue());
verifyCheckWasScheduled(scheduler, false);
}
@Test
void shouldMarkServerUnhealthyWhenCriticalHealthCheckFails() {
final HealthCheckConfiguration config = new HealthCheckConfiguration();
config.setName(NAME);
config.setCritical(true);
config.setSchedule(new Schedule());
final HealthCheckManager manager = new HealthCheckManager(singletonList(config), scheduler,
new MetricRegistry(), SHUTDOWN_WAIT, true, Collections.emptyList());
manager.initializeAppHealth();
final HealthCheck check = mock(HealthCheck.class);
manager.onHealthCheckAdded(NAME, check);
assertThat(manager)
.satisfies(m -> assertThat(m.isHealthy()).isTrue())
.satisfies(m -> assertThat(m.isHealthy("alive")).isTrue());
manager.onStateChanged(NAME, false);
assertThat(manager)
.satisfies(m -> assertThat(m.isHealthy()).isFalse())
.satisfies(m -> assertThat(m.isHealthy("alive")).isTrue());
verifyCheckWasScheduled(scheduler, true);
}
@Test
void shouldMarkServerNotAliveAndUnhealthyWhenCriticalAliveCheckFails() {
final HealthCheckConfiguration config = new HealthCheckConfiguration();
config.setName(NAME);
config.setType(HealthCheckType.ALIVE);
config.setSchedule(new Schedule());
final HealthCheckManager manager = new HealthCheckManager(singletonList(config), scheduler,
new MetricRegistry(), SHUTDOWN_WAIT, true, Collections.emptyList());
manager.initializeAppHealth();
final HealthCheck check = mock(HealthCheck.class);
manager.onHealthCheckAdded(NAME, check);
assertThat(manager)
.satisfies(m -> assertThat(m.isHealthy()).isTrue())
.satisfies(m -> assertThat(m.isHealthy("alive")).isTrue());
manager.onStateChanged(NAME, false);
assertThat(manager)
.satisfies(m -> assertThat(m.isHealthy()).isFalse())
.satisfies(m -> assertThat(m.isHealthy("alive")).isFalse());
verifyCheckWasScheduled(scheduler, true);
}
@Test
void shouldMarkServerHealthyWhenCriticalHealthCheckRecovers() {
final HealthCheckConfiguration config = new HealthCheckConfiguration();
config.setName(NAME);
config.setCritical(true);
config.setSchedule(new Schedule());
final HealthCheckManager manager = new HealthCheckManager(singletonList(config), scheduler,
new MetricRegistry(), SHUTDOWN_WAIT, true, Collections.emptyList());
final HealthCheck check = mock(HealthCheck.class);
manager.onHealthCheckAdded(NAME, check);
manager.onStateChanged(NAME, false);
assertThat(manager.isHealthy()).isFalse();
manager.onStateChanged(NAME, true);
assertThat(manager.isHealthy()).isTrue();
ArgumentCaptor<ScheduledHealthCheck> checkCaptor = ArgumentCaptor.forClass(ScheduledHealthCheck.class);
ArgumentCaptor<Boolean> healthyCaptor = ArgumentCaptor.forClass(Boolean.class);
verify(scheduler).scheduleInitial(checkCaptor.capture());
verify(scheduler, times(2)).schedule(checkCaptor.capture(), healthyCaptor.capture());
assertThat(checkCaptor.getAllValues())
.hasSize(3)
.allMatch(value -> NAME.equals(value.getName()))
.allMatch(ScheduledHealthCheck::isCritical);
assertThat(healthyCaptor.getAllValues())
.containsExactly(false, true);
}
@Test
void shouldNotChangeServerStateWhenNonCriticalHealthCheckFails() {
final HealthCheckConfiguration config = new HealthCheckConfiguration();
config.setName(NAME);
config.setCritical(false);
config.setSchedule(new Schedule());
final HealthCheck check = mock(HealthCheck.class);
final HealthCheckManager manager = new HealthCheckManager(singletonList(config), scheduler,
new MetricRegistry(), SHUTDOWN_WAIT, true, Collections.emptyList());
manager.initializeAppHealth();
manager.onHealthCheckAdded(NAME, check);
manager.onStateChanged(NAME, false);
assertThat(manager.isHealthy()).isTrue();
verifyCheckWasScheduled(scheduler, false);
}
@Test
void shouldNotChangeServerStateWhenNonCriticalHealthCheckRecovers() {
final List<HealthCheckConfiguration> configs = new ArrayList<>();
final HealthCheckConfiguration nonCriticalConfig = new HealthCheckConfiguration();
nonCriticalConfig.setName(NAME);
nonCriticalConfig.setCritical(false);
nonCriticalConfig.setSchedule(new Schedule());
nonCriticalConfig.setInitialState(false);
configs.add(nonCriticalConfig);
final HealthCheckConfiguration criticalConfig = new HealthCheckConfiguration();
criticalConfig.setName(NAME_2);
criticalConfig.setCritical(true);
criticalConfig.setSchedule(new Schedule());
criticalConfig.setInitialState(false);
configs.add(criticalConfig);
final HealthCheckManager manager = new HealthCheckManager(unmodifiableList(configs), scheduler, new MetricRegistry(),
SHUTDOWN_WAIT, true, Collections.emptyList());
final HealthCheck nonCriticalCheck = mock(HealthCheck.class);
final HealthCheck criticalCheck = mock(HealthCheck.class);
manager.onHealthCheckAdded(NAME, nonCriticalCheck);
manager.onHealthCheckAdded(NAME_2, criticalCheck);
manager.onStateChanged(NAME, false);
manager.onStateChanged(NAME_2, false);
assertThat(manager.isHealthy()).isFalse();
manager.onStateChanged(NAME, true);
assertThat(manager.isHealthy()).isFalse();
ArgumentCaptor<ScheduledHealthCheck> checkCaptor = ArgumentCaptor.forClass(ScheduledHealthCheck.class);
ArgumentCaptor<Boolean> healthyCaptor = ArgumentCaptor.forClass(Boolean.class);
verify(scheduler, times(2)).scheduleInitial(checkCaptor.capture());
verify(scheduler, times(3)).schedule(checkCaptor.capture(), healthyCaptor.capture());
assertThat(checkCaptor.getAllValues())
.hasSize(5)
.satisfies(values -> assertThat(values).element(0)
.satisfies(value -> assertThat(value.getName()).isEqualTo(NAME))
.satisfies(value -> assertThat(value.isCritical()).isFalse()))
.satisfies(values -> assertThat(values).element(1)
.satisfies(value -> assertThat(value.getName()).isEqualTo(NAME_2))
.satisfies(value -> assertThat(value.isCritical()).isTrue()))
.satisfies(values -> assertThat(values).element(2)
.satisfies(value -> assertThat(value.getName()).isEqualTo(NAME))
.satisfies(value -> assertThat(value.isCritical()).isFalse()))
.satisfies(values -> assertThat(values).element(3)
.satisfies(value -> assertThat(value.getName()).isEqualTo(NAME_2))
.satisfies(value -> assertThat(value.isCritical()).isTrue()))
.satisfies(values -> assertThat(values).element(4)
.satisfies(value -> assertThat(value.getName()).isEqualTo(NAME))
.satisfies(value -> assertThat(value.isCritical()).isFalse()));
assertThat(healthyCaptor.getAllValues()).containsExactly(false, false, true);
}
@Test
void shouldRecordNumberOfHealthyAndUnhealthyHealthChecks() {
// given
final Schedule schedule = new Schedule();
final List<HealthCheckConfiguration> configs = new ArrayList<>();
final HealthCheckConfiguration nonCriticalConfig = new HealthCheckConfiguration();
nonCriticalConfig.setName(NAME);
nonCriticalConfig.setCritical(false);
nonCriticalConfig.setSchedule(schedule);
configs.add(nonCriticalConfig);
final HealthCheckConfiguration criticalConfig = new HealthCheckConfiguration();
criticalConfig.setName(NAME_2);
criticalConfig.setCritical(true);
criticalConfig.setSchedule(schedule);
configs.add(criticalConfig);
final HealthCheck check = mock(HealthCheck.class);
final MetricRegistry metrics = new MetricRegistry();
final AtomicInteger healthyCounter = new AtomicInteger();
final AtomicInteger unhealthyCounter = new AtomicInteger();
final HealthStateListener countingListener = new HealthStateListener() {
@Override
public void onHealthyCheck(String healthCheckName) {
healthyCounter.incrementAndGet();
}
@Override
public void onUnhealthyCheck(String healthCheckName) {
unhealthyCounter.incrementAndGet();
}
@Override
public void onStateChanged(String healthCheckName, boolean healthy) {
}
};
final HealthCheckManager manager = new HealthCheckManager(unmodifiableList(configs), scheduler, metrics, SHUTDOWN_WAIT, true,
Collections.singleton(countingListener));
final ScheduledHealthCheck check1 = new ScheduledHealthCheck(NAME, READY, nonCriticalConfig.isCritical(), check,
schedule, new State(NAME, schedule.getFailureAttempts(), schedule.getSuccessAttempts(), true, manager),
metrics.counter(NAME + ".healthy"), metrics.counter(NAME + ".unhealthy"));
final ScheduledHealthCheck check2 = new ScheduledHealthCheck(NAME_2, READY, criticalConfig.isCritical(), check,
schedule, new State(NAME, schedule.getFailureAttempts(), schedule.getSuccessAttempts(), true, manager),
metrics.counter(NAME_2 + ".healthy"), metrics.counter(NAME_2 + ".unhealthy"));
manager.setChecks(Map.of(NAME, check1, NAME_2, check2));
// then
assertThat(metrics.gauge(manager.getAggregateHealthyName(), null).getValue())
.isEqualTo(2L);
assertThat(metrics.gauge(manager.getAggregateUnhealthyName(), null).getValue())
.isEqualTo(0L);
// when
when(check.execute()).thenReturn(HealthCheck.Result.unhealthy("because"));
// Fail 3 times, to trigger unhealthy state change
check2.run();
check2.run();
check2.run();
// then
assertThat(metrics.gauge(manager.getAggregateHealthyName(), null).getValue())
.isEqualTo(1L);
assertThat(metrics.gauge(manager.getAggregateUnhealthyName(), null).getValue())
.isEqualTo(1L);
assertThat(unhealthyCounter).hasValue(3);
assertThat(healthyCounter).hasValue(0);
}
@Test
@DisabledOnOs(OS.WINDOWS)
void shouldContinueScheduledCheckingWhileDelayingShutdown() throws Exception {
// given
final int checkIntervalMillis = 10;
final int shutdownWaitTimeMillis = 50;
final int expectedCount = shutdownWaitTimeMillis / checkIntervalMillis - 1;
final AtomicBoolean shutdownFailure = new AtomicBoolean(false);
final CountingHealthCheck check = new CountingHealthCheck();
final Schedule schedule = new Schedule();
schedule.setCheckInterval(Duration.milliseconds(checkIntervalMillis));
final HealthCheckConfiguration checkConfig = new HealthCheckConfiguration();
checkConfig.setName("check1");
checkConfig.setCritical(true);
checkConfig.setSchedule(schedule);
final List<HealthCheckConfiguration> configs = singletonList(checkConfig);
final ScheduledExecutorService executorService = new ScheduledThreadPoolExecutor(1);
final HealthCheckScheduler scheduler = new HealthCheckScheduler(executorService);
final MetricRegistry metrics = new MetricRegistry();
final Duration shutdownWaitPeriod = Duration.milliseconds(shutdownWaitTimeMillis);
// when
final HealthCheckManager manager = new HealthCheckManager(configs, scheduler, metrics, shutdownWaitPeriod,
true, Collections.emptyList());
manager.onHealthCheckAdded("check1", check);
// simulate JVM shutdown hook
final Thread shutdownThread = new Thread(() -> {
try {
manager.notifyShutdownStarted();
} catch (Exception e) {
shutdownFailure.set(true);
e.printStackTrace();
}
});
Thread.sleep(20);
long beforeCount = check.getCount();
shutdownThread.start();
shutdownThread.join();
Thread.sleep(20);
long afterCount = check.getCount();
// then
assertThat(shutdownFailure).isFalse();
assertThat(afterCount - beforeCount).isGreaterThanOrEqualTo(expectedCount);
}
private void verifyCheckWasScheduled(HealthCheckScheduler scheduler, boolean critical) {
ArgumentCaptor<ScheduledHealthCheck> checkCaptor = ArgumentCaptor.forClass(ScheduledHealthCheck.class);
verify(scheduler).scheduleInitial(checkCaptor.capture());
assertThat(checkCaptor.getValue())
.satisfies(value -> assertThat(value.getName()).isEqualTo(HealthCheckManagerTest.NAME))
.satisfies(value -> assertThat(value.isCritical()).isEqualTo(critical));
}
private static | HealthCheckManagerTest |
java | mockito__mockito | mockito-core/src/test/java/org/concurrentmockito/ThreadsShareGenerouslyStubbedMockTest.java | {
"start": 436,
"end": 1994
} | class ____ extends TestBase {
private IMethods mock;
@Test
public void shouldAllowVerifyingInThreads() throws Exception {
for (int i = 0; i < 50; i++) {
performTest();
}
}
private void performTest() throws InterruptedException {
mock = mock(IMethods.class);
when(mock.simpleMethod("foo"))
.thenReturn("foo")
.thenReturn("bar")
.thenReturn("baz")
.thenReturn("foo")
.thenReturn("bar")
.thenReturn("baz");
final Thread[] listeners = new Thread[100];
for (int i = 0; i < listeners.length; i++) {
listeners[i] =
new Thread() {
@Override
public void run() {
try {
mock.simpleMethod("foo");
mock.simpleMethod("foo");
mock.simpleMethod("foo");
mock.simpleMethod("foo");
mock.simpleMethod("foo");
mock.simpleMethod("foo");
} catch (Exception e) {
throw new RuntimeException(e);
}
}
};
listeners[i].start();
}
for (Thread listener : listeners) {
listener.join();
}
}
}
| ThreadsShareGenerouslyStubbedMockTest |
java | spring-projects__spring-framework | spring-jms/src/main/java/org/springframework/jms/listener/AbstractMessageListenerContainer.java | {
"start": 15777,
"end": 16679
} | class ____ is good enough as subscription name).
* <p>Only makes sense when listening to a topic (pub-sub domain),
* therefore this method switches the "pubSubDomain" flag as well.
* @see #setSubscriptionName
* @see #setPubSubDomain
*/
public void setSubscriptionDurable(boolean subscriptionDurable) {
this.subscriptionDurable = subscriptionDurable;
if (subscriptionDurable) {
setPubSubDomain(true);
}
}
/**
* Return whether to make the subscription durable.
*/
public boolean isSubscriptionDurable() {
return this.subscriptionDurable;
}
/**
* Set whether to make the subscription shared. The shared subscription name
* to be used can be specified through the "subscriptionName" property.
* <p>Default is "false". Set this to "true" to register a shared subscription,
* typically in combination with a "subscriptionName" value (unless
* your message listener | name |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/query/hql/internal/QualifiedJoinPredicatePathConsumer.java | {
"start": 1029,
"end": 5039
} | class ____ extends BasicDotIdentifierConsumer {
private final SqmJoin<?, ?> sqmJoin;
public QualifiedJoinPredicatePathConsumer(
SqmJoin<?, ?> sqmJoin,
SqmCreationState creationState) {
super( creationState );
this.sqmJoin = sqmJoin;
}
@Override
protected SemanticPathPart createBasePart() {
return new BaseLocalSequencePart() {
@Override
protected void validateAsRoot(SqmFrom<?, ?> pathRoot) {
final SqmRoot<?> root = pathRoot.findRoot();
final SqmRoot<?> joinRoot = sqmJoin.findRoot();
if ( root != joinRoot ) {
// The root of a path within a join condition doesn't have the same root as the
// current join we are processing.
// The aim of this check is to prevent uses of different roots i.e.
// `from A a, B b join C c c.id = a.id` would be illegal
final SqmCreationProcessingState processingState = getCreationState().getCurrentProcessingState();
// First, we need to find out if the current join is part of current processing query
final SqmQuery<?> currentProcessingQuery = processingState.getProcessingQuery();
if ( currentProcessingQuery instanceof SqmSelectQuery<?> selectQuery ) {
final SqmQuerySpec<?> querySpec = selectQuery.getQuerySpec();
final SqmFromClause fromClause = querySpec.getFromClause();
// If the current processing query contains the root of the current join,
// then the root of the processing path must be a root of one of the parent queries
if ( fromClause != null && contains( fromClause.getRoots(), joinRoot ) ) {
// It is allowed to use correlations from the same query
if ( !( root instanceof SqmCorrelation<?, ?> ) || !contains( fromClause.getRoots(), root ) ) {
validateAsRootOnParentQueryClosure( pathRoot, root,
processingState.getParentProcessingState() );
}
return;
}
}
// If the current join is not part of the processing query, this must be a subquery in the ON clause
// in which case the path root is allowed to occur in the current processing query as root
if ( currentProcessingQuery instanceof SqmSubQuery<?> ) {
validateAsRootOnParentQueryClosure( pathRoot, root, processingState );
return;
}
throw new SemanticException(
String.format(
Locale.ROOT,
"SqmQualifiedJoin predicate referred to SqmRoot [%s] other than the join's root [%s]",
pathRoot.getNavigablePath(),
sqmJoin.getNavigablePath()
)
);
}
super.validateAsRoot( pathRoot );
}
private void validateAsRootOnParentQueryClosure(
SqmFrom<?, ?> pathRoot,
SqmRoot<?> root,
SqmCreationProcessingState processingState) {
while ( processingState != null ) {
final SqmQuery<?> processingQuery = processingState.getProcessingQuery();
if ( processingQuery instanceof SqmSelectQuery<?> selectQuery ) {
final SqmQuerySpec<?> querySpec = selectQuery.getQuerySpec();
final SqmFromClause fromClause = querySpec.getFromClause();
// If we are in a subquery, the "foreign" from element could be one of the subquery roots,
// which is totally fine. The aim of this check is to prevent uses of different "spaces"
// i.e. `from A a, B b join b.id = a.id` would be illegal
if ( fromClause != null && contains( fromClause.getRoots(), root ) ) {
super.validateAsRoot( pathRoot );
return;
}
}
processingState = processingState.getParentProcessingState();
}
throw new SemanticException(
String.format(
Locale.ROOT,
"SqmQualifiedJoin predicate referred to SqmRoot [%s] other than the join's root [%s]",
pathRoot.getNavigablePath(),
sqmJoin.getNavigablePath()
)
);
}
private boolean contains(List<SqmRoot<?>> roots, SqmRoot<?> root) {
for ( SqmRoot<?> sqmRoot : roots ) {
if ( sqmRoot == root ) {
return true;
}
}
return false;
}
};
}
}
| QualifiedJoinPredicatePathConsumer |
java | elastic__elasticsearch | x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapper.java | {
"start": 8118,
"end": 8811
} | class ____ {
public static final FieldType FIELD_TYPE = new FieldType();
static {
FIELD_TYPE.setTokenized(false);
FIELD_TYPE.setIndexOptions(IndexOptions.DOCS);
FIELD_TYPE.setStoreTermVectorOffsets(false);
FIELD_TYPE.setOmitNorms(true);
FIELD_TYPE.freeze();
}
public static final TextSearchInfo TEXT_SEARCH_INFO = new TextSearchInfo(
FIELD_TYPE,
null,
Lucene.KEYWORD_ANALYZER,
Lucene.KEYWORD_ANALYZER
);
}
private static WildcardFieldMapper toType(FieldMapper in) {
return (WildcardFieldMapper) in;
}
public static | Defaults |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobEvent.java | {
"start": 1021,
"end": 1250
} | class ____ extends AbstractEvent<JobEventType> {
private JobId jobID;
public JobEvent(JobId jobID, JobEventType type) {
super(type);
this.jobID = jobID;
}
public JobId getJobId() {
return jobID;
}
}
| JobEvent |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/http/WebConfigUtilsTests.java | {
"start": 1005,
"end": 1404
} | class ____ {
public static final String URL = "/url";
@Mock
private ParserContext parserContext;
// SEC-1980
@Test
public void validateHttpRedirectSpELNoParserWarning() {
WebConfigUtils.validateHttpRedirect("#{T(org.springframework.security.config.http.WebConfigUtilsTest).URL}",
this.parserContext, "fakeSource");
verifyNoMoreInteractions(this.parserContext);
}
}
| WebConfigUtilsTests |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/query/sqm/tree/SqmNode.java | {
"start": 348,
"end": 680
} | interface ____ extends JpaCriteriaNode, SqmCacheable {
Logger LOG = Logger.getLogger( SqmNode.class );
default String asLoggableText() {
LOG.debugf( "#asLoggableText not defined for %s - using #toString", getClass().getName() );
return toString();
}
NodeBuilder nodeBuilder();
SqmNode copy(SqmCopyContext context);
}
| SqmNode |
java | apache__camel | components/camel-tarfile/src/test/java/org/apache/camel/processor/aggregate/tarfile/AggregationStrategyWithFilenameHeaderTest.java | {
"start": 1744,
"end": 4165
} | class ____ extends CamelTestSupport {
private static final List<String> FILE_NAMES = Arrays.asList("foo", "bar");
private TarAggregationStrategy tar = new TarAggregationStrategy(false, true);
@BeforeEach
public void cleanOutputDirectories() {
tar.setParentDir("target/temp");
deleteDirectory("target/temp");
deleteDirectory("target/out");
}
@Test
void testSplitter() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:aggregateToTarEntry");
mock.expectedMessageCount(1);
template.setDefaultEndpointUri("direct:start");
template.sendBodyAndHeader("foo", Exchange.FILE_NAME, FILE_NAMES.get(0));
template.sendBodyAndHeader("bar", Exchange.FILE_NAME, FILE_NAMES.get(1));
MockEndpoint.assertIsSatisfied(context);
await("Should be a file in target/out directory").until(() -> {
File[] files = new File("target/out").listFiles();
return files != null && files.length > 0;
});
File[] files = new File("target/out").listFiles();
File resultFile = files[0];
final TarArchiveInputStream tis
= new ArchiveStreamFactory().createArchiveInputStream(ArchiveStreamFactory.TAR,
new BufferedInputStream(new FileInputStream(resultFile)));
try {
int fileCount = 0;
for (TarArchiveEntry entry = tis.getNextEntry(); entry != null; entry = tis.getNextEntry()) {
fileCount++;
assertTrue(FILE_NAMES.contains(entry.getName()), "Tar entry file name should be on of: " + FILE_NAMES);
}
assertEquals(FILE_NAMES.size(), fileCount, "Tar file should contain " + FILE_NAMES.size() + " files");
} finally {
IOHelper.close(tis);
}
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start")
.aggregate(tar)
.constant(true)
.completionTimeout(50)
.to("file:target/out")
.to("mock:aggregateToTarEntry")
.log("Done processing tar file: ${header.CamelFileName}");
}
};
}
}
| AggregationStrategyWithFilenameHeaderTest |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/sql/dialect/snowflake/SnowflakeStatementParser.java | {
"start": 211,
"end": 621
} | class ____ extends SQLStatementParser {
public SnowflakeStatementParser(String sql) {
super(new SnowflakeExprParser(sql));
}
public SnowflakeStatementParser(String sql, SQLParserFeature... features) {
super(new SnowflakeExprParser(sql, features));
}
/** Creates a parser on top of an already-constructed lexer. */
public SnowflakeStatementParser(Lexer lexer) {
    super(new SnowflakeExprParser(lexer));
}
}
| SnowflakeStatementParser |
java | apache__kafka | connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/MultiVersionTest.java | {
"start": 1560,
"end": 17488
} | class ____ {
/**
 * Builds a {@link Plugins} instance whose plugin path is the comma-joined set of
 * artifact locations and whose discovery strategy is the given mode.
 */
private static Plugins setUpPlugins(Map<Path, List<VersionedPluginBuilder.BuildInfo>> artifacts, PluginDiscoveryMode mode) {
    String joinedPath = String.join(",", artifacts.keySet().stream().map(Path::toString).toList());
    Map<String, String> workerProps = new HashMap<>();
    workerProps.put(WorkerConfig.PLUGIN_PATH_CONFIG, joinedPath);
    workerProps.put(WorkerConfig.PLUGIN_DISCOVERY_CONFIG, mode.name());
    return new Plugins(workerProps);
}
/**
 * Verifies that every plugin build in {@code artifacts} can be located and instantiated
 * at its exact declared version: the resolved loader must be a {@link PluginClassLoader}
 * rooted at the artifact's location, and the instantiated plugin must report the
 * expected version via {@link Versioned#version()}.
 */
private void assertPluginLoad(Map<Path, List<VersionedPluginBuilder.BuildInfo>> artifacts, PluginDiscoveryMode mode)
    throws InvalidVersionSpecificationException, ClassNotFoundException {
    Plugins plugins = setUpPlugins(artifacts, mode);
    for (Map.Entry<Path, List<VersionedPluginBuilder.BuildInfo>> entry : artifacts.entrySet()) {
        String pluginLocation = entry.getKey().toAbsolutePath().toString();
        for (VersionedPluginBuilder.BuildInfo buildInfo : entry.getValue()) {
            // Resolve a loader for the exact version this artifact was built with.
            ClassLoader pluginLoader = plugins.pluginLoader(buildInfo.plugin().className(), PluginUtils.connectorVersionRequirement(buildInfo.version()), null);
            Assertions.assertInstanceOf(PluginClassLoader.class, pluginLoader);
            // The loader must come from this artifact's path, not some other copy of the class.
            Assertions.assertTrue(((PluginClassLoader) pluginLoader).location().contains(pluginLocation));
            Object p = plugins.newPlugin(buildInfo.plugin().className(), PluginUtils.connectorVersionRequirement(buildInfo.version()));
            Assertions.assertInstanceOf(Versioned.class, p);
            Assertions.assertEquals(buildInfo.version(), ((Versioned) p).version());
        }
    }
}
/**
 * Verifies that {@code Plugins.latestVersion} reports {@code latestVersion} for every
 * distinct plugin class present in {@code artifacts}, under the given discovery mode.
 */
private void assertCorrectLatestPluginVersion(
    Map<Path, List<VersionedPluginBuilder.BuildInfo>> artifacts,
    PluginDiscoveryMode mode,
    String latestVersion
) {
    Plugins plugins = setUpPlugins(artifacts, mode);
    // Collect each plugin class name once, regardless of how many versions were built.
    List<String> classes = artifacts.values().stream()
        .flatMap(List::stream)
        .map(VersionedPluginBuilder.BuildInfo::plugin)
        .map(VersionedPluginBuilder.VersionedTestPlugin::className)
        .distinct()
        .toList();
    for (String className : classes) {
        String version = plugins.latestVersion(className, PluginType.values());
        Assertions.assertEquals(latestVersion, version);
    }
}
/**
 * Builds one artifact per (version, plugin type) pair, each containing exactly one
 * versioned plugin, and returns each artifact's path mapped to its build metadata.
 *
 * @throws IOException if an artifact cannot be written
 */
private static Map<Path, List<VersionedPluginBuilder.BuildInfo>> buildIsolatedArtifacts(
    String[] versions,
    VersionedPluginBuilder.VersionedTestPlugin[] pluginTypes
) throws IOException {
    Map<Path, List<VersionedPluginBuilder.BuildInfo>> artifacts = new HashMap<>();
    for (String v : versions) {
        for (VersionedPluginBuilder.VersionedTestPlugin pluginType: pluginTypes) {
            VersionedPluginBuilder builder = new VersionedPluginBuilder();
            builder.include(pluginType, v);
            // Artifact name encodes type and version, e.g. "source-connector-1.1.0".
            artifacts.put(builder.build(pluginType + "-" + v), builder.buildInfos());
        }
    }
    return artifacts;
}
// Highest of the versions built into DEFAULT_ISOLATED_ARTIFACTS.
public static final String DEFAULT_ISOLATED_ARTIFACTS_LATEST_VERSION;
// One single-plugin artifact per (version, plugin type) combination.
public static final Map<Path, List<VersionedPluginBuilder.BuildInfo>> DEFAULT_ISOLATED_ARTIFACTS;
// A single artifact bundling every plugin type, each at its own distinct 0.x version.
public static final Map<Path, List<VersionedPluginBuilder.BuildInfo>> DEFAULT_COMBINED_ARTIFACT;
// Plugins instance scanning both the isolated artifacts and the combined artifact.
public static final Plugins MULTI_VERSION_PLUGINS;
// Version assigned to each plugin type inside the combined artifact.
public static final Map<VersionedPluginBuilder.VersionedTestPlugin, String> DEFAULT_COMBINED_ARTIFACT_VERSIONS;
static {
    String[] defaultIsolatedArtifactsVersions = new String[]{"1.1.0", "2.3.0", "4.3.0"};
    try {
        DEFAULT_ISOLATED_ARTIFACTS = buildIsolatedArtifacts(
            defaultIsolatedArtifactsVersions, VersionedPluginBuilder.VersionedTestPlugin.values()
        );
        DEFAULT_ISOLATED_ARTIFACTS_LATEST_VERSION = "4.3.0";
        DEFAULT_COMBINED_ARTIFACT_VERSIONS = new HashMap<>();
        // Build the combined artifact, recording each plugin type's version as it is included.
        VersionedPluginBuilder builder = new VersionedPluginBuilder();
        builder.include(VersionedPluginBuilder.VersionedTestPlugin.SOURCE_CONNECTOR,
            DEFAULT_COMBINED_ARTIFACT_VERSIONS.computeIfAbsent(VersionedPluginBuilder.VersionedTestPlugin.SOURCE_CONNECTOR, k -> "0.0.0"));
        builder.include(VersionedPluginBuilder.VersionedTestPlugin.SINK_CONNECTOR,
            DEFAULT_COMBINED_ARTIFACT_VERSIONS.computeIfAbsent(VersionedPluginBuilder.VersionedTestPlugin.SINK_CONNECTOR, k -> "0.1.0"));
        builder.include(VersionedPluginBuilder.VersionedTestPlugin.CONVERTER,
            DEFAULT_COMBINED_ARTIFACT_VERSIONS.computeIfAbsent(VersionedPluginBuilder.VersionedTestPlugin.CONVERTER, k -> "0.2.0"));
        builder.include(VersionedPluginBuilder.VersionedTestPlugin.HEADER_CONVERTER,
            DEFAULT_COMBINED_ARTIFACT_VERSIONS.computeIfAbsent(VersionedPluginBuilder.VersionedTestPlugin.HEADER_CONVERTER, k -> "0.3.0"));
        builder.include(VersionedPluginBuilder.VersionedTestPlugin.TRANSFORMATION,
            DEFAULT_COMBINED_ARTIFACT_VERSIONS.computeIfAbsent(VersionedPluginBuilder.VersionedTestPlugin.TRANSFORMATION, k -> "0.4.0"));
        builder.include(VersionedPluginBuilder.VersionedTestPlugin.PREDICATE,
            DEFAULT_COMBINED_ARTIFACT_VERSIONS.computeIfAbsent(VersionedPluginBuilder.VersionedTestPlugin.PREDICATE, k -> "0.5.0"));
        DEFAULT_COMBINED_ARTIFACT = Map.of(builder.build("all_versioned_artifact"), builder.buildInfos());
        // The shared Plugins fixture sees both artifact sets on its plugin path.
        Map<Path, List<VersionedPluginBuilder.BuildInfo>> artifacts = new HashMap<>();
        artifacts.putAll(DEFAULT_COMBINED_ARTIFACT);
        artifacts.putAll(DEFAULT_ISOLATED_ARTIFACTS);
        MULTI_VERSION_PLUGINS = setUpPlugins(artifacts, PluginDiscoveryMode.SERVICE_LOAD);
    } catch (IOException e) {
        // Static-initializer context: surface build failures as an unchecked error.
        throw new RuntimeException(e);
    }
}
/**
 * Plugins bundled in the single combined artifact load at their declared versions
 * under both discovery modes.
 */
@Test
public void testVersionedPluginLoaded() throws InvalidVersionSpecificationException, ClassNotFoundException {
    assertPluginLoad(DEFAULT_COMBINED_ARTIFACT, PluginDiscoveryMode.SERVICE_LOAD);
    assertPluginLoad(DEFAULT_COMBINED_ARTIFACT, PluginDiscoveryMode.ONLY_SCAN);
}
/**
 * Plugins built as separate per-version artifacts load at their declared versions
 * under both discovery modes.
 */
@Test
public void testMultipleIsolatedVersionedPluginLoading() throws InvalidVersionSpecificationException, ClassNotFoundException {
    assertPluginLoad(DEFAULT_ISOLATED_ARTIFACTS, PluginDiscoveryMode.SERVICE_LOAD);
    assertPluginLoad(DEFAULT_ISOLATED_ARTIFACTS, PluginDiscoveryMode.ONLY_SCAN);
}
/**
 * The reported latest version for each plugin class is the highest version among the
 * isolated artifacts, under both discovery modes.
 */
@Test
public void testLatestVersion() {
    assertCorrectLatestPluginVersion(DEFAULT_ISOLATED_ARTIFACTS, PluginDiscoveryMode.SERVICE_LOAD, DEFAULT_ISOLATED_ARTIFACTS_LATEST_VERSION);
    assertCorrectLatestPluginVersion(DEFAULT_ISOLATED_ARTIFACTS, PluginDiscoveryMode.ONLY_SCAN, DEFAULT_ISOLATED_ARTIFACTS_LATEST_VERSION);
}
/**
 * When resolving bundled plugins through the class loader of a connector from the
 * combined artifact, each bundled plugin (converter, header converter, transformation,
 * predicate) must come from that same artifact — i.e. report the combined artifact's
 * version — while the latest known version across all artifacts is still reported
 * independently.
 */
@Test
public void testBundledPluginLoading() throws InvalidVersionSpecificationException, ClassNotFoundException {
    Plugins plugins = MULTI_VERSION_PLUGINS;
    // get the connector loader of the combined artifact which includes all plugin types
    ClassLoader connectorLoader = plugins.pluginLoader(
        VersionedPluginBuilder.VersionedTestPlugin.SINK_CONNECTOR.className(),
        PluginUtils.connectorVersionRequirement("0.1.0"),
        null
    );
    Assertions.assertInstanceOf(PluginClassLoader.class, connectorLoader);
    List<VersionedPluginBuilder.VersionedTestPlugin> pluginTypes = List.of(
        VersionedPluginBuilder.VersionedTestPlugin.CONVERTER,
        VersionedPluginBuilder.VersionedTestPlugin.HEADER_CONVERTER,
        VersionedPluginBuilder.VersionedTestPlugin.TRANSFORMATION,
        VersionedPluginBuilder.VersionedTestPlugin.PREDICATE
    );
    // should match the version used in setUp for creating the combined artifact
    List<String> versions = pluginTypes.stream().map(DEFAULT_COMBINED_ARTIFACT_VERSIONS::get).toList();
    // Iterate over the list size rather than a hard-coded 4, so adding a plugin type
    // to the list above cannot silently skip assertions.
    for (int i = 0; i < pluginTypes.size(); i++) {
        String className = pluginTypes.get(i).className();
        // when using the connector loader, the version and plugin returned should be from the ones in the combined artifact
        String version = plugins.pluginVersion(className, connectorLoader, PluginType.values());
        Assertions.assertEquals(versions.get(i), version);
        Object p = plugins.newPlugin(className, null, connectorLoader);
        Assertions.assertInstanceOf(Versioned.class, p);
        Assertions.assertEquals(versions.get(i), ((Versioned) p).version());
        String latestVersion = plugins.latestVersion(className, PluginType.values());
        Assertions.assertEquals(DEFAULT_ISOLATED_ARTIFACTS_LATEST_VERSION, latestVersion);
    }
}
/**
 * For a fixed set of installed plugin versions, each Maven-style version-range spec
 * must resolve to the expected concrete plugin version for every plugin type.
 */
@Test
public void testCorrectVersionRange() throws IOException, InvalidVersionSpecificationException, ClassNotFoundException {
    Map<Path, List<VersionedPluginBuilder.BuildInfo>> artifacts = buildIsolatedArtifacts(
        new String[]{"1.0.0", "1.1.0", "1.1.2", "2.0.0", "2.0.2", "3.0.0", "4.0.0"},
        VersionedPluginBuilder.VersionedTestPlugin.values()
    );
    Plugins plugins = setUpPlugins(artifacts, PluginDiscoveryMode.SERVICE_LOAD);
    // Maps a version requirement to the installed version it must resolve to.
    Map<VersionRange, String> requiredVersions = new HashMap<>();
    requiredVersions.put(PluginUtils.connectorVersionRequirement("latest"), "4.0.0");
    // null requirement behaves like "latest".
    requiredVersions.put(PluginUtils.connectorVersionRequirement(null), "4.0.0");
    requiredVersions.put(PluginUtils.connectorVersionRequirement("1.0.0"), "1.0.0");
    requiredVersions.put(PluginUtils.connectorVersionRequirement("[2.0.2]"), "2.0.2");
    requiredVersions.put(PluginUtils.connectorVersionRequirement("[1.1.0,3.0.1]"), "3.0.0");
    requiredVersions.put(PluginUtils.connectorVersionRequirement("(,2.0.0)"), "1.1.2");
    requiredVersions.put(PluginUtils.connectorVersionRequirement("(,1.0.0]"), "1.0.0");
    requiredVersions.put(PluginUtils.connectorVersionRequirement("[2.0.0,)"), "4.0.0");
    requiredVersions.put(PluginUtils.connectorVersionRequirement("(,2.0.0],[2.0.3, 2.0.4)"), "2.0.0");
    requiredVersions.put(PluginUtils.connectorVersionRequirement("(2.0.0,3.0.0)"), "2.0.2");
    requiredVersions.put(PluginUtils.connectorVersionRequirement("(,1.1.0),[4.1.1,)"), "1.0.0");
    requiredVersions.put(PluginUtils.connectorVersionRequirement("[1.1.0,1.1.0]"), "1.1.0");
    requiredVersions.put(PluginUtils.connectorVersionRequirement("(,1.1.0),(2.0.0, 2.0.2]"), "2.0.2");
    requiredVersions.put(PluginUtils.connectorVersionRequirement("[1.1.0,1.1.3)"), "1.1.2");
    for (Map.Entry<VersionRange, String> entry : requiredVersions.entrySet()) {
        for (VersionedPluginBuilder.VersionedTestPlugin pluginType: VersionedPluginBuilder.VersionedTestPlugin.values()) {
            Object p = plugins.newPlugin(pluginType.className(), entry.getKey());
            Assertions.assertInstanceOf(Versioned.class, p);
            Assertions.assertEquals(entry.getValue(), ((Versioned) p).version(),
                String.format("Provided Version Range %s for class %s should return plugin version %s instead of %s",
                    entry.getKey(), pluginType.className(), entry.getValue(), ((Versioned) p).version()));
        }
    }
}
/**
 * Version requirements that match none of the installed versions must fail with
 * {@link VersionedPluginLoadingException}, and the exception must report the full
 * list of versions that actually are available.
 */
@Test
public void testInvalidVersionRange() throws IOException, InvalidVersionSpecificationException {
    String[] validVersions = new String[]{"1.0.0", "1.1.0", "1.1.2", "2.0.0", "2.0.2", "3.0.0", "4.0.0"};
    Map<Path, List<VersionedPluginBuilder.BuildInfo>> artifacts = buildIsolatedArtifacts(
        validVersions,
        VersionedPluginBuilder.VersionedTestPlugin.values()
    );
    Plugins plugins = setUpPlugins(artifacts, PluginDiscoveryMode.SERVICE_LOAD);
    // Every range below excludes all installed versions (bounds outside the installed
    // set, or gaps between consecutive releases).
    Set<VersionRange> invalidVersions = new HashSet<>();
    invalidVersions.add(PluginUtils.connectorVersionRequirement("0.9.0"));
    invalidVersions.add(PluginUtils.connectorVersionRequirement("[4.0.1,)"));
    invalidVersions.add(PluginUtils.connectorVersionRequirement("(4.0.0,)"));
    invalidVersions.add(PluginUtils.connectorVersionRequirement("[4.0.1]"));
    invalidVersions.add(PluginUtils.connectorVersionRequirement("(2.0.0, 2.0.1)"));
    invalidVersions.add(PluginUtils.connectorVersionRequirement("(,1.0.0)"));
    invalidVersions.add(PluginUtils.connectorVersionRequirement("(1.1.0, 1.1.2)"));
    invalidVersions.add(PluginUtils.connectorVersionRequirement("(1.1.0, 1.1.2),[1.1.3, 2.0.0)"));
    for (VersionRange versionRange : invalidVersions) {
        for (VersionedPluginBuilder.VersionedTestPlugin pluginType: VersionedPluginBuilder.VersionedTestPlugin.values()) {
            VersionedPluginLoadingException e = Assertions.assertThrows(VersionedPluginLoadingException.class, () -> {
                plugins.newPlugin(pluginType.className(), versionRange);
            }, String.format("Provided Version Range %s for class %s should throw VersionedPluginLoadingException", versionRange, pluginType.className()));
            // Fix: JUnit's assertEquals takes (expected, actual); the original call had them
            // swapped, producing misleading failure messages.
            Assertions.assertEquals(List.of(validVersions), e.availableVersions());
        }
    }
}
/**
 * Worker config can pin key, value, and header converters to distinct versions; the
 * converters instantiated by {@link Plugins} must have the configured class and report
 * the configured version.
 */
@Test
public void testVersionedConverter() {
    Plugins plugins = setUpPlugins(DEFAULT_ISOLATED_ARTIFACTS, PluginDiscoveryMode.SERVICE_LOAD);
    Map<String, String> converterConfig = new HashMap<>();
    converterConfig.put(WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, VersionedPluginBuilder.VersionedTestPlugin.CONVERTER.className());
    converterConfig.put(WorkerConfig.KEY_CONVERTER_VERSION, "1.1.0");
    converterConfig.put(WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, VersionedPluginBuilder.VersionedTestPlugin.CONVERTER.className());
    converterConfig.put(WorkerConfig.VALUE_CONVERTER_VERSION, "2.3.0");
    converterConfig.put(WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG, VersionedPluginBuilder.VersionedTestPlugin.HEADER_CONVERTER.className());
    converterConfig.put(WorkerConfig.HEADER_CONVERTER_VERSION, "4.3.0");
    converterConfig.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    AbstractConfig config;
    // Config validation must run with the delegating loader installed so plugin classes resolve.
    try (LoaderSwap swap = plugins.safeLoaderSwapper().apply(plugins.delegatingLoader())) {
        config = new PluginsTest.TestableWorkerConfig(converterConfig);
    }
    // Fix in the three class-name checks below: assertEquals takes (expected, actual);
    // the original calls passed the actual value first, yielding misleading failure messages.
    Converter keyConverter = plugins.newConverter(config, WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, WorkerConfig.KEY_CONVERTER_VERSION);
    Assertions.assertEquals(VersionedPluginBuilder.VersionedTestPlugin.CONVERTER.className(), keyConverter.getClass().getName());
    Assertions.assertInstanceOf(Versioned.class, keyConverter);
    Assertions.assertEquals("1.1.0", ((Versioned) keyConverter).version());
    Converter valueConverter = plugins.newConverter(config, WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, WorkerConfig.VALUE_CONVERTER_VERSION);
    Assertions.assertEquals(VersionedPluginBuilder.VersionedTestPlugin.CONVERTER.className(), valueConverter.getClass().getName());
    Assertions.assertInstanceOf(Versioned.class, valueConverter);
    Assertions.assertEquals("2.3.0", ((Versioned) valueConverter).version());
    HeaderConverter headerConverter = plugins.newHeaderConverter(config, WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG, WorkerConfig.HEADER_CONVERTER_VERSION);
    Assertions.assertEquals(VersionedPluginBuilder.VersionedTestPlugin.HEADER_CONVERTER.className(), headerConverter.getClass().getName());
    Assertions.assertInstanceOf(Versioned.class, headerConverter);
    Assertions.assertEquals("4.3.0", ((Versioned) headerConverter).version());
}
}
| MultiVersionTest |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResponseListener.java | {
"start": 1829,
"end": 2115
} | class ____ extends RestRefCountedChunkedToXContentListener<EsqlQueryResponse> {
/**
* A simple, thread-safe stop watch for timing a single action.
* Allows to stop the time for building a response and to log it at a later point.
*/
private static | EsqlResponseListener |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.