language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
elastic__elasticsearch
|
x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/EqlStatsResponse.java
|
{
"start": 978,
"end": 1880
}
|
class ____ extends BaseNodesResponse<EqlStatsResponse.NodeStatsResponse> implements ToXContentObject {
public EqlStatsResponse(ClusterName clusterName, List<NodeStatsResponse> nodes, List<FailedNodeException> failures) {
super(clusterName, nodes, failures);
}
@Override
protected List<NodeStatsResponse> readNodesFrom(StreamInput in) {
return TransportAction.localOnly();
}
@Override
protected void writeNodesTo(StreamOutput out, List<NodeStatsResponse> nodes) {
TransportAction.localOnly();
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startArray("stats");
for (NodeStatsResponse node : getNodes()) {
node.toXContent(builder, params);
}
builder.endArray();
return builder;
}
public static
|
EqlStatsResponse
|
java
|
google__dagger
|
dagger-compiler/main/java/dagger/internal/codegen/componentgenerator/CurrentImplementationSubcomponent.java
|
{
"start": 3746,
"end": 4348
}
|
interface ____ {
@BindsInstance
Builder bindingGraph(BindingGraph bindingGraph);
@BindsInstance
Builder parentImplementation(
@ParentComponent Optional<ComponentImplementation> parentImplementation);
@BindsInstance
Builder parentRequestRepresentations(
@ParentComponent Optional<ComponentRequestRepresentations> parentRequestRepresentations);
@BindsInstance
Builder parentRequirementExpressions(
@ParentComponent Optional<ComponentRequirementExpressions> parentRequirementExpressions);
CurrentImplementationSubcomponent build();
}
}
|
Builder
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/exceptionhandling/NonActiveTransactionSessionFindJdbcExceptionHandlingTest.java
|
{
"start": 1287,
"end": 2452
}
|
class ____ {
@Test
public void testJdbcExceptionThrown(EntityManagerFactoryScope factoryScope) {
// delete "description" column so that a JDBCException caused by a SQLException is thrown when looking up the AnEntity
factoryScope.inTransaction( (entityManager) -> {
entityManager.createNativeQuery( "alter table AnEntity drop column description" ).executeUpdate();
} );
factoryScope.inTransaction( (entityManager) -> {
try {
entityManager.find( AnEntity.class, 1 );
fail( "A PersistenceException should have been thrown." );
}
catch ( PersistenceException ex ) {
assertInstanceOf( JDBCException.class, ex );
assertInstanceOf( SQLException.class, ex.getCause() );
}
} );
}
@AfterEach
void tearDown(EntityManagerFactoryScope factoryScope) {
factoryScope.dropData();
}
@BeforeEach
public void setupData(EntityManagerFactoryScope factoryScope) {
factoryScope.inTransaction( (entityManager) -> {
entityManager.persist( new AnEntity( 1, "description" ) );
} );
}
@SuppressWarnings({"FieldCanBeLocal", "unused"})
@Entity(name = "AnEntity")
public static
|
NonActiveTransactionSessionFindJdbcExceptionHandlingTest
|
java
|
resilience4j__resilience4j
|
resilience4j-spring/src/main/java/io/github/resilience4j/bulkhead/configure/BulkheadAspect.java
|
{
"start": 3033,
"end": 8540
}
|
class ____ implements Ordered {
private static final Logger logger = LoggerFactory.getLogger(BulkheadAspect.class);
private final BulkheadConfigurationProperties bulkheadConfigurationProperties;
private final BulkheadRegistry bulkheadRegistry;
private final ThreadPoolBulkheadRegistry threadPoolBulkheadRegistry;
private final @Nullable
List<BulkheadAspectExt> bulkheadAspectExts;
private final FallbackExecutor fallbackExecutor;
private final SpelResolver spelResolver;
public BulkheadAspect(BulkheadConfigurationProperties backendMonitorPropertiesRegistry,
ThreadPoolBulkheadRegistry threadPoolBulkheadRegistry, BulkheadRegistry bulkheadRegistry,
@Autowired(required = false) List<BulkheadAspectExt> bulkheadAspectExts,
FallbackExecutor fallbackExecutor,
SpelResolver spelResolver) {
this.bulkheadConfigurationProperties = backendMonitorPropertiesRegistry;
this.bulkheadRegistry = bulkheadRegistry;
this.bulkheadAspectExts = bulkheadAspectExts;
this.fallbackExecutor = fallbackExecutor;
this.threadPoolBulkheadRegistry = threadPoolBulkheadRegistry;
this.spelResolver = spelResolver;
}
@Pointcut(value = "@within(Bulkhead) || @annotation(Bulkhead)", argNames = "Bulkhead")
public void matchAnnotatedClassOrMethod(Bulkhead Bulkhead) {
}
@Around(value = "matchAnnotatedClassOrMethod(bulkheadAnnotation)", argNames = "proceedingJoinPoint, bulkheadAnnotation")
public Object bulkheadAroundAdvice(ProceedingJoinPoint proceedingJoinPoint,
@Nullable Bulkhead bulkheadAnnotation) throws Throwable {
Method method = ((MethodSignature) proceedingJoinPoint.getSignature()).getMethod();
String methodName = method.getDeclaringClass().getName() + "#" + method.getName();
if (bulkheadAnnotation == null) {
bulkheadAnnotation = getBulkheadAnnotation(proceedingJoinPoint);
}
if (bulkheadAnnotation == null) { //because annotations wasn't found
return proceedingJoinPoint.proceed();
}
Class<?> returnType = method.getReturnType();
String backend = spelResolver.resolve(method, proceedingJoinPoint.getArgs(), bulkheadAnnotation.name());
if (bulkheadAnnotation.type() == Bulkhead.Type.THREADPOOL) {
final CheckedSupplier<Object> bulkheadExecution =
() -> proceedInThreadPoolBulkhead(proceedingJoinPoint, methodName, returnType, backend);
return fallbackExecutor.execute(proceedingJoinPoint, method, bulkheadAnnotation.fallbackMethod(), bulkheadExecution);
} else {
io.github.resilience4j.bulkhead.Bulkhead bulkhead = getOrCreateBulkhead(methodName,
backend);
final CheckedSupplier<Object> bulkheadExecution = () -> proceed(proceedingJoinPoint, methodName, bulkhead, returnType);
return fallbackExecutor.execute(proceedingJoinPoint, method, bulkheadAnnotation.fallbackMethod(), bulkheadExecution);
}
}
/**
* entry logic for semaphore bulkhead execution
*
* @param proceedingJoinPoint AOP proceedingJoinPoint
* @param methodName AOP method name
* @param bulkhead the configured bulkhead
* @param returnType the AOP method return type
* @return the result Object of the method call
* @throws Throwable
*/
private Object proceed(ProceedingJoinPoint proceedingJoinPoint, String methodName,
io.github.resilience4j.bulkhead.Bulkhead bulkhead, Class<?> returnType) throws Throwable {
if (bulkheadAspectExts != null && !bulkheadAspectExts.isEmpty()) {
for (BulkheadAspectExt bulkHeadAspectExt : bulkheadAspectExts) {
if (bulkHeadAspectExt.canHandleReturnType(returnType)) {
return bulkHeadAspectExt.handle(proceedingJoinPoint, bulkhead, methodName);
}
}
}
if (CompletionStage.class.isAssignableFrom(returnType)) {
return handleJoinPointCompletableFuture(proceedingJoinPoint, bulkhead);
}
return handleJoinPoint(proceedingJoinPoint, bulkhead);
}
private io.github.resilience4j.bulkhead.Bulkhead getOrCreateBulkhead(String methodName,
String backend) {
BulkheadConfig config = bulkheadRegistry.getConfiguration(backend)
.orElse(bulkheadRegistry.getDefaultConfig());
io.github.resilience4j.bulkhead.Bulkhead bulkhead = bulkheadRegistry.bulkhead(backend, config);
if (logger.isDebugEnabled()) {
logger.debug(
"Created or retrieved bulkhead '{}' with max concurrent call '{}' and max wait time '{}ms' for method: '{}'",
backend, bulkhead.getBulkheadConfig().getMaxConcurrentCalls(),
bulkhead.getBulkheadConfig().getMaxWaitDuration().toMillis(), methodName);
}
return bulkhead;
}
/**
* @param proceedingJoinPoint AOP proceedingJoinPoint
* @return Bulkhead annotation
*/
@Nullable
private Bulkhead getBulkheadAnnotation(ProceedingJoinPoint proceedingJoinPoint) {
if (logger.isDebugEnabled()) {
logger.debug("bulkhead parameter is null");
}
if (proceedingJoinPoint.getTarget() instanceof Proxy) {
logger
.debug("The bulkhead annotation is kept on a
|
BulkheadAspect
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/script/LongFieldScript.java
|
{
"start": 793,
"end": 2633
}
|
class ____ extends AbstractLongFieldScript {
public static final ScriptContext<Factory> CONTEXT = newContext("long_field", Factory.class);
public static final Factory PARSE_FROM_SOURCE = new Factory() {
@Override
public LeafFactory newFactory(String field, Map<String, Object> params, SearchLookup lookup, OnScriptError onScriptError) {
return ctx -> new LongFieldScript(field, params, lookup, OnScriptError.FAIL, ctx) {
@Override
public void execute() {
emitFromSource();
}
};
}
@Override
public boolean isResultDeterministic() {
return true;
}
@Override
public boolean isParsedFromSource() {
return true;
}
};
public static Factory leafAdapter(Function<SearchLookup, CompositeFieldScript.LeafFactory> parentFactory) {
return (leafFieldName, params, searchLookup, onScriptError) -> {
CompositeFieldScript.LeafFactory parentLeafFactory = parentFactory.apply(searchLookup);
return (LeafFactory) ctx -> {
CompositeFieldScript compositeFieldScript = parentLeafFactory.newInstance(ctx);
return new LongFieldScript(leafFieldName, params, searchLookup, onScriptError, ctx) {
@Override
public void setDocument(int docId) {
compositeFieldScript.setDocument(docId);
}
@Override
public void execute() {
emitFromCompositeScript(compositeFieldScript);
}
};
};
};
}
@SuppressWarnings("unused")
public static final String[] PARAMETERS = {};
public
|
LongFieldScript
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/util/ClassUtilTest.java
|
{
"start": 1103,
"end": 1235
}
|
class ____ {
protected Inner() {
throw new IllegalStateException("test");
}
}
static abstract
|
Inner
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/index/fielddata/RamAccountingTermsEnum.java
|
{
"start": 1007,
"end": 3053
}
|
class ____ extends FilteredTermsEnum {
// Flush every 5mb
private static final long FLUSH_BUFFER_SIZE = 1024 * 1024 * 5;
private final CircuitBreaker breaker;
private final TermsEnum termsEnum;
private final AbstractIndexOrdinalsFieldData.PerValueEstimator estimator;
private final String fieldName;
private long totalBytes;
private long flushBuffer;
public RamAccountingTermsEnum(
TermsEnum termsEnum,
CircuitBreaker breaker,
AbstractIndexOrdinalsFieldData.PerValueEstimator estimator,
String fieldName
) {
super(termsEnum);
this.breaker = breaker;
this.termsEnum = termsEnum;
this.estimator = estimator;
this.fieldName = fieldName;
this.totalBytes = 0;
this.flushBuffer = 0;
}
/**
* Always accept the term.
*/
@Override
protected AcceptStatus accept(BytesRef term) {
return AcceptStatus.YES;
}
/**
* Flush the {@code flushBuffer} to the breaker, incrementing the total
* bytes and resetting the buffer.
*/
public void flush() {
breaker.addEstimateBytesAndMaybeBreak(this.flushBuffer, this.fieldName);
this.totalBytes += this.flushBuffer;
this.flushBuffer = 0;
}
/**
* Proxy to the original next() call, but estimates the overhead of
* loading the next term.
*/
@Override
public BytesRef next() throws IOException {
BytesRef term = termsEnum.next();
if (term == null && this.flushBuffer != 0) {
// We have reached the end of the termsEnum, flush the buffer
flush();
} else {
this.flushBuffer += estimator.bytesPerValue(term);
if (this.flushBuffer >= FLUSH_BUFFER_SIZE) {
flush();
}
}
return term;
}
/**
* @return the total number of bytes that have been aggregated
*/
public long getTotalBytes() {
return this.totalBytes;
}
}
|
RamAccountingTermsEnum
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/context/annotation/AnnotationBeanNameGeneratorTests.java
|
{
"start": 8693,
"end": 8794
}
|
class ____ {
}
@TestRestController("restController")
static
|
ComposedControllerAnnotationWithBlankName
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/TestSignerManager.java
|
{
"start": 14519,
"end": 15916
}
|
class ____ implements AwsSignerInitializer {
private static int registerCount = 0;
private static int unregisterCount = 0;
private static int instanceCount = 0;
private static final Map<StoreKey, StoreValue> storeCache = new HashMap<>();
public SignerInitializerForTest() {
instanceCount++;
}
@Override
public void registerStore(String bucketName, Configuration storeConf,
DelegationTokenProvider dtProvider, UserGroupInformation storeUgi) {
registerCount++;
StoreKey storeKey = new StoreKey(bucketName, storeUgi);
StoreValue storeValue = new StoreValue(bucketName, storeConf, dtProvider);
storeCache.put(storeKey, storeValue);
}
@Override
public void unregisterStore(String bucketName, Configuration storeConf,
DelegationTokenProvider dtProvider, UserGroupInformation storeUgi) {
unregisterCount++;
StoreKey storeKey = new StoreKey(bucketName, storeUgi);
storeCache.remove(storeKey);
}
public static void reset() {
registerCount = 0;
unregisterCount = 0;
instanceCount = 0;
storeCache.clear();
}
public static StoreValue getStoreInfo(String bucketName,
UserGroupInformation storeUgi) {
StoreKey storeKey = new StoreKey(bucketName, storeUgi);
return storeCache.get(storeKey);
}
private static
|
SignerInitializerForTest
|
java
|
apache__commons-lang
|
src/main/java/org/apache/commons/lang3/SystemUtils.java
|
{
"start": 43079,
"end": 43599
}
|
class ____ loaded.
* </p>
*
* @since 3.13.0
*/
public static final boolean IS_JAVA_18 = getJavaVersionMatches("18");
/**
* The constant {@code true} if this is Java version 19 (also 19.x versions).
* <p>
* The result depends on the value of the {@link #JAVA_SPECIFICATION_VERSION} constant.
* </p>
* <p>
* The field will return {@code false} if {@link #JAVA_SPECIFICATION_VERSION} is {@code null}.
* </p>
* <p>
* This value is initialized when the
|
is
|
java
|
apache__flink
|
flink-core/src/test/java/org/apache/flink/api/java/typeutils/TupleTypeInfoTest.java
|
{
"start": 1292,
"end": 3111
}
|
class ____ extends TypeInformationTestBase<TupleTypeInfo<?>> {
@Override
protected TupleTypeInfo<?>[] getTestData() {
return new TupleTypeInfo<?>[] {
new TupleTypeInfo<>(BasicTypeInfo.INT_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO),
new TupleTypeInfo<>(BasicTypeInfo.INT_TYPE_INFO, BasicTypeInfo.BOOLEAN_TYPE_INFO)
};
}
@Test
void testTupleTypeInfoSymmetricEqualityRelation() {
TupleTypeInfo<Tuple1<Integer>> tupleTypeInfo =
new TupleTypeInfo<>(BasicTypeInfo.INT_TYPE_INFO);
TupleTypeInfoBase<Tuple1> anonymousTupleTypeInfo =
new TupleTypeInfoBase<Tuple1>(Tuple1.class, BasicTypeInfo.INT_TYPE_INFO) {
private static final long serialVersionUID = -7985593598027660836L;
@Override
public TypeSerializer<Tuple1> createSerializer(SerializerConfig config) {
return null;
}
@Override
protected TypeComparatorBuilder<Tuple1> createTypeComparatorBuilder() {
return null;
}
@Override
public String[] getFieldNames() {
return new String[0];
}
@Override
public int getFieldIndex(String fieldName) {
return 0;
}
};
boolean tupleVsAnonymous = tupleTypeInfo.equals(anonymousTupleTypeInfo);
boolean anonymousVsTuple = anonymousTupleTypeInfo.equals(tupleTypeInfo);
assertThat(tupleVsAnonymous)
.as("Equality relation should be symmetric")
.isEqualTo(anonymousVsTuple);
}
}
|
TupleTypeInfoTest
|
java
|
lettuce-io__lettuce-core
|
src/test/java/io/lettuce/core/dynamic/RedisCommandsIntegrationTests.java
|
{
"start": 1267,
"end": 5567
}
|
class ____ extends TestSupport {
private final RedisClient client;
private final RedisCommands<String, String> redis;
@Inject
RedisCommandsIntegrationTests(RedisClient client, StatefulRedisConnection<String, String> connection) {
this.client = client;
this.redis = connection.sync();
}
@Test
void verifierShouldCatchMisspelledDeclarations() {
RedisCommandFactory factory = new RedisCommandFactory(redis.getStatefulConnection());
assertThat(factory).hasFieldOrPropertyWithValue("verifyCommandMethods", true);
try {
factory.getCommands(WithTypo.class);
fail("Missing CommandCreationException");
} catch (CommandCreationException e) {
assertThat(e).hasMessageContaining("Command GAT does not exist.");
}
}
@Test
void disabledVerifierDoesNotReportTypo() {
RedisCommandFactory factory = new RedisCommandFactory(redis.getStatefulConnection());
factory.setVerifyCommandMethods(false);
assertThat(factory.getCommands(WithTypo.class)).isNotNull();
}
@Test
void doesNotFailIfCommandRetrievalFails() {
StatefulRedisConnection connectionMock = Mockito.mock(StatefulRedisConnection.class);
RedisCommands commandsMock = Mockito.mock(RedisCommands.class);
when(connectionMock.sync()).thenReturn(commandsMock);
doThrow(new RedisCommandExecutionException("ERR unknown command 'COMMAND'")).when(commandsMock).command();
RedisCommandFactory factory = new RedisCommandFactory(connectionMock);
assertThat(factory).hasFieldOrPropertyWithValue("verifyCommandMethods", false);
}
@Test
void verifierShouldCatchTooFewParametersDeclarations() {
RedisCommandFactory factory = new RedisCommandFactory(redis.getStatefulConnection());
try {
factory.getCommands(TooFewParameters.class);
fail("Missing CommandCreationException");
} catch (CommandCreationException e) {
assertThat(e).hasMessageContaining("Command GET accepts 1 parameters but method declares 0 parameter");
}
}
@Test
void shouldWorkWithPooledConnection() throws Exception {
GenericObjectPool<StatefulRedisConnection<String, String>> pool = ConnectionPoolSupport
.createGenericObjectPool(client::connect, new GenericObjectPoolConfig<>());
try (StatefulRedisConnection<String, String> connection = pool.borrowObject()) {
RedisCommandFactory factory = new RedisCommandFactory(connection);
SimpleCommands commands = factory.getCommands(SimpleCommands.class);
commands.get("foo");
}
pool.close();
}
@Test
void shouldWorkWithPooledConnectionAndCustomValidation() throws Exception {
GenericObjectPool<StatefulRedisConnection<String, String>> pool = ConnectionPoolSupport
.createGenericObjectPool(client::connect, new GenericObjectPoolConfig<>(), connection -> {
try {
return "PONG".equals(connection.sync().ping());
} catch (Exception e) {
return false;
}
});
try (StatefulRedisConnection<String, String> connection = pool.borrowObject()) {
RedisCommandFactory factory = new RedisCommandFactory(connection);
SimpleCommands commands = factory.getCommands(SimpleCommands.class);
commands.get("foo");
}
pool.close();
}
@Test
void shouldWorkWithAsyncPooledConnection() {
BoundedAsyncPool<StatefulRedisConnection<String, String>> pool = AsyncConnectionPoolSupport.createBoundedObjectPool(
() -> client.connectAsync(StringCodec.ASCII, RedisURI.create(TestSettings.host(), TestSettings.port())),
BoundedPoolConfig.create());
try (StatefulRedisConnection<String, String> connection = pool.acquire().join()) {
RedisCommandFactory factory = new RedisCommandFactory(connection);
SimpleCommands commands = factory.getCommands(SimpleCommands.class);
commands.get("foo");
}
pool.close();
}
private
|
RedisCommandsIntegrationTests
|
java
|
hibernate__hibernate-orm
|
hibernate-testing/src/main/java/org/hibernate/testing/orm/junit/DialectFeatureChecks.java
|
{
"start": 11377,
"end": 11553
}
|
class ____ implements DialectFeatureCheck {
public boolean apply(Dialect dialect) {
return dialect.supportsExistsInSelect();
}
}
public static
|
SupportsExistsInSelectCheck
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/join/TestWrappedRRClassloader.java
|
{
"start": 3597,
"end": 3711
}
|
class ____ has not been inherited from "
+ CompositeRecordReader.class.getSimpleName());
}
}
}
|
loader
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/resource/basic/MultipleAcceptHeaderTest.java
|
{
"start": 869,
"end": 1751
}
|
class ____ {
protected static String APPLICATION_JSON = "Content-Type: application/json";
protected static String APPLICATION_XML = "Content-Type: application/xml";
private TestInterfaceClient service;
private ClientImpl client;
@RegisterExtension
static QuarkusUnitTest testExtension = new QuarkusUnitTest()
.setArchiveProducer(new Supplier<>() {
@Override
public JavaArchive get() {
JavaArchive war = ShrinkWrap.create(JavaArchive.class);
war.addClasses(TestResourceServer.class, PortProviderUtil.class);
return war;
}
});
private String generateBaseUrl() {
return PortProviderUtil.generateBaseUrl();
}
@Path("/test")
@DisplayName("Test Resource Server")
public static
|
MultipleAcceptHeaderTest
|
java
|
junit-team__junit5
|
platform-tests/src/test/java/org/junit/platform/suite/engine/BeforeAndAfterSuiteTests.java
|
{
"start": 3630,
"end": 9815
}
|
class ____ {
@BeforeEach
void setUp() {
StatefulTestCase.callSequence = new ArrayList<>();
}
@Test
void successfulBeforeAndAfterSuite() {
// @formatter:off
executeSuite(SuccessfulBeforeAndAfterSuite.class)
.allEvents()
.assertStatistics(stats -> stats.started(7).finished(7).succeeded(6).failed(1))
.assertThatEvents()
.haveExactly(1, event(test(StatefulTestCase.Test1.class.getName()), finishedSuccessfully()))
.haveExactly(1, event(test(StatefulTestCase.Test2.class.getName()), finishedWithFailure()));
assertThat(StatefulTestCase.callSequence).containsExactly(
"beforeSuiteMethod",
"test1",
"test2",
"afterSuiteMethod"
);
// @formatter:on
}
@Test
void beforeAndAfterSuiteInheritance() {
// @formatter:off
executeSuite(SubclassWithBeforeAndAfterSuite.class)
.allEvents()
.assertStatistics(stats -> stats.started(7).finished(7).succeeded(6).failed(1));
assertThat(StatefulTestCase.callSequence).containsExactly(
"superclassBeforeSuiteMethod",
"subclassBeforeSuiteMethod",
"test1",
"test2",
"subclassAfterSuiteMethod",
"superclassAfterSuiteMethod"
);
// @formatter:on
}
@Test
void failingBeforeSuite() {
// @formatter:off
executeSuite(FailingBeforeSuite.class)
.allEvents()
.assertStatistics(stats -> stats.started(2).finished(2).succeeded(1).failed(1))
.assertThatEvents()
.haveExactly(1, event(
container(FailingBeforeSuite.class),
finishedWithFailure(instanceOf(RuntimeException.class),
message("Exception thrown by @BeforeSuite method"))));
assertThat(StatefulTestCase.callSequence).containsExactly(
"beforeSuiteMethod",
"afterSuiteMethod"
);
// @formatter:on
}
@Test
void failingAfterSuite() {
// @formatter:off
executeSuite(FailingAfterSuite.class)
.allEvents()
.assertStatistics(stats -> stats.started(7).finished(7).succeeded(5).failed(2))
.assertThatEvents()
.haveExactly(1, event(
container(FailingAfterSuite.class),
finishedWithFailure(instanceOf(RuntimeException.class),
message("Exception thrown by @AfterSuite method"))));
assertThat(StatefulTestCase.callSequence).containsExactly(
"beforeSuiteMethod",
"test1",
"test2",
"afterSuiteMethod"
);
// @formatter:on
}
@Test
void failingBeforeAndAfterSuite() {
// @formatter:off
executeSuite(FailingBeforeAndAfterSuite.class)
.allEvents()
.assertStatistics(stats -> stats.started(2).finished(2).succeeded(1).failed(1))
.assertThatEvents()
.haveExactly(1, event(
container(FailingBeforeAndAfterSuite.class),
finishedWithFailure(instanceOf(RuntimeException.class),
message("Exception thrown by @BeforeSuite method"),
suppressed(0, instanceOf(RuntimeException.class),
message("Exception thrown by @AfterSuite method")))));
assertThat(StatefulTestCase.callSequence).containsExactly(
"beforeSuiteMethod",
"afterSuiteMethod"
);
// @formatter:on
}
@Test
void severalFailingBeforeAndAfterSuite() {
// @formatter:off
executeSuite(SeveralFailingBeforeAndAfterSuite.class)
.allEvents()
.assertStatistics(stats -> stats.started(2).finished(2).succeeded(1).failed(1))
.assertThatEvents()
.haveExactly(1, event(
container(SeveralFailingBeforeAndAfterSuite.class),
finishedWithFailure(instanceOf(RuntimeException.class),
message("Exception thrown by @BeforeSuite method"),
suppressed(0, instanceOf(RuntimeException.class),
message("Exception thrown by @AfterSuite method")),
suppressed(1, instanceOf(RuntimeException.class),
message("Exception thrown by @AfterSuite method")))));
assertThat(StatefulTestCase.callSequence).containsExactly(
"beforeSuiteMethod",
"afterSuiteMethod",
"afterSuiteMethod"
);
// @formatter:on
}
@ParameterizedTest(name = "{0}")
@MethodSource
void invalidBeforeOrAfterSuiteMethod(Class<?> testSuiteClass, Predicate<String> failureMessagePredicate) {
var results = engineWithSelectedSuite(testSuiteClass).discover();
var issue = getOnlyElement(results.getDiscoveryIssues());
assertThat(issue.severity()).isEqualTo(Severity.ERROR);
assertThat(issue.message()).matches(failureMessagePredicate);
assertThat(issue.source()).containsInstanceOf(org.junit.platform.engine.support.descriptor.MethodSource.class);
}
private static Stream<Arguments> invalidBeforeOrAfterSuiteMethod() {
return Stream.of(
invalidBeforeOrAfterSuiteCase(NonVoidBeforeSuite.class, "@BeforeSuite method", "must not return a value."),
invalidBeforeOrAfterSuiteCase(ParameterAcceptingBeforeSuite.class, "@BeforeSuite method",
"must not accept parameters."),
invalidBeforeOrAfterSuiteCase(NonStaticBeforeSuite.class, "@BeforeSuite method", "must be static."),
invalidBeforeOrAfterSuiteCase(PrivateBeforeSuite.class, "@BeforeSuite method", "must not be private."),
invalidBeforeOrAfterSuiteCase(NonVoidAfterSuite.class, "@AfterSuite method", "must not return a value."),
invalidBeforeOrAfterSuiteCase(ParameterAcceptingAfterSuite.class, "@AfterSuite method",
"must not accept parameters."),
invalidBeforeOrAfterSuiteCase(NonStaticAfterSuite.class, "@AfterSuite method", "must be static."),
invalidBeforeOrAfterSuiteCase(PrivateAfterSuite.class, "@AfterSuite method", "must not be private."));
}
private static Arguments invalidBeforeOrAfterSuiteCase(Class<?> suiteClass, String failureMessageStart,
String failureMessageEnd) {
return arguments(named(suiteClass.getSimpleName(), suiteClass),
expectedMessage(failureMessageStart, failureMessageEnd));
}
private static Predicate<String> expectedMessage(String failureMessageStart, String failureMessageEnd) {
return message -> message.startsWith(failureMessageStart) && message.endsWith(failureMessageEnd);
}
private static EngineExecutionResults executeSuite(Class<?> suiteClass) {
return engineWithSelectedSuite(suiteClass).execute();
}
private static EngineTestKit.Builder engineWithSelectedSuite(Class<?> suiteClass) {
return EngineTestKit.engine(ENGINE_ID).selectors(selectClass(suiteClass));
}
}
|
BeforeAndAfterSuiteTests
|
java
|
playframework__playframework
|
core/play/src/main/java/play/core/cookie/encoding/CookieDecoder.java
|
{
"start": 877,
"end": 2886
}
|
class ____ {
private final Logger logger = LoggerFactory.getLogger(getClass());
private final boolean strict;
protected CookieDecoder(boolean strict) {
this.strict = strict;
}
protected DefaultCookie initCookie(
String header, int nameBegin, int nameEnd, int valueBegin, int valueEnd) {
if (nameBegin == -1 || nameBegin == nameEnd) {
logger.debug("Skipping cookie with null name");
return null;
}
if (valueBegin == -1) {
logger.debug("Skipping cookie with null value");
return null;
}
CharSequence wrappedValue = CharBuffer.wrap(header, valueBegin, valueEnd);
CharSequence unwrappedValue = unwrapValue(wrappedValue);
if (unwrappedValue == null) {
if (logger.isDebugEnabled()) {
logger.debug(
"Skipping cookie because starting quotes are not properly balanced in '"
+ wrappedValue
+ "'");
}
return null;
}
final String name = header.substring(nameBegin, nameEnd);
int invalidOctetPos;
if (strict && (invalidOctetPos = firstInvalidCookieNameOctet(name)) >= 0) {
if (logger.isDebugEnabled()) {
logger.debug(
"Skipping cookie because name '"
+ name
+ "' contains invalid char '"
+ name.charAt(invalidOctetPos)
+ "'");
}
return null;
}
final boolean wrap = unwrappedValue.length() != valueEnd - valueBegin;
if (strict && (invalidOctetPos = firstInvalidCookieValueOctet(unwrappedValue)) >= 0) {
if (logger.isDebugEnabled()) {
logger.debug(
"Skipping cookie because value '"
+ unwrappedValue
+ "' contains invalid char '"
+ unwrappedValue.charAt(invalidOctetPos)
+ "'");
}
return null;
}
DefaultCookie cookie = new DefaultCookie(name, unwrappedValue.toString());
cookie.setWrap(wrap);
return cookie;
}
}
|
CookieDecoder
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidCartesianPointDocValuesAggregator.java
|
{
"start": 1246,
"end": 1627
}
|
class ____ extends CentroidPointAggregator {
public static void combine(CentroidState current, long v) {
current.add(decodeX(v), decodeY(v));
}
public static void combine(GroupingCentroidState current, int groupId, long encoded) {
current.add(decodeX(encoded), 0d, decodeY(encoded), 0d, 1, groupId);
}
}
|
SpatialCentroidCartesianPointDocValuesAggregator
|
java
|
quarkusio__quarkus
|
independent-projects/resteasy-reactive/server/runtime/src/main/java/org/jboss/resteasy/reactive/server/model/ContextResolvers.java
|
{
"start": 567,
"end": 3978
}
|
class ____ {
private final Map<Class<?>, List<ResourceContextResolver>> resolvers = new HashMap<>();
public <T> void addContextResolver(Class<T> contextType, ResourceContextResolver contextResolver) {
List<ResourceContextResolver> list = resolvers.get(contextType);
if (list == null) {
list = new ArrayList<>(1);
resolvers.put(contextType, list);
}
list.add(contextResolver);
}
public <T> ContextResolver<T> getContextResolver(Class<T> clazz, MediaType mediaType) {
List<ResourceContextResolver> goodResolvers = resolvers.get(clazz);
if ((goodResolvers != null) && !goodResolvers.isEmpty()) {
List<MediaType> mt = Collections.singletonList(mediaType);
final List<ContextResolver<T>> delegates = new ArrayList<>();
MediaType bestMatch = null;
for (ResourceContextResolver goodResolver : goodResolvers) {
boolean add = false;
// we don't care
if (mediaType == null) {
add = true;
} else {
MediaType match;
// wildcard handling
if (goodResolver.mediaTypes().isEmpty()) {
match = MediaType.WILDCARD_TYPE;
} else {
match = MediaTypeHelper.getBestMatch(mt, goodResolver.mediaTypes());
// if there's no match, we must skip it
if (match == null)
continue;
}
if (bestMatch == null) {
bestMatch = match;
add = true;
} else {
int cmp = MediaTypeHelper.Q_COMPARATOR.compare(bestMatch, match);
if (cmp == 0) {
// same fitness
add = true;
} else if (cmp > 0) {
// wrong order means that our best match is not as good as the new match
delegates.clear();
add = true;
bestMatch = match;
}
// otherwise this is not as good as our delegate list, so let's not add it
}
}
if (add) {
delegates.add((ContextResolver<T>) goodResolver.getFactory().createInstance().getInstance());
}
}
if (delegates.isEmpty()) {
return null;
} else if (delegates.size() == 1) {
return delegates.get(0);
}
return new ContextResolverDelegate<>(delegates);
}
return null;
}
public Map<Class<?>, List<ResourceContextResolver>> getResolvers() {
return resolvers;
}
public void initializeDefaultFactories(Function<String, BeanFactory<?>> factoryCreator) {
for (Map.Entry<Class<?>, List<ResourceContextResolver>> entry : resolvers.entrySet()) {
for (ResourceContextResolver i : entry.getValue()) {
if (i.getFactory() == null) {
i.setFactory((BeanFactory) factoryCreator.apply(i.getClassName()));
}
}
}
}
}
|
ContextResolvers
|
java
|
spring-projects__spring-data-jpa
|
spring-data-jpa/src/test/java/org/springframework/data/jpa/repository/query/EqlParserQueryEnhancerUnitTests.java
|
{
"start": 967,
"end": 1865
}
|
class ____ extends QueryEnhancerTckTests {
@Override
QueryEnhancer createQueryEnhancer(DeclaredQuery query) {
assumeThat(query.isNative()).describedAs("EQL (non-native) only").isFalse();
return JpaQueryEnhancer.forEql(query.getQueryString());
}
@Override
@ParameterizedTest // GH-2773
@MethodSource("jpqlCountQueries")
void shouldDeriveJpqlCountQuery(String query, String expected) {
assumeThat(query).as("EqlParser replaces the column name with alias name for count queries") //
.doesNotContain("SELECT name FROM table_name some_alias");
assumeThat(query).as("EqlParser does not support simple JPQL syntax") //
.doesNotStartWithIgnoringCase("FROM");
assumeThat(expected).as("EqlParser does turn 'select a.b' into 'select count(a.b)'") //
.doesNotContain("select count(a.b");
super.shouldDeriveJpqlCountQuery(query, expected);
}
}
|
EqlParserQueryEnhancerUnitTests
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
|
{
"start": 91346,
"end": 92084
}
|
class ____, or null if it could not be found.
*/
public Class<?> getClassByNameOrNull(String name) {
Map<String, WeakReference<Class<?>>> map;
synchronized (CACHE_CLASSES) {
map = CACHE_CLASSES.get(classLoader);
if (map == null) {
map = Collections.synchronizedMap(
new WeakHashMap<String, WeakReference<Class<?>>>());
CACHE_CLASSES.put(classLoader, map);
}
}
Class<?> clazz = null;
WeakReference<Class<?>> ref = map.get(name);
if (ref != null) {
clazz = ref.get();
}
if (clazz == null) {
try {
clazz = Class.forName(name, true, classLoader);
} catch (ClassNotFoundException e) {
// Leave a marker that the
|
object
|
java
|
elastic__elasticsearch
|
qa/packaging/src/test/java/org/elasticsearch/packaging/util/Shell.java
|
{
"start": 1188,
"end": 9326
}
|
class ____ {
public static final Result NO_OP = new Shell.Result(0, "", "");
protected final Logger logger = LogManager.getLogger(getClass());
protected final Map<String, String> env = new HashMap<>();
String umask;
Path workingDirectory;
public Shell() {
this.workingDirectory = null;
}
/**
* Reset the shell to its newly created state.
*/
public void reset() {
env.clear();
workingDirectory = null;
umask = null;
}
public Map<String, String> getEnv() {
return env;
}
public void setWorkingDirectory(Path workingDirectory) {
this.workingDirectory = workingDirectory;
}
public void setUmask(String umask) {
this.umask = umask;
}
/**
* Run the provided string as a shell script. On Linux the {@code bash -c [script]} syntax will be used, and on Windows
* the {@code powershell.exe -Command [script]} syntax will be used. Throws an exception if the exit code of the script is nonzero
*/
public Result run(String script) {
return runScript(getScriptCommand(script));
}
/**
* Same as {@link #run(String)}, but does not throw an exception if the exit code of the script is nonzero
*/
public Result runIgnoreExitCode(String script) {
return runScriptIgnoreExitCode(getScriptCommand(script));
}
public void chown(Path path) throws Exception {
chown(path, System.getenv("username"));
}
public void chown(Path path, String newOwner) throws Exception {
logger.info("Chowning " + path + " to " + newOwner);
Platforms.onLinux(() -> run("chown -R elasticsearch:elasticsearch " + path));
Platforms.onWindows(() -> run(String.format(Locale.ROOT, """
$account = New-Object System.Security.Principal.NTAccount '%s';
$pathInfo = Get-Item '%s';
$toChown = @();
if ($pathInfo.PSIsContainer) {
$toChown += Get-ChildItem '%s' -Recurse;
}
$toChown += $pathInfo;
$toChown | ForEach-Object {
$acl = Get-Acl $_.FullName;
$acl.SetOwner($account);
Set-Acl $_.FullName $acl
}""", newOwner, path, path)));
}
public void extractZip(Path zipPath, Path destinationDir) throws Exception {
Platforms.onLinux(() -> run("unzip \"" + zipPath + "\" -d \"" + destinationDir + "\""));
Platforms.onWindows(() -> run("Expand-Archive -Path \"" + zipPath + "\" -DestinationPath \"" + destinationDir + "\""));
}
public Result run(String command, Object... args) {
String formattedCommand = String.format(Locale.ROOT, command, args);
return run(formattedCommand);
}
protected String[] getScriptCommand(String script) {
if (Platforms.WINDOWS) {
return powershellCommand(script);
} else {
return bashCommand(script);
}
}
private String[] bashCommand(String script) {
List<String> command = new ArrayList<>();
command.add("bash");
command.add("-c");
if (umask == null) {
command.add(script);
} else {
command.add(String.format(Locale.ROOT, "umask %s && %s", umask, script));
}
return command.toArray(new String[0]);
}
private static String[] powershellCommand(String script) {
return new String[] { "powershell.exe", "-Command", script };
}
private Result runScript(String[] command) {
logger.warn("Running command with env: " + env);
Result result = runScriptIgnoreExitCode(command);
if (result.isSuccess() == false) {
throw new ShellException("Command was not successful: [" + String.join(" ", command) + "]\n result: " + result);
}
return result;
}
private Result runScriptIgnoreExitCode(String[] command) {
ProcessBuilder builder = new ProcessBuilder();
builder.command(command);
if (workingDirectory != null) {
setWorkingDirectory(builder, workingDirectory);
}
builder.environment().keySet().remove("ES_JAVA_HOME"); // start with a fresh environment
for (Map.Entry<String, String> entry : env.entrySet()) {
builder.environment().put(entry.getKey(), entry.getValue());
}
final Path stdOut;
final Path stdErr;
try {
Path tmpDir = Paths.get(System.getProperty("java.io.tmpdir"));
Files.createDirectories(tmpDir);
stdOut = Files.createTempFile(tmpDir, getClass().getName(), ".out");
stdErr = Files.createTempFile(tmpDir, getClass().getName(), ".err");
} catch (IOException e) {
throw new UncheckedIOException(e);
}
redirectOutAndErr(builder, stdOut, stdErr);
try {
Process process = builder.start();
if (process.waitFor(10, TimeUnit.MINUTES) == false) {
if (process.isAlive()) {
process.destroyForcibly();
}
Result result = new Result(-1, readFileIfExists(stdOut), readFileIfExists(stdErr));
throw new IllegalStateException(
"Timed out running shell command: " + Arrays.toString(command) + "\n" + "Result:\n" + result
);
}
Result result = new Result(process.exitValue(), readFileIfExists(stdOut), readFileIfExists(stdErr));
logger.info("Ran: {} {}", Arrays.toString(command), result);
return result;
} catch (IOException e) {
throw new UncheckedIOException(e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(e);
} finally {
try {
FileUtils.deleteIfExists(stdOut);
FileUtils.deleteIfExists(stdErr);
} catch (UncheckedIOException e) {
logger.info("Cleanup of output files failed", e);
}
}
}
private String readFileIfExists(Path path) throws IOException {
if (Files.exists(path)) {
long size = Files.size(path);
final int maxFileSize = 1024 * 1024;
if (size > maxFileSize) {
// file is really big, truncate
try (var br = Files.newBufferedReader(path, StandardCharsets.UTF_8)) {
char[] buf = new char[maxFileSize];
int nRead = br.read(buf);
return new String(buf, 0, nRead) + "\n<<Too large to read (" + size + " bytes), truncated>>";
}
}
try (Stream<String> lines = Files.lines(path, StandardCharsets.UTF_8)) {
return lines.collect(Collectors.joining("\n"));
}
} else {
return "";
}
}
@SuppressForbidden(reason = "ProcessBuilder expects java.io.File")
private void redirectOutAndErr(ProcessBuilder builder, Path stdOut, Path stdErr) {
builder.redirectOutput(stdOut.toFile());
builder.redirectError(stdErr.toFile());
}
@SuppressForbidden(reason = "ProcessBuilder expects java.io.File")
private static void setWorkingDirectory(ProcessBuilder builder, Path path) {
builder.directory(path.toFile());
}
public String toString() {
return String.format(Locale.ROOT, " env = [%s] workingDirectory = [%s]", env, workingDirectory);
}
public record Result(int exitCode, String stdout, String stderr) {
public boolean isSuccess() {
return exitCode == 0;
}
@Override
public String toString() {
return String.format(Locale.ROOT, "exitCode = [%d] stdout = [%s] stderr = [%s]", exitCode, stdout.trim(), stderr.trim());
}
}
/**
* An exception to model failures to run a shell command. This exists so that calling code can differentiate between
* shell / command errors, and other runtime errors.
*/
public static
|
Shell
|
java
|
spring-projects__spring-framework
|
spring-expression/src/test/java/org/springframework/expression/spel/SpelCompilationCoverageTests.java
|
{
"start": 252154,
"end": 252297
}
|
class ____ {
public String getWorld() {
return "world";
}
public Object getObject() {
return "object";
}
}
public static
|
Greeter
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/AmbiguousMethodReferenceTest.java
|
{
"start": 1764,
"end": 2252
}
|
interface ____ {}
// BUG: Diagnostic contains: c(A, D)
public B c(D d) {
return null;
}
public static B c(A a, D d) {
return null;
}
}
""")
.doTest();
}
@Test
public void suppressedAtClass() {
testHelper
.addSourceLines(
"A.java",
"""
@SuppressWarnings("AmbiguousMethodReference")
public
|
D
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/EmbeddingResults.java
|
{
"start": 802,
"end": 1044
}
|
interface ____<E extends EmbeddingResults.Embedding<E>> extends InferenceServiceResults {
String EMBEDDING = "embedding";
/**
* A resulting embedding for one of the input texts to the inference service.
*/
|
EmbeddingResults
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhance/internal/bytebuddy/DirtyCheckingWithEmbeddableAndNonVisibleGenericMappedSuperclassWithDifferentGenericParameterNameTest.java
|
{
"start": 2831,
"end": 2892
}
|
class ____ {
}
@Embeddable
public static
|
MyAbstractEmbeddable
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/context/annotation/CommonAnnotationBeanPostProcessorTests.java
|
{
"start": 2393,
"end": 20508
}
|
class ____ {
DefaultListableBeanFactory bf = new DefaultListableBeanFactory();
CommonAnnotationBeanPostProcessor bpp = new CommonAnnotationBeanPostProcessor();
@BeforeEach
void setup() {
bpp.setResourceFactory(bf);
bf.addBeanPostProcessor(bpp);
}
@Test
void processInjection() {
ResourceInjectionBean bean = new ResourceInjectionBean();
assertThat(bean.getTestBean()).isNull();
assertThat(bean.getTestBean2()).isNull();
TestBean tb = new TestBean();
bf.registerSingleton("testBean", tb);
bpp.processInjection(bean);
assertThat(bean.getTestBean()).isSameAs(tb);
assertThat(bean.getTestBean2()).isSameAs(tb);
}
@Test
void postConstructAndPreDestroy() {
bf.registerBeanDefinition("annotatedBean", new RootBeanDefinition(AnnotatedInitDestroyBean.class));
AnnotatedInitDestroyBean bean = (AnnotatedInitDestroyBean) bf.getBean("annotatedBean");
assertThat(bean.initCalled).isTrue();
bf.destroySingletons();
assertThat(bean.destroyCalled).isTrue();
}
@Test
void postConstructAndPreDestroyWithPostProcessor() {
bf.addBeanPostProcessor(new InitDestroyBeanPostProcessor());
bf.addBeanPostProcessor(bpp);
bf.registerBeanDefinition("annotatedBean", new RootBeanDefinition(AnnotatedInitDestroyBean.class));
AnnotatedInitDestroyBean bean = (AnnotatedInitDestroyBean) bf.getBean("annotatedBean");
assertThat(bean.initCalled).isTrue();
bf.destroySingletons();
assertThat(bean.destroyCalled).isTrue();
}
@Test
void postConstructAndPreDestroyWithApplicationContextAndPostProcessor() {
GenericApplicationContext ctx = new GenericApplicationContext();
ctx.registerBeanDefinition("bpp1", new RootBeanDefinition(InitDestroyBeanPostProcessor.class));
ctx.registerBeanDefinition("bpp2", new RootBeanDefinition(CommonAnnotationBeanPostProcessor.class));
ctx.registerBeanDefinition("annotatedBean", new RootBeanDefinition(AnnotatedInitDestroyBean.class));
ctx.refresh();
AnnotatedInitDestroyBean bean = (AnnotatedInitDestroyBean) ctx.getBean("annotatedBean");
assertThat(bean.initCalled).isTrue();
ctx.close();
assertThat(bean.destroyCalled).isTrue();
}
@Test
void postConstructAndPreDestroyWithManualConfiguration() {
InitDestroyAnnotationBeanPostProcessor bpp = new InitDestroyAnnotationBeanPostProcessor();
bpp.setInitAnnotationType(PostConstruct.class);
bpp.setDestroyAnnotationType(PreDestroy.class);
bf.registerBeanDefinition("annotatedBean", new RootBeanDefinition(AnnotatedInitDestroyBean.class));
AnnotatedInitDestroyBean bean = (AnnotatedInitDestroyBean) bf.getBean("annotatedBean");
assertThat(bean.initCalled).isTrue();
bf.destroySingletons();
assertThat(bean.destroyCalled).isTrue();
}
@Test
void postProcessorWithNullBean() {
RootBeanDefinition rbd = new RootBeanDefinition(NullFactory.class);
rbd.setFactoryMethodName("create");
bf.registerBeanDefinition("bean", rbd);
assertThat(bf.getBean("bean").toString()).isEqualTo("null");
bf.destroySingletons();
}
@Test
void serialization() throws Exception {
CommonAnnotationBeanPostProcessor bpp2 = SerializationTestUtils.serializeAndDeserialize(bpp);
AnnotatedInitDestroyBean bean = new AnnotatedInitDestroyBean();
bpp2.postProcessBeforeDestruction(bean, "annotatedBean");
assertThat(bean.destroyCalled).isTrue();
}
@Test
void serializationWithManualConfiguration() throws Exception {
InitDestroyAnnotationBeanPostProcessor bpp = new InitDestroyAnnotationBeanPostProcessor();
bpp.setInitAnnotationType(PostConstruct.class);
bpp.setDestroyAnnotationType(PreDestroy.class);
InitDestroyAnnotationBeanPostProcessor bpp2 = SerializationTestUtils.serializeAndDeserialize(bpp);
AnnotatedInitDestroyBean bean = new AnnotatedInitDestroyBean();
bpp2.postProcessBeforeDestruction(bean, "annotatedBean");
assertThat(bean.destroyCalled).isTrue();
}
@Test
void resourceInjection() {
bf.registerBeanDefinition("annotatedBean", new RootBeanDefinition(ResourceInjectionBean.class));
TestBean tb = new TestBean();
bf.registerSingleton("testBean", tb);
TestBean tb2 = new TestBean();
bf.registerSingleton("testBean2", tb2);
ResourceInjectionBean bean = (ResourceInjectionBean) bf.getBean("annotatedBean");
assertThat(bean.initCalled).isTrue();
assertThat(bean.init2Called).isTrue();
assertThat(bean.init3Called).isTrue();
assertThat(bean.getTestBean()).isSameAs(tb);
assertThat(bean.getTestBean2()).isSameAs(tb2);
bf.destroySingletons();
assertThat(bean.destroyCalled).isTrue();
assertThat(bean.destroy2Called).isTrue();
assertThat(bean.destroy3Called).isTrue();
}
@Test
void resourceInjectionWithPrototypes() {
RootBeanDefinition abd = new RootBeanDefinition(ResourceInjectionBean.class);
abd.setScope(BeanDefinition.SCOPE_PROTOTYPE);
bf.registerBeanDefinition("annotatedBean", abd);
RootBeanDefinition tbd1 = new RootBeanDefinition(TestBean.class);
tbd1.setScope(BeanDefinition.SCOPE_PROTOTYPE);
bf.registerBeanDefinition("testBean", tbd1);
RootBeanDefinition tbd2 = new RootBeanDefinition(TestBean.class);
tbd2.setScope(BeanDefinition.SCOPE_PROTOTYPE);
bf.registerBeanDefinition("testBean2", tbd2);
ResourceInjectionBean bean = (ResourceInjectionBean) bf.getBean("annotatedBean");
assertThat(bean.initCalled).isTrue();
assertThat(bean.init2Called).isTrue();
assertThat(bean.init3Called).isTrue();
TestBean tb = bean.getTestBean();
TestBean tb2 = bean.getTestBean2();
assertThat(tb).isNotNull();
assertThat(tb2).isNotNull();
ResourceInjectionBean anotherBean = (ResourceInjectionBean) bf.getBean("annotatedBean");
assertThat(bean).isNotSameAs(anotherBean);
assertThat(tb).isNotSameAs(anotherBean.getTestBean());
assertThat(tb2).isNotSameAs(anotherBean.getTestBean2());
bf.destroyBean("annotatedBean", bean);
assertThat(bean.destroyCalled).isTrue();
assertThat(bean.destroy2Called).isTrue();
assertThat(bean.destroy3Called).isTrue();
}
@Test
void resourceInjectionWithResolvableDependencyType() {
bpp.setBeanFactory(bf);
bf.addBeanPostProcessor(bpp);
RootBeanDefinition abd = new RootBeanDefinition(ExtendedResourceInjectionBean.class);
abd.setScope(BeanDefinition.SCOPE_PROTOTYPE);
bf.registerBeanDefinition("annotatedBean", abd);
RootBeanDefinition tbd = new RootBeanDefinition(TestBean.class);
tbd.setScope(BeanDefinition.SCOPE_PROTOTYPE);
bf.registerBeanDefinition("testBean4", tbd);
bf.registerResolvableDependency(BeanFactory.class, bf);
bf.registerResolvableDependency(INestedTestBean.class, (ObjectFactory<Object>) NestedTestBean::new);
@SuppressWarnings({"deprecation", "removal"})
org.springframework.beans.factory.config.PropertyPlaceholderConfigurer ppc = new org.springframework.beans.factory.config.PropertyPlaceholderConfigurer();
Properties props = new Properties();
props.setProperty("tb", "testBean4");
ppc.setProperties(props);
ppc.postProcessBeanFactory(bf);
ExtendedResourceInjectionBean bean = (ExtendedResourceInjectionBean) bf.getBean("annotatedBean");
INestedTestBean tb = bean.getTestBean6();
assertThat(tb).isNotNull();
ExtendedResourceInjectionBean anotherBean = (ExtendedResourceInjectionBean) bf.getBean("annotatedBean");
assertThat(bean).isNotSameAs(anotherBean);
assertThat(tb).isNotSameAs(anotherBean.getTestBean6());
String[] depBeans = bf.getDependenciesForBean("annotatedBean");
assertThat(depBeans).hasSize(1);
assertThat(depBeans[0]).isEqualTo("testBean4");
}
@Test
void resourceInjectionWithDefaultMethod() {
bf.registerBeanDefinition("annotatedBean", new RootBeanDefinition(DefaultMethodResourceInjectionBean.class));
TestBean tb2 = new TestBean();
bf.registerSingleton("testBean2", tb2);
NestedTestBean tb7 = new NestedTestBean();
bf.registerSingleton("testBean7", tb7);
DefaultMethodResourceInjectionBean bean = (DefaultMethodResourceInjectionBean) bf.getBean("annotatedBean");
assertThat(bean.getTestBean2()).isSameAs(tb2);
assertThat(bean.counter).isSameAs(2);
bf.destroySingletons();
assertThat(bean.counter).isSameAs(3);
}
@Test
void resourceInjectionWithTwoProcessors() {
CommonAnnotationBeanPostProcessor bpp2 = new CommonAnnotationBeanPostProcessor();
bpp2.setResourceFactory(bf);
bf.addBeanPostProcessor(bpp2);
bf.registerBeanDefinition("annotatedBean", new RootBeanDefinition(ResourceInjectionBean.class));
TestBean tb = new TestBean();
bf.registerSingleton("testBean", tb);
TestBean tb2 = new TestBean();
bf.registerSingleton("testBean2", tb2);
ResourceInjectionBean bean = (ResourceInjectionBean) bf.getBean("annotatedBean");
assertThat(bean.initCalled).isTrue();
assertThat(bean.init2Called).isTrue();
assertThat(bean.getTestBean()).isSameAs(tb);
assertThat(bean.getTestBean2()).isSameAs(tb2);
bf.destroySingletons();
assertThat(bean.destroyCalled).isTrue();
assertThat(bean.destroy2Called).isTrue();
}
@Test
void resourceInjectionFromJndi() {
SimpleJndiBeanFactory resourceFactory = new SimpleJndiBeanFactory();
ExpectedLookupTemplate jndiTemplate = new ExpectedLookupTemplate();
TestBean tb = new TestBean();
jndiTemplate.addObject("java:comp/env/testBean", tb);
TestBean tb2 = new TestBean();
jndiTemplate.addObject("java:comp/env/testBean2", tb2);
resourceFactory.setJndiTemplate(jndiTemplate);
bpp.setResourceFactory(resourceFactory);
bf.registerBeanDefinition("annotatedBean", new RootBeanDefinition(ResourceInjectionBean.class));
ResourceInjectionBean bean = (ResourceInjectionBean) bf.getBean("annotatedBean");
assertThat(bean.initCalled).isTrue();
assertThat(bean.init2Called).isTrue();
assertThat(bean.getTestBean()).isSameAs(tb);
assertThat(bean.getTestBean2()).isSameAs(tb2);
bf.destroySingletons();
assertThat(bean.destroyCalled).isTrue();
assertThat(bean.destroy2Called).isTrue();
}
@Test
void extendedResourceInjection() {
bpp.setBeanFactory(bf);
bf.addBeanPostProcessor(bpp);
bf.registerResolvableDependency(BeanFactory.class, bf);
@SuppressWarnings({"deprecation", "removal"})
org.springframework.beans.factory.config.PropertyPlaceholderConfigurer ppc = new org.springframework.beans.factory.config.PropertyPlaceholderConfigurer();
Properties props = new Properties();
props.setProperty("tb", "testBean3");
ppc.setProperties(props);
ppc.postProcessBeanFactory(bf);
bf.registerBeanDefinition("annotatedBean", new RootBeanDefinition(ExtendedResourceInjectionBean.class));
bf.registerBeanDefinition("annotatedBean2", new RootBeanDefinition(NamedResourceInjectionBean.class));
bf.registerBeanDefinition("annotatedBean3", new RootBeanDefinition(ConvertedResourceInjectionBean.class));
TestBean tb = new TestBean();
bf.registerSingleton("testBean", tb);
TestBean tb2 = new TestBean();
bf.registerSingleton("testBean2", tb2);
TestBean tb3 = new TestBean();
bf.registerSingleton("testBean3", tb3);
TestBean tb4 = new TestBean();
bf.registerSingleton("testBean4", tb4);
NestedTestBean tb6 = new NestedTestBean();
bf.registerSingleton("value", "5");
bf.registerSingleton("xy", tb6);
bf.registerAlias("xy", "testBean9");
ExtendedResourceInjectionBean bean = (ExtendedResourceInjectionBean) bf.getBean("annotatedBean");
assertThat(bean.initCalled).isTrue();
assertThat(bean.init2Called).isTrue();
assertThat(bean.getTestBean()).isSameAs(tb);
assertThat(bean.getTestBean2()).isSameAs(tb2);
assertThat(bean.getTestBean3()).isSameAs(tb4);
assertThat(bean.getTestBean4()).isSameAs(tb3);
assertThat(bean.testBean5).isSameAs(tb6);
assertThat(bean.testBean6).isSameAs(tb6);
assertThat(bean.beanFactory).isSameAs(bf);
NamedResourceInjectionBean bean2 = (NamedResourceInjectionBean) bf.getBean("annotatedBean2");
assertThat(bean2.testBean).isSameAs(tb6);
ConvertedResourceInjectionBean bean3 = (ConvertedResourceInjectionBean) bf.getBean("annotatedBean3");
assertThat(bean3.value).isSameAs(5);
bf.destroySingletons();
assertThat(bean.destroyCalled).isTrue();
assertThat(bean.destroy2Called).isTrue();
}
@Test
void extendedResourceInjectionWithOverriding() {
bpp.setBeanFactory(bf);
bf.addBeanPostProcessor(bpp);
bf.registerResolvableDependency(BeanFactory.class, bf);
@SuppressWarnings({"deprecation", "removal"})
org.springframework.beans.factory.config.PropertyPlaceholderConfigurer ppc = new org.springframework.beans.factory.config.PropertyPlaceholderConfigurer();
Properties props = new Properties();
props.setProperty("tb", "testBean3");
ppc.setProperties(props);
ppc.postProcessBeanFactory(bf);
RootBeanDefinition annotatedBd = new RootBeanDefinition(ExtendedResourceInjectionBean.class);
TestBean tb5 = new TestBean();
annotatedBd.getPropertyValues().add("testBean2", tb5);
bf.registerBeanDefinition("annotatedBean", annotatedBd);
bf.registerBeanDefinition("annotatedBean2", new RootBeanDefinition(NamedResourceInjectionBean.class));
TestBean tb = new TestBean();
bf.registerSingleton("testBean", tb);
TestBean tb2 = new TestBean();
bf.registerSingleton("testBean2", tb2);
TestBean tb3 = new TestBean();
bf.registerSingleton("testBean3", tb3);
TestBean tb4 = new TestBean();
bf.registerSingleton("testBean4", tb4);
NestedTestBean tb6 = new NestedTestBean();
bf.registerSingleton("xy", tb6);
ExtendedResourceInjectionBean bean = (ExtendedResourceInjectionBean) bf.getBean("annotatedBean");
assertThat(bean.initCalled).isTrue();
assertThat(bean.init2Called).isTrue();
assertThat(bean.getTestBean()).isSameAs(tb);
assertThat(bean.getTestBean2()).isSameAs(tb5);
assertThat(bean.getTestBean3()).isSameAs(tb4);
assertThat(bean.getTestBean4()).isSameAs(tb3);
assertThat(bean.testBean5).isSameAs(tb6);
assertThat(bean.testBean6).isSameAs(tb6);
assertThat(bean.beanFactory).isSameAs(bf);
try {
bf.getBean("annotatedBean2");
}
catch (BeanCreationException ex) {
boolean condition = ex.getRootCause() instanceof NoSuchBeanDefinitionException;
assertThat(condition).isTrue();
NoSuchBeanDefinitionException innerEx = (NoSuchBeanDefinitionException) ex.getRootCause();
assertThat(innerEx.getBeanName()).isEqualTo("testBean9");
}
bf.destroySingletons();
assertThat(bean.destroyCalled).isTrue();
assertThat(bean.destroy2Called).isTrue();
}
@Test
void extendedEjbInjection() {
bpp.setBeanFactory(bf);
bf.addBeanPostProcessor(bpp);
bf.registerResolvableDependency(BeanFactory.class, bf);
bf.registerBeanDefinition("annotatedBean", new RootBeanDefinition(ExtendedEjbInjectionBean.class));
TestBean tb = new TestBean();
bf.registerSingleton("testBean", tb);
TestBean tb2 = new TestBean();
bf.registerSingleton("testBean2", tb2);
TestBean tb3 = new TestBean();
bf.registerSingleton("testBean3", tb3);
TestBean tb4 = new TestBean();
bf.registerSingleton("testBean4", tb4);
NestedTestBean tb6 = new NestedTestBean();
bf.registerSingleton("xy", tb6);
bf.registerAlias("xy", "testBean9");
ExtendedEjbInjectionBean bean = (ExtendedEjbInjectionBean) bf.getBean("annotatedBean");
assertThat(bean.initCalled).isTrue();
assertThat(bean.init2Called).isTrue();
assertThat(bean.getTestBean()).isSameAs(tb);
assertThat(bean.getTestBean2()).isSameAs(tb2);
assertThat(bean.getTestBean3()).isSameAs(tb4);
assertThat(bean.getTestBean4()).isSameAs(tb3);
assertThat(bean.testBean5).isSameAs(tb6);
assertThat(bean.testBean6).isSameAs(tb6);
assertThat(bean.beanFactory).isSameAs(bf);
bf.destroySingletons();
assertThat(bean.destroyCalled).isTrue();
assertThat(bean.destroy2Called).isTrue();
}
@Test
void lazyResolutionWithResourceField() {
bf.registerBeanDefinition("annotatedBean", new RootBeanDefinition(LazyResourceFieldInjectionBean.class));
bf.registerBeanDefinition("testBean", new RootBeanDefinition(TestBean.class));
LazyResourceFieldInjectionBean bean = (LazyResourceFieldInjectionBean) bf.getBean("annotatedBean");
assertThat(bf.containsSingleton("testBean")).isFalse();
bean.testBean.setName("notLazyAnymore");
assertThat(bf.containsSingleton("testBean")).isTrue();
TestBean tb = (TestBean) bf.getBean("testBean");
assertThat(tb.getName()).isEqualTo("notLazyAnymore");
}
@Test
void lazyResolutionWithResourceMethod() {
bf.registerBeanDefinition("annotatedBean", new RootBeanDefinition(LazyResourceMethodInjectionBean.class));
bf.registerBeanDefinition("testBean", new RootBeanDefinition(TestBean.class));
LazyResourceMethodInjectionBean bean = (LazyResourceMethodInjectionBean) bf.getBean("annotatedBean");
assertThat(bf.containsSingleton("testBean")).isFalse();
bean.testBean.setName("notLazyAnymore");
assertThat(bf.containsSingleton("testBean")).isTrue();
TestBean tb = (TestBean) bf.getBean("testBean");
assertThat(tb.getName()).isEqualTo("notLazyAnymore");
}
@Test
void lazyResolutionWithCglibProxy() {
bf.registerBeanDefinition("annotatedBean", new RootBeanDefinition(LazyResourceCglibInjectionBean.class));
bf.registerBeanDefinition("testBean", new RootBeanDefinition(TestBean.class));
LazyResourceCglibInjectionBean bean = (LazyResourceCglibInjectionBean) bf.getBean("annotatedBean");
assertThat(bf.containsSingleton("testBean")).isFalse();
bean.testBean.setName("notLazyAnymore");
assertThat(bf.containsSingleton("testBean")).isTrue();
TestBean tb = (TestBean) bf.getBean("testBean");
assertThat(tb.getName()).isEqualTo("notLazyAnymore");
}
@Test
void lazyResolutionWithFallbackTypeMatch() {
bf.setAutowireCandidateResolver(new ContextAnnotationAutowireCandidateResolver());
bpp.setBeanFactory(bf);
bf.addBeanPostProcessor(bpp);
bf.registerBeanDefinition("annotatedBean", new RootBeanDefinition(LazyResourceCglibInjectionBean.class));
bf.registerBeanDefinition("tb", new RootBeanDefinition(TestBean.class));
LazyResourceCglibInjectionBean bean = (LazyResourceCglibInjectionBean) bf.getBean("annotatedBean");
assertThat(bf.containsSingleton("tb")).isFalse();
bean.testBean.setName("notLazyAnymore");
assertThat(bf.containsSingleton("tb")).isTrue();
TestBean tb = (TestBean) bf.getBean("tb");
assertThat(tb.getName()).isEqualTo("notLazyAnymore");
}
public static
|
CommonAnnotationBeanPostProcessorTests
|
java
|
apache__kafka
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetsForLeaderEpochClient.java
|
{
"start": 1310,
"end": 2296
}
|
class ____ extends AsyncClient<
Map<TopicPartition, SubscriptionState.FetchPosition>,
OffsetsForLeaderEpochRequest,
OffsetsForLeaderEpochResponse,
OffsetsForLeaderEpochUtils.OffsetForEpochResult> {
OffsetsForLeaderEpochClient(ConsumerNetworkClient client, LogContext logContext) {
super(client, logContext);
}
@Override
protected AbstractRequest.Builder<OffsetsForLeaderEpochRequest> prepareRequest(
Node node, Map<TopicPartition, SubscriptionState.FetchPosition> requestData) {
return OffsetsForLeaderEpochUtils.prepareRequest(requestData);
}
@Override
protected OffsetsForLeaderEpochUtils.OffsetForEpochResult handleResponse(
Node node,
Map<TopicPartition, SubscriptionState.FetchPosition> requestData,
OffsetsForLeaderEpochResponse response) {
return OffsetsForLeaderEpochUtils.handleResponse(requestData, response);
}
}
|
OffsetsForLeaderEpochClient
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IlmHealthIndicatorService.java
|
{
"start": 17841,
"end": 18421
}
|
class ____ provides a simple DSL to create composed `RuleConfig`s. It'll enforce that any rule to start with an action
* and then optionally define the max-time-on an action or a set of steps. In general, the output rule will be in the form:
*
* action-rule AND (step-rule-1 OR step-rule-2 OR step-rule-3 ...)?
*
* Where the list of `step-rule-xx` could be empty.
*
* To have a clearer idea of the final shape of the rules, check the methods {@link ActionRule#test} and {@link StepRule#test}
*/
|
that
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java
|
{
"start": 6898,
"end": 43896
}
|
class ____ extends ESTestCase {
private static ClusterGroup clusterGroup;
protected Collection<Class<? extends Plugin>> nodePlugins() {
return Collections.emptyList();
}
protected Settings leaderClusterSettings() {
return Settings.EMPTY;
}
protected Settings followerClusterSettings() {
final Settings.Builder builder = Settings.builder();
if (randomBoolean()) {
builder.put(RemoteClusterSettings.REMOTE_MAX_PENDING_CONNECTION_LISTENERS.getKey(), randomIntBetween(1, 100));
}
return builder.build();
}
@Before
public final void startClusters() throws Exception {
if (clusterGroup != null && reuseClusters()) {
clusterGroup.leaderCluster.ensureAtMostNumDataNodes(numberOfNodesPerCluster());
clusterGroup.followerCluster.ensureAtMostNumDataNodes(numberOfNodesPerCluster());
setupMasterNodeRequestsValidatorOnFollowerCluster();
return;
}
stopClusters();
Collection<Class<? extends Plugin>> mockPlugins = Arrays.asList(
ESIntegTestCase.TestSeedPlugin.class,
MockHttpTransport.TestPlugin.class,
MockTransportService.TestPlugin.class,
InternalSettingsPlugin.class,
getTestTransportPlugin()
);
InternalTestCluster leaderCluster = new InternalTestCluster(
randomLong(),
createTempDir(),
true,
true,
numberOfNodesPerCluster(),
numberOfNodesPerCluster(),
"leader_cluster",
createNodeConfigurationSource(null, true),
0,
"leader",
mockPlugins,
Function.identity(),
TEST_ENTITLEMENTS::addEntitledNodePaths
);
leaderCluster.beforeTest(random());
leaderCluster.ensureAtLeastNumDataNodes(numberOfNodesPerCluster());
assertBusy(() -> {
ClusterService clusterService = leaderCluster.getInstance(ClusterService.class);
assertNotNull(clusterService.state().metadata().custom(LicensesMetadata.TYPE));
}, 60, TimeUnit.SECONDS);
String address = leaderCluster.getDataNodeInstance(TransportService.class).boundAddress().publishAddress().toString();
InternalTestCluster followerCluster = new InternalTestCluster(
randomLong(),
createTempDir(),
true,
true,
numberOfNodesPerCluster(),
numberOfNodesPerCluster(),
"follower_cluster",
createNodeConfigurationSource(address, false),
0,
"follower",
mockPlugins,
Function.identity(),
TEST_ENTITLEMENTS::addEntitledNodePaths
);
clusterGroup = new ClusterGroup(leaderCluster, followerCluster);
followerCluster.beforeTest(random());
followerCluster.ensureAtLeastNumDataNodes(numberOfNodesPerCluster());
assertBusy(() -> {
ClusterService clusterService = followerCluster.getInstance(ClusterService.class);
assertNotNull(clusterService.state().metadata().custom(LicensesMetadata.TYPE));
}, 60, TimeUnit.SECONDS);
setupMasterNodeRequestsValidatorOnFollowerCluster();
}
protected void setupMasterNodeRequestsValidatorOnFollowerCluster() {
final InternalTestCluster followerCluster = clusterGroup.followerCluster;
for (String nodeName : followerCluster.getNodeNames()) {
MockTransportService transportService = (MockTransportService) followerCluster.getInstance(TransportService.class, nodeName);
transportService.addSendBehavior((connection, requestId, action, request, options) -> {
if (isCcrAdminRequest(request) == false && request instanceof AcknowledgedRequest<?> acknowledgedRequest) {
final TimeValue masterTimeout = acknowledgedRequest.masterNodeTimeout();
if (masterTimeout == null || masterTimeout.nanos() != TimeValue.MAX_VALUE.nanos()) {
throw new AssertionError("time out of a master request [" + request + "] on the follower is not set to unbounded");
}
}
connection.sendRequest(requestId, action, request, options);
});
}
}
/**
 * Removes the send-behavior rules installed by
 * {@code setupMasterNodeRequestsValidatorOnFollowerCluster()} from every follower node.
 */
protected void removeMasterNodeRequestsValidatorOnFollowerCluster() {
    final InternalTestCluster followerCluster = clusterGroup.followerCluster;
    for (String nodeName : followerCluster.getNodeNames()) {
        // Consistency fix: use the captured cluster reference throughout instead of re-resolving it
        // via getFollowerCluster() on every iteration (both point at clusterGroup.followerCluster).
        MockTransportService transportService = (MockTransportService) followerCluster.getInstance(
            TransportService.class,
            nodeName
        );
        transportService.clearAllRules();
    }
}
/**
 * Returns {@code true} if the given transport request is one of the CCR administrative request
 * types, which are exempt from the master-timeout validation installed on follower nodes.
 */
private static boolean isCcrAdminRequest(TransportRequest request) {
    for (Class<?> adminRequestType : new Class<?>[] {
        PutFollowAction.Request.class,
        ResumeFollowAction.Request.class,
        PauseFollowAction.Request.class,
        UnfollowAction.Request.class,
        ForgetFollowerAction.Request.class,
        PutAutoFollowPatternAction.Request.class,
        ActivateAutoFollowPatternAction.Request.class,
        DeleteAutoFollowPatternAction.Request.class }) {
        if (adminRequestType.isInstance(request)) {
            return true;
        }
    }
    return false;
}
/**
 * Follower indices don't get all the settings from leader, for example 'index.unassigned.node_left.delayed_timeout'
 * is not replicated and if tests kill nodes, we have to wait 60s by default...
 * This sets the delayed-allocation timeout to zero on the given follower index.
 */
protected void disableDelayedAllocation(String index) {
    UpdateSettingsRequest request = new UpdateSettingsRequest(index).masterNodeTimeout(TimeValue.MAX_VALUE);
    request.settings(Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), 0));
    assertAcked(followerClient().admin().indices().updateSettings(request).actionGet());
}
/**
 * Per-test teardown. Order matters: write buffers are drained first, then the master-timeout
 * validator is removed, then CCR metadata (auto-follow patterns, persistent tasks) is cleared from
 * the follower's cluster state, and finally both clusters are verified and wiped.
 */
@After
public void afterTest() throws Exception {
    ensureEmptyWriteBuffers();
    removeMasterNodeRequestsValidatorOnFollowerCluster();
    String masterNode = clusterGroup.followerCluster.getMasterName();
    ClusterService clusterService = clusterGroup.followerCluster.getInstance(ClusterService.class, masterNode);
    removeCCRRelatedMetadataFromClusterState(clusterService);
    try {
        // Consistency checks on both clusters before the indices are deleted.
        clusterGroup.leaderCluster.beforeIndexDeletion();
        clusterGroup.leaderCluster.assertSeqNos();
        clusterGroup.leaderCluster.assertSameDocIdsOnShards();
        clusterGroup.leaderCluster.assertConsistentHistoryBetweenTranslogAndLuceneIndex();
        clusterGroup.followerCluster.beforeIndexDeletion();
        clusterGroup.followerCluster.assertSeqNos();
        clusterGroup.followerCluster.assertSameDocIdsOnShards();
        clusterGroup.followerCluster.assertConsistentHistoryBetweenTranslogAndLuceneIndex();
    } finally {
        // Always wipe both clusters so state cannot leak into the next test, even if a check failed.
        clusterGroup.leaderCluster.wipe(Collections.emptySet());
        clusterGroup.followerCluster.wipe(Collections.emptySet());
    }
    clusterGroup.leaderCluster.assertAfterTest();
    clusterGroup.followerCluster.assertAfterTest();
}
/**
 * Builds the node settings and plugin list for one of the two test clusters.
 *
 * @param leaderSeedAddress publish address of a leader data node, or {@code null}; when non-null
 *                          (and remote-cluster config via node settings is enabled) it is wired in
 *                          as the {@code leader_cluster} remote seed
 * @param leaderCluster     {@code true} to apply {@code leaderClusterSettings()},
 *                          {@code false} for {@code followerClusterSettings()}
 */
private NodeConfigurationSource createNodeConfigurationSource(final String leaderSeedAddress, final boolean leaderCluster) {
    Settings.Builder builder = Settings.builder();
    // Default the watermarks to absurdly low to prevent the tests
    // from failing on nodes without enough disk space
    builder.put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "1b");
    builder.put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "1b");
    builder.put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), "1b");
    // wait short time for other active shards before actually deleting, default 30s not needed in tests
    builder.put(IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT.getKey(), new TimeValue(1, TimeUnit.SECONDS));
    builder.putList(DISCOVERY_SEED_HOSTS_SETTING.getKey()); // empty list disables a port scan for other nodes
    builder.putList(DISCOVERY_SEED_PROVIDERS_SETTING.getKey(), "file");
    builder.put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType());
    // Disable X-Pack features not exercised by these tests; use a trial license so CCR is available.
    builder.put(XPackSettings.SECURITY_ENABLED.getKey(), false);
    builder.put(XPackSettings.WATCHER_ENABLED.getKey(), false);
    builder.put(XPackSettings.MACHINE_LEARNING_ENABLED.getKey(), false);
    builder.put(LicenseSettings.SELF_GENERATED_LICENSE_TYPE.getKey(), "trial");
    // Let cluster state api return quickly in order to speed up auto follow tests:
    builder.put(CcrSettings.CCR_WAIT_FOR_METADATA_TIMEOUT.getKey(), TimeValue.timeValueMillis(100));
    if (leaderCluster) {
        builder.put(leaderClusterSettings());
    } else {
        builder.put(followerClusterSettings());
    }
    if (configureRemoteClusterViaNodeSettings() && leaderSeedAddress != null) {
        builder.put("cluster.remote.leader_cluster.seeds", leaderSeedAddress);
    }
    return new NodeConfigurationSource() {
        @Override
        public Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
            // All nodes of the cluster share the settings built above.
            return builder.build();
        }
        @Override
        public Path nodeConfigPath(int nodeOrdinal) {
            return null;
        }
        @Override
        public Collection<Class<? extends Plugin>> nodePlugins() {
            // Base CCR/analysis plugins plus whatever the concrete test class contributes.
            return Stream.concat(
                Stream.of(LocalStateCcr.class, CommonAnalysisPlugin.class),
                CcrIntegTestCase.this.nodePlugins().stream()
            ).collect(Collectors.toList());
        }
    };
}
/**
 * Closes both clusters once the whole suite is done. The static reference is cleared only after a
 * successful close, so a failed shutdown leaves the group reachable.
 */
@AfterClass
public static void stopClusters() throws IOException {
    IOUtils.close(clusterGroup);
    clusterGroup = null;
}
/** Number of nodes in each of the two clusters; override to change the default of 2. */
protected int numberOfNodesPerCluster() {
    return 2;
}
/** Whether the clusters started for a previous test may be reused; override to force fresh clusters. */
protected boolean reuseClusters() {
    return true;
}
/** Whether the leader remote cluster is wired in via node settings; override to configure it another way. */
protected boolean configureRemoteClusterViaNodeSettings() {
    return true;
}
/** Returns a client connected to the leader cluster. */
protected final Client leaderClient() {
    return getLeaderCluster().client();
}
/** Returns a client connected to the follower cluster. */
protected final Client followerClient() {
    return getFollowerCluster().client();
}
/** Returns the leader test cluster. */
protected final InternalTestCluster getLeaderCluster() {
    return clusterGroup.leaderCluster;
}
/** Returns the follower test cluster. */
protected final InternalTestCluster getFollowerCluster() {
    return clusterGroup.followerCluster;
}
/** Waits up to 30s for the given leader indices to reach at least yellow health. */
protected final ClusterHealthStatus ensureLeaderYellow(String... indices) {
    final TimeValue timeout = TimeValue.timeValueSeconds(30);
    return ensureColor(clusterGroup.leaderCluster, ClusterHealthStatus.YELLOW, timeout, false, indices);
}
/** Waits up to 30s for the given leader indices to reach green health. */
protected final ClusterHealthStatus ensureLeaderGreen(String... indices) {
    logger.info("ensure green leader indices {}", Arrays.toString(indices));
    final TimeValue timeout = TimeValue.timeValueSeconds(30);
    return ensureColor(clusterGroup.leaderCluster, ClusterHealthStatus.GREEN, timeout, false, indices);
}
/** Waits for the given follower indices to reach green health, allowing initializing shards. */
protected final ClusterHealthStatus ensureFollowerGreen(String... indices) {
    return ensureFollowerGreen(false, indices);
}
/**
 * Waits up to 60s for the given follower indices to reach green health.
 *
 * @param waitForNoInitializingShards whether the wait additionally requires that no shard is initializing
 */
protected final ClusterHealthStatus ensureFollowerGreen(boolean waitForNoInitializingShards, String... indices) {
    logger.info("ensure green follower indices {}", Arrays.toString(indices));
    final TimeValue timeout = TimeValue.timeValueSeconds(60);
    return ensureColor(clusterGroup.followerCluster, ClusterHealthStatus.GREEN, timeout, waitForNoInitializingShards, indices);
}
/**
 * Blocks until the given cluster reports at least {@code clusterHealthStatus} for the given indices,
 * all nodes are present, no shards are relocating, and pending cluster tasks have drained. On
 * timeout, dumps both clusters' state, pending tasks and local hot threads before failing the test.
 *
 * @return the actual health status reported (at least as good as the requested one)
 */
private ClusterHealthStatus ensureColor(
    TestCluster testCluster,
    ClusterHealthStatus clusterHealthStatus,
    TimeValue timeout,
    boolean waitForNoInitializingShards,
    String... indices
) {
    String color = clusterHealthStatus.name().toLowerCase(Locale.ROOT);
    // Reconstructed caller name, only used in log/failure messages (e.g. "ensureGreen").
    String method = "ensure" + Strings.capitalize(color);
    ClusterHealthRequest healthRequest = new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, indices).masterNodeTimeout(timeout)
        .timeout(timeout)
        .waitForStatus(clusterHealthStatus)
        .waitForEvents(Priority.LANGUID)
        .waitForNoRelocatingShards(true)
        .waitForNoInitializingShards(waitForNoInitializingShards)
        .waitForNodes(Integer.toString(testCluster.size()))
    ;
    ClusterHealthResponse actionGet = testCluster.client().admin().cluster().health(healthRequest).actionGet();
    if (actionGet.isTimedOut()) {
        // Dump diagnostics from BOTH clusters before failing, regardless of which one timed out.
        logger.info(
            """
                {} timed out:
                leader cluster state:
                {}
                leader cluster tasks:
                {}
                follower cluster state:
                {}
                follower cluster tasks:
                {}""",
            method,
            leaderClient().admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(),
            ESIntegTestCase.getClusterPendingTasks(leaderClient()),
            followerClient().admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(),
            ESIntegTestCase.getClusterPendingTasks(followerClient())
        );
        HotThreads.logLocalHotThreads(logger, Level.INFO, "hot threads at timeout", ReferenceDocs.LOGGING);
        fail("timed out waiting for " + color + " state");
    }
    // Lower enum value == healthier, so "at least" means value() <= requested value().
    assertThat(
        "Expected at least " + clusterHealthStatus + " but got " + actionGet.getStatus(),
        actionGet.getStatus().value(),
        lessThanOrEqualTo(clusterHealthStatus.value())
    );
    logger.debug("indices {} are {}", indices.length == 0 ? "[_all]" : indices, color);
    return actionGet.getStatus();
}
/** Resolves an index name on the leader cluster to its {@link Index} (name + UUID). */
protected final Index resolveLeaderIndex(String index) {
    GetIndexResponse response = leaderClient().admin().indices().prepareGetIndex(TEST_REQUEST_TIMEOUT).setIndices(index).get();
    assertTrue("index " + index + " not found", response.getSettings().containsKey(index));
    return new Index(index, response.getSettings().get(index).get(IndexMetadata.SETTING_INDEX_UUID));
}
/** Resolves an index name on the follower cluster to its {@link Index} (name + UUID). */
protected final Index resolveFollowerIndex(String index) {
    GetIndexResponse response = followerClient().admin().indices().prepareGetIndex(TEST_REQUEST_TIMEOUT).setIndices(index).get();
    assertTrue("index " + index + " not found", response.getSettings().containsKey(index));
    return new Index(index, response.getSettings().get(index).get(IndexMetadata.SETTING_INDEX_UUID));
}
/** Refreshes the given indices on the given client and asserts that no shard failed. */
protected final BroadcastResponse refresh(Client client, String... indices) {
    final BroadcastResponse response = client.admin().indices().prepareRefresh(indices).get();
    assertNoFailures(response);
    return response;
}
/**
 * Waits until every shard-follow task reports an empty write buffer (zero buffered operations and
 * zero buffered bytes).
 */
protected void ensureEmptyWriteBuffers() throws Exception {
    assertBusy(() -> {
        // NOTE(review): the follow stats are fetched through the leader client here — confirm this
        // is intended, as shard-follow tasks run on the follower cluster.
        FollowStatsAction.StatsResponses statsResponses = leaderClient().execute(
            FollowStatsAction.INSTANCE,
            new FollowStatsAction.StatsRequest()
        ).actionGet();
        for (FollowStatsAction.StatsResponse statsResponse : statsResponses.getStatsResponses()) {
            ShardFollowNodeTaskStatus status = statsResponse.status();
            assertThat(status.writeBufferOperationCount(), equalTo(0));
            assertThat(status.writeBufferSizeInBytes(), equalTo(0L));
        }
    });
}
/** Pauses following for each given follower index, then waits for all CCR tasks to disappear. */
protected void pauseFollow(String... indices) throws Exception {
    for (String index : indices) {
        assertAcked(
            followerClient().execute(PauseFollowAction.INSTANCE, new PauseFollowAction.Request(TEST_REQUEST_TIMEOUT, index)).actionGet()
        );
    }
    ensureNoCcrTasks();
}
/**
 * Waits (up to 30s) until no CCR work remains on the follower: no follow stats entries, no
 * shard-follow persistent tasks in cluster state, and no in-flight node tasks whose action starts
 * with the shard-follow task name.
 */
protected void ensureNoCcrTasks() throws Exception {
    assertBusy(() -> {
        CcrStatsAction.Response statsResponse = followerClient().execute(
            CcrStatsAction.INSTANCE,
            new CcrStatsAction.Request(TEST_REQUEST_TIMEOUT)
        ).actionGet();
        assertThat(
            "Follow stats not empty: " + Strings.toString(statsResponse.getFollowStats()),
            statsResponse.getFollowStats().getStatsResponses(),
            empty()
        );
        // No shard-follow persistent tasks may remain registered in the cluster state.
        final ClusterState clusterState = followerClient().admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
        PersistentTasksCustomMetadata tasks = clusterState.metadata().getProject().custom(PersistentTasksCustomMetadata.TYPE);
        Collection<PersistentTasksCustomMetadata.PersistentTask<?>> ccrTasks = tasks.tasks()
            .stream()
            .filter(t -> t.getTaskName().equals(ShardFollowTask.NAME))
            .toList();
        assertThat(ccrTasks, empty());
        // Also verify no node-level task is still executing shard-follow work.
        ListTasksRequest listTasksRequest = new ListTasksRequest();
        listTasksRequest.setDetailed(true);
        ListTasksResponse listTasksResponse = followerClient().admin().cluster().listTasks(listTasksRequest).get();
        int numNodeTasks = 0;
        for (TaskInfo taskInfo : listTasksResponse.getTasks()) {
            if (taskInfo.action().startsWith(ShardFollowTask.NAME)) {
                numNodeTasks++;
            }
        }
        assertThat(listTasksResponse.getTasks().toString(), numNodeTasks, equalTo(0));
    }, 30, TimeUnit.SECONDS);
}
/** Randomizes per test whether the generated index mappings keep {@code _source} enabled. */
@Before
public void setupSourceEnabledOrDisabled() {
    sourceEnabled = randomBoolean();
}
// When false, getIndexSettings() adds "_source": {"enabled": false} to the mapping.
protected boolean sourceEnabled;
/** Builds index-creation JSON with the given shard/replica counts and no additional settings. */
protected String getIndexSettings(final int numberOfShards, final int numberOfReplicas) throws IOException {
    return getIndexSettings(numberOfShards, numberOfReplicas, Collections.emptyMap());
}
/**
 * Builds the JSON body for index creation: settings (zero delayed-allocation timeout, 1s global
 * checkpoint sync, shard/replica counts, plus any additional settings) and a "doc" mapping with a
 * single integer field "f". When {@link #sourceEnabled} is false, {@code _source} is disabled.
 *
 * @return the JSON document as a string
 */
protected String getIndexSettings(
    final int numberOfShards,
    final int numberOfReplicas,
    final Map<String, String> additionalIndexSettings
) throws IOException {
    final String settings;
    try (XContentBuilder builder = jsonBuilder()) {
        builder.startObject();
        {
            builder.startObject("settings");
            {
                // Avoid the default 60s delayed allocation wait when test nodes are killed.
                builder.field(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), 0);
                builder.field(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "1s");
                builder.field("index.number_of_shards", numberOfShards);
                builder.field("index.number_of_replicas", numberOfReplicas);
                for (final Map.Entry<String, String> additionalSetting : additionalIndexSettings.entrySet()) {
                    builder.field(additionalSetting.getKey(), additionalSetting.getValue());
                }
            }
            builder.endObject();
            builder.startObject("mappings");
            {
                builder.startObject("doc");
                {
                    builder.startObject("properties");
                    {
                        builder.startObject("f");
                        {
                            builder.field("type", "integer");
                        }
                        builder.endObject();
                    }
                    builder.endObject();
                    if (sourceEnabled == false) {
                        builder.startObject("_source");
                        builder.field("enabled", false);
                        builder.endObject();
                    }
                }
                builder.endObject();
            }
            builder.endObject();
        }
        builder.endObject();
        settings = BytesReference.bytes(builder).utf8ToString();
    }
    return settings;
}
/** Builds a put-follow request waiting for one active shard. */
public static PutFollowAction.Request putFollow(String leaderIndex, String followerIndex) {
    return putFollow(leaderIndex, followerIndex, ActiveShardCount.ONE);
}
/**
 * Builds a put-follow request targeting the "leader_cluster" remote, with short retry/poll delays
 * and randomized read-request size and operation count; the master timeout is randomized to a
 * bounded value half of the time.
 */
public static PutFollowAction.Request putFollow(String leaderIndex, String followerIndex, ActiveShardCount waitForActiveShards) {
    final var request = new PutFollowAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT);
    request.setRemoteCluster("leader_cluster");
    request.setLeaderIndex(leaderIndex);
    request.setFollowerIndex(followerIndex);
    final var parameters = request.getParameters();
    parameters.setMaxRetryDelay(TimeValue.timeValueMillis(10));
    parameters.setReadPollTimeout(TimeValue.timeValueMillis(10));
    parameters.setMaxReadRequestSize(ByteSizeValue.ofBytes(between(1, 32 * 1024 * 1024)));
    parameters.setMaxReadRequestOperationCount(between(1, 10000));
    request.waitForActiveShards(waitForActiveShards);
    if (randomBoolean()) {
        request.masterNodeTimeout(TimeValue.timeValueSeconds(randomFrom(10, 20, 30)));
    }
    return request;
}
/**
 * Builds a resume-follow request with short retry/poll delays; the master timeout is randomized to
 * a bounded value half of the time.
 */
public static ResumeFollowAction.Request resumeFollow(String followerIndex) {
    final var request = new ResumeFollowAction.Request(TEST_REQUEST_TIMEOUT);
    request.setFollowerIndex(followerIndex);
    final var parameters = request.getParameters();
    parameters.setMaxRetryDelay(TimeValue.timeValueMillis(10));
    parameters.setReadPollTimeout(TimeValue.timeValueMillis(10));
    if (randomBoolean()) {
        request.masterNodeTimeout(TimeValue.timeValueSeconds(randomFrom(10, 20, 30)));
    }
    return request;
}
/**
 * This asserts the index is fully replicated from the leader index to the follower index. It first verifies that the seq_no_stats
 * on the follower equal the leader's; then verifies the existing pairs of (docId, seqNo) on the follower also equal the leader.
 */
protected void assertIndexFullyReplicatedToFollower(String leaderIndex, String followerIndex) throws Exception {
    logger.info("--> asserting <<docId,seqNo>> between {} and {}", leaderIndex, followerIndex);
    assertBusy(() -> {
        Map<Integer, List<DocIdSeqNoAndSource>> docsOnFollower = getDocIdAndSeqNos(clusterGroup.followerCluster, followerIndex);
        Map<Integer, List<DocIdSeqNoAndSource>> docsOnLeader = getDocIdAndSeqNos(clusterGroup.leaderCluster, leaderIndex);
        // Compute the symmetric difference per shard purely for a readable failure message;
        // the actual assertion below is full map equality.
        Map<Integer, Set<DocIdSeqNoAndSource>> mismatchedDocs = new HashMap<>();
        for (Map.Entry<Integer, List<DocIdSeqNoAndSource>> fe : docsOnFollower.entrySet()) {
            Set<DocIdSeqNoAndSource> d1 = Sets.difference(
                Sets.newHashSet(fe.getValue()),
                Sets.newHashSet(docsOnLeader.getOrDefault(fe.getKey(), Collections.emptyList()))
            );
            Set<DocIdSeqNoAndSource> d2 = Sets.difference(
                Sets.newHashSet(docsOnLeader.getOrDefault(fe.getKey(), Collections.emptyList())),
                Sets.newHashSet(fe.getValue())
            );
            if (d1.isEmpty() == false || d2.isEmpty() == false) {
                mismatchedDocs.put(fe.getKey(), Sets.union(d1, d2));
            }
        }
        assertThat("mismatched documents [" + mismatchedDocs + "]", docsOnFollower, equalTo(docsOnLeader));
    }, 120, TimeUnit.SECONDS);
    logger.info("--> asserting seq_no_stats between {} and {}", leaderIndex, followerIndex);
    assertBusy(() -> {
        // Collect seq_no_stats per shard id on both sides and require exact equality.
        Map<Integer, SeqNoStats> leaderStats = new HashMap<>();
        for (ShardStats shardStat : leaderClient().admin().indices().prepareStats(leaderIndex).clear().get().getShards()) {
            if (shardStat.getSeqNoStats() == null) {
                throw new AssertionError("leader seq_no_stats is not available [" + Strings.toString(shardStat) + "]");
            }
            leaderStats.put(shardStat.getShardRouting().shardId().id(), shardStat.getSeqNoStats());
        }
        Map<Integer, SeqNoStats> followerStats = new HashMap<>();
        for (ShardStats shardStat : followerClient().admin().indices().prepareStats(followerIndex).clear().get().getShards()) {
            if (shardStat.getSeqNoStats() == null) {
                throw new AssertionError("follower seq_no_stats is not available [" + Strings.toString(shardStat) + "]");
            }
            followerStats.put(shardStat.getShardRouting().shardId().id(), shardStat.getSeqNoStats());
        }
        assertThat(followerStats, equalTo(leaderStats));
    }, 120, TimeUnit.SECONDS);
}
/**
 * Collects the (docId, seqNo, source, version) tuples per shard id for the given index, reading
 * directly from started shard copies on the given cluster. Primary terms are normalized to 1
 * because the follower uses its own terms. Unassigned or not-yet-started shard copies are skipped.
 */
private Map<Integer, List<DocIdSeqNoAndSource>> getDocIdAndSeqNos(InternalTestCluster cluster, String index) throws IOException {
    final ClusterState state = cluster.client().admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
    List<ShardRouting> shardRoutings = state.routingTable().allShards(index);
    // Shuffle so we don't always read from the same copy of each shard.
    Randomness.shuffle(shardRoutings);
    final Map<Integer, List<DocIdSeqNoAndSource>> docs = new HashMap<>();
    for (ShardRouting shardRouting : shardRoutings) {
        if (shardRouting == null || shardRouting.assignedToNode() == false) {
            continue;
        }
        final var indexService = cluster.getInstance(IndicesService.class, state.nodes().get(shardRouting.currentNodeId()).getName())
            .indexService(shardRouting.index());
        if (indexService == null) {
            continue;
        }
        final var indexShard = indexService.getShardOrNull(shardRouting.id());
        if (indexShard == null || indexShard.routingEntry().started() == false) {
            continue;
        }
        try {
            final List<DocIdSeqNoAndSource> docsOnShard = IndexShardTestCase.getDocIdAndSeqNos(indexShard);
            logger.info("--> shard {} docs {} seq_no_stats {}", shardRouting, docsOnShard, indexShard.seqNoStats());
            docs.put(
                shardRouting.shardId().id(),
                docsOnShard.stream()
                    // normalize primary term as the follower use its own term
                    .map(d -> new DocIdSeqNoAndSource(d.id(), d.source(), d.seqNo(), 1L, d.version()))
                    .collect(Collectors.toList())
            );
        } catch (AlreadyClosedException e) {
            // Ignore this exception and try getting List<DocIdSeqNoAndSource> from other IndexShard instance.
        }
    }
    return docs;
}
/**
 * Waits (up to 60s) until at least {@code numDocsReplicated} documents are searchable in the given
 * index, refreshing before each count so newly indexed documents become visible.
 */
protected void atLeastDocsIndexed(Client client, String index, long numDocsReplicated) throws Exception {
    logger.info("waiting for at least [{}] documents to be indexed into index [{}]", numDocsReplicated, index);
    assertBusy(() -> {
        refresh(client, index);
        // size(0): we only need the total hit count, not the documents themselves.
        SearchRequest request = new SearchRequest(index);
        request.source(new SearchSourceBuilder().size(0));
        assertResponse(client.search(request), response -> {
            assertNotNull(response.getHits().getTotalHits());
            assertThat(response.getHits().getTotalHits().value(), greaterThanOrEqualTo(numDocsReplicated));
        });
    }, 60, TimeUnit.SECONDS);
}
/**
 * Waits (up to 60s) until the global checkpoint of the given shard reaches at least
 * {@code minimumGlobalCheckpoint}. Missing seq_no_stats cause an {@link AssertionError}, which
 * makes {@code assertBusy} retry.
 */
protected void awaitGlobalCheckpointAtLeast(Client client, ShardId shardId, long minimumGlobalCheckpoint) throws Exception {
    logger.info("waiting for the global checkpoint on [{}] at least [{}]", shardId, minimumGlobalCheckpoint);
    assertBusy(() -> {
        // Find the stats entry for exactly this shard id (or null if not present yet).
        ShardStats stats = client.admin()
            .indices()
            .prepareStats(shardId.getIndexName())
            .clear()
            .get()
            .asMap()
            .entrySet()
            .stream()
            .filter(e -> e.getKey().shardId().equals(shardId))
            .map(Map.Entry::getValue)
            .findFirst()
            .orElse(null);
        if (stats == null || stats.getSeqNoStats() == null) {
            throw new AssertionError("seq_no_stats for shard [" + shardId + "] is not found"); // causes assertBusy to retry
        }
        assertThat(
            Strings.toString(stats.getSeqNoStats()),
            stats.getSeqNoStats().getGlobalCheckpoint(),
            greaterThanOrEqualTo(minimumGlobalCheckpoint)
        );
    }, 60, TimeUnit.SECONDS);
}
/**
 * Asserts that the max_seq_no_of_updates of every follower shard eventually equals the maximum
 * value observed across the corresponding leader shard copies.
 *
 * @param leaderIndex    the leader {@link Index} (name + UUID)
 * @param followerIndex  the follower {@link Index} (name + UUID)
 * @param numberOfShards number of shards in both indices
 */
protected void assertMaxSeqNoOfUpdatesIsTransferred(Index leaderIndex, Index followerIndex, int numberOfShards) throws Exception {
    assertBusy(() -> {
        long[] msuOnLeader = new long[numberOfShards];
        for (int i = 0; i < msuOnLeader.length; i++) {
            msuOnLeader[i] = SequenceNumbers.UNASSIGNED_SEQ_NO;
        }
        // Gather the maximum seq_no of updates/deletes per shard across all leader copies.
        Set<String> leaderNodes = getLeaderCluster().nodesInclude(leaderIndex.getName());
        for (String leaderNode : leaderNodes) {
            IndicesService indicesService = getLeaderCluster().getInstance(IndicesService.class, leaderNode);
            for (int i = 0; i < numberOfShards; i++) {
                IndexShard shard = indicesService.getShardOrNull(new ShardId(leaderIndex, i));
                if (shard != null) {
                    try {
                        msuOnLeader[i] = SequenceNumbers.max(msuOnLeader[i], shard.getMaxSeqNoOfUpdatesOrDeletes());
                    } catch (AlreadyClosedException ignored) {
                        return;
                    }
                }
            }
        }
        Set<String> followerNodes = getFollowerCluster().nodesInclude(followerIndex.getName());
        for (String followerNode : followerNodes) {
            IndicesService indicesService = getFollowerCluster().getInstance(IndicesService.class, followerNode);
            for (int i = 0; i < numberOfShards; i++) {
                // BUG FIX: this previously looked up follower shards with new ShardId(leaderIndex, i).
                // The follower index has a different UUID, so getShardOrNull always returned null and
                // the assertion below never executed, making this check vacuous.
                IndexShard shard = indicesService.getShardOrNull(new ShardId(followerIndex, i));
                if (shard != null) {
                    try {
                        assertThat(shard.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(msuOnLeader[i]));
                    } catch (AlreadyClosedException ignored) {
                        // Shard closed concurrently; skip it and let assertBusy retry if needed.
                    }
                }
            }
        }
    });
}
/**
 * Waits until at least a give number of document is visible for searchers
 *
 * @param numDocs number of documents to wait for
 * @param indexer a {@link org.elasticsearch.test.BackgroundIndexer}. Will be first checked for documents indexed.
 *                This saves on unneeded searches.
 */
public void waitForDocs(final long numDocs, final BackgroundIndexer indexer) throws Exception {
    // indexing threads can wait for up to ~1m before retrying when they first try to index into a shard which is not STARTED.
    final long maxWaitTimeMs = Math.max(90 * 1000, 200 * numDocs);
    assertBusy(() -> {
        long lastKnownCount = indexer.totalIndexedDocs();
        // Only search once the indexer claims enough documents exist; otherwise skip the search.
        if (lastKnownCount >= numDocs) {
            try {
                long count = SearchResponseUtils.getTotalHitsValue(
                    indexer.getClient().prepareSearch().setTrackTotalHits(true).setSize(0).setQuery(QueryBuilders.matchAllQuery())
                );
                if (count == lastKnownCount) {
                    // no progress - try to refresh for the next time
                    indexer.getClient().admin().indices().prepareRefresh().get();
                }
                lastKnownCount = count;
            } catch (Exception e) { // count now acts like search and barfs if all shards failed...
                logger.debug("failed to executed count", e);
                throw e;
            }
        }
        if (logger.isDebugEnabled()) {
            if (lastKnownCount < numDocs) {
                logger.debug("[{}] docs indexed. waiting for [{}]", lastKnownCount, numDocs);
            } else {
                logger.debug("[{}] docs visible for search (needed [{}])", lastKnownCount, numDocs);
            }
        }
        // Fails (and retries via assertBusy) until enough documents are visible.
        assertThat(lastKnownCount, greaterThanOrEqualTo(numDocs));
    }, maxWaitTimeMs, TimeUnit.MILLISECONDS);
}
/**
 * Kicks off a snapshot restore and returns a future that completes with the {@link RestoreInfo}
 * once the restore entry disappears from {@link RestoreInProgress} in the cluster state. A cluster
 * state listener is registered to observe completion and removes itself when done.
 */
protected PlainActionFuture<RestoreInfo> startRestore(
    ClusterService clusterService,
    RestoreService restoreService,
    RestoreSnapshotRequest restoreSnapshotRequest
) {
    final var future = new PlainActionFuture<RestoreInfo>();
    restoreService.restoreSnapshot(
        ProjectId.DEFAULT,
        restoreSnapshotRequest,
        future.delegateFailure((delegate, restoreCompletionResponse) -> {
            assertNull(restoreCompletionResponse.restoreInfo());
            // this would only be non-null if the restore was a no-op, but that would be a test bug
            final Snapshot snapshot = restoreCompletionResponse.snapshot();
            final String uuid = restoreCompletionResponse.uuid();
            final ClusterStateListener clusterStateListener = new ClusterStateListener() {
                @Override
                public void clusterChanged(ClusterChangedEvent changedEvent) {
                    final RestoreInProgress.Entry prevEntry = restoreInProgress(changedEvent.previousState(), uuid);
                    final RestoreInProgress.Entry newEntry = restoreInProgress(changedEvent.state(), uuid);
                    assertNotNull(prevEntry);
                    // prevEntry could be null if there was a master failover and (due to batching) we missed the cluster state update
                    // that completed the restore, but that doesn't happen in these tests
                    if (newEntry == null) {
                        // Entry gone from RestoreInProgress => restore finished; stop listening and
                        // build the RestoreInfo from the last observed entry's shard statuses.
                        clusterService.removeListener(this);
                        Map<ShardId, RestoreInProgress.ShardRestoreStatus> shards = prevEntry.shards();
                        RestoreInfo ri = new RestoreInfo(
                            prevEntry.snapshot().getSnapshotId().getName(),
                            prevEntry.indices(),
                            shards.size(),
                            shards.size() - RestoreService.failedShards(shards)
                        );
                        logger.debug("restore of [{}] completed", snapshot);
                        delegate.onResponse(ri);
                    } // else restore not completed yet, wait for next cluster state update
                }
            };
            clusterService.addListener(clusterStateListener);
        })
    );
    return future;
}
/**
 * Submits a cluster state update that replaces {@link AutoFollowMetadata} with an empty instance
 * and removes {@link PersistentTasksCustomMetadata} entirely, then blocks until the update has been
 * processed. Note: failures are not propagated — onFailure only releases the latch.
 */
static void removeCCRRelatedMetadataFromClusterState(ClusterService clusterService) throws Exception {
    CountDownLatch latch = new CountDownLatch(1);
    clusterService.submitUnbatchedStateUpdateTask("remove-ccr-related-metadata", new ClusterStateUpdateTask() {
        @Override
        public ClusterState execute(ClusterState currentState) throws Exception {
            AutoFollowMetadata empty = new AutoFollowMetadata(Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
            ClusterState.Builder newState = ClusterState.builder(currentState);
            newState.putProjectMetadata(
                ProjectMetadata.builder(currentState.metadata().getProject())
                    .putCustom(AutoFollowMetadata.TYPE, empty)
                    .removeCustom(PersistentTasksCustomMetadata.TYPE)
                    .build()
            );
            return newState.build();
        }
        @Override
        public void onFailure(Exception e) {
            // Intentionally swallowed: teardown proceeds even if the cleanup update fails.
            latch.countDown();
        }
        @Override
        public void clusterStateProcessed(ClusterState oldState, ClusterState newState) {
            latch.countDown();
        }
    });
    latch.await();
}
static
|
CcrIntegTestCase
|
java
|
apache__flink
|
flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/operations/converters/AbstractCreateMaterializedTableConverter.java
|
{
"start": 2323,
"end": 2461
}
|
class ____ converting {@link SqlCreateMaterializedTable} and it's children to create
* materialized table operations.
*/
public abstract
|
for
|
java
|
spring-projects__spring-framework
|
spring-beans/src/test/java/org/springframework/beans/support/PropertyComparatorTests.java
|
{
"start": 2648,
"end": 3302
}
|
class ____ implements Comparable<Object> {
private String nickName;
private String firstName;
private String lastName;
public String getNickName() {
return nickName;
}
public void setNickName(String nickName) {
this.nickName = nickName;
}
public String getFirstName() {
return firstName;
}
public void setFirstName(String firstName) {
this.firstName = firstName;
}
public String getLastName() {
return lastName;
}
public void setLastName(String lastName) {
this.lastName = lastName;
}
@Override
public int compareTo(Object o) {
return this.nickName.compareTo(((Dog) o).nickName);
}
}
}
|
Dog
|
java
|
processing__processing4
|
java/test/processing/mode/java/ModeSketchRuntimePathFactoryTest.java
|
{
"start": 1024,
"end": 1992
}
|
class ____ {
private RuntimePathBuilder.RuntimePathFactoryStrategy factory;
private JavaMode testMode;
private List<ImportStatement> testImports;
private Sketch testSketch;
private List<String> classpath;
@Before
public void setUp() throws Exception {
RuntimePathBuilder builder = new RuntimePathBuilder();
factory = builder::buildModeSketchPath;
testMode = RuntimePathFactoryTestUtil.createTestJavaMode();
testImports = RuntimePathFactoryTestUtil.createTestImports();
testSketch = RuntimePathFactoryTestUtil.createTestSketch();
classpath = factory.buildClasspath(testMode, testImports, testSketch);
}
@Test
public void buildClasspathLength() {
assertEquals(3, classpath.size());
}
@Test
public void buildClasspathValues() {
assertEquals("library6", classpath.get(0));
assertEquals("javax.library7", classpath.get(1));
assertEquals("library8", classpath.get(2));
}
}
|
ModeSketchRuntimePathFactoryTest
|
java
|
apache__dubbo
|
dubbo-plugin/dubbo-auth/src/test/java/org/apache/dubbo/auth/DefaultAccessKeyStorageTest.java
|
{
"start": 1161,
"end": 1771
}
|
class ____ {
@Test
void testGetAccessKey() {
URL url = URL.valueOf("dubbo://10.10.10.10:2181")
.addParameter(Constants.ACCESS_KEY_ID_KEY, "ak")
.addParameter(Constants.SECRET_ACCESS_KEY_KEY, "sk");
DefaultAccessKeyStorage defaultAccessKeyStorage = new DefaultAccessKeyStorage();
AccessKeyPair accessKey = defaultAccessKeyStorage.getAccessKey(url, mock(Invocation.class));
assertNotNull(accessKey);
assertEquals(accessKey.getAccessKey(), "ak");
assertEquals(accessKey.getSecretKey(), "sk");
}
}
|
DefaultAccessKeyStorageTest
|
java
|
alibaba__nacos
|
client-basic/src/main/java/com/alibaba/nacos/client/env/convert/CompositeConverter.java
|
{
"start": 817,
"end": 1342
}
|
class ____ {
private final Map<Class<?>, AbstractPropertyConverter<?>> converterRegistry = new HashMap<>();
public CompositeConverter() {
converterRegistry.put(Boolean.class, new BooleanConverter());
converterRegistry.put(Integer.class, new IntegerConverter());
converterRegistry.put(Long.class, new LongConverter());
}
/**
* convert property to target type.
* @param property the property gets from environments
* @param targetClass target
|
CompositeConverter
|
java
|
spring-projects__spring-boot
|
module/spring-boot-session-jdbc/src/test/java/org/springframework/boot/session/jdbc/autoconfigure/JdbcSessionAutoConfigurationTests.java
|
{
"start": 12693,
"end": 13035
}
|
class ____ {
@Bean
JdbcSessionDataSourceScriptDatabaseInitializer customInitializer(DataSource dataSource,
JdbcSessionProperties properties) {
return new JdbcSessionDataSourceScriptDatabaseInitializer(dataSource, properties);
}
}
@Configuration(proxyBeanMethods = false)
static
|
CustomJdbcSessionDatabaseInitializerConfiguration
|
java
|
apache__dubbo
|
dubbo-config/dubbo-config-api/src/test/java/org/apache/dubbo/config/bootstrap/builders/AbstractInterfaceBuilderTest.java
|
{
"start": 11049,
"end": 11442
}
|
class ____ extends AbstractInterfaceBuilder<InterfaceConfig, InterfaceBuilder> {
public InterfaceConfig build() {
InterfaceConfig config = new InterfaceConfig();
super.build(config);
return config;
}
@Override
protected InterfaceBuilder getThis() {
return this;
}
}
private static
|
InterfaceBuilder
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManagerImpl.java
|
{
"start": 1886,
"end": 9077
}
|
class ____ implements ResourceProfilesManager {
private static final Logger LOG =
LoggerFactory.getLogger(ResourceProfilesManagerImpl.class);
private final Map<String, Resource> profiles = new ConcurrentHashMap<>();
private Configuration conf;
private boolean profileEnabled = false;
private static final String MEMORY = ResourceInformation.MEMORY_MB.getName();
private static final String VCORES = ResourceInformation.VCORES.getName();
public static final String DEFAULT_PROFILE = "default";
public static final String MINIMUM_PROFILE = "minimum";
public static final String MAXIMUM_PROFILE = "maximum";
protected final ReentrantReadWriteLock.ReadLock readLock;
protected final ReentrantReadWriteLock.WriteLock writeLock;
private static final String[] MANDATORY_PROFILES = {DEFAULT_PROFILE,
MINIMUM_PROFILE, MAXIMUM_PROFILE};
private static final String FEATURE_NOT_ENABLED_MSG =
"Resource profile is not enabled, please "
+ "enable resource profile feature before using its functions."
+ " (by setting " + YarnConfiguration.RM_RESOURCE_PROFILES_ENABLED
+ " to true)";
public ResourceProfilesManagerImpl() {
ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
readLock = lock.readLock();
writeLock = lock.writeLock();
}
public void init(Configuration config) throws IOException {
conf = config;
loadProfiles();
}
/**
 * Loads resource profiles from the configured JSON source file.
 *
 * <p>No-op when the resource-profiles feature is disabled. After parsing the
 * user-defined profiles, the minimum/maximum profiles are taken from
 * {@code ResourceUtils} (i.e. resource-types.xml) and the presence of all
 * mandatory profiles is verified.
 *
 * @throws IOException on a malformed profile definition, an attempt to
 *         define the reserved minimum/maximum profiles, or a missing
 *         mandatory profile
 */
private void loadProfiles() throws IOException {
  profileEnabled =
      conf.getBoolean(YarnConfiguration.RM_RESOURCE_PROFILES_ENABLED,
          YarnConfiguration.DEFAULT_RM_RESOURCE_PROFILES_ENABLED);
  if (!profileEnabled) {
    return;
  }
  String sourceFile =
      conf.get(YarnConfiguration.RM_RESOURCE_PROFILES_SOURCE_FILE,
          YarnConfiguration.DEFAULT_RM_RESOURCE_PROFILES_SOURCE_FILE);
  // Prefer the copy found on the classpath; otherwise use the name as given.
  String resourcesFile = sourceFile;
  ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
  if (classLoader == null) {
    classLoader = ResourceProfilesManagerImpl.class.getClassLoader();
  }
  if (classLoader != null) {
    URL tmp = classLoader.getResource(sourceFile);
    if (tmp != null) {
      resourcesFile = tmp.getPath();
    }
  }
  ObjectMapper mapper = new ObjectMapper();
  // Top-level JSON object: profile name -> { resource name -> value }.
  Map<?, ?> data = mapper.readValue(new File(resourcesFile), Map.class);
  for (Map.Entry<?, ?> entry : data.entrySet()) {
    String profileName = entry.getKey().toString();
    if (profileName.isEmpty()) {
      throw new IOException(
          "Name of resource profile cannot be an empty string");
    }
    if (profileName.equals(MINIMUM_PROFILE) || profileName.equals(
        MAXIMUM_PROFILE)) {
      throw new IOException(String.format(
          "profile={%s, %s} is should not be specified "
              + "inside %s, they will be loaded from resource-types.xml",
          MINIMUM_PROFILE, MAXIMUM_PROFILE, sourceFile));
    }
    if (entry.getValue() instanceof Map) {
      Map<?, ?> profileInfo = (Map<?, ?>) entry.getValue();
      // ensure memory and vcores are specified
      if (!profileInfo.containsKey(MEMORY)
          || !profileInfo.containsKey(VCORES)) {
        throw new IOException(
            "Illegal resource profile definition; profile '" + profileName
                + "' must contain '" + MEMORY + "' and '" + VCORES + "'");
      }
      Resource resource = parseResource(profileInfo);
      profiles.put(profileName, resource);
      LOG.info(
          "Added profile '" + profileName + "' with resources: " + resource);
    }
  }
  // add minimum/maximum profile (always sourced from resource-types.xml)
  profiles.put(MINIMUM_PROFILE,
      ResourceUtils.getResourceTypesMinimumAllocation());
  profiles.put(MAXIMUM_PROFILE,
      ResourceUtils.getResourceTypesMaximumAllocation());
  // check to make sure mandatory profiles are present
  for (String profile : MANDATORY_PROFILES) {
    if (!profiles.containsKey(profile)) {
      throw new IOException(
          "Mandatory profile missing '" + profile + "' missing. "
              + Arrays.toString(MANDATORY_PROFILES) + " must be present");
    }
  }
  LOG.info("Loaded profiles: " + profiles.keySet());
}
/**
 * Converts one profile's key/value map into a {@link Resource}.
 *
 * <p>{@code memory} and {@code vcores} populate the built-in fields; any
 * other key must be a resource type registered with {@code ResourceUtils}.
 *
 * @param profileInfo map of resource name to value string (e.g. "2048Mi")
 * @throws IOException when an entry names an unrecognized resource type
 */
private Resource parseResource(Map profileInfo) throws IOException {
  Resource resource = Resource.newInstance(0, 0);
  Map<String, ResourceInformation> resourceTypes = ResourceUtils
      .getResourceTypes();
  for (Object e : profileInfo.entrySet()) {
    Map.Entry<?, ?> resourceEntry = (Map.Entry<?, ?>) e;
    String resourceName = resourceEntry.getKey().toString();
    ResourceInformation resourceValue = fromString(resourceName,
        resourceEntry.getValue().toString());
    if (resourceName.equals(MEMORY)) {
      resource.setMemorySize(resourceValue.getValue());
    } else if (resourceName.equals(VCORES)) {
      // getValue() is a long; vcores is stored as an int.
      resource.setVirtualCores((int) resourceValue.getValue());
    } else if (resourceTypes.containsKey(resourceName)) {
      resource.setResourceInformation(resourceName, resourceValue);
    } else {
      throw new IOException("Unrecognized resource type '" + resourceName
          + "'. Recognized resource types are '" + resourceTypes.keySet()
          + "'");
    }
  }
  return resource;
}
// Guard used by every public accessor: fail fast when the resource-profiles
// feature flag is disabled.
private void checkAndThrowExceptionWhenFeatureDisabled()
throws YARNFeatureNotEnabledException {
if (!profileEnabled) {
throw new YARNFeatureNotEnabledException(FEATURE_NOT_ENABLED_MSG);
}
}
@Override
// Returns a defensive copy of the named profile's resources.
// @throws YarnException if the feature is disabled, the name is null, or
//         no profile with that name exists
public Resource getProfile(String profile) throws YarnException{
checkAndThrowExceptionWhenFeatureDisabled();
if (profile == null) {
throw new YarnException("Profile name cannot be null");
}
Resource profileRes = profiles.get(profile);
if (profileRes == null) {
throw new YarnException(
"Resource profile '" + profile + "' not found");
}
// Clone so callers cannot mutate the stored profile.
return Resources.clone(profileRes);
}
@Override
// Returns an unmodifiable *view* of the live internal map; later reloads
// are visible through it. NOTE(review): access is not taken under readLock
// here — confirm whether concurrent reload can race with readers.
public Map<String, Resource> getResourceProfiles()
throws YARNFeatureNotEnabledException {
checkAndThrowExceptionWhenFeatureDisabled();
return Collections.unmodifiableMap(profiles);
}
@Override
@VisibleForTesting
// Test hook: clears all cached profiles and re-reads the source file.
public void reloadProfiles() throws IOException {
profiles.clear();
loadProfiles();
}
@Override
// Convenience accessor for the "default" profile.
public Resource getDefaultProfile() throws YarnException {
return getProfile(DEFAULT_PROFILE);
}
@Override
// Convenience accessor for the minimum-allocation profile.
public Resource getMinimumProfile() throws YarnException {
return getProfile(MINIMUM_PROFILE);
}
@Override
// Convenience accessor for the maximum-allocation profile.
public Resource getMaximumProfile() throws YarnException {
return getProfile(MAXIMUM_PROFILE);
}
/**
 * Splits a value such as "2048" or "4096Mi" into its numeric prefix and the
 * trailing units (as recognized by {@code ResourceUtils.getUnits}) and wraps
 * them in a {@link ResourceInformation}.
 */
private ResourceInformation fromString(String name, String value) {
  String units = ResourceUtils.getUnits(value);
  // parseLong avoids the needless Long boxing of Long.valueOf.
  long resourceValue =
      Long.parseLong(value.substring(0, value.length() - units.length()));
  return ResourceInformation.newInstance(name, units, resourceValue);
}
}
|
ResourceProfilesManagerImpl
|
java
|
apache__kafka
|
streams/src/main/java/org/apache/kafka/streams/state/internals/SessionStoreBuilder.java
|
{
"start": 1142,
"end": 2671
}
|
/**
 * Builds a {@link SessionStore}: the supplier's inner store is optionally
 * wrapped with change-logging and caching layers, then always metered.
 */
class ____<K, V> extends AbstractStoreBuilder<K, V, SessionStore<K, V>> {
private final SessionBytesStoreSupplier storeSupplier;
// Supplier and its metricsScope must be non-null; name comes from supplier.
public SessionStoreBuilder(final SessionBytesStoreSupplier storeSupplier,
final Serde<K> keySerde,
final Serde<V> valueSerde,
final Time time) {
super(Objects.requireNonNull(storeSupplier, "storeSupplier cannot be null").name(), keySerde, valueSerde, time);
Objects.requireNonNull(storeSupplier.metricsScope(), "storeSupplier's metricsScope can't be null");
this.storeSupplier = storeSupplier;
}
@Override
public SessionStore<K, V> build() {
// Wrapping order: inner -> logging -> caching -> metering (outermost).
return new MeteredSessionStore<>(
maybeWrapCaching(maybeWrapLogging(storeSupplier.get())),
storeSupplier.metricsScope(),
keySerde,
valueSerde,
time);
}
// Adds the caching layer only when caching is enabled on the builder.
private SessionStore<Bytes, byte[]> maybeWrapCaching(final SessionStore<Bytes, byte[]> inner) {
if (!enableCaching) {
return inner;
}
return new CachingSessionStore(inner, storeSupplier.segmentIntervalMs());
}
// Adds the changelog layer only when logging is enabled on the builder.
private SessionStore<Bytes, byte[]> maybeWrapLogging(final SessionStore<Bytes, byte[]> inner) {
if (!enableLogging) {
return inner;
}
return new ChangeLoggingSessionBytesStore(inner);
}
// Exposes the supplier's retention period (how long sessions are kept).
public long retentionPeriod() {
return storeSupplier.retentionPeriod();
}
}
|
SessionStoreBuilder
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinIpAggregatorFunctionTests.java
|
{
"start": 891,
"end": 1979
}
|
/**
 * Tests the {@code min} aggregator over IP values: feeds randomly generated
 * encoded IPs and checks the result equals the natural-order minimum.
 */
class ____ extends AggregatorFunctionTestCase {
    @Override
    protected SourceOperator simpleInput(BlockFactory blockFactory, int size) {
        // Random IPv4/IPv6 addresses, point-encoded into BytesRefs.
        return new SequenceBytesRefBlockSourceOperator(
            blockFactory,
            IntStream.range(0, size).mapToObj(l -> new BytesRef(InetAddressPoint.encode(randomIp(randomBoolean()))))
        );
    }

    @Override
    protected AggregatorFunctionSupplier aggregatorFunction() {
        return new MinIpAggregatorFunctionSupplier();
    }

    @Override
    protected String expectedDescriptionOfAggregator() {
        return "min of ips";
    }

    @Override
    public void assertSimpleOutput(List<Page> input, Block result) {
        // Fix: local was misleadingly named "max" although it holds the
        // minimum (natural BytesRef order) over all input values.
        Optional<BytesRef> min = input.stream().flatMap(p -> allBytesRefs(p.getBlock(0))).min(Comparator.naturalOrder());
        if (min.isEmpty()) {
            // No input values -> aggregate result must be null.
            assertThat(result.isNull(0), equalTo(true));
            return;
        }
        assertThat(result.isNull(0), equalTo(false));
        assertThat(BlockUtils.toJavaObject(result, 0), equalTo(min.get()));
    }
}
|
MinIpAggregatorFunctionTests
|
java
|
spring-projects__spring-framework
|
buildSrc/src/main/java/org/springframework/build/multirelease/MultiReleaseJarValidateTask.java
|
{
"start": 1070,
"end": 1504
}
|
/**
 * Gradle task that runs the JDK's {@code jar --validate} (via the
 * jdk.jartool module) against a multi-release JAR.
 */
class ____ extends JavaExec {
public MultiReleaseJarValidateTask() {
getMainModule().set("jdk.jartool");
// Arguments are resolved lazily so the jar path is read at execution time.
getArgumentProviders().add(() -> List.of("--validate", "--file", getJar().get().getAsFile().getAbsolutePath()));
}
// Injected by Gradle's managed-property machinery.
@Inject
protected abstract JavaToolchainService getJavaToolchainService();
// The JAR file to validate; relative path sensitivity keeps the task
// build-cache friendly across machines.
@InputFile
@PathSensitive(PathSensitivity.RELATIVE)
public abstract RegularFileProperty getJar();
}
|
MultiReleaseJarValidateTask
|
java
|
assertj__assertj-core
|
assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/api/recursive/comparison/fields/RecursiveComparisonAssert_isEqualTo_ignoringFieldsOfTypesMatchingRegexes_Test.java
|
{
"start": 6868,
"end": 9065
}
|
// Fixture holding NumberHolders keyed by their varargs position ("key 0",
// "key 1", ...).
class ____ {
  private final Map<String, NumberHolder> holders = new HashMap<>();

  WithNumberHolderMap(NumberHolder... holders) {
    int index = 0;
    for (NumberHolder holder : holders) {
      this.holders.put("key " + index++, holder);
    }
  }

  Map<String, NumberHolder> getNumberHoldersMap() {
    return holders;
  }
}
// GIVEN
final Number intValue = 12;
final Double doubleValueA = 12.34;
final Double doubleValueB = 56.78;
final NumberHolder[] holdersA = array(new NumberHolder(intValue), new NumberHolder(doubleValueA));
final NumberHolder[] holdersB = array(new NumberHolder(intValue), new NumberHolder(doubleValueB));
recursiveComparisonConfiguration.ignoreFieldsOfTypesMatchingRegexes(".*NumberHolder");
// WHEN/THEN
then(new WithNumberHolderMap(holdersA)).usingRecursiveComparison(recursiveComparisonConfiguration)
.isEqualTo(new WithNumberHolderMap(holdersB));
}
@Test
void does_not_support_ignoring_primitive_types_but_only_their_wrapper_types() {
// GIVEN
Person actual = new Person("John");
actual.home.address.number = 1;
Person expected = new Person("John");
expected.home.address.number = 2;
// "int" names the primitive type; ignoring applies only to wrapper types,
// so the differing primitive field must still be reported.
recursiveComparisonConfiguration.ignoreFieldsOfTypesMatchingRegexes("int");
// WHEN/THEN
compareRecursivelyFailsWithDifferences(actual, expected, javaTypeDiff("home.address.number",
actual.home.address.number,
expected.home.address.number));
}
@Test
void evaluates_expected_when_actual_field_is_null_and_strict_type_checking_is_enabled() {
// GIVEN
Person actual = new Person("John");
// actual.home is null, so the type to match against the regex must come
// from the expected side (per the test name) — the comparison still
// succeeds because .*Home fields are ignored.
actual.home = null;
Person expected = new Person("John");
expected.home.address.number = 123;
// WHEN/THEN
then(actual).usingRecursiveComparison(recursiveComparisonConfiguration)
.ignoringFieldsOfTypesMatchingRegexes(".*Home")
.withStrictTypeChecking()
.isEqualTo(expected);
}
}
|
WithNumberHolderMap
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/objectid/ObjectId2759Test.java
|
{
"start": 918,
"end": 2068
}
|
// Test fixture: a Bee with a back-reference to its Hive. The hive is
// serialized as its id only (property "hiveId") via
// @JsonIdentityReference(alwaysAsId = true).
class ____ {
public Long id;
@JsonIdentityInfo(generator = ObjectIdGenerators.PropertyGenerator.class, property = "id")
@JsonIdentityReference(alwaysAsId = true)
@JsonProperty("hiveId")
Hive hive;
// No-arg constructor required by Jackson for deserialization.
public Bee() { }
public Bee(Long id, Hive hive) {
this.id = id;
this.hive = hive;
}
public Hive getHive() {
return hive;
}
public void setHive(Hive hive) {
this.hive = hive;
}
}
@Test
// Regression test for databind issue 2759: serializing a hive whose bee
// points back at it must not emit the object-id property twice.
public void testObjectId2759() throws Exception
{
Hive hive = new Hive(100500L, "main hive");
hive.addBee(new Bee(1L, hive));
ObjectMapper mapper = newJsonMapper();
final String json = mapper.writerWithDefaultPrettyPrinter()
.writeValueAsString(hive);
// Re-reading with FAIL_ON_READING_DUP_TREE_KEY detects any duplicated
// property in the produced JSON.
try {
mapper.readerFor(JsonNode.class)
.with(DeserializationFeature.FAIL_ON_READING_DUP_TREE_KEY)
.readValue(json);
} catch (DatabindException e) {
fail("Should not have duplicates, but JSON content has: "+json);
}
}
}
|
Bee
|
java
|
hibernate__hibernate-orm
|
tooling/metamodel-generator/src/test/java/org/hibernate/processor/test/collectionbasictype/ElementCollectionWithConverterTest.java
|
{
"start": 1341,
"end": 1471
}
|
class
____(
Item.class,
"providers",
String.class,
"Generic types or attribute
|
assertListAttributeTypeInMetaModelFor
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
|
{
"start": 2106,
"end": 17536
}
|
class ____ {
@Private
@Unstable
public static ApplicationReport newInstance(ApplicationId applicationId,
ApplicationAttemptId applicationAttemptId, String user, String queue,
String name, String host, int rpcPort, Token clientToAMToken,
YarnApplicationState state, String diagnostics, String url,
long startTime, long launchTime, long finishTime,
FinalApplicationStatus finalStatus,
ApplicationResourceUsageReport appResources, String origTrackingUrl,
float progress, String applicationType, Token amRmToken) {
return newInstance(applicationId, applicationAttemptId, user, queue, name,
host, rpcPort, clientToAMToken, state, diagnostics, url,
startTime, startTime, launchTime, finishTime, finalStatus, appResources,
origTrackingUrl, progress, applicationType, amRmToken);
}
@Private
@Unstable
public static ApplicationReport newInstance(ApplicationId applicationId,
ApplicationAttemptId applicationAttemptId, String user, String queue,
String name, String host, int rpcPort, Token clientToAMToken,
YarnApplicationState state, String diagnostics, String url,
long startTime, long submitTime, long launchTime, long finishTime,
FinalApplicationStatus finalStatus,
ApplicationResourceUsageReport appResources, String origTrackingUrl,
float progress, String applicationType, Token amRmToken) {
ApplicationReport report = Records.newRecord(ApplicationReport.class);
report.setApplicationId(applicationId);
report.setCurrentApplicationAttemptId(applicationAttemptId);
report.setUser(user);
report.setQueue(queue);
report.setName(name);
report.setHost(host);
report.setRpcPort(rpcPort);
report.setClientToAMToken(clientToAMToken);
report.setYarnApplicationState(state);
report.setDiagnostics(diagnostics);
report.setTrackingUrl(url);
report.setStartTime(startTime);
report.setSubmitTime(submitTime);
report.setLaunchTime(launchTime);
report.setFinishTime(finishTime);
report.setFinalApplicationStatus(finalStatus);
report.setApplicationResourceUsageReport(appResources);
report.setOriginalTrackingUrl(origTrackingUrl);
report.setProgress(progress);
report.setApplicationType(applicationType);
report.setAMRMToken(amRmToken);
return report;
}
@Private
@Unstable
// Overload without submit/launch timestamps; both are forwarded as 0.
public static ApplicationReport newInstance(ApplicationId applicationId,
ApplicationAttemptId applicationAttemptId, String user, String queue,
String name, String host, int rpcPort, Token clientToAMToken,
YarnApplicationState state, String diagnostics, String url,
long startTime, long finishTime,
FinalApplicationStatus finalStatus,
ApplicationResourceUsageReport appResources, String origTrackingUrl,
float progress, String applicationType, Token amRmToken,
Set<String> tags, boolean unmanagedApplication, Priority priority,
String appNodeLabelExpression, String amNodeLabelExpression) {
// Delegates with submitTime = 0 and launchTime = 0 for callers that do
// not track those timestamps.
ApplicationReport report =
newInstance(applicationId, applicationAttemptId, user, queue, name,
host, rpcPort, clientToAMToken, state, diagnostics, url,
startTime, 0, 0, finishTime, finalStatus, appResources,
origTrackingUrl, progress, applicationType, amRmToken);
report.setApplicationTags(tags);
report.setUnmanagedApp(unmanagedApplication);
report.setPriority(priority);
report.setAppNodeLabelExpression(appNodeLabelExpression);
report.setAmNodeLabelExpression(amNodeLabelExpression);
return report;
}
@Private
@Unstable
public static ApplicationReport newInstance(ApplicationId applicationId,
ApplicationAttemptId applicationAttemptId, String user, String queue,
String name, String host, int rpcPort, Token clientToAMToken,
YarnApplicationState state, String diagnostics, String url,
long startTime, long launchTime, long finishTime,
FinalApplicationStatus finalStatus,
ApplicationResourceUsageReport appResources, String origTrackingUrl,
float progress, String applicationType, Token amRmToken, Set<String> tags,
boolean unmanagedApplication, Priority priority,
String appNodeLabelExpression, String amNodeLabelExpression) {
return newInstance(applicationId, applicationAttemptId, user, queue, name,
host, rpcPort, clientToAMToken, state, diagnostics, url,
startTime, startTime, launchTime, finishTime, finalStatus, appResources,
origTrackingUrl, progress, applicationType, amRmToken, tags,
unmanagedApplication, priority, appNodeLabelExpression,
amNodeLabelExpression);
}
@Private
@Unstable
public static ApplicationReport newInstance(ApplicationId applicationId,
ApplicationAttemptId applicationAttemptId, String user, String queue,
String name, String host, int rpcPort, Token clientToAMToken,
YarnApplicationState state, String diagnostics, String url,
long startTime, long submitTime, long launchTime, long finishTime,
FinalApplicationStatus finalStatus,
ApplicationResourceUsageReport appResources, String origTrackingUrl,
float progress, String applicationType, Token amRmToken, Set<String> tags,
boolean unmanagedApplication, Priority priority,
String appNodeLabelExpression, String amNodeLabelExpression) {
ApplicationReport report =
newInstance(applicationId, applicationAttemptId, user, queue, name,
host, rpcPort, clientToAMToken, state, diagnostics, url, startTime,
submitTime, launchTime, finishTime, finalStatus, appResources,
origTrackingUrl, progress, applicationType, amRmToken);
report.setApplicationTags(tags);
report.setUnmanagedApp(unmanagedApplication);
report.setPriority(priority);
report.setAppNodeLabelExpression(appNodeLabelExpression);
report.setAmNodeLabelExpression(amNodeLabelExpression);
return report;
}
/**
* Get the <code>ApplicationId</code> of the application.
* @return <code>ApplicationId</code> of the application
*/
@Public
@Stable
public abstract ApplicationId getApplicationId();
@Private
@Unstable
public abstract void setApplicationId(ApplicationId applicationId);
/**
* Get the <code>ApplicationAttemptId</code> of the current
* attempt of the application
* @return <code>ApplicationAttemptId</code> of the attempt
*/
@Public
@Stable
public abstract ApplicationAttemptId getCurrentApplicationAttemptId();
@Private
@Unstable
public abstract void setCurrentApplicationAttemptId(ApplicationAttemptId applicationAttemptId);
/**
* Get the <em>user</em> who submitted the application.
* @return <em>user</em> who submitted the application
*/
@Public
@Stable
public abstract String getUser();
@Private
@Unstable
public abstract void setUser(String user);
/**
* Get the <em>queue</em> to which the application was submitted.
* @return <em>queue</em> to which the application was submitted
*/
@Public
@Stable
public abstract String getQueue();
@Private
@Unstable
public abstract void setQueue(String queue);
/**
* Get the user-defined <em>name</em> of the application.
* @return <em>name</em> of the application
*/
@Public
@Stable
public abstract String getName();
@Private
@Unstable
public abstract void setName(String name);
/**
* Get the <em>host</em> on which the <code>ApplicationMaster</code>
* is running.
* @return <em>host</em> on which the <code>ApplicationMaster</code>
* is running
*/
@Public
@Stable
public abstract String getHost();
@Private
@Unstable
public abstract void setHost(String host);
/**
* Get the <em>RPC port</em> of the <code>ApplicationMaster</code>.
* @return <em>RPC port</em> of the <code>ApplicationMaster</code>
*/
@Public
@Stable
public abstract int getRpcPort();
@Private
@Unstable
public abstract void setRpcPort(int rpcPort);
/**
* Get the <em>client token</em> for communicating with the
* <code>ApplicationMaster</code>.
* <p>
* <em>ClientToAMToken</em> is the security token used by the AMs to verify
* authenticity of any <code>client</code>.
* </p>
*
* <p>
* The <code>ResourceManager</code>, provides a secure token (via
* {@link ApplicationReport#getClientToAMToken()}) which is verified by the
* ApplicationMaster when the client directly talks to an AM.
* </p>
* @return <em>client token</em> for communicating with the
* <code>ApplicationMaster</code>
*/
@Public
@Stable
public abstract Token getClientToAMToken();
@Private
@Unstable
public abstract void setClientToAMToken(Token clientToAMToken);
/**
* Get the <code>YarnApplicationState</code> of the application.
* @return <code>YarnApplicationState</code> of the application
*/
@Public
@Stable
public abstract YarnApplicationState getYarnApplicationState();
@Private
@Unstable
public abstract void setYarnApplicationState(YarnApplicationState state);
/**
* Get the <em>diagnositic information</em> of the application in case of
* errors.
* @return <em>diagnositic information</em> of the application in case
* of errors
*/
@Public
@Stable
public abstract String getDiagnostics();
@Private
@Unstable
public abstract void setDiagnostics(String diagnostics);
/**
* Get the <em>tracking url</em> for the application.
* @return <em>tracking url</em> for the application
*/
@Public
@Stable
public abstract String getTrackingUrl();
@Private
@Unstable
public abstract void setTrackingUrl(String url);
/**
* Get the original not-proxied <em>tracking url</em> for the application.
* This is intended to only be used by the proxy itself.
* @return the original not-proxied <em>tracking url</em> for the application
*/
@Private
@Unstable
public abstract String getOriginalTrackingUrl();
@Private
@Unstable
public abstract void setOriginalTrackingUrl(String url);
/**
* Get the <em>start time</em> of the application.
* @return <em>start time</em> of the application
*/
@Public
@Stable
public abstract long getStartTime();
@Private
@Unstable
public abstract void setStartTime(long startTime);
@Public
@Stable
public abstract long getSubmitTime();
@Private
@Unstable
public abstract void setSubmitTime(long submitTime);
@Private
@Unstable
public abstract void setLaunchTime(long setLaunchTime);
@Public
@Unstable
public abstract long getLaunchTime();
/**
* Get the <em>finish time</em> of the application.
* @return <em>finish time</em> of the application
*/
@Public
@Stable
public abstract long getFinishTime();
@Private
@Unstable
public abstract void setFinishTime(long finishTime);
/**
* Get the <em>final finish status</em> of the application.
* @return <em>final finish status</em> of the application
*/
@Public
@Stable
public abstract FinalApplicationStatus getFinalApplicationStatus();
@Private
@Unstable
public abstract void setFinalApplicationStatus(FinalApplicationStatus finishState);
/**
* Retrieve the structure containing the job resources for this application
* @return the job resources structure for this application
*/
@Public
@Stable
public abstract ApplicationResourceUsageReport getApplicationResourceUsageReport();
/**
* Store the structure containing the job resources for this application
* @param appResources structure for this application
*/
@Private
@Unstable
public abstract void setApplicationResourceUsageReport(ApplicationResourceUsageReport appResources);
/**
* Get the application's progress ( range 0.0 to 1.0 )
* @return application's progress
*/
@Public
@Stable
public abstract float getProgress();
@Private
@Unstable
public abstract void setProgress(float progress);
/**
* Get the application's Type
* @return application's Type
*/
@Public
@Stable
public abstract String getApplicationType();
@Private
@Unstable
public abstract void setApplicationType(String applicationType);
/**
* Get all tags corresponding to the application
* @return Application's tags
*/
@Public
@Stable
public abstract Set<String> getApplicationTags();
@Private
@Unstable
public abstract void setApplicationTags(Set<String> tags);
@Private
@Stable
public abstract void setAMRMToken(Token amRmToken);
/**
* Get the AMRM token of the application.
* <p>
* The AMRM token is required for AM to RM scheduling operations. For
* managed Application Masters YARN takes care of injecting it. For unmanaged
* Applications Masters, the token must be obtained via this method and set
* in the {@link org.apache.hadoop.security.UserGroupInformation} of the
* current user.
* <p>
* The AMRM token will be returned only if all the following conditions are
* met:
* <ul>
* <li>the requester is the owner of the ApplicationMaster</li>
* <li>the application master is an unmanaged ApplicationMaster</li>
* <li>the application master is in ACCEPTED state</li>
* </ul>
* Else this method returns NULL.
*
* @return the AM to RM token if available.
*/
@Public
@Stable
public abstract Token getAMRMToken();
/**
* Get log aggregation status for the application
* @return Application's log aggregation status
*/
@Public
@Stable
public abstract LogAggregationStatus getLogAggregationStatus();
@Private
@Unstable
public abstract void setLogAggregationStatus(
LogAggregationStatus logAggregationStatus);
/**
* @return true if the AM is not managed by the RM
*/
@Public
@Unstable
public abstract boolean isUnmanagedApp();
/**
* @param unmanagedApplication true if RM should not manage the AM
*/
@Public
@Unstable
public abstract void setUnmanagedApp(boolean unmanagedApplication);
/**
* Get priority of the application
*
* @return Application's priority
*/
@Public
@Stable
public abstract Priority getPriority();
@Private
@Unstable
public abstract void setPriority(Priority priority);
/**
* Get the default Node Label expression for all the application's containers
*
* @return Application's NodeLabelExpression
*/
@Unstable
public abstract String getAppNodeLabelExpression();
@Unstable
public abstract void setAppNodeLabelExpression(String appNodeLabelExpression);
/**
* Get the default Node Label expression for all the application's containers
*
* @return Application's NodeLabelExpression
*/
@Unstable
public abstract String getAmNodeLabelExpression();
@Unstable
public abstract void setAmNodeLabelExpression(String amNodeLabelExpression);
@Public
@Unstable
public abstract Map<ApplicationTimeoutType, ApplicationTimeout> getApplicationTimeouts();
@Private
@Unstable
public abstract void setApplicationTimeouts(
Map<ApplicationTimeoutType, ApplicationTimeout> timeouts);
/**
* Get RM ClusterId.
*
* @return RM ClusterId
*/
@Public
@Stable
public abstract String getRMClusterId();
@Public
@Stable
public abstract void setRMClusterId(String rmClusterId);
}
|
ApplicationReport
|
java
|
apache__flink
|
flink-table/flink-table-runtime/src/test/java/org/apache/flink/table/runtime/typeutils/LinkedListSerializerUpgradeTest.java
|
{
"start": 3662,
"end": 4733
}
|
// Upgrade verifier: restores state written by an older Flink version and
// checks the upgraded LinkedList serializer's data and schema compatibility.
class ____
implements TypeSerializerUpgradeTestBase.UpgradeVerifier<LinkedList<Long>> {
@Override
public TypeSerializer<LinkedList<Long>> createUpgradedSerializer() {
return createLinkedListSerializer();
}
@Override
public Condition<LinkedList<Long>> testDataCondition() {
// Expected restored list: zero, +/-42 and the long boundary values, in
// this insertion order.
LinkedList<Long> list = new LinkedList<>();
list.add(42L);
list.add(-42L);
list.add(0L);
list.add(Long.MAX_VALUE);
list.add(Long.MIN_VALUE);
return new Condition<>(list::equals, "");
}
@Override
public Condition<TypeSerializerSchemaCompatibility<LinkedList<Long>>>
schemaCompatibilityCondition(FlinkVersion version) {
// Snapshots newer than 1.13 are compatible as-is; older snapshots
// require migration.
if (version.isNewerVersionThan(FlinkVersion.v1_13)) {
return TypeSerializerConditions.isCompatibleAsIs();
} else {
return TypeSerializerConditions.isCompatibleAfterMigration();
}
}
}
}
|
LinkedListSerializerVerifier
|
java
|
apache__logging-log4j2
|
log4j-core-test/src/test/java/org/apache/logging/log4j/core/async/AsyncLoggerCustomSelectorLocationTest.java
|
{
"start": 1862,
"end": 3722
}
|
class ____ {
@BeforeAll
// Removes any stale log file, then selects the custom async context
// selector and the test-specific configuration before contexts start.
static void beforeClass() {
final File file = new File("target", "AsyncLoggerCustomSelectorLocationTest.log");
file.delete();
System.setProperty(Constants.LOG4J_CONTEXT_SELECTOR, CustomAsyncContextSelector.class.getName());
System.setProperty(
ConfigurationFactory.CONFIGURATION_FILE_PROPERTY, "AsyncLoggerCustomSelectorLocationTest.xml");
}
@AfterAll
// Resets the selector property so later tests get the default selector.
static void afterClass() {
System.setProperty(Constants.LOG4J_CONTEXT_SELECTOR, Strings.EMPTY);
}
@Test
// Logs one message through a location-less logger and one through a logger
// configured to include location, then inspects the written log lines.
void testCustomAsyncSelectorLocation() throws Exception {
    final File file = new File("target", "AsyncLoggerCustomSelectorLocationTest.log");
    final Logger log = LogManager.getLogger("com.foo.Bar");
    final Logger logIncludingLocation = LogManager.getLogger("com.include.location.Bar");
    final String msg = "Async logger msg with location";
    log.info(msg);
    logIncludingLocation.info(msg);
    CoreLoggerContexts.stopLoggerContext(false, file); // stop async thread
    final String firstLine;
    final String secondLine;
    final String thirdLine;
    // Fix: try-with-resources — the original leaked the reader when a
    // readLine() call threw.
    try (BufferedReader reader = new BufferedReader(new FileReader(file))) {
        firstLine = reader.readLine();
        secondLine = reader.readLine();
        thirdLine = reader.readLine();
    }
    file.delete();
    // By default we expect location to be disabled
    assertThat(firstLine, containsString(msg));
    assertThat(firstLine, not(containsString("testCustomAsyncSelectorLocation")));
    // Configuration allows us to retain location
    assertThat(secondLine, containsString(msg));
    assertThat(secondLine, containsString("testCustomAsyncSelectorLocation"));
    assertThat(thirdLine, nullValue());
}
public static final
|
AsyncLoggerCustomSelectorLocationTest
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/test/benchmark/encode/ArrayString1000Encode.java
|
{
"start": 153,
"end": 611
}
|
/**
 * Benchmark case that encodes a fixed array of 1000 hex strings.
 */
class ____ extends BenchmarkCase {

    /** Pre-built payload, constructed once and reused on every run. */
    private final Object object;

    public ArrayString1000Encode(){
        super("ArrayString1000Encode");
        // 1000 distinct hex strings ("0", "3e8", "7d0", ...) as the payload.
        String[] array = new String[1000];
        for (int i = 0; i < array.length; ++i) {
            array[i] = Integer.toHexString(i * 1000);
        }
        this.object = array;
    }

    @Override
    public void execute(Codec codec) throws Exception {
        codec.encode(object);
    }
}
|
ArrayString1000Encode
|
java
|
apache__flink
|
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/utils/CompactFileUtils.java
|
{
"start": 1661,
"end": 5856
}
|
/**
 * Utilities for compacting a group of files into a single target file on a
 * {@link FileSystem}.
 *
 * <p>NOTE(review): this is a static-only utility class; consider adding a
 * private constructor to prevent instantiation (omitted here to keep the
 * declared shape unchanged).
 */
class ____ {

    private static final Logger LOG = LoggerFactory.getLogger(CompactFileUtils.class);

    /**
     * Do Compaction: - Target file exists, do nothing. - Can do compaction: - Single file, do
     * atomic renaming, there are optimizations for FileSystem. - Multiple file, do reading and
     * writing.
     *
     * @return the target path, or {@code null} when {@code paths} is empty
     */
    public static @Nullable <T> Path doCompact(
            FileSystem fileSystem,
            String partition,
            List<Path> paths,
            Path target,
            Configuration config,
            CompactReader.Factory<T> readerFactory,
            CompactWriter.Factory<T> writerFactory)
            throws IOException {
        if (paths.isEmpty()) {
            return null;
        }
        // Record the input sizes up front for the summary log below.
        Map<Path, Long> inputMap = new HashMap<>();
        for (Path path : paths) {
            inputMap.put(path, fileSystem.getFileStatus(path).getLen());
        }
        // Idempotence: a previous (possibly failed-over) attempt already
        // produced the target file.
        if (fileSystem.exists(target)) {
            return target;
        }
        checkExist(fileSystem, paths);
        long startMillis = System.currentTimeMillis();
        boolean success = false;
        if (paths.size() == 1) {
            // optimizer for single file
            success = doSingleFileMove(fileSystem, paths.get(0), target);
        }
        if (!success) {
            doMultiFilesCompact(
                    partition, paths, target, config, fileSystem, readerFactory, writerFactory);
        }
        Map<Path, Long> targetMap = new HashMap<>();
        targetMap.put(target, fileSystem.getFileStatus(target).getLen());
        double costSeconds = ((double) (System.currentTimeMillis() - startMillis)) / 1000;
        LOG.info(
                "Compaction time cost is '{}S', output per file as following format: name=size(byte), target file is '{}', input files are '{}'",
                costSeconds,
                targetMap,
                inputMap);
        return target;
    }

    /**
     * Copies a single source file to {@code dst} through a {@link RecoverableWriter}, keeping
     * the original for failover. Returns {@code false} when the file system does not support
     * recoverable writers so the caller can fall back to record-level compaction.
     */
    private static boolean doSingleFileMove(FileSystem fileSystem, Path src, Path dst)
            throws IOException {
        // We can not rename, because we need to keep original file for failover
        RecoverableWriter writer;
        try {
            writer = fileSystem.createRecoverableWriter();
        } catch (UnsupportedOperationException ignore) {
            // Some writer not support RecoverableWriter, so fallback to per record moving.
            // For example, see the constructor of HadoopRecoverableWriter. Although it not support
            // RecoverableWriter, but HadoopPathBasedBulkFormatBuilder can support streaming
            // writing.
            return false;
        }
        RecoverableFsDataOutputStream out = writer.open(dst);
        try (FSDataInputStream in = fileSystem.open(src)) {
            IOUtils.copyBytes(in, out, false);
        } catch (Throwable t) {
            // Close without committing so partial output is not published.
            out.close();
            throw t;
        }
        out.closeForCommit().commit();
        return true;
    }

    /** Reads every record of every input file and re-writes it into {@code dst}. */
    private static <T> void doMultiFilesCompact(
            String partition,
            List<Path> files,
            Path dst,
            Configuration config,
            FileSystem fileSystem,
            CompactReader.Factory<T> readerFactory,
            CompactWriter.Factory<T> writerFactory)
            throws IOException {
        CompactWriter<T> writer =
                writerFactory.create(CompactContext.create(config, fileSystem, partition, dst));
        for (Path path : files) {
            try (CompactReader<T> reader =
                    readerFactory.create(
                            CompactContext.create(config, fileSystem, partition, path))) {
                T record;
                while ((record = reader.read()) != null) {
                    writer.write(record);
                }
            }
        }
        // commit immediately
        writer.commit();
    }

    /** Fails fast with a clear message when any input file is missing. */
    private static void checkExist(FileSystem fileSystem, List<Path> candidates)
            throws IOException {
        for (Path path : candidates) {
            if (!fileSystem.exists(path)) {
                throw new IOException("Compaction file not exist: " + path);
            }
        }
    }
}
|
CompactFileUtils
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/EmptyArrayAsNullTest.java
|
{
"start": 151,
"end": 410
}
|
class ____ extends TestCase {
// Parsing {"value":[]} yields a null "value" field.
// NOTE(review): Model's declaration is outside this view — presumably its
// "value" field is a type for which fastjson maps [] to null; verify.
public void test_emtpyAsNull() throws Exception {
String text = "{\"value\":[]}";
Model model = JSON.parseObject(text, Model.class);
assertNull(model.value);
}
public static
|
EmptyArrayAsNullTest
|
java
|
quarkusio__quarkus
|
integration-tests/kubernetes/quarkus-standard-way/src/test/java/io/quarkus/it/kubernetes/KubernetesConfigWithSecretsTest.java
|
{
"start": 701,
"end": 4601
}
|
class ____ {
private static final String APP_NAME = "kubernetes-config-with-secrets";
@RegisterExtension
static final QuarkusProdModeTest config = new QuarkusProdModeTest()
.withApplicationRoot((jar) -> jar.addClasses(GreetingResource.class))
.setApplicationName(APP_NAME)
.setApplicationVersion("0.1-SNAPSHOT")
.withConfigurationResource(APP_NAME + ".properties")
.setForcedDependencies(List.of(Dependency.of("io.quarkus", "quarkus-kubernetes-config", Version.getVersion())));
@ProdBuildResults
private ProdModeTestResults prodModeTestResults;
@Test
public void assertGeneratedResources() throws IOException {
Path kubernetesDir = prodModeTestResults.getBuildDir().resolve("kubernetes");
assertThat(kubernetesDir)
.isDirectoryContaining(p -> p.getFileName().endsWith("kubernetes.json"))
.isDirectoryContaining(p -> p.getFileName().endsWith("kubernetes.yml"));
List<HasMetadata> kubernetesList = DeserializationUtil.deserializeAsList(kubernetesDir.resolve("kubernetes.yml"));
assertThat(kubernetesList).filteredOn(h -> "Role".equals(h.getKind())).hasSize(1);
assertThat(kubernetesList).anySatisfy(res -> {
assertThat(res).isInstanceOfSatisfying(Role.class, role -> {
assertThat(role.getMetadata()).satisfies(m -> {
assertThat(m.getName()).isEqualTo("view-secrets");
});
assertThat(role.getRules()).singleElement().satisfies(r -> {
assertThat(r).isInstanceOfSatisfying(PolicyRule.class, rule -> {
assertThat(rule.getApiGroups()).containsExactly("");
assertThat(rule.getResources()).containsExactly("secrets");
assertThat(rule.getVerbs()).containsExactly("get");
});
});
});
});
assertThat(kubernetesList).filteredOn(h -> "RoleBinding".equals(h.getKind())).hasSize(2)
.anySatisfy(res -> {
assertThat(res).isInstanceOfSatisfying(RoleBinding.class, roleBinding -> {
assertThat(roleBinding.getMetadata()).satisfies(m -> {
assertThat(m.getName()).isEqualTo("kubernetes-config-with-secrets-view-secrets");
});
assertThat(roleBinding.getRoleRef().getKind()).isEqualTo("Role");
assertThat(roleBinding.getRoleRef().getName()).isEqualTo("view-secrets");
assertThat(roleBinding.getSubjects()).singleElement().satisfies(subject -> {
assertThat(subject.getKind()).isEqualTo("ServiceAccount");
assertThat(subject.getName()).isEqualTo("kubernetes-config-with-secrets");
});
});
})
.anySatisfy(res -> {
assertThat(res).isInstanceOfSatisfying(RoleBinding.class, roleBinding -> {
assertThat(roleBinding.getMetadata()).satisfies(m -> {
assertThat(m.getName()).isEqualTo("kubernetes-config-with-secrets-view");
});
assertThat(roleBinding.getRoleRef().getKind()).isEqualTo("ClusterRole");
assertThat(roleBinding.getRoleRef().getName()).isEqualTo("view");
assertThat(roleBinding.getSubjects()).singleElement().satisfies(subject -> {
assertThat(subject.getKind()).isEqualTo("ServiceAccount");
assertThat(subject.getName()).isEqualTo("kubernetes-config-with-secrets");
});
});
});
}
}
|
KubernetesConfigWithSecretsTest
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/api/booleanarray/BooleanArrayAssert_startsWith_with_Boolean_array_Test.java
|
{
"start": 1207,
"end": 1932
}
|
class ____ extends BooleanArrayAssertBaseTest {
@Test
void should_fail_if_values_is_null() {
// GIVEN
Boolean[] sequence = null;
// WHEN
Throwable thrown = catchThrowable(() -> assertions.startsWith(sequence));
// THEN
then(thrown).isInstanceOf(NullPointerException.class)
.hasMessage(shouldNotBeNull("sequence").create());
}
@Override
protected BooleanArrayAssert invoke_api_method() {
return assertions.startsWith(new Boolean[] { true, false });
}
@Override
protected void verify_internal_effects() {
verify(arrays).assertStartsWith(getInfo(assertions), getActual(assertions), arrayOf(true, false));
}
}
|
BooleanArrayAssert_startsWith_with_Boolean_array_Test
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/threadsafety/GuardedByValidatorTest.java
|
{
"start": 1350,
"end": 2040
}
|
class ____ {
@GuardedBy("This thread")
// BUG: Diagnostic contains:
// Invalid @GuardedBy expression: could not resolve guard
int x;
@GuardedBy("This thread")
// BUG: Diagnostic contains:
// Invalid @GuardedBy expression: could not resolve guard
void m() {}
}
""")
.doTest();
}
@Test
public void negative() {
compilationHelper
.addSourceLines(
"threadsafety/Test.java",
"""
package threadsafety.Test;
import com.google.errorprone.annotations.concurrent.GuardedBy;
|
Test
|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/internal/operators/observable/BlockingObservableIterable.java
|
{
"start": 1065,
"end": 1583
}
|
class ____<T> implements Iterable<T> {
final ObservableSource<? extends T> source;
final int bufferSize;
public BlockingObservableIterable(ObservableSource<? extends T> source, int bufferSize) {
this.source = source;
this.bufferSize = bufferSize;
}
@Override
public Iterator<T> iterator() {
BlockingObservableIterator<T> it = new BlockingObservableIterator<>(bufferSize);
source.subscribe(it);
return it;
}
static final
|
BlockingObservableIterable
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/nestedbeans/mixed/source/WaterQualityReport.java
|
{
"start": 248,
"end": 694
}
|
class ____ {
private String organisationName;
private String verdict;
public String getOrganisationName() {
return organisationName;
}
public void setOrganisationName(String organisationName) {
this.organisationName = organisationName;
}
public String getVerdict() {
return verdict;
}
public void setVerdict(String verdict) {
this.verdict = verdict;
}
}
|
WaterQualityReport
|
java
|
google__dagger
|
hilt-android/main/java/dagger/hilt/android/ViewModelLifecycle.java
|
{
"start": 771,
"end": 830
}
|
interface ____ extends RetainedLifecycle {
}
|
ViewModelLifecycle
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
|
{
"start": 7023,
"end": 24439
}
|
class ____
extends BlockAliasMap<FileRegion> {
private int minId;
private int numBlocks;
private Iterator<FileRegion> suppliedIterator;
TestFileRegionBlockAliasMap() {
this(null, MIN_BLK_ID, NUM_PROVIDED_BLKS);
}
TestFileRegionBlockAliasMap(Iterator<FileRegion> iterator, int minId,
int numBlocks) {
this.suppliedIterator = iterator;
this.minId = minId;
this.numBlocks = numBlocks;
}
@Override
public Reader<FileRegion> getReader(Reader.Options opts, String blockPoolId)
throws IOException {
if (!blockPoolId.equals(BLOCK_POOL_IDS[CHOSEN_BP_ID])) {
return null;
}
BlockAliasMap.Reader<FileRegion> reader =
new BlockAliasMap.Reader<FileRegion>() {
@Override
public Iterator<FileRegion> iterator() {
if (suppliedIterator == null) {
return new TestFileRegionIterator(providedBasePath, minId,
numBlocks);
} else {
return suppliedIterator;
}
}
@Override
public void close() throws IOException {
}
@Override
public Optional<FileRegion> resolve(Block ident)
throws IOException {
return null;
}
};
return reader;
}
@Override
public Writer<FileRegion> getWriter(Writer.Options opts, String blockPoolId)
throws IOException {
// not implemented
return null;
}
@Override
public void refresh() throws IOException {
// do nothing!
}
@Override
public void close() throws IOException {
// do nothing
}
}
private static Storage.StorageDirectory createLocalStorageDirectory(
File root, Configuration conf)
throws SecurityException, IOException {
Storage.StorageDirectory sd =
new Storage.StorageDirectory(
StorageLocation.parse(root.toURI().toString()));
DataStorage.createStorageID(sd, false, conf);
return sd;
}
private static Storage.StorageDirectory createProvidedStorageDirectory(
String confString, Configuration conf)
throws SecurityException, IOException {
Storage.StorageDirectory sd =
new Storage.StorageDirectory(StorageLocation.parse(confString));
DataStorage.createStorageID(sd, false, conf);
return sd;
}
private static void createStorageDirs(DataStorage storage,
Configuration conf, int numDirs, int numProvidedDirs)
throws IOException {
List<Storage.StorageDirectory> dirs =
new ArrayList<Storage.StorageDirectory>();
List<String> dirStrings = new ArrayList<String>();
FileUtils.deleteDirectory(new File(BASE_DIR));
for (int i = 0; i < numDirs; i++) {
File loc = new File(BASE_DIR, "data" + i);
dirStrings.add(new Path(loc.toString()).toUri().toString());
loc.mkdirs();
dirs.add(createLocalStorageDirectory(loc, conf));
when(storage.getStorageDir(i)).thenReturn(dirs.get(i));
}
for (int i = numDirs; i < numDirs + numProvidedDirs; i++) {
File loc = new File(BASE_DIR, "data" + i);
providedBasePath = loc.getAbsolutePath();
loc.mkdirs();
String dirString = "[PROVIDED]" +
new Path(loc.toString()).toUri().toString();
dirStrings.add(dirString);
dirs.add(createProvidedStorageDirectory(dirString, conf));
when(storage.getStorageDir(i)).thenReturn(dirs.get(i));
}
String dataDir = StringUtils.join(",", dirStrings);
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDir);
when(storage.dirIterator()).thenReturn(dirs.iterator());
when(storage.getNumStorageDirs()).thenReturn(numDirs + numProvidedDirs);
}
private int getNumVolumes() {
try (FsDatasetSpi.FsVolumeReferences volumes =
dataset.getFsVolumeReferences()) {
return volumes.size();
} catch (IOException e) {
return 0;
}
}
@BeforeEach
public void setUp() throws IOException {
datanode = mock(DataNode.class);
storage = mock(DataStorage.class);
conf = new Configuration();
conf.setLong(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 0);
when(datanode.getConf()).thenReturn(conf);
when(datanode.getDataSetLockManager()).thenReturn(manager);
final DNConf dnConf = new DNConf(datanode);
when(datanode.getDnConf()).thenReturn(dnConf);
// reset the space used
spaceUsed = 0;
final BlockScanner disabledBlockScanner = new BlockScanner(datanode, conf);
when(datanode.getBlockScanner()).thenReturn(disabledBlockScanner);
final ShortCircuitRegistry shortCircuitRegistry =
new ShortCircuitRegistry(conf);
when(datanode.getShortCircuitRegistry()).thenReturn(shortCircuitRegistry);
this.conf.setClass(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_CLASS,
TestFileRegionBlockAliasMap.class, BlockAliasMap.class);
blkToPathMap = new HashMap<Long, String>();
providedVolumes = new LinkedList<FsVolumeImpl>();
createStorageDirs(
storage, conf, NUM_LOCAL_INIT_VOLUMES, NUM_PROVIDED_INIT_VOLUMES);
dataset = new FsDatasetImpl(datanode, storage, conf);
FsVolumeReferences volumes = dataset.getFsVolumeReferences();
for (int i = 0; i < volumes.size(); i++) {
FsVolumeSpi vol = volumes.get(i);
if (vol.getStorageType() == StorageType.PROVIDED) {
providedVolumes.add((FsVolumeImpl) vol);
}
}
for (String bpid : BLOCK_POOL_IDS) {
dataset.addBlockPool(bpid, conf);
}
}
@Test
public void testReserved() throws Exception {
for (FsVolumeSpi vol : providedVolumes) {
// the reserved space for provided volumes should be 0.
assertEquals(0, ((FsVolumeImpl) vol).getReserved());
}
}
@Test
public void testProvidedVolumeImpl() throws IOException {
assertEquals(NUM_LOCAL_INIT_VOLUMES + NUM_PROVIDED_INIT_VOLUMES, getNumVolumes());
assertEquals(NUM_PROVIDED_INIT_VOLUMES, providedVolumes.size());
assertEquals(0, dataset.getNumFailedVolumes());
for (int i = 0; i < providedVolumes.size(); i++) {
// check basic information about provided volume
assertEquals(DFSConfigKeys.DFS_PROVIDER_STORAGEUUID_DEFAULT,
providedVolumes.get(i).getStorageID());
assertEquals(StorageType.PROVIDED, providedVolumes.get(i).getStorageType());
long space = providedVolumes.get(i).getBlockPoolUsed(
BLOCK_POOL_IDS[CHOSEN_BP_ID]);
// check the df stats of the volume
assertEquals(spaceUsed, space);
assertEquals(NUM_PROVIDED_BLKS, providedVolumes.get(i).getNumBlocks());
providedVolumes.get(i).shutdownBlockPool(
BLOCK_POOL_IDS[1 - CHOSEN_BP_ID], null);
try {
assertEquals(0,
providedVolumes.get(i).getBlockPoolUsed(BLOCK_POOL_IDS[1 - CHOSEN_BP_ID]));
// should not be triggered
assertTrue(false);
} catch (IOException e) {
LOG.info("Expected exception: " + e);
}
}
}
@Test
public void testBlockLoad() throws IOException {
for (int i = 0; i < providedVolumes.size(); i++) {
FsVolumeImpl vol = providedVolumes.get(i);
ReplicaMap volumeMap = new ReplicaMap();
vol.getVolumeMap(volumeMap, null);
assertEquals(vol.getBlockPoolList().length, BLOCK_POOL_IDS.length);
for (int j = 0; j < BLOCK_POOL_IDS.length; j++) {
if (j != CHOSEN_BP_ID) {
// this block pool should not have any blocks
assertEquals(null, volumeMap.replicas(BLOCK_POOL_IDS[j]));
}
}
assertEquals(NUM_PROVIDED_BLKS, volumeMap.replicas(BLOCK_POOL_IDS[CHOSEN_BP_ID]).size());
}
}
@Test
public void testProvidedBlockRead() throws IOException {
for (int id = 0; id < NUM_PROVIDED_BLKS; id++) {
ExtendedBlock eb = new ExtendedBlock(
BLOCK_POOL_IDS[CHOSEN_BP_ID], id, BLK_LEN,
HdfsConstants.GRANDFATHER_GENERATION_STAMP);
InputStream ins = dataset.getBlockInputStream(eb, 0);
String filepath = blkToPathMap.get((long) id);
TestProvidedReplicaImpl.verifyReplicaContents(new File(filepath), ins, 0,
BLK_LEN);
}
}
@Test
public void testProvidedBlockIterator() throws IOException {
for (int i = 0; i < providedVolumes.size(); i++) {
FsVolumeImpl vol = providedVolumes.get(i);
BlockIterator iter =
vol.newBlockIterator(BLOCK_POOL_IDS[CHOSEN_BP_ID], "temp");
Set<Long> blockIdsUsed = new HashSet<Long>();
assertEquals(BLOCK_POOL_IDS[CHOSEN_BP_ID], iter.getBlockPoolId());
while(!iter.atEnd()) {
ExtendedBlock eb = iter.nextBlock();
long blkId = eb.getBlockId();
assertTrue(blkId >= MIN_BLK_ID && blkId < NUM_PROVIDED_BLKS);
// all block ids must be unique!
assertTrue(!blockIdsUsed.contains(blkId));
blockIdsUsed.add(blkId);
}
assertEquals(NUM_PROVIDED_BLKS, blockIdsUsed.size());
// rewind the block iterator
iter.rewind();
while(!iter.atEnd()) {
ExtendedBlock eb = iter.nextBlock();
long blkId = eb.getBlockId();
// the block should have already appeared in the first scan.
assertTrue(blockIdsUsed.contains(blkId));
blockIdsUsed.remove(blkId);
}
// none of the blocks should remain in blockIdsUsed
assertEquals(0, blockIdsUsed.size());
// the other block pool should not contain any blocks!
BlockIterator nonProvidedBpIter =
vol.newBlockIterator(BLOCK_POOL_IDS[1 - CHOSEN_BP_ID], "temp");
assertEquals(null, nonProvidedBpIter.nextBlock());
}
}
private int getBlocksInProvidedVolumes(String basePath, int numBlocks,
int minBlockId) throws IOException {
TestFileRegionIterator fileRegionIterator =
new TestFileRegionIterator(basePath, minBlockId, numBlocks);
int totalBlocks = 0;
for (int i = 0; i < providedVolumes.size(); i++) {
ProvidedVolumeImpl vol = (ProvidedVolumeImpl) providedVolumes.get(i);
vol.setFileRegionProvider(BLOCK_POOL_IDS[CHOSEN_BP_ID],
new TestFileRegionBlockAliasMap(fileRegionIterator, minBlockId,
numBlocks));
ReplicaMap volumeMap = new ReplicaMap();
vol.getVolumeMap(BLOCK_POOL_IDS[CHOSEN_BP_ID], volumeMap, null);
totalBlocks += volumeMap.size(BLOCK_POOL_IDS[CHOSEN_BP_ID]);
}
return totalBlocks;
}
/**
* Tests if the FileRegions provided by the FileRegionProvider
* can belong to the Providevolume.
* @throws IOException
*/
@Test
public void testProvidedVolumeContents() throws IOException {
int expectedBlocks = 5;
int minId = 0;
// use a path which has the same prefix as providedBasePath
// all these blocks can belong to the provided volume
int blocksFound = getBlocksInProvidedVolumes(providedBasePath + "/test1/",
expectedBlocks, minId);
assertEquals(expectedBlocks, blocksFound,
"Number of blocks in provided volumes should be " + expectedBlocks);
blocksFound = getBlocksInProvidedVolumes(
"file:/" + providedBasePath + "/test1/", expectedBlocks, minId);
assertEquals(expectedBlocks, blocksFound,
"Number of blocks in provided volumes should be " + expectedBlocks);
// use a path that is entirely different from the providedBasePath
// none of these blocks can belong to the volume
blocksFound =
getBlocksInProvidedVolumes("randomtest1/", expectedBlocks, minId);
assertEquals(0, blocksFound, "Number of blocks in provided volumes should be 0");
}
@Test
public void testProvidedVolumeContainsBlock() throws URISyntaxException {
assertEquals(true, ProvidedVolumeImpl.containsBlock(null, null));
assertEquals(false, ProvidedVolumeImpl.containsBlock(new URI("file:/a"), null));
assertEquals(true,
ProvidedVolumeImpl.containsBlock(new URI("file:/a/b/c/"), new URI("file:/a/b/c/d/e.file")));
assertEquals(true,
ProvidedVolumeImpl.containsBlock(new URI("/a/b/c/"), new URI("file:/a/b/c/d/e.file")));
assertEquals(true,
ProvidedVolumeImpl.containsBlock(new URI("/a/b/c"), new URI("file:/a/b/c/d/e.file")));
assertEquals(true,
ProvidedVolumeImpl.containsBlock(new URI("/a/b/c/"), new URI("/a/b/c/d/e.file")));
assertEquals(true,
ProvidedVolumeImpl.containsBlock(new URI("file:/a/b/c/"), new URI("/a/b/c/d/e.file")));
assertEquals(false,
ProvidedVolumeImpl.containsBlock(new URI("/a/b/e"), new URI("file:/a/b/c/d/e.file")));
assertEquals(false,
ProvidedVolumeImpl.containsBlock(new URI("file:/a/b/e"), new URI("file:/a/b/c/d/e.file")));
assertEquals(true,
ProvidedVolumeImpl.containsBlock(new URI("s3a:/bucket1/dir1/"),
new URI("s3a:/bucket1/dir1/temp.txt")));
assertEquals(false,
ProvidedVolumeImpl.containsBlock(new URI("s3a:/bucket2/dir1/"),
new URI("s3a:/bucket1/dir1/temp.txt")));
assertEquals(false,
ProvidedVolumeImpl.containsBlock(new URI("s3a:/bucket1/dir1/"),
new URI("s3a:/bucket1/temp.txt")));
assertEquals(false,
ProvidedVolumeImpl.containsBlock(new URI("/bucket1/dir1/"),
new URI("s3a:/bucket1/dir1/temp.txt")));
}
@Test
public void testProvidedReplicaSuffixExtraction() {
assertEquals("B.txt",
ProvidedVolumeImpl.getSuffix(new Path("file:///A/"), new Path("file:///A/B.txt")));
assertEquals("B/C.txt",
ProvidedVolumeImpl.getSuffix(new Path("file:///A/"), new Path("file:///A/B/C.txt")));
assertEquals("B/C/D.txt",
ProvidedVolumeImpl.getSuffix(new Path("file:///A/"), new Path("file:///A/B/C/D.txt")));
assertEquals("D.txt", ProvidedVolumeImpl.getSuffix(new Path("file:///A/B/C/"),
new Path("file:///A/B/C/D.txt")));
assertEquals("file:/A/B/C/D.txt", ProvidedVolumeImpl.getSuffix(new Path("file:///X/B/C/"),
new Path("file:///A/B/C/D.txt")));
assertEquals("D.txt",
ProvidedVolumeImpl.getSuffix(new Path("/A/B/C"), new Path("/A/B/C/D.txt")));
assertEquals("D.txt",
ProvidedVolumeImpl.getSuffix(new Path("/A/B/C/"), new Path("/A/B/C/D.txt")));
assertEquals("data/current.csv", ProvidedVolumeImpl.getSuffix(
new Path("wasb:///users/alice/"), new Path("wasb:///users/alice/data/current.csv")));
assertEquals("current.csv", ProvidedVolumeImpl.getSuffix(new Path("wasb:///users/alice/data"),
new Path("wasb:///users/alice/data/current.csv")));
assertEquals("wasb:/users/alice/data/current.csv",
ProvidedVolumeImpl.getSuffix(new Path("wasb:///users/bob/"),
new Path("wasb:///users/alice/data/current.csv")));
}
@Test
public void testProvidedReplicaPrefix() throws Exception {
for (int i = 0; i < providedVolumes.size(); i++) {
FsVolumeImpl vol = providedVolumes.get(i);
ReplicaMap volumeMap = new ReplicaMap();
vol.getVolumeMap(volumeMap, null);
Path expectedPrefix = new Path(
StorageLocation.normalizeFileURI(new File(providedBasePath).toURI()));
for (ReplicaInfo info : volumeMap
.replicas(BLOCK_POOL_IDS[CHOSEN_BP_ID])) {
ProvidedReplica pInfo = (ProvidedReplica) info;
assertEquals(expectedPrefix, pInfo.getPathPrefix());
}
}
}
@Test
public void testScannerWithProvidedVolumes() throws Exception {
DirectoryScanner scanner = new DirectoryScanner(dataset, conf);
Collection<ScanInfoVolumeReport> reports = scanner.getVolumeReports();
// no blocks should be reported for the Provided volume as long as
// the directoryScanner is disabled.
for (ScanInfoVolumeReport report : reports) {
assertEquals(0, report.getScanInfo(BLOCK_POOL_IDS[CHOSEN_BP_ID]).size());
}
}
/**
* Tests that a ProvidedReplica supports path handles.
*
* @throws Exception
*/
@Test
public void testProvidedReplicaWithPathHandle() throws Exception {
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
// generate random data
int chunkSize = 512;
Random r = new Random(12345L);
byte[] data = new byte[chunkSize];
r.nextBytes(data);
Path file = new Path("/testfile");
try (FSDataOutputStream fout = fs.create(file)) {
fout.write(data);
}
PathHandle pathHandle = fs.getPathHandle(fs.getFileStatus(file),
Options.HandleOpt.changed(true), Options.HandleOpt.moved(true));
FinalizedProvidedReplica replica = new FinalizedProvidedReplica(0,
file.toUri(), 0, chunkSize, 0, pathHandle, null, conf, fs);
byte[] content = new byte[chunkSize];
IOUtils.readFully(replica.getDataInputStream(0), content, 0, chunkSize);
assertArrayEquals(data, content);
fs.rename(file, new Path("/testfile.1"));
// read should continue succeeding after the rename operation
IOUtils.readFully(replica.getDataInputStream(0), content, 0, chunkSize);
assertArrayEquals(data, content);
replica.setPathHandle(null);
try {
// expected to fail as URI of the provided replica is no longer valid.
replica.getDataInputStream(0);
fail("Expected an exception");
} catch (IOException e) {
LOG.info("Expected exception " + e);
}
}
}
|
TestFileRegionBlockAliasMap
|
java
|
grpc__grpc-java
|
api/src/main/java/io/grpc/ForwardingServerCall.java
|
{
"start": 732,
"end": 1345
}
|
class ____<ReqT, RespT>
extends PartialForwardingServerCall<ReqT, RespT> {
/**
* Returns the delegated {@code ServerCall}.
*/
@Override
protected abstract ServerCall<ReqT, RespT> delegate();
@Override
public void sendMessage(RespT message) {
delegate().sendMessage(message);
}
@Override
public MethodDescriptor<ReqT, RespT> getMethodDescriptor() {
return delegate().getMethodDescriptor();
}
/**
* A simplified version of {@link ForwardingServerCall} where subclasses can pass in a {@link
* ServerCall} as the delegate.
*/
public abstract static
|
ForwardingServerCall
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/convert/ConvertingSerializerTest.java
|
{
"start": 1157,
"end": 1387
}
|
class ____ extends StdConverter<ConvertingBean, int[]>
{
@Override
public int[] convert(ConvertingBean value) {
return new int[] { value.x, value.y };
}
}
static
|
ConvertingBeanConverter
|
java
|
apache__camel
|
components/camel-telegram/src/main/java/org/apache/camel/component/telegram/model/IncomingAnimation.java
|
{
"start": 1255,
"end": 4001
}
|
class ____ implements Serializable {
private static final long serialVersionUID = 5280714879829232835L;
@JsonProperty("file_id")
private String fileId;
@JsonProperty("file_unique_id")
private String fileUniqueId;
private Integer width;
private Integer height;
@JsonProperty("duration")
private Integer durationSeconds;
private IncomingPhotoSize thumb;
@JsonProperty("file_name")
private String fileName;
@JsonProperty("mime_type")
private String mimeType;
@JsonProperty("file_size")
private Long fileSize;
public IncomingAnimation() {
}
public String getFileId() {
return fileId;
}
public void setFileId(String fileId) {
this.fileId = fileId;
}
public Integer getWidth() {
return width;
}
public void setWidth(Integer width) {
this.width = width;
}
public Integer getHeight() {
return height;
}
public void setHeight(Integer height) {
this.height = height;
}
public Long getFileSize() {
return fileSize;
}
public void setFileSize(Long fileSize) {
this.fileSize = fileSize;
}
public Integer getDurationSeconds() {
return durationSeconds;
}
public void setDurationSeconds(Integer durationSeconds) {
this.durationSeconds = durationSeconds;
}
public IncomingPhotoSize getThumb() {
return thumb;
}
public void setThumb(IncomingPhotoSize thumb) {
this.thumb = thumb;
}
public String getMimeType() {
return mimeType;
}
public void setMimeType(String mimeType) {
this.mimeType = mimeType;
}
public String getFileUniqueId() {
return fileUniqueId;
}
public void setFileUniqueId(String fileUniqueId) {
this.fileUniqueId = fileUniqueId;
}
public String getFileName() {
return fileName;
}
public void setFileName(String fileName) {
this.fileName = fileName;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("IncomingAnimation{");
sb.append("fileId='").append(fileId).append('\'');
sb.append(", fileUniqueId='").append(fileUniqueId).append('\'');
sb.append(", width=").append(width);
sb.append(", height=").append(height);
sb.append(", durationSeconds=").append(durationSeconds);
sb.append(", thumb=").append(thumb);
sb.append(", fileName='").append(fileName).append('\'');
sb.append(", mimeType='").append(mimeType).append('\'');
sb.append(", fileSize=").append(fileSize);
sb.append('}');
return sb.toString();
}
}
|
IncomingAnimation
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/constructor/nestedsource/source/Chart.java
|
{
"start": 253,
"end": 675
}
|
class ____ {
private final String type;
private final String name;
private final Song song;
public Chart(String type, String name, Song song) {
this.type = type;
this.name = name;
this.song = song;
}
public String getType() {
return type;
}
public String getName() {
return name;
}
public Song getSong() {
return song;
}
}
|
Chart
|
java
|
apache__dubbo
|
dubbo-metrics/dubbo-metrics-event/src/main/java/org/apache/dubbo/metrics/model/key/MetricsKey.java
|
{
"start": 854,
"end": 10962
}
|
enum ____ {
APPLICATION_METRIC_INFO("dubbo.application.info.total", "Total Application Info"),
CONFIGCENTER_METRIC_TOTAL("dubbo.configcenter.total", "Config Changed Total"),
// provider metrics key
METRIC_REQUESTS("dubbo.%s.requests.total", "Total Requests"),
METRIC_REQUESTS_SUCCEED("dubbo.%s.requests.succeed.total", "Total Succeed Requests"),
METRIC_REQUEST_BUSINESS_FAILED("dubbo.%s.requests.business.failed.total", "Total Failed Business Requests"),
METRIC_REQUESTS_PROCESSING("dubbo.%s.requests.processing.total", "Processing Requests"),
METRIC_REQUESTS_TIMEOUT("dubbo.%s.requests.timeout.total", "Total Timeout Failed Requests"),
METRIC_REQUESTS_LIMIT("dubbo.%s.requests.limit.total", "Total Limit Failed Requests"),
METRIC_REQUESTS_FAILED("dubbo.%s.requests.unknown.failed.total", "Total Unknown Failed Requests"),
METRIC_REQUESTS_TOTAL_FAILED("dubbo.%s.requests.failed.total", "Total Failed Requests"),
METRIC_REQUESTS_NETWORK_FAILED("dubbo.%s.requests.failed.network.total", "Total network Failed Requests"),
METRIC_REQUESTS_SERVICE_UNAVAILABLE_FAILED(
"dubbo.%s.requests.failed.service.unavailable.total", "Total Service Unavailable Failed Requests"),
METRIC_REQUESTS_CODEC_FAILED("dubbo.%s.requests.failed.codec.total", "Total Codec Failed Requests"),
METRIC_REQUESTS_TOTAL_AGG("dubbo.%s.requests.total.aggregate", "Aggregated Total Requests"),
METRIC_REQUESTS_SUCCEED_AGG("dubbo.%s.requests.succeed.aggregate", "Aggregated Succeed Requests"),
METRIC_REQUESTS_FAILED_AGG("dubbo.%s.requests.failed.aggregate", "Aggregated Failed Requests"),
METRIC_REQUEST_BUSINESS_FAILED_AGG(
"dubbo.%s.requests.business.failed.aggregate", "Aggregated Business Failed Requests"),
METRIC_REQUESTS_TIMEOUT_AGG("dubbo.%s.requests.timeout.failed.aggregate", "Aggregated timeout Failed Requests"),
METRIC_REQUESTS_LIMIT_AGG("dubbo.%s.requests.limit.aggregate", "Aggregated limit Requests"),
METRIC_REQUESTS_TOTAL_FAILED_AGG("dubbo.%s.requests.failed.total.aggregate", "Aggregated failed total Requests"),
METRIC_REQUESTS_NETWORK_FAILED_AGG(
"dubbo.%s.requests.failed.network.total.aggregate", "Aggregated failed network total Requests"),
METRIC_REQUESTS_CODEC_FAILED_AGG(
"dubbo.%s.requests.failed.codec.total.aggregate", "Aggregated failed codec total Requests"),
METRIC_REQUESTS_TOTAL_SERVICE_UNAVAILABLE_FAILED_AGG(
"dubbo.%s.requests.failed.service.unavailable.total.aggregate", "Aggregated failed codec total Requests"),
METRIC_QPS("dubbo.%s.qps.total", "Query Per Seconds"),
METRIC_RT_LAST("dubbo.%s.rt.milliseconds.last", "Last Response Time"),
METRIC_RT_MIN("dubbo.%s.rt.milliseconds.min", "Min Response Time"),
METRIC_RT_MAX("dubbo.%s.rt.milliseconds.max", "Max Response Time"),
METRIC_RT_SUM("dubbo.%s.rt.milliseconds.sum", "Sum Response Time"),
METRIC_RT_AVG("dubbo.%s.rt.milliseconds.avg", "Average Response Time"),
METRIC_RT_P99("dubbo.%s.rt.milliseconds.p99", "Response Time P99"),
METRIC_RT_P95("dubbo.%s.rt.milliseconds.p95", "Response Time P95"),
METRIC_RT_P90("dubbo.%s.rt.milliseconds.p90", "Response Time P90"),
METRIC_RT_P50("dubbo.%s.rt.milliseconds.p50", "Response Time P50"),
METRIC_RT_MIN_AGG("dubbo.%s.rt.min.milliseconds.aggregate", "Aggregated Min Response"),
METRIC_RT_MAX_AGG("dubbo.%s.rt.max.milliseconds.aggregate", "Aggregated Max Response"),
METRIC_RT_AVG_AGG("dubbo.%s.rt.avg.milliseconds.aggregate", "Aggregated Avg Response"),
// register metrics key
REGISTER_METRIC_REQUESTS("dubbo.registry.register.requests.total", "Total Register Requests"),
REGISTER_METRIC_REQUESTS_SUCCEED("dubbo.registry.register.requests.succeed.total", "Succeed Register Requests"),
REGISTER_METRIC_REQUESTS_FAILED("dubbo.registry.register.requests.failed.total", "Failed Register Requests"),
METRIC_RT_HISTOGRAM("dubbo.%s.rt.milliseconds.histogram", "Response Time Histogram"),
GENERIC_METRIC_REQUESTS("dubbo.%s.requests.total", "Total %s Requests"),
GENERIC_METRIC_REQUESTS_SUCCEED("dubbo.%s.requests.succeed.total", "Succeed %s Requests"),
GENERIC_METRIC_REQUESTS_FAILED("dubbo.%s.requests.failed.total", "Failed %s Requests"),
// subscribe metrics key
SUBSCRIBE_METRIC_NUM("dubbo.registry.subscribe.num.total", "Total Subscribe Num"),
SUBSCRIBE_METRIC_NUM_SUCCEED("dubbo.registry.subscribe.num.succeed.total", "Succeed Subscribe Num"),
SUBSCRIBE_METRIC_NUM_FAILED("dubbo.registry.subscribe.num.failed.total", "Failed Subscribe Num"),
// directory metrics key
DIRECTORY_METRIC_NUM_ALL("dubbo.registry.directory.num.all", "All Directory Urls"),
DIRECTORY_METRIC_NUM_VALID("dubbo.registry.directory.num.valid.total", "Valid Directory Urls"),
DIRECTORY_METRIC_NUM_TO_RECONNECT("dubbo.registry.directory.num.to_reconnect.total", "ToReconnect Directory Urls"),
DIRECTORY_METRIC_NUM_DISABLE("dubbo.registry.directory.num.disable.total", "Disable Directory Urls"),
NOTIFY_METRIC_REQUESTS("dubbo.registry.notify.requests.total", "Total Notify Requests"),
NOTIFY_METRIC_NUM_LAST("dubbo.registry.notify.num.last", "Last Notify Nums"),
THREAD_POOL_CORE_SIZE("dubbo.thread.pool.core.size", "Thread Pool Core Size"),
THREAD_POOL_LARGEST_SIZE("dubbo.thread.pool.largest.size", "Thread Pool Largest Size"),
THREAD_POOL_MAX_SIZE("dubbo.thread.pool.max.size", "Thread Pool Max Size"),
THREAD_POOL_ACTIVE_SIZE("dubbo.thread.pool.active.size", "Thread Pool Active Size"),
THREAD_POOL_THREAD_COUNT("dubbo.thread.pool.thread.count", "Thread Pool Thread Count"),
THREAD_POOL_QUEUE_SIZE("dubbo.thread.pool.queue.size", "Thread Pool Queue Size"),
THREAD_POOL_THREAD_REJECT_COUNT("dubbo.thread.pool.reject.thread.count", "Thread Pool Reject Thread Count"),
// metadata push metrics key
METADATA_PUSH_METRIC_NUM("dubbo.metadata.push.num.total", "Total Push Num"),
METADATA_PUSH_METRIC_NUM_SUCCEED("dubbo.metadata.push.num.succeed.total", "Succeed Push Num"),
METADATA_PUSH_METRIC_NUM_FAILED("dubbo.metadata.push.num.failed.total", "Failed Push Num"),
// metadata subscribe metrics key
METADATA_SUBSCRIBE_METRIC_NUM("dubbo.metadata.subscribe.num.total", "Total Metadata Subscribe Num"),
METADATA_SUBSCRIBE_METRIC_NUM_SUCCEED(
"dubbo.metadata.subscribe.num.succeed.total", "Succeed Metadata Subscribe Num"),
METADATA_SUBSCRIBE_METRIC_NUM_FAILED("dubbo.metadata.subscribe.num.failed.total", "Failed Metadata Subscribe Num"),
// register service metrics key
SERVICE_REGISTER_METRIC_REQUESTS("dubbo.registry.register.service.total", "Total Service-Level Register Requests"),
SERVICE_REGISTER_METRIC_REQUESTS_SUCCEED(
"dubbo.registry.register.service.succeed.total", "Succeed Service-Level Register Requests"),
SERVICE_REGISTER_METRIC_REQUESTS_FAILED(
"dubbo.registry.register.service.failed.total", "Failed Service-Level Register Requests"),
// subscribe metrics key
SERVICE_SUBSCRIBE_METRIC_NUM("dubbo.registry.subscribe.service.num.total", "Total Service-Level Subscribe Num"),
SERVICE_SUBSCRIBE_METRIC_NUM_SUCCEED(
"dubbo.registry.subscribe.service.num.succeed.total", "Succeed Service-Level Num"),
SERVICE_SUBSCRIBE_METRIC_NUM_FAILED(
"dubbo.registry.subscribe.service.num.failed.total", "Failed Service-Level Num"),
// store provider metadata service key
STORE_PROVIDER_METADATA("dubbo.metadata.store.provider.total", "Store Provider Metadata"),
STORE_PROVIDER_METADATA_SUCCEED("dubbo.metadata.store.provider.succeed.total", "Succeed Store Provider Metadata"),
STORE_PROVIDER_METADATA_FAILED("dubbo.metadata.store.provider.failed.total", "Failed Store Provider Metadata"),
METADATA_GIT_COMMITID_METRIC("git.commit.id", "Git Commit Id Metrics"),
// consumer metrics key
INVOKER_NO_AVAILABLE_COUNT(
"dubbo.consumer.invoker.no.available.count", "Request Throw No Invoker Available Exception Count"),
// count the number of occurrences of each error code
ERROR_CODE_COUNT("dubbo.error.code.count", "The Count Of Occurrences for Each Error Code"),
// netty metrics key
NETTY_ALLOCATOR_HEAP_MEMORY_USED("netty.allocator.memory.used", "Netty Allocator Memory Used"),
NETTY_ALLOCATOR_DIRECT_MEMORY_USED("netty.allocator.direct.memory.used", "Netty Allocator Direct Memory Used"),
NETTY_ALLOCATOR_PINNED_DIRECT_MEMORY(
"netty.allocator.pinned.direct.memory", "Netty Allocator Pinned Direct Memory"),
NETTY_ALLOCATOR_PINNED_HEAP_MEMORY("netty.allocator.pinned.heap.memory", "Netty Allocator Pinned Heap Memory"),
NETTY_ALLOCATOR_HEAP_ARENAS_NUM("netty.allocator.heap.arenas.num", "Netty Allocator Heap Arenas Num"),
NETTY_ALLOCATOR_DIRECT_ARENAS_NUM("netty.allocator.direct.arenas.num", "Netty Allocator Direct Arenas Num"),
NETTY_ALLOCATOR_NORMAL_CACHE_SIZE("netty.allocator.normal.cache.size", "Netty Allocator Normal Cache Size"),
NETTY_ALLOCATOR_SMALL_CACHE_SIZE("netty.allocator.small.cache.size", "Netty Allocator Small Cache Size"),
NETTY_ALLOCATOR_THREAD_LOCAL_CACHES_NUM(
"netty.allocator.thread.local.caches.num", "Netty Allocator Thread Local Caches Num"),
NETTY_ALLOCATOR_CHUNK_SIZE("netty.allocator.chunk.size", "Netty Allocator Chunk Size"),
;
private String name;
private String description;
public final String getName() {
return this.name;
}
public final String getNameByType(String type) {
return String.format(name, type);
}
public static MetricsKey getMetricsByName(String name) {
for (MetricsKey metricsKey : MetricsKey.values()) {
if (metricsKey.getName().equals(name)) {
return metricsKey;
}
}
return null;
}
public final String getDescription() {
return this.description;
}
MetricsKey(String name, String description) {
this.name = name;
this.description = description;
}
}
|
MetricsKey
|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/internal/operators/observable/ObservableReduceSeedSingle.java
|
{
"start": 1241,
"end": 1803
}
|
class ____<T, R> extends Single<R> {
final ObservableSource<T> source;
final R seed;
final BiFunction<R, ? super T, R> reducer;
public ObservableReduceSeedSingle(ObservableSource<T> source, R seed, BiFunction<R, ? super T, R> reducer) {
this.source = source;
this.seed = seed;
this.reducer = reducer;
}
@Override
protected void subscribeActual(SingleObserver<? super R> observer) {
source.subscribe(new ReduceSeedObserver<>(observer, reducer, seed));
}
static final
|
ObservableReduceSeedSingle
|
java
|
quarkusio__quarkus
|
independent-projects/arc/processor/src/main/java/io/quarkus/arc/processor/InvokerBuilder.java
|
{
"start": 18298,
"end": 19369
}
|
class ____ declares the invoker wrapper
* @param methodName invoker wrapper method name
* @return this builder
* @throws IllegalStateException if this method is called more than once
*/
public InvokerBuilder withInvocationWrapper(Class<?> clazz, String methodName) {
if (invocationWrapper != null) {
throw new IllegalStateException("Invocation wrapper already set");
}
this.invocationWrapper = new InvocationTransformer(InvocationTransformerKind.WRAPPER, clazz, methodName);
return this;
}
/**
* Returns a representation of the built {@link Invoker}.
*
* @return the built invoker
*/
public InvokerInfo build() {
Injection argumentsInjection = Injection.forInvokerArgumentLookups(targetBean.getImplClazz(), targetMethod,
argumentLookups, beanDeployment, injectionPointTransformer);
InvokerInfo result = new InvokerInfo(this, argumentsInjection, beanDeployment);
beanDeployment.addInvoker(result);
return result;
}
}
|
that
|
java
|
FasterXML__jackson-databind
|
src/main/java/tools/jackson/databind/deser/jdk/JDKValueInstantiators.java
|
{
"start": 7303,
"end": 7628
}
|
class ____
extends JDKValueInstantiator
{
public PropertiesInstantiator() {
super(Properties.class);
}
@Override
public Object createUsingDefault(DeserializationContext ctxt) {
return new Properties();
}
}
private static
|
PropertiesInstantiator
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/access/SillyConverter.java
|
{
"start": 231,
"end": 484
}
|
class ____ implements AttributeConverter<String, String> {
@Override
public String convertToDatabaseColumn(String attribute) {
return attribute;
}
@Override
public String convertToEntityAttribute(String dbData) {
return dbData;
}
}
|
SillyConverter
|
java
|
grpc__grpc-java
|
xds/src/test/java/io/grpc/xds/RoutingUtilsTest.java
|
{
"start": 1267,
"end": 9754
}
|
class ____ {
@Test
public void findVirtualHostForHostName_exactMatchFirst() {
String hostname = "a.googleapis.com";
List<Route> routes = Collections.emptyList();
VirtualHost vHost1 = VirtualHost.create("virtualhost01.googleapis.com",
Arrays.asList("a.googleapis.com", "b.googleapis.com"), routes,
ImmutableMap.of());
VirtualHost vHost2 = VirtualHost.create("virtualhost02.googleapis.com",
Collections.singletonList("*.googleapis.com"), routes,
ImmutableMap.of());
VirtualHost vHost3 = VirtualHost.create("virtualhost03.googleapis.com",
Collections.singletonList("*"), routes,
ImmutableMap.of());
List<VirtualHost> virtualHosts = Arrays.asList(vHost1, vHost2, vHost3);
assertThat(RoutingUtils.findVirtualHostForHostName(virtualHosts, hostname))
.isEqualTo(vHost1);
}
@Test
public void findVirtualHostForHostName_preferSuffixDomainOverPrefixDomain() {
String hostname = "a.googleapis.com";
List<Route> routes = Collections.emptyList();
VirtualHost vHost1 = VirtualHost.create("virtualhost01.googleapis.com",
Arrays.asList("*.googleapis.com", "b.googleapis.com"), routes,
ImmutableMap.of());
VirtualHost vHost2 = VirtualHost.create("virtualhost02.googleapis.com",
Collections.singletonList("a.googleapis.*"), routes,
ImmutableMap.of());
VirtualHost vHost3 = VirtualHost.create("virtualhost03.googleapis.com",
Collections.singletonList("*"), routes,
ImmutableMap.of());
List<VirtualHost> virtualHosts = Arrays.asList(vHost1, vHost2, vHost3);
assertThat(RoutingUtils.findVirtualHostForHostName(virtualHosts, hostname))
.isEqualTo(vHost1);
}
@Test
public void findVirtualHostForHostName_asteriskMatchAnyDomain() {
String hostname = "a.googleapis.com";
List<Route> routes = Collections.emptyList();
VirtualHost vHost1 = VirtualHost.create("virtualhost01.googleapis.com",
Collections.singletonList("*"), routes,
ImmutableMap.of());
VirtualHost vHost2 = VirtualHost.create("virtualhost02.googleapis.com",
Collections.singletonList("b.googleapis.com"), routes,
ImmutableMap.of());
List<VirtualHost> virtualHosts = Arrays.asList(vHost1, vHost2);
assertThat(RoutingUtils.findVirtualHostForHostName(virtualHosts, hostname))
.isEqualTo(vHost1);
}
@Test
public void routeMatching_pathOnly() {
Metadata headers = new Metadata();
ThreadSafeRandom random = mock(ThreadSafeRandom.class);
RouteMatch routeMatch1 =
RouteMatch.create(
PathMatcher.fromPath("/FooService/barMethod", true),
Collections.emptyList(), null);
assertThat(RoutingUtils.matchRoute(routeMatch1, "/FooService/barMethod", headers, random))
.isTrue();
assertThat(RoutingUtils.matchRoute(routeMatch1, "/FooService/bazMethod", headers, random))
.isFalse();
RouteMatch routeMatch2 =
RouteMatch.create(
PathMatcher.fromPrefix("/FooService/", true),
Collections.emptyList(), null);
assertThat(RoutingUtils.matchRoute(routeMatch2, "/FooService/barMethod", headers, random))
.isTrue();
assertThat(RoutingUtils.matchRoute(routeMatch2, "/FooService/bazMethod", headers, random))
.isTrue();
assertThat(RoutingUtils.matchRoute(routeMatch2, "/BarService/bazMethod", headers, random))
.isFalse();
RouteMatch routeMatch3 =
RouteMatch.create(
PathMatcher.fromRegEx(Pattern.compile(".*Foo.*")),
Collections.emptyList(), null);
assertThat(RoutingUtils.matchRoute(routeMatch3, "/FooService/barMethod", headers, random))
.isTrue();
}
@Test
public void routeMatching_pathOnly_caseInsensitive() {
Metadata headers = new Metadata();
ThreadSafeRandom random = mock(ThreadSafeRandom.class);
RouteMatch routeMatch1 =
RouteMatch.create(
PathMatcher.fromPath("/FooService/barMethod", false),
Collections.emptyList(), null);
assertThat(RoutingUtils.matchRoute(routeMatch1, "/fooservice/barmethod", headers, random))
.isTrue();
RouteMatch routeMatch2 =
RouteMatch.create(
PathMatcher.fromPrefix("/FooService", false),
Collections.emptyList(), null);
assertThat(RoutingUtils.matchRoute(routeMatch2, "/fooservice/barmethod", headers, random))
.isTrue();
}
@Test
public void routeMatching_withHeaders() {
Metadata headers = new Metadata();
headers.put(Metadata.Key.of("authority", Metadata.ASCII_STRING_MARSHALLER),
"foo.googleapis.com");
headers.put(Metadata.Key.of("grpc-encoding", Metadata.ASCII_STRING_MARSHALLER), "gzip");
headers.put(Metadata.Key.of("user-agent", Metadata.ASCII_STRING_MARSHALLER), "gRPC-Java");
headers.put(Metadata.Key.of("content-length", Metadata.ASCII_STRING_MARSHALLER), "1000");
headers.put(Metadata.Key.of("custom-key", Metadata.ASCII_STRING_MARSHALLER), "custom-value1");
headers.put(Metadata.Key.of("custom-key", Metadata.ASCII_STRING_MARSHALLER), "custom-value2");
ThreadSafeRandom random = mock(ThreadSafeRandom.class);
PathMatcher pathMatcher = PathMatcher.fromPath("/FooService/barMethod", true);
RouteMatch routeMatch1 = RouteMatch.create(
pathMatcher,
Arrays.asList(
HeaderMatcher.forExactValue("grpc-encoding", "gzip", false),
HeaderMatcher.forSafeRegEx("authority", Pattern.compile(".*googleapis.*"), false),
HeaderMatcher.forRange(
"content-length", HeaderMatcher.Range.create(100, 10000), false),
HeaderMatcher.forPresent("user-agent", true, false),
HeaderMatcher.forPrefix("custom-key", "custom-", false),
HeaderMatcher.forSuffix("custom-key", "value2", false)),
null);
assertThat(RoutingUtils.matchRoute(routeMatch1, "/FooService/barMethod", headers, random))
.isTrue();
RouteMatch routeMatch2 = RouteMatch.create(
pathMatcher,
Collections.singletonList(
HeaderMatcher.forSafeRegEx("authority", Pattern.compile(".*googleapis.*"), true)),
null);
assertThat(RoutingUtils.matchRoute(routeMatch2, "/FooService/barMethod", headers, random))
.isFalse();
RouteMatch routeMatch3 = RouteMatch.create(
pathMatcher,
Collections.singletonList(
HeaderMatcher.forExactValue("user-agent", "gRPC-Go", false)), null);
assertThat(RoutingUtils.matchRoute(routeMatch3, "/FooService/barMethod", headers, random))
.isFalse();
RouteMatch routeMatch4 = RouteMatch.create(
pathMatcher,
Collections.singletonList(HeaderMatcher.forPresent("user-agent", false, false)),
null);
assertThat(RoutingUtils.matchRoute(routeMatch4, "/FooService/barMethod", headers, random))
.isFalse();
RouteMatch routeMatch5 = RouteMatch.create(
pathMatcher,
Collections.singletonList(HeaderMatcher.forPresent("user-agent", false, true)), // inverted
null);
assertThat(RoutingUtils.matchRoute(routeMatch5, "/FooService/barMethod", headers, random))
.isTrue();
RouteMatch routeMatch6 = RouteMatch.create(
pathMatcher,
Collections.singletonList(HeaderMatcher.forPresent("user-agent", true, true)),
null);
assertThat(RoutingUtils.matchRoute(routeMatch6, "/FooService/barMethod", headers, random))
.isFalse();
RouteMatch routeMatch7 = RouteMatch.create(
pathMatcher,
Collections.singletonList(
HeaderMatcher.forExactValue("custom-key", "custom-value1,custom-value2", false)),
null);
assertThat(RoutingUtils.matchRoute(routeMatch7, "/FooService/barMethod", headers, random))
.isTrue();
RouteMatch routeMatch8 = RouteMatch.create(
pathMatcher,
Collections.singletonList(
HeaderMatcher.forExactValue("content-type", "application/grpc", false)),
null);
assertThat(RoutingUtils.matchRoute(
routeMatch8, "/FooService/barMethod", new Metadata(), random)).isTrue();
RouteMatch routeMatch9 = RouteMatch.create(
pathMatcher,
Collections.singletonList(
HeaderMatcher.forExactValue("custom-key!", "custom-value1,custom-value2", false)),
null);
assertThat(RoutingUtils.matchRoute(routeMatch9, "/FooService/barMethod", headers, random))
.isFalse();
}
}
|
RoutingUtilsTest
|
java
|
quarkusio__quarkus
|
extensions/redis-client/runtime/src/test/java/io/quarkus/redis/datasource/HyperLogLogCommandsTest.java
|
{
"start": 584,
"end": 4133
}
|
class ____ extends DatasourceTestBase {
private RedisDataSource ds;
static AtomicInteger count = new AtomicInteger(0);
private HyperLogLogCommands<String, Person> hll;
@BeforeEach
void initialize() {
ds = new BlockingRedisDataSourceImpl(vertx, redis, api, Duration.ofSeconds(1));
hll = ds.hyperloglog(Person.class);
}
@Test
void getDataSource() {
assertThat(ds).isEqualTo(hll.getDataSource());
}
@Test
void pfadd() {
String k = getKey();
assertThat(hll.pfadd(k, Person.person1, Person.person1)).isTrue();
assertThat(hll.pfadd(k, Person.person1, Person.person1)).isFalse();
Assertions.assertThat(hll.pfadd(k, Person.person1)).isFalse();
}
@Test
void pfaddNoValues() {
assertThatThrownBy(() -> hll.pfadd(key)).isInstanceOf(IllegalArgumentException.class);
}
@SuppressWarnings("ConfusingArgumentToVarargsMethod")
@Test
void pfaddNullValues() {
assertThatThrownBy(() -> hll.pfadd(key, null))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("`values`");
assertThatThrownBy(() -> hll.pfadd(key, Person.person1, null))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("`values`");
}
private String getKey() {
return "key-hll-" + count.getAndIncrement();
}
@Test
void pfmerge() {
String k1 = getKey();
String k2 = getKey();
String k3 = getKey();
hll.pfadd(k1, Person.person1);
hll.pfadd(k2, new Person("Bossk", ""));
hll.pfadd(k3, new Person("Lando", "Calrissian"));
hll.pfmerge(k1, k2, k3);
assertThat(hll.pfcount(k1)).isEqualTo(3);
String k4 = getKey();
String k5 = getKey();
hll.pfadd(k4, new Person("Lobot", ""), new Person("Ackbar", ""));
hll.pfadd(k5, new Person("Ackbar", ""), new Person("Mon", "Mothma"));
String k6 = getKey();
hll.pfmerge(k6, k4, k5);
assertThat(hll.pfcount(k6)).isEqualTo(3);
}
@Test
void pfmergeNoKeys() {
assertThatThrownBy(() -> hll.pfmerge(key)).isInstanceOf(IllegalArgumentException.class);
}
@Test
void pfcount() {
String k0 = getKey();
String k1 = getKey();
hll.pfadd(k0, Person.person1);
hll.pfadd(k1, Person.person2);
assertThat(hll.pfcount(k0)).isEqualTo(1);
assertThat(hll.pfcount(k0, k1)).isEqualTo(2);
}
@Test
void pfcountNoKeys() {
assertThatThrownBy(() -> hll.pfcount()).isInstanceOf(IllegalArgumentException.class);
}
@Test
void pfaddPfmergePfCount() {
String k0 = getKey();
String k1 = getKey();
String k2 = getKey();
hll.pfadd(k0, new Person("Lobot", ""), new Person("Ackbar", ""));
hll.pfadd(k1, new Person("Ackbar", ""), new Person("Mon", "Mothma"));
hll.pfmerge(k2, k0, k1);
assertThat(hll.pfcount(k2)).isEqualTo(3);
}
@Test
void pfaddWithTypeReference() {
String k = getKey();
var hll = ds.hyperloglog(new TypeReference<List<Person>>() {
// Empty on purpose
});
var l1 = List.of(Person.person1, Person.person2);
var l2 = List.of(Person.person3, Person.person2);
assertThat(hll.pfadd(k, l1, l2)).isTrue();
assertThat(hll.pfadd(k, l1, l1)).isFalse();
Assertions.assertThat(hll.pfadd(k, l1)).isFalse();
}
}
|
HyperLogLogCommandsTest
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/ParameterNameTest.java
|
{
"start": 3942,
"end": 4186
}
|
class ____ extends Foo {}
""")
.doTest();
}
@Test
public void namedParametersChecker_ignoresCall_withNoComments() {
testHelper
.addSourceLines(
"Test.java",
"""
abstract
|
Baz
|
java
|
quarkusio__quarkus
|
extensions/hibernate-search-orm-elasticsearch/runtime-dev/src/main/java/io/quarkus/hibernate/search/orm/elasticsearch/runtime/dev/HibernateSearchElasticsearchDevController.java
|
{
"start": 502,
"end": 3275
}
|
class ____ {
private static final HibernateSearchElasticsearchDevController INSTANCE = new HibernateSearchElasticsearchDevController();
public static HibernateSearchElasticsearchDevController get() {
return INSTANCE;
}
private final Set<String> activePersistenceUnitNames = new HashSet<>();
private HibernateSearchElasticsearchDevController() {
}
void setActivePersistenceUnitNames(Set<String> activePersistenceUnitNames) {
this.activePersistenceUnitNames.clear();
this.activePersistenceUnitNames.addAll(activePersistenceUnitNames);
}
public HibernateSearchElasticsearchDevInfo getInfo() {
Map<String, SearchMapping> mappings = searchMappings(activePersistenceUnitNames);
if (mappings.isEmpty()) {
return new HibernateSearchElasticsearchDevInfo();
}
return mappings.entrySet().stream()
.map(mapping -> new HibernateSearchElasticsearchDevInfo.PersistenceUnit(mapping.getKey(),
mapping.getValue().allIndexedEntities().stream()
.map(HibernateSearchElasticsearchDevInfo.IndexedEntity::new).sorted()
.collect(Collectors.toList())))
.collect(Collector.of(HibernateSearchElasticsearchDevInfo::new, HibernateSearchElasticsearchDevInfo::add,
(left, right) -> {
left.addAll(right);
return left;
}));
}
public SearchMapping searchMapping(String persistenceUnitName) {
return Arc.container()
.select(SearchMapping.class,
new io.quarkus.hibernate.orm.PersistenceUnit.PersistenceUnitLiteral(persistenceUnitName))
.get();
}
public Map<String, SearchMapping> searchMappings(Set<String> persistenceUnitNames) {
return persistenceUnitNames.stream().map(this::searchMapping)
.collect(Collectors.toMap(HibernateSearchElasticsearchDevController::getPersistenceUnitName,
mapping -> mapping));
}
private static String getPersistenceUnitName(SearchMapping searchMapping) {
SessionFactoryImplementor sessionFactory = searchMapping.toOrmSessionFactory().unwrap(SessionFactoryImplementor.class);
String name = sessionFactory.getName();
if (name != null) {
return name;
}
Object persistenceUnitName = sessionFactory.getProperties().get(PERSISTENCE_UNIT_NAME);
if (persistenceUnitName != null) {
return persistenceUnitName.toString();
}
return PersistenceUnitUtil.DEFAULT_PERSISTENCE_UNIT_NAME;
}
}
|
HibernateSearchElasticsearchDevController
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java
|
{
"start": 8041,
"end": 8480
}
|
class ____ {
LocalResourceTrackerState publicTrackerState =
new LocalResourceTrackerState(null, null);
RecoveryIterator<Entry<String, RecoveredUserResources>> it = null;
public LocalResourceTrackerState getPublicTrackerState() {
return publicTrackerState;
}
public RecoveryIterator<Entry<String, RecoveredUserResources>> getIterator() {
return it;
}
}
public static
|
RecoveredLocalizationState
|
java
|
alibaba__druid
|
core/src/main/java/com/alibaba/druid/sql/ast/expr/SQLExtractExpr.java
|
{
"start": 954,
"end": 3207
}
|
class ____ extends SQLExprImpl implements SQLReplaceable {
private SQLExpr value;
private SQLIntervalUnit unit;
public SQLExtractExpr() {
}
public SQLExtractExpr clone() {
SQLExtractExpr x = new SQLExtractExpr();
if (value != null) {
x.setValue(value.clone());
}
x.unit = unit;
return x;
}
public SQLExpr getValue() {
return value;
}
public void setValue(SQLExpr value) {
if (value != null) {
value.setParent(this);
}
this.value = value;
}
public SQLIntervalUnit getUnit() {
return unit;
}
public void setUnit(SQLIntervalUnit unit) {
this.unit = unit;
}
protected void accept0(SQLASTVisitor visitor) {
if (visitor.visit(this)) {
if (this.value != null) {
this.value.accept(visitor);
}
}
visitor.endVisit(this);
}
@Override
public boolean replace(SQLExpr expr, SQLExpr target) {
if (this.value == expr) {
setValue(target);
return true;
}
return false;
}
@Override
public List getChildren() {
return Collections.singletonList(value);
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((unit == null) ? 0 : unit.hashCode());
result = prime * result + ((value == null) ? 0 : value.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (!(obj instanceof SQLExtractExpr)) {
return false;
}
SQLExtractExpr other = (SQLExtractExpr) obj;
if (unit != other.unit) {
return false;
}
if (value == null) {
if (other.value != null) {
return false;
}
} else if (!value.equals(other.value)) {
return false;
}
return true;
}
public SQLDataType computeDataType() {
return SQLIntegerExpr.DATA_TYPE;
}
}
|
SQLExtractExpr
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/parser/bug/Bug_for_lixianfeng.java
|
{
"start": 215,
"end": 476
}
|
class ____ extends TestCase {
public void test_long_list() throws Exception {
String str = "{\"id\":14281,\"name\":\"test\",\"canPurchase\":1,\"categoryId\":955063}";
JSON.parseObject(str, Te.class);
}
public static
|
Bug_for_lixianfeng
|
java
|
netty__netty
|
transport/src/test/java/io/netty/channel/socket/nio/NioSocketChannelTest.java
|
{
"start": 2482,
"end": 12298
}
|
class ____ extends AbstractNioChannelTest<NioSocketChannel> {
/**
* Reproduces the issue #1600
*/
@Test
public void testFlushCloseReentrance() throws Exception {
EventLoopGroup group = new MultiThreadIoEventLoopGroup(1, NioIoHandler.newFactory());
try {
final Queue<ChannelFuture> futures = new LinkedBlockingQueue<ChannelFuture>();
ServerBootstrap sb = new ServerBootstrap();
sb.group(group).channel(NioServerSocketChannel.class);
sb.childOption(ChannelOption.SO_SNDBUF, 1024);
sb.childHandler(new ChannelInboundHandlerAdapter() {
@Override
public void channelActive(ChannelHandlerContext ctx) throws Exception {
// Write a large enough data so that it is split into two loops.
futures.add(ctx.write(
ctx.alloc().buffer().writeZero(1048576)).addListener(ChannelFutureListener.CLOSE));
futures.add(ctx.write(ctx.alloc().buffer().writeZero(1048576)));
ctx.flush();
futures.add(ctx.write(ctx.alloc().buffer().writeZero(1048576)));
ctx.flush();
}
});
SocketAddress address = sb.bind(0).sync().channel().localAddress();
Socket s = new Socket(NetUtil.LOCALHOST, ((InetSocketAddress) address).getPort());
InputStream in = s.getInputStream();
byte[] buf = new byte[8192];
for (;;) {
if (in.read(buf) == -1) {
break;
}
// Wait a little bit so that the write attempts are split into multiple flush attempts.
Thread.sleep(10);
}
s.close();
assertEquals(3, futures.size());
ChannelFuture f1 = futures.poll();
ChannelFuture f2 = futures.poll();
ChannelFuture f3 = futures.poll();
assertTrue(f1.isSuccess());
assertTrue(f2.isDone());
assertFalse(f2.isSuccess());
assertInstanceOf(ClosedChannelException.class, f2.cause());
assertTrue(f3.isDone());
assertFalse(f3.isSuccess());
assertInstanceOf(ClosedChannelException.class, f3.cause());
} finally {
group.shutdownGracefully().sync();
}
}
/**
* Reproduces the issue #1679
*/
@Test
public void testFlushAfterGatheredFlush() throws Exception {
EventLoopGroup group = new MultiThreadIoEventLoopGroup(1, NioIoHandler.newFactory());
try {
ServerBootstrap sb = new ServerBootstrap();
sb.group(group).channel(NioServerSocketChannel.class);
sb.childHandler(new ChannelInboundHandlerAdapter() {
@Override
public void channelActive(final ChannelHandlerContext ctx) throws Exception {
// Trigger a gathering write by writing two buffers.
ctx.write(Unpooled.wrappedBuffer(new byte[] { 'a' }));
ChannelFuture f = ctx.write(Unpooled.wrappedBuffer(new byte[] { 'b' }));
f.addListener(new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture future) throws Exception {
// This message must be flushed
ctx.writeAndFlush(Unpooled.wrappedBuffer(new byte[]{'c'}));
}
});
ctx.flush();
}
});
SocketAddress address = sb.bind(0).sync().channel().localAddress();
Socket s = new Socket(NetUtil.LOCALHOST, ((InetSocketAddress) address).getPort());
DataInput in = new DataInputStream(s.getInputStream());
byte[] buf = new byte[3];
in.readFully(buf);
assertEquals("abc", new String(buf, CharsetUtil.US_ASCII));
s.close();
} finally {
group.shutdownGracefully().sync();
}
}
// Test for https://github.com/netty/netty/issues/4805
@Test
@Timeout(value = 3000, unit = TimeUnit.MILLISECONDS)
public void testChannelReRegisterReadSameEventLoop() throws Exception {
testChannelReRegisterRead(true);
}
@Test
@Timeout(value = 3000, unit = TimeUnit.MILLISECONDS)
public void testChannelReRegisterReadDifferentEventLoop() throws Exception {
testChannelReRegisterRead(false);
}
private static void testChannelReRegisterRead(final boolean sameEventLoop) throws Exception {
EventLoopGroup group = new MultiThreadIoEventLoopGroup(2, NioIoHandler.newFactory());
final CountDownLatch latch = new CountDownLatch(1);
// Just some random bytes
byte[] bytes = new byte[1024];
ThreadLocalRandom.current().nextBytes(bytes);
Channel sc = null;
Channel cc = null;
ServerBootstrap b = new ServerBootstrap();
try {
b.group(group)
.channel(NioServerSocketChannel.class)
.childOption(ChannelOption.SO_KEEPALIVE, true)
.childHandler(new ChannelInitializer<Channel>() {
@Override
protected void initChannel(Channel ch) throws Exception {
ChannelPipeline pipeline = ch.pipeline();
pipeline.addLast(new SimpleChannelInboundHandler<ByteBuf>() {
@Override
protected void channelRead0(ChannelHandlerContext ctx, ByteBuf byteBuf) {
// We was able to read something from the Channel after reregister.
latch.countDown();
}
@Override
public void channelActive(final ChannelHandlerContext ctx) throws Exception {
final EventLoop loop = group.next();
if (sameEventLoop) {
deregister(ctx, loop);
} else {
loop.execute(new Runnable() {
@Override
public void run() {
deregister(ctx, loop);
}
});
}
}
private void deregister(ChannelHandlerContext ctx, final EventLoop loop) {
// As soon as the channel becomes active re-register it to another
// EventLoop. After this is done we should still receive the data that
// was written to the channel.
ctx.deregister().addListener(new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture cf) {
Channel channel = cf.channel();
assertNotSame(loop, channel.eventLoop());
group.next().register(channel);
}
});
}
});
}
});
sc = b.bind(0).syncUninterruptibly().channel();
Bootstrap bootstrap = new Bootstrap();
bootstrap.group(group).channel(NioSocketChannel.class);
bootstrap.handler(new ChannelInboundHandlerAdapter());
cc = bootstrap.connect(sc.localAddress()).syncUninterruptibly().channel();
cc.writeAndFlush(Unpooled.wrappedBuffer(bytes)).syncUninterruptibly();
latch.await();
} finally {
if (cc != null) {
cc.close();
}
if (sc != null) {
sc.close();
}
group.shutdownGracefully();
}
}
@Test
@Timeout(value = 3000, unit = TimeUnit.MILLISECONDS)
public void testShutdownOutputAndClose() throws IOException {
EventLoopGroup group = new MultiThreadIoEventLoopGroup(1, NioIoHandler.newFactory());
ServerSocket socket = new ServerSocket();
socket.bind(new InetSocketAddress(0));
Socket accepted = null;
try {
Bootstrap sb = new Bootstrap();
sb.group(group).channel(NioSocketChannel.class);
sb.handler(new ChannelInboundHandlerAdapter());
SocketChannel channel = (SocketChannel) sb.connect(socket.getLocalSocketAddress())
.syncUninterruptibly().channel();
accepted = socket.accept();
channel.shutdownOutput().syncUninterruptibly();
channel.close().syncUninterruptibly();
} finally {
if (accepted != null) {
try {
accepted.close();
} catch (IOException ignore) {
// ignore
}
}
try {
socket.close();
} catch (IOException ignore) {
// ignore
}
group.shutdownGracefully();
}
}
@Override
protected NioSocketChannel newNioChannel() {
return new NioSocketChannel();
}
@Override
protected NetworkChannel jdkChannel(NioSocketChannel channel) {
return channel.javaChannel();
}
@Override
protected SocketOption<?> newInvalidOption() {
return StandardSocketOptions.IP_MULTICAST_IF;
}
}
|
NioSocketChannelTest
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/refaster/UStaticIdentTest.java
|
{
"start": 963,
"end": 2614
}
|
class ____ extends AbstractUTreeTest {
@Test
public void equality() {
new EqualsTester()
.addEqualityGroup(
UStaticIdent.create(
"java.lang.Integer",
"valueOf",
UMethodType.create(
UClassType.create("java.lang.Integer"), UClassType.create("java.lang.String"))))
.addEqualityGroup(
UStaticIdent.create(
"java.lang.Integer",
"valueOf",
UMethodType.create(
UClassType.create("java.lang.Integer"),
UClassType.create("java.lang.String"),
UPrimitiveType.INT)))
.addEqualityGroup(
UStaticIdent.create(
"java.lang.Integer",
"getInteger",
UMethodType.create(
UClassType.create("java.lang.Integer"), UClassType.create("java.lang.String"))))
.testEquals();
}
@Test
public void serialization() {
SerializableTester.reserializeAndAssert(
UStaticIdent.create(
"java.lang.Integer",
"valueOf",
UMethodType.create(
UClassType.create("java.lang.Integer"), UClassType.create("java.lang.String"))));
}
@Test
public void inline() {
ImportPolicy.bind(context, ImportPolicy.IMPORT_TOP_LEVEL);
assertInlines(
"Integer.valueOf",
UStaticIdent.create(
"java.lang.Integer",
"valueOf",
UMethodType.create(
UClassType.create("java.lang.Integer"), UClassType.create("java.lang.String"))));
}
}
|
UStaticIdentTest
|
java
|
junit-team__junit5
|
platform-tests/src/test/java/org/junit/platform/suite/engine/SuiteEngineTests.java
|
{
"start": 28178,
"end": 28306
}
|
class ____ {
}
@Suite
@SelectClasses(names = "org.junit.platform.suite.engine.testcases.SingleTestTestCase")
|
AbstractInnerSuite
|
java
|
google__guice
|
extensions/grapher/src/com/google/inject/grapher/graphviz/NodeStyle.java
|
{
"start": 849,
"end": 1179
}
|
enum ____ {
BOLD("bold"),
DASHED("dashed"),
DIAGONALS("diagonals"),
DOTTED("dotted"),
INVISIBLE("invis"),
FILLED("filled"),
ROUNDED("rounded"),
SOLID("solid");
private final String name;
NodeStyle(String name) {
this.name = name;
}
@Override
public String toString() {
return name;
}
}
|
NodeStyle
|
java
|
quarkusio__quarkus
|
independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/buildextension/qualifiers/AdditionalQualifiersTest.java
|
{
"start": 3565,
"end": 3666
}
|
class ____ {
}
@ToBeQualifierWithNonBindingField(foo = "bzzzz")
@Singleton
static
|
Alpha
|
java
|
apache__camel
|
core/camel-management/src/test/java/org/apache/camel/management/RemoveRouteDefinitionTest.java
|
{
"start": 1266,
"end": 3704
}
|
class ____ extends ManagementTestSupport {
@Test
public void testShutdownRoute() throws Exception {
MBeanServer mbeanServer = getMBeanServer();
Set<ObjectName> set = mbeanServer.queryNames(new ObjectName("*:type=routes,*"), null);
assertEquals(1, set.size());
ObjectName on = set.iterator().next();
boolean registered = mbeanServer.isRegistered(on);
assertTrue(registered, "Should be registered");
context.getRouteController().stopRoute("route1");
context.removeRoute("route1");
// route is shutdown (= also removed), so its not longer in JMX
set = mbeanServer.queryNames(new ObjectName("*:type=routes,*"), null);
assertEquals(0, set.size());
}
@Test
public void testStopAndRemoveRoute() throws Exception {
MBeanServer mbeanServer = getMBeanServer();
Set<ObjectName> set = mbeanServer.queryNames(new ObjectName("*:type=routes,*"), null);
assertEquals(1, set.size());
ObjectName on = set.iterator().next();
boolean registered = mbeanServer.isRegistered(on);
assertTrue(registered, "Should be registered");
// must stop before we can remove
context.getRouteController().stopRoute("route1");
context.removeRoute("route1");
// route is removed, so its not longer in JMX
set = mbeanServer.queryNames(new ObjectName("*:type=routes,*"), null);
assertEquals(0, set.size());
}
@Test
public void testStopRoute() throws Exception {
MBeanServer mbeanServer = getMBeanServer();
Set<ObjectName> set = mbeanServer.queryNames(new ObjectName("*:type=routes,*"), null);
assertEquals(1, set.size());
ObjectName on = set.iterator().next();
boolean registered = mbeanServer.isRegistered(on);
assertTrue(registered, "Should be registered");
context.getRouteController().stopRoute("route1");
// route is only stopped so its still in JMX
set = mbeanServer.queryNames(new ObjectName("*:type=routes,*"), null);
assertEquals(1, set.size());
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start").routeId("route1").to("log:foo").to("mock:result");
}
};
}
}
|
RemoveRouteDefinitionTest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/engine/jdbc/env/internal/JdbcEnvironmentInitiator.java
|
{
"start": 20668,
"end": 21519
}
|
class ____ implements JdbcConnectionAccess {
private final MultiTenantConnectionProvider<?> connectionProvider;
public MultiTenantConnectionProviderJdbcConnectionAccess(MultiTenantConnectionProvider<?> connectionProvider) {
this.connectionProvider = connectionProvider;
}
public MultiTenantConnectionProvider<?> getConnectionProvider() {
return connectionProvider;
}
@Override
public Connection obtainConnection() throws SQLException {
return connectionProvider.getAnyConnection();
}
@Override
public void releaseConnection(Connection connection) throws SQLException {
connectionProvider.releaseAnyConnection( connection );
}
@Override
public boolean supportsAggressiveRelease() {
return connectionProvider.supportsAggressiveRelease();
}
}
private static
|
MultiTenantConnectionProviderJdbcConnectionAccess
|
java
|
google__guice
|
extensions/grapher/src/com/google/inject/grapher/BindingEdge.java
|
{
"start": 1115,
"end": 1198
}
|
class ____ provides the binding's type. */
PROVIDER,
/** Binding is to the
|
that
|
java
|
apache__camel
|
core/camel-api/src/main/java/org/apache/camel/GlobalEndpointConfiguration.java
|
{
"start": 950,
"end": 2765
}
|
interface ____ {
boolean isLazyStartProducer();
/**
* Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow
* CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause
* the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled
* during routing messages via Camel's routing error handlers. Beware that when the first message is processed then
* creating and starting the producer may take a little time and prolong the total processing time of the
* processing.
*/
void setLazyStartProducer(boolean lazyStartProducer);
boolean isBridgeErrorHandler();
/**
* Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the
* consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by
* the routing Error Handler.
* <p/>
* By default, the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be
* logged at WARN/ERROR level and ignored.
*/
void setBridgeErrorHandler(boolean bridgeErrorHandler);
boolean isAutowiredEnabled();
/**
* Whether autowiring is enabled. This is used for automatic autowiring options (the option must be marked as
* autowired) by looking up in the registry to find if there is a single instance of matching type, which then gets
* configured on the component. This can be used for automatic configuring JDBC data sources, JMS connection
* factories, AWS Clients, etc.
*/
void setAutowiredEnabled(boolean autowiredEnabled);
}
|
GlobalEndpointConfiguration
|
java
|
google__jimfs
|
jimfs/src/main/java/com/google/common/jimfs/OwnerAttributeProvider.java
|
{
"start": 1234,
"end": 3263
}
|
class ____ extends AttributeProvider {
private static final ImmutableSet<String> ATTRIBUTES = ImmutableSet.of("owner");
private static final UserPrincipal DEFAULT_OWNER = createUserPrincipal("user");
@Override
public String name() {
return "owner";
}
@Override
public ImmutableSet<String> fixedAttributes() {
return ATTRIBUTES;
}
@Override
public ImmutableMap<String, ?> defaultValues(Map<String, ?> userProvidedDefaults) {
Object userProvidedOwner = userProvidedDefaults.get("owner:owner");
UserPrincipal owner = DEFAULT_OWNER;
if (userProvidedOwner != null) {
if (userProvidedOwner instanceof String) {
owner = createUserPrincipal((String) userProvidedOwner);
} else {
throw invalidType("owner", "owner", userProvidedOwner, String.class, UserPrincipal.class);
}
}
return ImmutableMap.of("owner:owner", owner);
}
@Override
public @Nullable Object get(File file, String attribute) {
if (attribute.equals("owner")) {
return file.getAttribute("owner", "owner");
}
return null;
}
@Override
public void set(File file, String view, String attribute, Object value, boolean create) {
if (attribute.equals("owner")) {
checkNotCreate(view, attribute, create);
UserPrincipal user = checkType(view, attribute, value, UserPrincipal.class);
// TODO(cgdecker): Do we really need to do this? Any reason not to allow any UserPrincipal?
if (!(user instanceof UserLookupService.JimfsUserPrincipal)) {
user = createUserPrincipal(user.getName());
}
file.setAttribute("owner", "owner", user);
}
}
@Override
public Class<FileOwnerAttributeView> viewType() {
return FileOwnerAttributeView.class;
}
@Override
public FileOwnerAttributeView view(
FileLookup lookup, ImmutableMap<String, FileAttributeView> inheritedViews) {
return new View(lookup);
}
/** Implementation of {@link FileOwnerAttributeView}. */
private static final
|
OwnerAttributeProvider
|
java
|
quarkusio__quarkus
|
independent-projects/resteasy-reactive/server/vertx/src/test/java/org/jboss/resteasy/reactive/server/vertx/test/path/WhitespaceInPathTest.java
|
{
"start": 822,
"end": 985
}
|
class ____ {
@Path(" yolo /{name}")
@GET
public String hello(String name) {
return "yolo " + name;
}
}
}
|
HelloResource
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/RemoteIterators.java
|
{
"start": 13392,
"end": 15525
}
|
class ____<S, T>
implements RemoteIterator<T>, IOStatisticsSource, Closeable {
/**
* Source iterator.
*/
private final RemoteIterator<S> source;
private final Closeable sourceToClose;
protected WrappingRemoteIterator(final RemoteIterator<S> source) {
this.source = requireNonNull(source);
sourceToClose = new MaybeClose(source);
}
protected RemoteIterator<S> getSource() {
return source;
}
@Override
public IOStatistics getIOStatistics() {
return retrieveIOStatistics(source);
}
@Override
public void close() throws IOException {
sourceToClose.close();
}
/**
* Check for the source having a next element.
* If it does not, this object's close() method
* is called and false returned
* @return true if there is a new value
* @throws IOException failure to retrieve next value
*/
protected boolean sourceHasNext() throws IOException {
boolean hasNext;
try {
hasNext = getSource().hasNext();
} catch (IOException e) {
IOUtils.cleanupWithLogger(LOG, this);
throw e;
}
if (!hasNext) {
// there is nothing less so automatically close.
close();
}
return hasNext;
}
/**
* Get the next source value.
* This calls {@link #sourceHasNext()} first to verify
* that there is data.
* @return the next value
* @throws IOException failure
* @throws NoSuchElementException no more data
*/
protected S sourceNext() throws IOException {
try {
if (!sourceHasNext()) {
throw new NoSuchElementException();
}
return getSource().next();
} catch (NoSuchElementException | IOException e) {
IOUtils.cleanupWithLogger(LOG, this);
throw e;
}
}
@Override
public String toString() {
return source.toString();
}
}
/**
* Iterator taking a source and a transformational function.
* @param <S> source type
* @param <T> final output type.There
*/
private static final
|
WrappingRemoteIterator
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/common/cache/Cache.java
|
{
"start": 5892,
"end": 6445
}
|
class ____<K, V> {
final K key;
final V value;
final long writeTime;
volatile long accessTime;
Entry<K, V> before;
Entry<K, V> after;
State state = State.NEW;
Entry(K key, V value, long writeTime) {
this.key = key;
this.value = value;
this.writeTime = this.accessTime = writeTime;
}
}
/**
* A cache segment.
* <p>
* A CacheSegment is backed by a HashMap and is protected by a read/write lock.
*/
private final
|
Entry
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRMAppSubmissionData.java
|
{
"start": 5596,
"end": 5677
}
|
class ____ prepare all data required to submit an app.
*/
public static final
|
to
|
java
|
alibaba__druid
|
core/src/main/java/com/alibaba/druid/pool/vendor/MSSQLValidConnectionChecker.java
|
{
"start": 922,
"end": 1720
}
|
class ____ extends ValidConnectionCheckerAdapter implements ValidConnectionChecker, Serializable {
private static final long serialVersionUID = 1L;
private static final String DEFAULT_VALIDATION_QUERY = "SELECT 1";
public MSSQLValidConnectionChecker() {
}
public boolean isValidConnection(final Connection conn,
String validateQuery,
int validationQueryTimeout) throws Exception {
if (conn.isClosed()) {
return false;
}
if (StringUtils.isEmpty(validateQuery)) {
validateQuery = DEFAULT_VALIDATION_QUERY;
}
return ValidConnectionCheckerAdapter.execValidQuery(conn, validateQuery, validationQueryTimeout);
}
}
|
MSSQLValidConnectionChecker
|
java
|
google__auto
|
value/src/test/java/com/google/auto/value/extension/memoized/MemoizedTest.java
|
{
"start": 5394,
"end": 5652
}
|
class ____ {
int overrideHashCode;
int hashCodeCount;
abstract EqualsCounter equalsCounter();
@Memoized
@Override
public int hashCode() {
hashCodeCount++;
return overrideHashCode;
}
static
|
HashCodeEqualsOptimization
|
java
|
netty__netty
|
handler-proxy/src/test/java/io/netty/handler/proxy/TestMode.java
|
{
"start": 670,
"end": 738
}
|
enum ____ {
INTERMEDIARY,
TERMINAL,
UNRESPONSIVE,
}
|
TestMode
|
java
|
apache__dubbo
|
dubbo-common/src/test/java/org/apache/dubbo/common/extension/ext6_wrap/impl/Ext6Impl1.java
|
{
"start": 968,
"end": 1093
}
|
class ____ implements WrappedExt {
public String echo(URL url, String s) {
return "Ext6Impl1-echo";
}
}
|
Ext6Impl1
|
java
|
hibernate__hibernate-orm
|
hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/strategy/IdentifierReuseTest.java
|
{
"start": 1073,
"end": 2147
}
|
class ____ {
@Test
public void testIdentifierReuse(EntityManagerFactoryScope scope) {
final Integer reusedId = 1;
saveUpdateAndRemoveEntity( scope, reusedId );
saveUpdateAndRemoveEntity( scope, reusedId );
scope.inEntityManager( em -> {
assertEquals(
Arrays.asList( 1, 2, 3, 4, 5, 6 ),
AuditReaderFactory.get( em ).getRevisions( IntNoAutoIdTestEntity.class, reusedId )
);
} );
}
private void saveUpdateAndRemoveEntity(EntityManagerFactoryScope scope, Integer id) {
scope.inTransaction( em -> {
IntNoAutoIdTestEntity entity = new IntNoAutoIdTestEntity( 0, id );
em.persist( entity );
assertEquals( id, entity.getId() );
} );
scope.inTransaction( em -> {
IntNoAutoIdTestEntity entity = em.find( IntNoAutoIdTestEntity.class, id );
entity.setNumVal( 1 );
entity = em.merge( entity );
assertEquals( id, entity.getId() );
} );
scope.inTransaction( em -> {
IntNoAutoIdTestEntity entity = em.find( IntNoAutoIdTestEntity.class, id );
assertNotNull( entity );
em.remove( entity );
} );
}
}
|
IdentifierReuseTest
|
java
|
apache__camel
|
test-infra/camel-test-infra-hivemq/src/test/java/org/apache/camel/test/infra/hivemq/services/HiveMQService.java
|
{
"start": 1041,
"end": 1131
}
|
interface ____ extends TestService, HiveMQInfraService, ContainerTestService {
}
|
HiveMQService
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/UnnecessaryLambdaTest.java
|
{
"start": 6422,
"end": 6937
}
|
class ____ {
private static String f(String x) {
return "hello " + x;
}
void g() {
Function<String, String> l = Test::f;
System.err.println(f("world"));
}
}
""")
.doTest();
}
@Test
public void variable_static_butNotUpperCased() {
testHelper
.addInputLines(
"Test.java",
"""
import java.util.function.Function;
|
Test
|
java
|
elastic__elasticsearch
|
test/framework/src/main/java/org/elasticsearch/datageneration/matchers/source/FieldSpecificMatcher.java
|
{
"start": 27523,
"end": 29233
}
|
class ____ implements FieldSpecificMatcher {
private final String fieldType;
private final XContentBuilder actualMappings;
private final Settings.Builder actualSettings;
private final XContentBuilder expectedMappings;
private final Settings.Builder expectedSettings;
ExactMatcher(
String fieldType,
XContentBuilder actualMappings,
Settings.Builder actualSettings,
XContentBuilder expectedMappings,
Settings.Builder expectedSettings
) {
this.fieldType = fieldType;
this.actualMappings = actualMappings;
this.actualSettings = actualSettings;
this.expectedMappings = expectedMappings;
this.expectedSettings = expectedSettings;
}
@Override
public MatchResult match(
List<Object> actual,
List<Object> expected,
Map<String, Object> actualMapping,
Map<String, Object> expectedMapping
) {
return actual.equals(expected)
? MatchResult.match()
: MatchResult.noMatch(
formatErrorMessage(
actualMappings,
actualSettings,
expectedMappings,
expectedSettings,
"Values of type ["
+ fieldType
+ "] were expected to match exactly "
+ "but don't match, values "
+ prettyPrintCollections(actual, expected)
)
);
}
}
|
ExactMatcher
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceMetricsSink.java
|
{
"start": 1697,
"end": 3577
}
|
class ____ implements MetricsSink {
private static final Logger log =
LoggerFactory.getLogger(ServiceMetricsSink.class);
private ServiceTimelinePublisher serviceTimelinePublisher;
public ServiceMetricsSink() {
}
public ServiceMetricsSink(ServiceTimelinePublisher publisher) {
serviceTimelinePublisher = publisher;
}
/**
* Publishes service and component metrics to ATS.
*/
@Override
public void putMetrics(MetricsRecord record) {
if (serviceTimelinePublisher.isStopped()) {
log.warn("ServiceTimelinePublisher has stopped. "
+ "Not publishing any more metrics to ATS.");
return;
}
boolean isServiceMetrics = false;
boolean isComponentMetrics = false;
String appId = null;
for (MetricsTag tag : record.tags()) {
if (tag.name().equals("type") && tag.value().equals("service")) {
isServiceMetrics = true;
} else if (tag.name().equals("type") && tag.value().equals("component")) {
isComponentMetrics = true;
break; // if component metrics, no more information required from tag so
// break the loop
} else if (tag.name().equals("appId")) {
appId = tag.value();
}
}
if (isServiceMetrics && appId != null) {
log.debug("Publishing service metrics. {}", record);
serviceTimelinePublisher.publishMetrics(record.metrics(), appId,
ServiceTimelineEntityType.SERVICE_ATTEMPT.toString(),
record.timestamp());
} else if (isComponentMetrics) {
log.debug("Publishing Component metrics. {}", record);
serviceTimelinePublisher.publishMetrics(record.metrics(), record.name(),
ServiceTimelineEntityType.COMPONENT.toString(), record.timestamp());
}
}
@Override
public void init(SubsetConfiguration conf) {
}
@Override
public void flush() {
}
}
|
ServiceMetricsSink
|
java
|
spring-projects__spring-framework
|
spring-context/src/main/java/org/springframework/scheduling/support/CronTrigger.java
|
{
"start": 1615,
"end": 10058
}
|
class ____ implements Trigger {
private final CronExpression expression;
private final @Nullable ZoneId zoneId;
/**
* Build a {@code CronTrigger} from the pattern provided in the default time zone.
* <p>This is equivalent to the {@link CronTrigger#forLenientExecution} factory
* method. Original trigger firings may be skipped if the previous task is still
* running; if this is not desirable, consider {@link CronTrigger#forFixedExecution}.
* @param expression a space-separated list of time fields, following cron
* expression conventions
* @see CronTrigger#forLenientExecution
* @see CronTrigger#forFixedExecution
*/
public CronTrigger(String expression) {
this.expression = CronExpression.parse(expression);
this.zoneId = null;
}
/**
* Build a {@code CronTrigger} from the pattern provided in the given time zone,
* with the same lenient execution as {@link CronTrigger#CronTrigger(String)}.
* <p>Note that such explicit time zone customization is usually not necessary,
* using {@link org.springframework.scheduling.TaskScheduler#getClock()} instead.
* @param expression a space-separated list of time fields, following cron
* expression conventions
* @param timeZone a time zone in which the trigger times will be generated
*/
public CronTrigger(String expression, TimeZone timeZone) {
this.expression = CronExpression.parse(expression);
Assert.notNull(timeZone, "TimeZone must not be null");
this.zoneId = timeZone.toZoneId();
}
/**
* Build a {@code CronTrigger} from the pattern provided in the given time zone,
* with the same lenient execution as {@link CronTrigger#CronTrigger(String)}.
* <p>Note that such explicit time zone customization is usually not necessary,
* using {@link org.springframework.scheduling.TaskScheduler#getClock()} instead.
* @param expression a space-separated list of time fields, following cron
* expression conventions
* @param zoneId a time zone in which the trigger times will be generated
* @since 5.3
* @see CronExpression#parse(String)
*/
public CronTrigger(String expression, ZoneId zoneId) {
this.expression = CronExpression.parse(expression);
Assert.notNull(zoneId, "ZoneId must not be null");
this.zoneId = zoneId;
}
/**
* Return the cron pattern that this trigger has been built with.
*/
public String getExpression() {
return this.expression.toString();
}
/**
* Determine the next execution time according to the given trigger context.
* <p>Next execution times are calculated based on the
* {@linkplain TriggerContext#lastCompletion completion time} of the
* previous execution; therefore, overlapping executions won't occur.
*/
@Override
public @Nullable Instant nextExecution(TriggerContext triggerContext) {
Instant timestamp = determineLatestTimestamp(triggerContext);
ZoneId zone = (this.zoneId != null ? this.zoneId : triggerContext.getClock().getZone());
ZonedDateTime zonedTimestamp = timestamp.atZone(zone);
ZonedDateTime nextTimestamp = this.expression.next(zonedTimestamp);
return (nextTimestamp != null ? nextTimestamp.toInstant() : null);
}
Instant determineLatestTimestamp(TriggerContext triggerContext) {
Instant timestamp = triggerContext.lastCompletion();
if (timestamp != null) {
Instant scheduled = triggerContext.lastScheduledExecution();
if (scheduled != null && timestamp.isBefore(scheduled)) {
// Previous task apparently executed too early...
// Let's simply use the last calculated execution time then,
// in order to prevent accidental re-fires in the same second.
timestamp = scheduled;
}
}
else {
timestamp = determineInitialTimestamp(triggerContext);
}
return timestamp;
}
Instant determineInitialTimestamp(TriggerContext triggerContext) {
return triggerContext.getClock().instant();
}
@Override
public boolean equals(@Nullable Object other) {
return (this == other || (other instanceof CronTrigger that &&
this.expression.equals(that.expression)));
}
@Override
public int hashCode() {
return this.expression.hashCode();
}
@Override
public String toString() {
return this.expression.toString();
}
/**
* Create a {@link CronTrigger} for lenient execution, to be rescheduled
* after every task based on the completion time.
* <p>This variant does not make up for missed trigger firings if the
* associated task has taken too long. As a consequence, original trigger
* firings may be skipped if the previous task is still running.
* <p>This is equivalent to the regular {@link CronTrigger} constructor.
* Note that lenient execution is scheduler-dependent: it may skip trigger
* firings with long-running tasks on a thread pool while executing at
* {@link #forFixedExecution}-like precision with new threads per task.
* @param expression a space-separated list of time fields, following cron
* expression conventions
* @since 6.1.3
* @see #resumeLenientExecution
*/
public static CronTrigger forLenientExecution(String expression) {
return new CronTrigger(expression);
}
/**
* Create a {@link CronTrigger} for lenient execution, to be rescheduled
* after every task based on the completion time.
* <p>This variant does not make up for missed trigger firings if the
* associated task has taken too long. As a consequence, original trigger
* firings may be skipped if the previous task is still running.
* @param expression a space-separated list of time fields, following cron
* expression conventions
* @param resumptionTimestamp the timestamp to resume from (the last-known
* completion timestamp), with the new trigger calculated from there and
* possibly immediately firing (but only once, every subsequent calculation
* will start from the completion time of that first resumed trigger)
* @since 6.1.3
* @see #forLenientExecution
*/
public static CronTrigger resumeLenientExecution(String expression, Instant resumptionTimestamp) {
return new CronTrigger(expression) {
@Override
Instant determineInitialTimestamp(TriggerContext triggerContext) {
return resumptionTimestamp;
}
};
}
/**
* Create a {@link CronTrigger} for fixed execution, to be rescheduled
* after every task based on the last scheduled time.
* <p>This variant makes up for missed trigger firings if the associated task
* has taken too long, scheduling a task for every original trigger firing.
* Such follow-up tasks may execute late but will never be skipped.
* <p>Immediate versus late execution in case of long-running tasks may
* be scheduler-dependent but the guarantee to never skip a task is portable.
* @param expression a space-separated list of time fields, following cron
* expression conventions
* @since 6.1.3
* @see #resumeFixedExecution
*/
public static CronTrigger forFixedExecution(String expression) {
return new CronTrigger(expression) {
@Override
protected Instant determineLatestTimestamp(TriggerContext triggerContext) {
Instant scheduled = triggerContext.lastScheduledExecution();
return (scheduled != null ? scheduled : super.determineInitialTimestamp(triggerContext));
}
};
}
/**
* Create a {@link CronTrigger} for fixed execution, to be rescheduled
* after every task based on the last scheduled time.
* <p>This variant makes up for missed trigger firings if the associated task
* has taken too long, scheduling a task for every original trigger firing.
* Such follow-up tasks may execute late but will never be skipped.
* @param expression a space-separated list of time fields, following cron
* expression conventions
* @param resumptionTimestamp the timestamp to resume from (the last-known
* scheduled timestamp), with every trigger in-between immediately firing
* to make up for every execution that would have happened in the meantime
* @since 6.1.3
* @see #forFixedExecution
*/
public static CronTrigger resumeFixedExecution(String expression, Instant resumptionTimestamp) {
return new CronTrigger(expression) {
@Override
protected Instant determineLatestTimestamp(TriggerContext triggerContext) {
Instant scheduled = triggerContext.lastScheduledExecution();
return (scheduled != null ? scheduled : super.determineLatestTimestamp(triggerContext));
}
@Override
Instant determineInitialTimestamp(TriggerContext triggerContext) {
return resumptionTimestamp;
}
};
}
}
|
CronTrigger
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/ClassCanBeStaticTest.java
|
{
"start": 11431,
"end": 11734
}
|
class ____ {
{
Test.super.getClass();
}
}
}
""")
.doTest();
}
@Test
public void annotationMethod() {
compilationHelper
.addSourceLines(
"Test.java",
"""
|
One
|
java
|
spring-projects__spring-framework
|
spring-test/src/main/java/org/springframework/test/context/ActiveProfilesResolver.java
|
{
"start": 1490,
"end": 1798
}
|
class ____ which the profiles should be resolved;
* never {@code null}
* @return the bean definition profiles to use when loading the
* {@code ApplicationContext}; never {@code null}
* @see ActiveProfiles#resolver
* @see ActiveProfiles#inheritProfiles
*/
String[] resolve(Class<?> testClass);
}
|
for
|
java
|
alibaba__druid
|
core/src/main/java/com/alibaba/druid/sql/dialect/db2/parser/DB2CreateTableParser.java
|
{
"start": 1135,
"end": 3838
}
|
class ____ extends SQLCreateTableParser {
public DB2CreateTableParser(String sql) {
super(new DB2ExprParser(sql));
}
public DB2CreateTableParser(SQLExprParser exprParser) {
super(exprParser);
}
@Override
protected void parseCreateTableRest(SQLCreateTableStatement stmt) {
DB2CreateTableStatement createTable = (DB2CreateTableStatement) stmt;
for (; ; ) {
if (lexer.nextIfIdentifier(FnvHash.Constants.DATA)) {
acceptIdentifier("CAPTURE");
if (lexer.identifierEquals(FnvHash.Constants.NONE)) {
lexer.nextToken();
createTable.setDataCaptureNone(true);
continue;
}
throw new ParserException("TODO " + lexer.info());
} else if (lexer.nextIf(Token.IN)) {
if (lexer.nextIf(Token.DATABASE)) {
SQLName database = this.exprParser.name();
createTable.setDatabase(database);
} else if (lexer.identifierEquals("tablespace")) {
throw new ParserException("TODO " + lexer.info());
} else {
SQLName tablespace = this.exprParser.name();
createTable.setTablespace(tablespace);
}
continue;
} else if (lexer.nextIfIdentifier(FnvHash.Constants.PARTITIONING)) {
SQLPartitionByHash partitionBy = new SQLPartitionByHash();
accept(Token.KEY);
accept(Token.LPAREN);
this.exprParser.exprList(partitionBy.getColumns(), partitionBy);
accept(Token.RPAREN);
accept(Token.USING);
acceptIdentifier("HASHING");
createTable.setPartitionBy(partitionBy);
continue;
} else if (lexer.nextIfIdentifier(FnvHash.Constants.VALIDPROC)) {
SQLName validproc = this.exprParser.name();
createTable.setValidproc(validproc);
continue;
} else if (lexer.nextIfIdentifier(FnvHash.Constants.COMPRESS)) {
createTable.setCompress(true);
lexer.nextIfIdentifier(FnvHash.Constants.YES);
continue;
} else if (lexer.nextIf(Token.INDEX)) {
accept(Token.IN);
SQLName indexIn = this.exprParser.name();
createTable.setIndexIn(indexIn);
continue;
}
break;
}
}
protected DB2CreateTableStatement newCreateStatement() {
return new DB2CreateTableStatement();
}
}
|
DB2CreateTableParser
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.