language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceManager.java
|
{
"start": 19232,
"end": 19352
}
|
enum ____ {
STABLE, UPGRADING
}
@VisibleForTesting
Service getServiceSpec() {
return serviceSpec;
}
}
|
State
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/InvariantsChecker.java
|
{
"start": 1323,
"end": 3179
}
|
class ____ implements SchedulingEditPolicy {
private static final Logger LOG =
LoggerFactory.getLogger(InvariantsChecker.class);
public static final String THROW_ON_VIOLATION =
"yarn.resourcemanager.invariant-checker.throw-on-violation";
public static final String INVARIANT_MONITOR_INTERVAL =
"yarn.resourcemanager.invariant-checker.monitor-interval";
private Configuration conf;
private RMContext context;
private ResourceScheduler scheduler;
private boolean throwOnInvariantViolation;
private long monitoringInterval;
@Override
public void init(Configuration config, RMContext rmContext,
ResourceScheduler scheduler) {
this.conf = config;
this.context = rmContext;
this.scheduler = scheduler;
this.throwOnInvariantViolation =
conf.getBoolean(InvariantsChecker.THROW_ON_VIOLATION, false);
this.monitoringInterval =
conf.getLong(InvariantsChecker.INVARIANT_MONITOR_INTERVAL, 1000L);
LOG.info("Invariant checker " + this.getPolicyName()
+ " enabled. Monitoring every " + monitoringInterval
+ "ms, throwOnViolation=" + throwOnInvariantViolation);
}
@Override
public long getMonitoringInterval() {
return monitoringInterval;
}
@Override
public String getPolicyName() {
return this.getClass().getSimpleName();
}
public void logOrThrow(String message) throws InvariantViolationException {
if (getThrowOnInvariantViolation()) {
throw new InvariantViolationException(message);
} else {
LOG.warn(message);
}
}
public boolean getThrowOnInvariantViolation() {
return throwOnInvariantViolation;
}
public Configuration getConf() {
return conf;
}
public RMContext getContext() {
return context;
}
public ResourceScheduler getScheduler() {
return scheduler;
}
}
|
InvariantsChecker
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/jpa/orphan/onetoone/OneToOneEagerNonOptionalOrphanRemovalTest.java
|
{
"start": 4244,
"end": 4827
}
|
class ____ {
@Id
private Integer id;
private String color;
@OneToOne(mappedBy = "paintColor")
private Car car;
PaintColor() {
// Required by JPA
}
PaintColor(Integer id, String color) {
this.id = id;
this.color = color;
}
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getColor() {
return color;
}
public void setColor(String color) {
this.color = color;
}
public Car getCar() {
return car;
}
public void setCar(Car car) {
this.car = car;
}
}
}
|
PaintColor
|
java
|
spring-projects__spring-security
|
webauthn/src/main/java/org/springframework/security/web/webauthn/jackson/AuthenticationExtensionsClientOutputsJackson2Deserializer.java
|
{
"start": 1849,
"end": 3110
}
|
class ____
extends StdDeserializer<AuthenticationExtensionsClientOutputs> {
private static final Log logger = LogFactory
.getLog(AuthenticationExtensionsClientOutputsJackson2Deserializer.class);
/**
* Creates a new instance.
*/
AuthenticationExtensionsClientOutputsJackson2Deserializer() {
super(AuthenticationExtensionsClientOutputs.class);
}
@Override
public AuthenticationExtensionsClientOutputs deserialize(JsonParser parser, DeserializationContext ctxt)
throws IOException, JacksonException {
List<AuthenticationExtensionsClientOutput<?>> outputs = new ArrayList<>();
for (String key = parser.nextFieldName(); key != null; key = parser.nextFieldName()) {
JsonToken startObject = parser.nextValue();
if (startObject != JsonToken.START_OBJECT) {
break;
}
if (CredentialPropertiesOutput.EXTENSION_ID.equals(key)) {
CredentialPropertiesOutput output = parser.readValueAs(CredentialPropertiesOutput.class);
outputs.add(output);
}
else {
if (logger.isDebugEnabled()) {
logger.debug("Skipping unknown extension with id " + key);
}
parser.nextValue();
}
}
return new ImmutableAuthenticationExtensionsClientOutputs(outputs);
}
}
|
AuthenticationExtensionsClientOutputsJackson2Deserializer
|
java
|
spring-projects__spring-data-jpa
|
spring-data-jpa/src/test/java/org/springframework/data/jpa/repository/query/EqlDtoQueryTransformerUnitTests.java
|
{
"start": 932,
"end": 1448
}
|
class ____ extends AbstractDtoQueryTransformerUnitTests<JpaQueryEnhancer.EqlQueryParser> {
@Override
JpaQueryEnhancer.EqlQueryParser parse(String query) {
return JpaQueryEnhancer.EqlQueryParser.parseQuery(query);
}
@Override
ParseTreeVisitor<QueryTokenStream> getTransformer(JpaQueryEnhancer.EqlQueryParser parser, QueryMethod method) {
return new EqlSortedQueryTransformer(Sort.unsorted(), parser.getQueryInformation(),
method.getResultProcessor().getReturnedType());
}
}
|
EqlDtoQueryTransformerUnitTests
|
java
|
hibernate__hibernate-orm
|
hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/RDMSOS2200Dialect.java
|
{
"start": 3516,
"end": 12705
}
|
class ____ extends Dialect {
/**
* Constructs a RDMSOS2200Dialect
*/
public RDMSOS2200Dialect() {
super( SimpleDatabaseVersion.ZERO_VERSION );
}
public RDMSOS2200Dialect(DialectResolutionInfo info) {
super( info );
}
@Override
protected String columnType(int sqlTypeCode) {
/*
* For a list of column types to register, see section A-1
* in 7862 7395, the Unisys JDBC manual.
*
* Here are column sizes as documented in Table A-1 of
* 7831 0760, "Enterprise Relational Database Server
* for ClearPath OS2200 Administration Guide"
* Numeric - 21
* Decimal - 22 (21 digits plus one for sign)
* Float - 60 bits
* Char - 28000
* NChar - 14000
* BLOB+ - 4294967296 (4 Gb)
* + RDMS JDBC driver does not support BLOBs
*
* DATE, TIME and TIMESTAMP literal formats are
* are all described in section 2.3.4 DATE Literal Format
* in 7830 8160.
* The DATE literal format is: YYYY-MM-DD
* The TIME literal format is: HH:MM:SS[.[FFFFFF]]
* The TIMESTAMP literal format is: YYYY-MM-DD HH:MM:SS[.[FFFFFF]]
*
* Note that $l (dollar-L) will use the length value if provided.
* Also new for Hibernate3 is the $p percision and $s (scale) parameters
*/
return switch ( sqlTypeCode ) {
case BOOLEAN, TINYINT -> "smallint";
case BIGINT -> "numeric(19,0)";
//'varchar' is not supported in RDMS for OS 2200
//(but it is for other flavors of RDMS)
//'character' means ASCII by default, 'unicode(n)'
//means 'character(n) character set "UCS-2"'
case CHAR, NCHAR, VARCHAR, NVARCHAR, LONG32VARCHAR, LONG32NVARCHAR -> "unicode($l)";
case CLOB, NCLOB -> "clob($l)";
//no 'binary' nor 'varbinary' so use 'blob'
case BINARY, VARBINARY, LONG32VARBINARY, BLOB -> "blob($l)";
case TIMESTAMP_WITH_TIMEZONE -> columnType( TIMESTAMP );
default -> super.columnType( sqlTypeCode );
};
}
@Override
public boolean useMaterializedLobWhenCapacityExceeded() {
return false;
}
@Override
public int getMaxVarbinaryLength() {
//no varbinary type
return -1;
}
@Override
public DatabaseVersion getVersion() {
return ZERO_VERSION;
}
@Override
public JdbcType resolveSqlTypeDescriptor(
String columnTypeName,
int jdbcTypeCode,
int precision,
int scale,
JdbcTypeRegistry jdbcTypeRegistry) {
if ( jdbcTypeCode == Types.BIT ) {
return jdbcTypeRegistry.getDescriptor( Types.BOOLEAN );
}
return super.resolveSqlTypeDescriptor(
columnTypeName,
jdbcTypeCode,
precision,
scale,
jdbcTypeRegistry
);
}
@Override
public int getPreferredSqlTypeCodeForBoolean() {
return Types.BIT;
}
@Override
public int getDefaultDecimalPrecision() {
//the (really low) maximum
return 21;
}
@Override
public void initializeFunctionRegistry(FunctionContributions functionContributions) {
super.initializeFunctionRegistry(functionContributions);
CommonFunctionFactory functionFactory = new CommonFunctionFactory(functionContributions);
functionFactory.cosh();
functionFactory.sinh();
functionFactory.tanh();
functionFactory.cot();
functionFactory.log();
functionFactory.log10();
functionFactory.pi();
functionFactory.rand();
functionFactory.trunc();
// functionFactory.truncate();
functionFactory.soundex();
functionFactory.trim2();
functionFactory.space();
functionFactory.repeat();
// functionFactory.replicate(); //synonym for more common repeat()
functionFactory.initcap();
functionFactory.instr();
functionFactory.substr();
functionFactory.translate();
functionFactory.yearMonthDay();
functionFactory.hourMinuteSecond();
functionFactory.dayofweekmonthyear();
functionFactory.weekQuarter();
functionFactory.daynameMonthname();
functionFactory.lastDay();
functionFactory.ceiling_ceil();
functionFactory.concat_pipeOperator();
functionFactory.ascii();
functionFactory.chr_char();
functionFactory.insert();
functionFactory.addMonths();
functionFactory.monthsBetween();
}
@Override
public SqlAstTranslatorFactory getSqlAstTranslatorFactory() {
return new StandardSqlAstTranslatorFactory() {
@Override
protected <T extends JdbcOperation> SqlAstTranslator<T> buildTranslator(
SessionFactoryImplementor sessionFactory, Statement statement) {
return new RDMSOS2200SqlAstTranslator<>( sessionFactory, statement );
}
};
}
@Override
public long getFractionalSecondPrecisionInNanos() {
return 1_000; //microseconds
}
/**
* RDMS supports a limited list of temporal fields in the
* extract() function, but we can emulate some of them by
* using the appropriate named functions instead of
* extract().
*
* Thus, the additional supported fields are
* {@link TemporalUnit#DAY_OF_YEAR},
* {@link TemporalUnit#DAY_OF_MONTH},
* {@link TemporalUnit#DAY_OF_YEAR}.
*
* In addition, the field {@link TemporalUnit#SECOND} is
* redefined to include microseconds.
*/
@Override
public String extractPattern(TemporalUnit unit) {
return switch (unit) {
case SECOND -> "(second(?2)+microsecond(?2)/1e6)";
case DAY_OF_WEEK -> "dayofweek(?2)";
case DAY_OF_MONTH -> "dayofmonth(?2)";
case DAY_OF_YEAR -> "dayofyear(?2)";
default -> "?1(?2)";
};
}
@Override
public String timestampaddPattern(TemporalUnit unit, TemporalType temporalType, IntervalType intervalType) {
return switch (unit) {
case NANOSECOND -> "timestampadd('SQL_TSI_FRAC_SECOND',(?2)/1e3,?3)";
case NATIVE -> "timestampadd('SQL_TSI_FRAC_SECOND',?2,?3)";
default -> "dateadd('?1',?2,?3)";
};
}
@Override
public String timestampdiffPattern(TemporalUnit unit, TemporalType fromTemporalType, TemporalType toTemporalType) {
return switch (unit) {
case NANOSECOND -> "timestampdiff('SQL_TSI_FRAC_SECOND',?2,?3)*1e3";
case NATIVE -> "timestampdiff('SQL_TSI_FRAC_SECOND',?2,?3)";
default -> "dateadd('?1',?2,?3)";
};
}
// Dialect method overrides ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/**
* RDMS does not support qualifing index names with the schema name.
* <p>
* {@inheritDoc}
*/
@Override
public boolean qualifyIndexName() {
return false;
}
/**
* {@code FOR UPDATE} only supported for cursors
*
* @return the empty string
*/
@Override
public String getForUpdateString() {
// Original Dialect.java returns " for update";
return "";
}
// Verify the state of this new method in Hibernate 3.0 Dialect.java
/**
* RDMS does not support Cascade Deletes.
* Need to review this in the future when support is provided.
* <p>
* {@inheritDoc}
*/
@Override
public boolean supportsCascadeDelete() {
return false;
}
@Override
public LockingSupport getLockingSupport() {
return LockingSupportSimple.NO_OUTER_JOIN;
}
@Override
public String getAddColumnString() {
return "add";
}
@Override
public String getNullColumnString() {
// The keyword used to specify a nullable column.
return " null";
}
@Override
public SequenceSupport getSequenceSupport() {
return RDMSSequenceSupport.INSTANCE;
}
@Override
public String getCascadeConstraintsString() {
// Used with DROP TABLE to delete all records in the table.
return " including contents";
}
@Override
public LimitHandler getLimitHandler() {
return FetchLimitHandler.INSTANCE;
}
@Override
public boolean supportsOrderByInSubquery() {
// This is just a guess
return false;
}
@Override
protected LockingStrategy buildPessimisticWriteStrategy(EntityPersister lockable, LockMode lockMode, Locking.Scope lockScope) {
// RDMS has no known variation of "SELECT ... FOR UPDATE" syntax...
return new PessimisticWriteUpdateLockingStrategy( lockable, lockMode );
}
@Override
protected LockingStrategy buildPessimisticReadStrategy(EntityPersister lockable, LockMode lockMode, Locking.Scope lockScope) {
// RDMS has no known variation of "SELECT ... FOR UPDATE" syntax...
return new PessimisticReadUpdateLockingStrategy( lockable, lockMode );
}
@Override
public LockingClauseStrategy getLockingClauseStrategy(QuerySpec querySpec, LockOptions lockOptions) {
// Unisys 2200 does not support the FOR UPDATE clause
return NON_CLAUSE_STRATEGY;
}
@Override
public void appendDatetimeFormat(SqlAppender appender, String format) {
appender.appendSql(
OracleDialect.datetimeFormat( format, true, false ) //Does it really support FM?
.replace("SSSSSS", "MLS")
.replace("SSSSS", "MLS")
.replace("SSSS", "MLS")
.replace("SSS", "MLS")
.replace("SS", "MLS")
.replace("S", "MLS")
.result()
);
}
@Override
public String trimPattern(TrimSpec specification, boolean isWhitespace) {
return AbstractTransactSQLDialect.replaceLtrimRtrim( specification, isWhitespace );
}
@Override
public String getDual() {
return "rdms.rdms_dummy";
}
@Override
public String getFromDualForSelectOnly() {
return " from " + getDual() + " where key_col=1";
}
@Override
public boolean supportsRowValueConstructorSyntax() {
return false;
}
@Override
public boolean supportsRowValueConstructorSyntaxInQuantifiedPredicates() {
return false;
}
@Override
public boolean supportsRowValueConstructorSyntaxInInList() {
return false;
}
}
|
RDMSOS2200Dialect
|
java
|
micronaut-projects__micronaut-core
|
http-client/src/main/java/io/micronaut/http/client/netty/ConnectionManager.java
|
{
"start": 38240,
"end": 38584
}
|
class ____ extends ChannelInitializer<Channel> {
NettyClientCustomizer bootstrappedCustomizer;
}
/**
* Initializer for TLS channels. After ALPN we will proceed either with
* {@link #initHttp1(Channel)} or {@link #initHttp2(PoolHolder, Channel, NettyClientCustomizer)}.
*/
private final
|
CustomizerAwareInitializer
|
java
|
mockito__mockito
|
mockito-core/src/test/java/org/mockito/internal/stubbing/InvocationContainerImplStubbingTest.java
|
{
"start": 855,
"end": 4322
}
|
class ____ extends TestBase {
private InvocationContainerImpl invocationContainerImpl;
private InvocationContainerImpl invocationContainerImplStubOnly;
private MockingProgress state;
private Invocation simpleMethod;
@Before
public void setup() {
state = mockingProgress();
invocationContainerImpl = new InvocationContainerImpl(new MockSettingsImpl());
invocationContainerImpl.setInvocationForPotentialStubbing(
new InvocationBuilder().toInvocationMatcher());
invocationContainerImplStubOnly =
new InvocationContainerImpl(new MockSettingsImpl().stubOnly());
invocationContainerImplStubOnly.setInvocationForPotentialStubbing(
new InvocationBuilder().toInvocationMatcher());
simpleMethod = new InvocationBuilder().simpleMethod().toInvocation();
}
@Test
public void should_finish_stubbing_when_wrong_throwable_is_set() throws Exception {
state.stubbingStarted();
try {
invocationContainerImpl.addAnswer(new ThrowsException(new Exception()), null);
fail();
} catch (MockitoException e) {
state.validateState();
}
}
@Test
public void should_finish_stubbing_on_adding_return_value() throws Exception {
state.stubbingStarted();
invocationContainerImpl.addAnswer(new Returns("test"), null);
state.validateState();
}
@Test
public void should_get_results_for_methods() throws Throwable {
invocationContainerImpl.setInvocationForPotentialStubbing(
new InvocationMatcher(simpleMethod));
invocationContainerImpl.addAnswer(new Returns("simpleMethod"), null);
Invocation differentMethod = new InvocationBuilder().differentMethod().toInvocation();
invocationContainerImpl.setInvocationForPotentialStubbing(
new InvocationMatcher(differentMethod));
invocationContainerImpl.addAnswer(new ThrowsException(new MyException()), null);
assertEquals("simpleMethod", invocationContainerImpl.answerTo(simpleMethod));
try {
invocationContainerImpl.answerTo(differentMethod);
fail();
} catch (MyException e) {
}
}
@Test
public void should_get_results_for_methods_stub_only() throws Throwable {
invocationContainerImplStubOnly.setInvocationForPotentialStubbing(
new InvocationMatcher(simpleMethod));
invocationContainerImplStubOnly.addAnswer(new Returns("simpleMethod"), null);
Invocation differentMethod = new InvocationBuilder().differentMethod().toInvocation();
invocationContainerImplStubOnly.setInvocationForPotentialStubbing(
new InvocationMatcher(differentMethod));
invocationContainerImplStubOnly.addAnswer(new ThrowsException(new MyException()), null);
assertEquals("simpleMethod", invocationContainerImplStubOnly.answerTo(simpleMethod));
try {
invocationContainerImplStubOnly.answerTo(differentMethod);
fail();
} catch (MyException e) {
}
}
@Test
public void should_validate_throwable() throws Throwable {
try {
invocationContainerImpl.addAnswer(new ThrowsException(null), null);
fail();
} catch (MockitoException e) {
}
}
@SuppressWarnings("serial")
|
InvocationContainerImplStubbingTest
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/deser/filter/ReadOnlyListDeser2283Test.java
|
{
"start": 1092,
"end": 2781
}
|
class ____ {
@JsonProperty("renamedList")
List<Long> getList() {
return Collections.emptyList();
}
}
/*
/**********************************************************
/* Test methods
/**********************************************************
*/
private final ObjectMapper MAPPER = jsonMapperBuilder()
.configure(MapperFeature.USE_GETTERS_AS_SETTERS, true).build();
@Test
public void testRenamedToSameOnGetter() throws Exception
{
assertEquals("{\"list\":[]}",
MAPPER.writeValueAsString(new RenamedToSameOnGetter()));
String payload = "{\"list\":[1,2,3,4]}";
RenamedToSameOnGetter foo = MAPPER.readValue(payload, RenamedToSameOnGetter.class);
assertTrue(foo.getList().isEmpty(), "List should be empty");
}
@Test
public void testRenamedToDifferentOnGetter() throws Exception
{
assertEquals("{\"renamedList\":[]}",
MAPPER.writeValueAsString(new RenamedToDifferentOnGetter()));
String payload = "{\"renamedList\":[1,2,3,4]}";
RenamedToDifferentOnGetter foo = MAPPER.readValue(payload, RenamedToDifferentOnGetter.class);
assertTrue(foo.getList().isEmpty(), "List should be empty");
}
@Test
public void testRenamedOnClass() throws Exception
{
assertEquals("{\"renamedList\":[]}",
MAPPER.writeValueAsString(new RenamedOnClass()));
String payload = "{\"renamedList\":[1,2,3,4]}";
RenamedOnClass foo = MAPPER.readValue(payload, RenamedOnClass.class);
assertTrue(foo.getList().isEmpty(), "List should be empty");
}
}
|
RenamedOnClass
|
java
|
apache__camel
|
components/camel-ldap/src/test/java/org/apache/directory/server/core/integ5/DirectoryExtension.java
|
{
"start": 2524,
"end": 3194
}
|
class ____ implements BeforeAllCallback, AfterAllCallback, BeforeEachCallback, AfterEachCallback {
/**
* A logger for this class
*/
private static final Logger LOG = LoggerFactory.getLogger(DirectoryExtension.class);
private static final ExtensionContext.Namespace NAMESPACE = ExtensionContext.Namespace.create(DirectoryExtension.class);
/**
* The 'service' field in the run tests
*/
private static final String SET_SERVICE_METHOD_NAME = "setService";
/**
* The 'ldapServer' field in the run tests
*/
private static final String SET_LDAP_SERVER_METHOD_NAME = "setLdapServer";
public static
|
DirectoryExtension
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/threadsafety/ImmutableCheckerTest.java
|
{
"start": 24041,
"end": 24419
}
|
class ____<T> {
// BUG: Diagnostic contains: mutable type for 'E', 'Object' is mutable
final ImmutableList<?> xs = null;
}
""")
.addSourceLines(
"Test.java",
"""
import com.google.errorprone.annotations.Immutable;
import java.util.List;
@Immutable
|
X
|
java
|
apache__maven
|
its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng4327ExcludeForkingMojoFromForkedLifecycleTest.java
|
{
"start": 1184,
"end": 2062
}
|
class ____ extends AbstractMavenIntegrationTestCase {
/**
* Verify that lifecycle forking mojos are excluded from the lifecycles that have directly or indirectly forked
* by them.
*
* @throws Exception in case of failure
*/
@Test
public void testit() throws Exception {
File testDir = extractResources("/mng-4327");
Verifier verifier = newVerifier(testDir.getAbsolutePath());
verifier.setAutoclean(false);
verifier.deleteDirectory("target");
verifier.addCliArgument("generate-sources");
verifier.execute();
verifier.verifyErrorFreeLog();
List<String> log = verifier.loadLines("target/fork-lifecycle.txt");
assertEquals(1, log.size());
assertTrue(log.contains("fork-lifecycle.txt"), log.toString());
}
}
|
MavenITmng4327ExcludeForkingMojoFromForkedLifecycleTest
|
java
|
spring-projects__spring-data-jpa
|
spring-data-jpa/src/test/java/org/springframework/data/jpa/repository/config/InspectionClassLoaderUnitTests.java
|
{
"start": 848,
"end": 1397
}
|
class ____ {
@Test // DATAJPA-1250
void shouldLoadExternalClass() throws ClassNotFoundException {
InspectionClassLoader classLoader = new InspectionClassLoader(getClass().getClassLoader());
Class<?> isolated = classLoader.loadClass("org.hsqldb.Database");
Class<?> included = getClass().getClassLoader().loadClass("org.hsqldb.Database");
assertThat(isolated.getClassLoader()) //
.isSameAs(classLoader) //
.isNotSameAs(getClass().getClassLoader());
assertThat(isolated).isNotEqualTo(included);
}
}
|
InspectionClassLoaderUnitTests
|
java
|
spring-projects__spring-boot
|
module/spring-boot-restclient/src/test/java/org/springframework/boot/restclient/autoconfigure/RestClientAutoConfigurationTests.java
|
{
"start": 13585,
"end": 13731
}
|
class ____ {
@Bean
MyRestClientBuilder myRestClientBuilder() {
return mock(MyRestClientBuilder.class);
}
}
|
CustomRestClientBuilderConfig
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/CustomResourceMetricValue.java
|
{
"start": 1350,
"end": 2597
}
|
class ____ {
private final Map<String, Long> values = Maps.newHashMap();
public void increase(Resource res) {
update(res, Long::sum);
}
public void increaseWithMultiplier(Resource res, long multiplier) {
update(res, (v1, v2) -> v1 + v2 * multiplier);
}
public void decrease(Resource res) {
update(res, (v1, v2) -> v1 - v2);
}
public void decreaseWithMultiplier(Resource res, int containers) {
update(res, (v1, v2) -> v1 - v2 * containers);
}
public void set(Resource res) {
update(res, (v1, v2) -> v2);
}
private void update(Resource res, BiFunction<Long, Long, Long> operation) {
if (ResourceUtils.getNumberOfKnownResourceTypes() > 2) {
ResourceInformation[] resources = res.getResources();
for (int i = 2; i < resources.length; i++) {
ResourceInformation resource = resources[i];
// Map.merge only applies operation if there is
// a value for the key in the map
if (!values.containsKey(resource.getName())) {
values.put(resource.getName(), 0L);
}
values.merge(resource.getName(), resource.getValue(), operation);
}
}
}
public Map<String, Long> getValues() {
return values;
}
}
|
CustomResourceMetricValue
|
java
|
apache__flink
|
flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/factories/TestValuesRuntimeFunctions.java
|
{
"start": 26281,
"end": 29432
}
|
class ____ extends AbstractExactlyOnceSink {
private static final String LINEAGE_NAMESPACE = "values://RetractingSinkFunction";
private static final long serialVersionUID = 1L;
protected transient ListState<Row> retractResultState;
protected transient List<Row> localRetractResult;
protected RetractingSinkFunction(
String tableName, DataType consumedDataType, DataStructureConverter converter) {
super(tableName, consumedDataType, converter);
}
@Override
public void initializeState(FunctionInitializationContext context) throws Exception {
super.initializeState(context);
this.retractResultState =
context.getOperatorStateStore()
.getListState(
new ListStateDescriptor<>(
"sink-retract-results",
ExternalSerializer.of(consumedDataType)));
this.localRetractResult = new ArrayList<>();
if (context.isRestored()) {
for (Row value : retractResultState.get()) {
localRetractResult.add(value);
}
}
int taskId = getRuntimeContext().getTaskInfo().getIndexOfThisSubtask();
synchronized (LOCK) {
globalRetractResult
.computeIfAbsent(tableName, k -> new HashMap<>())
.put(taskId, localRetractResult);
}
}
@Override
public void snapshotState(FunctionSnapshotContext context) throws Exception {
super.snapshotState(context);
synchronized (LOCK) {
retractResultState.update(localRetractResult);
}
}
@SuppressWarnings("rawtypes")
@Override
public void invoke(RowData value, Context context) throws Exception {
RowKind kind = value.getRowKind();
Row row = (Row) converter.toExternal(value);
assertThat(row).isNotNull();
synchronized (LOCK) {
final Row retractRow = Row.copy(row);
retractRow.setKind(RowKind.INSERT);
if (kind == RowKind.INSERT || kind == RowKind.UPDATE_AFTER) {
localRetractResult.add(retractRow);
} else {
boolean contains = localRetractResult.remove(retractRow);
if (!contains) {
throw new RuntimeException(
"Tried to retract a value that wasn't inserted first. "
+ "This is probably an incorrectly implemented test.");
}
}
// Moving this to the end so that the rawLocalObservers can see update
// globalRetracts.
addLocalRawResult(row);
}
}
@Override
public String getNamespace() {
return LINEAGE_NAMESPACE;
}
}
static
|
RetractingSinkFunction
|
java
|
apache__kafka
|
streams/src/main/java/org/apache/kafka/streams/kstream/Initializer.java
|
{
"start": 873,
"end": 1528
}
|
interface ____ creating an initial value in aggregations.
* {@code Initializer} is used in combination with {@link Aggregator}.
*
* @param <VAgg> aggregate value type
*
* @see Aggregator
* @see KGroupedStream#aggregate(Initializer, Aggregator)
* @see KGroupedStream#aggregate(Initializer, Aggregator, Materialized)
* @see TimeWindowedKStream#aggregate(Initializer, Aggregator)
* @see TimeWindowedKStream#aggregate(Initializer, Aggregator, Materialized)
* @see SessionWindowedKStream#aggregate(Initializer, Aggregator, Merger)
* @see SessionWindowedKStream#aggregate(Initializer, Aggregator, Merger, Materialized)
*/
@FunctionalInterface
public
|
for
|
java
|
apache__flink
|
flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/common/CommonExecTableSourceScan.java
|
{
"start": 4789,
"end": 18380
}
|
class ____ extends ExecNodeBase<RowData>
implements MultipleTransformationTranslator<RowData> {
public static final String SOURCE_TRANSFORMATION = "source";
public static final String FIELD_NAME_SCAN_TABLE_SOURCE = "scanTableSource";
@JsonProperty(FIELD_NAME_SCAN_TABLE_SOURCE)
private final DynamicTableSourceSpec tableSourceSpec;
protected CommonExecTableSourceScan(
int id,
ExecNodeContext context,
ReadableConfig persistedConfig,
DynamicTableSourceSpec tableSourceSpec,
List<InputProperty> inputProperties,
LogicalType outputType,
String description) {
super(id, context, persistedConfig, inputProperties, outputType, description);
this.tableSourceSpec = tableSourceSpec;
}
@Override
public String getSimplifiedName() {
return tableSourceSpec.getContextResolvedTable().getIdentifier().getObjectName();
}
public DynamicTableSourceSpec getTableSourceSpec() {
return tableSourceSpec;
}
@Override
protected Transformation<RowData> translateToPlanInternal(
PlannerBase planner, ExecNodeConfig config) {
final Transformation<RowData> sourceTransform;
final StreamExecutionEnvironment env = planner.getExecEnv();
final TransformationMetadata meta = createTransformationMeta(SOURCE_TRANSFORMATION, config);
final InternalTypeInfo<RowData> outputTypeInfo =
InternalTypeInfo.of((RowType) getOutputType());
final ScanTableSource tableSource =
tableSourceSpec.getScanTableSource(
planner.getFlinkContext(), ShortcutUtils.unwrapTypeFactory(planner));
ScanTableSource.ScanRuntimeProvider provider =
tableSource.getScanRuntimeProvider(ScanRuntimeProviderContext.INSTANCE);
final int sourceParallelism = deriveSourceParallelism(provider);
final boolean sourceParallelismConfigured = isParallelismConfigured(provider);
Optional<LineageVertex> lineageVertex = Optional.empty();
if (provider instanceof SourceFunctionProvider) {
final SourceFunctionProvider sourceFunctionProvider = (SourceFunctionProvider) provider;
final SourceFunction<RowData> function = sourceFunctionProvider.createSourceFunction();
lineageVertex = TableLineageUtils.extractLineageDataset(function);
sourceTransform =
createSourceFunctionTransformation(
env,
function,
sourceFunctionProvider.isBounded(),
meta.getName(),
outputTypeInfo,
sourceParallelism,
sourceParallelismConfigured);
LineageDataset tableLineageDataset =
TableLineageUtils.createTableLineageDataset(
tableSourceSpec.getContextResolvedTable(), lineageVertex);
TableSourceLineageVertex sourceLineageVertex =
new TableSourceLineageVertexImpl(
Arrays.asList(tableLineageDataset),
provider.isBounded()
? Boundedness.BOUNDED
: Boundedness.CONTINUOUS_UNBOUNDED);
((TransformationWithLineage<RowData>) sourceTransform)
.setLineageVertex(sourceLineageVertex);
if (function instanceof ParallelSourceFunction && sourceParallelismConfigured) {
meta.fill(sourceTransform);
return new SourceTransformationWrapper<>(sourceTransform);
} else {
return meta.fill(sourceTransform);
}
} else if (provider instanceof InputFormatProvider) {
final InputFormat<RowData, ?> inputFormat =
((InputFormatProvider) provider).createInputFormat();
lineageVertex = TableLineageUtils.extractLineageDataset(inputFormat);
sourceTransform =
createInputFormatTransformation(
env, inputFormat, outputTypeInfo, meta.getName());
meta.fill(sourceTransform);
} else if (provider instanceof SourceProvider) {
final Source<RowData, ?, ?> source = ((SourceProvider) provider).createSource();
lineageVertex = TableLineageUtils.extractLineageDataset(source);
// TODO: Push down watermark strategy to source scan
sourceTransform =
env.fromSource(
source,
WatermarkStrategy.noWatermarks(),
meta.getName(),
outputTypeInfo)
.getTransformation();
meta.fill(sourceTransform);
} else if (provider instanceof DataStreamScanProvider) {
sourceTransform =
((DataStreamScanProvider) provider)
.produceDataStream(createProviderContext(config), env)
.getTransformation();
meta.fill(sourceTransform);
sourceTransform.setOutputType(outputTypeInfo);
} else if (provider instanceof TransformationScanProvider) {
sourceTransform =
((TransformationScanProvider) provider)
.createTransformation(createProviderContext(config));
meta.fill(sourceTransform);
sourceTransform.setOutputType(outputTypeInfo);
} else {
throw new UnsupportedOperationException(
provider.getClass().getSimpleName() + " is unsupported now.");
}
LineageDataset tableLineageDataset =
TableLineageUtils.createTableLineageDataset(
tableSourceSpec.getContextResolvedTable(), lineageVertex);
TableSourceLineageVertex sourceLineageVertex =
new TableSourceLineageVertexImpl(
Arrays.asList(tableLineageDataset),
provider.isBounded()
? Boundedness.BOUNDED
: Boundedness.CONTINUOUS_UNBOUNDED);
if (sourceTransform instanceof TransformationWithLineage) {
((TransformationWithLineage<RowData>) sourceTransform)
.setLineageVertex(sourceLineageVertex);
}
if (sourceParallelismConfigured) {
Transformation<RowData> sourceTransformationWrapper =
applySourceTransformationWrapper(
sourceTransform,
planner.getFlinkContext().getClassLoader(),
outputTypeInfo,
config,
tableSource.getChangelogMode(),
sourceParallelism);
return sourceTransformationWrapper;
}
return sourceTransform;
}
private boolean isParallelismConfigured(ScanTableSource.ScanRuntimeProvider runtimeProvider) {
return runtimeProvider instanceof ParallelismProvider
&& ((ParallelismProvider) runtimeProvider).getParallelism().isPresent();
}
private int deriveSourceParallelism(ScanTableSource.ScanRuntimeProvider runtimeProvider) {
if (isParallelismConfigured(runtimeProvider)) {
int sourceParallelism = ((ParallelismProvider) runtimeProvider).getParallelism().get();
if (sourceParallelism <= 0) {
throw new TableException(
String.format(
"Invalid configured parallelism %s for table '%s'.",
sourceParallelism,
tableSourceSpec
.getContextResolvedTable()
.getIdentifier()
.asSummaryString()));
}
return sourceParallelism;
} else {
return ExecutionConfig.PARALLELISM_DEFAULT;
}
}
protected RowType getPhysicalRowType(ResolvedSchema schema) {
return (RowType) schema.toPhysicalRowDataType().getLogicalType();
}
protected int[] getPrimaryKeyIndices(RowType sourceRowType, ResolvedSchema schema) {
return schema.getPrimaryKey()
.map(k -> k.getColumns().stream().mapToInt(sourceRowType::getFieldIndex).toArray())
.orElse(new int[0]);
}
private Transformation<RowData> applySourceTransformationWrapper(
Transformation<RowData> sourceTransform,
ClassLoader classLoader,
InternalTypeInfo<RowData> outputTypeInfo,
ExecNodeConfig config,
ChangelogMode changelogMode,
int sourceParallelism) {
sourceTransform.setParallelism(sourceParallelism, true);
Transformation<RowData> sourceTransformationWrapper =
new SourceTransformationWrapper<>(sourceTransform);
if (!changelogMode.containsOnly(RowKind.INSERT)) {
final ResolvedSchema schema =
tableSourceSpec.getContextResolvedTable().getResolvedSchema();
final RowType physicalRowType = getPhysicalRowType(schema);
final int[] primaryKeys = getPrimaryKeyIndices(physicalRowType, schema);
final boolean hasPk = primaryKeys.length > 0;
if (!hasPk) {
throw new TableException(
String.format(
"Configured parallelism %s for upsert table '%s' while can not find primary key field. "
+ "This is a bug, please file an issue.",
sourceParallelism,
tableSourceSpec
.getContextResolvedTable()
.getIdentifier()
.asSummaryString()));
}
final RowDataKeySelector selector =
KeySelectorUtil.getRowDataSelector(classLoader, primaryKeys, outputTypeInfo);
final KeyGroupStreamPartitioner<RowData, RowData> partitioner =
new KeyGroupStreamPartitioner<>(selector, DEFAULT_LOWER_BOUND_MAX_PARALLELISM);
Transformation<RowData> partitionedTransform =
new PartitionTransformation<>(sourceTransformationWrapper, partitioner);
createTransformationMeta("partitioner", "Partitioner", "Partitioner", config)
.fill(partitionedTransform);
return partitionedTransform;
} else {
return sourceTransformationWrapper;
}
}
private ProviderContext createProviderContext(ExecNodeConfig config) {
return name -> {
if (this instanceof StreamExecNode && config.shouldSetUid()) {
return Optional.of(createTransformationUid(name, config));
}
return Optional.empty();
};
}
/**
* Adopted from {@link StreamExecutionEnvironment#addSource(SourceFunction, String,
* TypeInformation)} but with custom {@link Boundedness}.
*
* @deprecated This method relies on the {@link SourceFunction} API, which is due to be removed.
*/
@Deprecated
protected Transformation<RowData> createSourceFunctionTransformation(
StreamExecutionEnvironment env,
SourceFunction<RowData> function,
boolean isBounded,
String operatorName,
TypeInformation<RowData> outputTypeInfo,
int sourceParallelism,
boolean sourceParallelismConfigured) {
env.clean(function);
final int parallelism;
if (function instanceof ParallelSourceFunction) {
if (sourceParallelismConfigured) {
parallelism = sourceParallelism;
} else {
parallelism = env.getParallelism();
}
} else {
parallelism = 1;
sourceParallelismConfigured = true;
}
final Boundedness boundedness;
if (isBounded) {
boundedness = Boundedness.BOUNDED;
} else {
boundedness = Boundedness.CONTINUOUS_UNBOUNDED;
}
final StreamSource<RowData, ?> sourceOperator = new StreamSource<>(function, !isBounded);
LegacySourceTransformation<RowData> transformation =
new LegacySourceTransformation<>(
operatorName,
sourceOperator,
outputTypeInfo,
parallelism,
boundedness,
sourceParallelismConfigured);
transformation.setChainingStrategy(ChainingStrategy.HEAD);
return transformation;
}
/**
* Creates a {@link Transformation} based on the given {@link InputFormat}. The implementation
* is different for streaming mode and batch mode.
*/
protected abstract Transformation<RowData> createInputFormatTransformation(
StreamExecutionEnvironment env,
InputFormat<RowData, ?> inputFormat,
InternalTypeInfo<RowData> outputTypeInfo,
String operatorName);
}
|
CommonExecTableSourceScan
|
java
|
assertj__assertj-core
|
assertj-core/src/main/java/org/assertj/core/api/Fail.java
|
{
"start": 6019,
"end": 6115
}
|
class ____ final, there is no point on creating a new instance of it.
*/
private Fail() {}
}
|
is
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/ASTHelpersSuggestionsTest.java
|
{
"start": 1641,
"end": 2136
}
|
class ____ {
void f(Symbol s) {
isStatic(s);
enclosingPackage(s);
}
}
""")
.addModules(
"jdk.compiler/com.sun.tools.javac.code", "jdk.compiler/com.sun.tools.javac.util")
.doTest();
}
@Test
public void onSymbolSubtype() {
testHelper
.addInputLines(
"Test.java",
"""
import com.sun.tools.javac.code.Symbol.VarSymbol;
|
Test
|
java
|
apache__commons-lang
|
src/main/java/org/apache/commons/lang3/reflect/TypeUtils.java
|
{
"start": 6241,
"end": 9855
}
|
class ____ implements WildcardType {
private final Type[] upperBounds;
private final Type[] lowerBounds;
/**
* Constructor
*
* @param upperBounds of this type.
* @param lowerBounds of this type.
*/
private WildcardTypeImpl(final Type[] upperBounds, final Type[] lowerBounds) {
this.upperBounds = ObjectUtils.getIfNull(upperBounds, ArrayUtils.EMPTY_TYPE_ARRAY);
this.lowerBounds = ObjectUtils.getIfNull(lowerBounds, ArrayUtils.EMPTY_TYPE_ARRAY);
}
/**
* {@inheritDoc}
*/
@Override
public boolean equals(final Object obj) {
return obj == this || obj instanceof WildcardType && TypeUtils.equals(this, (WildcardType) obj);
}
/**
* {@inheritDoc}
*/
@Override
public Type[] getLowerBounds() {
return lowerBounds.clone();
}
/**
* {@inheritDoc}
*/
@Override
public Type[] getUpperBounds() {
return upperBounds.clone();
}
/**
* {@inheritDoc}
*/
@Override
public int hashCode() {
int result = 73 << 8;
result |= Arrays.hashCode(upperBounds);
result <<= 8;
result |= Arrays.hashCode(lowerBounds);
return result;
}
/**
* {@inheritDoc}
*/
@Override
public String toString() {
return TypeUtils.toString(this);
}
}
/**
* Ampersand sign joiner.
*/
// @formatter:off
private static final AppendableJoiner<Type> AMP_JOINER = AppendableJoiner.<Type>builder()
.setDelimiter(" & ")
.setElementAppender((a, e) -> a.append(toString(e)))
.get();
// @formatter:on
/**
* Method classToString joiner.
*/
// @formatter:off
private static final AppendableJoiner<TypeVariable<Class<?>>> CTJ_JOINER = AppendableJoiner.<TypeVariable<Class<?>>>builder()
.setDelimiter(", ")
.setElementAppender((a, e) -> a.append(anyToString(e)))
.get();
// @formatter:on
/**
* Greater than and lesser than sign joiner.
*/
// @formatter:off
private static final AppendableJoiner<Object> GT_JOINER = AppendableJoiner.builder()
.setPrefix("<")
.setSuffix(">")
.setDelimiter(", ")
.setElementAppender((a, e) -> a.append(anyToString(e)))
.get();
// @formatter:on
/**
* A wildcard instance matching {@code ?}.
*
* @since 3.2
*/
public static final WildcardType WILDCARD_ALL = wildcardType().withUpperBounds(Object.class).build();
private static <T> String anyToString(final T object) {
return object instanceof Type ? toString((Type) object) : object.toString();
}
private static void appendRecursiveTypes(final StringBuilder builder, final int[] recursiveTypeIndexes, final Type[] argumentTypes) {
for (int i = 0; i < recursiveTypeIndexes.length; i++) {
// toString() or SO
GT_JOINER.join(builder, argumentTypes[i].toString());
}
final Type[] argumentsFiltered = ArrayUtils.removeAll(argumentTypes, recursiveTypeIndexes);
if (argumentsFiltered.length > 0) {
GT_JOINER.join(builder, (Object[]) argumentsFiltered);
}
}
/**
* Formats a {@link Class} as a {@link String}.
*
* @param cls {@link Class} to format.
* @return The
|
WildcardTypeImpl
|
java
|
netty__netty
|
handler/src/main/java/io/netty/handler/ssl/SslClosedEngineException.java
|
{
"start": 859,
"end": 1071
}
|
class ____ extends SSLException {
private static final long serialVersionUID = -5204207600474401904L;
public SslClosedEngineException(String reason) {
super(reason);
}
}
|
SslClosedEngineException
|
java
|
apache__camel
|
components/camel-debezium/camel-debezium-common/camel-debezium-common-component/src/main/java/org/apache/camel/component/debezium/configuration/EmbeddedDebeziumConfiguration.java
|
{
"start": 15017,
"end": 16636
}
|
class ____ implement the interface
* 'OffsetCommitPolicy'. The default is a periodic commit policy based upon time intervals.
*/
public String getOffsetCommitPolicy() {
return offsetCommitPolicy;
}
public void setOffsetCommitPolicy(String offsetCommitPolicy) {
this.offsetCommitPolicy = offsetCommitPolicy;
}
/**
* Interval at which to try committing offsets. The default is 1 minute.
*/
public long getOffsetFlushIntervalMs() {
return offsetFlushIntervalMs;
}
public void setOffsetFlushIntervalMs(long offsetFlushIntervalMs) {
this.offsetFlushIntervalMs = offsetFlushIntervalMs;
}
/**
* Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset
* storage before cancelling the process and restoring the offset data to be committed in a future attempt. The
* default is 5 seconds.
*/
public long getOffsetCommitTimeoutMs() {
return offsetCommitTimeoutMs;
}
public void setOffsetCommitTimeoutMs(long offsetCommitTimeoutMs) {
this.offsetCommitTimeoutMs = offsetCommitTimeoutMs;
}
/**
* The number of partitions used when creating the offset storage topic. Required when offset.storage is set to the
* 'KafkaOffsetBackingStore'.
*/
public int getOffsetStoragePartitions() {
return offsetStoragePartitions;
}
public void setOffsetStoragePartitions(int offsetStoragePartitions) {
this.offsetStoragePartitions = offsetStoragePartitions;
}
/**
* The Converter
|
must
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequest.java
|
{
"start": 939,
"end": 2274
}
|
class ____ extends MasterNodeRequest<DeleteIndexTemplateRequest> {
private String name;
public DeleteIndexTemplateRequest(StreamInput in) throws IOException {
super(in);
name = in.readString();
}
public DeleteIndexTemplateRequest() {
super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT);
}
/**
* Constructs a new delete index request for the specified name.
*/
public DeleteIndexTemplateRequest(String name) {
super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT);
this.name = name;
}
/**
* Set the index template name to delete.
*/
public DeleteIndexTemplateRequest name(String name) {
this.name = name;
return this;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (name == null) {
validationException = addValidationError("name is missing", validationException);
}
return validationException;
}
/**
* The index template name to delete.
*/
public String name() {
return name;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(name);
}
}
|
DeleteIndexTemplateRequest
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/ZeebeEndpointBuilderFactory.java
|
{
"start": 1600,
"end": 4172
}
|
interface ____
extends
EndpointConsumerBuilder {
default AdvancedZeebeEndpointConsumerBuilder advanced() {
return (AdvancedZeebeEndpointConsumerBuilder) this;
}
/**
* Format the result in the body as JSON.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param formatJSON the value to set
* @return the dsl builder
*/
default ZeebeEndpointConsumerBuilder formatJSON(boolean formatJSON) {
doSetProperty("formatJSON", formatJSON);
return this;
}
/**
* Format the result in the body as JSON.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param formatJSON the value to set
* @return the dsl builder
*/
default ZeebeEndpointConsumerBuilder formatJSON(String formatJSON) {
doSetProperty("formatJSON", formatJSON);
return this;
}
/**
* JobKey for the job worker.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: consumer
*
* @param jobKey the value to set
* @return the dsl builder
*/
default ZeebeEndpointConsumerBuilder jobKey(String jobKey) {
doSetProperty("jobKey", jobKey);
return this;
}
/**
* Timeout for job worker.
*
* The option is a: <code>int</code> type.
*
* Default: 10
* Group: consumer
*
* @param timeout the value to set
* @return the dsl builder
*/
default ZeebeEndpointConsumerBuilder timeout(int timeout) {
doSetProperty("timeout", timeout);
return this;
}
/**
* Timeout for job worker.
*
* The option will be converted to a <code>int</code> type.
*
* Default: 10
* Group: consumer
*
* @param timeout the value to set
* @return the dsl builder
*/
default ZeebeEndpointConsumerBuilder timeout(String timeout) {
doSetProperty("timeout", timeout);
return this;
}
}
/**
* Advanced builder for endpoint consumers for the Zeebe component.
*/
public
|
ZeebeEndpointConsumerBuilder
|
java
|
quarkusio__quarkus
|
core/deployment/src/main/java/io/quarkus/deployment/dev/JavaCompilationProvider.java
|
{
"start": 691,
"end": 5685
}
|
class ____ implements CompilationProvider {
private static final Logger LOG = Logger.getLogger(JavaCompilationProvider.class);
// -g is used to make the java compiler generate all debugging info
// -parameters is used to generate metadata for reflection on method parameters
// this is useful when people using debuggers against their hot-reloaded app
private static final Set<String> COMPILER_OPTIONS = Set.of("-g", "-parameters");
private static final Set<String> IGNORE_NAMESPACES = Set.of("org.osgi", "Annotation processing is enabled because");
private static final String PROVIDER_KEY = "java";
private JavaCompiler compiler;
private List<String> compilerFlags;
private QuarkusFileManager fileManager;
@Override
public String getProviderKey() {
return PROVIDER_KEY;
}
@Override
public Set<String> handledExtensions() {
return Set.of(".java");
}
@Override
public void compile(Set<File> filesToCompile, CompilationProvider.Context context) {
if (this.compiler == null) {
this.compiler = ToolProvider.getSystemJavaCompiler();
this.compilerFlags = new CompilerFlags(COMPILER_OPTIONS,
context.getCompilerOptions(PROVIDER_KEY),
context.getReleaseJavaVersion(),
context.getSourceJavaVersion(),
context.getTargetJvmVersion(),
context.getAnnotationProcessors()).toList();
}
final JavaCompiler compiler = this.compiler;
if (compiler == null) {
throw new RuntimeException("No system java compiler provided");
}
final QuarkusFileManager.Context sourcesContext = new QuarkusFileManager.Context(
context.getClasspath(), context.getReloadableClasspath(),
context.getOutputDirectory(), context.getGeneratedSourcesDirectory(),
context.getAnnotationProcessorPaths(),
context.getSourceEncoding(),
context.ignoreModuleInfo());
if (this.fileManager == null) {
final Supplier<StandardJavaFileManager> supplier = () -> {
final Charset charset = context.getSourceEncoding();
return compiler.getStandardFileManager(null, null, charset);
};
if (context.getReloadableClasspath().isEmpty()) {
this.fileManager = new StaticFileManager(supplier, sourcesContext);
} else {
this.fileManager = new ReloadableFileManager(supplier, sourcesContext);
}
} else {
this.fileManager.reset(sourcesContext);
}
final DiagnosticCollector<JavaFileObject> diagnosticsCollector = new DiagnosticCollector<>();
final Iterable<? extends JavaFileObject> sources = this.fileManager.getJavaSources(filesToCompile);
final JavaCompiler.CompilationTask task = this.compiler.getTask(null, this.fileManager,
diagnosticsCollector, this.compilerFlags, null, sources);
final boolean compilationTaskSucceed = task.call();
if (LOG.isEnabled(Logger.Level.ERROR) || LOG.isEnabled(Logger.Level.WARN)) {
collectDiagnostics(diagnosticsCollector, (level, diagnostic) -> LOG.logf(level, "%s, line %d in %s",
diagnostic.getMessage(null), diagnostic.getLineNumber(),
diagnostic.getSource() == null ? "[unknown source]" : diagnostic.getSource().getName()));
}
if (!compilationTaskSucceed) {
final String errorMessage = extractCompilationErrorMessage(diagnosticsCollector);
throw new RuntimeException(errorMessage);
}
}
@Override
public void close() throws IOException {
if (this.fileManager != null) {
this.fileManager.close();
this.fileManager = null;
}
}
private void collectDiagnostics(final DiagnosticCollector<JavaFileObject> diagnosticsCollector,
final BiConsumer<Logger.Level, Diagnostic<? extends JavaFileObject>> callback) {
for (Diagnostic<? extends JavaFileObject> diagnostic : diagnosticsCollector.getDiagnostics()) {
Logger.Level level = diagnostic.getKind() == Diagnostic.Kind.ERROR ? Logger.Level.ERROR : Logger.Level.WARN;
if (level.equals(Logger.Level.WARN) && IGNORE_NAMESPACES.stream()
.anyMatch(diagnostic.getMessage(null)::contains)) {
continue;
}
callback.accept(level, diagnostic);
}
}
private String extractCompilationErrorMessage(final DiagnosticCollector<JavaFileObject> diagnosticsCollector) {
StringBuilder builder = new StringBuilder();
diagnosticsCollector.getDiagnostics().forEach(diagnostic -> builder.append("\n").append(diagnostic));
return String.format("\u001B[91mCompilation Failed:%s\u001b[0m", builder);
}
}
|
JavaCompilationProvider
|
java
|
bumptech__glide
|
library/test/src/test/java/com/bumptech/glide/load/data/resource/FileDescriptorLocalUriFetcherTest.java
|
{
"start": 1343,
"end": 4340
}
|
class ____ {
@Mock private DataFetcher.DataCallback<ParcelFileDescriptor> callback;
@Before
public void setUp() {
MockitoAnnotations.initMocks(this);
}
@Test
public void testLoadResource_returnsFileDescriptor() throws Exception {
Context context = ApplicationProvider.getApplicationContext();
Uri uri = Uri.parse("file://nothing");
ContentResolver contentResolver = context.getContentResolver();
ContentResolverShadow shadow = Shadow.extract(contentResolver);
AssetFileDescriptor assetFileDescriptor = mock(AssetFileDescriptor.class);
ParcelFileDescriptor parcelFileDescriptor = mock(ParcelFileDescriptor.class);
when(assetFileDescriptor.getParcelFileDescriptor()).thenReturn(parcelFileDescriptor);
shadow.registerFileDescriptor(uri, assetFileDescriptor);
FileDescriptorLocalUriFetcher fetcher =
new FileDescriptorLocalUriFetcher(context.getContentResolver(), uri, false);
fetcher.loadData(Priority.NORMAL, callback);
verify(callback).onDataReady(eq(parcelFileDescriptor));
}
@Test
public void testLoadResource_mediaUri_returnsFileDescriptor() throws Exception {
Context context = ApplicationProvider.getApplicationContext();
Uri uri = Uri.parse("content://media");
ContentResolver contentResolver = context.getContentResolver();
AssetFileDescriptor assetFileDescriptor = mock(AssetFileDescriptor.class);
ParcelFileDescriptor parcelFileDescriptor = mock(ParcelFileDescriptor.class);
when(assetFileDescriptor.getParcelFileDescriptor()).thenReturn(parcelFileDescriptor);
FileDescriptorLocalUriFetcher fetcher =
new FileDescriptorLocalUriFetcher(
context.getContentResolver(), uri, /* useMediaStoreApisIfAvailable */ true);
try (MockedStatic<MediaStoreUtil> utils = Mockito.mockStatic(MediaStoreUtil.class)) {
utils.when(MediaStoreUtil::isMediaStoreOpenFileApisAvailable).thenReturn(true);
utils.when(() -> MediaStoreUtil.isMediaStoreUri(uri)).thenReturn(true);
utils
.when(() -> MediaStoreUtil.openAssetFileDescriptor(uri, contentResolver))
.thenReturn(assetFileDescriptor);
fetcher.loadData(Priority.NORMAL, callback);
verify(callback).onDataReady(eq(parcelFileDescriptor));
}
}
@Test
public void testLoadResource_withNullFileDescriptor_callsLoadFailed() {
Context context = ApplicationProvider.getApplicationContext();
Uri uri = Uri.parse("file://nothing");
ContentResolver contentResolver = context.getContentResolver();
ContentResolverShadow shadow = Shadow.extract(contentResolver);
shadow.registerFileDescriptor(uri, null /*fileDescriptor*/);
FileDescriptorLocalUriFetcher fetcher =
new FileDescriptorLocalUriFetcher(
context.getContentResolver(), uri, /* useMediaStoreApisIfAvailable */ false);
fetcher.loadData(Priority.NORMAL, callback);
verify(callback).onLoadFailed(isA(FileNotFoundException.class));
}
}
|
FileDescriptorLocalUriFetcherTest
|
java
|
assertj__assertj-core
|
assertj-core/src/main/java/org/assertj/core/error/ShouldBeLess.java
|
{
"start": 977,
"end": 2332
}
|
class ____ extends BasicErrorMessageFactory {
/**
* Creates a new <code>{@link ShouldBeLess}</code>.
* @param <T> guarantees that the values used in this factory have the same type.
* @param actual the actual value in the failed assertion.
* @param other the value used in the failed assertion to compare the actual value to.
* @return the created {@code ErrorMessageFactory}.
*/
public static <T> ErrorMessageFactory shouldBeLess(Comparable<? super T> actual, Comparable<? super T> other) {
return new ShouldBeLess(actual, other, StandardComparisonStrategy.instance());
}
/**
* Creates a new <code>{@link ShouldBeLess}</code>.
* @param actual the actual value in the failed assertion.
* @param other the value used in the failed assertion to compare the actual value to.
* @param comparisonStrategy the {@link ComparisonStrategy} used to evaluate assertion.
* @return the created {@code ErrorMessageFactory}.
*/
public static ErrorMessageFactory shouldBeLess(Object actual, Object other, ComparisonStrategy comparisonStrategy) {
return new ShouldBeLess(actual, other, comparisonStrategy);
}
private <T> ShouldBeLess(T actual, T other, ComparisonStrategy comparisonStrategy) {
super("%nExpecting actual:%n %s%nto be less than:%n %s %s", actual, other, comparisonStrategy);
}
}
|
ShouldBeLess
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
|
{
"start": 4183,
"end": 5016
}
|
class ____ extends HttpServlet {
@SuppressWarnings("unchecked")
@Override
public void doGet(HttpServletRequest request,
HttpServletResponse response
) throws ServletException, IOException {
response.setContentType(MediaType.TEXT_PLAIN + "; " + JettyUtils.UTF_8);
PrintWriter out = response.getWriter();
SortedSet<String> sortedKeys = new TreeSet<String>();
Enumeration<String> keys = request.getParameterNames();
while(keys.hasMoreElements()) {
sortedKeys.add(keys.nextElement());
}
for(String key: sortedKeys) {
out.print(key);
out.print(':');
out.print(request.getParameter(key));
out.print('\n');
}
out.close();
}
}
@SuppressWarnings("serial")
public static
|
EchoServlet
|
java
|
apache__camel
|
core/camel-api/src/main/java/org/apache/camel/health/HealthCheck.java
|
{
"start": 3997,
"end": 4761
}
|
interface ____ {
/**
* The {@link HealthCheck} associated to this response.
*/
HealthCheck getCheck();
/**
* The state of the service.
*/
State getState();
/**
* A message associated to the result, used to provide more information for unhealthy services.
*/
Optional<String> getMessage();
/**
* An error associated to the result, used to provide the error associated to unhealthy services.
*/
Optional<Throwable> getError();
/**
* A key/value combination of details.
*
* @return a non null details map (empty if no details)
*/
Map<String, Object> getDetails();
}
}
|
Result
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/bug/Bug_for_issue_630.java
|
{
"start": 1829,
"end": 2059
}
|
class ____ {
public int id;
public String name;
public String modelName;
public boolean isFlay;
public List<Person> persons;// = new ArrayList<Person>();
}
public static
|
Model
|
java
|
elastic__elasticsearch
|
test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/TeardownSection.java
|
{
"start": 764,
"end": 2956
}
|
class ____ {
/**
* Parse a {@link TeardownSection} if the next field is {@code skip}, otherwise returns {@link TeardownSection#EMPTY}.
*/
static TeardownSection parseIfNext(XContentParser parser) throws IOException {
ParserUtils.advanceToFieldName(parser);
if ("teardown".equals(parser.currentName())) {
parser.nextToken();
TeardownSection section = parse(parser);
parser.nextToken();
return section;
}
return EMPTY;
}
public static TeardownSection parse(XContentParser parser) throws IOException {
PrerequisiteSection prerequisiteSection = PrerequisiteSection.parseIfNext(parser);
List<ExecutableSection> executableSections = new ArrayList<>();
while (parser.currentToken() != XContentParser.Token.END_ARRAY) {
ParserUtils.advanceToFieldName(parser);
if ("do".equals(parser.currentName()) == false) {
throw new ParsingException(
parser.getTokenLocation(),
"section [" + parser.currentName() + "] not supported within teardown section"
);
}
executableSections.add(DoSection.parse(parser));
parser.nextToken();
}
parser.nextToken();
return new TeardownSection(prerequisiteSection, executableSections);
}
public static final TeardownSection EMPTY = new TeardownSection(PrerequisiteSection.EMPTY, Collections.emptyList());
private final PrerequisiteSection prerequisiteSection;
private final List<ExecutableSection> doSections;
TeardownSection(PrerequisiteSection prerequisiteSection, List<ExecutableSection> doSections) {
this.prerequisiteSection = Objects.requireNonNull(prerequisiteSection, "skip section cannot be null");
this.doSections = Collections.unmodifiableList(doSections);
}
public PrerequisiteSection getPrerequisiteSection() {
return prerequisiteSection;
}
public List<ExecutableSection> getDoSections() {
return doSections;
}
public boolean isEmpty() {
return EMPTY.equals(this);
}
}
|
TeardownSection
|
java
|
quarkusio__quarkus
|
extensions/opentelemetry/deployment/src/test/java/io/quarkus/opentelemetry/deployment/OpenTelemetryServiceNameAppNameTest.java
|
{
"start": 460,
"end": 1057
}
|
class ____ extends OpenTelemetryServiceNameBaseTest {
@RegisterExtension
static final QuarkusUnitTest TEST = new QuarkusUnitTest().setArchiveProducer(
() -> ShrinkWrap.create(JavaArchive.class)
.addClass(TestSpanExporter.class)
.addClass(TestSpanExporterProvider.class)
.addAsResource(new StringAsset("" +
"quarkus.otel.bsp.schedule.delay=50\n" +
"quarkus.application.name=" + SERVICE_NAME + "\n"), "application.properties"));
}
|
OpenTelemetryServiceNameAppNameTest
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/bug/Bug_101_for_rongganlin.java
|
{
"start": 1487,
"end": 1525
}
|
class ____ extends Element {
}
}
|
Image
|
java
|
spring-projects__spring-boot
|
module/spring-boot-mongodb/src/main/java/org/springframework/boot/mongodb/autoconfigure/MongoClientSettingsBuilderCustomizer.java
|
{
"start": 1038,
"end": 1242
}
|
interface ____ {
/**
* Customize the {@link Builder}.
* @param clientSettingsBuilder the builder to customize
*/
void customize(Builder clientSettingsBuilder);
}
|
MongoClientSettingsBuilderCustomizer
|
java
|
netty__netty
|
handler/src/main/java/io/netty/handler/ssl/OpenSslContextOption.java
|
{
"start": 850,
"end": 4786
}
|
class ____<T> extends SslContextOption<T> {
private OpenSslContextOption(String name) {
super(name);
}
/**
* If enabled heavy-operations may be offloaded from the {@link io.netty.channel.EventLoop} if possible.
*/
public static final OpenSslContextOption<Boolean> USE_TASKS =
new OpenSslContextOption<Boolean>("USE_TASKS");
/**
* If enabled <a href="https://tools.ietf.org/html/rfc7918">TLS false start</a> will be enabled if supported.
* When TLS false start is enabled the flow of {@link SslHandshakeCompletionEvent}s may be different compared when,
* not enabled.
*
* This is currently only supported when {@code BoringSSL} and ALPN is used.
*/
public static final OpenSslContextOption<Boolean> TLS_FALSE_START =
new OpenSslContextOption<Boolean>("TLS_FALSE_START");
/**
* Set the {@link OpenSslPrivateKeyMethod} to use. This allows to offload private-key operations
* if needed.
*
* This is currently only supported when {@code BoringSSL} is used.
*/
public static final OpenSslContextOption<OpenSslPrivateKeyMethod> PRIVATE_KEY_METHOD =
new OpenSslContextOption<OpenSslPrivateKeyMethod>("PRIVATE_KEY_METHOD");
/**
* Set the {@link OpenSslAsyncPrivateKeyMethod} to use. This allows to offload private-key operations
* if needed.
*
* This is currently only supported when {@code BoringSSL} is used.
*/
public static final OpenSslContextOption<OpenSslAsyncPrivateKeyMethod> ASYNC_PRIVATE_KEY_METHOD =
new OpenSslContextOption<OpenSslAsyncPrivateKeyMethod>("ASYNC_PRIVATE_KEY_METHOD");
/**
* Set the {@link OpenSslCertificateCompressionConfig} to use. This allows for the configuration of certificate
* compression algorithms which should be used, the priority of those algorithms and the directions in which
* they should be used.
*
* This is currently only supported when {@code BoringSSL} is used.
*/
public static final OpenSslContextOption<OpenSslCertificateCompressionConfig> CERTIFICATE_COMPRESSION_ALGORITHMS =
new OpenSslContextOption<OpenSslCertificateCompressionConfig>("CERTIFICATE_COMPRESSION_ALGORITHMS");
/**
* Set the maximum number of bytes that is allowed during the handshake for certificate chain.
*/
public static final OpenSslContextOption<Integer> MAX_CERTIFICATE_LIST_BYTES =
new OpenSslContextOption<Integer>("MAX_CERTIFICATE_LIST_BYTES");
/**
* Set the groups that should be used. This will override curves set with {@code -Djdk.tls.namedGroups}.
* <p>
* See <a href="https://docs.openssl.org/master/man3/SSL_CTX_set1_groups_list/#description">
* SSL_CTX_set1_groups_list</a>.
*/
public static final OpenSslContextOption<String[]> GROUPS = new OpenSslContextOption<String[]>("GROUPS");
/**
* Set the desired length of the Diffie-Hellman ephemeral session keys.
* This will override the key length set with {@code -Djdk.tls.ephemeralDHKeySize}.
* <p>
* The only supported values are {@code 512}, {@code 1024}, {@code 2048}, and {@code 4096}.
* <p>
* See <a href="https://docs.openssl.org/1.0.2/man3/SSL_CTX_set_tmp_dh_callback/">SSL_CTX_set_tmp_dh_callback</a>.
*/
public static final OpenSslContextOption<Integer> TMP_DH_KEYLENGTH =
new OpenSslContextOption<Integer>("TMP_DH_KEYLENGTH");
/**
* Set the policy for handling alternative key providers (such as hardware security keys,
* smart cards, remote signing services, etc.) when using BoringSSL.
* <p>
* Note: this feature only works when {@code BoringSSL} or {@code AWS-LC} is used.
*/
public static final OpenSslContextOption<Boolean> USE_JDK_PROVIDER_SIGNATURES =
new OpenSslContextOption<>("USE_JDK_PROVIDER_SIGNATURES");
}
|
OpenSslContextOption
|
java
|
elastic__elasticsearch
|
x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/action/CancellationIT.java
|
{
"start": 8005,
"end": 9399
}
|
class ____ extends MockScriptPlugin {
static final String SCRIPT_NAME = "search_block";
private final AtomicInteger hits = new AtomicInteger();
private final AtomicInteger slack = new AtomicInteger(0);
private final AtomicBoolean shouldBlock = new AtomicBoolean(true);
void reset() {
hits.set(0);
}
void disableBlock() {
shouldBlock.set(false);
}
void enableBlock() {
shouldBlock.set(true);
}
void setSlack(int slack) {
this.slack.set(slack);
}
@Override
public Map<String, Function<Map<String, Object>, Object>> pluginScripts() {
return Collections.singletonMap(SCRIPT_NAME, params -> {
LeafStoredFieldsLookup fieldsLookup = (LeafStoredFieldsLookup) params.get("_fields");
LogManager.getLogger(CancellationIT.class).info("Blocking on the document {}", fieldsLookup.get("_id"));
hits.incrementAndGet();
if (slack.decrementAndGet() < 0) {
try {
waitUntil(() -> shouldBlock.get() == false);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
return true;
});
}
}
}
|
ScriptedBlockPlugin
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/bootstrap/WithEntitlementsOnTestCodeMetaTests.java
|
{
"start": 1044,
"end": 1791
}
|
class ____ extends ESTestCase {
/**
* {@link WithEntitlementsOnTestCode} should not affect this, since the sensitive method
* is called from server code. The self-test should pass as usual.
*/
public void testSelfTestPasses() {
assumeTrue("Not yet working in serverless", TestEntitlementBootstrap.isEnabledForTests());
Elasticsearch.entitlementSelfTest();
}
@SuppressForbidden(reason = "Testing that a forbidden API is disallowed")
public void testForbiddenActionDenied() {
assumeTrue("Not yet working in serverless", TestEntitlementBootstrap.isEnabledForTests());
assertThrows(NotEntitledException.class, () -> Path.of(".").toRealPath());
}
}
|
WithEntitlementsOnTestCodeMetaTests
|
java
|
apache__flink
|
flink-core/src/main/java/org/apache/flink/core/memory/MemorySegment.java
|
{
"start": 1830,
"end": 2886
}
|
class ____ various reasons:
*
* <ul>
* <li>It offers additional binary compare, swap, and copy methods.
* <li>It uses collapsed checks for range check and memory segment disposal.
* <li>It offers absolute positioning methods for bulk put/get methods, to guarantee thread safe
* use.
* <li>It offers explicit big-endian / little-endian access methods, rather than tracking
* internally a byte order.
* <li>It transparently and efficiently moves data between on-heap and off-heap variants.
* </ul>
*
* <p><i>Comments on the implementation</i>: We make heavy use of operations that are supported by
* native instructions, to achieve a high efficiency. Multi byte types (int, long, float, double,
* ...) are read and written with "unsafe" native commands.
*
* <p><i>Note on efficiency</i>: For best efficiency, we do not separate implementations of
* different memory types with inheritance, to avoid the overhead from looking for concrete
* implementations on invocations of abstract methods.
*/
@Internal
public final
|
for
|
java
|
alibaba__nacos
|
api/src/test/java/com/alibaba/nacos/api/config/listener/AbstractFuzzyWatchEventWatcherTest.java
|
{
"start": 869,
"end": 1655
}
|
class ____ {
private AbstractFuzzyWatchEventWatcher fuzzyWatchEventWatcher;
@BeforeEach
void setUp() {
fuzzyWatchEventWatcher = new AbstractFuzzyWatchEventWatcher() {
@Override
public void onEvent(ConfigFuzzyWatchChangeEvent event) {
// Empty implementation for testing
}
};
}
@Test
void testGetExecutor() {
assertNull(fuzzyWatchEventWatcher.getExecutor());
}
@Test
void testOnPatternOverLimit() {
assertDoesNotThrow(() -> fuzzyWatchEventWatcher.onPatternOverLimit());
}
@Test
void testOnConfigReachUpLimit() {
assertDoesNotThrow(() -> fuzzyWatchEventWatcher.onConfigReachUpLimit());
}
}
|
AbstractFuzzyWatchEventWatcherTest
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/asyncprocessing/operators/AsyncIntervalJoinOperatorTest.java
|
{
"start": 2854,
"end": 28299
}
|
class ____ {
private final boolean lhsFasterThanRhs;
@Parameters(name = "lhs faster than rhs: {0}")
private static Collection<Object[]> data() {
return Arrays.asList(new Object[][] {{true}, {false}});
}
public AsyncIntervalJoinOperatorTest(boolean lhsFasterThanRhs) {
this.lhsFasterThanRhs = lhsFasterThanRhs;
}
@TestTemplate
void testImplementationMirrorsCorrectly() throws Exception {
long lowerBound = 1;
long upperBound = 3;
boolean lowerBoundInclusive = true;
boolean upperBoundInclusive = false;
setupHarness(lowerBound, lowerBoundInclusive, upperBound, upperBoundInclusive)
.processElementsAndWatermarks(1, 4)
.andExpect(
streamRecordOf(1, 2),
streamRecordOf(1, 3),
streamRecordOf(2, 3),
streamRecordOf(2, 4),
streamRecordOf(3, 4))
.noLateRecords()
.close();
setupHarness(-1 * upperBound, upperBoundInclusive, -1 * lowerBound, lowerBoundInclusive)
.processElementsAndWatermarks(1, 4)
.andExpect(
streamRecordOf(2, 1),
streamRecordOf(3, 1),
streamRecordOf(3, 2),
streamRecordOf(4, 2),
streamRecordOf(4, 3))
.noLateRecords()
.close();
}
@TestTemplate // lhs - 2 <= rhs <= rhs + 2
void testNegativeInclusiveAndNegativeInclusive() throws Exception {
setupHarness(-2, true, -1, true)
.processElementsAndWatermarks(1, 4)
.andExpect(
streamRecordOf(2, 1),
streamRecordOf(3, 1),
streamRecordOf(3, 2),
streamRecordOf(4, 2),
streamRecordOf(4, 3))
.noLateRecords()
.close();
}
@TestTemplate // lhs - 1 <= rhs <= rhs + 1
void testNegativeInclusiveAndPositiveInclusive() throws Exception {
setupHarness(-1, true, 1, true)
.processElementsAndWatermarks(1, 4)
.andExpect(
streamRecordOf(1, 1),
streamRecordOf(1, 2),
streamRecordOf(2, 1),
streamRecordOf(2, 2),
streamRecordOf(2, 3),
streamRecordOf(3, 2),
streamRecordOf(3, 3),
streamRecordOf(3, 4),
streamRecordOf(4, 3),
streamRecordOf(4, 4))
.noLateRecords()
.close();
}
@TestTemplate // lhs + 1 <= rhs <= lhs + 2
void testPositiveInclusiveAndPositiveInclusive() throws Exception {
setupHarness(1, true, 2, true)
.processElementsAndWatermarks(1, 4)
.andExpect(
streamRecordOf(1, 2),
streamRecordOf(1, 3),
streamRecordOf(2, 3),
streamRecordOf(2, 4),
streamRecordOf(3, 4))
.noLateRecords()
.close();
}
@TestTemplate
void testNegativeExclusiveAndNegativeExlusive() throws Exception {
setupHarness(-3, false, -1, false)
.processElementsAndWatermarks(1, 4)
.andExpect(streamRecordOf(3, 1), streamRecordOf(4, 2))
.noLateRecords()
.close();
}
@TestTemplate
void testNegativeExclusiveAndPositiveExlusive() throws Exception {
setupHarness(-1, false, 1, false)
.processElementsAndWatermarks(1, 4)
.andExpect(
streamRecordOf(1, 1),
streamRecordOf(2, 2),
streamRecordOf(3, 3),
streamRecordOf(4, 4))
.noLateRecords()
.close();
}
@TestTemplate
void testPositiveExclusiveAndPositiveExlusive() throws Exception {
setupHarness(1, false, 3, false)
.processElementsAndWatermarks(1, 4)
.andExpect(streamRecordOf(1, 3), streamRecordOf(2, 4))
.noLateRecords()
.close();
}
@TestTemplate
void testStateCleanupNegativeInclusiveNegativeInclusive() throws Exception {
setupHarness(-1, true, 0, true)
.processElement1(1)
.processElement1(2)
.processElement1(3)
.processElement1(4)
.processElement1(5)
.processElement2(1)
.processElement2(2)
.processElement2(3)
.processElement2(4)
.processElement2(5) // fill both buffers with values
.processWatermark1(1)
.processWatermark2(1) // set common watermark to 1 and check that data is cleaned
.assertLeftBufferContainsOnly(2, 3, 4, 5)
.assertRightBufferContainsOnly(1, 2, 3, 4, 5)
.processWatermark1(4) // set common watermark to 4 and check that data is cleaned
.processWatermark2(4)
.assertLeftBufferContainsOnly(5)
.assertRightBufferContainsOnly(4, 5)
.processWatermark1(
6) // set common watermark to 6 and check that data all buffers are empty
.processWatermark2(6)
.assertLeftBufferEmpty()
.assertRightBufferEmpty()
.close();
}
@TestTemplate
void testStateCleanupNegativePositiveNegativeExlusive() throws Exception {
setupHarness(-2, false, 1, false)
.processElement1(1)
.processElement1(2)
.processElement1(3)
.processElement1(4)
.processElement1(5)
.processElement2(1)
.processElement2(2)
.processElement2(3)
.processElement2(4)
.processElement2(5) // fill both buffers with values
.processWatermark1(1)
.processWatermark2(1) // set common watermark to 1 and check that data is cleaned
.assertLeftBufferContainsOnly(2, 3, 4, 5)
.assertRightBufferContainsOnly(1, 2, 3, 4, 5)
.processWatermark1(4) // set common watermark to 4 and check that data is cleaned
.processWatermark2(4)
.assertLeftBufferContainsOnly(5)
.assertRightBufferContainsOnly(4, 5)
.processWatermark1(
6) // set common watermark to 6 and check that data all buffers are empty
.processWatermark2(6)
.assertLeftBufferEmpty()
.assertRightBufferEmpty()
.close();
}
@TestTemplate
void testStateCleanupPositiveInclusivePositiveInclusive() throws Exception {
setupHarness(0, true, 1, true)
.processElement1(1)
.processElement1(2)
.processElement1(3)
.processElement1(4)
.processElement1(5)
.processElement2(1)
.processElement2(2)
.processElement2(3)
.processElement2(4)
.processElement2(5) // fill both buffers with values
.processWatermark1(1)
.processWatermark2(1) // set common watermark to 1 and check that data is cleaned
.assertLeftBufferContainsOnly(1, 2, 3, 4, 5)
.assertRightBufferContainsOnly(2, 3, 4, 5)
.processWatermark1(4) // set common watermark to 4 and check that data is cleaned
.processWatermark2(4)
.assertLeftBufferContainsOnly(4, 5)
.assertRightBufferContainsOnly(5)
.processWatermark1(
6) // set common watermark to 6 and check that data all buffers are empty
.processWatermark2(6)
.assertLeftBufferEmpty()
.assertRightBufferEmpty()
.close();
}
@TestTemplate
void testStateCleanupPositiveExlusivePositiveExclusive() throws Exception {
setupHarness(-1, false, 2, false)
.processElement1(1)
.processElement1(2)
.processElement1(3)
.processElement1(4)
.processElement1(5)
.processElement2(1)
.processElement2(2)
.processElement2(3)
.processElement2(4)
.processElement2(5) // fill both buffers with values
.processWatermark1(1)
.processWatermark2(1) // set common watermark to 1 and check that data is cleaned
.assertLeftBufferContainsOnly(1, 2, 3, 4, 5)
.assertRightBufferContainsOnly(2, 3, 4, 5)
.processWatermark1(4) // set common watermark to 4 and check that data is cleaned
.processWatermark2(4)
.assertLeftBufferContainsOnly(4, 5)
.assertRightBufferContainsOnly(5)
.processWatermark1(
6) // set common watermark to 6 and check that data all buffers are empty
.processWatermark2(6)
.assertLeftBufferEmpty()
.assertRightBufferEmpty()
.close();
}
@TestTemplate
void testRestoreFromSnapshot() throws Exception {
// config
int lowerBound = -1;
boolean lowerBoundInclusive = true;
int upperBound = 1;
boolean upperBoundInclusive = true;
// create first test harness
OperatorSubtaskState handles;
List<StreamRecord<Tuple2<TestElem, TestElem>>> expectedOutput;
try (TestHarness testHarness =
createTestHarness(
lowerBound, lowerBoundInclusive, upperBound, upperBoundInclusive)) {
testHarness.setup();
testHarness.open();
// process elements with first test harness
testHarness.processElement1(createStreamRecord(1, "lhs"));
testHarness.processWatermark1(new Watermark(1));
testHarness.processElement2(createStreamRecord(1, "rhs"));
testHarness.processWatermark2(new Watermark(1));
testHarness.processElement1(createStreamRecord(2, "lhs"));
testHarness.processWatermark1(new Watermark(2));
testHarness.processElement2(createStreamRecord(2, "rhs"));
testHarness.processWatermark2(new Watermark(2));
testHarness.processElement1(createStreamRecord(3, "lhs"));
testHarness.processWatermark1(new Watermark(3));
testHarness.processElement2(createStreamRecord(3, "rhs"));
testHarness.processWatermark2(new Watermark(3));
// snapshot and validate output
handles = testHarness.snapshot(0, 0);
expectedOutput =
Lists.newArrayList(
streamRecordOf(1, 1),
streamRecordOf(1, 2),
streamRecordOf(2, 1),
streamRecordOf(2, 2),
streamRecordOf(2, 3),
streamRecordOf(3, 2),
streamRecordOf(3, 3));
TestHarnessUtil.assertNoLateRecords(testHarness.getOutput());
assertOutput(expectedOutput, testHarness.getOutput());
}
try (TestHarness newTestHarness =
createTestHarness(
lowerBound, lowerBoundInclusive, upperBound, upperBoundInclusive)) {
// create new test harness from snapshpt
newTestHarness.setup();
newTestHarness.initializeState(handles);
newTestHarness.open();
// process elements
newTestHarness.processElement1(createStreamRecord(4, "lhs"));
newTestHarness.processWatermark1(new Watermark(4));
newTestHarness.processElement2(createStreamRecord(4, "rhs"));
newTestHarness.processWatermark2(new Watermark(4));
// assert expected output
expectedOutput =
Lists.newArrayList(
streamRecordOf(3, 4), streamRecordOf(4, 3), streamRecordOf(4, 4));
TestHarnessUtil.assertNoLateRecords(newTestHarness.getOutput());
assertOutput(expectedOutput, newTestHarness.getOutput());
}
}
@TestTemplate
void testContextCorrectLeftTimestamp() throws Exception {
AsyncIntervalJoinOperator<String, TestElem, TestElem, Tuple2<TestElem, TestElem>> op =
new AsyncIntervalJoinOperator<>(
-1,
1,
true,
true,
null,
null,
TestElem.serializer(),
TestElem.serializer(),
new ProcessJoinFunction<TestElem, TestElem, Tuple2<TestElem, TestElem>>() {
@Override
public void processElement(
TestElem left,
TestElem right,
Context ctx,
Collector<Tuple2<TestElem, TestElem>> out)
throws Exception {
assertThat(ctx.getLeftTimestamp()).isEqualTo(left.ts);
}
});
try (TestHarness testHarness =
TestHarness.createOne(
op,
(elem) -> elem.key,
(elem) -> elem.key,
TypeInformation.of(String.class))) {
testHarness.setup();
testHarness.open();
processElementsAndWatermarks(testHarness);
}
}
@TestTemplate
void testReturnsCorrectTimestamp() throws Exception {
AsyncIntervalJoinOperator<String, TestElem, TestElem, Tuple2<TestElem, TestElem>> op =
new AsyncIntervalJoinOperator<>(
-1,
1,
true,
true,
null,
null,
TestElem.serializer(),
TestElem.serializer(),
new ProcessJoinFunction<TestElem, TestElem, Tuple2<TestElem, TestElem>>() {
private static final long serialVersionUID = 1L;
@Override
public void processElement(
TestElem left,
TestElem right,
Context ctx,
Collector<Tuple2<TestElem, TestElem>> out)
throws Exception {
assertThat(ctx.getTimestamp())
.isEqualTo(Math.max(left.ts, right.ts));
}
});
try (TestHarness testHarness =
TestHarness.createOne(
op,
(elem) -> elem.key,
(elem) -> elem.key,
TypeInformation.of(String.class))) {
testHarness.setup();
testHarness.open();
processElementsAndWatermarks(testHarness);
}
}
@TestTemplate
void testContextCorrectRightTimestamp() throws Exception {
AsyncIntervalJoinOperator<String, TestElem, TestElem, Tuple2<TestElem, TestElem>> op =
new AsyncIntervalJoinOperator<>(
-1,
1,
true,
true,
null,
null,
TestElem.serializer(),
TestElem.serializer(),
new ProcessJoinFunction<TestElem, TestElem, Tuple2<TestElem, TestElem>>() {
@Override
public void processElement(
TestElem left,
TestElem right,
Context ctx,
Collector<Tuple2<TestElem, TestElem>> out)
throws Exception {
assertThat(ctx.getRightTimestamp()).isEqualTo(right.ts);
}
});
try (TestHarness testHarness =
TestHarness.createOne(
op,
(elem) -> elem.key,
(elem) -> elem.key,
TypeInformation.of(String.class))) {
testHarness.setup();
testHarness.open();
processElementsAndWatermarks(testHarness);
}
}
@TestTemplate
void testFailsWithNoTimestampsLeft() throws Exception {
try (TestHarness newTestHarness = createTestHarness(0L, true, 0L, true)) {
newTestHarness.setup();
newTestHarness.open();
// note that the StreamRecord has no timestamp in constructor
assertThatThrownBy(
() ->
newTestHarness.processElement1(
new StreamRecord<>(new TestElem(0, "lhs"))))
.isInstanceOf(FlinkException.class);
}
}
@TestTemplate // (expected = FlinkException.class)
void testFailsWithNoTimestampsRight() throws Exception {
try (TestHarness newTestHarness = createTestHarness(0L, true, 0L, true)) {
newTestHarness.setup();
newTestHarness.open();
// note that the StreamRecord has no timestamp in constructor
assertThatThrownBy(
() ->
newTestHarness.processElement2(
new StreamRecord<>(new TestElem(0, "rhs"))))
.isInstanceOf(FlinkException.class);
}
}
@TestTemplate
void testDiscardsLateData() throws Exception {
setupHarness(-1, true, 1, true)
.processElement1(1)
.processElement2(1)
.processElement1(2)
.processElement2(2)
.processElement1(3)
.processElement2(3)
.processWatermark1(3)
.processWatermark2(3)
.processElement1(1) // this element is late and should not be joined again
.processElement1(4)
.processElement2(4)
.processElement1(5)
.processElement2(5)
.andExpect(
streamRecordOf(1, 1),
streamRecordOf(1, 2),
streamRecordOf(2, 1),
streamRecordOf(2, 2),
streamRecordOf(2, 3),
streamRecordOf(3, 2),
streamRecordOf(3, 3),
streamRecordOf(3, 4),
streamRecordOf(4, 3),
streamRecordOf(4, 4),
streamRecordOf(4, 5),
streamRecordOf(5, 4),
streamRecordOf(5, 5))
.noLateRecords()
.close();
}
@TestTemplate
void testLateData() throws Exception {
OutputTag<TestElem> leftLateTag = new OutputTag<TestElem>("left_late") {};
OutputTag<TestElem> rightLateTag = new OutputTag<TestElem>("right_late") {};
setupHarness(-1, true, 1, true, leftLateTag, rightLateTag)
.processElement1(3)
.processElement2(3)
.processWatermark1(3)
.processWatermark2(3)
.processElement1(4)
.processElement2(4)
.processElement1(1) // the left side element is late
.processElement2(2) // the right side element is late
.processElement1(5)
.processElement2(5)
.andExpect(
streamRecordOf(3, 3),
streamRecordOf(3, 4),
streamRecordOf(4, 3),
streamRecordOf(4, 4),
streamRecordOf(4, 5),
streamRecordOf(5, 4),
streamRecordOf(5, 5))
.expectLateRecords(leftLateTag, createStreamRecord(1, "lhs"))
.expectLateRecords(rightLateTag, createStreamRecord(2, "rhs"))
.close();
}
private void assertEmpty(MapState<Long, ?> state) throws Exception {
assertThat(state.keys()).isEmpty();
}
private void assertContainsOnly(MapState<Long, ?> state, long... ts) throws Exception {
for (long t : ts) {
String message =
"Keys not found in state. \n Expected: "
+ Arrays.toString(ts)
+ "\n Actual: "
+ state.keys();
assertThat(state.contains(t)).as(message).isTrue();
}
String message =
"Too many objects in state. \n Expected: "
+ Arrays.toString(ts)
+ "\n Actual: "
+ state.keys();
assertThat(state.keys()).as(message).hasSize(ts.length);
}
private <T1, T2> void assertOutput(
Iterable<StreamRecord<T1>> expectedOutput, Queue<T2> actualOutput) {
int actualSize =
actualOutput.stream()
.filter(elem -> elem instanceof StreamRecord)
.collect(Collectors.toList())
.size();
int expectedSize = Iterables.size(expectedOutput);
assertThat(actualSize)
.as("Expected and actual size of stream records different")
.isEqualTo(expectedSize);
for (StreamRecord<T1> record : expectedOutput) {
assertThat(actualOutput.contains(record)).isTrue();
}
}
private TestHarness createTestHarness(
long lowerBound,
boolean lowerBoundInclusive,
long upperBound,
boolean upperBoundInclusive)
throws Exception {
AsyncIntervalJoinOperator<String, TestElem, TestElem, Tuple2<TestElem, TestElem>> operator =
new AsyncIntervalJoinOperator<>(
lowerBound,
upperBound,
lowerBoundInclusive,
upperBoundInclusive,
null,
null,
TestElem.serializer(),
TestElem.serializer(),
new PassthroughFunction());
return TestHarness.createOne(
operator,
(elem) -> elem.key, // key
(elem) -> elem.key, // key
TypeInformation.of(String.class));
}
private JoinTestBuilder setupHarness(
long lowerBound,
boolean lowerBoundInclusive,
long upperBound,
boolean upperBoundInclusive,
OutputTag<TestElem> leftLateDataOutputTag,
OutputTag<TestElem> rightLateDataOutputTag)
throws Exception {
AsyncIntervalJoinOperator<String, TestElem, TestElem, Tuple2<TestElem, TestElem>> operator =
new AsyncIntervalJoinOperator<>(
lowerBound,
upperBound,
lowerBoundInclusive,
upperBoundInclusive,
leftLateDataOutputTag,
rightLateDataOutputTag,
TestElem.serializer(),
TestElem.serializer(),
new PassthroughFunction());
TestHarness t =
TestHarness.createOne(
operator,
(elem) -> elem.key, // key
(elem) -> elem.key, // key
TypeInformation.of(String.class));
return new JoinTestBuilder(t, operator);
}
private JoinTestBuilder setupHarness(
long lowerBound,
boolean lowerBoundInclusive,
long upperBound,
boolean upperBoundInclusive)
throws Exception {
return setupHarness(
lowerBound, lowerBoundInclusive, upperBound, upperBoundInclusive, null, null);
}
private
|
AsyncIntervalJoinOperatorTest
|
java
|
apache__rocketmq
|
store/src/main/java/org/apache/rocketmq/store/AppendMessageCallback.java
|
{
"start": 1043,
"end": 1770
}
|
interface ____ {
/**
* After message serialization, write MappedByteBuffer
*
* @return How many bytes to write
*/
AppendMessageResult doAppend(final long fileFromOffset, final ByteBuffer byteBuffer,
final int maxBlank, final MessageExtBrokerInner msg, PutMessageContext putMessageContext);
/**
* After batched message serialization, write MappedByteBuffer
*
* @param messageExtBatch, backed up by a byte array
* @return How many bytes to write
*/
AppendMessageResult doAppend(final long fileFromOffset, final ByteBuffer byteBuffer,
final int maxBlank, final MessageExtBatch messageExtBatch, PutMessageContext putMessageContext);
}
|
AppendMessageCallback
|
java
|
quarkusio__quarkus
|
extensions/hibernate-validator/deployment/src/test/java/io/quarkus/hibernate/validator/test/AllowParameterConstraintsOnParallelMethodsTest.java
|
{
"start": 1487,
"end": 1610
}
|
interface ____ {
String foo(@NotNull String s);
}
private static
|
AnotherInterfaceWithMethodParameterConstraint
|
java
|
mockito__mockito
|
mockito-core/src/main/java/org/mockito/Mockito.java
|
{
"start": 132948,
"end": 140482
}
|
class ____.");
}
return mockConstruction(getClassOf(reified), mockSettingsFactory, mockInitializer);
}
/**
* Enables stubbing methods. Use it when you want the mock to return particular value when particular method is called.
* <p>
* Simply put: "<b>When</b> the x method is called <b>then</b> return y".
*
* <p>
* Examples:
*
* <pre class="code"><code class="java">
* <b>when</b>(mock.someMethod()).<b>thenReturn</b>(10);
*
* //you can use flexible argument matchers, e.g:
* when(mock.someMethod(<b>anyString()</b>)).thenReturn(10);
*
* //setting exception to be thrown:
* when(mock.someMethod("some arg")).thenThrow(new RuntimeException());
*
* //you can set different behavior for consecutive method calls.
* //Last stubbing (e.g: thenReturn("foo")) determines the behavior of further consecutive calls.
* when(mock.someMethod("some arg"))
* .thenThrow(new RuntimeException())
* .thenReturn("foo");
*
* //Alternative, shorter version for consecutive stubbing:
* when(mock.someMethod("some arg"))
* .thenReturn("one", "two");
* //is the same as:
* when(mock.someMethod("some arg"))
* .thenReturn("one")
* .thenReturn("two");
*
* //shorter version for consecutive method calls throwing exceptions:
* when(mock.someMethod("some arg"))
* .thenThrow(new RuntimeException(), new NullPointerException();
*
* </code></pre>
*
* For stubbing void methods with throwables see: {@link Mockito#doThrow(Throwable...)}
* <p>
* Stubbing can be overridden: for example common stubbing can go to fixture
* setup but the test methods can override it.
* Please note that overriding stubbing is a potential code smell that points out too much stubbing.
* <p>
* Once stubbed, the method will always return stubbed value regardless
* of how many times it is called.
* <p>
* Last stubbing is more important - when you stubbed the same method with
* the same arguments many times.
* <p>
* Although it is possible to verify a stubbed invocation, usually <b>it's just redundant</b>.
* Let's say you've stubbed <code>foo.bar()</code>.
* If your code cares what <code>foo.bar()</code> returns then something else breaks(often before even <code>verify()</code> gets executed).
* If your code doesn't care what <code>get(0)</code> returns then it should not be stubbed.
*
* <p>
* See examples in javadoc for {@link Mockito} class
* @param methodCall method to be stubbed
* @return OngoingStubbing object used to stub fluently.
* <strong>Do not</strong> create a reference to this returned object.
*/
public static <T> OngoingStubbing<T> when(T methodCall) {
return MOCKITO_CORE.when(methodCall);
}
/**
* Verifies certain behavior <b>happened once</b>.
* <p>
* Alias to <code>verify(mock, times(1))</code> E.g:
* <pre class="code"><code class="java">
* verify(mock).someMethod("some arg");
* </code></pre>
* Above is equivalent to:
* <pre class="code"><code class="java">
* verify(mock, times(1)).someMethod("some arg");
* </code></pre>
* <p>
* Arguments passed are compared using <code>equals()</code> method.
* Read about {@link ArgumentCaptor} or {@link ArgumentMatcher} to find out other ways of matching / asserting arguments passed.
* <p>
* Although it is possible to verify a stubbed invocation, usually <b>it's just redundant</b>.
* Let's say you've stubbed <code>foo.bar()</code>.
* If your code cares what <code>foo.bar()</code> returns then something else breaks(often before even <code>verify()</code> gets executed).
* If your code doesn't care what <code>foo.bar()</code> returns then it should not be stubbed.
*
* <p>
* See examples in javadoc for {@link Mockito} class
*
* @param mock to be verified
* @return mock object itself
*/
public static <T> T verify(T mock) {
return MOCKITO_CORE.verify(mock, times(1));
}
/**
* Verifies certain behavior happened at least once / exact number of times / never. E.g:
* <pre class="code"><code class="java">
* verify(mock, times(5)).someMethod("was called five times");
*
* verify(mock, atLeast(2)).someMethod("was called at least two times");
*
* //you can use flexible argument matchers, e.g:
* verify(mock, atLeastOnce()).someMethod(<b>anyString()</b>);
* </code></pre>
*
* <b>times(1) is the default</b> and can be omitted
* <p>
* Arguments passed are compared using <code>equals()</code> method.
* Read about {@link ArgumentCaptor} or {@link ArgumentMatcher} to find out other ways of matching / asserting arguments passed.
* <p>
*
* @param mock to be verified
* @param mode times(x), atLeastOnce() or never()
*
* @return mock object itself
*/
public static <T> T verify(T mock, VerificationMode mode) {
return MOCKITO_CORE.verify(mock, mode);
}
/**
* Smart Mockito users hardly use this feature because they know it could be a sign of poor tests.
* Normally, you don't need to reset your mocks, just create new mocks for each test method.
* <p>
* Instead of <code>#reset()</code> please consider writing simple, small and focused test methods over lengthy, over-specified tests.
* <b>First potential code smell is <code>reset()</code> in the middle of the test method.</b> This probably means you're testing too much.
* Follow the whisper of your test methods: "Please keep us small and focused on single behavior".
* There are several threads about it on mockito mailing list.
* <p>
* The only reason we added <code>reset()</code> method is to
* make it possible to work with container-injected mocks.
* For more information see the FAQ (<a href="https://github.com/mockito/mockito/wiki/FAQ">here</a>).
* <p>
* <b>Don't harm yourself.</b> <code>reset()</code> in the middle of the test method is a code smell (you're probably testing too much).
* <pre class="code"><code class="java">
* List mock = mock(List.class);
* when(mock.size()).thenReturn(10);
* mock.add(1);
*
* reset(mock);
* //at this point the mock forgot any interactions and stubbing
* </code></pre>
*
* @param <T> The Type of the mocks
* @param mocks to be reset
*/
public static <T> void reset(T... mocks) {
MOCKITO_CORE.reset(mocks);
}
/**
* Clears all mocks, type caches and instrumentations.
* <p>
* By clearing Mockito's state, previously created mocks might begin to malfunction. This option can be used if
* Mockito's caches take up too much space or if the inline mock maker's instrumentation is causing performance
* issues in code where mocks are no longer used. Normally, you would not need to use this option.
*/
public static void clearAllCaches() {
MOCKITO_CORE.clearAllCaches();
}
/**
* Use this method in order to only clear invocations, when stubbing is non-trivial. Use-cases can be:
* <ul>
* <li>You are using a dependency injection framework to inject your mocks.</li>
* <li>The mock is used in a stateful scenario. For example a
|
automagically
|
java
|
hibernate__hibernate-orm
|
hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/sequence/SequenceInformationExtractorSAPDBDatabaseImpl.java
|
{
"start": 276,
"end": 1023
}
|
class ____ extends SequenceInformationExtractorLegacyImpl {
/**
* Singleton access
*/
public static final SequenceInformationExtractorSAPDBDatabaseImpl INSTANCE = new SequenceInformationExtractorSAPDBDatabaseImpl();
@Override
protected String sequenceCatalogColumn() {
return null;
}
@Override
protected String sequenceSchemaColumn() {
return "schemaname";
}
@Override
protected String sequenceStartValueColumn() {
return null;
}
@Override
protected String sequenceMinValueColumn() {
return "min_value";
}
@Override
protected String sequenceMaxValueColumn() {
return "max_value";
}
@Override
protected String sequenceIncrementColumn() {
return "increment_by";
}
}
|
SequenceInformationExtractorSAPDBDatabaseImpl
|
java
|
apache__camel
|
components/camel-bindy/src/test/java/org/apache/camel/dataformat/bindy/csv/BindyMarshalOneToManyWithHeadersTest.java
|
{
"start": 1531,
"end": 2893
}
|
class ____ {
private static final String URI_MOCK_RESULT = "mock:result";
private static final String URI_MOCK_ERROR = "mock:error";
private static final String URI_DIRECT_START = "direct:start";
private String expected;
@Produce(URI_DIRECT_START)
private ProducerTemplate template;
@EndpointInject(URI_MOCK_RESULT)
private MockEndpoint result;
@Test
@DirtiesContext
public void testMarshallMessage() throws Exception {
expected = "orderNumber,customerName,sku,quantity,unitPrice\r\n"
+ "11111,Joe Blow,abc,1,3\r\n"
+ "11111,Joe Blow,cde,3,2\r\n";
result.expectedBodiesReceived(expected);
template.sendBody(generateModel());
result.assertIsSatisfied();
}
public Order generateModel() {
Order order = new Order();
order.setCustomerName("Joe Blow");
order.setOrderNumber(11111);
OrderItem oi1 = new OrderItem();
oi1.setSku("abc");
oi1.setQuantity(1);
oi1.setUnitPrice(3);
OrderItem oi2 = new OrderItem();
oi2.setSku("cde");
oi2.setQuantity(3);
oi2.setUnitPrice(2);
List<OrderItem> orderList = Arrays.asList(oi1, oi2);
order.setItems(orderList);
return order;
}
public static
|
BindyMarshalOneToManyWithHeadersTest
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AWrappedIO.java
|
{
"start": 1153,
"end": 1323
}
|
class ____ extends TestWrappedIO {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new S3AContract(conf);
}
}
|
ITestS3AWrappedIO
|
java
|
apache__dubbo
|
dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/stub/StubInvocationUtil.java
|
{
"start": 1124,
"end": 3202
}
|
class ____ {
public static <T, R> R unaryCall(Invoker<?> invoker, MethodDescriptor methodDescriptor, T request) {
return (R) call(invoker, methodDescriptor, new Object[] {request});
}
public static <T, R> void unaryCall(
Invoker<?> invoker, MethodDescriptor method, T request, StreamObserver<R> responseObserver) {
try {
Object res = unaryCall(invoker, method, request);
responseObserver.onNext((R) res);
} catch (Exception e) {
responseObserver.onError(e);
}
responseObserver.onCompleted();
}
public static <T, R> StreamObserver<T> biOrClientStreamCall(
Invoker<?> invoker, MethodDescriptor method, StreamObserver<R> responseObserver) {
return (StreamObserver<T>) call(invoker, method, new Object[] {responseObserver});
}
public static <T, R> void serverStreamCall(
Invoker<?> invoker, MethodDescriptor method, T request, StreamObserver<R> responseObserver) {
call(invoker, method, new Object[] {request, responseObserver});
}
private static Object call(Invoker<?> invoker, MethodDescriptor methodDescriptor, Object[] arguments) {
RpcInvocation rpcInvocation = new RpcInvocation(
invoker.getUrl().getServiceModel(),
methodDescriptor.getMethodName(),
invoker.getInterface().getName(),
invoker.getUrl().getProtocolServiceKey(),
methodDescriptor.getParameterClasses(),
arguments);
// When there are multiple MethodDescriptors with the same method name, the return type will be wrong
rpcInvocation.setReturnType(methodDescriptor.getReturnClass());
try {
return InvocationUtil.invoke(invoker, rpcInvocation);
} catch (Throwable e) {
if (e instanceof RuntimeException) {
throw (RuntimeException) e;
} else {
throw TriRpcStatus.INTERNAL.withCause(e).asException();
}
}
}
}
|
StubInvocationUtil
|
java
|
apache__kafka
|
jmh-benchmarks/src/main/java/org/apache/kafka/jmh/connect/JsonConverterBenchmark.java
|
{
"start": 2103,
"end": 19052
}
|
class ____ {
final org.apache.kafka.connect.data.Schema envelopeSchema = buildEnvelopeSchema();
final Struct envelopeStruct = new Struct(envelopeSchema)
.put("before", buildValueStruct())
.put("after", buildValueStruct())
.put("source", buildSourceStruct())
.put("op", "u")
.put("ts_ms", 1638362438000L)
.put("transaction", buildTransactionStruct());
public String structJson = "{\n" +
" \"schema\": {\n" +
" \"type\": \"struct\",\n" +
" \"fields\": [\n" +
" {\n" +
" \"type\": \"struct\",\n" +
" \"fields\": [\n" +
" {\n" +
" \"type\": \"int32\",\n" +
" \"optional\": false,\n" +
" \"field\": \"id\"\n" +
" },\n" +
" {\n" +
" \"type\": \"string\",\n" +
" \"optional\": true,\n" +
" \"field\": \"aircraft\"\n" +
" },\n" +
" {\n" +
" \"type\": \"string\",\n" +
" \"optional\": true,\n" +
" \"field\": \"airline\"\n" +
" },\n" +
" {\n" +
" \"type\": \"int32\",\n" +
" \"optional\": true,\n" +
" \"field\": \"passengers\"\n" +
" },\n" +
" {\n" +
" \"type\": \"string\",\n" +
" \"optional\": true,\n" +
" \"field\": \"airport\"\n" +
" },\n" +
" {\n" +
" \"type\": \"string\",\n" +
" \"optional\": true,\n" +
" \"field\": \"flight\"\n" +
" },\n" +
" {\n" +
" \"type\": \"string\",\n" +
" \"optional\": true,\n" +
" \"field\": \"metar\"\n" +
" },\n" +
" {\n" +
" \"type\": \"double\",\n" +
" \"optional\": true,\n" +
" \"field\": \"flight_distance\"\n" +
" }\n" +
" ],\n" +
" \"optional\": true,\n" +
" \"name\": \"dbserver1.public.aviation.Value\",\n" +
" \"field\": \"before\"\n" +
" },\n" +
" {\n" +
" \"type\": \"struct\",\n" +
" \"fields\": [\n" +
" {\n" +
" \"type\": \"int32\",\n" +
" \"optional\": false,\n" +
" \"field\": \"id\"\n" +
" },\n" +
" {\n" +
" \"type\": \"string\",\n" +
" \"optional\": true,\n" +
" \"field\": \"aircraft\"\n" +
" },\n" +
" {\n" +
" \"type\": \"string\",\n" +
" \"optional\": true,\n" +
" \"field\": \"airline\"\n" +
" },\n" +
" {\n" +
" \"type\": \"int32\",\n" +
" \"optional\": true,\n" +
" \"field\": \"passengers\"\n" +
" },\n" +
" {\n" +
" \"type\": \"string\",\n" +
" \"optional\": true,\n" +
" \"field\": \"airport\"\n" +
" },\n" +
" {\n" +
" \"type\": \"string\",\n" +
" \"optional\": true,\n" +
" \"field\": \"flight\"\n" +
" },\n" +
" {\n" +
" \"type\": \"string\",\n" +
" \"optional\": true,\n" +
" \"field\": \"metar\"\n" +
" },\n" +
" {\n" +
" \"type\": \"double\",\n" +
" \"optional\": true,\n" +
" \"field\": \"flight_distance\"\n" +
" }\n" +
" ],\n" +
" \"optional\": true,\n" +
" \"name\": \"dbserver1.public.aviation.Value\",\n" +
" \"field\": \"after\"\n" +
" },\n" +
" {\n" +
" \"type\": \"struct\",\n" +
" \"fields\": [\n" +
" {\n" +
" \"type\": \"string\",\n" +
" \"optional\": false,\n" +
" \"field\": \"version\"\n" +
" },\n" +
" {\n" +
" \"type\": \"string\",\n" +
" \"optional\": false,\n" +
" \"field\": \"connector\"\n" +
" },\n" +
" {\n" +
" \"type\": \"string\",\n" +
" \"optional\": false,\n" +
" \"field\": \"name\"\n" +
" },\n" +
" {\n" +
" \"type\": \"int64\",\n" +
" \"optional\": false,\n" +
" \"field\": \"ts_ms\"\n" +
" },\n" +
" {\n" +
" \"type\": \"string\",\n" +
" \"optional\": true,\n" +
" \"name\": \"io.debezium.data.Enum\",\n" +
" \"version\": 1,\n" +
" \"parameters\": {\n" +
" \"allowed\": \"true,last,false,incremental\"\n" +
" },\n" +
" \"default\": \"false\",\n" +
" \"field\": \"snapshot\"\n" +
" },\n" +
" {\n" +
" \"type\": \"string\",\n" +
" \"optional\": false,\n" +
" \"field\": \"db\"\n" +
" },\n" +
" {\n" +
" \"type\": \"string\",\n" +
" \"optional\": true,\n" +
" \"field\": \"sequence\"\n" +
" },\n" +
" {\n" +
" \"type\": \"string\",\n" +
" \"optional\": false,\n" +
" \"field\": \"schema\"\n" +
" },\n" +
" {\n" +
" \"type\": \"string\",\n" +
" \"optional\": false,\n" +
" \"field\": \"table\"\n" +
" },\n" +
" {\n" +
" \"type\": \"int64\",\n" +
" \"optional\": true,\n" +
" \"field\": \"txId\"\n" +
" },\n" +
" {\n" +
" \"type\": \"int64\",\n" +
" \"optional\": true,\n" +
" \"field\": \"lsn\"\n" +
" },\n" +
" {\n" +
" \"type\": \"int64\",\n" +
" \"optional\": true,\n" +
" \"field\": \"xmin\"\n" +
" }\n" +
" ],\n" +
" \"optional\": false,\n" +
" \"name\": \"io.debezium.connector.postgresql.Source\",\n" +
" \"field\": \"source\"\n" +
" },\n" +
" {\n" +
" \"type\": \"string\",\n" +
" \"optional\": false,\n" +
" \"field\": \"op\"\n" +
" },\n" +
" {\n" +
" \"type\": \"int64\",\n" +
" \"optional\": true,\n" +
" \"field\": \"ts_ms\"\n" +
" },\n" +
" {\n" +
" \"type\": \"struct\",\n" +
" \"fields\": [\n" +
" {\n" +
" \"type\": \"string\",\n" +
" \"optional\": false,\n" +
" \"field\": \"id\"\n" +
" },\n" +
" {\n" +
" \"type\": \"int64\",\n" +
" \"optional\": false,\n" +
" \"field\": \"total_order\"\n" +
" },\n" +
" {\n" +
" \"type\": \"int64\",\n" +
" \"optional\": false,\n" +
" \"field\": \"data_collection_order\"\n" +
" }\n" +
" ],\n" +
" \"optional\": true,\n" +
" \"name\": \"event.block\",\n" +
" \"version\": 1,\n" +
" \"field\": \"transaction\"\n" +
" }\n" +
" ],\n" +
" \"optional\": false,\n" +
" \"name\": \"dbserver1.public.aviation.Envelope\",\n" +
" \"version\": 1\n" +
"},\n" +
" \"payload\": {\n" +
" \"before\": null,\n" +
" \"after\": {\n" +
" \"id\": 941445,\n" +
" \"aircraft\": \"Mi-8\",\n" +
" \"airline\": \"LOT Polish Airlines\",\n" +
" \"passengers\": 232,\n" +
" \"airport\": \"ZBAR\",\n" +
" \"flight\": \"MH9445\",\n" +
" \"metar\": \"METAR: GOOY 251100Z 24008KT 9999 BKN011 27/22 Q1014\",\n" +
" \"flight_distance\": 1697.4732487340466\n" +
" },\n" +
" \"source\": {\n" +
" \"version\": \"2.5.0-SNAPSHOT\",\n" +
" \"connector\": \"postgresql\",\n" +
" \"name\": \"dbserver1\",\n" +
" \"ts_ms\": 1702288693179,\n" +
" \"snapshot\": \"last_in_data_collection\",\n" +
" \"db\": \"postgres\",\n" +
" \"sequence\": \"[null,\\\"2195663032\\\"]\",\n" +
" \"schema\": \"public\",\n" +
" \"table\": \"aviation\",\n" +
" \"txId\": 30881,\n" +
" \"lsn\": 2195663032,\n" +
" \"xmin\": null\n" +
" },\n" +
" \"op\": \"r\",\n" +
" \"ts_ms\": 1702288722694,\n" +
" \"transaction\": null\n" +
" }\n" +
"}";
private static org.apache.kafka.connect.data.Schema buildEnvelopeSchema() {
return SchemaBuilder.struct()
.name("dbserver1.public.aviation.Envelope")
.version(1)
.field("before", buildValueSchema())
.field("after", buildValueSchema())
.field("source", buildSourceSchema())
.field("op", SchemaBuilder.STRING_SCHEMA)
.field("ts_ms", SchemaBuilder.OPTIONAL_INT64_SCHEMA)
.field("transaction", buildTransactionSchema())
.build();
}
private static org.apache.kafka.connect.data.Schema buildValueSchema() {
return SchemaBuilder.struct()
.name("dbserver1.public.aviation.Value")
.version(1)
.field("id", SchemaBuilder.INT32_SCHEMA)
.field("aircraft", SchemaBuilder.STRING_SCHEMA)
.field("airline", SchemaBuilder.STRING_SCHEMA)
.field("passengers", SchemaBuilder.INT32_SCHEMA)
.field("airport", SchemaBuilder.STRING_SCHEMA)
.field("flight", SchemaBuilder.STRING_SCHEMA)
.field("metar", SchemaBuilder.STRING_SCHEMA)
.field("flight_distance", SchemaBuilder.FLOAT64_SCHEMA)
.build();
}
private static Struct buildValueStruct() {
Struct valueStruct = new Struct(buildValueSchema());
valueStruct.put("id", 941445);
valueStruct.put("aircraft", "Mi-8");
valueStruct.put("airline", "LOT Polish Airlines");
valueStruct.put("passengers", 232);
valueStruct.put("airport", "ZBAR");
valueStruct.put("flight", "MH9445");
valueStruct.put("metar", "METAR: GOOY 251100Z 24008KT 9999 BKN011 27/22 Q1014");
valueStruct.put("flight_distance", 1697.4732487340466);
return valueStruct;
}
private static org.apache.kafka.connect.data.Schema buildSourceSchema() {
return SchemaBuilder.struct()
.name("io.debezium.connector.postgresql.Source")
.version(1)
.field("version", SchemaBuilder.STRING_SCHEMA)
.field("connector", SchemaBuilder.STRING_SCHEMA)
.field("name", SchemaBuilder.STRING_SCHEMA)
.field("ts_ms", SchemaBuilder.INT64_SCHEMA)
.field("snapshot", SchemaBuilder.STRING_SCHEMA)
.field("db", SchemaBuilder.STRING_SCHEMA)
.field("sequence", SchemaBuilder.STRING_SCHEMA)
.field("schema", SchemaBuilder.STRING_SCHEMA)
.field("table", SchemaBuilder.STRING_SCHEMA)
.field("txId", SchemaBuilder.INT64_SCHEMA)
.field("lsn", SchemaBuilder.INT64_SCHEMA)
.field("xmin", SchemaBuilder.INT64_SCHEMA)
.build();
}
private static Struct buildSourceStruct() {
Struct sourceStruct = new Struct(buildSourceSchema());
sourceStruct.put("version", "2.5.0-SNAPSHOT");
sourceStruct.put("connector", "postgresql");
sourceStruct.put("name", "dbserver1");
sourceStruct.put("ts_ms", 1702288693179L);
sourceStruct.put("snapshot", "last_in_data_collection");
sourceStruct.put("db", "postgres");
sourceStruct.put("sequence", "[null,\"2195663032\"]");
sourceStruct.put("schema", "public");
sourceStruct.put("table", "aviation");
sourceStruct.put("txId", 30881L);
sourceStruct.put("lsn", 2195663032L);
sourceStruct.put("xmin", 30881L);
return sourceStruct;
}
private static org.apache.kafka.connect.data.Schema buildTransactionSchema() {
return SchemaBuilder.struct()
.name("event.block")
.version(1)
.field("id", SchemaBuilder.STRING_SCHEMA)
.field("total_order", SchemaBuilder.INT64_SCHEMA)
.field("data_collection_order", SchemaBuilder.INT64_SCHEMA)
.build();
}
private static Struct buildTransactionStruct() {
Struct transactionStruct = new Struct(buildTransactionSchema());
transactionStruct.put("id", "transaction_id");
transactionStruct.put("total_order", 1000L);
transactionStruct.put("data_collection_order", 10000L);
return transactionStruct;
}
}
@Setup(Level.Trial)
public void setup(BenchmarkParams params) {
converter = new JsonConverter(Boolean.parseBoolean(params.getParam("blackbirdModule")));
converter.configure(Map.of(), false);
}
@Benchmark
public void deserialize(Blackhole blackhole, Data data) {
blackhole.consume(converter.toConnectData(TOPIC, data.structJson.getBytes(Charset.defaultCharset())));
}
@Benchmark
public void serialize(Blackhole blackhole, Data data) {
blackhole.consume(converter.fromConnectData(TOPIC, data.envelopeSchema, data.envelopeStruct));
}
}
|
Data
|
java
|
apache__kafka
|
clients/src/main/java/org/apache/kafka/common/record/FileLogInputStream.java
|
{
"start": 1859,
"end": 4291
}
|
class ____ implements LogInputStream<FileLogInputStream.FileChannelRecordBatch> {
private int position;
private final int end;
private final FileRecords fileRecords;
private final ByteBuffer logHeaderBuffer = ByteBuffer.allocate(HEADER_SIZE_UP_TO_MAGIC);
/**
* Create a new log input stream over the FileChannel
* @param records Underlying FileRecords instance
* @param start Position in the file channel to start from
* @param end Position in the file channel not to read past
*/
FileLogInputStream(FileRecords records,
int start,
int end) {
this.fileRecords = records;
this.position = start;
this.end = end;
}
@Override
public FileChannelRecordBatch nextBatch() throws IOException {
FileChannel channel = fileRecords.channel();
if (position >= end - HEADER_SIZE_UP_TO_MAGIC)
return null;
logHeaderBuffer.rewind();
Utils.readFullyOrFail(channel, logHeaderBuffer, position, "log header");
logHeaderBuffer.rewind();
long offset = logHeaderBuffer.getLong(OFFSET_OFFSET);
int size = logHeaderBuffer.getInt(SIZE_OFFSET);
// V0 has the smallest overhead, stricter checking is done later
if (size < LegacyRecord.RECORD_OVERHEAD_V0)
throw new CorruptRecordException(String.format("Found record size %d smaller than minimum record " +
"overhead (%d) in file %s.", size, LegacyRecord.RECORD_OVERHEAD_V0, fileRecords.file()));
if (position > end - LOG_OVERHEAD - size)
return null;
byte magic = logHeaderBuffer.get(MAGIC_OFFSET);
final FileChannelRecordBatch batch;
if (magic < RecordBatch.MAGIC_VALUE_V2)
batch = new LegacyFileChannelRecordBatch(offset, magic, fileRecords, position, size);
else
batch = new DefaultFileChannelRecordBatch(offset, magic, fileRecords, position, size);
position += batch.sizeInBytes();
return batch;
}
/**
* Log entry backed by an underlying FileChannel. This allows iteration over the record batches
* without needing to read the record data into memory until it is needed. The downside
* is that entries will generally no longer be readable when the underlying channel is closed.
*/
public abstract static
|
FileLogInputStream
|
java
|
apache__flink
|
flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/functions/ml/ModelPredictRuntimeProviderContext.java
|
{
"start": 1080,
"end": 1676
}
|
class ____ implements ModelProvider.Context {
private final ResolvedCatalogModel catalogModel;
private final ReadableConfig runtimeConfig;
public ModelPredictRuntimeProviderContext(
ResolvedCatalogModel catalogModel, ReadableConfig runtimeConfig) {
this.catalogModel = catalogModel;
this.runtimeConfig = runtimeConfig;
}
@Override
public ResolvedCatalogModel getCatalogModel() {
return catalogModel;
}
@Override
public ReadableConfig runtimeConfig() {
return runtimeConfig;
}
}
|
ModelPredictRuntimeProviderContext
|
java
|
lettuce-io__lettuce-core
|
src/main/java/io/lettuce/core/output/EnumSetOutput.java
|
{
"start": 1072,
"end": 2171
}
|
enum ____ cannot be looked up.
*/
public EnumSetOutput(RedisCodec<K, V> codec, Class<E> enumClass, UnaryOperator<String> enumValuePreprocessor,
Function<String, E> onUnknownValue) {
super(codec, Collections.emptySet());
this.enumClass = enumClass;
this.enumValuePreprocessor = enumValuePreprocessor;
this.onUnknownValue = onUnknownValue;
}
@Override
public void set(ByteBuffer bytes) {
if (bytes == null) {
return;
}
E enumConstant = resolve(enumValuePreprocessor.apply(decodeString(bytes)));
if (enumConstant == null) {
return;
}
output.add(enumConstant);
}
@Override
public void multi(int count) {
if (!initialized) {
output = EnumSet.noneOf(enumClass);
initialized = true;
}
}
private E resolve(String value) {
try {
return Enum.valueOf(enumClass, value);
} catch (IllegalArgumentException e) {
return onUnknownValue.apply(value);
}
}
}
|
value
|
java
|
hibernate__hibernate-orm
|
hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/query/AssociationRevisionsOfEntitiesQueryTest.java
|
{
"start": 1178,
"end": 4145
}
|
class ____ {
@BeforeClassTemplate
public void initData(EntityManagerFactoryScope scope) {
// Revision 1
scope.inTransaction( entityManager -> {
final TemplateType type1 = new TemplateType( 1, "Type1" );
final TemplateType type2 = new TemplateType( 2, "Type2" );
entityManager.persist( type1 );
entityManager.persist( type2 );
final Template template = new Template( 1, "Template1", type1 );
entityManager.persist( template );
} );
// Revision 2
scope.inTransaction( entityManager -> {
final TemplateType type = entityManager.find( TemplateType.class, 2 );
final Template template = entityManager.find( Template.class, 1 );
template.setName( "Template1-Updated" );
template.setTemplateType( type );
entityManager.merge( template );
} );
// Revision 3
scope.inTransaction( entityManager -> {
final Template template = entityManager.find( Template.class, 1 );
entityManager.remove( template );
} );
}
@Test
public void testRevisionsOfEntityWithAssociationQueries(EntityManagerFactoryScope scope) {
scope.inEntityManager( entityManager -> {
List<?> results = AuditReaderFactory.get( entityManager ).createQuery()
.forRevisionsOfEntity( Template.class, true, true )
.add( AuditEntity.id().eq( 1 ) )
.traverseRelation( "templateType", JoinType.INNER )
.add( AuditEntity.property( "name" ).eq( "Type1" ) )
.up()
.getResultList();
assertEquals( 1, results.size() );
assertEquals( "Template1", ( (Template) results.get( 0 ) ).getName() );
} );
scope.inEntityManager( entityManager -> {
List<?> results = AuditReaderFactory.get( entityManager ).createQuery()
.forRevisionsOfEntity( Template.class, true, true )
.add( AuditEntity.id().eq( 1 ) )
.traverseRelation( "templateType", JoinType.INNER )
.add( AuditEntity.property( "name" ).eq( "Type2" ) )
.up()
.getResultList();
assertEquals( isStoreDataAtDelete( scope ) ? 2 : 1, results.size() );
for ( Object result : results ) {
assertEquals( "Template1-Updated", ( (Template) result ).getName() );
}
} );
}
@Test
public void testAssociationQueriesNotAllowedWhenNotSelectingJustEntities(EntityManagerFactoryScope scope) {
try {
scope.inEntityManager( entityManager -> {
AuditReaderFactory.get( entityManager ).createQuery()
.forRevisionsOfEntity( Template.class, false, true )
.add( AuditEntity.id().eq( 1 ) )
.traverseRelation( "templateType", JoinType.INNER )
.add( AuditEntity.property( "name" ).eq( "Type1" ) )
.up()
.getResultList();
} );
fail( "Test should have thrown IllegalStateException due to selectEntitiesOnly=false" );
}
catch ( Exception e ) {
assertInstanceOf( IllegalStateException.class, e );
}
}
protected boolean isStoreDataAtDelete(EntityManagerFactoryScope scope) {
return false;
}
@Entity(name = "TemplateType")
@Audited
public static
|
AssociationRevisionsOfEntitiesQueryTest
|
java
|
google__gson
|
gson/src/test/java/com/google/gson/functional/ReflectionAccessFilterTest.java
|
{
"start": 5160,
"end": 5692
}
|
class ____.lang.Thread."
+ " Register a TypeAdapter for this type or adjust the access filter.");
}
@Test
public void testBlockAllJavaExtendingJdkClass() {
Gson gson =
new GsonBuilder().addReflectionAccessFilter(ReflectionAccessFilter.BLOCK_ALL_JAVA).create();
var e = assertThrows(JsonIOException.class, () -> gson.toJson(new ClassExtendingJdkClass()));
assertThat(e)
.hasMessageThat()
.isEqualTo(
"ReflectionAccessFilter does not permit using reflection for
|
java
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/LockNotBeforeTryTest.java
|
{
"start": 5777,
"end": 6133
}
|
class ____ {
private void test(ReentrantLock lock) {
lock.lock();
System.out.println("hi");
lock.unlock();
}
}
""")
.addOutputLines(
"Test.java",
"""
import java.util.concurrent.locks.ReentrantLock;
|
Test
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PropgateUnmappedFields.java
|
{
"start": 981,
"end": 1840
}
|
class ____ extends Rule<LogicalPlan, LogicalPlan> {
@Override
public LogicalPlan apply(LogicalPlan logicalPlan) {
if (logicalPlan instanceof EsRelation) {
return logicalPlan;
}
var unmappedFieldsBuilder = AttributeSet.builder();
logicalPlan.forEachExpressionDown(FieldAttribute.class, fa -> {
if (fa.field() instanceof PotentiallyUnmappedKeywordEsField) {
unmappedFieldsBuilder.add(fa);
}
});
var unmappedFields = unmappedFieldsBuilder.build();
return unmappedFields.isEmpty()
? logicalPlan
: logicalPlan.transformUp(
EsRelation.class,
er -> er.withAttributes(NamedExpressions.mergeOutputAttributes(new ArrayList<>(unmappedFields), er.output()))
);
}
}
|
PropgateUnmappedFields
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/testutils/recordutils/RecordComparatorFactory.java
|
{
"start": 1598,
"end": 6479
}
|
class ____ implements TypeComparatorFactory<Record> {
private static final String NUM_KEYS = "numkeys";
private static final String KEY_POS_PREFIX = "keypos.";
private static final String KEY_CLASS_PREFIX = "keyclass.";
private static final String KEY_SORT_DIRECTION_PREFIX = "key-direction.";
// --------------------------------------------------------------------------------------------
private int[] positions;
private Class<? extends Value>[] types;
private boolean[] sortDirections;
// --------------------------------------------------------------------------------------------
public RecordComparatorFactory() {
// do nothing, allow to be configured via config
}
public RecordComparatorFactory(int[] positions, Class<? extends Value>[] types) {
this(positions, types, null);
}
public RecordComparatorFactory(
int[] positions, Class<? extends Value>[] types, boolean[] sortDirections) {
if (positions == null || types == null) {
throw new NullPointerException();
}
if (positions.length != types.length) {
throw new IllegalArgumentException();
}
this.positions = positions;
this.types = types;
if (sortDirections == null) {
this.sortDirections = new boolean[positions.length];
Arrays.fill(this.sortDirections, true);
} else if (sortDirections.length != positions.length) {
throw new IllegalArgumentException();
} else {
this.sortDirections = sortDirections;
}
}
@Override
public void writeParametersToConfig(Configuration config) {
for (int i = 0; i < this.positions.length; i++) {
if (this.positions[i] < 0) {
throw new IllegalArgumentException(
"The key position " + i + " is invalid: " + this.positions[i]);
}
if (this.types[i] == null || !Value.class.isAssignableFrom(this.types[i])) {
throw new IllegalArgumentException(
"The key type "
+ i
+ " is null or not implenting the interface "
+ Value.class.getName()
+ ".");
}
}
// write the config
config.set(getIntConfigOption(NUM_KEYS), this.positions.length);
for (int i = 0; i < this.positions.length; i++) {
config.set(getIntConfigOption(KEY_POS_PREFIX + i), this.positions[i]);
config.setString(KEY_CLASS_PREFIX + i, this.types[i].getName());
config.set(
getBooleanConfigOption(KEY_SORT_DIRECTION_PREFIX + i), this.sortDirections[i]);
}
}
@SuppressWarnings("unchecked")
@Override
public void readParametersFromConfig(Configuration config, ClassLoader cl)
throws ClassNotFoundException {
// figure out how many key fields there are
final int numKeyFields = config.get(getIntConfigOption(NUM_KEYS), -1);
if (numKeyFields < 0) {
throw new IllegalConfigurationException(
"The number of keys for the comparator is invalid: " + numKeyFields);
}
final int[] positions = new int[numKeyFields];
final Class<? extends Value>[] types = new Class[numKeyFields];
final boolean[] direction = new boolean[numKeyFields];
// read the individual key positions and types
for (int i = 0; i < numKeyFields; i++) {
// next key position
final int p = config.get(getIntConfigOption(KEY_POS_PREFIX + i), -1);
if (p >= 0) {
positions[i] = p;
} else {
throw new IllegalConfigurationException(
"Contained invalid position for key no positions for keys.");
}
// next key type
final String name = config.getString(KEY_CLASS_PREFIX + i, null);
if (name != null) {
types[i] =
(Class<? extends Value>)
Class.forName(name, true, cl).asSubclass(Value.class);
} else {
throw new IllegalConfigurationException(
"The key type (" + i + ") for the comparator is null");
}
// next key sort direction
direction[i] = config.get(getBooleanConfigOption(KEY_SORT_DIRECTION_PREFIX + i), true);
}
this.positions = positions;
this.types = types;
this.sortDirections = direction;
}
@Override
public RecordComparator createComparator() {
return new RecordComparator(this.positions, this.types, this.sortDirections);
}
}
|
RecordComparatorFactory
|
java
|
apache__camel
|
dsl/camel-jbang/camel-jbang-plugin-kubernetes/src/main/java/org/apache/camel/dsl/jbang/core/commands/kubernetes/traits/model/Service.java
|
{
"start": 2464,
"end": 2896
}
|
enum ____ {
@JsonProperty("ClusterIP")
CLUSTERIP("ClusterIP"),
@JsonProperty("NodePort")
NODEPORT("NodePort"),
@JsonProperty("LoadBalancer")
LOADBALANCER("LoadBalancer");
private final String value;
Type(String value) {
this.value = value;
}
@JsonValue
public String getValue() {
return this.value;
}
}
}
|
Type
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/TokenizerUtils.java
|
{
"start": 401,
"end": 3405
}
|
class ____ {
private TokenizerUtils() {}
static LinkedList<DelimitedToken> splitOutNeverSplit(CharSequence input, CharTrie neverSplit, CharArraySet neverSplitSet) {
CharTrie current = neverSplit;
LinkedList<DelimitedToken> bigTokens = new LinkedList<>();
int windowStart = 0;
int neverSplitStart = 0;
for (int i = 0; i < input.length(); i++) {
CharTrie childNode = current.children().get(input.charAt(i));
if (current == neverSplit && childNode != null) {
neverSplitStart = i;
}
if (childNode == null) {
if (current != neverSplit) {
current = neverSplit;
}
childNode = current.children().get(input.charAt(i));
if (childNode != null) {
neverSplitStart = i;
current = childNode;
}
} else if (childNode.isLeaf()) {
// build char seq view, verify its in never split
CharSequence maybeNeverSplit = new CharSequenceRef(input, neverSplitStart, (i + 1) - neverSplitStart);
if (neverSplitSet.contains(maybeNeverSplit)) {
if (windowStart < neverSplitStart) {
bigTokens.add(
new DelimitedToken(
new CharSequenceRef(input, windowStart, neverSplitStart - windowStart),
windowStart,
neverSplitStart
)
);
}
bigTokens.add(new DelimitedToken(maybeNeverSplit, neverSplitStart, i + 1));
}
windowStart = i + 1;
current = neverSplit;
} else {
// still in potential never split
current = childNode;
}
}
int finalIndex = bigTokens.isEmpty() ? 0 : bigTokens.getLast().endOffset();
if (finalIndex < input.length()) {
bigTokens.add(
new DelimitedToken(new CharSequenceRef(input, finalIndex, input.length() - finalIndex), finalIndex, input.length())
);
}
return bigTokens;
}
public record CharSequenceRef(CharSequence wrapped, int offset, int len) implements CharSequence {
public int getOffset() {
return offset;
}
@Override
public int length() {
return len;
}
@Override
public char charAt(int index) {
return wrapped.charAt(index + offset);
}
@Override
public CharSequence subSequence(int start, int end) {
return wrapped.subSequence(start + offset, end + offset);
}
@Override
public String toString() {
return wrapped.subSequence(offset, offset + len).toString();
}
}
}
|
TokenizerUtils
|
java
|
assertj__assertj-core
|
assertj-core/src/main/java/org/assertj/core/error/ShouldBeEqualToIgnoringFields.java
|
{
"start": 906,
"end": 4128
}
|
class ____ extends BasicErrorMessageFactory {
private static final String EXPECTED_MULTIPLE = "%nExpecting values:%n %s%nin fields:%n %s%nbut were:%n %s%nin %s.%n";
private static final String EXPECTED_SINGLE = "%nExpecting value %s in field %s but was %s in %s.%n";
private static final String COMPARISON = "Comparison was performed on all fields";
private static final String EXCLUDING = " but %s";
/**
* Creates a new <code>{@link ShouldBeEqualToIgnoringFields}</code>.
*
* @param actual the actual value in the failed assertion.
* @param rejectedFields fields name not matching
* @param rejectedValues rejected fields values
* @param expectedValues expected fields values
* @param ignoredFields fields which are not base the lenient equality
* @return the created {@code ErrorMessageFactory}.
*/
public static ErrorMessageFactory shouldBeEqualToIgnoringGivenFields(Object actual, List<String> rejectedFields,
List<Object> rejectedValues,
List<Object> expectedValues,
List<String> ignoredFields) {
if (rejectedFields.size() == 1) {
if (ignoredFields.isEmpty()) {
return new ShouldBeEqualToIgnoringFields(actual, rejectedFields.get(0), rejectedValues.get(0),
expectedValues.get(0));
}
return new ShouldBeEqualToIgnoringFields(actual, rejectedFields.get(0), rejectedValues.get(0),
expectedValues.get(0), ignoredFields);
}
if (ignoredFields.isEmpty()) {
return new ShouldBeEqualToIgnoringFields(actual, rejectedFields, rejectedValues, expectedValues);
}
return new ShouldBeEqualToIgnoringFields(actual, rejectedFields, rejectedValues, expectedValues, ignoredFields);
}
private ShouldBeEqualToIgnoringFields(Object actual, List<String> rejectedFields, List<Object> rejectedValues,
List<Object> expectedValues, List<String> ignoredFields) {
super(EXPECTED_MULTIPLE + COMPARISON + EXCLUDING, expectedValues, rejectedFields, rejectedValues, actual,
ignoredFields);
}
private ShouldBeEqualToIgnoringFields(Object actual, String rejectedField, Object rejectedValue, Object expectedValue,
List<String> ignoredFields) {
super(EXPECTED_SINGLE + COMPARISON + EXCLUDING, expectedValue, rejectedField, rejectedValue, actual, ignoredFields);
}
private ShouldBeEqualToIgnoringFields(Object actual, List<String> rejectedFields, List<Object> rejectedValues,
List<Object> expectedValue) {
super(EXPECTED_MULTIPLE + COMPARISON, expectedValue, rejectedFields, rejectedValues, actual);
}
private ShouldBeEqualToIgnoringFields(Object actual, String rejectedField, Object rejectedValue,
Object expectedValue) {
super(EXPECTED_SINGLE + COMPARISON, expectedValue, rejectedField, rejectedValue, actual);
}
}
|
ShouldBeEqualToIgnoringFields
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/type/descriptor/sql/spi/DdlTypeRegistry.java
|
{
"start": 8927,
"end": 10038
}
|
enum ____ array types,
* use {@link #getTypeName(int, Size, Type)} instead
*/
@Deprecated(since = "6.3")
public String getTypeName(int typeCode, Long size, Integer precision, Integer scale) {
final DdlType descriptor = getDescriptor( typeCode );
if ( descriptor == null ) {
throw new HibernateException(
String.format(
"No type mapping for org.hibernate.type.SqlTypes code: %s (%s)",
typeCode,
JdbcTypeNameMapper.getTypeName( typeCode )
)
);
}
return descriptor.getTypeName( size, precision, scale );
}
/**
* Determines if there is a registered {@link DdlType} whose {@linkplain
* DdlType#getRawTypeName() raw type name} matches the given type name,
* taking into account DDL types registered by Hibernate.
*
* @param typeName the type name.
*
* @return {@code true} if there is a DDL type with the given raw type name
*/
public boolean isTypeNameRegistered(final String typeName) {
for ( DdlType value : ddlTypes.values() ) {
if ( value.getRawTypeName().equals( typeName ) ) {
return true;
}
}
return false;
}
}
|
or
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/Session.java
|
{
"start": 38472,
"end": 40213
}
|
class ____ the given identifier,
* or null if there is no such persistent instance. If the instance is already associated
* with the session, return that instance. This method never returns an uninitialized
* instance. Obtain the specified lock mode if the instance exists.
*
* @apiNote This operation is very similar to {@link #find(Class, Object, LockModeType)}.
*
* @param entityType the entity type
* @param id an identifier
* @param lockMode the lock mode
*
* @return a persistent instance or null
*
* @deprecated Use {@link #find(Class, Object, FindOption...)} instead.
*/
@Deprecated(since = "7.0", forRemoval = true)
<T> T get(Class<T> entityType, Object id, LockMode lockMode);
/**
* Return the persistent instance of the given named entity with the given identifier,
* or null if there is no such persistent instance. If the instance is already associated
* with the session, return that instance. This method never returns an uninitialized
* instance.
*
* @param entityName the entity name
* @param id an identifier
*
* @return a persistent instance or null
*
* @deprecated The semantics of this method may change in a future release.
* Use {@link SessionFactory#createGraphForDynamicEntity(String)}
* together with {@link #find(EntityGraph, Object, FindOption...)}
* to load {@link org.hibernate.metamodel.RepresentationMode#MAP
* dynamic entities}.
*
* @see SessionFactory#createGraphForDynamicEntity(String)
* @see #find(EntityGraph, Object, FindOption...)
*/
@Deprecated(since = "7", forRemoval = true)
Object get(String entityName, Object id);
/**
* Return the persistent instance of the given entity
|
with
|
java
|
elastic__elasticsearch
|
test/framework/src/main/java/org/elasticsearch/test/hamcrest/RectangleMatcher.java
|
{
"start": 945,
"end": 3408
}
|
class ____ extends TypeSafeMatcher<Rectangle> {
private final Rectangle r;
private final CoordinateEncoder coordinateEncoder;
private final double error;
public static TypeSafeMatcher<Rectangle> closeTo(Rectangle r, double error, CoordinateEncoder coordinateEncoder) {
return new RectangleMatcher(r, error, coordinateEncoder);
}
private RectangleMatcher(Rectangle r, double error, CoordinateEncoder coordinateEncoder) {
this.r = r;
this.coordinateEncoder = coordinateEncoder;
this.error = error;
}
/**
* Casts the rectangle coordinates to floats before comparing. Useful when working with extents which hold the coordinate data as ints.
*/
public static TypeSafeMatcher<Rectangle> closeToFloat(Rectangle r, double v, CoordinateEncoder encoder) {
var normalized = new Rectangle((float) r.getMinX(), (float) r.getMaxX(), (float) r.getMaxY(), (float) r.getMinY());
return closeTo(normalized, v, encoder);
}
@Override
protected boolean matchesSafely(Rectangle other) {
// For geo bounds, longitude of (-180, 180) and (epsilon, -epsilon) are actually very close, since both encompass the entire globe.
boolean wrapAroundWorkAround = coordinateEncoder == CoordinateEncoder.GEO && r.getMinX() >= r.getMaxX();
boolean matchMinX = Matchers.closeTo(r.getMinX(), error).matches(other.getMinX())
|| (wrapAroundWorkAround && Matchers.closeTo(r.getMinX() - 180, error).matches(other.getMinX()))
|| (wrapAroundWorkAround && Matchers.closeTo(r.getMinX(), error).matches(other.getMinX() - 180));
boolean matchMaxX = Matchers.closeTo(r.getMaxX(), error).matches(other.getMaxX())
|| (wrapAroundWorkAround && Matchers.closeTo(r.getMaxX() + 180, error).matches(other.getMaxX()))
|| (wrapAroundWorkAround && Matchers.closeTo(r.getMaxX(), error).matches(other.getMaxX() + 180));
return matchMinX
&& matchMaxX
&& Matchers.closeTo(r.getMaxY(), error).matches(other.getMaxY())
&& Matchers.closeTo(r.getMinY(), error).matches(other.getMinY());
}
@Override
public void describeMismatchSafely(Rectangle rectangle, Description description) {
description.appendText("was ").appendValue(rectangle);
}
@Override
public void describeTo(Description description) {
description.appendValue(" " + r);
}
}
|
RectangleMatcher
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/FreemarkerEndpointBuilderFactory.java
|
{
"start": 8709,
"end": 11215
}
|
interface ____
extends
EndpointProducerBuilder {
default FreemarkerEndpointBuilder basic() {
return (FreemarkerEndpointBuilder) this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedFreemarkerEndpointBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedFreemarkerEndpointBuilder lazyStartProducer(String lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
}
public
|
AdvancedFreemarkerEndpointBuilder
|
java
|
apache__camel
|
dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/WordpressComponentBuilderFactory.java
|
{
"start": 11388,
"end": 14246
}
|
class ____
extends AbstractComponentBuilder<WordpressComponent>
implements WordpressComponentBuilder {
@Override
protected WordpressComponent buildConcreteComponent() {
return new WordpressComponent();
}
private org.apache.camel.component.wordpress.WordpressConfiguration getOrCreateConfiguration(WordpressComponent component) {
if (component.getConfiguration() == null) {
component.setConfiguration(new org.apache.camel.component.wordpress.WordpressConfiguration());
}
return component.getConfiguration();
}
@Override
protected boolean setPropertyOnComponent(
Component component,
String name,
Object value) {
switch (name) {
case "apiVersion": getOrCreateConfiguration((WordpressComponent) component).setApiVersion((java.lang.String) value); return true;
case "configuration": ((WordpressComponent) component).setConfiguration((org.apache.camel.component.wordpress.WordpressConfiguration) value); return true;
case "criteria": getOrCreateConfiguration((WordpressComponent) component).setCriteria((java.util.Map) value); return true;
case "force": getOrCreateConfiguration((WordpressComponent) component).setForce((boolean) value); return true;
case "id": getOrCreateConfiguration((WordpressComponent) component).setId((java.lang.Integer) value); return true;
case "password": getOrCreateConfiguration((WordpressComponent) component).setPassword((java.lang.String) value); return true;
case "searchCriteria": getOrCreateConfiguration((WordpressComponent) component).setSearchCriteria((org.apache.camel.component.wordpress.api.model.SearchCriteria) value); return true;
case "url": getOrCreateConfiguration((WordpressComponent) component).setUrl((java.lang.String) value); return true;
case "user": getOrCreateConfiguration((WordpressComponent) component).setUser((java.lang.String) value); return true;
case "bridgeErrorHandler": ((WordpressComponent) component).setBridgeErrorHandler((boolean) value); return true;
case "lazyStartProducer": ((WordpressComponent) component).setLazyStartProducer((boolean) value); return true;
case "autowiredEnabled": ((WordpressComponent) component).setAutowiredEnabled((boolean) value); return true;
case "healthCheckConsumerEnabled": ((WordpressComponent) component).setHealthCheckConsumerEnabled((boolean) value); return true;
case "healthCheckProducerEnabled": ((WordpressComponent) component).setHealthCheckProducerEnabled((boolean) value); return true;
default: return false;
}
}
}
}
|
WordpressComponentBuilderImpl
|
java
|
apache__camel
|
tooling/maven/camel-api-component-maven-plugin/src/main/java/org/apache/camel/maven/ApiComponentGeneratorMojo.java
|
{
"start": 6780,
"end": 17726
}
|
class ____ being generated elsewhere
final String proxyClass = api.getProxyClass();
boolean found = false;
for (ApiProxy other : apis) {
if (other != api && proxyClass.equals(other.getProxyClass())) {
found = true;
break;
}
}
if (!found) {
throw new MojoExecutionException(
"Missing one of fromSignatureFile or fromJavadoc for "
+ proxyClass);
}
}
// set common aliases if needed
if (!aliases.isEmpty() && api.getAliases().isEmpty()) {
api.setAliases(aliases);
}
// set common nullable options if needed
if (api.getNullableOptions() == null) {
api.setNullableOptions(nullableOptions);
}
}
// generate ApiCollection
mergeTemplate(getApiContext(), getApiCollectionFile(), "/api-collection.vm");
// generate ApiName
mergeTemplate(getApiContext(), getApiNameFile(), "/api-name-enum.vm");
try (Stream<File> stream = Stream.of(this.generatedSrcDir, generatedTestDir)) {
newDate = stream
.map(File::toPath)
.flatMap(this::walk)
.filter(Files::isRegularFile)
.map(this::lastModified)
.max(Comparator.naturalOrder())
.orElse(Instant.now());
}
writeCacheFile(Arrays.asList(
"# ApiComponentGenerator cache file",
"hash=" + newHash,
"date=" + newDate.toString()));
}
private Instant lastModified(Path path) {
try {
return Files.getLastModifiedTime(path).toInstant();
} catch (IOException e) {
return Instant.now();
}
}
private Stream<Path> walk(Path p) {
try {
return Files.walk(p, Integer.MAX_VALUE);
} catch (IOException e) {
return Stream.empty();
}
}
private void configureMethodGenerator(AbstractApiMethodGeneratorMojo mojo, ApiProxy apiProxy) {
// set AbstractGeneratorMojo properties
mojo.componentName = componentName;
mojo.scheme = scheme;
mojo.outPackage = outPackage;
mojo.componentPackage = componentPackage;
mojo.project = project;
// set AbstractSourceGeneratorMojo properties
mojo.generatedSrcDir = generatedSrcDir;
mojo.generatedTestDir = generatedTestDir;
mojo.addCompileSourceRoots = addCompileSourceRoots;
// set AbstractAPIMethodBaseMojo properties
mojo.substitutions = apiProxy.getSubstitutions().length != 0
? apiProxy.getSubstitutions() : substitutions;
mojo.excludeConfigNames = apiProxy.getExcludeConfigNames() != null
? apiProxy.getExcludeConfigNames() : excludeConfigNames;
mojo.excludeConfigTypes = apiProxy.getExcludeConfigTypes() != null
? apiProxy.getExcludeConfigTypes() : excludeConfigTypes;
mojo.extraOptions = apiProxy.getExtraOptions() != null
? apiProxy.getExtraOptions() : extraOptions;
// set AbstractAPIMethodGeneratorMojo properties
mojo.proxyClass = apiProxy.getProxyClass();
mojo.classPrefix = apiProxy.getClassPrefix();
mojo.apiName = apiProxy.getApiName();
mojo.apiDescription = apiProxy.getApiDescription();
mojo.consumerOnly = apiProxy.isConsumerOnly();
mojo.producerOnly = apiProxy.isProducerOnly();
}
private AbstractApiMethodGeneratorMojo getApiMethodGenerator(ApiProxy api) {
AbstractApiMethodGeneratorMojo apiMethodGenerator = null;
final FromJavasource apiFromJavasource = api.getFromJavasource();
if (apiFromJavasource != null) {
final JavaSourceApiMethodGeneratorMojo mojo = new JavaSourceApiMethodGeneratorMojo();
mojo.excludePackages = apiFromJavasource.getExcludePackages() != null
? apiFromJavasource.getExcludePackages() : fromJavasource.getExcludePackages();
mojo.excludeClasses = apiFromJavasource.getExcludeClasses() != null
? apiFromJavasource.getExcludeClasses() : fromJavasource.getExcludeClasses();
mojo.includeMethods = apiFromJavasource.getIncludeMethods() != null
? apiFromJavasource.getIncludeMethods() : fromJavasource.getIncludeMethods();
mojo.excludeMethods = apiFromJavasource.getExcludeMethods() != null
? apiFromJavasource.getExcludeMethods() : fromJavasource.getExcludeMethods();
mojo.includeStaticMethods = apiFromJavasource.getIncludeStaticMethods() != null
? apiFromJavasource.getIncludeStaticMethods() : fromJavasource.getIncludeStaticMethods();
mojo.includeSetters = apiFromJavasource.getIncludeSetters() != null
? apiFromJavasource.getIncludeSetters() : fromJavasource.getIncludeSetters();
mojo.aliases = api.getAliases().isEmpty() ? aliases : api.getAliases();
mojo.nullableOptions = api.getNullableOptions() != null ? api.getNullableOptions() : nullableOptions;
apiMethodGenerator = mojo;
}
return apiMethodGenerator;
}
private VelocityContext getApiContext() {
final VelocityContext context = new VelocityContext();
context.put("componentName", componentName);
context.put("componentPackage", componentPackage);
context.put("apis", apis);
context.put("helper", getClass());
context.put("collectionName", getApiCollectionName());
context.put("apiNameEnum", getApiNameEnum());
return context;
}
private String getApiCollectionName() {
return componentName + "ApiCollection";
}
private String getApiNameEnum() {
return componentName + "ApiName";
}
private File getApiCollectionFile() {
final StringBuilder fileName = getFileBuilder();
fileName.append(getApiCollectionName()).append(".java");
return new File(generatedSrcDir, fileName.toString());
}
private File getApiNameFile() {
final StringBuilder fileName = getFileBuilder();
fileName.append(getApiNameEnum()).append(".java");
return new File(generatedSrcDir, fileName.toString());
}
private StringBuilder getFileBuilder() {
final StringBuilder fileName = new StringBuilder();
fileName.append(outPackage.replace(".", Matcher.quoteReplacement(File.separator))).append(File.separator);
return fileName;
}
/*
* This is used when configuring the plugin instead of directly, which is why it reports as unused
* without the annotation
*/
@SuppressWarnings("unused")
public static String getApiMethod(String proxyClass, String classPrefix) {
String proxyClassWithCanonicalName = getProxyClassWithCanonicalName(proxyClass);
String prefix = classPrefix != null ? classPrefix : "";
return prefix + proxyClassWithCanonicalName.substring(proxyClassWithCanonicalName.lastIndexOf('.') + 1) + "ApiMethod";
}
/*
* This is used when configuring the plugin instead of directly, which is why it reports as unused
* without the annotation
*/
@SuppressWarnings("unused")
public static String getEndpointConfig(String proxyClass, String classPrefix) {
String proxyClassWithCanonicalName = getProxyClassWithCanonicalName(proxyClass);
String prefix = classPrefix != null ? classPrefix : "";
return prefix + proxyClassWithCanonicalName.substring(proxyClassWithCanonicalName.lastIndexOf('.') + 1)
+ "EndpointConfiguration";
}
private static String getProxyClassWithCanonicalName(String proxyClass) {
return proxyClass.replace("$", "");
}
/*
* This is used when configuring the plugin instead of directly, which is why it reports as unused
* without the annotation
*/
@SuppressWarnings("unused")
public static String getEnumConstant(String enumValue) {
if (enumValue == null || enumValue.isEmpty()) {
return "DEFAULT";
}
String value = StringHelper.camelCaseToDash(enumValue);
// replace dash with underscore and upper case
value = value.replace('-', '_');
value = value.toUpperCase(Locale.ENGLISH);
return value;
}
/*
* This is used when configuring the plugin instead of directly, which is why it reports as unused
* without the annotation
*/
@SuppressWarnings("unused")
public static String getNullableOptionValues(String[] nullableOptions) {
if (nullableOptions == null || nullableOptions.length == 0) {
return "";
}
final StringBuilder builder = new StringBuilder();
final int nOptions = nullableOptions.length;
int i = 0;
for (String option : nullableOptions) {
builder.append('"').append(option).append('"');
if (++i < nOptions) {
builder.append(", ");
}
}
return builder.toString();
}
/**
* Store file hash cache.
*/
private void writeCacheFile(List<String> cache) {
if (this.cachedir != null) {
File cacheFile = new File(this.cachedir, CACHE_PROPERTIES_FILENAME);
try (OutputStream out = new FileOutputStream(cacheFile)) {
Files.write(cacheFile.toPath(), cache);
} catch (IOException e) {
getLog().warn("Cannot store file hash cache properties file", e);
}
}
}
/**
* Read file hash cache file.
*/
private List<String> readCacheFile() {
Log log = getLog();
if (this.cachedir == null) {
return Collections.emptyList();
}
if (!this.cachedir.exists()) {
if (!this.cachedir.mkdirs()) {
log.warn("Unable to create cache directory '" + this.cachedir + "'.");
}
} else if (!this.cachedir.isDirectory()) {
log.warn("Something strange here as the '" + this.cachedir
+ "' supposedly cache directory is not a directory.");
return Collections.emptyList();
}
File cacheFile = new File(this.cachedir, CACHE_PROPERTIES_FILENAME);
if (!cacheFile.exists()) {
return Collections.emptyList();
}
try {
return Files.readAllLines(cacheFile.toPath());
} catch (IOException e) {
log.warn("Cannot load cache file", e);
return Collections.emptyList();
}
}
}
|
is
|
java
|
netty__netty
|
codec-http/src/main/java/io/netty/handler/codec/http/DefaultHttpHeadersFactory.java
|
{
"start": 1199,
"end": 14329
}
|
class ____ implements HttpHeadersFactory {
private static final NameValidator<CharSequence> DEFAULT_NAME_VALIDATOR = new NameValidator<CharSequence>() {
@Override
public void validateName(CharSequence name) {
if (name == null || name.length() == 0) {
throw new IllegalArgumentException("empty headers are not allowed [" + name + ']');
}
int index = HttpHeaderValidationUtil.validateToken(name);
if (index != -1) {
throw new IllegalArgumentException("a header name can only contain \"token\" characters, " +
"but found invalid character 0x" + Integer.toHexString(name.charAt(index)) +
" at index " + index + " of header '" + name + "'.");
}
}
};
private static final ValueValidator<CharSequence> DEFAULT_VALUE_VALIDATOR = new ValueValidator<CharSequence>() {
@Override
public void validate(CharSequence value) {
int index = HttpHeaderValidationUtil.validateValidHeaderValue(value);
if (index != -1) {
throw new IllegalArgumentException("a header value contains prohibited character 0x" +
Integer.toHexString(value.charAt(index)) + " at index " + index + '.');
}
}
};
private static final NameValidator<CharSequence> DEFAULT_TRAILER_NAME_VALIDATOR =
new NameValidator<CharSequence>() {
@Override
public void validateName(CharSequence name) {
DEFAULT_NAME_VALIDATOR.validateName(name);
if (HttpHeaderNames.CONTENT_LENGTH.contentEqualsIgnoreCase(name)
|| HttpHeaderNames.TRANSFER_ENCODING.contentEqualsIgnoreCase(name)
|| HttpHeaderNames.TRAILER.contentEqualsIgnoreCase(name)) {
throw new IllegalArgumentException("prohibited trailing header: " + name);
}
}
};
@SuppressWarnings("unchecked")
private static final NameValidator<CharSequence> NO_NAME_VALIDATOR = NameValidator.NOT_NULL;
@SuppressWarnings("unchecked")
private static final ValueValidator<CharSequence> NO_VALUE_VALIDATOR =
(ValueValidator<CharSequence>) ValueValidator.NO_VALIDATION;
private static final DefaultHttpHeadersFactory DEFAULT =
new DefaultHttpHeadersFactory(DEFAULT_NAME_VALIDATOR, DEFAULT_VALUE_VALIDATOR, false);
private static final DefaultHttpHeadersFactory DEFAULT_TRAILER =
new DefaultHttpHeadersFactory(DEFAULT_TRAILER_NAME_VALIDATOR, DEFAULT_VALUE_VALIDATOR, false);
private static final DefaultHttpHeadersFactory DEFAULT_COMBINING =
new DefaultHttpHeadersFactory(DEFAULT.nameValidator, DEFAULT.valueValidator, true);
private static final DefaultHttpHeadersFactory DEFAULT_NO_VALIDATION =
new DefaultHttpHeadersFactory(NO_NAME_VALIDATOR, NO_VALUE_VALIDATOR, false);
private final NameValidator<CharSequence> nameValidator;
private final ValueValidator<CharSequence> valueValidator;
private final boolean combiningHeaders;
/**
* Create a header builder with the given settings.
*
* @param nameValidator The name validator to use, not null.
* @param valueValidator The value validator to use, not null.
* @param combiningHeaders {@code true} if multi-valued headers should be combined into single lines.
*/
private DefaultHttpHeadersFactory(
NameValidator<CharSequence> nameValidator,
ValueValidator<CharSequence> valueValidator,
boolean combiningHeaders) {
this.nameValidator = checkNotNull(nameValidator, "nameValidator");
this.valueValidator = checkNotNull(valueValidator, "valueValidator");
this.combiningHeaders = combiningHeaders;
}
/**
* Get the default implementation of {@link HttpHeadersFactory} for creating headers.
* <p>
* This {@link DefaultHttpHeadersFactory} creates {@link HttpHeaders} instances that has the
* recommended header validation enabled.
*/
public static DefaultHttpHeadersFactory headersFactory() {
return DEFAULT;
}
/**
* Get the default implementation of {@link HttpHeadersFactory} for creating trailers.
* <p>
* This {@link DefaultHttpHeadersFactory} creates {@link HttpHeaders} instances that has the
* validation enabled that is recommended for trailers.
*/
public static DefaultHttpHeadersFactory trailersFactory() {
return DEFAULT_TRAILER;
}
@Override
public HttpHeaders newHeaders() {
if (isCombiningHeaders()) {
return new CombinedHttpHeaders(getNameValidator(), getValueValidator());
}
return new DefaultHttpHeaders(getNameValidator(), getValueValidator());
}
@Override
public HttpHeaders newEmptyHeaders() {
if (isCombiningHeaders()) {
return new CombinedHttpHeaders(getNameValidator(), getValueValidator(), 2);
}
return new DefaultHttpHeaders(getNameValidator(), getValueValidator(), 2);
}
/**
* Create a new builder that has HTTP header name validation enabled or disabled.
* <p>
* <b>Warning!</b> Setting {@code validation} to {@code false} will mean that Netty won't
* validate & protect against user-supplied headers that are malicious.
* This can leave your server implementation vulnerable to
* <a href="https://cwe.mitre.org/data/definitions/113.html">
* CWE-113: Improper Neutralization of CRLF Sequences in HTTP Headers ('HTTP Response Splitting')
* </a>.
* When disabling this validation, it is the responsibility of the caller to ensure that the values supplied
* do not contain a non-url-escaped carriage return (CR) and/or line feed (LF) characters.
*
* @param validation If validation should be enabled or disabled.
* @return The new builder.
*/
public DefaultHttpHeadersFactory withNameValidation(boolean validation) {
return withNameValidator(validation ? DEFAULT_NAME_VALIDATOR : NO_NAME_VALIDATOR);
}
/**
* Create a new builder that with the given {@link NameValidator}.
* <p>
* <b>Warning!</b> If the given validator does not check that the header names are standards compliant, Netty won't
* validate & protect against user-supplied headers that are malicious.
* This can leave your server implementation vulnerable to
* <a href="https://cwe.mitre.org/data/definitions/113.html">
* CWE-113: Improper Neutralization of CRLF Sequences in HTTP Headers ('HTTP Response Splitting')
* </a>.
* When disabling this validation, it is the responsibility of the caller to ensure that the values supplied
* do not contain a non-url-escaped carriage return (CR) and/or line feed (LF) characters.
*
* @param validator The HTTP header name validator to use.
* @return The new builder.
*/
public DefaultHttpHeadersFactory withNameValidator(NameValidator<CharSequence> validator) {
if (nameValidator == checkNotNull(validator, "validator")) {
return this;
}
if (validator == DEFAULT_NAME_VALIDATOR && valueValidator == DEFAULT_VALUE_VALIDATOR) {
return combiningHeaders ? DEFAULT_COMBINING : DEFAULT;
}
return new DefaultHttpHeadersFactory(validator, valueValidator, combiningHeaders);
}
/**
* Create a new builder that has HTTP header value validation enabled or disabled.
* <p>
* <b>Warning!</b> Setting {@code validation} to {@code false} will mean that Netty won't
* validate & protect against user-supplied headers that are malicious.
* This can leave your server implementation vulnerable to
* <a href="https://cwe.mitre.org/data/definitions/113.html">
* CWE-113: Improper Neutralization of CRLF Sequences in HTTP Headers ('HTTP Response Splitting')
* </a>.
* When disabling this validation, it is the responsibility of the caller to ensure that the values supplied
* do not contain a non-url-escaped carriage return (CR) and/or line feed (LF) characters.
*
* @param validation If validation should be enabled or disabled.
* @return The new builder.
*/
public DefaultHttpHeadersFactory withValueValidation(boolean validation) {
return withValueValidator(validation ? DEFAULT_VALUE_VALIDATOR : NO_VALUE_VALIDATOR);
}
/**
* Create a new builder that with the given {@link ValueValidator}.
* <p>
* <b>Warning!</b> If the given validator does not check that the header values are standards compliant, Netty won't
* validate & protect against user-supplied headers that are malicious.
* This can leave your server implementation vulnerable to
* <a href="https://cwe.mitre.org/data/definitions/113.html">
* CWE-113: Improper Neutralization of CRLF Sequences in HTTP Headers ('HTTP Response Splitting')
* </a>.
* When disabling this validation, it is the responsibility of the caller to ensure that the values supplied
* do not contain a non-url-escaped carriage return (CR) and/or line feed (LF) characters.
*
* @param validator The HTTP header name validator to use.
* @return The new builder.
*/
public DefaultHttpHeadersFactory withValueValidator(ValueValidator<CharSequence> validator) {
if (valueValidator == checkNotNull(validator, "validator")) {
return this;
}
if (nameValidator == DEFAULT_NAME_VALIDATOR && validator == DEFAULT_VALUE_VALIDATOR) {
return combiningHeaders ? DEFAULT_COMBINING : DEFAULT;
}
return new DefaultHttpHeadersFactory(nameValidator, validator, combiningHeaders);
}
/**
* Create a new builder that has HTTP header validation enabled or disabled.
* <p>
* <b>Warning!</b> Setting {@code validation} to {@code false} will mean that Netty won't
* validate & protect against user-supplied headers that are malicious.
* This can leave your server implementation vulnerable to
* <a href="https://cwe.mitre.org/data/definitions/113.html">
* CWE-113: Improper Neutralization of CRLF Sequences in HTTP Headers ('HTTP Response Splitting')
* </a>.
* When disabling this validation, it is the responsibility of the caller to ensure that the values supplied
* do not contain a non-url-escaped carriage return (CR) and/or line feed (LF) characters.
*
* @param validation If validation should be enabled or disabled.
* @return The new builder.
*/
public DefaultHttpHeadersFactory withValidation(boolean validation) {
if (this == DEFAULT && !validation) {
return DEFAULT_NO_VALIDATION;
}
if (this == DEFAULT_NO_VALIDATION && validation) {
return DEFAULT;
}
return withNameValidation(validation).withValueValidation(validation);
}
/**
* Create a new builder that will build {@link HttpHeaders} objects that either combine
* multi-valued headers, or not.
*
* @param combiningHeaders {@code true} if multi-valued headers should be combined, otherwise {@code false}.
* @return The new builder.
*/
public DefaultHttpHeadersFactory withCombiningHeaders(boolean combiningHeaders) {
if (this.combiningHeaders == combiningHeaders) {
return this;
}
return new DefaultHttpHeadersFactory(nameValidator, valueValidator, combiningHeaders);
}
/**
* Get the currently configured {@link NameValidator}.
* <p>
* This method will be used by the {@link #newHeaders()} method.
*
* @return The configured name validator.
*/
public NameValidator<CharSequence> getNameValidator() {
return nameValidator;
}
/**
* Get the currently configured {@link ValueValidator}.
* <p>
* This method will be used by the {@link #newHeaders()} method.
*
* @return The configured value validator.
*/
public ValueValidator<CharSequence> getValueValidator() {
return valueValidator;
}
/**
* Check whether header combining is enabled or not.
*
* @return {@code true} if header value combining is enabled, otherwise {@code false}.
*/
public boolean isCombiningHeaders() {
return combiningHeaders;
}
/**
* Check whether header name validation is enabled.
*
* @return {@code true} if header name validation is enabled, otherwise {@code false}.
*/
public boolean isValidatingHeaderNames() {
return nameValidator != NO_NAME_VALIDATOR;
}
/**
* Check whether header value validation is enabled.
*
* @return {@code true} if header value validation is enabled, otherwise {@code false}.
*/
public boolean isValidatingHeaderValues() {
return valueValidator != NO_VALUE_VALIDATOR;
}
}
|
DefaultHttpHeadersFactory
|
java
|
micronaut-projects__micronaut-core
|
http-server-netty/src/main/java/io/micronaut/http/server/netty/binders/StreamedNettyRequestArgumentBinder.java
|
{
"start": 1217,
"end": 2474
}
|
interface ____<T> extends RequestArgumentBinder<T> {
@Override
default BindingResult<T> bind(ArgumentConversionContext<T> context, HttpRequest<?> source) {
if (source instanceof NettyHttpRequest<?> nettyHttpRequest) {
io.netty.handler.codec.http.HttpRequest nativeRequest = nettyHttpRequest.getNativeRequest();
if (nativeRequest instanceof StreamedHttpRequest streamedHttpRequest) {
return bindForStreamedNettyRequest(context, streamedHttpRequest, nettyHttpRequest);
}
}
return BindingResult.empty();
}
/**
* Bind the given argument from the given source.
*
* @param context The {@link ArgumentConversionContext}
* @param streamedHttpRequest The streamed HTTP request
* @param nettyHttpRequest The netty http request
* @return An {@link Optional} of the value. If no binding was possible {@link Optional#empty()}
*/
BindingResult<T> bindForStreamedNettyRequest(ArgumentConversionContext<T> context,
StreamedHttpRequest streamedHttpRequest,
NettyHttpRequest<?> nettyHttpRequest);
}
|
StreamedNettyRequestArgumentBinder
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/web/servlet/samples/client/standalone/SseTests.java
|
{
"start": 2065,
"end": 2323
}
|
class ____ {
@GetMapping(path = "/persons", produces = "text/event-stream")
public Flux<Person> getPersonStream() {
return Flux.interval(ofMillis(100)).take(50).onBackpressureBuffer(50)
.map(index -> new Person("N" + index));
}
}
}
|
SseController
|
java
|
quarkusio__quarkus
|
independent-projects/resteasy-reactive/server/runtime/src/main/java/org/jboss/resteasy/reactive/server/providers/serialisers/jsonp/ServerJsonValueHandler.java
|
{
"start": 781,
"end": 2023
}
|
class ____ extends JsonValueHandler
implements ServerMessageBodyWriter<JsonValue>, ServerMessageBodyReader<JsonValue> {
@Override
public boolean isWriteable(Class<?> type, Type genericType, ResteasyReactiveResourceInfo target, MediaType mediaType) {
return JsonValue.class.isAssignableFrom(type);
}
@Override
public void writeResponse(JsonValue o, Type genericType, ServerRequestContext context) throws WebApplicationException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
try (JsonWriter writer = JsonpUtil.writer(out, context.getResponseMediaType())) {
writer.write(o);
}
context.serverResponse().end(out.toByteArray());
}
@Override
public boolean isReadable(Class<?> type, Type genericType, ResteasyReactiveResourceInfo lazyMethod,
MediaType mediaType) {
return JsonValue.class.isAssignableFrom(type);
}
@Override
public JsonValue readFrom(Class<JsonValue> type, Type genericType, MediaType mediaType,
ServerRequestContext context) throws WebApplicationException, IOException {
return JsonpUtil.reader(context.getInputStream(), mediaType).readValue();
}
}
|
ServerJsonValueHandler
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/context/TestContextAnnotationUtilsTests.java
|
{
"start": 24823,
"end": 24979
}
|
interface ____ {
@AliasFor(annotation = ContextConfiguration.class)
Class<?>[] classes() default { DevConfig.class, ProductionConfig.class };
|
MetaConfig
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/context/annotation/FactoryMethodResolutionTests.java
|
{
"start": 2428,
"end": 2521
}
|
class ____ {
}
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
@
|
ExampleBean
|
java
|
spring-projects__spring-framework
|
spring-webmvc/src/test/java/org/springframework/web/servlet/support/FlashMapManagerTests.java
|
{
"start": 12729,
"end": 13300
}
|
class ____ extends AbstractFlashMapManager {
private List<FlashMap> flashMaps;
public void setFlashMaps(List<FlashMap> flashMaps) {
this.flashMaps = new CopyOnWriteArrayList<>(flashMaps);
}
public List<FlashMap> getFlashMaps() {
return this.flashMaps;
}
@Override
protected List<FlashMap> retrieveFlashMaps(HttpServletRequest request) {
return this.flashMaps;
}
@Override
protected void updateFlashMaps(List<FlashMap> maps, HttpServletRequest request, HttpServletResponse response) {
this.flashMaps = maps;
}
}
}
|
TestFlashMapManager
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/search/ClearScrollResponseTests.java
|
{
"start": 1356,
"end": 3573
}
|
class ____ extends ESTestCase {
private static final ConstructingObjectParser<ClosePointInTimeResponse, Void> PARSER = new ConstructingObjectParser<>(
"clear_scroll",
true,
a -> new ClosePointInTimeResponse((boolean) a[0], (int) a[1])
);
static {
PARSER.declareField(
constructorArg(),
(parser, context) -> parser.booleanValue(),
ClearScrollResponse.SUCCEEDED,
ObjectParser.ValueType.BOOLEAN
);
PARSER.declareField(
constructorArg(),
(parser, context) -> parser.intValue(),
ClearScrollResponse.NUMFREED,
ObjectParser.ValueType.INT
);
}
public void testToXContent() throws IOException {
ClearScrollResponse clearScrollResponse = new ClearScrollResponse(true, 10);
try (XContentBuilder builder = JsonXContent.contentBuilder()) {
clearScrollResponse.toXContent(builder, ToXContent.EMPTY_PARAMS);
}
assertEquals(true, clearScrollResponse.isSucceeded());
assertEquals(10, clearScrollResponse.getNumFreed());
}
public void testToAndFromXContent() throws IOException {
XContentType xContentType = randomFrom(XContentType.values());
ClearScrollResponse originalResponse = createTestItem();
BytesReference originalBytes = toShuffledXContent(originalResponse, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean());
ClearScrollResponse parsedResponse;
try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) {
parsedResponse = PARSER.parse(parser, null);
}
assertEquals(originalResponse.isSucceeded(), parsedResponse.isSucceeded());
assertEquals(originalResponse.getNumFreed(), parsedResponse.getNumFreed());
BytesReference parsedBytes = XContentHelper.toXContent(parsedResponse, xContentType, randomBoolean());
assertToXContentEquivalent(originalBytes, parsedBytes, xContentType);
}
private static ClearScrollResponse createTestItem() {
return new ClearScrollResponse(randomBoolean(), randomIntBetween(0, Integer.MAX_VALUE));
}
}
|
ClearScrollResponseTests
|
java
|
apache__logging-log4j2
|
log4j-core/src/main/java/org/apache/logging/log4j/core/async/AsyncQueueFullPolicyFactory.java
|
{
"start": 2246,
"end": 3295
}
|
class ____ {
static final String PROPERTY_NAME_ASYNC_EVENT_ROUTER = "log4j2.AsyncQueueFullPolicy";
static final String PROPERTY_VALUE_DEFAULT_ASYNC_EVENT_ROUTER = "Default";
static final String PROPERTY_VALUE_DISCARDING_ASYNC_EVENT_ROUTER = "Discard";
static final String PROPERTY_NAME_DISCARDING_THRESHOLD_LEVEL = "log4j2.DiscardThreshold";
private static final Logger LOGGER = StatusLogger.getLogger();
/**
* Creates and returns {@link AsyncQueueFullPolicy} instances based on user-specified system properties.
* <p>
* Property {@code "log4j2.AsyncQueueFullPolicy"} controls the routing behaviour. If this property is not specified or
* has value {@code "Default"}, this method returns {@link DefaultAsyncQueueFullPolicy} objects.
* </p> <p>
* If this property has value {@code "Discard"}, this method returns {@link DiscardingAsyncQueueFullPolicy} objects.
* </p> <p>
* For any other value, this method interprets the value as the fully qualified name of a
|
AsyncQueueFullPolicyFactory
|
java
|
apache__flink
|
flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/expressions/resolver/rules/ResolveSqlCallRule.java
|
{
"start": 1696,
"end": 2280
}
|
class ____ implements ResolverRule {
@Override
public List<Expression> apply(List<Expression> expression, ResolutionContext context) {
// only the top-level expressions may access the output data type
final LogicalType outputType =
context.getOutputDataType().map(DataType::getLogicalType).orElse(null);
final TranslateSqlCallsVisitor visitor = new TranslateSqlCallsVisitor(context, outputType);
return expression.stream().map(expr -> expr.accept(visitor)).collect(Collectors.toList());
}
private static
|
ResolveSqlCallRule
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/nullness/FieldMissingNullableTest.java
|
{
"start": 16916,
"end": 17048
}
|
class ____ {
@Nullable private final Object obj1 = null;
private final Object obj2 = null;
@
|
T
|
java
|
apache__camel
|
dsl/camel-jbang/camel-jbang-plugin-kubernetes/src/main/java/org/apache/camel/dsl/jbang/core/commands/kubernetes/traits/MountTrait.java
|
{
"start": 11068,
"end": 11149
}
|
enum ____ {
DATA,
TEXT
}
private
|
ContentType
|
java
|
apache__camel
|
components/camel-ai/camel-milvus/src/main/java/org/apache/camel/component/milvus/transform/MilvusEmbeddingsDataTypeTransformer.java
|
{
"start": 1835,
"end": 5324
}
|
class ____ extends Transformer {
@Override
public void transform(Message message, DataType fromType, DataType toType) {
Embedding embedding = message.getHeader(CamelLangchain4jAttributes.CAMEL_LANGCHAIN4J_EMBEDDING_VECTOR, Embedding.class);
String textFieldName = message.getHeader(MilvusHeaders.TEXT_FIELD_NAME, () -> "text", String.class);
String vectorFieldName = message.getHeader(MilvusHeaders.VECTOR_FIELD_NAME, () -> "vector", String.class);
String collectionName = message.getHeader(MilvusHeaders.COLLECTION_NAME, () -> "embeddings", String.class);
String keyName = message.getHeader(MilvusHeaders.KEY_NAME, () -> "id", String.class);
Object keyValue = message.getHeader(MilvusHeaders.KEY_VALUE, () -> null);
TextSegment text = message.getBody(TextSegment.class);
final MilvusAction action = message.getHeader(MilvusHeaders.ACTION, MilvusAction.class);
switch (action) {
case INSERT -> insertEmbeddingOperation(message, embedding, vectorFieldName, textFieldName, text, collectionName,
keyValue, keyName);
case UPSERT -> upsertEmbeddingOperation(message, embedding, vectorFieldName, textFieldName, text, collectionName,
keyValue, keyName);
default -> throw new IllegalStateException("The only operations supported are insert and upsert");
}
}
private static void insertEmbeddingOperation(
Message message, Embedding embedding, String vectorFieldName, String textFieldName, TextSegment text,
String collectionName, Object keyValue, String keyName) {
List<InsertParam.Field> fields = new ArrayList<>();
ArrayList list = new ArrayList<>();
list.add(embedding.vectorAsList());
fields.add(new InsertParam.Field(vectorFieldName, list));
fields.add(new InsertParam.Field(textFieldName, Collections.singletonList(text.text())));
if (ObjectHelper.isNotEmpty(keyValue) && ObjectHelper.isNotEmpty(keyName)) {
ArrayList keyValues = new ArrayList<>();
keyValues.add(keyValue);
fields.add(new InsertParam.Field(keyName, keyValues));
}
InsertParam insertParam = InsertParam.newBuilder()
.withCollectionName(collectionName)
.withFields(fields)
.build();
message.setBody(insertParam);
}
private static void upsertEmbeddingOperation(
Message message, Embedding embedding, String vectorFieldName, String textFieldName, TextSegment text,
String collectionName, Object keyValue, String keyName) {
List<InsertParam.Field> fields = new ArrayList<>();
ArrayList list = new ArrayList<>();
list.add(embedding.vectorAsList());
fields.add(new UpsertParam.Field(vectorFieldName, list));
fields.add(new UpsertParam.Field(textFieldName, Collections.singletonList(text.text())));
if (ObjectHelper.isNotEmpty(keyValue) && ObjectHelper.isNotEmpty(keyName)) {
ArrayList keyValues = new ArrayList<>();
keyValues.add(keyValue);
fields.add(new UpsertParam.Field(keyName, keyValues));
}
UpsertParam upsertParam = UpsertParam.newBuilder()
.withCollectionName(collectionName)
.withFields(fields)
.build();
message.setBody(upsertParam);
}
}
|
MilvusEmbeddingsDataTypeTransformer
|
java
|
spring-projects__spring-boot
|
core/spring-boot/src/test/java/org/springframework/boot/diagnostics/analyzer/BeanNotOfRequiredTypeFailureAnalyzerTests.java
|
{
"start": 2975,
"end": 3126
}
|
class ____ {
@Bean
AsyncBean asyncBean() {
return new AsyncBean();
}
}
@Configuration(proxyBeanMethods = false)
static
|
JdkProxyConfiguration
|
java
|
spring-projects__spring-security
|
oauth2/oauth2-authorization-server/src/main/java/org/springframework/security/oauth2/server/authorization/JdbcOAuth2AuthorizationService.java
|
{
"start": 38435,
"end": 38713
}
|
class ____ protect from getting {@link NoClassDefFoundError} when Jackson 2 is
* not on the classpath.
*
* @deprecated This is used to allow transition to Jackson 3. Use {@link Jackson3}
* instead.
*/
@Deprecated(forRemoval = true, since = "7.0")
private static final
|
to
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/FunctionDefinition.java
|
{
"start": 550,
"end": 928
}
|
class ____ {
/**
* Converts an {@link UnresolvedFunction} into a proper {@link Function}.
* <p>
* Provides the basic signature (unresolved function + runtime configuration object) while
* allowing extensions through the vararg extras which subclasses should expand for their
* own purposes.
*/
@FunctionalInterface
public
|
FunctionDefinition
|
java
|
spring-projects__spring-boot
|
module/spring-boot-quartz/src/test/java/org/springframework/boot/quartz/autoconfigure/QuartzAutoConfigurationTests.java
|
{
"start": 23568,
"end": 23742
}
|
class ____ {
ComponentThatUsesScheduler(Scheduler scheduler) {
Assert.notNull(scheduler, "'scheduler' must not be null");
}
}
public static
|
ComponentThatUsesScheduler
|
java
|
mybatis__mybatis-3
|
src/test/java/org/apache/ibatis/submitted/enum_with_method/Currency.java
|
{
"start": 742,
"end": 990
}
|
enum ____ {
Dollar {
@Override
public BigDecimal getExchange() {
return null;
}
},
RMB {
@Override
public BigDecimal getExchange() {
return null;
}
};
public abstract BigDecimal getExchange();
}
|
Currency
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/scheduler/adaptivebatch/StreamGraphOptimizationStrategy.java
|
{
"start": 1761,
"end": 3070
}
|
class ____ "
+ "implementing the StreamGraphOptimizationStrategy interface.");
/**
* Initializes the StreamGraphOptimizationStrategy with the provided {@link StreamGraphContext}.
*
* @param context the StreamGraphContext with a read-only view of a StreamGraph, providing
* methods to modify StreamEdges and StreamNodes within the StreamGraph.
*/
default void initialize(StreamGraphContext context) {}
/**
* Tries to optimize the StreamGraph using the provided {@link OperatorsFinished} and {@link
* StreamGraphContext}. The method returns a boolean indicating whether the StreamGraph was
* successfully optimized.
*
* @param operatorsFinished the OperatorsFinished object containing information about completed
* operators and their produced data size and distribution information.
* @param context the StreamGraphContext with a read-only view of a StreamGraph, providing
* methods to modify StreamEdges and StreamNodes within the StreamGraph.
* @return {@code true} if the StreamGraph was successfully optimized; {@code false} otherwise.
*/
boolean onOperatorsFinished(OperatorsFinished operatorsFinished, StreamGraphContext context)
throws Exception;
}
|
names
|
java
|
elastic__elasticsearch
|
server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemResourceSnapshotIT.java
|
{
"start": 2656,
"end": 56774
}
|
class ____ extends AbstractSnapshotIntegTestCase {
public static final String REPO_NAME = "test-repo";
private List<String> dataNodes = null;
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
List<Class<? extends Plugin>> plugins = new ArrayList<>(super.nodePlugins());
plugins.add(SystemIndexTestPlugin.class);
plugins.add(AnotherSystemIndexTestPlugin.class);
plugins.add(AssociatedIndicesTestPlugin.class);
plugins.add(DataStreamsPlugin.class);
plugins.add(AnotherSystemDataStreamTestPlugin.class);
plugins.add(SystemDataStreamTestPlugin.class);
plugins.add(SystemDataStreamManyShardsTestPlugin.class);
plugins.add(AssociatedIndicesSystemDSTestPlugin.class);
return plugins;
}
@Before
public void setup() {
internalCluster().startMasterOnlyNodes(2);
dataNodes = internalCluster().startDataOnlyNodes(2);
}
/**
* Test that if a snapshot includes system indices and we restore global state,
* with no reference to feature state, the system indices are restored too.
*/
public void testRestoreSystemIndicesAsGlobalState() {
createRepository(REPO_NAME, "fs");
// put a document in a system index and data stream
indexDoc(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, "1", "purpose", "pre-snapshot doc");
indexDataStream(SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME, "1", "purpose", "pre-snapshot doc");
refresh(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME);
// run a snapshot including global state
createFullSnapshot(REPO_NAME, "test-snap");
// add another document to each system resource
indexDoc(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, "2", "purpose", "post-snapshot doc");
indexDataStream(SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME, "2", "purpose", "post-snapshot doc");
refresh(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME);
assertThat(getDocCount(SystemIndexTestPlugin.SYSTEM_INDEX_NAME), equalTo(2L));
// restore snapshot with global state, without closing the system index
RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(
TEST_REQUEST_TIMEOUT,
REPO_NAME,
"test-snap"
).setWaitForCompletion(true).setRestoreGlobalState(true).get();
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
// verify only the original documents are restored
assertThat(getDocCount(SystemIndexTestPlugin.SYSTEM_INDEX_NAME), equalTo(1L));
assertThat(getDocCount(SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME), equalTo(1L));
}
/**
* If we take a snapshot with includeGlobalState set to false, are system indices included?
*/
public void testSnapshotWithoutGlobalState() {
createRepository(REPO_NAME, "fs");
indexDoc(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, "1", "purpose", "system index doc");
indexDataStream(SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME, "1", "purpose", "pre-snapshot doc");
indexDoc("not-a-system-index", "1", "purpose", "non system index doc");
// run a snapshot without global state
CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap")
.setWaitForCompletion(true)
.setIncludeGlobalState(false)
.get();
assertSnapshotSuccess(createSnapshotResponse);
// check snapshot info for for which
clusterAdmin().prepareGetRepositories(TEST_REQUEST_TIMEOUT, REPO_NAME).get();
Set<String> snapshottedIndices = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, REPO_NAME)
.get()
.getSnapshots()
.stream()
.map(SnapshotInfo::indices)
.flatMap(Collection::stream)
.collect(Collectors.toSet());
assertThat("not-a-system-index", in(snapshottedIndices));
assertThat(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, not(in(snapshottedIndices)));
assertThat(SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME, not(in(snapshottedIndices)));
}
/**
* Test that we can snapshot feature states by name.
*/
public void testSnapshotByFeature() {
createRepository(REPO_NAME, "fs");
indexDoc(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, "1", "purpose", "pre-snapshot doc");
indexDoc(AnotherSystemIndexTestPlugin.SYSTEM_INDEX_NAME, "1", "purpose", "pre-snapshot doc");
indexDataStream(SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME, "1", "purpose", "pre-snapshot doc");
indexDataStream(AnotherSystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME, "1", "purpose", "pre-snapshot doc");
refresh(
SystemIndexTestPlugin.SYSTEM_INDEX_NAME,
AnotherSystemIndexTestPlugin.SYSTEM_INDEX_NAME,
SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME,
AnotherSystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME
);
// snapshot by feature
CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap")
.setIncludeGlobalState(true)
.setWaitForCompletion(true)
.setFeatureStates(
SystemIndexTestPlugin.class.getSimpleName(),
AnotherSystemIndexTestPlugin.class.getSimpleName(),
SystemDataStreamTestPlugin.class.getSimpleName(),
AnotherSystemDataStreamTestPlugin.class.getSimpleName()
)
.get();
assertSnapshotSuccess(createSnapshotResponse);
// add some other documents
indexDoc(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, "2", "purpose", "post-snapshot doc");
indexDoc(AnotherSystemIndexTestPlugin.SYSTEM_INDEX_NAME, "2", "purpose", "post-snapshot doc");
indexDataStream(SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME, "2", "purpose", "post-snapshot doc");
indexDataStream(AnotherSystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME, "2", "purpose", "post-snapshot doc");
refresh(
SystemIndexTestPlugin.SYSTEM_INDEX_NAME,
AnotherSystemIndexTestPlugin.SYSTEM_INDEX_NAME,
SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME,
AnotherSystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME
);
assertThat(getDocCount(SystemIndexTestPlugin.SYSTEM_INDEX_NAME), equalTo(2L));
assertThat(getDocCount(AnotherSystemIndexTestPlugin.SYSTEM_INDEX_NAME), equalTo(2L));
assertThat(getDocCount(SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME), equalTo(2L));
assertThat(getDocCount(AnotherSystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME), equalTo(2L));
// restore indices as global state without closing the index
RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(
TEST_REQUEST_TIMEOUT,
REPO_NAME,
"test-snap"
).setWaitForCompletion(true).setRestoreGlobalState(true).get();
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
// verify only the original document is restored
assertThat(getDocCount(SystemIndexTestPlugin.SYSTEM_INDEX_NAME), equalTo(1L));
assertThat(getDocCount(SystemIndexTestPlugin.SYSTEM_INDEX_NAME), equalTo(1L));
assertThat(getDocCount(SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME), equalTo(1L));
assertThat(getDocCount(AnotherSystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME), equalTo(1L));
}
/**
* Take a snapshot with global state but don't restore system indexes. By
* default, snapshot restorations ignore global state and don't include system indices.
*
* This means that we should be able to take a snapshot with a system index in it and restore it without specifying indices, even if
* the cluster already has a system index with the same name (because the system index from the snapshot won't be restored).
*/
public void testDefaultRestoreOnlyRegularIndices() {
createRepository(REPO_NAME, "fs");
final String regularIndex = "test-idx";
indexDoc(regularIndex, "1", "purpose", "create an index that can be restored");
indexDoc(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, "1", "purpose", "pre-snapshot doc");
indexDataStream(SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME, "1", "purpose", "pre-snapshot doc");
refresh(regularIndex, SystemIndexTestPlugin.SYSTEM_INDEX_NAME, SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME);
// snapshot including global state
CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap")
.setWaitForCompletion(true)
.setIncludeGlobalState(true)
.get();
assertSnapshotSuccess(createSnapshotResponse);
// Delete the regular index so we can restore it
assertAcked(cluster().client().admin().indices().prepareDelete(regularIndex));
RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap")
.setWaitForCompletion(true)
.get();
assertThat(restoreResponse.getRestoreInfo().totalShards(), greaterThan(0));
assertThat(
restoreResponse.getRestoreInfo().indices(),
allOf(
hasItem(regularIndex),
not(hasItem(SystemIndexTestPlugin.SYSTEM_INDEX_NAME)),
not(hasItem(SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME))
)
);
}
/**
* Take a snapshot with global state but restore features by feature state.
*/
public void testRestoreByFeature() {
createRepository(REPO_NAME, "fs");
final String regularIndex = "test-idx";
indexDoc(regularIndex, "1", "purpose", "create an index that can be restored");
indexDoc(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, "1", "purpose", "pre-snapshot doc");
indexDoc(AnotherSystemIndexTestPlugin.SYSTEM_INDEX_NAME, "1", "purpose", "pre-snapshot doc");
indexDataStream(SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME, "1", "purpose", "pre-snapshot doc");
indexDataStream(AnotherSystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME, "1", "purpose", "pre-snapshot doc");
refresh(
regularIndex,
SystemIndexTestPlugin.SYSTEM_INDEX_NAME,
AnotherSystemIndexTestPlugin.SYSTEM_INDEX_NAME,
SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME,
AnotherSystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME
);
// snapshot including global state
CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap")
.setWaitForCompletion(true)
.setIncludeGlobalState(true)
.get();
assertSnapshotSuccess(createSnapshotResponse);
// add some other documents
indexDoc(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, "2", "purpose", "post-snapshot doc");
indexDoc(AnotherSystemIndexTestPlugin.SYSTEM_INDEX_NAME, "2", "purpose", "post-snapshot doc");
indexDataStream(SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME, "2", "purpose", "post-snapshot doc");
indexDataStream(AnotherSystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME, "2", "purpose", "post-snapshot doc");
refresh(
regularIndex,
SystemIndexTestPlugin.SYSTEM_INDEX_NAME,
AnotherSystemIndexTestPlugin.SYSTEM_INDEX_NAME,
SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME,
AnotherSystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME
);
assertThat(getDocCount(SystemIndexTestPlugin.SYSTEM_INDEX_NAME), equalTo(2L));
assertThat(getDocCount(AnotherSystemIndexTestPlugin.SYSTEM_INDEX_NAME), equalTo(2L));
assertThat(getDocCount(SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME), equalTo(2L));
assertThat(getDocCount(AnotherSystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME), equalTo(2L));
// Delete the regular index so we can restore it
assertAcked(cluster().client().admin().indices().prepareDelete(regularIndex));
// restore indices by feature
RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(
TEST_REQUEST_TIMEOUT,
REPO_NAME,
"test-snap"
).setWaitForCompletion(true).setFeatureStates("SystemIndexTestPlugin", "SystemDataStreamTestPlugin").get();
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
// verify that the restored system index and data stream each only have one document
assertThat(getDocCount(SystemIndexTestPlugin.SYSTEM_INDEX_NAME), equalTo(1L));
assertThat(getDocCount(SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME), equalTo(1L));
// but the non-requested features should still have their new documents
assertThat(getDocCount(AnotherSystemIndexTestPlugin.SYSTEM_INDEX_NAME), equalTo(2L));
assertThat(getDocCount(AnotherSystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME), equalTo(2L));
}
/**
* Test that if a feature state has associated indices, they are included in the snapshot
* when that feature state is selected.
*/
public void testSnapshotAndRestoreAssociatedIndices() {
createRepository(REPO_NAME, "fs");
final String regularIndex = "regular-idx";
// put documents into a regular index as well as the system index and associated index of a feature
indexDoc(regularIndex, "1", "purpose", "pre-snapshot doc");
indexDoc(AssociatedIndicesTestPlugin.SYSTEM_INDEX_NAME, "1", "purpose", "pre-snapshot doc");
indexDoc(AssociatedIndicesTestPlugin.ASSOCIATED_INDEX_NAME, "1", "purpose", "pre-snapshot doc");
indexDataStream(AssociatedIndicesSystemDSTestPlugin.SYSTEM_DATASTREAM_NAME, "1", "purpose", "pre-snapshot doc");
indexDoc(AssociatedIndicesSystemDSTestPlugin.ASSOCIATED_INDEX_NAME, "1", "purpose", "pre-snapshot doc");
refresh(
regularIndex,
AssociatedIndicesTestPlugin.SYSTEM_INDEX_NAME,
AssociatedIndicesTestPlugin.ASSOCIATED_INDEX_NAME,
AssociatedIndicesSystemDSTestPlugin.SYSTEM_DATASTREAM_NAME,
AssociatedIndicesSystemDSTestPlugin.ASSOCIATED_INDEX_NAME
);
// snapshot
CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap")
.setFeatureStates(AssociatedIndicesTestPlugin.class.getSimpleName(), AssociatedIndicesSystemDSTestPlugin.class.getSimpleName())
.setWaitForCompletion(true)
.get();
assertSnapshotSuccess(createSnapshotResponse);
// verify the correctness of the snapshot
var snapshotsResponse = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, REPO_NAME).get();
Set<String> snapshottedIndices = snapshotsResponse.getSnapshots()
.stream()
.map(SnapshotInfo::indices)
.flatMap(Collection::stream)
.collect(Collectors.toSet());
Set<String> snapshottedDataStreams = snapshotsResponse.getSnapshots()
.stream()
.map(SnapshotInfo::dataStreams)
.flatMap(Collection::stream)
.collect(Collectors.toSet());
assertThat(snapshottedIndices, hasItem(AssociatedIndicesTestPlugin.SYSTEM_INDEX_NAME));
assertThat(snapshottedIndices, hasItem(AssociatedIndicesTestPlugin.ASSOCIATED_INDEX_NAME));
assertThat(snapshottedDataStreams, hasItem(AssociatedIndicesSystemDSTestPlugin.SYSTEM_DATASTREAM_NAME));
assertThat(snapshottedIndices, hasItem(AssociatedIndicesSystemDSTestPlugin.ASSOCIATED_INDEX_NAME));
// add some other documents
indexDoc(regularIndex, "2", "purpose", "post-snapshot doc");
indexDoc(AssociatedIndicesTestPlugin.SYSTEM_INDEX_NAME, "2", "purpose", "post-snapshot doc");
indexDataStream(AssociatedIndicesSystemDSTestPlugin.SYSTEM_DATASTREAM_NAME, "2", "purpose", "post-snapshot doc");
refresh(regularIndex, AssociatedIndicesTestPlugin.SYSTEM_INDEX_NAME, AssociatedIndicesSystemDSTestPlugin.SYSTEM_DATASTREAM_NAME);
assertThat(getDocCount(regularIndex), equalTo(2L));
assertThat(getDocCount(AssociatedIndicesTestPlugin.SYSTEM_INDEX_NAME), equalTo(2L));
assertThat(getDocCount(AssociatedIndicesSystemDSTestPlugin.SYSTEM_DATASTREAM_NAME), equalTo(2L));
// And delete the associated index so we can restore it
assertAcked(
indicesAdmin().prepareDelete(
AssociatedIndicesTestPlugin.ASSOCIATED_INDEX_NAME,
AssociatedIndicesSystemDSTestPlugin.ASSOCIATED_INDEX_NAME
).get()
);
// restore the feature state and its associated index
RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(
TEST_REQUEST_TIMEOUT,
REPO_NAME,
"test-snap"
)
.setIndices(AssociatedIndicesTestPlugin.ASSOCIATED_INDEX_NAME, AssociatedIndicesSystemDSTestPlugin.ASSOCIATED_INDEX_NAME)
.setWaitForCompletion(true)
.setFeatureStates(AssociatedIndicesTestPlugin.class.getSimpleName(), AssociatedIndicesSystemDSTestPlugin.class.getSimpleName())
.get();
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
// verify only the original document is restored
assertThat(getDocCount(AssociatedIndicesTestPlugin.SYSTEM_INDEX_NAME), equalTo(1L));
assertThat(getDocCount(AssociatedIndicesTestPlugin.ASSOCIATED_INDEX_NAME), equalTo(1L));
assertThat(getDocCount(AssociatedIndicesSystemDSTestPlugin.SYSTEM_DATASTREAM_NAME), equalTo(1L));
assertThat(getDocCount(AssociatedIndicesSystemDSTestPlugin.ASSOCIATED_INDEX_NAME), equalTo(1L));
}
/**
* Check that if we request a feature not in the snapshot, we get an error.
*/
public void testRestoreFeatureNotInSnapshot() {
createRepository(REPO_NAME, "fs");
indexDoc(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, "1", "purpose", "pre-snapshot doc");
indexDataStream(SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME, "1", "purpose", "pre-snapshot doc");
refresh(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME);
// snapshot including global state
CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap")
.setWaitForCompletion(true)
.setIncludeGlobalState(true)
.get();
assertSnapshotSuccess(createSnapshotResponse);
final String fakeFeatureStateName = "NonExistentTestPlugin";
SnapshotRestoreException exception = expectThrows(
SnapshotRestoreException.class,
clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap")
.setWaitForCompletion(true)
.setFeatureStates("SystemIndexTestPlugin", "SystemDataStreamTestPlugin", fakeFeatureStateName)
);
assertThat(
exception.getMessage(),
containsString("requested feature states [[" + fakeFeatureStateName + "]] are not present in snapshot")
);
}
public void testSnapshottingSystemIndexByNameIsRejected() throws Exception {
createRepository(REPO_NAME, "fs");
// put a document in system index
indexDoc(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, "1", "purpose", "pre-snapshot doc");
refresh(SystemIndexTestPlugin.SYSTEM_INDEX_NAME);
IllegalArgumentException error = expectThrows(
IllegalArgumentException.class,
clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap")
.setIndices(SystemIndexTestPlugin.SYSTEM_INDEX_NAME)
.setWaitForCompletion(true)
.setIncludeGlobalState(randomBoolean())
);
assertThat(
error.getMessage(),
equalTo(
"the [indices] parameter includes system indices [.test-system-idx]; to include or exclude system indices from a snapshot, "
+ "use the [include_global_state] or [feature_states] parameters"
)
);
// And create a successful snapshot so we don't upset the test framework
CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap")
.setWaitForCompletion(true)
.setIncludeGlobalState(true)
.get();
assertSnapshotSuccess(createSnapshotResponse);
}
/**
* Check that directly requesting a system index in a restore request throws an Exception.
*/
public void testRestoringSystemIndexByNameIsRejected() throws IllegalAccessException {
createRepository(REPO_NAME, "fs");
// put a document in system index
indexDoc(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, "1", "purpose", "pre-snapshot doc");
refresh(SystemIndexTestPlugin.SYSTEM_INDEX_NAME);
// snapshot including global state
CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap")
.setWaitForCompletion(true)
.setIncludeGlobalState(true)
.get();
assertSnapshotSuccess(createSnapshotResponse);
// Now that we've taken the snapshot, add another doc
indexDoc(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, "2", "purpose", "post-snapshot doc");
refresh(SystemIndexTestPlugin.SYSTEM_INDEX_NAME);
IllegalArgumentException ex = expectThrows(
IllegalArgumentException.class,
clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap")
.setWaitForCompletion(true)
.setIndices(SystemIndexTestPlugin.SYSTEM_INDEX_NAME)
);
assertThat(
ex.getMessage(),
equalTo("requested system indices [.test-system-idx], but system indices can only be restored as part of a feature state")
);
// Make sure the original index exists unchanged
assertThat(getDocCount(SystemIndexTestPlugin.SYSTEM_INDEX_NAME), equalTo(2L));
}
/**
* Check that if a system index matches a rename pattern in a restore request, it's not renamed
*/
public void testSystemIndicesCannotBeRenamed() {
createRepository(REPO_NAME, "fs");
final String nonSystemIndex = ".test-non-system-index";
indexDoc(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, "1", "purpose", "pre-snapshot doc");
indexDoc(nonSystemIndex, "1", "purpose", "pre-snapshot doc");
refresh(SystemIndexTestPlugin.SYSTEM_INDEX_NAME);
// snapshot including global state
CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap")
.setWaitForCompletion(true)
.setIncludeGlobalState(true)
.get();
assertSnapshotSuccess(createSnapshotResponse);
assertAcked(indicesAdmin().prepareDelete(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, nonSystemIndex).get());
// Restore using a rename pattern that matches both the regular and the system index
clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap")
.setWaitForCompletion(true)
.setRestoreGlobalState(true)
.setRenamePattern(".test-(.+)")
.setRenameReplacement(".test-restored-$1")
.get();
// The original system index and the renamed normal index should exist
assertTrue("System index not renamed", indexExists(SystemIndexTestPlugin.SYSTEM_INDEX_NAME));
assertTrue("Non-system index was renamed", indexExists(".test-restored-non-system-index"));
// The original normal index should still be deleted, and there shouldn't be a renamed version of the system index
assertFalse("Renamed system index doesn't exist", indexExists(".test-restored-system-index"));
assertFalse("Original non-system index doesn't exist", indexExists(nonSystemIndex));
}
/**
 * If the list of feature states to restore is left unspecified and we are restoring global state,
 * all feature states should be restored.
 */
public void testRestoreSystemIndicesAsGlobalStateWithDefaultFeatureStateList() {
    createRepository(REPO_NAME, "fs");
    // seed one doc into a system index and one into a system data stream
    indexDoc(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, "1", "purpose", "pre-snapshot doc");
    indexDataStream(SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME, "1", "purpose", "pre-snapshot doc");
    refresh(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME);
    // run a snapshot including global state
    CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap")
        .setWaitForCompletion(true)
        .setIncludeGlobalState(true)
        .get();
    assertSnapshotSuccess(createSnapshotResponse);
    // add another document after the snapshot, so each system resource now holds 2 docs
    indexDoc(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, "2", "purpose", "post-snapshot doc");
    indexDataStream(SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME, "2", "purpose", "post-snapshot doc");
    refresh(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME);
    assertThat(getDocCount(SystemIndexTestPlugin.SYSTEM_INDEX_NAME), equalTo(2L));
    assertThat(getDocCount(SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME), equalTo(2L));
    // restore with global state and no explicit (i.e. a null) list of feature states
    RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(
        TEST_REQUEST_TIMEOUT,
        REPO_NAME,
        "test-snap"
    ).setWaitForCompletion(true).setRestoreGlobalState(true).get();
    assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
    // verify that both feature states were restored to their snapshotted contents:
    // the post-snapshot docs are gone, leaving exactly the 1 pre-snapshot doc in each
    assertThat(getDocCount(SystemIndexTestPlugin.SYSTEM_INDEX_NAME), equalTo(1L));
    assertThat(getDocCount(SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME), equalTo(1L));
}
/**
 * If the list of feature states to restore contains only "none" and we are restoring global state,
 * no feature states should be restored.
 */
public void testRestoreSystemIndicesAsGlobalStateWithNoFeatureStates() {
    createRepository(REPO_NAME, "fs");
    String regularIndex = "my-index";
    // seed docs into a system index, a system data stream, and a regular index
    indexDoc(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, "1", "purpose", "pre-snapshot doc");
    indexDataStream(SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME, "1", "purpose", "pre-snapshot doc");
    indexDoc(regularIndex, "1", "purpose", "pre-snapshot doc");
    refresh(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME, regularIndex);
    // run a snapshot including global state
    CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap")
        .setWaitForCompletion(true)
        .setIncludeGlobalState(true)
        .get();
    assertSnapshotSuccess(createSnapshotResponse);
    // add another document to each system resource, so they now hold 2 docs
    indexDoc(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, "2", "purpose", "post-snapshot doc");
    indexDataStream(SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME, "2", "purpose", "post-snapshot doc");
    refresh(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME);
    // delete the regular index so the restore has something to bring back
    assertAcked(indicesAdmin().prepareDelete(regularIndex).get());
    assertThat(getDocCount(SystemIndexTestPlugin.SYSTEM_INDEX_NAME), equalTo(2L));
    assertThat(getDocCount(SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME), equalTo(2L));
    // restore with global state and all indices but explicitly no feature states
    // ("none" matching is exercised case-insensitively)
    RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(
        TEST_REQUEST_TIMEOUT,
        REPO_NAME,
        "test-snap"
    ).setWaitForCompletion(true).setRestoreGlobalState(true).setFeatureStates(new String[] { randomFrom("none", "NONE") }).get();
    assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
    // verify that the system resources still have the updated documents, i.e. have not been restored
    assertThat(getDocCount(SystemIndexTestPlugin.SYSTEM_INDEX_NAME), equalTo(2L));
    assertThat(getDocCount(SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME), equalTo(2L));
    // And the regular index has been restored
    assertThat(getDocCount(regularIndex), equalTo(1L));
}
/**
 * When a feature state is restored, all indices that are part of that feature state should be deleted, then the indices in
 * the snapshot should be restored.
 *
 * However, other feature states should be unaffected.
 */
public void testAllSystemIndicesAreRemovedWhenThatFeatureStateIsRestored() {
    createRepository(REPO_NAME, "fs");
    // Create a system index we'll snapshot and restore
    final String systemIndexInSnapshot = SystemIndexTestPlugin.SYSTEM_INDEX_NAME + "-1";
    indexDoc(systemIndexInSnapshot, "1", "purpose", "pre-snapshot doc");
    refresh(SystemIndexTestPlugin.SYSTEM_INDEX_NAME + "*");
    // And one we'll snapshot but not restore
    indexDoc(AnotherSystemIndexTestPlugin.SYSTEM_INDEX_NAME, "1", "purpose", "pre-snapshot doc");
    // And a regular index so we can avoid matching all indices on the restore
    final String regularIndex = "regular-index";
    indexDoc(regularIndex, "1", "purpose", "pre-snapshot doc");
    // run a snapshot including global state
    CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap")
        .setWaitForCompletion(true)
        .setIncludeGlobalState(true)
        .get();
    assertSnapshotSuccess(createSnapshotResponse);
    // Now index another doc and create another index in the same pattern as the first index,
    // i.e. belonging to the same feature state but absent from the snapshot
    final String systemIndexNotInSnapshot = SystemIndexTestPlugin.SYSTEM_INDEX_NAME + "-2";
    indexDoc(systemIndexInSnapshot, "2", "purpose", "post-snapshot doc");
    indexDoc(systemIndexNotInSnapshot, "1", "purpose", "post-snapshot doc");
    // Add another doc to the second system index, so we can be sure it hasn't been touched
    indexDoc(AnotherSystemIndexTestPlugin.SYSTEM_INDEX_NAME, "2", "purpose", "post-snapshot doc");
    refresh(systemIndexInSnapshot, systemIndexNotInSnapshot, AnotherSystemIndexTestPlugin.SYSTEM_INDEX_NAME);
    // Delete the regular index so we can restore it
    assertAcked(cluster().client().admin().indices().prepareDelete(regularIndex));
    // restore the snapshot, selecting only the SystemIndexTestPlugin feature state
    RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(
        TEST_REQUEST_TIMEOUT,
        REPO_NAME,
        "test-snap"
    ).setFeatureStates("SystemIndexTestPlugin").setWaitForCompletion(true).setRestoreGlobalState(true).get();
    assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
    // The index we created after the snapshot should be gone (whole feature state wiped before restore)
    assertFalse(indexExists(systemIndexNotInSnapshot));
    // And the first index should have a single doc
    assertThat(getDocCount(systemIndexInSnapshot), equalTo(1L));
    // And the system index whose state we didn't restore shouldn't have been touched and still have 2 docs
    assertThat(getDocCount(AnotherSystemIndexTestPlugin.SYSTEM_INDEX_NAME), equalTo(2L));
}
/**
 * Aliases on system indices are restored together with the feature state even when the restore
 * request sets {@code include_aliases=false}; regular-index aliases are skipped as requested.
 */
public void testSystemIndexAliasesAreAlwaysRestored() {
    createRepository(REPO_NAME, "fs");
    // Create a system index
    final String systemIndexName = SystemIndexTestPlugin.SYSTEM_INDEX_NAME + "-1";
    indexDoc(systemIndexName, "1", "purpose", "pre-snapshot doc");
    // Create a system data stream
    indexDataStream(SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME, "1", "purpose", "pre-snapshot doc");
    // And a regular index so we can avoid matching all indices on the restore
    final String regularIndex = "regular-index";
    final String regularAlias = "regular-alias";
    indexDoc(regularIndex, "1", "purpose", "pre-snapshot doc");
    // And make sure both the system index and the regular index have aliases
    final String systemIndexAlias = SystemIndexTestPlugin.SYSTEM_INDEX_NAME + "-alias";
    assertAcked(
        indicesAdmin().prepareAliases(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT)
            .addAlias(systemIndexName, systemIndexAlias)
            .addAlias(regularIndex, regularAlias)
            .get()
    );
    // run a snapshot including global state
    CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap")
        .setWaitForCompletion(true)
        .setIncludeGlobalState(true)
        .get();
    assertSnapshotSuccess(createSnapshotResponse);
    // And delete both the indices
    assertAcked(cluster().client().admin().indices().prepareDelete(regularIndex, systemIndexName));
    // Now restore the snapshot with no aliases
    RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(
        TEST_REQUEST_TIMEOUT,
        REPO_NAME,
        "test-snap"
    ).setFeatureStates("SystemIndexTestPlugin").setWaitForCompletion(true).setRestoreGlobalState(false).setIncludeAliases(false).get();
    assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
    // The regular index should exist, but its alias should not have been restored
    assertTrue(indexExists(regularIndex));
    assertFalse(indexExists(regularAlias));
    // And the system index, queried by alias, should have a doc
    assertTrue(indexExists(systemIndexName));
    assertTrue(indexExists(systemIndexAlias));
    assertThat(getDocCount(systemIndexAlias), equalTo(1L));
}
/**
 * Aliases on system data streams are restored together with the feature state even when the
 * restore request sets {@code include_aliases=false}; regular-index aliases are skipped as requested.
 */
public void testSystemDataStreamAliasesAreAlwaysRestored() {
    createRepository(REPO_NAME, "fs");
    // Create a system data stream
    indexDataStream(SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME, "1", "purpose", "pre-snapshot doc");
    // And a regular index so we can avoid matching all indices on the restore
    final String regularIndex = "regular-index";
    final String regularAlias = "regular-alias";
    indexDoc(regularIndex, "1", "purpose", "pre-snapshot doc");
    // And make sure they both have aliases; the data stream alias is created as a write alias
    final String systemDataStreamAlias = SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME + "-alias";
    assertAcked(
        indicesAdmin().prepareAliases(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT)
            .addAlias(regularIndex, regularAlias)
            .addAlias(SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME, systemDataStreamAlias, true)
            .get()
    );
    // And add a doc through the alias to ensure the alias works (data stream now has 2 docs)
    indexDataStream(systemDataStreamAlias, "2", "purpose", "post-alias doc");
    // Run a snapshot including global state
    CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap")
        .setWaitForCompletion(true)
        .setIncludeGlobalState(true)
        .get();
    assertSnapshotSuccess(createSnapshotResponse);
    // And delete the regular index and system data stream
    assertAcked(cluster().client().admin().indices().prepareDelete(regularIndex));
    assertAcked(
        client().execute(
            DeleteDataStreamAction.INSTANCE,
            new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME)
        ).actionGet()
    );
    // Now restore the snapshot with no aliases
    RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(
        TEST_REQUEST_TIMEOUT,
        REPO_NAME,
        "test-snap"
    )
        .setFeatureStates("SystemDataStreamTestPlugin")
        .setWaitForCompletion(true)
        .setRestoreGlobalState(false)
        .setIncludeAliases(false)
        .get();
    assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
    // The regular index should exist, but its alias should not have been restored
    assertTrue(indexExists(regularIndex));
    assertFalse(indexExists(regularAlias));
    // And the system data stream, queried by alias, should have 2 docs
    assertTrue(indexExists(SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME));
    assertTrue(indexExists(systemDataStreamAlias));
    assertThat(getDocCount(systemDataStreamAlias), equalTo(2L));
}
/**
 * A system data stream that has been deleted can still be restored from a snapshot
 * by selecting its feature state, even with {@code restore_global_state=false}.
 */
public void testDeletedDatastreamIsRestorable() {
    createRepository(REPO_NAME, "fs");
    // Create a system data stream
    indexDataStream(SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME, "1", "purpose", "pre-snapshot doc");
    // And a regular index so we can avoid matching all indices on the restore
    final String regularIndex = "regular-index";
    indexDoc(regularIndex, "1", "purpose", "pre-snapshot doc");
    // Run a snapshot including global state
    CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap")
        .setWaitForCompletion(true)
        .setIncludeGlobalState(true)
        .get();
    assertSnapshotSuccess(createSnapshotResponse);
    // And delete the regular index and system data stream
    assertAcked(cluster().client().admin().indices().prepareDelete(regularIndex));
    assertAcked(
        client().execute(
            DeleteDataStreamAction.INSTANCE,
            new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME)
        ).actionGet()
    );
    // Now restore the snapshot with no aliases
    RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(
        TEST_REQUEST_TIMEOUT,
        REPO_NAME,
        "test-snap"
    )
        .setFeatureStates("SystemDataStreamTestPlugin")
        .setWaitForCompletion(true)
        .setRestoreGlobalState(false)
        .setIncludeAliases(false)
        .get();
    // Fix: the restore response was previously ignored; assert shards were actually restored,
    // consistent with every other restore test in this suite
    assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
    // The system data stream should be back with its single pre-snapshot doc
    assertTrue(indexExists(SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME));
    assertThat(getDocCount(SystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME), equalTo(1L));
}
/**
 * Tests that the special "none" feature state name cannot be combined with other
 * feature state names, and an error occurs if it's tried — on both snapshot creation
 * and snapshot restore.
 */
public void testNoneFeatureStateMustBeAlone() {
    createRepository(REPO_NAME, "fs");
    // put a document in a system index
    indexDoc(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, "1", "purpose", "pre-snapshot doc");
    refresh(SystemIndexTestPlugin.SYSTEM_INDEX_NAME);
    // creating a snapshot with "none" mixed into other feature states must be rejected
    IllegalArgumentException createEx = expectThrows(
        IllegalArgumentException.class,
        clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap")
            .setWaitForCompletion(true)
            .setIncludeGlobalState(randomBoolean())
            .setFeatureStates("SystemIndexTestPlugin", "none", "AnotherSystemIndexTestPlugin")
    );
    assertThat(
        createEx.getMessage(),
        equalTo(
            "the feature_states value [none] indicates that no feature states should be "
                + "snapshotted, but other feature states were requested: [SystemIndexTestPlugin, none, AnotherSystemIndexTestPlugin]"
        )
    );
    // create a successful snapshot with global state/all features
    CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap")
        .setWaitForCompletion(true)
        .setIncludeGlobalState(true)
        .get();
    assertSnapshotSuccess(createSnapshotResponse);
    // restoring with "none" mixed into other feature states must likewise be rejected
    SnapshotRestoreException restoreEx = expectThrows(
        SnapshotRestoreException.class,
        clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap")
            .setWaitForCompletion(true)
            .setRestoreGlobalState(randomBoolean())
            .setFeatureStates("SystemIndexTestPlugin", "none")
    );
    assertThat(
        restoreEx.getMessage(),
        allOf(
            // the order of the requested feature states is non-deterministic so just check that it includes most of the right stuff
            containsString(
                "the feature_states value [none] indicates that no feature states should be restored, but other feature states were "
                    + "requested:"
            ),
            containsString("SystemIndexTestPlugin")
        )
    );
}
/**
 * Tests that using the special "none" feature state value creates a snapshot with no feature states included
 */
public void testNoneFeatureStateOnCreation() {
    createRepository(REPO_NAME, "fs");
    final String regularIndex = "test-idx";
    // seed a regular index and a system index
    indexDoc(regularIndex, "1", "purpose", "create an index that can be restored");
    indexDoc(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, "1", "purpose", "pre-snapshot doc");
    refresh(regularIndex, SystemIndexTestPlugin.SYSTEM_INDEX_NAME);
    // snapshot with global state but feature states explicitly set to "none" (case-insensitive)
    CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap")
        .setWaitForCompletion(true)
        .setIncludeGlobalState(true)
        .setFeatureStates(randomFrom("none", "NONE"))
        .get();
    assertSnapshotSuccess(createSnapshotResponse);
    // Verify that the system index was not included: collect every index name across all
    // snapshots in the repo and check the system index is absent while the regular one is present
    Set<String> snapshottedIndices = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, REPO_NAME)
        .get()
        .getSnapshots()
        .stream()
        .map(SnapshotInfo::indices)
        .flatMap(Collection::stream)
        .collect(Collectors.toSet());
    assertThat(snapshottedIndices, allOf(hasItem(regularIndex), not(hasItem(SystemIndexTestPlugin.SYSTEM_INDEX_NAME))));
}
/**
 * Ensures that if we can only capture a partial snapshot of a system index, then the feature state associated with that index is
 * not included in the snapshot, because it would not be safe to restore that feature state.
 */
public void testPartialSnapshotsOfSystemIndexRemovesFeatureState() throws Exception {
    final String partialIndexName = SystemIndexTestPlugin.SYSTEM_INDEX_NAME;
    final String fullIndexName = AnotherSystemIndexTestPlugin.SYSTEM_INDEX_NAME;
    createRepositoryNoVerify(REPO_NAME, "mock");
    // Creating the index that we'll get a partial snapshot of with a bunch of shards
    // (6 primaries, 0 replicas — losing a node guarantees a missing shard)
    assertAcked(prepareCreate(partialIndexName, 0, indexSettingsNoReplicas(6)));
    indexDoc(partialIndexName, "1", "purpose", "pre-snapshot doc");
    // And another one with the default settings, which should snapshot fully
    indexDoc(fullIndexName, "1", "purpose", "pre-snapshot doc");
    ensureGreen();
    // Stop a random data node so we lose a shard from the partial index
    internalCluster().stopRandomDataNode();
    assertBusy(
        () -> assertEquals(ClusterHealthStatus.RED, clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).get().getStatus()),
        30,
        TimeUnit.SECONDS
    );
    // Get ready to block the snapshot finalization on the master
    blockMasterFromFinalizingSnapshotOnIndexFile(REPO_NAME);
    // Start a snapshot and wait for it to hit the block, then kill the master to force a failover
    final String partialSnapName = "test-partial-snap";
    CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(
        TEST_REQUEST_TIMEOUT,
        REPO_NAME,
        partialSnapName
    ).setIncludeGlobalState(true).setWaitForCompletion(false).setPartial(true).get();
    assertThat(createSnapshotResponse.status(), equalTo(RestStatus.ACCEPTED));
    waitForBlock(internalCluster().getMasterName(), REPO_NAME);
    internalCluster().stopCurrentMasterNode();
    // Now get the snapshot and do our checks: the partially-snapshotted index's feature
    // state must have been dropped, while the fully-snapshotted one is retained
    assertBusy(() -> {
        GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, REPO_NAME)
            .setSnapshots(partialSnapName)
            .get();
        SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0);
        assertNotNull(snapshotInfo);
        assertThat(snapshotInfo.failedShards(), lessThan(snapshotInfo.totalShards()));
        List<String> statesInSnapshot = snapshotInfo.featureStates().stream().map(SnapshotFeatureInfo::getPluginName).toList();
        assertThat(statesInSnapshot, not(hasItem((new SystemIndexTestPlugin()).getFeatureName())));
        assertThat(statesInSnapshot, hasItem((new AnotherSystemIndexTestPlugin()).getFeatureName()));
    });
}
/**
 * Ensures that if we can only capture a partial snapshot of a system data stream, then the feature state associated
 * with that data stream is not included in the snapshot, because it would not be safe to restore that feature state.
 */
@AwaitsFix(bugUrl = "ES-11251")
public void testPartialSnapshotsOfSystemDataStreamRemovesFeatureState() throws Exception {
    final String partialIndexName = SystemDataStreamManyShardsTestPlugin.SYSTEM_DATASTREAM_NAME;
    final String fullIndexName = AnotherSystemDataStreamTestPlugin.SYSTEM_DATASTREAM_NAME;
    createRepositoryNoVerify(REPO_NAME, "mock");
    // Create the data stream that we'll get a partial snapshot of; its plugin configures many shards
    indexDataStream(partialIndexName, "1", "purpose", "pre-snapshot doc");
    // And another one with the default settings, which should snapshot fully
    indexDataStream(fullIndexName, "1", "purpose", "pre-snapshot doc");
    ensureGreen();
    // Stop a random data node so we lose a shard from the partial data stream
    internalCluster().stopRandomDataNode();
    assertBusy(() -> {
        var status = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).get().getStatus();
        assertThat(status, oneOf(ClusterHealthStatus.YELLOW, ClusterHealthStatus.RED));
    }, 30, TimeUnit.SECONDS);
    // Get ready to block the snapshot finalization on the master
    blockMasterFromFinalizingSnapshotOnIndexFile(REPO_NAME);
    // Start a snapshot and wait for it to hit the block, then kill the master to force a failover
    final String partialSnapName = "test-partial-snap";
    CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(
        TEST_REQUEST_TIMEOUT,
        REPO_NAME,
        partialSnapName
    ).setIncludeGlobalState(true).setWaitForCompletion(false).setPartial(true).get();
    assertThat(createSnapshotResponse.status(), equalTo(RestStatus.ACCEPTED));
    waitForBlock(internalCluster().getMasterName(), REPO_NAME);
    internalCluster().stopCurrentMasterNode();
    // Now get the snapshot and do our checks: the partially-snapshotted data stream's feature
    // state must have been dropped, while the fully-snapshotted one is retained
    assertBusy(() -> {
        GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, REPO_NAME)
            .setSnapshots(partialSnapName)
            .get();
        SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0);
        assertNotNull(snapshotInfo);
        assertThat(snapshotInfo.failedShards(), lessThan(snapshotInfo.totalShards()));
        List<String> statesInSnapshot = snapshotInfo.featureStates().stream().map(SnapshotFeatureInfo::getPluginName).toList();
        assertThat(statesInSnapshot, not(hasItem((new SystemDataStreamManyShardsTestPlugin()).getFeatureName())));
        assertThat(statesInSnapshot, hasItem((new AnotherSystemDataStreamTestPlugin()).getFeatureName()));
    }, 5L, TimeUnit.SECONDS);
    // Cleanup to prevent unrelated shutdown failures
    internalCluster().startDataOnlyNode();
}
/**
 * If a system index is deleted while a snapshot is in flight, the snapshot must drop both the
 * index and its feature state, while other feature states remain in the snapshot.
 * NOTE(review): relies on the {@code dataNodes} list field maintained elsewhere in this class —
 * assumed to track the currently running data node names in start order; confirm against the setup code.
 */
public void testParallelIndexDeleteRemovesFeatureState() throws Exception {
    final String indexToBeDeleted = SystemIndexTestPlugin.SYSTEM_INDEX_NAME;
    final String fullIndexName = AnotherSystemIndexTestPlugin.SYSTEM_INDEX_NAME;
    final String nonsystemIndex = "nonsystem-idx";
    final int nodesInCluster = internalCluster().size();
    // Stop one data node so we only have one data node to start with
    internalCluster().stopNode(dataNodes.get(1));
    dataNodes.remove(1);
    ensureStableCluster(nodesInCluster - 1);
    createRepositoryNoVerify(REPO_NAME, "mock");
    // Creating the index that we'll get a partial snapshot of with a bunch of shards
    assertAcked(prepareCreate(indexToBeDeleted, 0, indexSettingsNoReplicas(6)));
    indexDoc(indexToBeDeleted, "1", "purpose", "pre-snapshot doc");
    // And another one with the default settings
    indexDoc(fullIndexName, "1", "purpose", "pre-snapshot doc");
    // Now start up a new node and create an index that should get allocated to it
    // (routing requires the new node's name, so its shards block on that node's repo)
    dataNodes.add(internalCluster().startDataOnlyNode());
    createIndexWithContent(
        nonsystemIndex,
        indexSettingsNoReplicas(2).put("index.routing.allocation.require._name", dataNodes.get(1)).build()
    );
    refresh();
    ensureGreen();
    logger.info("--> Created indices, blocking repo on new data node...");
    blockDataNode(REPO_NAME, dataNodes.get(1));
    // Start a snapshot - need to do this async because some blocks will block this call
    logger.info("--> Blocked repo, starting snapshot...");
    final String partialSnapName = "test-partial-snap";
    ActionFuture<CreateSnapshotResponse> createSnapshotFuture = clusterAdmin().prepareCreateSnapshot(
        TEST_REQUEST_TIMEOUT,
        REPO_NAME,
        partialSnapName
    ).setIncludeGlobalState(true).setWaitForCompletion(true).setPartial(true).execute();
    logger.info("--> Started snapshot, waiting for block...");
    waitForBlock(dataNodes.get(1), REPO_NAME);
    // Delete the system index while the snapshot is blocked mid-flight
    logger.info("--> Repo hit block, deleting the index...");
    assertAcked(cluster().client().admin().indices().prepareDelete(indexToBeDeleted));
    logger.info("--> Index deleted, unblocking repo...");
    unblockNode(REPO_NAME, dataNodes.get(1));
    logger.info("--> Repo unblocked, checking that snapshot finished...");
    CreateSnapshotResponse createSnapshotResponse = createSnapshotFuture.get();
    logger.info(createSnapshotResponse.toString());
    assertThat(createSnapshotResponse.status(), equalTo(RestStatus.OK));
    logger.info("--> All operations complete, running assertions");
    SnapshotInfo snapshotInfo = createSnapshotResponse.getSnapshotInfo();
    assertNotNull(snapshotInfo);
    // The deleted index and its feature state are dropped; the untouched feature state remains
    assertThat(snapshotInfo.indices(), not(hasItem(indexToBeDeleted)));
    List<String> statesInSnapshot = snapshotInfo.featureStates().stream().map(SnapshotFeatureInfo::getPluginName).toList();
    assertThat(statesInSnapshot, not(hasItem((new SystemIndexTestPlugin()).getFeatureName())));
    assertThat(statesInSnapshot, hasItem((new AnotherSystemIndexTestPlugin()).getFeatureName()));
}
/**
 * Asserts that the given snapshot completed with at least one shard and that
 * every shard was snapshotted successfully.
 */
private void assertSnapshotSuccess(CreateSnapshotResponse createSnapshotResponse) {
    final var snapshotInfo = createSnapshotResponse.getSnapshotInfo();
    final int successfulShards = snapshotInfo.successfulShards();
    assertThat(successfulShards, greaterThan(0));
    assertThat(successfulShards, equalTo(snapshotInfo.totalShards()));
}
/** Returns the document count held by the primary shards of the given index. */
private long getDocCount(String indexName) {
    final var statsResponse = indicesAdmin().prepareStats(indexName).get();
    return statsResponse.getPrimaries().getDocs().getCount();
}
/**
 * Indexes a document into a data stream, prepending a current-time {@code @timestamp}
 * field (required for data stream writes) to the supplied field/value pairs.
 * Uses op-type CREATE, as data streams only accept create operations.
 */
private DocWriteResponse indexDataStream(String index, String id, String... source) {
    final String[] fields = new String[source.length + 2];
    fields[0] = "@timestamp";
    fields[1] = Long.toString(System.currentTimeMillis());
    for (int i = 0; i < source.length; i++) {
        fields[i + 2] = source[i];
    }
    return prepareIndex(index).setId(id)
        .setSource((Object[]) fields)
        .setOpType(DocWriteRequest.OpType.CREATE)
        .get();
}
public static
|
SystemResourceSnapshotIT
|
java
|
elastic__elasticsearch
|
x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformPivotRestSpecialCasesIT.java
|
{
"start": 1152,
"end": 20130
}
|
class ____ extends TransformRestTestCase {
// Whether the shared reviews source index has already been created; static so it survives
// across test cases, which reuse the same source index (see createIndexes()).
private static boolean indicesCreated = false;
// preserve indices in order to reuse source indices in several test cases
@Override
protected boolean preserveIndicesUponCompletion() {
    return true;
}
/**
 * Creates the shared reviews source index exactly once for the whole test class.
 */
@Before
public void createIndexes() throws IOException {
    // it's not possible to run it as @BeforeClass as clients aren't initialized then, so we need this little hack
    if (indicesCreated) {
        return;
    }
    createReviewsIndex();
    indicesCreated = true;
}
/**
 * Verifies a transform still works when an index template maps a field ("rating") as a scalar
 * while the pivot writes an object under the same name ("rating.avg"), i.e. the destination
 * mapping clash is handled gracefully.
 */
public void testIndexTemplateMappingClash() throws Exception {
    String transformId = "special_pivot_template_mappings_clash";
    String transformIndex = "special_pivot_template_mappings_clash";
    // create a template that defines a field "rating" with a type "float" which will clash later with
    // output field "rating.avg" in the pivot config
    final Request createIndexTemplateRequest = new Request("PUT", "_template/special_pivot_template");
    String template = """
        {
          "index_patterns": [ "special_pivot_template*" ],
          "mappings": {
            "properties": {
              "rating": {
                "type": "float"
              }
            }
          }
        }""";
    createIndexTemplateRequest.setJsonEntity(template);
    // legacy _template API is deprecated; expect and swallow the deprecation warning
    createIndexTemplateRequest.setOptions(expectWarnings(RestPutIndexTemplateAction.DEPRECATION_WARNING));
    Map<String, Object> createIndexTemplateResponse = entityAsMap(client().performRequest(createIndexTemplateRequest));
    assertThat(createIndexTemplateResponse.get("acknowledged"), equalTo(Boolean.TRUE));
    // pivot the reviews index by reviewer, averaging stars into "rating.avg"
    final Request createTransformRequest = new Request("PUT", getTransformEndpoint() + transformId);
    String config = Strings.format("""
        {
          "source": {
            "index": "%s"
          },
          "dest": {
            "index": "%s"
          },
          "pivot": {
            "group_by": {
              "reviewer": {
                "terms": {
                  "field": "user_id"
                }
              }
            },
            "aggregations": {
              "rating.avg": {
                "avg": {
                  "field": "stars"
                }
              }
            }
          }
        }""", REVIEWS_INDEX_NAME, transformIndex);
    createTransformRequest.setJsonEntity(config);
    Map<String, Object> createTransformResponse = entityAsMap(client().performRequest(createTransformRequest));
    assertThat(createTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE));
    startAndWaitForTransform(transformId, transformIndex);
    assertTrue(indexExists(transformIndex));
    // we expect 27 documents as there shall be 27 user_id's
    Map<String, Object> indexStats = getAsMap(transformIndex + "/_stats");
    assertEquals(27, XContentMapValues.extractValue("_all.total.docs.count", indexStats));
    // get and check some users; 3.878048780 is user_4's average stars in the fixture
    // (presumably deterministic output of createReviewsIndex() — confirm against the fixture)
    Map<String, Object> searchResult = getAsMap(transformIndex + "/_search?q=reviewer:user_4");
    assertEquals(1, XContentMapValues.extractValue("hits.total.value", searchResult));
    Number actual = (Number) ((List<?>) XContentMapValues.extractValue("hits.hits._source.rating.avg", searchResult)).get(0);
    assertEquals(3.878048780, actual.doubleValue(), 0.000001);
}
/**
 * Verifies that a percentiles aggregation in a pivot handles sparse data: a grouping bucket whose
 * documents have no value for the aggregated field still produces percentile keys, with null values.
 */
public void testSparseDataPercentiles() throws Exception {
    String indexName = "cpu-utilization";
    String transformIndex = "pivot-cpu";
    String transformId = "pivot-cpu";
    // create the source index with an explicit mapping: keyword host, integer cpu
    try (XContentBuilder builder = jsonBuilder()) {
        builder.startObject();
        {
            builder.startObject("mappings")
                .startObject("properties")
                .startObject("host")
                .field("type", "keyword")
                .endObject()
                .startObject("cpu")
                .field("type", "integer")
                .endObject()
                .endObject()
                .endObject();
        }
        builder.endObject();
        final StringEntity entity = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON);
        Request req = new Request("PUT", indexName);
        req.setEntity(entity);
        client().performRequest(req);
    }
    // bulk-index cpu readings for host-1 and host-2
    final StringBuilder bulk = new StringBuilder();
    bulk.append(Strings.format("""
        {"index":{"_index":"%s"}}
        {"host":"host-1","cpu": 22}
        {"index":{"_index":"%s"}}
        {"host":"host-1","cpu": 55}
        {"index":{"_index":"%s"}}
        {"host":"host-1","cpu": 23}
        {"index":{"_index":"%s"}}
        {"host":"host-2","cpu": 0}
        {"index":{"_index":"%s"}}
        {"host":"host-2","cpu": 99}
        {"index":{"_index":"%s"}}
        {"host":"host-1","cpu": 28}
        {"index":{"_index":"%s"}}
        {"host":"host-1","cpu": 77}
        """, indexName, indexName, indexName, indexName, indexName, indexName, indexName));
    // missing value for cpu — host-3 has no cpu field at all (the sparse case under test)
    bulk.append(Strings.format("""
        {"index":{"_index":"%s"}}
        {"host":"host-3"}
        """, indexName));
    final Request bulkRequest = new Request("POST", "/_bulk");
    bulkRequest.addParameter("refresh", "true");
    bulkRequest.setJsonEntity(bulk.toString());
    client().performRequest(bulkRequest);
    // pivot by host with a percentiles aggregation over cpu
    final Request createTransformRequest = new Request("PUT", getTransformEndpoint() + transformId);
    String config = Strings.format("""
        {
          "source": {
            "index": "%s"
          },
          "dest": {
            "index": "%s"
          },
          "pivot": {
            "group_by": {
              "host": {
                "terms": {
                  "field": "host"
                }
              }
            },
            "aggregations": {
              "p": {
                "percentiles": {
                  "field": "cpu"
                }
              }
            }
          }
        }""", indexName, transformIndex);
    createTransformRequest.setJsonEntity(config);
    Map<String, Object> createTransformResponse = entityAsMap(client().performRequest(createTransformRequest));
    assertThat(createTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE));
    startAndWaitForTransform(transformId, transformIndex);
    assertTrue(indexExists(transformIndex));
    // one output doc per host
    Map<String, Object> indexStats = getAsMap(transformIndex + "/_stats");
    assertEquals(3, XContentMapValues.extractValue("_all.total.docs.count", indexStats));
    // get and check percentiles for a host that has cpu values
    Map<String, Object> searchResult = getAsMap(transformIndex + "/_search?q=host:host-1");
    assertEquals(1, XContentMapValues.extractValue("hits.total.value", searchResult));
    @SuppressWarnings("unchecked")
    Map<String, Object> percentiles = (Map<String, Object>) ((List<?>) XContentMapValues.extractValue(
        "hits.hits._source.p",
        searchResult
    )).get(0);
    assertEquals(28.0, (double) percentiles.get("50"), 0.000001);
    assertEquals(76.12, (double) percentiles.get("99"), 0.000001);
    // the host with no cpu values must still emit the percentile keys, but with null values
    searchResult = getAsMap(transformIndex + "/_search?q=host:host-3");
    assertEquals(1, XContentMapValues.extractValue("hits.total.value", searchResult));
    @SuppressWarnings("unchecked")
    Map<String, Object> percentilesEmpty = (Map<String, Object>) ((List<?>) XContentMapValues.extractValue(
        "hits.hits._source.p",
        searchResult
    )).get(0);
    assertTrue(percentilesEmpty.containsKey("50"));
    assertNull(percentilesEmpty.get("50"));
    assertTrue(percentilesEmpty.containsKey("99"));
    assertNull(percentilesEmpty.get("99"));
}
/**
 * This test verifies that regardless of the max_page_search_size setting value used, the transform works correctly in the face of
 * restrictive bucket selector.
 * In the past there was a problem when there were no buckets (because bucket selector filtered them out) in a composite aggregation
 * page and for small enough max_page_search_size the transform stopped prematurely.
 * The problem was fixed by https://github.com/elastic/elasticsearch/pull/82852 and this test serves as a regression test for this PR.
 */
public void testRestrictiveBucketSelector() throws Exception {
    String indexName = "special_pivot_bucket_selector_reviews";
    createReviewsIndex(indexName, 1000, 327, "date", false, 5, "affiliate_id");
    // both a tiny and a large page size must yield the same 41 hits in the destination index
    verifyDestIndexHitsCount(indexName, "special_pivot_bucket_selector-10", 10, 41);
    verifyDestIndexHitsCount(indexName, "special_pivot_bucket_selector-10000", 10000, 41);
}
public void testEmptyKeyInTermsAgg() throws Exception {
String indexName = REVIEWS_INDEX_NAME;
{
final Request request = new Request("PUT", indexName + "/_doc/strange-business-id-1");
request.addParameter("refresh", "true");
request.setJsonEntity("""
{
"user_id": "user_0",
"business_id": ""
}""");
client().performRequest(request);
}
{
final Request request = new Request("PUT", indexName + "/_doc/strange-business-id-2");
request.addParameter("refresh", "true");
request.setJsonEntity("""
{
"user_id": "user_0",
"business_id": "business_x."
}""");
client().performRequest(request);
}
{
final Request request = new Request("PUT", indexName + "/_doc/strange-business-id-3");
request.addParameter("refresh", "true");
request.setJsonEntity("""
{
"user_id": "user_0",
"business_id": ".business_y"
}""");
client().performRequest(request);
}
{
final Request request = new Request("PUT", indexName + "/_doc/strange-business-id-4");
request.addParameter("refresh", "true");
request.setJsonEntity("""
{
"user_id": "user_0",
"business_id": "..."
}""");
client().performRequest(request);
}
String transformIndex = "empty-terms-agg-key";
String transformId = "empty-terms-agg-key";
final Request createTransformRequest = new Request("PUT", getTransformEndpoint() + transformId);
final String config = Strings.format("""
{
"source": {
"index": "%s"
},
"dest": {
"index": "%s"
},
"pivot": {
"group_by": {
"reviewer": {
"terms": {
"field": "user_id"
}
}
},
"aggregations": {
"businesses": {
"terms": {
"field": "business_id"
}
}
}
}
}""", indexName, transformIndex);
createTransformRequest.setJsonEntity(config);
Map<String, Object> createTransformResponse = entityAsMap(client().performRequest(createTransformRequest));
assertThat(createTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE));
startAndWaitForTransform(transformId, transformIndex);
assertTrue(indexExists(transformIndex));
Map<String, Object> searchResult = getAsMap(transformIndex + "/_search?q=reviewer:user_0");
long count = (Integer) XContentMapValues.extractValue("hits.total.value", searchResult);
assertThat(count, is(equalTo(1L)));
assertThat(XContentMapValues.extractValue("hits.hits._source.reviewer", searchResult), is(equalTo(List.of("user_0"))));
assertThat(
XContentMapValues.extractValue("hits.hits._source.businesses", searchResult),
is(equalTo(List.of(Map.of("business_0", 278, "", 1, "business_x.", 1, ".business_y", 1, "...", 1))))
);
{
final Request request = new Request("DELETE", indexName + "/_doc/strange-business-id-1");
request.addParameter("refresh", "true");
client().performRequest(request);
}
{
final Request request = new Request("DELETE", indexName + "/_doc/strange-business-id-2");
request.addParameter("refresh", "true");
client().performRequest(request);
}
{
final Request request = new Request("DELETE", indexName + "/_doc/strange-business-id-3");
request.addParameter("refresh", "true");
client().performRequest(request);
}
{
final Request request = new Request("DELETE", indexName + "/_doc/strange-business-id-4");
request.addParameter("refresh", "true");
client().performRequest(request);
}
}
public void testDataStreamUnsupportedAsDestIndex() throws Exception {
String transformId = "transform-data-stream-unsupported-as-dest";
String sourceIndex = REVIEWS_INDEX_NAME;
String dataStreamIndexTemplate = transformId + "_it";
String destDataStream = transformId + "_ds";
// Create transform
final Request createTransformRequest = new Request("PUT", getTransformEndpoint() + transformId);
createTransformRequest.setJsonEntity(Strings.format("""
{
"source": {
"index": "%s"
},
"dest": {
"index": "%s"
},
"frequency": "1m",
"pivot": {
"group_by": {
"user_id": {
"terms": {
"field": "user_id"
}
}
},
"aggregations": {
"stars_sum": {
"sum": {
"field": "stars"
}
},
"bs": {
"bucket_selector": {
"buckets_path": {
"stars_sum": "stars_sum.value"
},
"script": "params.stars_sum > 20"
}
}
}
}
}""", sourceIndex, destDataStream));
Map<String, Object> createTransformResponse = entityAsMap(client().performRequest(createTransformRequest));
assertThat(createTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE));
// Create index template for data stream
Request createIndexTemplateRequest = new Request("PUT", "_index_template/" + dataStreamIndexTemplate);
createIndexTemplateRequest.setJsonEntity(String.format(Locale.ROOT, """
{
"index_patterns": [ "%s*" ],
"data_stream": {}
}
""", destDataStream));
Response createIndexTemplateResponse = client().performRequest(createIndexTemplateRequest);
assertThat(createIndexTemplateResponse.getStatusLine().getStatusCode(), is(equalTo(RestStatus.OK.getStatus())));
// Create data stream
Request createDataStreamRequest = new Request("PUT", "_data_stream/" + destDataStream);
Response createDataStreamResponse = client().performRequest(createDataStreamRequest);
assertThat(createDataStreamResponse.getStatusLine().getStatusCode(), is(equalTo(RestStatus.OK.getStatus())));
// Try starting the transform, it fails because destination index cannot be created from the data stream template
ResponseException e = expectThrows(ResponseException.class, () -> startTransform(transformId));
assertThat(
e.getMessage(),
containsString(
String.format(
Locale.ROOT,
"cannot create index with name [%s], because it matches with template [%s] that creates data streams only, "
+ "use create data stream api instead",
destDataStream,
dataStreamIndexTemplate
)
)
);
}
private void verifyDestIndexHitsCount(String sourceIndex, String transformId, int maxPageSearchSize, long expectedDestIndexCount)
throws Exception {
String transformIndex = transformId;
String config = Strings.format("""
{
"source": {
"index": "%s"
},
"dest": {
"index": "%s"
},
"frequency": "1m",
"pivot": {
"group_by": {
"user_id": {
"terms": {
"field": "user_id"
}
}
},
"aggregations": {
"stars_sum": {
"sum": {
"field": "stars"
}
},
"bs": {
"bucket_selector": {
"buckets_path": {
"stars_sum": "stars_sum.value"
},
"script": "params.stars_sum > 20"
}
}
}
},
"settings": {
"max_page_search_size": %s
}
}""", sourceIndex, transformIndex, maxPageSearchSize);
Request createTransformRequest = new Request("PUT", getTransformEndpoint() + transformId);
createTransformRequest.setJsonEntity(config);
Map<String, Object> createTransformResponse = entityAsMap(client().performRequest(createTransformRequest));
assertThat(createTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE));
startAndWaitForTransform(transformId, transformIndex);
assertTrue(indexExists(transformIndex));
Map<String, Object> searchResult = getAsMap(transformIndex + "/_search");
long count = (Integer) XContentMapValues.extractValue("hits.total.value", searchResult);
assertThat(count, is(equalTo(expectedDestIndexCount)));
}
}
|
TransformPivotRestSpecialCasesIT
|
java
|
apache__camel
|
core/camel-api/src/main/java/org/apache/camel/spi/RestBindingJacksonXmlDataFormatFactory.java
|
{
"start": 953,
"end": 1431
}
|
interface ____ {
/**
* Service factory key.
*/
String FACTORY = "rest-binding-jacksonxml-dataformat-factory";
/**
* Setup XML data format
*/
void setupJacksonXml(
CamelContext camelContext, RestConfiguration config,
String type, Class<?> typeClass, String outType, Class<?> outTypeClass,
DataFormat jacksonXml, DataFormat outJacksonXml)
throws Exception;
}
|
RestBindingJacksonXmlDataFormatFactory
|
java
|
apache__kafka
|
connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/IncrementalCooperativeAssignor.java
|
{
"start": 2866,
"end": 47238
}
|
class ____ implements ConnectAssignor {
private final Logger log;
private final Time time;
private final int maxDelay;
private ConnectorsAndTasks previousAssignment;
private final ConnectorsAndTasks previousRevocation;
private boolean revokedInPrevious;
protected final Set<String> candidateWorkersForReassignment;
protected long scheduledRebalance;
protected int delay;
protected int previousGenerationId;
protected Set<String> previousMembers;
private final ExponentialBackoff consecutiveRevokingRebalancesBackoff;
private int numSuccessiveRevokingRebalances;
public IncrementalCooperativeAssignor(LogContext logContext, Time time, int maxDelay) {
this.log = logContext.logger(IncrementalCooperativeAssignor.class);
this.time = time;
this.maxDelay = maxDelay;
this.previousAssignment = ConnectorsAndTasks.EMPTY;
this.previousRevocation = new ConnectorsAndTasks.Builder().build();
this.scheduledRebalance = 0;
this.revokedInPrevious = false;
this.candidateWorkersForReassignment = new LinkedHashSet<>();
this.delay = 0;
this.previousGenerationId = -1;
this.previousMembers = Set.of();
this.numSuccessiveRevokingRebalances = 0;
// By default, initial interval is 1. The only corner case is when the user has set maxDelay to 0
// in which case, the exponential backoff delay should be 0 which would return the backoff delay to be 0 always
this.consecutiveRevokingRebalancesBackoff = new ExponentialBackoff(maxDelay == 0 ? 0 : 1, 40, maxDelay, 0);
}
@Override
public Map<String, ByteBuffer> performAssignment(String leaderId, ConnectProtocolCompatibility protocol,
List<JoinGroupResponseMember> allMemberMetadata,
WorkerCoordinator coordinator) {
log.debug("Performing task assignment");
Map<String, ExtendedWorkerState> memberConfigs = new HashMap<>();
for (JoinGroupResponseMember member : allMemberMetadata) {
memberConfigs.put(
member.memberId(),
IncrementalCooperativeConnectProtocol.deserializeMetadata(ByteBuffer.wrap(member.metadata())));
}
log.debug("Member configs: {}", memberConfigs);
// The new config offset is the maximum seen by any member. We always perform assignment using this offset,
// even if some members have fallen behind. The config offset used to generate the assignment is included in
// the response so members that have fallen behind will not use the assignment until they have caught up.
long maxOffset = memberConfigs.values().stream().map(ExtendedWorkerState::offset).max(Long::compare).get();
log.debug("Max config offset root: {}, local snapshot config offsets root: {}",
maxOffset, coordinator.configSnapshot().offset());
short protocolVersion = protocol.protocolVersion();
Long leaderOffset = ensureLeaderConfig(maxOffset, coordinator);
if (leaderOffset == null) {
Map<String, ExtendedAssignment> assignments = fillAssignments(
memberConfigs.keySet(), Assignment.CONFIG_MISMATCH,
leaderId, memberConfigs.get(leaderId).url(), maxOffset,
ClusterAssignment.EMPTY, 0, protocolVersion);
return serializeAssignments(assignments, protocolVersion);
}
return performTaskAssignment(leaderId, leaderOffset, memberConfigs, coordinator, protocolVersion);
}
private Long ensureLeaderConfig(long maxOffset, WorkerCoordinator coordinator) {
// If this leader is behind some other members, we can't do assignment
if (coordinator.configSnapshot().offset() < maxOffset) {
// We might be able to take a new snapshot to catch up immediately and avoid another round of syncing here.
// Alternatively, if this node has already passed the maximum reported by any other member of the group, it
// is also safe to use this newer state.
ClusterConfigState updatedSnapshot = coordinator.configFreshSnapshot();
if (updatedSnapshot.offset() < maxOffset) {
log.info("Was selected to perform assignments, but do not have latest config found in sync request. "
+ "Returning an empty configuration to trigger re-sync.");
return null;
} else {
coordinator.configSnapshot(updatedSnapshot);
return updatedSnapshot.offset();
}
}
return maxOffset;
}
/**
* Performs task assignment based on the incremental cooperative connect protocol.
* Read more on the design and implementation in:
* <a href="https://cwiki.apache.org/confluence/display/KAFKA/KIP-415%3A+Incremental+Cooperative+Rebalancing+in+Kafka+Connect">
* KIP-415</a>
*
* @param leaderId the ID of the group leader
* @param maxOffset the latest known offset of the configuration topic
* @param memberConfigs the metadata of all the members of the group as gather in the current
* round of rebalancing
* @param coordinator the worker coordinator instance that provide the configuration snapshot
* and get assigned the leader state during this assignment
* @param protocolVersion the Connect subprotocol version
* @return the serialized assignment of tasks to the whole group, including assigned or
* revoked tasks
*/
protected Map<String, ByteBuffer> performTaskAssignment(String leaderId, long maxOffset,
Map<String, ExtendedWorkerState> memberConfigs,
WorkerCoordinator coordinator, short protocolVersion) {
log.debug("Performing task assignment during generation: {} with memberId: {}",
coordinator.generationId(), coordinator.memberId());
Map<String, ConnectorsAndTasks> memberAssignments = transformValues(
memberConfigs,
memberConfig -> new ConnectorsAndTasks.Builder()
.with(memberConfig.assignment().connectors(), memberConfig.assignment().tasks())
.build()
);
ClusterAssignment clusterAssignment = performTaskAssignment(
coordinator.configSnapshot(),
coordinator.lastCompletedGenerationId(),
coordinator.generationId(),
memberAssignments
);
coordinator.leaderState(new LeaderState(memberConfigs, clusterAssignment.allAssignedConnectors(), clusterAssignment.allAssignedTasks()));
Map<String, ExtendedAssignment> assignments =
fillAssignments(memberConfigs.keySet(), Assignment.NO_ERROR, leaderId,
memberConfigs.get(leaderId).url(), maxOffset,
clusterAssignment,
delay, protocolVersion);
log.debug("Actual assignments: {}", assignments);
return serializeAssignments(assignments, protocolVersion);
}
// Visible for testing
ClusterAssignment performTaskAssignment(
ClusterConfigState configSnapshot,
int lastCompletedGenerationId,
int currentGenerationId,
Map<String, ConnectorsAndTasks> memberAssignments
) {
// Base set: The previous assignment of connectors-and-tasks is a standalone snapshot that
// can be used to calculate derived sets
log.debug("Previous assignments: {}", previousAssignment);
if (previousGenerationId != lastCompletedGenerationId) {
log.debug("Clearing the view of previous assignments due to generation mismatch between "
+ "previous generation ID {} and last completed generation ID {}. This can "
+ "happen if the leader fails to sync the assignment within a rebalancing round. "
+ "The following view of previous assignments might be outdated and will be "
+ "ignored by the leader in the current computation of new assignments. "
+ "Possibly outdated previous assignments: {}",
previousGenerationId, lastCompletedGenerationId, previousAssignment);
this.previousAssignment = ConnectorsAndTasks.EMPTY;
}
Set<String> configuredConnectors = new TreeSet<>(configSnapshot.connectors());
Set<ConnectorTaskId> configuredTasks = combineCollections(configuredConnectors, configSnapshot::tasks, Collectors.toSet());
// Base set: The set of configured connectors-and-tasks is a standalone snapshot that can
// be used to calculate derived sets
ConnectorsAndTasks configured = new ConnectorsAndTasks.Builder()
.with(configuredConnectors, configuredTasks).build();
log.debug("Configured assignments: {}", configured);
// Base set: The set of active connectors-and-tasks is a standalone snapshot that can be
// used to calculate derived sets
ConnectorsAndTasks activeAssignments = assignment(memberAssignments);
log.debug("Active assignments: {}", activeAssignments);
// This means that a previous revocation did not take effect. In this case, reset
// appropriately and be ready to re-apply revocation of tasks
if (!previousRevocation.isEmpty()) {
if (previousRevocation.connectors().stream().anyMatch(c -> activeAssignments.connectors().contains(c))
|| previousRevocation.tasks().stream().anyMatch(t -> activeAssignments.tasks().contains(t))) {
previousAssignment = activeAssignments;
}
previousRevocation.connectors().clear();
previousRevocation.tasks().clear();
}
// Derived set: The set of deleted connectors-and-tasks is a derived set from the set
// difference of previous - configured
ConnectorsAndTasks deleted = diff(previousAssignment, configured);
log.debug("Deleted assignments: {}", deleted);
// The connectors and tasks that are currently running on more than one worker each
ConnectorsAndTasks duplicated = duplicatedAssignments(memberAssignments);
log.trace("Duplicated assignments: {}", duplicated);
// Derived set: The set of lost or unaccounted connectors-and-tasks is a derived set from
// the set difference of previous - active - deleted
ConnectorsAndTasks lostAssignments = diff(previousAssignment, activeAssignments, deleted);
log.debug("Lost assignments: {}", lostAssignments);
// Derived set: The set of new connectors-and-tasks is a derived set from the set
// difference of configured - previous - active
ConnectorsAndTasks created = diff(configured, previousAssignment, activeAssignments);
log.debug("Created: {}", created);
// A collection of the current assignment excluding the connectors-and-tasks to be deleted
List<WorkerLoad> currentWorkerAssignment = workerAssignment(memberAssignments, deleted);
Map<String, ConnectorsAndTasks.Builder> toRevoke = new HashMap<>();
Map<String, ConnectorsAndTasks> deletedToRevoke = intersection(deleted, memberAssignments);
log.debug("Deleted connectors and tasks to revoke from each worker: {}", deletedToRevoke);
addAll(toRevoke, deletedToRevoke);
// Revoking redundant connectors/tasks if the workers have duplicate assignments
Map<String, ConnectorsAndTasks> duplicatedToRevoke = intersection(duplicated, memberAssignments);
log.debug("Duplicated connectors and tasks to revoke from each worker: {}", duplicatedToRevoke);
addAll(toRevoke, duplicatedToRevoke);
// Compute the assignment that will be applied across the cluster after this round of rebalance
// Later on, new submissions and lost-and-reassigned connectors and tasks will be added to these assignments,
// and load-balancing revocations will be removed from them.
List<WorkerLoad> nextWorkerAssignment = workerLoads(memberAssignments);
removeAll(nextWorkerAssignment, deletedToRevoke);
removeAll(nextWorkerAssignment, duplicatedToRevoke);
// Collect the lost assignments that are ready to be reassigned because the workers that were
// originally responsible for them appear to have left the cluster instead of rejoining within
// the scheduled rebalance delay. These assignments will be re-allocated to the existing workers
// in the cluster later on
ConnectorsAndTasks.Builder lostAssignmentsToReassignBuilder = new ConnectorsAndTasks.Builder();
handleLostAssignments(lostAssignments, lostAssignmentsToReassignBuilder, nextWorkerAssignment);
ConnectorsAndTasks lostAssignmentsToReassign = lostAssignmentsToReassignBuilder.build();
// Do not revoke resources for re-assignment while a delayed rebalance is active
if (delay == 0) {
Map<String, ConnectorsAndTasks> loadBalancingRevocations =
performLoadBalancingRevocations(configured, nextWorkerAssignment);
// If this round and the previous round involved revocation, we will calculate a delay for
// the next round when revoking rebalance would be allowed. Note that delay could be 0, in which
// case we would always revoke.
if (revokedInPrevious && !loadBalancingRevocations.isEmpty()) {
numSuccessiveRevokingRebalances++; // Should we consider overflow for this?
log.debug("Consecutive revoking rebalances observed. Computing delay and next scheduled rebalance.");
delay = (int) consecutiveRevokingRebalancesBackoff.backoff(numSuccessiveRevokingRebalances);
if (delay != 0) {
scheduledRebalance = time.milliseconds() + delay;
log.debug("Skipping revocations in the current round with a delay of {}ms. Next scheduled rebalance:{}",
delay, scheduledRebalance);
} else {
log.debug("Revoking assignments immediately since scheduled.rebalance.max.delay.ms is set to 0");
addAll(toRevoke, loadBalancingRevocations);
// Remove all newly-revoked connectors and tasks from the next assignment, both to
// ensure that they are not included in the assignments during this round, and to produce
// an accurate allocation of all newly-created and lost-and-reassigned connectors and tasks
// that will have to be distributed across the cluster during this round
removeAll(nextWorkerAssignment, loadBalancingRevocations);
}
} else if (!loadBalancingRevocations.isEmpty()) {
// We had a revocation in this round but not in the previous round. Let's store that state.
log.debug("Performing allocation-balancing revocation immediately as no revocations took place during the previous rebalance");
addAll(toRevoke, loadBalancingRevocations);
removeAll(nextWorkerAssignment, loadBalancingRevocations);
revokedInPrevious = true;
} else if (revokedInPrevious) {
// No revocations in this round but the previous round had one. Probably the workers
// have converged to a balanced load. We can reset the rebalance clock
log.debug("Previous round had revocations but this round didn't. Probably, the cluster has reached a " +
"balanced load. Resetting the exponential backoff clock");
revokedInPrevious = false;
numSuccessiveRevokingRebalances = 0;
} else {
// no-op
log.debug("No revocations in previous and current round.");
}
} else {
log.debug("Delayed rebalance is active. Delaying {}ms before revoking connectors and tasks: {}", delay, toRevoke);
revokedInPrevious = false;
}
// The complete set of connectors and tasks that should be newly-assigned during this round
ConnectorsAndTasks toAssign = new ConnectorsAndTasks.Builder()
.addAll(created)
.addAll(lostAssignmentsToReassign)
.build();
assignConnectors(nextWorkerAssignment, toAssign.connectors());
assignTasks(nextWorkerAssignment, toAssign.tasks());
Map<String, Collection<String>> nextConnectorAssignments = nextWorkerAssignment.stream()
.collect(Collectors.toMap(
WorkerLoad::worker,
WorkerLoad::connectors
));
Map<String, Collection<ConnectorTaskId>> nextTaskAssignments = nextWorkerAssignment.stream()
.collect(Collectors.toMap(
WorkerLoad::worker,
WorkerLoad::tasks
));
Map<String, Collection<String>> currentConnectorAssignments =
currentWorkerAssignment.stream().collect(Collectors.toMap(WorkerLoad::worker, WorkerLoad::connectors));
Map<String, Collection<ConnectorTaskId>> currentTaskAssignments =
currentWorkerAssignment.stream().collect(Collectors.toMap(WorkerLoad::worker, WorkerLoad::tasks));
Map<String, Collection<String>> incrementalConnectorAssignments =
diff(nextConnectorAssignments, currentConnectorAssignments);
Map<String, Collection<ConnectorTaskId>> incrementalTaskAssignments =
diff(nextTaskAssignments, currentTaskAssignments);
Map<String, ConnectorsAndTasks> revoked = buildAll(toRevoke);
previousAssignment = computePreviousAssignment(revoked, nextConnectorAssignments, nextTaskAssignments, lostAssignments);
previousGenerationId = currentGenerationId;
previousMembers = memberAssignments.keySet();
log.debug("Incremental connector assignments: {}", incrementalConnectorAssignments);
log.debug("Incremental task assignments: {}", incrementalTaskAssignments);
Map<String, Collection<String>> revokedConnectors = transformValues(revoked, ConnectorsAndTasks::connectors);
Map<String, Collection<ConnectorTaskId>> revokedTasks = transformValues(revoked, ConnectorsAndTasks::tasks);
return new ClusterAssignment(
incrementalConnectorAssignments,
incrementalTaskAssignments,
revokedConnectors,
revokedTasks,
diff(nextConnectorAssignments, revokedConnectors),
diff(nextTaskAssignments, revokedTasks)
);
}
private ConnectorsAndTasks computePreviousAssignment(Map<String, ConnectorsAndTasks> toRevoke,
Map<String, Collection<String>> connectorAssignments,
Map<String, Collection<ConnectorTaskId>> taskAssignments,
ConnectorsAndTasks lostAssignments) {
ConnectorsAndTasks previousAssignment = new ConnectorsAndTasks.Builder().with(
ConnectUtils.combineCollections(connectorAssignments.values()),
ConnectUtils.combineCollections(taskAssignments.values())
).build();
for (ConnectorsAndTasks revoked : toRevoke.values()) {
previousAssignment.connectors().removeAll(revoked.connectors());
previousAssignment.tasks().removeAll(revoked.tasks());
previousRevocation.connectors().addAll(revoked.connectors());
previousRevocation.tasks().addAll(revoked.tasks());
}
// Depends on the previous assignment's collections being sets at the moment.
// TODO: make it independent
previousAssignment.connectors().addAll(lostAssignments.connectors());
previousAssignment.tasks().addAll(lostAssignments.tasks());
return previousAssignment;
}
private ConnectorsAndTasks duplicatedAssignments(Map<String, ConnectorsAndTasks> memberAssignments) {
Map<String, Long> connectorInstanceCounts = combineCollections(
memberAssignments.values(),
ConnectorsAndTasks::connectors,
Collectors.groupingBy(Function.identity(), Collectors.counting())
);
Set<String> duplicatedConnectors = connectorInstanceCounts
.entrySet().stream()
.filter(entry -> entry.getValue() > 1L)
.map(Entry::getKey)
.collect(Collectors.toSet());
Map<ConnectorTaskId, Long> taskInstanceCounts = combineCollections(
memberAssignments.values(),
ConnectorsAndTasks::tasks,
Collectors.groupingBy(Function.identity(), Collectors.counting())
);
Set<ConnectorTaskId> duplicatedTasks = taskInstanceCounts
.entrySet().stream()
.filter(entry -> entry.getValue() > 1L)
.map(Entry::getKey)
.collect(Collectors.toSet());
return new ConnectorsAndTasks.Builder().with(duplicatedConnectors, duplicatedTasks).build();
}
// visible for testing
protected void handleLostAssignments(ConnectorsAndTasks lostAssignments,
ConnectorsAndTasks.Builder lostAssignmentsToReassign,
List<WorkerLoad> completeWorkerAssignment) {
// There are no lost assignments and there have been no successive revoking rebalances
if (lostAssignments.isEmpty() && !revokedInPrevious) {
resetDelay();
return;
}
final long now = time.milliseconds();
log.debug("Found the following connectors and tasks missing from previous assignments: {}", lostAssignments);
Set<String> activeMembers = completeWorkerAssignment.stream()
.map(WorkerLoad::worker)
.collect(Collectors.toSet());
if (scheduledRebalance <= 0 && activeMembers.containsAll(previousMembers)) {
log.debug("No worker seems to have departed the group during the rebalance. The "
+ "missing assignments that the leader is detecting are probably due to some "
+ "workers failing to receive the new assignments in the previous rebalance. "
+ "Will reassign missing tasks as new tasks");
lostAssignmentsToReassign.addAll(lostAssignments);
return;
} else if (maxDelay == 0) {
log.debug("Scheduled rebalance delays are disabled ({} = 0); "
+ "reassigning all lost connectors and tasks immediately",
SCHEDULED_REBALANCE_MAX_DELAY_MS_CONFIG
);
lostAssignmentsToReassign.addAll(lostAssignments);
return;
}
if (scheduledRebalance > 0 && now >= scheduledRebalance) {
// delayed rebalance expired and it's time to assign resources
log.debug("Delayed rebalance expired. Reassigning lost tasks");
List<WorkerLoad> candidateWorkerLoad = List.of();
if (!candidateWorkersForReassignment.isEmpty()) {
candidateWorkerLoad = pickCandidateWorkerForReassignment(completeWorkerAssignment);
}
if (!candidateWorkerLoad.isEmpty()) {
log.debug("Assigning lost tasks to {} candidate workers: {}",
candidateWorkerLoad.size(),
candidateWorkerLoad.stream().map(WorkerLoad::worker).collect(Collectors.joining(",")));
Iterator<WorkerLoad> candidateWorkerIterator = candidateWorkerLoad.iterator();
for (String connector : lostAssignments.connectors()) {
// Loop over the candidate workers as many times as it takes
if (!candidateWorkerIterator.hasNext()) {
candidateWorkerIterator = candidateWorkerLoad.iterator();
}
WorkerLoad worker = candidateWorkerIterator.next();
log.debug("Assigning connector id {} to member {}", connector, worker.worker());
worker.assign(connector);
}
candidateWorkerIterator = candidateWorkerLoad.iterator();
for (ConnectorTaskId task : lostAssignments.tasks()) {
if (!candidateWorkerIterator.hasNext()) {
candidateWorkerIterator = candidateWorkerLoad.iterator();
}
WorkerLoad worker = candidateWorkerIterator.next();
log.debug("Assigning task id {} to member {}", task, worker.worker());
worker.assign(task);
}
} else {
log.debug("No single candidate worker was found to assign lost tasks. Treating lost tasks as new tasks");
lostAssignmentsToReassign.addAll(lostAssignments);
}
resetDelay();
// Resetting the flag as now we can permit successive revoking rebalances.
// since we have gone through the full rebalance delay
revokedInPrevious = false;
} else {
candidateWorkersForReassignment
.addAll(candidateWorkersForReassignment(completeWorkerAssignment));
if (now < scheduledRebalance) {
// a delayed rebalance is in progress, but it's not yet time to reassign
// unaccounted resources
delay = calculateDelay(now);
log.debug("Delayed rebalance in progress. Task reassignment is postponed. New computed rebalance delay: {}", delay);
} else {
// This means scheduledRebalance == 0
// We could also extract the current minimum delay from the group, to make
// independent of consecutive leader failures, but this optimization is skipped
// at the moment
delay = maxDelay;
log.debug("Resetting rebalance delay to the max: {}. scheduledRebalance: {} now: {} diff scheduledRebalance - now: {}",
delay, scheduledRebalance, now, scheduledRebalance - now);
}
scheduledRebalance = now + delay;
}
}
private void resetDelay() {
candidateWorkersForReassignment.clear();
scheduledRebalance = 0;
if (delay != 0) {
log.debug("Resetting delay from previous value: {} to 0", delay);
}
delay = 0;
}
private Set<String> candidateWorkersForReassignment(List<WorkerLoad> completeWorkerAssignment) {
return completeWorkerAssignment.stream()
.filter(WorkerLoad::isEmpty)
.map(WorkerLoad::worker)
.collect(Collectors.toSet());
}
private List<WorkerLoad> pickCandidateWorkerForReassignment(List<WorkerLoad> completeWorkerAssignment) {
Map<String, WorkerLoad> activeWorkers = completeWorkerAssignment.stream()
.collect(Collectors.toMap(WorkerLoad::worker, Function.identity()));
return candidateWorkersForReassignment.stream()
.map(activeWorkers::get)
.filter(Objects::nonNull)
.collect(Collectors.toList());
}
private Map<String, ExtendedAssignment> fillAssignments(Collection<String> members, short error,
String leaderId, String leaderUrl, long maxOffset,
ClusterAssignment clusterAssignment,
int delay, short protocolVersion) {
Map<String, ExtendedAssignment> groupAssignment = new HashMap<>();
for (String member : members) {
Collection<String> connectorsToStart = clusterAssignment.newlyAssignedConnectors(member);
Collection<ConnectorTaskId> tasksToStart = clusterAssignment.newlyAssignedTasks(member);
Collection<String> connectorsToStop = clusterAssignment.newlyRevokedConnectors(member);
Collection<ConnectorTaskId> tasksToStop = clusterAssignment.newlyRevokedTasks(member);
ExtendedAssignment assignment =
new ExtendedAssignment(protocolVersion, error, leaderId, leaderUrl, maxOffset,
connectorsToStart, tasksToStart, connectorsToStop, tasksToStop, delay);
log.debug("Filling assignment: {} -> {}", member, assignment);
groupAssignment.put(member, assignment);
}
log.debug("Finished assignment");
return groupAssignment;
}
/**
* From a map of workers to assignment object generate the equivalent map of workers to byte
* buffers of serialized assignments.
*
* @param assignments the map of worker assignments
* @return the serialized map of assignments to workers
*/
protected Map<String, ByteBuffer> serializeAssignments(Map<String, ExtendedAssignment> assignments, short protocolVersion) {
boolean sessioned = protocolVersion >= CONNECT_PROTOCOL_V2;
return assignments.entrySet()
.stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
e -> IncrementalCooperativeConnectProtocol.serializeAssignment(e.getValue(), sessioned)));
}
private static ConnectorsAndTasks diff(ConnectorsAndTasks base,
ConnectorsAndTasks... toSubtract) {
Collection<String> connectors = new TreeSet<>(base.connectors());
Collection<ConnectorTaskId> tasks = new TreeSet<>(base.tasks());
for (ConnectorsAndTasks sub : toSubtract) {
connectors.removeAll(sub.connectors());
tasks.removeAll(sub.tasks());
}
return new ConnectorsAndTasks.Builder().with(connectors, tasks).build();
}
private static <T> Map<String, Collection<T>> diff(Map<String, Collection<T>> base,
Map<String, Collection<T>> toSubtract) {
Map<String, Collection<T>> incremental = new HashMap<>();
for (Map.Entry<String, Collection<T>> entry : base.entrySet()) {
List<T> values = new ArrayList<>(entry.getValue());
values.removeAll(toSubtract.getOrDefault(entry.getKey(), Set.of()));
incremental.put(entry.getKey(), values);
}
return incremental;
}
private ConnectorsAndTasks assignment(Map<String, ConnectorsAndTasks> memberAssignments) {
log.debug("Received assignments: {}", memberAssignments);
return new ConnectorsAndTasks.Builder().with(
ConnectUtils.combineCollections(memberAssignments.values(), ConnectorsAndTasks::connectors),
ConnectUtils.combineCollections(memberAssignments.values(), ConnectorsAndTasks::tasks)
).build();
}
/**
* Revoke connectors and tasks from each worker in the cluster until no worker is running more than it
* would be with a perfectly-balanced assignment.
* @param configured the set of configured connectors and tasks across the entire cluster
* @param workers the workers in the cluster, whose assignments should not include any deleted or duplicated connectors or tasks
* that are already due to be revoked from the worker in this rebalance
* @return which connectors and tasks should be revoked from which workers; never null, but may be empty
* if no load-balancing revocations are necessary or possible
*/
private Map<String, ConnectorsAndTasks> performLoadBalancingRevocations(
ConnectorsAndTasks configured,
Collection<WorkerLoad> workers
) {
if (log.isTraceEnabled()) {
workers.forEach(wl -> log.trace(
"Per worker current load size; worker: {} connectors: {} tasks: {}",
wl.worker(), wl.connectorsSize(), wl.tasksSize()));
}
if (workers.stream().allMatch(WorkerLoad::isEmpty)) {
log.trace("No load-balancing revocations required; all workers are either new "
+ "or will have all currently-assigned connectors and tasks revoked during this round"
);
return Map.of();
}
if (configured.isEmpty()) {
log.trace("No load-balancing revocations required; no connectors are currently configured on this cluster");
return Map.of();
}
Map<String, ConnectorsAndTasks.Builder> result = new HashMap<>();
Map<String, Set<String>> connectorRevocations = loadBalancingRevocations(
"connector",
configured.connectors().size(),
workers,
WorkerLoad::connectors
);
Map<String, Set<ConnectorTaskId>> taskRevocations = loadBalancingRevocations(
"task",
configured.tasks().size(),
workers,
WorkerLoad::tasks
);
connectorRevocations.forEach((worker, revoked) ->
result.computeIfAbsent(worker, w -> new ConnectorsAndTasks.Builder()).addConnectors(revoked)
);
taskRevocations.forEach((worker, revoked) ->
result.computeIfAbsent(worker, w -> new ConnectorsAndTasks.Builder()).addTasks(revoked)
);
return buildAll(result);
}
    /**
     * Computes which instances of a single resource type (connectors or tasks) must be revoked
     * from each worker so that, combined with a later round-robin assignment of the revoked and
     * any unassigned instances, no worker ends up above its fair share
     * ({@code totalToAllocate / workers.size()}, plus one for the remainder workers).
     *
     * <p>NOTE(review): the result depends on the iteration order of {@code workers} — both for
     * deciding which workers are permitted one "extra" instance and for choosing which specific
     * instances are revoked (taken from the front of each worker's allocation iterator).
     *
     * @param allocatedResourceName human-readable resource name ("connector"/"task"); used only in log messages
     * @param totalToAllocate total number of configured instances of this resource across the cluster
     * @param workers the current per-worker loads
     * @param workerAllocation extracts this resource's current allocation from a worker's load
     * @param <E> the resource instance type (connector name or {@code ConnectorTaskId})
     * @return map from worker name to the instances to revoke from it; empty if already balanced
     */
    private <E> Map<String, Set<E>> loadBalancingRevocations(
            String allocatedResourceName,
            int totalToAllocate,
            Collection<WorkerLoad> workers,
            Function<WorkerLoad, Collection<E>> workerAllocation
    ) {
        int totalWorkers = workers.size();
        // The minimum instances of this resource that should be assigned to each worker
        int minAllocatedPerWorker = totalToAllocate / totalWorkers;
        // How many workers are going to have to be allocated exactly one extra instance
        // (since the total number to allocate may not be a perfect multiple of the number of workers)
        int workersToAllocateExtra = totalToAllocate % totalWorkers;
        // Useful function to determine exactly how many instances of the resource a given worker is currently allocated
        Function<WorkerLoad, Integer> workerAllocationSize = workerAllocation.andThen(Collection::size);
        long workersAllocatedMinimum = workers.stream()
                .map(workerAllocationSize)
                .filter(n -> n == minAllocatedPerWorker)
                .count();
        long workersAllocatedSingleExtra = workers.stream()
                .map(workerAllocationSize)
                .filter(n -> n == minAllocatedPerWorker + 1)
                .count();
        // Fast path: exactly the remainder count of workers hold one extra instance and every
        // other worker holds exactly the minimum, so the cluster is already balanced.
        if (workersAllocatedSingleExtra == workersToAllocateExtra
                && workersAllocatedMinimum + workersAllocatedSingleExtra == totalWorkers) {
            log.trace(
                    "No load-balancing {} revocations required; the current allocations, when combined with any newly-created {}s, should be balanced",
                    allocatedResourceName,
                    allocatedResourceName
            );
            return Map.of();
        }
        Map<String, Set<E>> result = new HashMap<>();
        // How many workers we've allocated a single extra resource instance to
        int allocatedExtras = 0;
        // Calculate how many (and which) connectors/tasks to revoke from each worker here
        for (WorkerLoad worker : workers) {
            int currentAllocationSizeForWorker = workerAllocationSize.apply(worker);
            if (currentAllocationSizeForWorker <= minAllocatedPerWorker) {
                // This worker isn't allocated more than the minimum; no need to revoke anything
                continue;
            }
            int maxAllocationForWorker;
            if (allocatedExtras < workersToAllocateExtra) {
                // We'll allocate one of the extra resource instances to this worker
                allocatedExtras++;
                if (currentAllocationSizeForWorker == minAllocatedPerWorker + 1) {
                    // If the worker's running exactly one more than the minimum, and we're allowed to
                    // allocate an extra to it, there's no need to revoke anything
                    continue;
                }
                maxAllocationForWorker = minAllocatedPerWorker + 1;
            } else {
                maxAllocationForWorker = minAllocatedPerWorker;
            }
            // LinkedHashSet preserves the order in which instances are revoked, for deterministic logs/assignments
            Set<E> revokedFromWorker = new LinkedHashSet<>();
            result.put(worker.worker(), revokedFromWorker);
            Iterator<E> currentWorkerAllocation = workerAllocation.apply(worker).iterator();
            // Revoke resources from the worker until it isn't allocated any more than it should be
            for (int numRevoked = 0; currentAllocationSizeForWorker - numRevoked > maxAllocationForWorker; numRevoked++) {
                if (!currentWorkerAllocation.hasNext()) {
                    // Should never happen, but better to log a warning and move on than die and fail the whole rebalance if it does
                    log.warn(
                            "Unexpectedly ran out of {}s to revoke from worker {} while performing load-balancing revocations; " +
                                    "worker appears to still be allocated {} instances, which is more than the intended allocation of {}",
                            allocatedResourceName,
                            worker.worker(),
                            workerAllocationSize.apply(worker),
                            maxAllocationForWorker
                    );
                    break;
                }
                E revocation = currentWorkerAllocation.next();
                revokedFromWorker.add(revocation);
            }
        }
        return result;
    }
private int calculateDelay(long now) {
long diff = scheduledRebalance - now;
return diff > 0 ? (int) Math.min(diff, maxDelay) : 0;
}
/**
* Perform a round-robin assignment of connectors to workers with existing worker load. This
* assignment tries to balance the load between workers, by assigning connectors to workers
* that have equal load, starting with the least loaded workers.
*
* @param workerAssignment the current worker assignment; assigned connectors are added to this list
* @param connectors the connectors to be assigned
*/
protected void assignConnectors(List<WorkerLoad> workerAssignment, Collection<String> connectors) {
workerAssignment.sort(WorkerLoad.connectorComparator());
WorkerLoad first = workerAssignment.get(0);
Iterator<String> load = connectors.iterator();
while (load.hasNext()) {
int firstLoad = first.connectorsSize();
int upTo = IntStream.range(0, workerAssignment.size())
.filter(i -> workerAssignment.get(i).connectorsSize() > firstLoad)
.findFirst()
.orElse(workerAssignment.size());
for (WorkerLoad worker : workerAssignment.subList(0, upTo)) {
String connector = load.next();
log.debug("Assigning connector {} to {}", connector, worker.worker());
worker.assign(connector);
if (!load.hasNext()) {
break;
}
}
}
}
/**
* Perform a round-robin assignment of tasks to workers with existing worker load. This
* assignment tries to balance the load between workers, by assigning tasks to workers that
* have equal load, starting with the least loaded workers.
*
* @param workerAssignment the current worker assignment; assigned tasks are added to this list
* @param tasks the tasks to be assigned
*/
protected void assignTasks(List<WorkerLoad> workerAssignment, Collection<ConnectorTaskId> tasks) {
workerAssignment.sort(WorkerLoad.taskComparator());
WorkerLoad first = workerAssignment.get(0);
Iterator<ConnectorTaskId> load = tasks.iterator();
while (load.hasNext()) {
int firstLoad = first.tasksSize();
int upTo = IntStream.range(0, workerAssignment.size())
.filter(i -> workerAssignment.get(i).tasksSize() > firstLoad)
.findFirst()
.orElse(workerAssignment.size());
for (WorkerLoad worker : workerAssignment.subList(0, upTo)) {
ConnectorTaskId task = load.next();
log.debug("Assigning task {} to {}", task, worker.worker());
worker.assign(task);
if (!load.hasNext()) {
break;
}
}
}
}
private static List<WorkerLoad> workerAssignment(Map<String, ConnectorsAndTasks> memberAssignments,
ConnectorsAndTasks toExclude) {
ConnectorsAndTasks ignore = new ConnectorsAndTasks.Builder()
.with(toExclude.connectors(), toExclude.tasks())
.build();
return memberAssignments.entrySet().stream()
.map(e -> new WorkerLoad.Builder(e.getKey()).with(
e.getValue().connectors().stream()
.filter(v -> !ignore.connectors().contains(v))
.collect(Collectors.toList()),
e.getValue().tasks().stream()
.filter(v -> !ignore.tasks().contains(v))
.collect(Collectors.toList())
).build()
).collect(Collectors.toList());
}
private static void addAll(Map<String, ConnectorsAndTasks.Builder> base, Map<String, ConnectorsAndTasks> toAdd) {
toAdd.forEach((worker, assignment) -> base
.computeIfAbsent(worker, w -> new ConnectorsAndTasks.Builder())
.addAll(assignment)
);
}
    /** Builds every {@link ConnectorsAndTasks.Builder} value into its finished form, preserving keys. */
    private static <K> Map<K, ConnectorsAndTasks> buildAll(Map<K, ConnectorsAndTasks.Builder> builders) {
        return transformValues(builders, ConnectorsAndTasks.Builder::build);
    }
private static List<WorkerLoad> workerLoads(Map<String, ConnectorsAndTasks> memberAssignments) {
return memberAssignments.entrySet().stream()
.map(e -> new WorkerLoad.Builder(e.getKey()).with(e.getValue().connectors(), e.getValue().tasks()).build())
.collect(Collectors.toList());
}
private static void removeAll(List<WorkerLoad> workerLoads, Map<String, ConnectorsAndTasks> toRemove) {
workerLoads.forEach(workerLoad -> {
String worker = workerLoad.worker();
ConnectorsAndTasks toRemoveFromWorker = toRemove.getOrDefault(worker, ConnectorsAndTasks.EMPTY);
workerLoad.connectors().removeAll(toRemoveFromWorker.connectors());
workerLoad.tasks().removeAll(toRemoveFromWorker.tasks());
});
}
private static Map<String, ConnectorsAndTasks> intersection(ConnectorsAndTasks connectorsAndTasks, Map<String, ConnectorsAndTasks> assignments) {
return transformValues(assignments, assignment -> {
Collection<String> connectors = new HashSet<>(assignment.connectors());
connectors.retainAll(connectorsAndTasks.connectors());
Collection<ConnectorTaskId> tasks = new HashSet<>(assignment.tasks());
tasks.retainAll(connectorsAndTasks.tasks());
return new ConnectorsAndTasks.Builder().with(connectors, tasks).build();
});
}
static
|
IncrementalCooperativeAssignor
|
java
|
grpc__grpc-java
|
benchmarks/src/jmh/java/io/grpc/benchmarks/netty/AbstractBenchmark.java
|
{
"start": 3718,
"end": 20997
}
|
interface ____ the term 'benchmark' in its name.
*
* <p>>This allows traffic shaping to be applied to an IP address and to have the benchmarks
* detect it's presence and use it. E.g for Linux we can apply netem to a specific IP to
* do traffic shaping, bind that IP to the loopback adapter and then apply a label to that
* binding so that it appears as a child interface.
*
* <pre>
* sudo tc qdisc del dev lo root
* sudo tc qdisc add dev lo root handle 1: prio
* sudo tc qdisc add dev lo parent 1:1 handle 2: netem delay 0.1ms rate 10gbit
* sudo tc filter add dev lo parent 1:0 protocol ip prio 1 \
* u32 match ip dst 127.127.127.127 flowid 2:1
* sudo ip addr add dev lo 127.127.127.127/32 label lo:benchmark
* </pre>
*/
@SuppressWarnings("JdkObsolete") // No choice but to use Enumeration
private static InetAddress buildBenchmarkAddr() {
InetAddress tmp = null;
try {
Enumeration<NetworkInterface> networkInterfaces = NetworkInterface.getNetworkInterfaces();
outer: while (networkInterfaces.hasMoreElements()) {
NetworkInterface networkInterface = networkInterfaces.nextElement();
if (!networkInterface.isLoopback()) {
continue;
}
Enumeration<NetworkInterface> subInterfaces = networkInterface.getSubInterfaces();
while (subInterfaces.hasMoreElements()) {
NetworkInterface subLoopback = subInterfaces.nextElement();
if (subLoopback.getDisplayName().contains("benchmark")) {
tmp = subLoopback.getInetAddresses().nextElement();
System.out.println("\nResolved benchmark address to " + tmp + " on "
+ subLoopback.getDisplayName() + "\n\n");
break outer;
}
}
}
} catch (SocketException se) {
System.out.println("\nWARNING: Error trying to resolve benchmark interface \n" + se);
}
if (tmp == null) {
try {
System.out.println(
"\nWARNING: Unable to resolve benchmark interface, defaulting to localhost");
tmp = InetAddress.getLocalHost();
} catch (UnknownHostException uhe) {
throw new RuntimeException(uhe);
}
}
return tmp;
}
protected Server server;
protected ByteBuf request;
protected ByteBuf response;
protected MethodDescriptor<ByteBuf, ByteBuf> unaryMethod;
private MethodDescriptor<ByteBuf, ByteBuf> pingPongMethod;
private MethodDescriptor<ByteBuf, ByteBuf> flowControlledStreaming;
protected ManagedChannel[] channels;
protected AbstractBenchmark() {
}
/**
* Initialize the environment for the executor.
*/
public void setup(ExecutorType clientExecutor,
ExecutorType serverExecutor,
MessageSize requestSize,
MessageSize responseSize,
FlowWindowSize windowSize,
ChannelType channelType,
int maxConcurrentStreams,
int channelCount) throws Exception {
ServerCredentials serverCreds = InsecureServerCredentials.create();
NettyServerBuilder serverBuilder;
NettyChannelBuilder channelBuilder;
if (channelType == ChannelType.LOCAL) {
LocalAddress address = new LocalAddress("netty-e2e-benchmark");
serverBuilder = NettyServerBuilder.forAddress(address, serverCreds);
serverBuilder.channelType(LocalServerChannel.class);
channelBuilder = NettyChannelBuilder.forAddress(address);
channelBuilder.channelType(LocalChannel.class, LocalAddress.class);
} else {
ServerSocket sock = new ServerSocket();
// Pick a port using an ephemeral socket.
sock.bind(new InetSocketAddress(BENCHMARK_ADDR, 0));
SocketAddress address = sock.getLocalSocketAddress();
sock.close();
serverBuilder = NettyServerBuilder.forAddress(address, serverCreds)
.channelType(NioServerSocketChannel.class);
channelBuilder = NettyChannelBuilder.forAddress(address).channelType(NioSocketChannel.class,
InetSocketAddress.class);
}
if (serverExecutor == ExecutorType.DIRECT) {
serverBuilder.directExecutor();
}
if (clientExecutor == ExecutorType.DIRECT) {
channelBuilder.directExecutor();
}
// Always use a different worker group from the client.
ThreadFactory serverThreadFactory = new DefaultThreadFactory("STF pool", true /* daemon */);
serverBuilder.workerEventLoopGroup(new NioEventLoopGroup(0, serverThreadFactory));
serverBuilder.bossEventLoopGroup(new NioEventLoopGroup(1, serverThreadFactory));
// Always set connection and stream window size to same value
serverBuilder.flowControlWindow(windowSize.bytes());
channelBuilder.flowControlWindow(windowSize.bytes());
channelBuilder.negotiationType(NegotiationType.PLAINTEXT);
serverBuilder.maxConcurrentCallsPerConnection(maxConcurrentStreams);
// Create buffers of the desired size for requests and responses.
PooledByteBufAllocator alloc = PooledByteBufAllocator.DEFAULT;
// Use a heap buffer for now, since MessageFramer doesn't know how to directly convert this
// into a WritableBuffer
// TODO(carl-mastrangelo): convert this into a regular buffer() call. See
// https://github.com/grpc/grpc-java/issues/2062#issuecomment-234646216
request = alloc.heapBuffer(requestSize.bytes());
request.writerIndex(request.capacity() - 1);
response = alloc.heapBuffer(responseSize.bytes());
response.writerIndex(response.capacity() - 1);
// Simple method that sends and receives NettyByteBuf
unaryMethod = MethodDescriptor.<ByteBuf, ByteBuf>newBuilder()
.setType(MethodType.UNARY)
.setFullMethodName("benchmark/unary")
.setRequestMarshaller(new ByteBufOutputMarshaller())
.setResponseMarshaller(new ByteBufOutputMarshaller())
.build();
pingPongMethod = unaryMethod.toBuilder()
.setType(MethodType.BIDI_STREAMING)
.setFullMethodName("benchmark/pingPong")
.build();
flowControlledStreaming = pingPongMethod.toBuilder()
.setFullMethodName("benchmark/flowControlledStreaming")
.build();
// Server implementation of unary & streaming methods
serverBuilder.addService(
ServerServiceDefinition.builder(
new ServiceDescriptor("benchmark",
unaryMethod,
pingPongMethod,
flowControlledStreaming))
.addMethod(unaryMethod, new ServerCallHandler<ByteBuf, ByteBuf>() {
@Override
public ServerCall.Listener<ByteBuf> startCall(
final ServerCall<ByteBuf, ByteBuf> call,
Metadata headers) {
call.sendHeaders(new Metadata());
call.request(1);
return new ServerCall.Listener<ByteBuf>() {
@Override
public void onMessage(ByteBuf message) {
// no-op
message.release();
call.sendMessage(response.slice());
}
@Override
public void onHalfClose() {
call.close(Status.OK, new Metadata());
}
@Override
public void onCancel() {
}
@Override
public void onComplete() {
}
};
}
})
.addMethod(pingPongMethod, new ServerCallHandler<ByteBuf, ByteBuf>() {
@Override
public ServerCall.Listener<ByteBuf> startCall(
final ServerCall<ByteBuf, ByteBuf> call,
Metadata headers) {
call.sendHeaders(new Metadata());
call.request(1);
return new ServerCall.Listener<ByteBuf>() {
@Override
public void onMessage(ByteBuf message) {
message.release();
call.sendMessage(response.slice());
// Request next message
call.request(1);
}
@Override
public void onHalfClose() {
call.close(Status.OK, new Metadata());
}
@Override
public void onCancel() {
}
@Override
public void onComplete() {
}
};
}
})
.addMethod(flowControlledStreaming, new ServerCallHandler<ByteBuf, ByteBuf>() {
@Override
public ServerCall.Listener<ByteBuf> startCall(
final ServerCall<ByteBuf, ByteBuf> call,
Metadata headers) {
call.sendHeaders(new Metadata());
call.request(1);
return new ServerCall.Listener<ByteBuf>() {
@Override
public void onMessage(ByteBuf message) {
message.release();
while (call.isReady()) {
call.sendMessage(response.slice());
}
// Request next message
call.request(1);
}
@Override
public void onHalfClose() {
call.close(Status.OK, new Metadata());
}
@Override
public void onCancel() {
}
@Override
public void onComplete() {
}
@Override
public void onReady() {
while (call.isReady()) {
call.sendMessage(response.slice());
}
}
};
}
})
.build());
// Build and start the clients and servers
server = serverBuilder.build();
server.start();
channels = new ManagedChannel[channelCount];
ThreadFactory clientThreadFactory = new DefaultThreadFactory("CTF pool", true /* daemon */);
for (int i = 0; i < channelCount; i++) {
// Use a dedicated event-loop for each channel
channels[i] = channelBuilder
.eventLoopGroup(new NioEventLoopGroup(1, clientThreadFactory))
.build();
}
}
/**
* Start a continuously executing set of unary calls that will terminate when
* {@code done.get()} is true. Each completed call will increment the counter by the specified
* delta which benchmarks can use to measure QPS or bandwidth.
*/
protected void startUnaryCalls(int callsPerChannel,
final AtomicLong counter,
final AtomicBoolean done,
final long counterDelta) {
for (final ManagedChannel channel : channels) {
for (int i = 0; i < callsPerChannel; i++) {
StreamObserver<ByteBuf> observer = new StreamObserver<ByteBuf>() {
@Override
public void onNext(ByteBuf value) {
counter.addAndGet(counterDelta);
}
@Override
public void onError(Throwable t) {
done.set(true);
}
@Override
public void onCompleted() {
if (!done.get()) {
ByteBuf slice = request.slice();
ClientCalls.asyncUnaryCall(
channel.newCall(unaryMethod, CALL_OPTIONS), slice, this);
}
}
};
observer.onCompleted();
}
}
}
/**
* Start a continuously executing set of duplex streaming ping-pong calls that will terminate when
* {@code done.get()} is true. Each completed call will increment the counter by the specified
* delta which benchmarks can use to measure messages per second or bandwidth.
*/
protected CountDownLatch startStreamingCalls(int callsPerChannel, final AtomicLong counter,
final AtomicBoolean record, final AtomicBoolean done, final long counterDelta) {
final CountDownLatch latch = new CountDownLatch(callsPerChannel * channels.length);
for (final ManagedChannel channel : channels) {
for (int i = 0; i < callsPerChannel; i++) {
final ClientCall<ByteBuf, ByteBuf> streamingCall =
channel.newCall(pingPongMethod, CALL_OPTIONS);
final AtomicReference<StreamObserver<ByteBuf>> requestObserverRef =
new AtomicReference<>();
final AtomicBoolean ignoreMessages = new AtomicBoolean();
StreamObserver<ByteBuf> requestObserver = ClientCalls.asyncBidiStreamingCall(
streamingCall,
new StreamObserver<ByteBuf>() {
@Override
public void onNext(ByteBuf value) {
if (done.get()) {
if (!ignoreMessages.getAndSet(true)) {
requestObserverRef.get().onCompleted();
}
return;
}
requestObserverRef.get().onNext(request.slice());
if (record.get()) {
counter.addAndGet(counterDelta);
}
// request is called automatically because the observer implicitly has auto
// inbound flow control
}
@Override
public void onError(Throwable t) {
logger.log(Level.WARNING, "call error", t);
latch.countDown();
}
@Override
public void onCompleted() {
latch.countDown();
}
});
requestObserverRef.set(requestObserver);
requestObserver.onNext(request.slice());
requestObserver.onNext(request.slice());
}
}
return latch;
}
/**
* Start a continuously executing set of duplex streaming ping-pong calls that will terminate when
* {@code done.get()} is true. Each completed call will increment the counter by the specified
* delta which benchmarks can use to measure messages per second or bandwidth.
*/
protected CountDownLatch startFlowControlledStreamingCalls(int callsPerChannel,
final AtomicLong counter, final AtomicBoolean record, final AtomicBoolean done,
final long counterDelta) {
final CountDownLatch latch = new CountDownLatch(callsPerChannel * channels.length);
for (final ManagedChannel channel : channels) {
for (int i = 0; i < callsPerChannel; i++) {
final ClientCall<ByteBuf, ByteBuf> streamingCall =
channel.newCall(flowControlledStreaming, CALL_OPTIONS);
final AtomicReference<StreamObserver<ByteBuf>> requestObserverRef =
new AtomicReference<>();
final AtomicBoolean ignoreMessages = new AtomicBoolean();
StreamObserver<ByteBuf> requestObserver = ClientCalls.asyncBidiStreamingCall(
streamingCall,
new StreamObserver<ByteBuf>() {
@Override
public void onNext(ByteBuf value) {
StreamObserver<ByteBuf> obs = requestObserverRef.get();
if (done.get()) {
if (!ignoreMessages.getAndSet(true)) {
obs.onCompleted();
}
return;
}
if (record.get()) {
counter.addAndGet(counterDelta);
}
// request is called automatically because the observer implicitly has auto
// inbound flow control
}
@Override
public void onError(Throwable t) {
logger.log(Level.WARNING, "call error", t);
latch.countDown();
}
@Override
public void onCompleted() {
latch.countDown();
}
});
requestObserverRef.set(requestObserver);
// Add some outstanding requests to ensure the server is filling the connection
streamingCall.request(5);
requestObserver.onNext(request.slice());
}
}
return latch;
}
/**
* Shutdown all the client channels and then shutdown the server.
*/
protected void teardown() throws Exception {
logger.fine("shutting down channels");
for (ManagedChannel channel : channels) {
channel.shutdown();
}
logger.fine("shutting down server");
server.shutdown();
if (!server.awaitTermination(5, TimeUnit.SECONDS)) {
logger.warning("Failed to shutdown server");
}
logger.fine("server shut down");
for (ManagedChannel channel : channels) {
if (!channel.awaitTermination(1, TimeUnit.SECONDS)) {
logger.warning("Failed to shutdown client");
}
}
logger.fine("channels shut down");
}
}
|
with
|
java
|
playframework__playframework
|
core/play/src/main/java/play/http/websocket/Message.java
|
{
"start": 3035,
"end": 4146
}
|
class ____ extends Message {
private final Optional<Integer> statusCode;
private final String reason;
public Close(int statusCode) {
this(statusCode, "");
}
public Close(int statusCode, String reason) {
this(Optional.of(statusCode), reason);
}
public Close(Optional<Integer> statusCode, String reason) {
this.statusCode = statusCode;
this.reason = reason;
}
public Optional<Integer> code() {
return statusCode;
}
public String reason() {
return reason;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Close close = (Close) o;
return statusCode.equals(close.statusCode) && reason.equals(close.reason);
}
@Override
public int hashCode() {
int result = statusCode.hashCode();
result = 31 * result + reason.hashCode();
return result;
}
@Override
public String toString() {
return "CloseWebSocketMessage(" + statusCode + ", '" + reason + "')";
}
}
}
|
Close
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/NewFileSystemTest.java
|
{
"start": 2422,
"end": 2630
}
|
class ____ {
void f() throws IOException {
FileSystems.newFileSystem(Paths.get("."), (ClassLoader) null);
}
}
""")
.doTest();
}
}
|
Test
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctDoubleAggregatorFunctionTests.java
|
{
"start": 907,
"end": 2422
}
|
class ____ extends AggregatorFunctionTestCase {
@Override
protected SourceOperator simpleInput(BlockFactory blockFactory, int size) {
return new SequenceDoubleBlockSourceOperator(blockFactory, LongStream.range(0, size).mapToDouble(l -> ESTestCase.randomDouble()));
}
@Override
protected AggregatorFunctionSupplier aggregatorFunction() {
return new CountDistinctDoubleAggregatorFunctionSupplier(40000);
}
@Override
protected String expectedDescriptionOfAggregator() {
return "count_distinct of doubles";
}
@Override
protected void assertSimpleOutput(List<Page> input, Block result) {
long expected = input.stream().flatMapToDouble(p -> allDoubles(p.getBlock(0))).distinct().count();
long count = ((LongBlock) result).getLong(0);
// HLL is an approximation algorithm and precision depends on the number of values computed and the precision_threshold param
// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html
// For a number of values close to 10k and precision_threshold=1000, precision should be less than 10%
assertThat((double) count, closeTo(expected, expected * .1));
}
@Override
protected void assertOutputFromEmpty(Block b) {
assertThat(b.getPositionCount(), equalTo(1));
assertThat(valuesAtPositions(b, 0, 1), equalTo(List.of(List.of(0L))));
}
}
|
CountDistinctDoubleAggregatorFunctionTests
|
java
|
apache__kafka
|
server-common/src/main/java/org/apache/kafka/queue/EventQueue.java
|
{
"start": 964,
"end": 1726
}
|
interface ____ {
/**
* Run the event.
*/
void run() throws Exception;
/**
* Handle an exception that was either generated by running the event, or by the
* event queue's inability to run the event.
*
* @param e The exception. This will be a TimeoutException if the event hit
* its deadline before it could be scheduled.
* It will be a RejectedExecutionException if the event could not be
* scheduled because the event queue has already been closed.
* Otherwise, it will be whatever exception was thrown by run().
*/
default void handleException(Throwable e) {}
}
|
Event
|
java
|
spring-projects__spring-framework
|
spring-context/src/main/java/org/springframework/cache/concurrent/ConcurrentMapCacheFactoryBean.java
|
{
"start": 1495,
"end": 3015
}
|
class ____
implements FactoryBean<ConcurrentMapCache>, BeanNameAware, InitializingBean {
private String name = "";
private @Nullable ConcurrentMap<Object, Object> store;
private boolean allowNullValues = true;
private @Nullable ConcurrentMapCache cache;
/**
* Specify the name of the cache.
* <p>Default is "" (empty String).
*/
public void setName(String name) {
this.name = name;
}
/**
* Specify the ConcurrentMap to use as an internal store
* (possibly pre-populated).
* <p>Default is a standard {@link java.util.concurrent.ConcurrentHashMap}.
*/
public void setStore(ConcurrentMap<Object, Object> store) {
this.store = store;
}
/**
* Set whether to allow {@code null} values
* (adapting them to an internal null holder value).
* <p>Default is "true".
*/
public void setAllowNullValues(boolean allowNullValues) {
this.allowNullValues = allowNullValues;
}
@Override
public void setBeanName(String beanName) {
if (!StringUtils.hasLength(this.name)) {
setName(beanName);
}
}
@Override
public void afterPropertiesSet() {
this.cache = (this.store != null ? new ConcurrentMapCache(this.name, this.store, this.allowNullValues) :
new ConcurrentMapCache(this.name, this.allowNullValues));
}
@Override
public @Nullable ConcurrentMapCache getObject() {
return this.cache;
}
@Override
public Class<?> getObjectType() {
return ConcurrentMapCache.class;
}
@Override
public boolean isSingleton() {
return true;
}
}
|
ConcurrentMapCacheFactoryBean
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFSAuthWithBlobSpecificKeys.java
|
{
"start": 1117,
"end": 1422
}
|
class ____
extends TestNativeAzureFileSystemAuthorization {
@Override
public Configuration createConfiguration() {
Configuration conf = super.createConfiguration();
conf.set(KEY_USE_CONTAINER_SASKEY_FOR_ALL_ACCESS, "false");
return conf;
}
}
|
ITestNativeAzureFSAuthWithBlobSpecificKeys
|
java
|
google__guice
|
core/test/com/googlecode/guice/JakartaTest.java
|
{
"start": 12292,
"end": 12688
}
|
interface ____ {}
public static final Red RED =
new Red() {
@Override
public Class<? extends Annotation> annotationType() {
return Red.class;
}
@Override
public boolean equals(Object obj) {
return obj instanceof Red;
}
@Override
public int hashCode() {
return 0;
}
};
static
|
Red
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/StreamToStringTest.java
|
{
"start": 859,
"end": 1194
}
|
class ____ {
private final CompilationTestHelper compilationHelper =
CompilationTestHelper.newInstance(StreamToString.class, getClass());
@Test
public void positive() {
compilationHelper
.addSourceLines(
"Test.java",
"""
import java.util.Arrays;
|
StreamToStringTest
|
java
|
playframework__playframework
|
web/play-java-forms/src/main/java/play/data/FormFactory.java
|
{
"start": 439,
"end": 1104
}
|
class ____ {
private final MessagesApi messagesApi;
private final Formatters formatters;
private final ValidatorFactory validatorFactory;
private final Config config;
@Inject
public FormFactory(
MessagesApi messagesApi,
Formatters formatters,
ValidatorFactory validatorFactory,
Config config) {
this.messagesApi = messagesApi;
this.formatters = formatters;
this.validatorFactory = validatorFactory;
this.config = config;
}
/** @return a dynamic form. */
public DynamicForm form() {
return new DynamicForm(messagesApi, formatters, validatorFactory, config);
}
/**
* @param clazz the
|
FormFactory
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
|
{
"start": 51038,
"end": 51492
}
|
class ____ implements
FileSystemAccess.FileSystemExecutor<Void> {
private Path path;
public FSUnsetStoragePolicy(String path) {
this.path = new Path(path);
}
@Override
public Void execute(FileSystem fs) throws IOException {
fs.unsetStoragePolicy(path);
return null;
}
}
/**
* Executor that performs an allowSnapshot operation.
*/
@InterfaceAudience.Private
public static
|
FSUnsetStoragePolicy
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.