language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
apache__camel
|
core/camel-core-model/src/main/java/org/apache/camel/model/language/SimpleNoFileExpression.java
|
{
"start": 1189,
"end": 1443
}
|
class ____ extends TypedExpressionDefinition {
public SimpleNoFileExpression(SimpleExpression expression) {
super(expression);
}
@Override
public String getLanguage() {
return "simple-no-file";
}
}
|
SimpleNoFileExpression
|
java
|
hibernate__hibernate-orm
|
hibernate-testing/src/main/java/org/hibernate/testing/orm/junit/ImplicitListAsBagProvider.java
|
{
"start": 781,
"end": 980
}
|
class ____ implements SettingProvider.Provider<CollectionClassification> {
@Override
public CollectionClassification getSetting() {
return CollectionClassification.BAG;
}
}
|
ImplicitListAsBagProvider
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/analyzer/AnalyzerRules.java
|
{
"start": 4444,
"end": 10551
}
|
class ____ extends AnalyzerRule<LogicalPlan> {
@Override
protected LogicalPlan rule(LogicalPlan plan) {
if (plan.childrenResolved() == false) {
return plan;
}
return doRule(plan);
}
protected abstract LogicalPlan doRule(LogicalPlan plan);
}
public static Function resolveFunction(UnresolvedFunction uf, Configuration configuration, FunctionRegistry functionRegistry) {
Function f = null;
if (uf.analyzed()) {
f = uf;
} else if (uf.childrenResolved() == false) {
f = uf;
} else {
String functionName = functionRegistry.resolveAlias(uf.name());
if (functionRegistry.functionExists(functionName) == false) {
f = uf.missing(functionName, functionRegistry.listFunctions());
} else {
FunctionDefinition def = functionRegistry.resolveFunction(functionName);
f = uf.buildResolved(configuration, def);
}
}
return f;
}
public static List<Attribute> maybeResolveAgainstList(
UnresolvedAttribute u,
Collection<Attribute> attrList,
java.util.function.Function<Attribute, Attribute> fieldInspector
) {
// first take into account the qualified version
final String qualifier = u.qualifier();
final String name = u.name();
final boolean qualified = u.qualifier() != null;
Predicate<Attribute> predicate = a -> {
return qualified ? Objects.equals(qualifier, a.qualifiedName()) :
// if the field is unqualified
// first check the names directly
(Objects.equals(name, a.name()))
// but also if the qualifier might not be quoted and if there's any ambiguity with nested fields
|| Objects.equals(name, a.qualifiedName());
};
return maybeResolveAgainstList(predicate, () -> u, attrList, false, fieldInspector);
}
public static List<Attribute> maybeResolveAgainstList(
Predicate<Attribute> matcher,
Supplier<UnresolvedAttribute> unresolved,
Collection<Attribute> attrList,
boolean isPattern,
java.util.function.Function<Attribute, Attribute> fieldInspector
) {
List<Attribute> matches = new ArrayList<>();
for (Attribute attribute : attrList) {
if (attribute.synthetic() == false) {
boolean match = matcher.test(attribute);
if (match) {
matches.add(attribute);
}
}
}
if (matches.isEmpty()) {
return matches;
}
UnresolvedAttribute ua = unresolved.get();
// found exact match or multiple if pattern
if (matches.size() == 1 || isPattern) {
// NB: only add the location if the match is univocal; b/c otherwise adding the location will overwrite any preexisting one
matches.replaceAll(e -> fieldInspector.apply(e));
return matches;
}
// report ambiguity
List<String> refs = matches.stream().sorted((a, b) -> {
int lineDiff = a.sourceLocation().getLineNumber() - b.sourceLocation().getLineNumber();
int colDiff = a.sourceLocation().getColumnNumber() - b.sourceLocation().getColumnNumber();
return lineDiff != 0 ? lineDiff : (colDiff != 0 ? colDiff : a.qualifiedName().compareTo(b.qualifiedName()));
})
.map(
a -> "line "
+ a.sourceLocation().toString().substring(1)
+ " ["
+ (a.qualifier() != null ? "\"" + a.qualifier() + "\".\"" + a.name() + "\"" : a.name())
+ "]"
)
.toList();
return singletonList(
ua.withUnresolvedMessage(
"Reference ["
+ ua.qualifiedName()
+ "] is ambiguous (to disambiguate use quotes or qualifiers); "
+ "matches any of "
+ refs
)
);
}
public static Attribute handleSpecialFields(UnresolvedAttribute u, Attribute named, boolean allowCompound) {
// if it's a object/compound type, keep it unresolved with a nice error message
if (named instanceof FieldAttribute fa) {
// incompatible mappings
if (fa.field() instanceof InvalidMappedField imf) {
named = u.withUnresolvedMessage("Cannot use field [" + fa.name() + "] due to ambiguities being " + imf.errorMessage());
}
// unsupported types
else if (DataTypes.isUnsupported(fa.dataType())) {
UnsupportedEsField unsupportedField = (UnsupportedEsField) fa.field();
if (unsupportedField.hasInherited()) {
named = u.withUnresolvedMessage(
"Cannot use field ["
+ fa.name()
+ "] with unsupported type ["
+ unsupportedField.getOriginalType()
+ "] in hierarchy (field ["
+ unsupportedField.getInherited()
+ "])"
);
} else {
named = u.withUnresolvedMessage(
"Cannot use field [" + fa.name() + "] with unsupported type [" + unsupportedField.getOriginalType() + "]"
);
}
}
// compound fields
else if (allowCompound == false && DataTypes.isPrimitive(fa.dataType()) == false) {
named = u.withUnresolvedMessage(
"Cannot use field [" + fa.name() + "] type [" + fa.dataType().typeName() + "] only its subfields"
);
}
}
// make sure to copy the resolved attribute with the proper location
return named.withLocation(u.source());
}
}
|
BaseAnalyzerRule
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/converted/converter/LongToDateConversionTest.java
|
{
"start": 2591,
"end": 3030
}
|
class ____ implements AttributeConverter<DateAttribute, Date> {
@Override
public Date convertToDatabaseColumn(DateAttribute attribute) {
if ( attribute == null ) {
return null;
}
return new Date( attribute.field );
}
@Override
public DateAttribute convertToEntityAttribute(Date dbData) {
if ( dbData == null ) {
return null;
}
return new DateAttribute( dbData.getTime() );
}
}
}
|
DateAttributeConverter
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/language/XPathLanguageTest.java
|
{
"start": 990,
"end": 1613
}
|
class ____ extends AbstractSingleInputTypedLanguageTest<XPathExpression.Builder, XPathExpression> {
XPathLanguageTest() {
super("/foo/text()", factory -> factory.xpath().resultType(Integer.class));
}
@Override
protected Object defaultContentToSend() {
return "<foo>1</foo>";
}
@Override
protected TestContext testWithTypeContext() {
return new TestContext(defaultContentToSend(), 1, Integer.class);
}
@Override
protected TestContext testWithoutTypeContext() {
return new TestContext(defaultContentToSend(), 1, Integer.class);
}
}
|
XPathLanguageTest
|
java
|
apache__spark
|
common/utils-java/src/main/java/org/apache/spark/api/java/function/PairFlatMapFunction.java
|
{
"start": 1113,
"end": 1230
}
|
interface ____<T, K, V> extends Serializable {
Iterator<Tuple2<K, V>> call(T t) throws Exception;
}
|
PairFlatMapFunction
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/converted/converter/registrations/MismatchDuplicateRegistrationTests.java
|
{
"start": 1360,
"end": 1910
}
|
class ____ {
@Id
private Integer id;
private String name;
private Thing1 thing1;
private TroublesomeEntity() {
// for use by Hibernate
}
public TroublesomeEntity(Integer id, String name) {
this.id = id;
this.name = name;
}
public Integer getId() {
return id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Thing1 getThing1() {
return thing1;
}
public void setThing1(Thing1 thing1) {
this.thing1 = thing1;
}
}
}
|
TroublesomeEntity
|
java
|
elastic__elasticsearch
|
x-pack/plugin/fleet/src/javaRestTest/java/org/elasticsearch/xpack/fleet/FleetDataStreamIT.java
|
{
"start": 997,
"end": 10713
}
|
class ____ extends AbstractFleetIT {
static final String BASIC_AUTH_VALUE = basicAuthHeaderValue(
"x_pack_rest_user",
SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING
);
@Override
protected Settings restClientSettings() {
// Note that we are superuser here but DO NOT provide a product origin
return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", BASIC_AUTH_VALUE).build();
}
@Override
protected Settings restAdminSettings() {
// Note that we are both superuser here and provide a product origin
return Settings.builder()
.put(ThreadContext.PREFIX + ".Authorization", BASIC_AUTH_VALUE)
.put(ThreadContext.PREFIX + ".X-elastic-product-origin", "fleet")
.build();
}
@Override
protected boolean preserveSecurityIndicesUponCompletion() {
return true;
}
public void testAliasWithSystemDataStream() throws Exception {
// Create a system data stream
Request initialDocResponse = new Request("POST", ".fleet-actions-results/_doc");
initialDocResponse.setJsonEntity("{\"@timestamp\": 0}");
assertOK(adminClient().performRequest(initialDocResponse));
// Create a system index - this one has an alias
Request sysIdxRequest = new Request("PUT", ".fleet-artifacts");
assertOK(adminClient().performRequest(sysIdxRequest));
// Create a regular index
String regularIndex = "regular-idx";
String regularAlias = "regular-alias";
Request regularIdxRequest = new Request("PUT", regularIndex);
regularIdxRequest.setJsonEntity("{\"aliases\": {\"" + regularAlias + "\": {}}}");
assertOK(client().performRequest(regularIdxRequest));
assertGetAliasAPIBehavesAsExpected(regularIndex, regularAlias);
}
public void testAliasWithSystemIndices() throws Exception {
// Create a system index - this one has an alias
Request sysIdxRequest = new Request("PUT", ".fleet-artifacts");
assertOK(adminClient().performRequest(sysIdxRequest));
// Create a regular index
String regularIndex = "regular-idx";
String regularAlias = "regular-alias";
Request regularIdxRequest = new Request("PUT", regularIndex);
regularIdxRequest.setJsonEntity("{\"aliases\": {\"" + regularAlias + "\": {}}}");
assertOK(client().performRequest(regularIdxRequest));
assertGetAliasAPIBehavesAsExpected(regularIndex, regularAlias);
}
private void assertGetAliasAPIBehavesAsExpected(String regularIndex, String regularAlias) throws Exception {
// Get a non-system alias, should not warn or error
{
Request request = new Request("GET", "_alias/" + regularAlias);
Response response = client().performRequest(request);
assertOK(response);
assertThat(
EntityUtils.toString(response.getEntity()),
allOf(containsString(regularAlias), containsString(regularIndex), not(containsString(".fleet-artifacts")))
);
}
// Fully specify a regular index and alias, should not warn or error
{
Request request = new Request("GET", regularIndex + "/_alias/" + regularAlias);
Response response = client().performRequest(request);
assertOK(response);
assertThat(
EntityUtils.toString(response.getEntity()),
allOf(containsString(regularAlias), containsString(regularIndex), not(containsString(".fleet-artifacts")))
);
}
// The rest of these produce a warning
RequestOptions consumeWarningsOptions = RequestOptions.DEFAULT.toBuilder()
.setWarningsHandler(
warnings -> List.of(
"this request accesses system indices: [.fleet-artifacts-7], but "
+ "in a future major version, direct access to system indices will be prevented by default"
).equals(warnings) == false
)
.build();
// The base _alias route warns because there is a system index in the response
{
Request request = new Request("GET", "_alias");
request.setOptions(consumeWarningsOptions); // The result includes system indices, so we warn
Response response = client().performRequest(request);
assertOK(response);
assertThat(
EntityUtils.toString(response.getEntity()),
allOf(containsString(regularAlias), containsString(regularIndex), not(containsString(".fleet-actions-results")))
);
}
// Specify a system alias, should warn
{
Request request = new Request("GET", "_alias/.fleet-artifacts");
request.setOptions(consumeWarningsOptions);
Response response = client().performRequest(request);
assertOK(response);
assertThat(
EntityUtils.toString(response.getEntity()),
allOf(
containsString(".fleet-artifacts"),
containsString(".fleet-artifacts-7"),
not(containsString(regularAlias)),
not(containsString(regularIndex))
)
);
}
// Fully specify a system index and alias, should warn
{
Request request = new Request("GET", ".fleet-artifacts-7/_alias/.fleet-artifacts");
request.setOptions(consumeWarningsOptions);
Response response = client().performRequest(request);
assertOK(response);
assertThat(
EntityUtils.toString(response.getEntity()),
allOf(
containsString(".fleet-artifacts"),
containsString(".fleet-artifacts-7"),
not(containsString(regularAlias)),
not(containsString(regularIndex))
)
);
}
// Check an alias that doesn't exist
{
Request getAliasRequest = new Request("GET", "_alias/auditbeat-7.13.0");
try {
client().performRequest(getAliasRequest);
fail("this request should not succeed, as it is looking for an alias that does not exist");
} catch (ResponseException e) {
assertThat(e.getResponse().getStatusLine().getStatusCode(), is(404));
assertThat(
EntityUtils.toString(e.getResponse().getEntity()),
not(containsString("use and access is reserved for system operations"))
);
}
}
// Specify a system data stream as an alias - should 404
{
Request getAliasRequest = new Request("GET", "_alias/.fleet-actions-results");
try {
client().performRequest(getAliasRequest);
fail("this request should not succeed, as it is looking for an alias that does not exist");
} catch (ResponseException e) {
assertThat(e.getResponse().getStatusLine().getStatusCode(), is(404));
assertThat(
EntityUtils.toString(e.getResponse().getEntity()),
not(containsString("use and access is reserved for system operations"))
);
}
}
}
public void testCountWithSystemDataStream() throws Exception {
assertThatAPIWildcardResolutionWorks();
// Create a system data stream
Request initialDocResponse = new Request("POST", ".fleet-actions-results/_doc");
initialDocResponse.setJsonEntity("{\"@timestamp\": 0}");
assertOK(adminClient().performRequest(initialDocResponse));
assertThatAPIWildcardResolutionWorks();
// Create a system index - this one has an alias
Request sysIdxRequest = new Request("PUT", ".fleet-artifacts");
assertOK(adminClient().performRequest(sysIdxRequest));
assertThatAPIWildcardResolutionWorks();
assertThatAPIWildcardResolutionWorks(
singletonList(
"this request accesses system indices: [.fleet-artifacts-7], but in a future major version, direct access to system"
+ " indices will be prevented by default"
),
".f*"
);
// Create a regular index
String regularIndex = "regular-idx";
String regularAlias = "regular-alias";
Request regularIdxRequest = new Request("PUT", regularIndex);
regularIdxRequest.setJsonEntity("{\"aliases\": {\"" + regularAlias + "\": {}}}");
assertOK(client().performRequest(regularIdxRequest));
assertThatAPIWildcardResolutionWorks();
assertThatAPIWildcardResolutionWorks(emptyList(), "r*");
}
private void assertThatAPIWildcardResolutionWorks() throws Exception {
assertThatAPIWildcardResolutionWorks(emptyList(), null);
}
private void assertThatAPIWildcardResolutionWorks(List<String> warningsExpected, String indexPattern) throws Exception {
String path = indexPattern == null || indexPattern.isEmpty() ? "/_count" : "/" + indexPattern + "/_count";
Request countRequest = new Request("GET", path);
if (warningsExpected.isEmpty() == false) {
countRequest.setOptions(
countRequest.getOptions().toBuilder().setWarningsHandler(warnings -> warningsExpected.equals(warnings) == false)
);
}
assertOK(client().performRequest(countRequest));
}
}
|
FleetDataStreamIT
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/api/biginteger/BigIntegerAssert_isBetween_Test.java
|
{
"start": 833,
"end": 1240
}
|
class ____ extends BigIntegerAssertBaseTest {
@Override
protected BigIntegerAssert invoke_api_method() {
return assertions.isBetween(new BigInteger("6"), new BigInteger("8"));
}
@Override
protected void verify_internal_effects() {
verify(bigIntegers).assertIsBetween(getInfo(assertions), getActual(assertions), new BigInteger("6"), new BigInteger("8"));
}
}
|
BigIntegerAssert_isBetween_Test
|
java
|
apache__camel
|
dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/DebeziumPostgresComponentBuilderFactory.java
|
{
"start": 87969,
"end": 93555
}
|
class ____ returns SourceInfo
* schema and struct.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default:
* io.debezium.connector.postgresql.PostgresSourceInfoStructMaker
* Group: postgres
*
* @param sourceinfoStructMaker the value to set
* @return the dsl builder
*/
default DebeziumPostgresComponentBuilder sourceinfoStructMaker(java.lang.String sourceinfoStructMaker) {
doSetProperty("sourceinfoStructMaker", sourceinfoStructMaker);
return this;
}
/**
* Frequency for sending replication connection status updates to the
* server, given in milliseconds. Defaults to 10 seconds (10,000 ms).
*
* The option is a: <code>int</code> type.
*
* Default: 10s
* Group: postgres
*
* @param statusUpdateIntervalMs the value to set
* @return the dsl builder
*/
default DebeziumPostgresComponentBuilder statusUpdateIntervalMs(int statusUpdateIntervalMs) {
doSetProperty("statusUpdateIntervalMs", statusUpdateIntervalMs);
return this;
}
/**
* A delay period after the snapshot is completed and the streaming
* begins, given in milliseconds. Defaults to 0 ms.
*
* The option is a: <code>long</code> type.
*
* Default: 0ms
* Group: postgres
*
* @param streamingDelayMs the value to set
* @return the dsl builder
*/
default DebeziumPostgresComponentBuilder streamingDelayMs(long streamingDelayMs) {
doSetProperty("streamingDelayMs", streamingDelayMs);
return this;
}
/**
* A comma-separated list of regular expressions that match the
* fully-qualified names of tables to be excluded from monitoring.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: postgres
*
* @param tableExcludeList the value to set
* @return the dsl builder
*/
default DebeziumPostgresComponentBuilder tableExcludeList(java.lang.String tableExcludeList) {
doSetProperty("tableExcludeList", tableExcludeList);
return this;
}
/**
* Flag specifying whether built-in tables should be ignored.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: postgres
*
* @param tableIgnoreBuiltin the value to set
* @return the dsl builder
*/
default DebeziumPostgresComponentBuilder tableIgnoreBuiltin(boolean tableIgnoreBuiltin) {
doSetProperty("tableIgnoreBuiltin", tableIgnoreBuiltin);
return this;
}
/**
* The tables for which changes are to be captured.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: postgres
*
* @param tableIncludeList the value to set
* @return the dsl builder
*/
default DebeziumPostgresComponentBuilder tableIncludeList(java.lang.String tableIncludeList) {
doSetProperty("tableIncludeList", tableIncludeList);
return this;
}
/**
* Time, date, and timestamps can be represented with different kinds of
* precisions, including: 'adaptive' (the default) bases the precision
* of time, date, and timestamp values on the database column's
* precision; 'adaptive_time_microseconds' like 'adaptive' mode, but
* TIME fields always use microseconds precision; 'connect' always
* represents time, date, and timestamp values using Kafka Connect's
* built-in representations for Time, Date, and Timestamp, which uses
* millisecond precision regardless of the database columns' precision.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: adaptive
* Group: postgres
*
* @param timePrecisionMode the value to set
* @return the dsl builder
*/
default DebeziumPostgresComponentBuilder timePrecisionMode(java.lang.String timePrecisionMode) {
doSetProperty("timePrecisionMode", timePrecisionMode);
return this;
}
/**
* Whether delete operations should be represented by a delete event and
* a subsequent tombstone event (true) or only by a delete event
* (false). Emitting the tombstone event (the default behavior) allows
* Kafka to completely delete all events pertaining to the given key
* once the source record got deleted.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: postgres
*
* @param tombstonesOnDelete the value to set
* @return the dsl builder
*/
default DebeziumPostgresComponentBuilder tombstonesOnDelete(boolean tombstonesOnDelete) {
doSetProperty("tombstonesOnDelete", tombstonesOnDelete);
return this;
}
/**
* The name of the TopicNamingStrategy
|
that
|
java
|
spring-projects__spring-boot
|
smoke-test/spring-boot-smoke-test-hibernate/src/main/java/smoketest/jpa/repository/NoteRepository.java
|
{
"start": 727,
"end": 781
}
|
interface ____ {
List<Note> findAll();
}
|
NoteRepository
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/LegacyKeyedProcessOperatorTest.java
|
{
"start": 14713,
"end": 15307
}
|
class ____ extends ProcessFunction<Integer, String> {
static final OutputTag<Integer> INTEGER_OUTPUT_TAG = new OutputTag<Integer>("int-out") {};
static final OutputTag<Long> LONG_OUTPUT_TAG = new OutputTag<Long>("long-out") {};
@Override
public void processElement(Integer value, Context ctx, Collector<String> out)
throws Exception {
out.collect("IN:" + value);
ctx.output(INTEGER_OUTPUT_TAG, value);
ctx.output(LONG_OUTPUT_TAG, value.longValue());
}
}
private static
|
SideOutputProcessFunction
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundToLong9Evaluator.java
|
{
"start": 1088,
"end": 4681
}
|
class ____ implements EvalOperator.ExpressionEvaluator {
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(RoundToLong9Evaluator.class);
private final Source source;
private final EvalOperator.ExpressionEvaluator field;
private final long p0;
private final long p1;
private final long p2;
private final long p3;
private final long p4;
private final long p5;
private final long p6;
private final long p7;
private final long p8;
private final DriverContext driverContext;
private Warnings warnings;
public RoundToLong9Evaluator(Source source, EvalOperator.ExpressionEvaluator field, long p0,
long p1, long p2, long p3, long p4, long p5, long p6, long p7, long p8,
DriverContext driverContext) {
this.source = source;
this.field = field;
this.p0 = p0;
this.p1 = p1;
this.p2 = p2;
this.p3 = p3;
this.p4 = p4;
this.p5 = p5;
this.p6 = p6;
this.p7 = p7;
this.p8 = p8;
this.driverContext = driverContext;
}
@Override
public Block eval(Page page) {
try (LongBlock fieldBlock = (LongBlock) field.eval(page)) {
LongVector fieldVector = fieldBlock.asVector();
if (fieldVector == null) {
return eval(page.getPositionCount(), fieldBlock);
}
return eval(page.getPositionCount(), fieldVector).asBlock();
}
}
@Override
public long baseRamBytesUsed() {
long baseRamBytesUsed = BASE_RAM_BYTES_USED;
baseRamBytesUsed += field.baseRamBytesUsed();
return baseRamBytesUsed;
}
public LongBlock eval(int positionCount, LongBlock fieldBlock) {
try(LongBlock.Builder result = driverContext.blockFactory().newLongBlockBuilder(positionCount)) {
position: for (int p = 0; p < positionCount; p++) {
switch (fieldBlock.getValueCount(p)) {
case 0:
result.appendNull();
continue position;
case 1:
break;
default:
warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value"));
result.appendNull();
continue position;
}
long field = fieldBlock.getLong(fieldBlock.getFirstValueIndex(p));
result.appendLong(RoundToLong.process(field, this.p0, this.p1, this.p2, this.p3, this.p4, this.p5, this.p6, this.p7, this.p8));
}
return result.build();
}
}
public LongVector eval(int positionCount, LongVector fieldVector) {
try(LongVector.FixedBuilder result = driverContext.blockFactory().newLongVectorFixedBuilder(positionCount)) {
position: for (int p = 0; p < positionCount; p++) {
long field = fieldVector.getLong(p);
result.appendLong(p, RoundToLong.process(field, this.p0, this.p1, this.p2, this.p3, this.p4, this.p5, this.p6, this.p7, this.p8));
}
return result.build();
}
}
@Override
public String toString() {
return "RoundToLong9Evaluator[" + "field=" + field + ", p0=" + p0 + ", p1=" + p1 + ", p2=" + p2 + ", p3=" + p3 + ", p4=" + p4 + ", p5=" + p5 + ", p6=" + p6 + ", p7=" + p7 + ", p8=" + p8 + "]";
}
@Override
public void close() {
Releasables.closeExpectNoException(field);
}
private Warnings warnings() {
if (warnings == null) {
this.warnings = Warnings.createWarnings(
driverContext.warningsMode(),
source.source().getLineNumber(),
source.source().getColumnNumber(),
source.text()
);
}
return warnings;
}
static
|
RoundToLong9Evaluator
|
java
|
apache__camel
|
components/camel-disruptor/src/test/java/org/apache/camel/component/disruptor/vm/DisruptorVmInOnlyChainedTest.java
|
{
"start": 1044,
"end": 2292
}
|
class ____ extends AbstractVmTestSupport {
@Test
void testInOnlyDisruptorVmChained() throws Exception {
getMockEndpoint("mock:a").expectedBodiesReceived("start");
TestSupport.resolveMandatoryEndpoint(context2, "mock:b", MockEndpoint.class).expectedBodiesReceived("start-a");
getMockEndpoint("mock:c").expectedBodiesReceived("start-a-b");
template.sendBody("disruptor-vm:a", "start");
MockEndpoint.assertIsSatisfied(context);
MockEndpoint.assertIsSatisfied(context2);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("disruptor-vm:a").to("mock:a").setBody(simple("${body}-a")).to("disruptor-vm:b");
from("disruptor-vm:c").to("mock:c").setBody(simple("${body}-c"));
}
};
}
@Override
protected RouteBuilder createRouteBuilderForSecondContext() {
return new RouteBuilder() {
@Override
public void configure() {
from("disruptor-vm:b").to("mock:b").setBody(simple("${body}-b")).to("disruptor-vm:c");
}
};
}
}
|
DisruptorVmInOnlyChainedTest
|
java
|
apache__flink
|
flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/over/frame/RowUnboundedFollowingOverFrame.java
|
{
"start": 1498,
"end": 2475
}
|
class ____ extends UnboundedFollowingOverFrame {
private long leftBound;
public RowUnboundedFollowingOverFrame(
RowType valueType, GeneratedAggsHandleFunction aggsHandleFunction, long leftBound) {
super(valueType, aggsHandleFunction);
this.leftBound = leftBound;
}
@Override
public RowData process(int index, RowData current) throws Exception {
boolean bufferUpdated = index == 0;
// Ignore all the rows from the buffer util left bound.
ResettableExternalBuffer.BufferIterator iterator = input.newIterator(inputIndex);
BinaryRowData nextRow = OverWindowFrame.getNextOrNull(iterator);
while (nextRow != null && inputIndex < index + leftBound) {
inputIndex += 1;
bufferUpdated = true;
nextRow = OverWindowFrame.getNextOrNull(iterator);
}
return accumulateIterator(bufferUpdated, nextRow, iterator);
}
}
|
RowUnboundedFollowingOverFrame
|
java
|
spring-projects__spring-framework
|
spring-core/src/main/java/org/springframework/core/log/LogDelegateFactory.java
|
{
"start": 1135,
"end": 2623
}
|
class ____ {
private LogDelegateFactory() {
}
/**
* Create a composite logger that delegates to a primary or falls back on a
* secondary logger if logging for the primary logger is not enabled.
* <p>This may be used for fallback logging from lower-level packages that
* logically should log together with some higher-level package but the two
* don't happen to share a suitable parent package (for example, logging for the web
* and lower-level http and codec packages). For such cases the primary
* (class-based) logger can be wrapped with a shared fallback logger.
* @param primaryLogger primary logger to try first
* @param secondaryLogger secondary logger
* @param tertiaryLoggers optional vararg of further fallback loggers
* @return the resulting composite logger for the related categories
*/
public static Log getCompositeLog(Log primaryLogger, Log secondaryLogger, Log... tertiaryLoggers) {
List<Log> loggers = new ArrayList<>(2 + tertiaryLoggers.length);
loggers.add(primaryLogger);
loggers.add(secondaryLogger);
Collections.addAll(loggers, tertiaryLoggers);
return new CompositeLog(loggers);
}
/**
* Create a "hidden" logger with a category name prefixed with "_", thus
* precluding it from being enabled together with other log categories from
* the same package. This is useful for specialized output that is either
* too verbose or otherwise optional or unnecessary to see all the time.
* @param clazz the
|
LogDelegateFactory
|
java
|
spring-projects__spring-boot
|
module/spring-boot-actuator/src/test/java/org/springframework/boot/actuate/endpoint/web/annotation/ServletEndpointDiscovererTests.java
|
{
"start": 2802,
"end": 7165
}
|
class ____ {
private final ApplicationContextRunner contextRunner = new ApplicationContextRunner();
@Test
void getEndpointsWhenNoEndpointBeansShouldReturnEmptyCollection() {
this.contextRunner.withUserConfiguration(EmptyConfiguration.class)
.run(assertDiscoverer((discoverer) -> assertThat(discoverer.getEndpoints()).isEmpty()));
}
@Test
void getEndpointsShouldIncludeServletEndpoints() {
this.contextRunner.withUserConfiguration(TestServletEndpoint.class).run(assertDiscoverer((discoverer) -> {
Collection<ExposableServletEndpoint> endpoints = discoverer.getEndpoints();
assertThat(endpoints).hasSize(1);
ExposableServletEndpoint endpoint = endpoints.iterator().next();
assertThat(endpoint.getEndpointId()).isEqualTo(EndpointId.of("testservlet"));
assertThat(endpoint.getEndpointServlet()).isNotNull();
Object servlet = Extractors.byName("servlet").apply(endpoint.getEndpointServlet());
assertThat(ClassUtils.isCglibProxy(servlet)).isFalse();
assertThat(endpoint).isInstanceOf(DiscoveredEndpoint.class);
}));
}
@Test
void getEndpointsShouldDiscoverProxyServletEndpoints() {
this.contextRunner.withUserConfiguration(TestProxyServletEndpoint.class).run(assertDiscoverer((discoverer) -> {
Collection<ExposableServletEndpoint> endpoints = discoverer.getEndpoints();
assertThat(endpoints).hasSize(1);
ExposableServletEndpoint endpoint = endpoints.iterator().next();
assertThat(endpoint.getEndpointId()).isEqualTo(EndpointId.of("testservlet"));
assertThat(endpoint.getEndpointServlet()).isNotNull();
Object servlet = Extractors.byName("servlet").apply(endpoint.getEndpointServlet());
assertThat(ClassUtils.isCglibProxy(servlet)).isTrue();
assertThat(endpoint).isInstanceOf(DiscoveredEndpoint.class);
}));
}
@Test
void getEndpointsShouldNotDiscoverRegularEndpoints() {
this.contextRunner.withUserConfiguration(WithRegularEndpointConfiguration.class)
.run(assertDiscoverer((discoverer) -> {
Collection<ExposableServletEndpoint> endpoints = discoverer.getEndpoints();
List<EndpointId> ids = endpoints.stream().map(ExposableServletEndpoint::getEndpointId).toList();
assertThat(ids).containsOnly(EndpointId.of("testservlet"));
}));
}
@Test
void getEndpointWhenEndpointHasOperationsShouldThrowException() {
this.contextRunner.withUserConfiguration(TestServletEndpointWithOperation.class)
.run(assertDiscoverer((discoverer) -> assertThatIllegalStateException().isThrownBy(discoverer::getEndpoints)
.withMessageContaining("ServletEndpoints must not declare operations")));
}
@Test
void getEndpointWhenEndpointNotASupplierShouldThrowException() {
this.contextRunner.withUserConfiguration(TestServletEndpointNotASupplier.class)
.run(assertDiscoverer((discoverer) -> assertThatIllegalStateException().isThrownBy(discoverer::getEndpoints)
.withMessageContaining("must be a supplier")));
}
@Test
void getEndpointWhenEndpointSuppliesWrongTypeShouldThrowException() {
this.contextRunner.withUserConfiguration(TestServletEndpointSupplierOfWrongType.class)
.run(assertDiscoverer((discoverer) -> assertThatIllegalStateException().isThrownBy(discoverer::getEndpoints)
.withMessageContaining("must supply an EndpointServlet")));
}
@Test
// A supplier returning null is an error surfaced at discovery time rather than
// at request time.
void getEndpointWhenEndpointSuppliesNullShouldThrowException() {
this.contextRunner.withUserConfiguration(TestServletEndpointSupplierOfNull.class)
.run(assertDiscoverer((discoverer) -> assertThatIllegalStateException().isThrownBy(discoverer::getEndpoints)
.withMessageContaining("must not supply null")));
}
@Test
// The discoverer's RuntimeHintsRegistrar must register reflective construction
// of ServletEndpointFilter (needed for AOT / native-image support).
void shouldRegisterHints() {
RuntimeHints runtimeHints = new RuntimeHints();
new ServletEndpointDiscoverer.ServletEndpointDiscovererRuntimeHints().registerHints(runtimeHints,
getClass().getClassLoader());
assertThat(RuntimeHintsPredicates.reflection()
.onType(ServletEndpointFilter.class)
.withMemberCategories(MemberCategory.INVOKE_DECLARED_CONSTRUCTORS)).accepts(runtimeHints);
}
// Adapts a ServletEndpointDiscoverer assertion into a ContextConsumer: builds a
// fresh discoverer (no base path, no filters) against the running context and
// hands it to the given consumer.
private ContextConsumer<AssertableApplicationContext> assertDiscoverer(
Consumer<ServletEndpointDiscoverer> consumer) {
return (applicationContext) -> consumer
.accept(new ServletEndpointDiscoverer(applicationContext, null, Collections.emptyList()));
}
@Configuration(proxyBeanMethods = false)
static
|
ServletEndpointDiscovererTests
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/MissingFailTest.java
|
{
"start": 21697,
"end": 22293
}
|
class ____ extends TestCase {
// A try/catch whose caught exception is deliberately tolerated; the variable
// name 'tolerated' (not 'expected') signals that a missing fail() should not
// be flagged here.
public void testMethod() {
try {
new String();
} catch (IllegalArgumentException | IllegalStateException tolerated) {
}
}
}
""")
.doTest();
}
// verify that exceptions not named 'expected' are ignored
@Test
public void toleratedExceptionWithAssert() {
compilationHelper
.addSourceLines(
"test/A.java",
"""
package test;
import junit.framework.TestCase;
public
|
A
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinDoubleGroupingAggregatorFunctionTests.java
|
{
"start": 814,
"end": 1946
}
|
// Grouping-aggregator test for "min of doubles": feeds random (group, value)
// tuples and verifies each group's minimum against a plain stream computation.
class ____ extends GroupingAggregatorFunctionTests {
    @Override
    protected SourceOperator simpleInput(BlockFactory blockFactory, int end) {
        // Random group ids in [0, 4] paired with random double values.
        return new LongDoubleTupleBlockSourceOperator(
            blockFactory,
            LongStream.range(0, end).mapToObj(l -> Tuple.tuple(randomLongBetween(0, 4), randomDouble()))
        );
    }
    @Override
    protected AggregatorFunctionSupplier aggregatorFunction() {
        return new MinDoubleAggregatorFunctionSupplier();
    }
    @Override
    protected String expectedDescriptionOfAggregator() {
        return "min of doubles";
    }
    @Override
    protected void assertSimpleGroup(List<Page> input, Block result, int position, Long group) {
        // Recompute the expected minimum over all doubles belonging to this group.
        OptionalDouble min = input.stream().flatMapToDouble(p -> allDoubles(p, group)).min();
        if (min.isEmpty()) {
            // A group with no values must yield a null entry, not a default value.
            assertThat(result.isNull(position), equalTo(true));
            return;
        }
        assertThat(result.isNull(position), equalTo(false));
        assertThat(((DoubleBlock) result).getDouble(position), equalTo(min.getAsDouble()));
    }
}
|
MinDoubleGroupingAggregatorFunctionTests
|
java
|
google__dagger
|
dagger-spi/main/java/dagger/model/Key.java
|
{
"start": 1759,
"end": 6797
}
|
class ____ {
/**
 * A {@link javax.inject.Qualifier} annotation that provides a unique namespace prefix
 * for the type of this key, unwrapped from its equivalence {@link Wrapper}.
 * Empty when the key is unqualified.
 */
public final Optional<AnnotationMirror> qualifier() {
return wrappedQualifier().map(Wrapper::get);
}
/**
 * The type represented by this key, unwrapped from its equivalence wrapper.
 */
public final TypeMirror type() {
return wrappedType().get();
}
/**
* A {@link javax.inject.Qualifier} annotation that provides a unique namespace prefix
* for the type of this key.
*
* Despite documentation in {@link AnnotationMirror}, equals and hashCode aren't implemented
* to represent logical equality, so {@link AnnotationMirrors#equivalence()}
* provides this facility.
*/
abstract Optional<Equivalence.Wrapper<AnnotationMirror>> wrappedQualifier();
/**
* The type represented by this key.
*
* As documented in {@link TypeMirror}, equals and hashCode aren't implemented to represent
* logical equality, so {@link MoreTypes#equivalence()} wraps this type.
*/
abstract Equivalence.Wrapper<TypeMirror> wrappedType();
/**
* Distinguishes keys for multibinding contributions that share a {@link #type()} and {@link
* #qualifier()}.
*
* <p>Each multibound map and set has a synthetic multibinding that depends on the specific
* contributions to that map or set using keys that identify those multibinding contributions.
*
* <p>Absent except for multibinding contributions.
*/
public abstract Optional<MultibindingContributionIdentifier> multibindingContributionIdentifier();
/** Returns a {@link Builder} that inherits the properties of this key. */
public abstract Builder toBuilder();
// The main hashCode/equality bottleneck is in MoreTypes.equivalence(). It's possible that we can
// avoid this by tuning that method. Perhaps we can also avoid the issue entirely by interning all
// Keys
@Memoized
@Override
public abstract int hashCode();
@Override
public abstract boolean equals(Object o);
/**
* Returns a String rendering of an {@link AnnotationMirror} that includes attributes in the order
* defined in the annotation type. This will produce the same output for {@linkplain
* AnnotationMirrors#equivalence() equal} {@link AnnotationMirror}s even if default values are
* omitted or their attributes were written in different orders, e.g. {@code @A(b = "b", c = "c")}
* and {@code @A(c = "c", b = "b", attributeWithDefaultValue = "default value")}.
*/
// TODO(ronshapiro): move this to auto-common
// Renders an AnnotationMirror deterministically: attributes appear in the order
// declared on the annotation type, defaults are always included, and a lone
// "value" attribute is printed without the "value=" prefix. Equal mirrors
// therefore always render identically.
static String stableAnnotationMirrorToString(AnnotationMirror qualifier) {
StringBuilder result = new StringBuilder("@").append(qualifier.getAnnotationType());
ImmutableMap<ExecutableElement, AnnotationValue> valuesWithDefaults =
AnnotationMirrors.getAnnotationValuesWithDefaults(qualifier);
if (valuesWithDefaults.isEmpty()) {
return result.toString();
}
ImmutableMap.Builder<String, String> rendered = ImmutableMap.builder();
valuesWithDefaults.forEach(
(member, value) ->
rendered.put(member.getSimpleName().toString(), stableAnnotationValueToString(value)));
ImmutableMap<String, String> byName = rendered.build();
result.append('(');
if (byName.size() == 1 && byName.containsKey("value")) {
// Omit "value ="
result.append(byName.get("value"));
} else {
result.append(Joiner.on(", ").withKeyValueSeparator("=").join(byName));
}
return result.append(')').toString();
}
// Renders a single AnnotationValue deterministically: strings are quoted and
// escaped as they would appear in source, nested annotations recurse through
// stableAnnotationMirrorToString, and arrays render as "{a, b}".
private static String stableAnnotationValueToString(AnnotationValue annotationValue) {
return annotationValue.accept(
new SimpleAnnotationValueVisitor8<String, Void>() {
@Override
protected String defaultAction(Object value, Void ignore) {
// Primitives, enum constants, and type mirrors already have a stable toString().
return value.toString();
}
@Override
public String visitString(String value, Void ignore) {
// CodeBlock's $S quotes and escapes the literal to match source form.
return CodeBlock.of("$S", value).toString();
}
@Override
public String visitAnnotation(AnnotationMirror value, Void ignore) {
return stableAnnotationMirrorToString(value);
}
@Override
public String visitArray(List<? extends AnnotationValue> value, Void ignore) {
return value.stream()
.map(Key::stableAnnotationValueToString)
.collect(joining(", ", "{", "}"));
}
},
null);
}
// Renders "qualifier type multibindingId", skipping whichever optional parts
// are absent; relies on stableAnnotationMirrorToString so equal keys always
// print identically.
@Override
public final String toString() {
String qualifierString = qualifier().map(Key::stableAnnotationMirrorToString).orElse(null);
Object contributionId = multibindingContributionIdentifier().orElse(null);
return Joiner.on(' ').skipNulls().join(qualifierString, type(), contributionId);
}
/** Returns a {@link Builder} for {@link Key}s, pre-populated with the given type. */
public static Builder builder(TypeMirror type) {
return new AutoValue_Key.Builder().type(type);
}
/** A builder for {@link Key}s. */
@AutoValue.Builder
public abstract static
|
Key
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhancement/update/JoinedInheritanceTest.java
|
{
"start": 3742,
"end": 4575
}
|
// Simple JPA entity used to exercise bytecode-enhancement update behavior in a
// joined-inheritance hierarchy.
class ____ {
@Id
private Long id;
@Column
private String description;
// Mapped explicitly to the "nbr_of_seats" column.
@Column(name = "nbr_of_seats")
private int nbrOfSeats;
// Lazily fetched basic attribute (requires bytecode enhancement to be lazy);
// defaults to false.
@Basic(fetch = LAZY)
private Boolean large = false;
// No-arg constructor required by JPA.
public Plane() {
}
public Plane(Long id, String description, int nbrOfSeats, Boolean large) {
this.id = id;
this.description = description;
this.nbrOfSeats = nbrOfSeats;
this.large = large;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public int getNbrOfSeats() {
return nbrOfSeats;
}
public void setNbrOfSeats(int nbrOfSeats) {
this.nbrOfSeats = nbrOfSeats;
}
public Boolean getLarge() {
return large;
}
public void setLarge(Boolean large) {
this.large = large;
}
}
}
|
Plane
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java
|
{
"start": 1921,
"end": 48499
}
|
class ____ extends ESAllocationTestCase {
// With allow_rebalance=ALWAYS, a newly joined node receives a rebalanced shard
// even though test2's replicas are still unassigned. test2 is pinned to
// node1/node2 so only test1 can move.
public void testAlways() {
AllocationService strategy = createAllocationService(
Settings.builder()
.put(
ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()
)
.build()
);
Metadata metadata = Metadata.builder()
.put(IndexMetadata.builder("test1").settings(indexSettings(IndexVersion.current(), 1, 1)))
.put(
IndexMetadata.builder("test2")
.settings(indexSettings(IndexVersion.current(), 1, 1).put("index.routing.allocation.include._id", "node1,node2"))
)
.build();
RoutingTable initialRoutingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY)
.addAsNew(metadata.getProject().index("test1"))
.addAsNew(metadata.getProject().index("test2"))
.build();
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).routingTable(initialRoutingTable).build();
logger.info("start two nodes");
clusterState = ClusterState.builder(clusterState)
.nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).localNodeId("node1").masterNodeId("node1"))
.build();
clusterState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
// After initial reroute: primaries initializing, replicas still unassigned.
for (int i = 0; i < clusterState.routingTable().index("test1").size(); i++) {
assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
for (int i = 0; i < clusterState.routingTable().index("test2").size(); i++) {
assertThat(clusterState.routingTable().index("test2").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start all the primary shards for test1, replicas will start initializing");
clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test1");
for (int i = 0; i < clusterState.routingTable().index("test1").size(); i++) {
assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
}
for (int i = 0; i < clusterState.routingTable().index("test2").size(); i++) {
assertThat(clusterState.routingTable().index("test2").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start the test1 replica shards");
clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test1");
for (int i = 0; i < clusterState.routingTable().index("test1").size(); i++) {
assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
}
for (int i = 0; i < clusterState.routingTable().index("test2").size(); i++) {
assertThat(clusterState.routingTable().index("test2").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("now, start 1 more nodes, check that rebalancing will happen (for test1) because we set it to always");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
clusterState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
// Exactly one shard, from test1, moves to the new node.
final var newNodesIterator = clusterState.getRoutingNodes().node("node3").iterator();
assertThat(newNodesIterator.next().shardId().getIndex().getName(), equalTo("test1"));
assertFalse(newNodesIterator.hasNext());
}
// With allow_rebalance=indices_primaries_active, rebalancing to a new node is
// allowed once every primary is active, even if replicas (test2's) are not.
public void testClusterPrimariesActive1() {
AllocationService strategy = createAllocationService(
Settings.builder()
.put(
ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_PRIMARIES_ACTIVE.toString()
)
.build()
);
Metadata metadata = Metadata.builder()
.put(IndexMetadata.builder("test1").settings(settings(IndexVersion.current())).numberOfShards(1).numberOfReplicas(1))
.put(
IndexMetadata.builder("test2")
.settings(
settings(IndexVersion.current()).put(
IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getConcreteSettingForNamespace("_id").getKey(),
"node1,node2"
)
)
.numberOfShards(1)
.numberOfReplicas(1)
)
.build();
RoutingTable initialRoutingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY)
.addAsNew(metadata.getProject().index("test1"))
.addAsNew(metadata.getProject().index("test2"))
.build();
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).routingTable(initialRoutingTable).build();
logger.info("start two nodes")
public void testClusterPrimariesActive2() {
AllocationService strategy = createAllocationService(
Settings.builder()
.put(
ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_PRIMARIES_ACTIVE.toString()
)
.build()
);
Metadata metadata = Metadata.builder()
.put(IndexMetadata.builder("test1").settings(settings(IndexVersion.current())).numberOfShards(1).numberOfReplicas(1))
.put(IndexMetadata.builder("test2").settings(settings(IndexVersion.current())).numberOfShards(1).numberOfReplicas(1))
.build();
RoutingTable initialRoutingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY)
.addAsNew(metadata.getProject().index("test1"))
.addAsNew(metadata.getProject().index("test2"))
.build();
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).routingTable(initialRoutingTable).build();
logger.info("start two nodes");
clusterState = ClusterState.builder(clusterState)
.nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")))
.build();
clusterState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
for (int i = 0; i < clusterState.routingTable().index("test1").size(); i++) {
assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
for (int i = 0; i < clusterState.routingTable().index("test2").size(); i++) {
assertThat(clusterState.routingTable().index("test2").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start all the primary shards for test1, replicas will start initializing");
clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test1");
for (int i = 0; i < clusterState.routingTable().index("test1").size(); i++) {
assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
}
for (int i = 0; i < clusterState.routingTable().index("test2").size(); i++) {
assertThat(clusterState.routingTable().index("test2").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start the test1 replica shards");
clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test1");
for (int i = 0; i < clusterState.routingTable().index("test1").size(); i++) {
assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
}
for (int i = 0; i < clusterState.routingTable().index("test2").size(); i++) {
assertThat(clusterState.routingTable().index("test2").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("now, start 1 more node, check that rebalancing will not happen (for test1) because we set it to primaries_active");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
clusterState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
RoutingNodes routingNodes = clusterState.getRoutingNodes();
assertThat(routingNodes.node("node3").isEmpty(), equalTo(true));
}
public void testClusterAllActive1() {
AllocationService strategy = createAllocationService(
Settings.builder()
.put(
ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_ALL_ACTIVE.toString()
)
.build()
);
Metadata metadata = Metadata.builder()
.put(IndexMetadata.builder("test1").settings(settings(IndexVersion.current())).numberOfShards(1).numberOfReplicas(1))
.put(IndexMetadata.builder("test2").settings(settings(IndexVersion.current())).numberOfShards(1).numberOfReplicas(1))
.build();
RoutingTable initialRoutingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY)
.addAsNew(metadata.getProject().index("test1"))
.addAsNew(metadata.getProject().index("test2"))
.build();
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).routingTable(initialRoutingTable).build();
logger.info("start two nodes");
clusterState = ClusterState.builder(clusterState)
.nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")))
.build();
clusterState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
for (int i = 0; i < clusterState.routingTable().index("test1").size(); i++) {
assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
for (int i = 0; i < clusterState.routingTable().index("test2").size(); i++) {
assertThat(clusterState.routingTable().index("test2").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start all the primary shards for test1, replicas will start initializing");
clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test1");
for (int i = 0; i < clusterState.routingTable().index("test1").size(); i++) {
assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
}
for (int i = 0; i < clusterState.routingTable().index("test2").size(); i++) {
assertThat(clusterState.routingTable().index("test2").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start the test1 replica shards");
clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test1");
for (int i = 0; i < clusterState.routingTable().index("test1").size(); i++) {
assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
}
for (int i = 0; i < clusterState.routingTable().index("test2").size(); i++) {
assertThat(clusterState.routingTable().index("test2").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start all the primary shards for test2, replicas will start initializing");
clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test2");
for (int i = 0; i < clusterState.routingTable().index("test1").size(); i++) {
assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
}
for (int i = 0; i < clusterState.routingTable().index("test2").size(); i++) {
assertThat(clusterState.routingTable().index("test2").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
}
logger.info("start the test2 replica shards");
clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test2");
for (int i = 0; i < clusterState.routingTable().index("test1").size(); i++) {
assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
}
for (int i = 0; i < clusterState.routingTable().index("test2").size(); i++) {
assertThat(clusterState.routingTable().index("test2").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
}
logger.info("now, start 1 more node, check that rebalancing happen (for test1) because we set it to all_active");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
clusterState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
RoutingNodes routingNodes = clusterState.getRoutingNodes();
assertThat(routingNodes.node("node3").size(), equalTo(1));
assertThat(routingNodes.node("node3").iterator().next().shardId().getIndex().getName(), anyOf(equalTo("test1"), equalTo("test2")));
}
public void testClusterAllActive2() {
AllocationService strategy = createAllocationService(
Settings.builder()
.put(
ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_ALL_ACTIVE.toString()
)
.build()
);
Metadata metadata = Metadata.builder()
.put(IndexMetadata.builder("test1").settings(settings(IndexVersion.current())).numberOfShards(1).numberOfReplicas(1))
.put(IndexMetadata.builder("test2").settings(settings(IndexVersion.current())).numberOfShards(1).numberOfReplicas(1))
.build();
RoutingTable initialRoutingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY)
.addAsNew(metadata.getProject().index("test1"))
.addAsNew(metadata.getProject().index("test2"))
.build();
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).routingTable(initialRoutingTable).build();
logger.info("start two nodes");
clusterState = ClusterState.builder(clusterState)
.nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")))
.build();
clusterState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
for (int i = 0; i < clusterState.routingTable().index("test1").size(); i++) {
assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
for (int i = 0; i < clusterState.routingTable().index("test2").size(); i++) {
assertThat(clusterState.routingTable().index("test2").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start all the primary shards for test1, replicas will start initializing");
clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test1");
for (int i = 0; i < clusterState.routingTable().index("test1").size(); i++) {
assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
}
for (int i = 0; i < clusterState.routingTable().index("test2").size(); i++) {
assertThat(clusterState.routingTable().index("test2").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start the test1 replica shards");
clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test1");
for (int i = 0; i < clusterState.routingTable().index("test1").size(); i++) {
assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
}
for (int i = 0; i < clusterState.routingTable().index("test2").size(); i++) {
assertThat(clusterState.routingTable().index("test2").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("now, start 1 more node, check that rebalancing will not happen (for test1) because we set it to all_active");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
clusterState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
RoutingNodes routingNodes = clusterState.getRoutingNodes();
assertThat(routingNodes.node("node3").isEmpty(), equalTo(true));
}
public void testClusterAllActive3() {
AllocationService strategy = createAllocationService(
Settings.builder()
.put(
ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_ALL_ACTIVE.toString()
)
.build()
);
Metadata metadata = Metadata.builder()
.put(IndexMetadata.builder("test1").settings(settings(IndexVersion.current())).numberOfShards(1).numberOfReplicas(1))
.put(IndexMetadata.builder("test2").settings(settings(IndexVersion.current())).numberOfShards(1).numberOfReplicas(1))
.build();
RoutingTable initialRoutingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY)
.addAsNew(metadata.getProject().index("test1"))
.addAsNew(metadata.getProject().index("test2"))
.build();
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).routingTable(initialRoutingTable).build();
logger.info("start two nodes");
clusterState = ClusterState.builder(clusterState)
.nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")))
.build();
clusterState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
for (int i = 0; i < clusterState.routingTable().index("test1").size(); i++) {
assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
for (int i = 0; i < clusterState.routingTable().index("test2").size(); i++) {
assertThat(clusterState.routingTable().index("test2").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start all the primary shards for test1, replicas will start initializing");
clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test1");
for (int i = 0; i < clusterState.routingTable().index("test1").size(); i++) {
assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
}
for (int i = 0; i < clusterState.routingTable().index("test2").size(); i++) {
assertThat(clusterState.routingTable().index("test2").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start the test1 replica shards");
clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test1");
for (int i = 0; i < clusterState.routingTable().index("test1").size(); i++) {
assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
}
for (int i = 0; i < clusterState.routingTable().index("test2").size(); i++) {
assertThat(clusterState.routingTable().index("test2").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start all the primary shards for test2, replicas will start initializing");
clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test2");
for (int i = 0; i < clusterState.routingTable().index("test1").size(); i++) {
assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
}
for (int i = 0; i < clusterState.routingTable().index("test2").size(); i++) {
assertThat(clusterState.routingTable().index("test2").shard(i).size(), equalTo(2));
assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
}
logger.info("now, start 1 more node, check that rebalancing will not happen (for test1) because we set it to all_active");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
clusterState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
RoutingNodes routingNodes = clusterState.getRoutingNodes();
assertThat(routingNodes.node("node3").isEmpty(), equalTo(true));
}
public void testRebalanceWithIgnoredUnassignedShards() {
final AtomicBoolean allocateTest1 = new AtomicBoolean(false);
AllocationService strategy = createAllocationService(
Settings.builder()
.put(
ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_ALL_ACTIVE.toString()
)
.build(),
new TestGatewayAllocator() {
@Override
public void allocateUnassigned(
ShardRouting shardRouting,
RoutingAllocation allocation,
UnassignedAllocationHandler unassignedAllocationHandler
) {
if (allocateTest1.get() == false && "test1".equals(shardRouting.index().getName())) {
unassignedAllocationHandler.removeAndIgnore(UnassignedInfo.AllocationStatus.NO_ATTEMPT, allocation.changes());
} else {
super.allocateUnassigned(shardRouting, allocation, unassignedAllocationHandler);
}
}
}
);
Metadata metadata = Metadata.builder()
.put(IndexMetadata.builder("test").settings(settings(IndexVersion.current())).numberOfShards(2).numberOfReplicas(0))
.put(IndexMetadata.builder("test1").settings(settings(IndexVersion.current())).numberOfShards(2).numberOfReplicas(0))
.build();
RoutingTable initialRoutingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY)
.addAsNew(metadata.getProject().index("test"))
.addAsNew(metadata.getProject().index("test1"))
.build();
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).routingTable(initialRoutingTable).build();
logger.info("start two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
clusterState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
for (int i = 0; i < clusterState.routingTable().index("test").size(); i++) {
assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(1));
assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
}
logger.debug("start all the primary shards for test");
clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test");
for (int i = 0; i < clusterState.routingTable().index("test").size(); i++) {
assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(1));
assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
}
logger.debug("now, start 1 more node, check that rebalancing will not happen since we unassigned shards");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build();
logger.debug("reroute and check that nothing has changed");
ClusterState resultingState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
assertThat(resultingState, equalTo(clusterState));
for (int i = 0; i < clusterState.routingTable().index("test").size(); i++) {
assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(1));
assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
}
for (int i = 0; i < clusterState.routingTable().index("test1").size(); i++) {
assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(1));
assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(UNASSIGNED));
}
logger.debug("now set allocateTest1 to true and reroute we should see the [test1] index initializing");
allocateTest1.set(true);
resultingState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
assertThat(resultingState, not(equalTo(clusterState)));
clusterState = resultingState;
for (int i = 0; i < clusterState.routingTable().index("test1").size(); i++) {
assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(1));
assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
}
logger.debug(
"now start initializing shards and expect exactly one rebalance" + " from node1 to node 2 since index [test] is all on node1"
);
clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test1");
for (int i = 0; i < clusterState.routingTable().index("test1").size(); i++) {
assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(1));
assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
}
int numStarted = 0;
int numRelocating = 0;
for (int i = 0; i < clusterState.routingTable().index("test").size(); i++) {
assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(1));
if (clusterState.routingTable().index("test").shard(i).primaryShard().state() == STARTED) {
numStarted++;
} else if (clusterState.routingTable().index("test").shard(i).primaryShard().state() == RELOCATING) {
numRelocating++;
}
}
assertEquals(numStarted, 1);
assertEquals(numRelocating, 1);
}
public void testRebalanceWhileShardFetching() {
final AtomicBoolean hasFetches = new AtomicBoolean(true);
AllocationService strategy = createAllocationService(
Settings.builder()
.put(
ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()
)
.put("cluster.routing.allocation.type", "balanced") // TODO fix for desired_balance
.build(),
new TestGatewayAllocator() {
@Override
public void beforeAllocation(RoutingAllocation allocation) {
if (hasFetches.get()) {
allocation.setHasPendingAsyncFetch();
}
}
}
);
assertCriticalWarnings(
"[cluster.routing.allocation.type] setting was deprecated in Elasticsearch and will be removed in a future release. "
+ "See the breaking changes documentation for the next major version."
);
Metadata metadata = Metadata.builder()
.put(IndexMetadata.builder("test").settings(settings(IndexVersion.current())).numberOfShards(2).numberOfReplicas(0))
.put(
IndexMetadata.builder("test1")
.settings(
settings(IndexVersion.current()).put(
IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "_id",
"node1,node2"
)
)
.numberOfShards(2)
.numberOfReplicas(0)
)
.build();
// we use a second index here (test1) that never gets assigned otherwise allocateUnassigned
// is never called if we don't have unassigned shards.
RoutingTable initialRoutingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY)
.addAsNew(metadata.getProject().index("test"))
.addAsNew(metadata.getProject().index("test1"))
.build();
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).routingTable(initialRoutingTable).build();
logger.info("start two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
clusterState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
for (int i = 0; i < clusterState.routingTable().index("test").size(); i++) {
assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(1));
assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
}
logger.debug("start all the primary shards for test");
clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test");
for (int i = 0; i < clusterState.routingTable().index("test").size(); i++) {
assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(1));
assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
}
logger.debug("now, start 1 more node, check that rebalancing will not happen since we have shard sync going on");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build();
logger.debug("reroute and check that nothing has changed");
ClusterState resultState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
assertThat(resultState, equalTo(clusterState));
for (int i = 0; i < clusterState.routingTable().index("test").size(); i++) {
assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(1));
assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
}
for (int i = 0; i < clusterState.routingTable().index("test1").size(); i++) {
assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(1));
assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(UNASSIGNED));
}
logger.debug("now set hasFetches to true and reroute we should now see exactly one relocating shard");
hasFetches.set(false);
resultState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
assertThat(resultState, not(equalTo(clusterState)));
clusterState = resultState;
int numStarted = 0;
int numRelocating = 0;
for (int i = 0; i < clusterState.routingTable().index("test").size(); i++) {
assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(1));
if (clusterState.routingTable().index("test").shard(i).primaryShard().state() == STARTED) {
numStarted++;
} else if (clusterState.routingTable().index("test").shard(i).primaryShard().state() == RELOCATING) {
numRelocating++;
}
}
for (int i = 0; i < clusterState.routingTable().index("test1").size(); i++) {
assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(1));
assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(UNASSIGNED));
}
assertEquals(numStarted, 1);
assertEquals(numRelocating, 1);
}
}
|
ClusterRebalanceRoutingTests
|
java
|
netty__netty
|
codec-http3/src/test/java/io/netty/handler/codec/http3/QpackDecoderHandlerTest.java
|
{
"start": 1475,
"end": 13925
}
|
class ____ {
private static final QpackHeaderField fooBar = new QpackHeaderField("foo", "bar");
private final QpackEncoderDynamicTable dynamicTable = new QpackEncoderDynamicTable();
private EmbeddedQuicChannel parent;
private QpackEncoder encoder;
private EmbeddedQuicStreamChannel decoderStream;
private EmbeddedQuicStreamChannel encoderStream;
private int maxEntries;
private QpackAttributes attributes;
@AfterEach
public void tearDown() {
assertFalse(encoderStream.finish());
assertFalse(decoderStream.finish());
}
@Test
public void sectionAckNoIncrement() throws Exception {
setup(128L);
encodeHeaders(headers -> headers.add(fooBar.name, fooBar.value));
Http3Exception e = assertThrows(Http3Exception.class, () -> sendAckForStreamId(decoderStream.streamId()));
assertThat(e.getCause(), instanceOf(QpackException.class));
Http3TestUtils.verifyClose(QPACK_DECODER_STREAM_ERROR, parent);
finishStreams();
}
@Test
public void sectionAck() throws Exception {
setup(128L);
encodeHeaders(headers -> headers.add(fooBar.name, fooBar.value));
verifyRequiredInsertCount(1);
sendInsertCountIncrement(1);
verifyKnownReceivedCount(1);
// Refer now to dynamic table
encodeHeaders(headers -> headers.add(fooBar.name, fooBar.value));
sendAckForStreamId(decoderStream.streamId());
finishStreams();
verifyRequiredInsertCount(1);
verifyKnownReceivedCount(1);
}
@Test
public void sectionAckUnknownStream() throws Exception {
setup(128);
Http3Exception e = assertThrows(Http3Exception.class, () -> sendAckForStreamId(1));
assertThat(e.getCause(), instanceOf(QpackException.class));
Http3TestUtils.verifyClose(QPACK_DECODER_STREAM_ERROR, parent);
finishStreams();
}
@Test
public void sectionAckAlreadyAcked() throws Exception {
setup(128);
encodeHeaders(headers -> headers.add(fooBar.name, fooBar.value));
sendInsertCountIncrement(1);
// Refer now to dynamic table
encodeHeaders(headers -> headers.add(fooBar.name, fooBar.value));
sendAckForStreamId(decoderStream.streamId());
Http3Exception e = assertThrows(Http3Exception.class, () -> sendAckForStreamId(decoderStream.streamId()));
assertThat(e.getCause(), instanceOf(QpackException.class));
Http3TestUtils.verifyClose(QPACK_DECODER_STREAM_ERROR, parent);
finishStreams();
verifyRequiredInsertCount(1);
verifyKnownReceivedCount(1);
}
@Test
public void sectionAckMultiPending() throws Exception {
setup(128L);
encodeHeaders(headers -> headers.add(fooBar.name, fooBar.value));
sendInsertCountIncrement(1);
// Refer now to dynamic table
encodeHeaders(headers -> headers.add(fooBar.name, fooBar.value));
encodeHeaders(headers -> headers.add(fooBar.name, fooBar.value));
sendAckForStreamId(decoderStream.streamId());
sendAckForStreamId(decoderStream.streamId());
finishStreams();
verifyRequiredInsertCount(1);
verifyKnownReceivedCount(1);
}
@Test
public void sectionAckMultiPostAck() throws Exception {
setup(128L);
encodeHeaders(headers -> headers.add(fooBar.name, fooBar.value));
sendInsertCountIncrement(1);
// Refer now to dynamic table
encodeHeaders(headers -> headers.add(fooBar.name, fooBar.value));
sendAckForStreamId(decoderStream.streamId());
encodeHeaders(headers -> headers.add(fooBar.name, fooBar.value));
sendAckForStreamId(decoderStream.streamId());
finishStreams();
verifyRequiredInsertCount(1);
verifyKnownReceivedCount(1);
}
@Test
public void sectionAckCancelledStream() throws Exception {
setup(128L);
encodeHeaders(headers -> headers.add(fooBar.name, fooBar.value));
sendInsertCountIncrement(1);
// Refer now to dynamic table
encodeHeaders(headers -> headers.add(fooBar.name, fooBar.value));
sendStreamCancellation(decoderStream.streamId());
Http3Exception e = assertThrows(Http3Exception.class, () -> sendAckForStreamId(decoderStream.streamId()));
assertThat(e.getCause(), instanceOf(QpackException.class));
Http3TestUtils.verifyClose(QPACK_DECODER_STREAM_ERROR, parent);
finishStreams();
}
@Test
public void splitBufferForSectionAck() throws Exception {
setup(128);
encodeHeaders(headers -> headers.add(fooBar.name, fooBar.value));
verifyRequiredInsertCount(1);
sendInsertCountIncrement(1);
verifyKnownReceivedCount(1);
// Refer now to dynamic table
encodeHeaders(headers -> headers.add(fooBar.name, fooBar.value));
final ByteBuf buf = encodeSectionAck(decoderStream.streamId());
try {
while (buf.isReadable()) {
assertFalse(decoderStream.writeInbound(buf.readBytes(1)));
}
} finally {
buf.release();
}
finishStreams();
}
@Test
public void splitBufferForInsertCountIncrement() throws Exception {
setup(128);
encodeHeaders(headers -> headers.add(fooBar.name, fooBar.value));
verifyRequiredInsertCount(1);
final ByteBuf buf = encodeInsertCountIncrement(1);
try {
while (buf.isReadable()) {
assertFalse(decoderStream.writeInbound(buf.readBytes(1)));
}
} finally {
buf.release();
}
verifyKnownReceivedCount(1);
finishStreams();
}
@Test
public void splitBufferForStreamCancellation() throws Exception {
setup(128);
encodeHeaders(headers -> headers.add(fooBar.name, fooBar.value));
verifyRequiredInsertCount(1);
final ByteBuf buf = encodeStreamCancellation(decoderStream.streamId());
try {
while (buf.isReadable()) {
assertFalse(decoderStream.writeInbound(buf.readBytes(1)));
}
} finally {
buf.release();
}
finishStreams();
}
@Test
public void streamCancel() throws Exception {
setup(128);
encodeHeaders(headers -> headers.add(fooBar.name, fooBar.value));
verifyRequiredInsertCount(1);
sendInsertCountIncrement(1);
verifyKnownReceivedCount(1);
// Refer now to dynamic table
encodeHeaders(headers -> headers.add(fooBar.name, fooBar.value));
sendStreamCancellation(decoderStream.streamId());
verifyRequiredInsertCount(1);
verifyKnownReceivedCount(1);
finishStreams();
}
@Test
public void streamCancelUnknownStream() throws Exception {
setup(128);
sendStreamCancellation(decoderStream.streamId());
verifyRequiredInsertCount(0);
verifyKnownReceivedCount(0);
finishStreams();
}
@Test
public void streamCancelDynamicTableWithMaxCapacity0() throws Exception {
setup(0);
encodeHeaders(headers -> headers.add(fooBar.name, fooBar.value));
verifyRequiredInsertCount(0);
verifyKnownReceivedCount(0);
// Send a stream cancellation for a dynamic table of capacity 0.
// See https://www.rfc-editor.org/rfc/rfc9204.html#section-2.2.2.2
sendStreamCancellation(decoderStream.streamId());
finishStreams(false);
}
@Test
public void invalidIncrement() throws Exception {
setup(128);
Http3Exception e = assertThrows(Http3Exception.class, () -> sendInsertCountIncrement(2));
assertThat(e.getCause(), instanceOf(QpackException.class));
Http3TestUtils.verifyClose(QPACK_DECODER_STREAM_ERROR, parent);
finishStreams();
}
private void sendAckForStreamId(long streamId) throws Http3Exception {
assertFalse(decoderStream.writeInbound(encodeSectionAck(streamId)));
}
private ByteBuf encodeSectionAck(long streamId) {
final ByteBuf ack = decoderStream.alloc().buffer();
// https://quicwg.org/base-drafts/draft-ietf-quic-qpack.html#name-section-acknowledgment
// 0 1 2 3 4 5 6 7
// +---+---+---+---+---+---+---+---+
// | 1 | Stream ID (7+) |
// +---+---------------------------+
encodePrefixedInteger(ack, (byte) 0b1000_0000, 7, streamId);
return ack;
}
private void sendInsertCountIncrement(long increment) throws Http3Exception {
assertFalse(decoderStream.writeInbound(encodeInsertCountIncrement(increment)));
}
private ByteBuf encodeInsertCountIncrement(long increment) {
final ByteBuf incr = decoderStream.alloc().buffer();
// https://quicwg.org/base-drafts/draft-ietf-quic-qpack.html#name-insert-count-increment
// 0 1 2 3 4 5 6 7
// +---+---+---+---+---+---+---+---+
// | 0 | 0 | Increment (6+) |
// +---+---+-----------------------+
encodePrefixedInteger(incr, (byte) 0b0000_0000, 6, increment);
return incr;
}
private void sendStreamCancellation(long streamId) {
assertFalse(decoderStream.writeInbound(encodeStreamCancellation(streamId)));
}
private ByteBuf encodeStreamCancellation(long streamId) {
final ByteBuf incr = decoderStream.alloc().buffer();
// https://quicwg.org/base-drafts/draft-ietf-quic-qpack.html#name-stream-cancellation
// 0 1 2 3 4 5 6 7
// +---+---+---+---+---+---+---+---+
// | 0 | 1 | Stream ID (6+) |
// +---+---+-----------------------+
encodePrefixedInteger(incr, (byte) 0b0100_0000, 6, streamId);
return incr;
}
private void encodeHeaders(Consumer<Http3Headers> headersUpdater) {
Http3Headers headers = new DefaultHttp3Headers();
headersUpdater.accept(headers);
final ByteBuf buf = decoderStream.alloc().buffer();
try {
encoder.encodeHeaders(attributes, buf, decoderStream.alloc(), decoderStream.streamId(), headers);
} finally {
buf.release();
}
}
private void setup(long maxTableCapacity) throws Exception {
maxEntries = Math.toIntExact(QpackUtil.maxEntries(maxTableCapacity));
parent = new EmbeddedQuicChannel(true);
attributes = new QpackAttributes(parent, false);
setQpackAttributes(parent, attributes);
Http3SettingsFrame settings = new DefaultHttp3SettingsFrame();
settings.put(HTTP3_SETTINGS_QPACK_MAX_TABLE_CAPACITY, maxTableCapacity);
QpackDecoder decoder = new QpackDecoder(maxTableCapacity, 0);
encoderStream = (EmbeddedQuicStreamChannel) parent.createStream(QuicStreamType.UNIDIRECTIONAL,
new QpackEncoderHandler(maxTableCapacity, decoder)).get();
attributes.encoderStream(encoderStream);
encoder = new QpackEncoder(dynamicTable);
encoder.configureDynamicTable(attributes, maxTableCapacity, 0);
decoderStream = (EmbeddedQuicStreamChannel) parent.createStream(QuicStreamType.UNIDIRECTIONAL,
new QpackDecoderHandler(encoder)).get();
attributes.decoderStream(decoderStream);
}
private void finishStreams() {
finishStreams(true);
}
private void finishStreams(boolean encoderPendingMessage) {
assertThat("Unexpected decoder stream message", decoderStream.finishAndReleaseAll(), is(false));
assertThat("Unexpected encoder stream message", encoderStream.finishAndReleaseAll(), is(encoderPendingMessage));
assertThat("Unexpected parent stream message", parent.finishAndReleaseAll(), is(false));
}
private void verifyRequiredInsertCount(int insertCount) {
assertThat("Unexpected dynamic table insert count.",
dynamicTable.encodedRequiredInsertCount(dynamicTable.insertCount()),
is(insertCount == 0 ? 0 : insertCount % maxEntries + 1));
}
private void verifyKnownReceivedCount(int receivedCount) {
assertThat("Unexpected dynamic table known received count.", dynamicTable.encodedKnownReceivedCount(),
is(receivedCount == 0 ? 0 : receivedCount % maxEntries + 1));
}
}
|
QpackDecoderHandlerTest
|
java
|
apache__flink
|
flink-core/src/main/java/org/apache/flink/api/connector/source/SourceReaderContext.java
|
{
"start": 1226,
"end": 2973
}
|
interface ____ {
/**
* @return The metric group this source belongs to.
*/
SourceReaderMetricGroup metricGroup();
/** Gets the configuration with which Flink was started. */
Configuration getConfiguration();
/**
* Gets the hostname of the machine where this reader is executed. This can be used to request
* splits local to the machine, if needed.
*/
String getLocalHostName();
/**
* @return The index of this subtask.
*/
int getIndexOfSubtask();
/**
* Sends a split request to the source's {@link SplitEnumerator}. This will result in a call to
* the {@link SplitEnumerator#handleSplitRequest(int, String)} method, with this reader's
* parallel subtask id and the hostname where this reader runs.
*/
void sendSplitRequest();
/**
* Send a source event to the source coordinator.
*
* @param sourceEvent the source event to coordinator.
*/
void sendSourceEventToCoordinator(SourceEvent sourceEvent);
/**
* Gets the {@link UserCodeClassLoader} to load classes that are not in system's classpath, but
* are part of the jar file of a user job.
*
* @see UserCodeClassLoader
*/
UserCodeClassLoader getUserCodeClassLoader();
/**
* Get the current parallelism of this Source.
*
* @return the parallelism of the Source.
*/
default int currentParallelism() {
throw new UnsupportedOperationException();
}
/**
* Send the watermark to source output.
*
* <p>This should only be used for datastream v2.
*/
default void emitWatermark(Watermark watermark) {
throw new UnsupportedOperationException();
}
}
|
SourceReaderContext
|
java
|
alibaba__nacos
|
persistence/src/main/java/com/alibaba/nacos/persistence/repository/embedded/hook/EmbeddedApplyHookHolder.java
|
{
"start": 805,
"end": 1346
}
|
class ____ {
private static final EmbeddedApplyHookHolder INSTANCE = new EmbeddedApplyHookHolder();
private final Set<EmbeddedApplyHook> hooks;
private EmbeddedApplyHookHolder() {
hooks = new HashSet<>();
}
public static EmbeddedApplyHookHolder getInstance() {
return INSTANCE;
}
public void register(EmbeddedApplyHook hook) {
this.hooks.add(hook);
}
public Set<EmbeddedApplyHook> getAllHooks() {
return this.hooks;
}
}
|
EmbeddedApplyHookHolder
|
java
|
junit-team__junit5
|
junit-jupiter-engine/src/main/java/org/junit/jupiter/engine/execution/JupiterEngineExecutionContext.java
|
{
"start": 3680,
"end": 4578
}
|
class ____ implements Cloneable {
final EngineExecutionListener executionListener;
final JupiterConfiguration configuration;
final LauncherStoreFacade launcherStoreFacade;
@Nullable
TestInstancesProvider testInstancesProvider;
@Nullable
MutableExtensionRegistry extensionRegistry;
@Nullable
ExtensionContext extensionContext;
@Nullable
ThrowableCollector throwableCollector;
State(EngineExecutionListener executionListener, JupiterConfiguration configuration,
LauncherStoreFacade launcherStoreFacade) {
this.executionListener = executionListener;
this.configuration = configuration;
this.launcherStoreFacade = launcherStoreFacade;
}
@Override
public State clone() {
try {
return (State) super.clone();
}
catch (CloneNotSupportedException e) {
throw new JUnitException("State could not be cloned", e);
}
}
}
public static
|
State
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/read/ValuesReader.java
|
{
"start": 572,
"end": 1749
}
|
class ____ implements ReleasableIterator<Block[]> {
protected final ValuesSourceReaderOperator operator;
protected final DocVector docs;
private int offset;
ValuesReader(ValuesSourceReaderOperator operator, DocVector docs) {
this.operator = operator;
this.docs = docs;
}
@Override
public boolean hasNext() {
return offset < docs.getPositionCount();
}
@Override
public Block[] next() {
Block[] target = new Block[operator.fields.length];
boolean success = false;
try {
load(target, offset);
success = true;
for (Block b : target) {
operator.valuesLoaded += b.getTotalValueCount();
}
offset += target[0].getPositionCount();
return target;
} catch (IOException e) {
throw new UncheckedIOException(e);
} finally {
if (success == false) {
Releasables.closeExpectNoException(target);
}
}
}
protected abstract void load(Block[] target, int offset) throws IOException;
@Override
public void close() {}
}
|
ValuesReader
|
java
|
apache__camel
|
components/camel-dapr/src/test/java/org/apache/camel/component/dapr/DaprComponentTest.java
|
{
"start": 1004,
"end": 1644
}
|
class ____ extends CamelTestSupport {
@Test
public void testCreateEndpoint() throws Exception {
String uri = "dapr:invokeService?serviceToInvoke=myService&methodToInvoke=myMethod&verb=GET";
final DaprEndpoint endpoint = (DaprEndpoint) context.getEndpoint(uri);
assertEquals(DaprOperation.invokeService, endpoint.getConfiguration().getOperation());
assertEquals("myService", endpoint.getConfiguration().getServiceToInvoke());
assertEquals("myMethod", endpoint.getConfiguration().getMethodToInvoke());
assertEquals("GET", endpoint.getConfiguration().getVerb());
}
}
|
DaprComponentTest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/engine/jdbc/cursor/internal/RefCursorSupportInitiator.java
|
{
"start": 511,
"end": 1559
}
|
class ____ implements StandardServiceInitiator<RefCursorSupport> {
/**
* Singleton access
*/
public static final RefCursorSupportInitiator INSTANCE = new RefCursorSupportInitiator();
@Override
public RefCursorSupport initiateService(Map<String, Object> configurationValues, ServiceRegistryImplementor registry) {
final JdbcServices jdbcServices = registry.requireService( JdbcServices.class );
final boolean supportsRefCursors = useRefCursorSupport( jdbcServices );
return supportsRefCursors
? new StandardRefCursorSupport( jdbcServices )
: new FallbackRefCursorSupport (jdbcServices );
}
private boolean useRefCursorSupport(JdbcServices jdbcServices) {
final Boolean dialectAnswer = jdbcServices.getDialect().supportsRefCursors();
if ( dialectAnswer != null ) {
return dialectAnswer;
}
return jdbcServices.getJdbcEnvironment().getExtractedDatabaseMetaData().supportsRefCursors();
}
@Override
public Class<RefCursorSupport> getServiceInitiated() {
return RefCursorSupport.class;
}
}
|
RefCursorSupportInitiator
|
java
|
apache__kafka
|
streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter.java
|
{
"start": 2202,
"end": 2455
}
|
class ____ users to set all configs on one object if only default column family
* is used. Because we use multiple column families, we need to use {@link DBOptions} and {@link ColumnFamilyOptions}
* that cover a part of all options each.
*
* This
|
allows
|
java
|
apache__kafka
|
connect/api/src/main/java/org/apache/kafka/connect/connector/policy/package-info.java
|
{
"start": 1045,
"end": 1206
}
|
interface ____ be used to control which Kafka client properties can be overridden on a per-connector basis.
*/
package org.apache.kafka.connect.connector.policy;
|
can
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/basicType/FloatTest3_random.java
|
{
"start": 1943,
"end": 2109
}
|
class ____ {
public float value;
public Model() {
}
public Model(float value) {
this.value = value;
}
}
}
|
Model
|
java
|
spring-projects__spring-data-jpa
|
spring-data-jpa/src/test/java/org/springframework/data/jpa/repository/support/MutableQueryHintsUnitTests.java
|
{
"start": 1006,
"end": 2380
}
|
class ____ {
@Test // DATAJPA-872
void emptyQueryHint() {
new MutableQueryHints().forEach((k, v) -> Assertions.fail("Empty SimpleQueryHints shouldn't contain a value"));
}
@Test // DATAJPA-872
void queryHint() {
MutableQueryHints hints = new MutableQueryHints();
hints.add("key", "value");
hints.add("key", "other value");
hints.add("other key", "another value");
List<Object> calls = new ArrayList<>();
hints.forEach((k, v) -> calls.add(Pair.of(k, v)));
assertThat(calls).containsExactlyInAnyOrder(Pair.of("key", "value"), Pair.of("key", "other value"),
Pair.of("other key", "another value"));
}
@Test // DATAJPA-872
void shouldMergeQueryHints() {
MutableQueryHints hints = new MutableQueryHints();
hints.add("key", "value");
hints.add("key", "other value");
hints.add("other key", "another value");
MutableQueryHints additionalHints = new MutableQueryHints();
additionalHints.add("key", "23");
additionalHints.add("another key", "42");
QueryHints merged = QueryHints.from(hints, additionalHints);
List<Object> calls = new ArrayList<>();
merged.forEach((k, v) -> calls.add(Pair.of(k, v)));
assertThat(calls).containsExactlyInAnyOrder(Pair.of("key", "value"), Pair.of("key", "other value"),Pair.of("key", "23"),
Pair.of("other key", "another value"), Pair.of("another key", "42"));
}
}
|
MutableQueryHintsUnitTests
|
java
|
spring-projects__spring-framework
|
spring-jms/src/test/java/org/springframework/jms/support/converter/MessagingMessageConverterTests.java
|
{
"start": 2265,
"end": 2706
}
|
class ____ extends SimpleMessageConverter {
private boolean called;
@Override
public Object fromMessage(jakarta.jms.Message message) throws JMSException, MessageConversionException {
if (this.called) {
throw new java.lang.IllegalStateException("Converter called twice");
}
this.called = true;
TextMessage textMessage = (TextMessage) message;
return Long.parseLong(textMessage.getText());
}
}
}
|
TestMessageConverter
|
java
|
apache__kafka
|
connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/Plugins.java
|
{
"start": 22797,
"end": 23051
}
|
class ____ of the converter
* @param converterConfig the properties to configure the converter with
* @return the instantiated and configured {@link Converter}; never null
* @throws ConnectException if the {@link Converter} implementation
|
name
|
java
|
spring-projects__spring-security
|
oauth2/oauth2-authorization-server/src/main/java/org/springframework/security/oauth2/server/authorization/web/authentication/OAuth2AuthorizationConsentAuthenticationConverter.java
|
{
"start": 2462,
"end": 5385
}
|
class ____ implements AuthenticationConverter {
private static final String DEFAULT_ERROR_URI = "https://datatracker.ietf.org/doc/html/rfc6749#section-4.1.2.1";
private static final Authentication ANONYMOUS_AUTHENTICATION = new AnonymousAuthenticationToken("anonymous",
"anonymousUser", AuthorityUtils.createAuthorityList("ROLE_ANONYMOUS"));
private final RequestMatcher requestMatcher = createDefaultRequestMatcher();
@Override
public Authentication convert(HttpServletRequest request) {
if (!this.requestMatcher.matches(request)) {
return null;
}
MultiValueMap<String, String> parameters = OAuth2EndpointUtils.getFormParameters(request);
String authorizationUri = request.getRequestURL().toString();
// client_id (REQUIRED)
String clientId = parameters.getFirst(OAuth2ParameterNames.CLIENT_ID);
if (!StringUtils.hasText(clientId) || parameters.get(OAuth2ParameterNames.CLIENT_ID).size() != 1) {
throwError(OAuth2ErrorCodes.INVALID_REQUEST, OAuth2ParameterNames.CLIENT_ID);
}
Authentication principal = SecurityContextHolder.getContext().getAuthentication();
if (principal == null) {
principal = ANONYMOUS_AUTHENTICATION;
}
// state (REQUIRED)
String state = parameters.getFirst(OAuth2ParameterNames.STATE);
if (!StringUtils.hasText(state) || parameters.get(OAuth2ParameterNames.STATE).size() != 1) {
throwError(OAuth2ErrorCodes.INVALID_REQUEST, OAuth2ParameterNames.STATE);
}
// scope (OPTIONAL)
Set<String> scopes = null;
if (parameters.containsKey(OAuth2ParameterNames.SCOPE)) {
scopes = new HashSet<>(parameters.get(OAuth2ParameterNames.SCOPE));
}
Map<String, Object> additionalParameters = new HashMap<>();
parameters.forEach((key, value) -> {
if (!key.equals(OAuth2ParameterNames.CLIENT_ID) && !key.equals(OAuth2ParameterNames.STATE)
&& !key.equals(OAuth2ParameterNames.SCOPE)) {
additionalParameters.put(key, (value.size() == 1) ? value.get(0) : value.toArray(new String[0]));
}
});
return new OAuth2AuthorizationConsentAuthenticationToken(authorizationUri, clientId, principal, state, scopes,
additionalParameters);
}
static RequestMatcher createDefaultRequestMatcher() {
return (request) -> "POST".equals(request.getMethod())
&& request.getParameter(OAuth2ParameterNames.RESPONSE_TYPE) == null
&& request.getParameter(OAuth2ParameterNames.REQUEST_URI) == null
&& request.getParameter(OAuth2ParameterNames.REDIRECT_URI) == null
&& request.getParameter(PkceParameterNames.CODE_CHALLENGE) == null
&& request.getParameter(PkceParameterNames.CODE_CHALLENGE_METHOD) == null;
}
private static void throwError(String errorCode, String parameterName) {
OAuth2Error error = new OAuth2Error(errorCode, "OAuth 2.0 Parameter: " + parameterName, DEFAULT_ERROR_URI);
throw new OAuth2AuthorizationCodeRequestAuthenticationException(error, null);
}
}
|
OAuth2AuthorizationConsentAuthenticationConverter
|
java
|
junit-team__junit5
|
junit-vintage-engine/src/testFixtures/java/org/junit/vintage/engine/samples/junit4/JUnit4TestCaseWithNotFilterableRunner.java
|
{
"start": 670,
"end": 756
}
|
class ____ {
@Test
public void someTest() {
}
}
|
JUnit4TestCaseWithNotFilterableRunner
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/sql/ast/spi/SqlAliasStemHelper.java
|
{
"start": 282,
"end": 705
}
|
class ____ {
/**
* Singleton access
*/
public static final SqlAliasStemHelper INSTANCE = new SqlAliasStemHelper();
public String generateStemFromEntityName(String entityName) {
return acronym( toSimpleEntityName( entityName ) );
}
private String toSimpleEntityName(String entityName) {
String simpleName = StringHelper.unqualify( entityName );
if ( simpleName.contains( "$" ) ) {
// inner
|
SqlAliasStemHelper
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/index/mapper/blockloader/docvalues/AbstractLongsFromDocValuesBlockLoader.java
|
{
"start": 2077,
"end": 4047
}
|
class ____ extends BlockDocValuesReader implements BlockDocValuesReader.NumericDocValuesAccessor {
final NumericDocValues numericDocValues;
public Singleton(NumericDocValues numericDocValues) {
this.numericDocValues = numericDocValues;
}
@Override
public Block read(BlockFactory factory, Docs docs, int offset, boolean nullsFiltered) throws IOException {
if (numericDocValues instanceof OptionalColumnAtATimeReader direct) {
Block result = direct.tryRead(factory, docs, offset, nullsFiltered, null, false);
if (result != null) {
return result;
}
}
try (LongBuilder builder = factory.longsFromDocValues(docs.count() - offset)) {
for (int i = offset; i < docs.count(); i++) {
int doc = docs.get(i);
if (numericDocValues.advanceExact(doc)) {
builder.appendLong(numericDocValues.longValue());
} else {
builder.appendNull();
}
}
return builder.build();
}
}
@Override
public void read(int docId, StoredFields storedFields, Builder builder) throws IOException {
LongBuilder blockBuilder = (LongBuilder) builder;
if (numericDocValues.advanceExact(docId)) {
blockBuilder.appendLong(numericDocValues.longValue());
} else {
blockBuilder.appendNull();
}
}
@Override
public int docId() {
return numericDocValues.docID();
}
@Override
public String toString() {
return "LongsFromDocValues.Singleton";
}
@Override
public NumericDocValues numericDocValues() {
return numericDocValues;
}
}
public static
|
Singleton
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/pool/exception/OracleExceptionSorterTest.java
|
{
"start": 455,
"end": 2189
}
|
class ____ extends TestCase {
private DruidDataSource dataSource;
protected void setUp() throws Exception {
assertEquals(0, JdbcStatManager.getInstance().getSqlList().size());
dataSource = new DruidDataSource();
dataSource.setExceptionSorter(new OracleExceptionSorter());
dataSource.setDriver(new OracleMockDriver());
dataSource.setUrl("jdbc:mock:xxx");
dataSource.setPoolPreparedStatements(true);
dataSource.setMaxOpenPreparedStatements(100);
dataSource.setFilters("log4j");
}
@Override
protected void tearDown() throws Exception {
JdbcUtils.close(dataSource);
}
public void test_connect() throws Exception {
String sql = "SELECT 1";
{
DruidPooledConnection conn = dataSource.getConnection();
PreparedStatement pstmt = conn.prepareStatement(sql);
pstmt.execute();
pstmt.close();
conn.close();
}
DruidPooledConnection conn = dataSource.getConnection();
PreparedStatement pstmt = conn.prepareStatement(sql);
pstmt.setFetchSize(1000);
SQLException exception = new SQLException("xx", "xxx", 28);
boolean fatal = false;
try {
conn.handleException(exception);
} catch (SQLException ex) {
fatal = true;
}
assertTrue(fatal);
pstmt.close();
SQLException commitError = null;
try {
conn.commit();
} catch (SQLException ex) {
commitError = ex;
}
assertNotNull(commitError);
assertSame(exception, commitError.getCause());
conn.close();
}
}
|
OracleExceptionSorterTest
|
java
|
micronaut-projects__micronaut-core
|
inject-java/src/test/groovy/io/micronaut/inject/factory/parameterizedfactory/C.java
|
{
"start": 668,
"end": 790
}
|
class ____ {
B b;
int count;
public C(B b, int count) {
this.b = b;
this.count = count;
}
}
|
C
|
java
|
google__guava
|
android/guava/src/com/google/common/hash/BloomFilter.java
|
{
"start": 23607,
"end": 27543
}
|
class ____<T extends @Nullable Object> implements Serializable {
final long[] data;
final int numHashFunctions;
final Funnel<? super T> funnel;
final Strategy strategy;
SerialForm(BloomFilter<T> bf) {
this.data = LockFreeBitArray.toPlainArray(bf.bits.data);
this.numHashFunctions = bf.numHashFunctions;
this.funnel = bf.funnel;
this.strategy = bf.strategy;
}
Object readResolve() {
return new BloomFilter<T>(new LockFreeBitArray(data), numHashFunctions, funnel, strategy);
}
private static final long serialVersionUID = 1;
}
/**
* Writes this {@code BloomFilter} to an output stream, with a custom format (not Java
* serialization). This has been measured to save at least 400 bytes compared to regular
* serialization.
*
* <p>Use {@linkplain #readFrom(InputStream, Funnel)} to reconstruct the written BloomFilter.
*/
public void writeTo(OutputStream out) throws IOException {
// Serial form:
// 1 signed byte for the strategy
// 1 unsigned byte for the number of hash functions
// 1 big endian int, the number of longs in our bitset
// N big endian longs of our bitset
DataOutputStream dout = new DataOutputStream(out);
dout.writeByte(SignedBytes.checkedCast(strategy.ordinal()));
dout.writeByte(UnsignedBytes.checkedCast(numHashFunctions)); // note: checked at the c'tor
dout.writeInt(bits.data.length());
for (int i = 0; i < bits.data.length(); i++) {
dout.writeLong(bits.data.get(i));
}
}
/**
* Reads a byte stream, which was written by {@linkplain #writeTo(OutputStream)}, into a {@code
* BloomFilter}.
*
* <p>The {@code Funnel} to be used is not encoded in the stream, so it must be provided here.
* <b>Warning:</b> the funnel provided <b>must</b> behave identically to the one used to populate
* the original Bloom filter!
*
* @throws IOException if the InputStream throws an {@code IOException}, or if its data does not
* appear to be a BloomFilter serialized using the {@linkplain #writeTo(OutputStream)} method.
*/
@SuppressWarnings("CatchingUnchecked") // sneaky checked exception
public static <T extends @Nullable Object> BloomFilter<T> readFrom(
InputStream in, Funnel<? super T> funnel) throws IOException {
checkNotNull(in, "InputStream");
checkNotNull(funnel, "Funnel");
int strategyOrdinal = -1;
int numHashFunctions = -1;
int dataLength = -1;
try {
DataInputStream din = new DataInputStream(in);
// currently this assumes there is no negative ordinal; will have to be updated if we
// add non-stateless strategies (for which we've reserved negative ordinals; see
// Strategy.ordinal()).
strategyOrdinal = din.readByte();
numHashFunctions = toUnsignedInt(din.readByte());
dataLength = din.readInt();
/*
* We document in BloomFilterStrategies that we must not change the ordering, and we have a
* test that verifies that we don't do so.
*/
@SuppressWarnings("EnumOrdinal")
Strategy strategy = BloomFilterStrategies.values()[strategyOrdinal];
LockFreeBitArray dataArray = new LockFreeBitArray(Math.multiplyExact(dataLength, 64L));
for (int i = 0; i < dataLength; i++) {
dataArray.putData(i, din.readLong());
}
return new BloomFilter<>(dataArray, numHashFunctions, funnel, strategy);
} catch (IOException e) {
throw e;
} catch (Exception e) { // sneaky checked exception
String message =
"Unable to deserialize BloomFilter from InputStream."
+ " strategyOrdinal: "
+ strategyOrdinal
+ " numHashFunctions: "
+ numHashFunctions
+ " dataLength: "
+ dataLength;
throw new IOException(message, e);
}
}
private static final long serialVersionUID = 0xdecaf;
}
|
SerialForm
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/RecordsQueryBuilder.java
|
{
"start": 2068,
"end": 5299
}
|
class ____ {
public static final int DEFAULT_SIZE = 100;
private static final List<String> SECONDARY_SORT = Arrays.asList(
AnomalyRecord.RECORD_SCORE.getPreferredName(),
AnomalyRecord.OVER_FIELD_VALUE.getPreferredName(),
AnomalyRecord.PARTITION_FIELD_VALUE.getPreferredName(),
AnomalyRecord.BY_FIELD_VALUE.getPreferredName(),
AnomalyRecord.FIELD_NAME.getPreferredName(),
AnomalyRecord.FUNCTION.getPreferredName()
);
private int from = 0;
private int size = DEFAULT_SIZE;
private boolean includeInterim = false;
private String sortField;
private boolean sortDescending = true;
private double recordScore = 0.0;
private String start;
private String end;
private Date timestamp;
public RecordsQueryBuilder from(int from) {
this.from = from;
return this;
}
public RecordsQueryBuilder size(int size) {
this.size = size;
return this;
}
public RecordsQueryBuilder epochStart(String startTime) {
this.start = startTime;
return this;
}
public RecordsQueryBuilder epochEnd(String endTime) {
this.end = endTime;
return this;
}
public RecordsQueryBuilder includeInterim(boolean include) {
this.includeInterim = include;
return this;
}
public RecordsQueryBuilder sortField(String fieldname) {
this.sortField = fieldname;
return this;
}
public RecordsQueryBuilder sortDescending(boolean sortDescending) {
this.sortDescending = sortDescending;
return this;
}
public RecordsQueryBuilder recordScore(double recordScore) {
this.recordScore = recordScore;
return this;
}
public RecordsQueryBuilder timestamp(Date timestamp) {
this.timestamp = timestamp;
return this;
}
public SearchSourceBuilder build() {
QueryBuilder query = new ResultsFilterBuilder().timeRange(Result.TIMESTAMP.getPreferredName(), start, end)
.score(AnomalyRecord.RECORD_SCORE.getPreferredName(), recordScore)
.interim(includeInterim)
.build();
FieldSortBuilder sb;
if (sortField != null) {
sb = new FieldSortBuilder(sortField).missing("_last").order(sortDescending ? SortOrder.DESC : SortOrder.ASC);
} else {
sb = SortBuilders.fieldSort(ElasticsearchMappings.ES_DOC);
}
BoolQueryBuilder recordFilter = new BoolQueryBuilder().filter(query)
.filter(new TermsQueryBuilder(Result.RESULT_TYPE.getPreferredName(), AnomalyRecord.RESULT_TYPE_VALUE));
if (timestamp != null) {
recordFilter.filter(QueryBuilders.termQuery(Result.TIMESTAMP.getPreferredName(), timestamp.getTime()));
}
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().from(from)
.size(size)
.query(recordFilter)
.sort(sb)
.fetchSource(true);
for (String eachSortField : SECONDARY_SORT) {
searchSourceBuilder.sort(eachSortField, sortDescending ? SortOrder.DESC : SortOrder.ASC);
}
return searchSourceBuilder;
}
}
|
RecordsQueryBuilder
|
java
|
quarkusio__quarkus
|
test-framework/common/src/main/java/io/quarkus/test/common/QuarkusTestResourceLifecycleManagerComparator.java
|
{
"start": 76,
"end": 356
}
|
class ____ implements Comparator<QuarkusTestResourceLifecycleManager> {
@Override
public int compare(QuarkusTestResourceLifecycleManager o1, QuarkusTestResourceLifecycleManager o2) {
return o1.order() - o2.order();
}
}
|
QuarkusTestResourceLifecycleManagerComparator
|
java
|
apache__camel
|
components/camel-ibm/camel-ibm-cos/src/test/java/org/apache/camel/component/ibm/cos/integration/IBMCOSProducerListOperationsIT.java
|
{
"start": 2193,
"end": 6633
}
|
class ____ extends IBMCOSTestSupport {
@EndpointInject
private ProducerTemplate template;
@EndpointInject("mock:result")
private MockEndpoint mockResult;
@BeforeEach
public void resetMocks() {
mockResult.reset();
}
@Test
public void testListObjects() throws Exception {
mockResult.expectedMessageCount(1);
// Create some test objects
template.send("direct:putObject", new Processor() {
@Override
public void process(Exchange exchange) {
exchange.getIn().setHeader(IBMCOSConstants.KEY, "file1.txt");
exchange.getIn().setBody(new ByteArrayInputStream("Content 1".getBytes()));
}
});
template.send("direct:putObject", new Processor() {
@Override
public void process(Exchange exchange) {
exchange.getIn().setHeader(IBMCOSConstants.KEY, "file2.txt");
exchange.getIn().setBody(new ByteArrayInputStream("Content 2".getBytes()));
}
});
// List objects
Exchange listExchange = template.request("direct:listObjects", new Processor() {
@Override
public void process(Exchange exchange) {
// No additional headers needed
}
});
assertNotNull(listExchange);
List<S3ObjectSummary> objects = listExchange.getMessage().getBody(List.class);
assertNotNull(objects);
assertTrue(objects.size() >= 2, "Should have at least 2 objects");
// Verify objects
boolean foundFile1 = objects.stream().anyMatch(obj -> "file1.txt".equals(obj.getKey()));
boolean foundFile2 = objects.stream().anyMatch(obj -> "file2.txt".equals(obj.getKey()));
assertTrue(foundFile1, "Should find file1.txt");
assertTrue(foundFile2, "Should find file2.txt");
MockEndpoint.assertIsSatisfied(context);
}
@Test
public void testListObjectsWithPrefix() throws Exception {
// Create test objects with different prefixes
template.send("direct:putObject", new Processor() {
@Override
public void process(Exchange exchange) {
exchange.getIn().setHeader(IBMCOSConstants.KEY, "prefix1/file1.txt");
exchange.getIn().setBody(new ByteArrayInputStream("Content 1".getBytes()));
}
});
template.send("direct:putObject", new Processor() {
@Override
public void process(Exchange exchange) {
exchange.getIn().setHeader(IBMCOSConstants.KEY, "prefix2/file2.txt");
exchange.getIn().setBody(new ByteArrayInputStream("Content 2".getBytes()));
}
});
// List objects with prefix
Exchange listExchange = template.request("direct:listObjects", new Processor() {
@Override
public void process(Exchange exchange) {
exchange.getIn().setHeader(IBMCOSConstants.PREFIX, "prefix1/");
}
});
List<S3ObjectSummary> objects = listExchange.getMessage().getBody(List.class);
assertNotNull(objects);
// Should only find objects with prefix1
for (S3ObjectSummary obj : objects) {
assertTrue(obj.getKey().startsWith("prefix1/"), "All objects should start with prefix1/");
}
}
@Test
public void testListBuckets() throws Exception {
// List all buckets
Exchange listExchange = template.request("direct:listBuckets", new Processor() {
@Override
public void process(Exchange exchange) {
// No headers needed
}
});
assertNotNull(listExchange);
assertNotNull(listExchange.getMessage().getBody());
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:putObject")
.to(buildEndpointUri("putObject"));
from("direct:listObjects")
.to(buildEndpointUri("listObjects"))
.to("mock:result");
from("direct:listBuckets")
.to(buildEndpointUri("listBuckets"))
.to("mock:result");
}
};
}
}
|
IBMCOSProducerListOperationsIT
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateLongAggregator.java
|
{
"start": 1612,
"end": 2640
}
|
class ____ {
public static LongIrateGroupingState initGrouping(DriverContext driverContext, boolean isDelta) {
return new LongIrateGroupingState(driverContext.bigArrays(), driverContext.breaker(), isDelta);
}
public static void combine(LongIrateGroupingState current, int groupId, long value, long timestamp) {
current.ensureCapacity(groupId);
current.append(groupId, timestamp, value);
}
public static String describe() {
return "instant change of longs";
}
public static void combineIntermediate(
LongIrateGroupingState current,
int groupId,
LongBlock timestamps,
LongBlock values,
int otherPosition
) {
current.combine(groupId, timestamps, values, otherPosition);
}
public static Block evaluateFinal(LongIrateGroupingState state, IntVector selected, GroupingAggregatorEvaluationContext evalContext) {
return state.evaluateFinal(selected, evalContext);
}
private static
|
IrateLongAggregator
|
java
|
apache__dubbo
|
dubbo-common/src/main/java/org/apache/dubbo/common/compiler/support/JavassistCompiler.java
|
{
"start": 2223,
"end": 4039
}
|
class
____ = EXTENDS_PATTERN.matcher(source);
if (matcher.find()) {
builder.setSuperClassName(matcher.group(1).trim());
}
// process implemented interfaces
matcher = IMPLEMENTS_PATTERN.matcher(source);
if (matcher.find()) {
String[] ifaces = matcher.group(1).trim().split("\\,");
Arrays.stream(ifaces).forEach(i -> builder.addInterface(i.trim()));
}
// process constructors, fields, methods
String body = source.substring(source.indexOf('{') + 1, source.length() - 1);
String[] methods = METHODS_PATTERN.split(body);
String className = ClassUtils.getSimpleClassName(name);
Arrays.stream(methods).map(String::trim).filter(m -> !m.isEmpty()).forEach(method -> {
if (method.startsWith(className)) {
builder.addConstructor("public " + method);
} else if (FIELD_PATTERN.matcher(method).matches()) {
builder.addField("private " + method);
} else {
builder.addMethod("public " + method);
}
});
// compile
CtClass cls = builder.build(classLoader);
ClassPool cp = cls.getClassPool();
if (classLoader == null) {
classLoader = cp.getClassLoader();
}
cp.insertClassPath(new LoaderClassPath(classLoader));
cp.insertClassPath(new DubboLoaderClassPath());
try {
return cp.toClass(cls, neighbor, classLoader, JavassistCompiler.class.getProtectionDomain());
} catch (Throwable t) {
if (!(t instanceof CannotCompileException)) {
return cp.toClass(cls, classLoader, JavassistCompiler.class.getProtectionDomain());
}
throw t;
}
}
}
|
matcher
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/processor/UnmarshalVariableTest.java
|
{
"start": 3756,
"end": 4180
}
|
class ____ extends ServiceSupport implements DataFormat {
@Override
public void marshal(Exchange exchange, Object graph, OutputStream stream) {
// noop
}
@Override
public Object unmarshal(Exchange exchange, InputStream stream) {
return "Bye " + exchange.getContext().getTypeConverter().convertTo(String.class, exchange, stream);
}
}
}
|
MyByeDataFormat
|
java
|
processing__processing4
|
core/src/processing/opengl/PGraphics2D.java
|
{
"start": 1123,
"end": 13770
}
|
class ____ extends PGraphicsOpenGL {
public PGraphics2D() {
super();
}
//////////////////////////////////////////////////////////////
// RENDERER SUPPORT QUERIES
@Override
public boolean is2D() {
return true;
}
@Override
public boolean is3D() {
return false;
}
//////////////////////////////////////////////////////////////
// HINTS
@Override
public void hint(int which) {
if (which == ENABLE_STROKE_PERSPECTIVE) {
showWarning("Strokes cannot be perspective-corrected in 2D.");
return;
}
super.hint(which);
}
//////////////////////////////////////////////////////////////
// PROJECTION
@Override
public void ortho() {
showMethodWarning("ortho");
}
@Override
public void ortho(float left, float right,
float bottom, float top) {
showMethodWarning("ortho");
}
@Override
public void ortho(float left, float right,
float bottom, float top,
float near, float far) {
showMethodWarning("ortho");
}
@Override
public void perspective() {
showMethodWarning("perspective");
}
@Override
public void perspective(float fov, float aspect, float zNear, float zFar) {
showMethodWarning("perspective");
}
@Override
public void frustum(float left, float right, float bottom, float top,
float znear, float zfar) {
showMethodWarning("frustum");
}
@Override
protected void defaultPerspective() {
super.ortho(0, width, -height, 0, -1, +1);
}
//////////////////////////////////////////////////////////////
// CAMERA
@Override
public void beginCamera() {
showMethodWarning("beginCamera");
}
@Override
public void endCamera() {
showMethodWarning("endCamera");
}
@Override
public void camera() {
showMethodWarning("camera");
}
@Override
public void camera(float eyeX, float eyeY, float eyeZ,
float centerX, float centerY, float centerZ,
float upX, float upY, float upZ) {
showMethodWarning("camera");
}
@Override
protected void defaultCamera() {
eyeDist = 1;
resetMatrix();
}
//////////////////////////////////////////////////////////////
// MATRIX MORE!
@Override
protected void begin2D() {
pushProjection();
defaultPerspective();
pushMatrix();
defaultCamera();
}
@Override
protected void end2D() {
popMatrix();
popProjection();
}
//////////////////////////////////////////////////////////////
// SHAPE
@Override
public void shape(PShape shape) {
if (shape.is2D()) {
super.shape(shape);
} else {
showWarning("The shape object is not 2D, cannot be displayed with " +
"this renderer");
}
}
@Override
public void shape(PShape shape, float x, float y) {
if (shape.is2D()) {
super.shape(shape, x, y);
} else {
showWarning("The shape object is not 2D, cannot be displayed with " +
"this renderer");
}
}
@Override
public void shape(PShape shape, float a, float b, float c, float d) {
if (shape.is2D()) {
super.shape(shape, a, b, c, d);
} else {
showWarning("The shape object is not 2D, cannot be displayed with " +
"this renderer");
}
}
@Override
public void shape(PShape shape, float x, float y, float z) {
showDepthWarningXYZ("shape");
}
@Override
public void shape(PShape shape, float x, float y, float z,
float c, float d, float e) {
showDepthWarningXYZ("shape");
}
//////////////////////////////////////////////////////////////
// SHAPE I/O
static protected boolean isSupportedExtension(String extension) {
return extension.equals("svg") || extension.equals("svgz");
}
static protected PShape loadShapeImpl(PGraphics pg,
String filename, String extension) {
if (extension.equals("svg") || extension.equals("svgz")) {
PShapeSVG svg = new PShapeSVG(pg.parent.loadXML(filename));
return PShapeOpenGL.createShape((PGraphicsOpenGL) pg, svg);
}
return null;
}
//////////////////////////////////////////////////////////////
// SCREEN TRANSFORMS
@Override
public float modelX(float x, float y, float z) {
showDepthWarning("modelX");
return 0;
}
@Override
public float modelY(float x, float y, float z) {
showDepthWarning("modelY");
return 0;
}
@Override
public float modelZ(float x, float y, float z) {
showDepthWarning("modelZ");
return 0;
}
//////////////////////////////////////////////////////////////
// SHAPE CREATION
// @Override
// protected PShape createShapeFamily(int type) {
// return new PShapeOpenGL(this, type);
// }
//
//
// @Override
// protected PShape createShapePrimitive(int kind, float... p) {
// return new PShapeOpenGL(this, kind, p);
// }
/*
@Override
public PShape createShape(PShape source) {
return PShapeOpenGL.createShape2D(this, source);
}
@Override
public PShape createShape() {
return createShape(PShape.GEOMETRY);
}
@Override
public PShape createShape(int type) {
return createShapeImpl(this, type);
}
@Override
public PShape createShape(int kind, float... p) {
return createShapeImpl(this, kind, p);
}
static protected PShapeOpenGL createShapeImpl(PGraphicsOpenGL pg, int type) {
PShapeOpenGL shape = null;
if (type == PConstants.GROUP) {
shape = new PShapeOpenGL(pg, PConstants.GROUP);
} else if (type == PShape.PATH) {
shape = new PShapeOpenGL(pg, PShape.PATH);
} else if (type == PShape.GEOMETRY) {
shape = new PShapeOpenGL(pg, PShape.GEOMETRY);
}
shape.set3D(false);
return shape;
}
static protected PShapeOpenGL createShapeImpl(PGraphicsOpenGL pg,
int kind, float... p) {
PShapeOpenGL shape = null;
int len = p.length;
if (kind == POINT) {
if (len != 2) {
showWarning("Wrong number of parameters");
return null;
}
shape = new PShapeOpenGL(pg, PShape.PRIMITIVE);
shape.setKind(POINT);
} else if (kind == LINE) {
if (len != 4) {
showWarning("Wrong number of parameters");
return null;
}
shape = new PShapeOpenGL(pg, PShape.PRIMITIVE);
shape.setKind(LINE);
} else if (kind == TRIANGLE) {
if (len != 6) {
showWarning("Wrong number of parameters");
return null;
}
shape = new PShapeOpenGL(pg, PShape.PRIMITIVE);
shape.setKind(TRIANGLE);
} else if (kind == QUAD) {
if (len != 8) {
showWarning("Wrong number of parameters");
return null;
}
shape = new PShapeOpenGL(pg, PShape.PRIMITIVE);
shape.setKind(QUAD);
} else if (kind == RECT) {
if (len != 4 && len != 5 && len != 8 && len != 9) {
showWarning("Wrong number of parameters");
return null;
}
shape = new PShapeOpenGL(pg, PShape.PRIMITIVE);
shape.setKind(RECT);
} else if (kind == ELLIPSE) {
if (len != 4 && len != 5) {
showWarning("Wrong number of parameters");
return null;
}
shape = new PShapeOpenGL(pg, PShape.PRIMITIVE);
shape.setKind(ELLIPSE);
} else if (kind == ARC) {
if (len != 6 && len != 7) {
showWarning("Wrong number of parameters");
return null;
}
shape = new PShapeOpenGL(pg, PShape.PRIMITIVE);
shape.setKind(ARC);
} else if (kind == BOX) {
showWarning("Primitive not supported in 2D");
} else if (kind == SPHERE) {
showWarning("Primitive not supported in 2D");
} else {
showWarning("Unrecognized primitive type");
}
if (shape != null) {
shape.setParams(p);
}
shape.set3D(false);
return shape;
}
*/
//////////////////////////////////////////////////////////////
// BEZIER VERTICES
// This renderer is 2D only: each override below rejects the z-taking
// variant of the vertex API and reports the misuse to the sketch author
// instead of silently dropping the z coordinate.
@Override
public void bezierVertex(float x2, float y2, float z2,
                         float x3, float y3, float z3,
                         float x4, float y4, float z4) {
  showDepthWarningXYZ("bezierVertex");
}
//////////////////////////////////////////////////////////////
// QUADRATIC BEZIER VERTICES
@Override
public void quadraticVertex(float x2, float y2, float z2,
                            float x4, float y4, float z4) {
  // NOTE(review): the warning names "quadVertex" while the method is
  // quadraticVertex — presumably the user-facing API name; confirm upstream.
  showDepthWarningXYZ("quadVertex");
}
//////////////////////////////////////////////////////////////
// CURVE VERTICES
@Override
public void curveVertex(float x, float y, float z) {
  showDepthWarningXYZ("curveVertex");
}
//////////////////////////////////////////////////////////////
// BOX
// 3D primitives are entirely unavailable in this renderer.
@Override
public void box(float w, float h, float d) {
  showMethodWarning("box");
}
//////////////////////////////////////////////////////////////
// SPHERE
@Override
public void sphere(float r) {
  showMethodWarning("sphere");
}
//////////////////////////////////////////////////////////////
// VERTEX SHAPES
@Override
public void vertex(float x, float y, float z) {
  showDepthWarningXYZ("vertex");
}
@Override
public void vertex(float x, float y, float z, float u, float v) {
  showDepthWarningXYZ("vertex");
}
//////////////////////////////////////////////////////////////
// MATRIX TRANSFORMATIONS
// Only the 2D transform variants are supported; the z-aware and
// full-3D-matrix forms warn and do nothing.
@Override
public void translate(float tx, float ty, float tz) {
  showDepthWarningXYZ("translate");
}
@Override
public void rotateX(float angle) {
  showDepthWarning("rotateX");
}
@Override
public void rotateY(float angle) {
  showDepthWarning("rotateY");
}
@Override
public void rotateZ(float angle) {
  showDepthWarning("rotateZ");
}
@Override
public void rotate(float angle, float vx, float vy, float vz) {
  // arbitrary-axis rotation has no 2D equivalent
  showVariationWarning("rotate");
}
@Override
public void applyMatrix(PMatrix3D source) {
  showVariationWarning("applyMatrix");
}
@Override
public void applyMatrix(float n00, float n01, float n02, float n03,
                        float n10, float n11, float n12, float n13,
                        float n20, float n21, float n22, float n23,
                        float n30, float n31, float n32, float n33) {
  showVariationWarning("applyMatrix");
}
@Override
public void scale(float sx, float sy, float sz) {
  showDepthWarningXYZ("scale");
}
//////////////////////////////////////////////////////////////
// SCREEN AND MODEL COORDS
// The z-taking screen-coordinate queries are unsupported; they warn and
// return 0 so sketches keep running rather than crash.
@Override
public float screenX(float x, float y, float z) {
  showDepthWarningXYZ("screenX");
  return 0;
}
@Override
public float screenY(float x, float y, float z) {
  showDepthWarningXYZ("screenY");
  return 0;
}
@Override
public float screenZ(float x, float y, float z) {
  showDepthWarningXYZ("screenZ");
  return 0;
}
@Override
public PMatrix3D getMatrix(PMatrix3D target) {
  // 3D matrix access is not applicable in 2D; target is returned untouched
  showVariationWarning("getMatrix");
  return target;
}
@Override
public void setMatrix(PMatrix3D source) {
  showVariationWarning("setMatrix");
}
//////////////////////////////////////////////////////////////
// LIGHTS
// Lighting requires a 3D renderer; every lighting call below warns and
// is otherwise a no-op.
@Override
public void lights() {
  showMethodWarning("lights");
}
@Override
public void noLights() {
  showMethodWarning("noLights");
}
@Override
public void ambientLight(float red, float green, float blue) {
  showMethodWarning("ambientLight");
}
@Override
public void ambientLight(float red, float green, float blue,
                         float x, float y, float z) {
  showMethodWarning("ambientLight");
}
@Override
public void directionalLight(float red, float green, float blue,
                             float nx, float ny, float nz) {
  showMethodWarning("directionalLight");
}
@Override
public void pointLight(float red, float green, float blue,
                       float x, float y, float z) {
  showMethodWarning("pointLight");
}
@Override
public void spotLight(float red, float green, float blue,
                      float x, float y, float z,
                      float nx, float ny, float nz,
                      float angle, float concentration) {
  showMethodWarning("spotLight");
}
@Override
public void lightFalloff(float constant, float linear, float quadratic) {
  showMethodWarning("lightFalloff");
}
@Override
public void lightSpecular(float v1, float v2, float v3) {
  showMethodWarning("lightSpecular");
}
}
|
PGraphics2D
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/cid/aggregated/SmokeTests.java
|
{
"start": 1087,
"end": 1880
}
|
class ____ {
@Test
public void simpleTest(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
session.createQuery( "select i from LineItem i" ).list();
}
);
}
@BeforeEach
public void createTestData(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
final Order order = new Order( 1, "123-abc" );
session.persist( order );
session.persist( new LineItem( order, 1, "xyz", 500 ) );
session.persist( new LineItem( order, 2, "tuv", 60 ) );
session.persist( new LineItem( order, 3, "def", 350 ) );
}
);
}
@AfterEach
public void cleanUpTestData(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
}
@Entity( name = "Order" )
@Table( name = "orders" )
public static
|
SmokeTests
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitSingleByteEvaluator.java
|
{
"start": 4274,
"end": 5128
}
|
class ____ implements EvalOperator.ExpressionEvaluator.Factory {
  // Generated factory: captures the evaluator configuration once and builds a
  // SplitSingleByteEvaluator bound to a specific DriverContext on demand.
  private final Source source;
  private final EvalOperator.ExpressionEvaluator.Factory str;
  private final byte delim;
  // Supplies a scratch BytesRef per driver, so drivers don't share a buffer.
  private final Function<DriverContext, BytesRef> scratch;
  public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory str, byte delim,
      Function<DriverContext, BytesRef> scratch) {
    this.source = source;
    this.str = str;
    this.delim = delim;
    this.scratch = scratch;
  }
  @Override
  public SplitSingleByteEvaluator get(DriverContext context) {
    return new SplitSingleByteEvaluator(source, str.get(context), delim, scratch.apply(context), context);
  }
  @Override
  public String toString() {
    return "SplitSingleByteEvaluator[" + "str=" + str + ", delim=" + delim + "]";
  }
}
}
|
Factory
|
java
|
jhy__jsoup
|
src/main/java/org/jsoup/internal/StringUtil.java
|
{
"start": 2335,
"end": 15528
}
|
class ____ {
    @Nullable StringBuilder sb = borrowBuilder(); // sets null on builder release so can't accidentally be reused
    final String separator;
    boolean first = true; // tracks whether a separator is needed before the next added item
    /**
     Create a new joiner, that uses the specified separator. MUST call {@link #complete()} or will leak a thread
     local string builder.
     @param separator the token to insert between strings
     */
    public StringJoiner(String separator) {
        this.separator = separator;
    }
    /**
     Add another item to the joiner, will be separated
     @param stringy item to add (appended via the builder's Object overload)
     @return this joiner, for chaining
     */
    public StringJoiner add(Object stringy) {
        Validate.notNull(sb); // don't reuse
        if (!first)
            sb.append(separator);
        sb.append(stringy);
        first = false;
        return this;
    }
    /**
     Append content to the current item; not separated
     @param stringy content to append to the item most recently added
     @return this joiner, for chaining
     */
    public StringJoiner append(Object stringy) {
        Validate.notNull(sb); // don't reuse
        sb.append(stringy);
        return this;
    }
    /**
     Return the joined string, and release the builder back to the pool. This joiner cannot be reused.
     @return the accumulated, joined string
     */
    public String complete() {
        String string = releaseBuilder(sb);
        sb = null; // poison: any further add/append fails the Validate.notNull check
        return string;
    }
}
/**
 * Returns space padding (up to the default max of 30). Use {@link #padding(int, int)} to specify a different limit.
 * @param width amount of padding desired
 * @return string of spaces * width
 * @see #padding(int, int)
 */
public static String padding(int width) {
    return padding(width, 30); // 30 is the default cap applied when no explicit limit is given
}
/**
 * Returns space padding, up to a max of maxPaddingWidth.
 * @param width amount of padding desired
 * @param maxPaddingWidth maximum padding to apply. Set to {@code -1} for unlimited.
 * @return string of spaces * width
 */
public static String padding(int width, int maxPaddingWidth) {
    Validate.isTrue(width >= 0, "width must be >= 0");
    Validate.isTrue(maxPaddingWidth >= -1);
    if (maxPaddingWidth != -1)
        width = Math.min(width, maxPaddingWidth);
    if (width < padding.length)
        return padding[width]; // small widths are served from the precomputed cache
    // wider than the cache: build the run of spaces directly
    char[] out = new char[width];
    Arrays.fill(out, ' ');
    return String.valueOf(out);
}
/**
 * Tests if a string is blank: null, empty, or only whitespace (" ", \r\n, \t, etc)
 * @param string string to test
 * @return if string is blank
 */
public static boolean isBlank(@Nullable String string) {
    if (string == null || string.isEmpty())
        return true;
    int l = string.length();
    for (int i = 0; i < l; i++) {
        // Steps one char at a time (not by code point). isWhitespace only matches
        // a handful of BMP chars, so surrogate halves never match and any
        // supplementary character correctly makes the string non-blank.
        if (!StringUtil.isWhitespace(string.codePointAt(i)))
            return false;
    }
    return true;
}
/**
 Tests if a string starts with a newline character
 @param string string to test
 @return if its first character is a newline
 */
public static boolean startsWithNewline(final String string) {
    // null and empty strings cannot start with anything
    return string != null && !string.isEmpty() && string.charAt(0) == '\n';
}
/**
 * Tests if a string is numeric, i.e. contains only ASCII digit characters
 * @param string string to test
 * @return true if only digit chars, false if empty or null or contains non-digit chars
 */
public static boolean isNumeric(String string) {
    if (string == null || string.isEmpty())
        return false;
    // scan back-to-front; any character outside '0'..'9' disqualifies the string
    for (int i = string.length() - 1; i >= 0; i--) {
        char c = string.charAt(i);
        if (c < '0' || c > '9')
            return false;
    }
    return true;
}
/**
 * Tests if a code point is "whitespace" as defined in the HTML spec. Used for output HTML.
 * @param c code point to test
 * @return true if code point is whitespace, false otherwise
 * @see #isActuallyWhitespace(int)
 */
public static boolean isWhitespace(int c){
    // exactly the five characters the HTML spec treats as whitespace
    switch (c) {
        case ' ':
        case '\t':
        case '\n':
        case '\f':
        case '\r':
            return true;
        default:
            return false;
    }
}
/**
 * Tests if a code point is "whitespace" as defined by what it looks like. Used for Element.text etc.
 * @param c code point to test
 * @return true if code point is whitespace, false otherwise
 */
public static boolean isActuallyWhitespace(int c){
    switch (c) {
        case ' ':
        case '\t':
        case '\n':
        case '\f':
        case '\r':
        case 160: // non-breaking space: not in the HTML spec, but visually whitespace
            return true;
        default:
            return false;
    }
}
/**
 Tests if a code point renders as invisible (soft hyphen or zero-width space).
 @param c code point to test
 @return true if the character is one of the two invisible code points
 */
public static boolean isInvisibleChar(int c) {
    return c == 173 || c == 8203; // soft hyphen, zero-width space
    // previously also included zw non join, zw join - but removing those breaks semantic meaning of text
}
/**
 * Normalise the whitespace within this string; multiple spaces collapse to a single, and all whitespace characters
 * (e.g. newline, tab) convert to a simple space.
 * @param string content to normalise
 * @return normalised string
 */
public static String normaliseWhitespace(String string) {
    // borrow a pooled builder, normalise into it, and release it
    // (releaseBuilder returns the built string as part of releasing)
    StringBuilder sb = StringUtil.borrowBuilder();
    appendNormalisedWhitespace(sb, string, false);
    return StringUtil.releaseBuilder(sb);
}
/**
 * After normalizing the whitespace within a string, appends it to a string builder.
 * @param accum builder to append to
 * @param string string to normalize whitespace within
 * @param stripLeading set to true if you wish to remove any leading whitespace
 */
public static void appendNormalisedWhitespace(StringBuilder accum, String string, boolean stripLeading) {
    boolean lastWasWhite = false;   // true while inside a whitespace run; collapses the run to one space
    boolean reachedNonWhite = false; // set once the first visible char is seen; gates stripLeading
    int len = string.length();
    int c;
    for (int i = 0; i < len; i+= Character.charCount(c)) { // advance by code point, not by char
        c = string.codePointAt(i);
        if (isActuallyWhitespace(c)) {
            if ((stripLeading && !reachedNonWhite) || lastWasWhite)
                continue; // drop leading whitespace; collapse interior runs to a single space
            accum.append(' ');
            lastWasWhite = true;
        }
        else if (!isInvisibleChar(c)) { // invisible chars (soft hyphen, zero-width space) are dropped entirely
            accum.appendCodePoint(c);
            lastWasWhite = false;
            reachedNonWhite = true;
        }
    }
}
/**
 Tests whether the needle equals any of the supplied candidate strings.
 @param needle value to look for
 @param haystack candidates to compare against
 @return true if any candidate equals the needle
 */
public static boolean in(final String needle, final String... haystack) {
    for (String candidate : haystack) {
        if (candidate.equals(needle))
            return true;
    }
    return false;
}
// Membership test via binary search. PRECONDITION: haystack must already be
// sorted in natural order, or the result is undefined.
public static boolean inSorted(String needle, String[] haystack) {
    return Arrays.binarySearch(haystack, needle) >= 0;
}
/**
 Tests that a String contains only ASCII characters.
 @param string scanned string
 @return true if all characters are in range 0 - 127
 */
public static boolean isAscii(String string) {
    Validate.notNull(string);
    final int length = string.length();
    for (int i = 0; i < length; i++) {
        if (string.charAt(i) > 127) // outside the 7-bit ASCII range
            return false;
    }
    return true;
}
private static final Pattern extraDotSegmentsPattern = Pattern.compile("^/(?>(?>\\.\\.?/)+)");
/**
 * Create a new absolute URL, from a provided existing absolute URL and a relative URL component.
 * @param base the existing absolute base URL
 * @param relUrl the relative URL to resolve. (If it's already absolute, it will be returned)
 * @return the resolved absolute URL
 * @throws MalformedURLException if an error occurred generating the URL
 */
public static URL resolve(URL base, String relUrl) throws MalformedURLException {
    // strip control chars first, to match how browsers parse URLs
    relUrl = stripControlChars(relUrl);
    // workaround: java resolves '//path/file + ?foo' to '//path/?foo', not '//path/file?foo' as desired
    if (relUrl.startsWith("?"))
        relUrl = base.getPath() + relUrl;
    // workaround: //example.com + ./foo = //example.com/./foo, not //example.com/foo
    URL url = new URL(base, relUrl);
    String fixedFile = extraDotSegmentsPattern.matcher(url.getFile()).replaceFirst("/");
    if (url.getRef() != null) {
        // the fragment is not part of getFile(), so re-append it after fixing the path
        fixedFile = fixedFile + "#" + url.getRef();
    }
    return new URL(url.getProtocol(), url.getHost(), url.getPort(), fixedFile);
}
/**
 * Create a new absolute URL, from a provided existing absolute URL and a relative URL component.
 * Never throws: any resolution failure collapses to the empty string.
 * @param baseUrl the existing absolute base URL
 * @param relUrl the relative URL to resolve. (If it's already absolute, it will be returned)
 * @return an absolute URL if one was able to be generated, or the empty string if not
 */
public static String resolve(String baseUrl, String relUrl) {
    // workaround: java will allow control chars in a path URL and may treat as relative, but Chrome / Firefox will strip and may see as a scheme. Normalize to browser's view.
    baseUrl = stripControlChars(baseUrl); relUrl = stripControlChars(relUrl);
    try {
        URL base;
        try {
            base = new URL(baseUrl);
        } catch (MalformedURLException e) {
            // the base is unsuitable, but the attribute/rel may be abs on its own, so try that
            URL abs = new URL(relUrl);
            return abs.toExternalForm();
        }
        return resolve(base, relUrl).toExternalForm();
    } catch (MalformedURLException e) {
        // it may still be valid, just that Java doesn't have a registered stream handler for it, e.g. tel
        // we test here vs at start to normalize supported URLs (e.g. HTTP -> http)
        return validUriScheme.matcher(relUrl).find() ? relUrl : "";
    }
}
private static final Pattern validUriScheme = Pattern.compile("^[a-zA-Z][a-zA-Z0-9+-.]*:");
private static final Pattern controlChars = Pattern.compile("[\\x00-\\x1f]*"); // matches ascii 0 - 31, to strip from url
// Removes ASCII control characters (0x00-0x1f), matching how browsers
// pre-process URLs before parsing.
private static String stripControlChars(final String input) {
    return controlChars.matcher(input).replaceAll("");
}
private static final int InitBuilderSize = 1024;
private static final int MaxBuilderSize = 8 * 1024;
private static final SoftPool<StringBuilder> BuilderPool = new SoftPool<>(
() -> new StringBuilder(InitBuilderSize));
/**
 * Maintains cached StringBuilders in a flyweight pattern, to minimize new StringBuilder GCs. The StringBuilder is
 * prevented from growing too large.
 * <p>
 * Care must be taken to release the builder once its work has been completed, with {@link #releaseBuilder}
 * @return an empty StringBuilder
 */
public static StringBuilder borrowBuilder() {
    // builders are emptied before being pooled, so a borrowed builder is always empty
    return BuilderPool.borrow();
}
/**
 * Release a borrowed builder. Care must be taken not to use the builder after it has been returned, as its
 * contents may be changed by this method, or by a concurrent thread.
 * @param sb the StringBuilder to release.
 * @return the string value of the released String Builder (as an incentive to release it!).
 */
public static String releaseBuilder(StringBuilder sb) {
    Validate.notNull(sb);
    String string = sb.toString(); // capture the value before the builder is cleared and pooled
    releaseBuilderVoid(sb);
    return string;
}
/**
 Releases a borrowed builder, but does not call .toString() on it. Useful in case you already have that string.
 @param sb the StringBuilder to release.
 @see #releaseBuilder(StringBuilder)
 */
public static void releaseBuilderVoid(StringBuilder sb) {
    // if it hasn't grown too big, reset it and return it to the pool:
    if (sb.length() <= MaxBuilderSize) {
        sb.delete(0, sb.length()); // make sure it's emptied on release
        BuilderPool.release(sb);
    }
    // oversized builders are simply dropped for GC, keeping pooled memory bounded
}
/**
 * Return a {@link Collector} similar to the one returned by {@link Collectors#joining(CharSequence)},
 * but backed by jsoup's {@link StringJoiner}, which allows for more efficient garbage collection.
 *
 * @param delimiter The delimiter for separating the strings.
 * @return A {@code Collector} which concatenates CharSequence elements, separated by the specified delimiter
 */
public static Collector<CharSequence, ?, String> joining(String delimiter) {
    return Collector.of(() -> new StringJoiner(delimiter),
        StringJoiner::add,
        (j1, j2) -> { // combiner (parallel streams only): fold j2's content into j1, releasing j2
            j1.append(j2.complete());
            return j1;
        },
        StringJoiner::complete);
}
/** Tests if the character is an ASCII letter (a-z or A-Z). */
public static boolean isAsciiLetter(char c) {
    return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z');
}
/** Tests if the character is an ASCII digit (0-9). */
public static boolean isDigit(char c) {
    return '0' <= c && c <= '9';
}
/** Tests if the character is an ASCII hexadecimal digit (0-9, a-f, A-F). */
public static boolean isHexDigit(char c) {
    return ('0' <= c && c <= '9') || ('a' <= c && c <= 'f') || ('A' <= c && c <= 'F');
}
}
|
StringJoiner
|
java
|
elastic__elasticsearch
|
x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichMetadataTests.java
|
{
"start": 858,
"end": 2768
}
|
class ____ extends AbstractChunkedSerializingTestCase<EnrichMetadata> {
    // Round-trip tests for EnrichMetadata: wire serialization plus XContent parsing.
    @Override
    protected EnrichMetadata doParseInstance(XContentParser parser) throws IOException {
        return EnrichMetadata.fromXContent(parser);
    }
    @Override
    protected EnrichMetadata createTestInstance() {
        return randomEnrichMetadata(randomFrom(XContentType.values()));
    }
    @Override
    protected EnrichMetadata mutateInstance(EnrichMetadata instance) {
        return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929
    }
    @Override
    protected EnrichMetadata createXContextTestInstance(XContentType xContentType) {
        return randomEnrichMetadata(xContentType);
    }
    // Builds metadata holding 8-64 randomly named random policies.
    private static EnrichMetadata randomEnrichMetadata(XContentType xContentType) {
        int numPolicies = randomIntBetween(8, 64);
        Map<String, EnrichPolicy> policies = Maps.newMapWithExpectedSize(numPolicies);
        for (int i = 0; i < numPolicies; i++) {
            EnrichPolicy policy = randomEnrichPolicy(xContentType);
            policies.put(randomAlphaOfLength(8), policy);
        }
        return new EnrichMetadata(policies);
    }
    @Override
    protected Writeable.Reader<EnrichMetadata> instanceReader() {
        return EnrichMetadata::new;
    }
    @Override
    protected void assertEqualInstances(EnrichMetadata expectedInstance, EnrichMetadata newInstance) {
        assertNotSame(expectedInstance, newInstance);
        // policies are compared field-by-field via the helper rather than relying on equals()
        assertThat(newInstance.getPolicies().size(), equalTo(expectedInstance.getPolicies().size()));
        for (Map.Entry<String, EnrichPolicy> entry : newInstance.getPolicies().entrySet()) {
            EnrichPolicy actual = entry.getValue();
            EnrichPolicy expected = expectedInstance.getPolicies().get(entry.getKey());
            EnrichPolicyTests.assertEqualPolicies(expected, actual);
        }
    }
}
|
EnrichMetadataTests
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/runc/RuncContainerExecutorConfig.java
|
{
"start": 24526,
"end": 25507
}
|
class ____ {
    // Immutable description of a single device rule.
    // NOTE(review): field names match an OCI runtime spec device cgroup entry
    // (allow/type/major/minor/access) — confirm against the consuming schema.
    final private boolean allow;  // whether access to the device is allowed
    final private String type;    // device type identifier
    final private long major;     // device major number
    final private long minor;     // device minor number
    final private String access;  // access mode string
    public boolean isAllow() {
        return allow;
    }
    public String getType() {
        return type;
    }
    public long getMajor() {
        return major;
    }
    public long getMinor() {
        return minor;
    }
    public String getAccess() {
        return access;
    }
    public Device(boolean allow, String type, long major,
        long minor, String access) {
        this.allow = allow;
        this.type = type;
        this.major = major;
        this.minor = minor;
        this.access = access;
    }
    // No-arg constructor with all-default values — presumably required for
    // JSON deserialization; verify against the serializer in use.
    public Device() {
        this(false, null, 0, 0, null);
    }
}
/**
* This
|
Device
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/records/RecordNullFiltering5418Test.java
|
{
"start": 497,
"end": 2499
}
|
class ____ extends DatabindTestUtil
{
    // Verifies that default property-inclusion settings (NON_ABSENT, NON_NULL)
    // are honored when serializing record components.
    record TestRecord(String subject, String body) {}
    @Test
    public void testNonAbsentInclusionViaDefaultConfig() throws Exception
    {
        // Configure mapper to exclude absent values (which includes nulls)
        ObjectMapper mapper = JsonMapper.builder()
                .changeDefaultPropertyInclusion(
                        incl -> JsonInclude.Value.construct(
                                JsonInclude.Include.NON_ABSENT,
                                JsonInclude.Include.NON_ABSENT))
                .build();
        // Should exclude null fields
        String json = mapper.writeValueAsString(new TestRecord("test subject", null));
        assertEquals(a2q("{'subject':'test subject'}"), json);
        // Both null
        json = mapper.writeValueAsString(new TestRecord(null, null));
        assertEquals("{}", json);
        // Both present
        json = mapper.writeValueAsString(new TestRecord("test subject", "test body"));
        assertEquals(a2q("{'subject':'test subject','body':'test body'}"), json);
    }
    @Test
    public void testNonNullInclusionViaDefaultConfig() throws Exception
    {
        // Configure mapper to exclude null values
        ObjectMapper mapper = JsonMapper.builder()
                .changeDefaultPropertyInclusion(
                        incl -> JsonInclude.Value.construct(
                                JsonInclude.Include.NON_NULL,
                                JsonInclude.Include.NON_NULL))
                .build();
        // Should exclude null fields
        String json = mapper.writeValueAsString(new TestRecord("test subject", null));
        assertEquals(a2q("{'subject':'test subject'}"), json);
        // Both null
        json = mapper.writeValueAsString(new TestRecord(null, null));
        assertEquals("{}", json);
        // Both present
        json = mapper.writeValueAsString(new TestRecord("test subject", "test body"));
        assertEquals(a2q("{'subject':'test subject','body':'test body'}"), json);
    }
}
|
RecordNullFiltering5418Test
|
java
|
apache__camel
|
dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/KafkaComponentBuilderFactory.java
|
{
"start": 51283,
"end": 58457
}
|
class ____ keys (defaults to the same as for messages
* if nothing is given).
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: org.apache.kafka.common.serialization.StringSerializer
* Group: producer
*
* @param keySerializer the value to set
* @return the dsl builder
*/
default KafkaComponentBuilder keySerializer(java.lang.String keySerializer) {
doSetProperty("keySerializer", keySerializer);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default KafkaComponentBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* The producer groups together any records that arrive in between
* request transmissions into a single, batched, request. Normally, this
* occurs only under load when records arrive faster than they can be
* sent out. However, in some circumstances, the client may want to
* reduce the number of requests even under a moderate load. This
* setting achieves this by adding a small amount of artificial delay.
* That is, rather than immediately sending out a record, the producer
* will wait for up to the given delay to allow other records to be sent
* so that they can be batched together. This can be thought of as
* analogous to Nagle's algorithm in TCP. This setting gives the upper
* bound on the delay for batching: once we get batch.size worth of
* records for a partition, it will be sent immediately regardless of
* this setting, however, if we have fewer than this many bytes
* accumulated for this partition, we will 'linger' for the specified
* time waiting for more records to show up. This setting defaults to 0
* (i.e., no delay). Setting linger.ms=5, for example, would have the
* effect of reducing the number of requests sent but would add up to
* 5ms of latency to records sent in the absence of load.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Group: producer
*
* @param lingerMs the value to set
* @return the dsl builder
*/
default KafkaComponentBuilder lingerMs(java.lang.Integer lingerMs) {
doSetProperty("lingerMs", lingerMs);
return this;
}
/**
* The configuration controls how long the KafkaProducer's send(),
* partitionsFor(), initTransactions(), sendOffsetsToTransaction(),
* commitTransaction() and abortTransaction() methods will block. For
* send() this timeout bounds the total time waiting for both metadata
* fetch and buffer allocation (blocking in the user-supplied
* serializers or partitioner is not counted against this timeout). For
* partitionsFor() this time out bounds the time spent waiting for
* metadata if it is unavailable. The transaction-related methods always
* block, but may time out if the transaction coordinator could not be
* discovered or did not respond within the timeout.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Default: 60000
* Group: producer
*
* @param maxBlockMs the value to set
* @return the dsl builder
*/
default KafkaComponentBuilder maxBlockMs(java.lang.Integer maxBlockMs) {
doSetProperty("maxBlockMs", maxBlockMs);
return this;
}
/**
* The maximum number of unacknowledged requests the client will send on
* a single connection before blocking. Note that if this setting is set
* to be greater than 1 and there are failed sends, there is a risk of
* message re-ordering due to retries (i.e., if retries are enabled).
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Default: 5
* Group: producer
*
* @param maxInFlightRequest the value to set
* @return the dsl builder
*/
default KafkaComponentBuilder maxInFlightRequest(java.lang.Integer maxInFlightRequest) {
doSetProperty("maxInFlightRequest", maxInFlightRequest);
return this;
}
/**
* The maximum size of a request. This is also effectively a cap on the
* maximum record size. Note that the server has its own cap on record
* size which may be different from this. This setting will limit the
* number of record batches the producer will send in a single request
* to avoid sending huge requests.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Default: 1048576
* Group: producer
*
* @param maxRequestSize the value to set
* @return the dsl builder
*/
default KafkaComponentBuilder maxRequestSize(java.lang.Integer maxRequestSize) {
doSetProperty("maxRequestSize", maxRequestSize);
return this;
}
/**
* The period of time in milliseconds after which we force a refresh of
* metadata even if we haven't seen any partition leadership changes to
* proactively discover any new brokers or partitions.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Default: 300000
* Group: producer
*
* @param metadataMaxAgeMs the value to set
* @return the dsl builder
*/
default KafkaComponentBuilder metadataMaxAgeMs(java.lang.Integer metadataMaxAgeMs) {
doSetProperty("metadataMaxAgeMs", metadataMaxAgeMs);
return this;
}
/**
* A list of classes to use as metrics reporters. Implementing the
* MetricReporter
|
for
|
java
|
apache__flink
|
flink-table/flink-table-api-java/src/test/java/org/apache/flink/table/utils/FunctionLookupMock.java
|
{
"start": 1474,
"end": 2208
}
|
class ____ only a subset of builtin
* functions because those functions still depend on planner expressions for argument validation and
* type inference. Supported builtin functions are:
*
* <ul>
* <li>BuiltinFunctionDefinitions.EQUALS
* <li>BuiltinFunctionDefinitions.IS_NULL
* </ul>
*
* <p>Pseudo functions that are executed during expression resolution e.g.:
*
* <ul>
* <li>BuiltinFunctionDefinitions.WITH_COLUMNS
* <li>BuiltinFunctionDefinitions.WITHOUT_COLUMNS
* <li>BuiltinFunctionDefinitions.RANGE_TO
* <li>BuiltinFunctionDefinitions.FLATTEN
* </ul>
*
* <p>Built-in functions that use the Flink's type inference stack:
*
* <ul>
* <li>BuiltinFunctionDefinitions.ROW
* </ul>
*
* <p>This
|
supports
|
java
|
grpc__grpc-java
|
okhttp/third_party/okhttp/main/java/io/grpc/okhttp/internal/Platform.java
|
{
"start": 9191,
"end": 9534
}
|
class ____ in Android 5.0.
return true;
} catch (ClassNotFoundException e) {
logger.log(Level.FINE, "Can't find class", e);
}
return false;
}
private static boolean isAtLeastAndroid41() {
try {
Platform.class
.getClassLoader()
.loadClass("android.app.ActivityOptions"); // Arbitrary
|
added
|
java
|
netty__netty
|
codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerProtocolHandler.java
|
{
"start": 2543,
"end": 2705
}
|
class ____ extends WebSocketProtocolHandler {
/**
* Events that are fired to notify about handshake status
*/
public
|
WebSocketServerProtocolHandler
|
java
|
netty__netty
|
handler/src/test/java/io/netty/handler/ipfilter/UniqueIpFilterTest.java
|
{
"start": 1249,
"end": 2983
}
|
class ____ {
    @Test
    public void testUniqueIpFilterHandler() throws ExecutionException, InterruptedException {
        // Two channels from the same remote IP race through one UniqueIpFilter;
        // exactly one must stay active. Repeated many rounds to surface races,
        // with a barrier forcing both registrations to start together.
        final CyclicBarrier barrier = new CyclicBarrier(2);
        ExecutorService executorService = Executors.newFixedThreadPool(2);
        try {
            for (int round = 0; round < 10000; round++) {
                final UniqueIpFilter ipFilter = new UniqueIpFilter();
                Future<EmbeddedChannel> future1 = newChannelAsync(barrier, executorService, ipFilter);
                Future<EmbeddedChannel> future2 = newChannelAsync(barrier, executorService, ipFilter);
                EmbeddedChannel ch1 = future1.get();
                EmbeddedChannel ch2 = future2.get();
                // exactly one of the two channels may be accepted
                assertTrue(ch1.isActive() || ch2.isActive());
                assertFalse(ch1.isActive() && ch2.isActive());
                barrier.reset();
                ch1.close().await();
                ch2.close().await();
            }
        } finally {
            executorService.shutdown();
        }
    }
    // Creates a channel on the executor, parked on the barrier so both channels
    // hit the filter at (nearly) the same instant. All channels report the same
    // fixed remote address, which is what the filter de-duplicates on.
    private static Future<EmbeddedChannel> newChannelAsync(final CyclicBarrier barrier,
                                                           ExecutorService executorService,
                                                           final ChannelHandler... handler) {
        return executorService.submit(new Callable<EmbeddedChannel>() {
            @Override
            public EmbeddedChannel call() throws Exception {
                barrier.await();
                return new EmbeddedChannel(handler) {
                    @Override
                    protected SocketAddress remoteAddress0() {
                        return isActive() ? SocketUtils.socketAddress("91.92.93.1", 5421) : null;
                    }
                };
            }
        });
    }
}
|
UniqueIpFilterTest
|
java
|
apache__camel
|
components/camel-jsch/src/main/java/org/apache/camel/component/scp/ScpConfiguration.java
|
{
"start": 1100,
"end": 6364
}
|
class ____ extends RemoteFileConfiguration {
    public static final int DEFAULT_SFTP_PORT = 22;
    public static final String DEFAULT_MOD = "664";
    @UriParam(label = "security", defaultValue = "true")
    private boolean useUserKnownHostsFile = true;
    @UriParam(label = "security", secret = true)
    @Metadata(supportFileReference = true)
    private String knownHostsFile;
    @UriParam(label = "security", secret = true)
    private String privateKeyFile;
    @UriParam(label = "security", secret = true)
    private byte[] privateKeyBytes;
    @UriParam(label = "security", secret = true)
    private String privateKeyFilePassphrase;
    @UriParam(enums = "no,yes", defaultValue = "no")
    private String strictHostKeyChecking = "no";
    @UriParam(defaultValue = DEFAULT_MOD)
    private String chmod = DEFAULT_MOD;
    // comma separated list of ciphers.
    // null means default jsch list will be used
    @UriParam(label = "security,advanced")
    private String ciphers;
    @UriParam(label = "security", secret = true)
    private String preferredAuthentications;

    public ScpConfiguration() {
        setProtocol("scp");
    }

    public ScpConfiguration(URI uri) {
        super(uri);
    }

    @Override
    protected void setDefaultPort() {
        setPort(DEFAULT_SFTP_PORT);
    }

    public String getKnownHostsFile() {
        return knownHostsFile;
    }

    /**
     * Sets the known_hosts file, so that the jsch endpoint can do host key verification. You can prefix with classpath:
     * to load the file from classpath instead of file system.
     */
    public void setKnownHostsFile(String knownHostsFile) {
        this.knownHostsFile = knownHostsFile;
    }

    public boolean isUseUserKnownHostsFile() {
        return useUserKnownHostsFile;
    }

    /**
     * If knownHostFile has not been explicit configured, then use the host file from System.getProperty("user.home") +
     * "/.ssh/known_hosts"
     */
    public void setUseUserKnownHostsFile(boolean useUserKnownHostsFile) {
        this.useUserKnownHostsFile = useUserKnownHostsFile;
    }

    public String getPrivateKeyFile() {
        return privateKeyFile;
    }

    /**
     * Set the private key file to that the endpoint can do private key verification. You can prefix with classpath: to
     * load the file from classpath instead of file system.
     */
    public void setPrivateKeyFile(String privateKeyFile) {
        this.privateKeyFile = privateKeyFile;
    }

    public byte[] getPrivateKeyBytes() {
        return privateKeyBytes;
    }

    /**
     * Set the private key bytes to that the endpoint can do private key verification. This must be used only if
     * privateKeyFile wasn't set. Otherwise the file will have the priority.
     */
    public void setPrivateKeyBytes(byte[] privateKeyBytes) {
        this.privateKeyBytes = privateKeyBytes;
    }

    public String getPrivateKeyFilePassphrase() {
        return privateKeyFilePassphrase;
    }

    /**
     * Set the private key file passphrase to that the endpoint can do private key verification.
     */
    public void setPrivateKeyFilePassphrase(String privateKeyFilePassphrase) {
        this.privateKeyFilePassphrase = privateKeyFilePassphrase;
    }

    public String getStrictHostKeyChecking() {
        return strictHostKeyChecking;
    }

    /**
     * Sets whether to use strict host key checking. Possible values are: no, yes
     */
    public void setStrictHostKeyChecking(String strictHostKeyChecking) {
        this.strictHostKeyChecking = strictHostKeyChecking;
    }

    /**
     * Allows you to set chmod on the stored file. For example chmod=664. Any value that is not exactly three octal
     * digits falls back to the default mode (664); the invalid value is dropped silently.
     */
    public void setChmod(String chmod) {
        this.chmod = isValidChmod(chmod) ? chmod : DEFAULT_MOD;
    }

    // Returns true when the given mode is exactly three octal digits ('0'-'7').
    // Uses charAt rather than getBytes() so no platform-charset decoding is
    // involved, and tolerates null (treated as invalid instead of throwing NPE).
    private static boolean isValidChmod(String chmod) {
        if (chmod == null || chmod.length() != 3) {
            return false;
        }
        for (int i = 0; i < 3; i++) {
            char c = chmod.charAt(i);
            if (c < '0' || c > '7') {
                return false;
            }
        }
        return true;
    }

    public String getChmod() {
        return chmod;
    }

    /**
     * Set a comma separated list of ciphers that will be used in order of preference. Possible cipher names are defined
     * by JCraft JSCH. Some examples include:
     * aes128-ctr,aes128-cbc,3des-ctr,3des-cbc,blowfish-cbc,aes192-cbc,aes256-cbc. If not specified the default list
     * from JSCH will be used.
     */
    public void setCiphers(String ciphers) {
        this.ciphers = ciphers;
    }

    public String getCiphers() {
        return ciphers;
    }

    /**
     * Set a comma separated list of authentications that will be used in order of preference. Possible authentication
     * methods are defined by JCraft JSCH. Some examples include:
     * gssapi-with-mic,publickey,keyboard-interactive,password If not specified the JSCH and/or system defaults will be
     * used.
     */
    public void setPreferredAuthentications(final String preferredAuthentications) {
        this.preferredAuthentications = preferredAuthentications;
    }

    public String getPreferredAuthentications() {
        return preferredAuthentications;
    }
}
|
ScpConfiguration
|
java
|
apache__kafka
|
clients/src/main/java/org/apache/kafka/server/telemetry/ClientTelemetry.java
|
{
"start": 1407,
"end": 1722
}
|
interface ____ {
/**
* Called by the broker to fetch instance of {@link ClientTelemetryReceiver}.
* <p>
* This instance may be cached by the broker.
*
* @return broker side instance of {@link ClientTelemetryReceiver}.
*/
ClientTelemetryReceiver clientReceiver();
}
|
ClientTelemetry
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/test/benchmark/BenchmarkMain_EishayEncode.java
|
{
"start": 351,
"end": 1741
}
|
class ____ {
public static void main(String[] args) throws Exception {
System.out.println(System.getProperty("java.vm.name") + " " + System.getProperty("java.runtime.version"));
List<String> arguments = ManagementFactory.getRuntimeMXBean().getInputArguments();
System.out.println(arguments);
MediaContent content = EishayDecodeBytes.instance.getContent();
String text = encode(content);
System.out.println(text);
for (int i = 0; i < 10; ++i) {
perf(text);
}
}
static long perf(Object obj) {
long startYGC = TestUtils.getYoungGC();
long startYGCTime = TestUtils.getYoungGCTime();
long startFGC = TestUtils.getFullGC();
long startMillis = System.currentTimeMillis();
for (int i = 0; i < 1000 * 1000; ++i) {
encode(obj);
}
long millis = System.currentTimeMillis() - startMillis;
long ygc = TestUtils.getYoungGC() - startYGC;
long ygct = TestUtils.getYoungGCTime() - startYGCTime;
long fgc = TestUtils.getFullGC() - startFGC;
System.out.println("encode\t" + millis + ", ygc " + ygc + ", ygct " + ygct + ", fgc " + fgc);
return millis;
}
static String encode(Object text) {
return JSON.toJSONString(text, SerializerFeature.BeanToArray);
}
}
|
BenchmarkMain_EishayEncode
|
java
|
apache__logging-log4j2
|
log4j-core-test/src/test/java/org/apache/logging/log4j/core/async/AsyncLoggerNanoTimeTest.java
|
{
"start": 1830,
"end": 4264
}
|
class ____ {
@BeforeAll
static void beforeClass() {
System.setProperty(Constants.LOG4J_CONTEXT_SELECTOR, AsyncLoggerContextSelector.class.getName());
System.setProperty(ConfigurationFactory.CONFIGURATION_FILE_PROPERTY, "NanoTimeToFileTest.xml");
}
@AfterAll
static void afterClass() {
System.setProperty(Constants.LOG4J_CONTEXT_SELECTOR, Strings.EMPTY);
}
@Test
void testAsyncLogUsesNanoTimeClock() throws Exception {
final File file = new File("target", "NanoTimeToFileTest.log");
// System.out.println(f.getAbsolutePath());
file.delete();
final AsyncLogger log = (AsyncLogger) LogManager.getLogger("com.foo.Bar");
final long before = System.nanoTime();
log.info("Use actual System.nanoTime()");
assertInstanceOf(SystemNanoClock.class, log.getNanoClock(), "using SystemNanoClock");
final long DUMMYNANOTIME = -53;
log.getContext().getConfiguration().setNanoClock(new DummyNanoClock(DUMMYNANOTIME));
log.updateConfiguration(log.getContext().getConfiguration());
// trigger a new nano clock lookup
log.updateConfiguration(log.getContext().getConfiguration());
log.info("Use dummy nano clock");
assertInstanceOf(DummyNanoClock.class, log.getNanoClock(), "using SystemNanoClock");
CoreLoggerContexts.stopLoggerContext(file); // stop async thread
final BufferedReader reader = new BufferedReader(new FileReader(file));
final String line1 = reader.readLine();
final String line2 = reader.readLine();
// System.out.println(line1);
// System.out.println(line2);
reader.close();
file.delete();
assertNotNull(line1, "line1");
assertNotNull(line2, "line2");
final String[] line1Parts = line1.split(" AND ");
assertEquals("Use actual System.nanoTime()", line1Parts[2]);
assertEquals(line1Parts[0], line1Parts[1]);
final long loggedNanoTime = Long.parseLong(line1Parts[0]);
assertTrue(loggedNanoTime - before < TimeUnit.SECONDS.toNanos(1), "used system nano time");
final String[] line2Parts = line2.split(" AND ");
assertEquals("Use dummy nano clock", line2Parts[2]);
assertEquals(String.valueOf(DUMMYNANOTIME), line2Parts[0]);
assertEquals(String.valueOf(DUMMYNANOTIME), line2Parts[1]);
}
}
|
AsyncLoggerNanoTimeTest
|
java
|
spring-projects__spring-framework
|
spring-beans/src/test/java/org/springframework/beans/factory/annotation/AutowiredAnnotationBeanPostProcessorTests.java
|
{
"start": 171421,
"end": 171510
}
|
interface ____<E> extends Set<E> {
}
@SuppressWarnings("serial")
public static
|
CustomSet
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/streams/TestStreamFactories.java
|
{
"start": 11527,
"end": 11914
}
|
class ____ implements ObjectInputStreamFactory.StreamFactoryCallbacks {
@Override
public S3Client getOrCreateSyncClient() throws IOException {
throw new UnsupportedOperationException("not implemented");
}
@Override
public void incrementFactoryStatistic(Statistic statistic) {
throw new UnsupportedOperationException("not implemented");
}
}
}
|
Callbacks
|
java
|
spring-projects__spring-security
|
config/src/test/java/org/springframework/security/config/annotation/web/configurers/NamespaceHttpFirewallTests.java
|
{
"start": 3585,
"end": 3933
}
|
class ____ extends DefaultHttpFirewall {
@Override
public FirewalledRequest getFirewalledRequest(HttpServletRequest request) throws RequestRejectedException {
if (request.getParameter("deny") != null) {
throw new RequestRejectedException("custom rejection");
}
return super.getFirewalledRequest(request);
}
}
}
|
CustomHttpFirewall
|
java
|
apache__camel
|
components/camel-google/camel-google-sheets/src/main/java/org/apache/camel/component/google/sheets/internal/GoogleSheetsPropertiesHelper.java
|
{
"start": 1232,
"end": 1938
}
|
class ____ extends ApiMethodPropertiesHelper<GoogleSheetsConfiguration> {
private static final Lock LOCK = new ReentrantLock();
private static GoogleSheetsPropertiesHelper helper;
private GoogleSheetsPropertiesHelper(CamelContext context) {
super(context, GoogleSheetsConfiguration.class, GoogleSheetsConstants.PROPERTY_PREFIX);
}
public static GoogleSheetsPropertiesHelper getHelper(CamelContext context) {
LOCK.lock();
try {
if (helper == null) {
helper = new GoogleSheetsPropertiesHelper(context);
}
return helper;
} finally {
LOCK.unlock();
}
}
}
|
GoogleSheetsPropertiesHelper
|
java
|
spring-projects__spring-framework
|
spring-webflux/src/main/java/org/springframework/web/reactive/accept/ApiVersionStrategy.java
|
{
"start": 1133,
"end": 3487
}
|
interface ____ {
/**
* Resolve the version value from a request, e.g. from a request header.
* @param exchange the current exchange
* @return the version, if present or {@code null}
* @see ApiVersionResolver
*/
@Nullable
String resolveVersion(ServerWebExchange exchange);
/**
* Parse the version of a request into an Object.
* @param version the value to parse
* @return an Object that represents the version
* @see org.springframework.web.accept.ApiVersionParser
*/
Comparable<?> parseVersion(String version);
/**
* Validate a request version, including required and supported version checks.
* @param requestVersion the version to validate
* @param exchange the exchange
* @throws MissingApiVersionException if the version is required, but not specified
* @throws InvalidApiVersionException if the version is not supported
*/
void validateVersion(@Nullable Comparable<?> requestVersion, ServerWebExchange exchange)
throws MissingApiVersionException, InvalidApiVersionException;
/**
* Return a default version to use for requests that don't specify one.
*/
@Nullable Comparable<?> getDefaultVersion();
/**
* Convenience method to return the parsed and validated request version,
* or the default version if configured.
* @param exchange the current exchange
* @return the parsed request version, or the default version
*/
default @Nullable Comparable<?> resolveParseAndValidateVersion(ServerWebExchange exchange) {
String value = resolveVersion(exchange);
Comparable<?> version;
if (value == null) {
version = getDefaultVersion();
}
else {
try {
version = parseVersion(value);
}
catch (Exception ex) {
throw new InvalidApiVersionException(value, null, ex);
}
}
validateVersion(version, exchange);
return version;
}
/**
* Check if the requested API version is deprecated, and if so handle it
* accordingly, e.g. by setting response headers to signal the deprecation,
* to specify relevant dates and provide links to further details.
* @param version the resolved and parsed request version
* @param handler the handler chosen for the exchange
* @param exchange the current exchange
* @see ApiVersionDeprecationHandler
*/
void handleDeprecations(Comparable<?> version, Object handler, ServerWebExchange exchange);
}
|
ApiVersionStrategy
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/FullBindingGraphValidationTest.java
|
{
"start": 10150,
"end": 12874
}
|
interface ____ {",
" @BindsInstance Builder object(Object object);",
" SubcomponentWithErrors build();",
" }",
"}");
// Make sure the error doesn't show other bindings or a dependency trace afterwards.
private static final Pattern SUBCOMPONENT_WITH_ERRORS_MESSAGE =
endsWithMessage(
"\033[1;31m[Dagger/DuplicateBindings]\033[0m Object is bound multiple times:",
" @Binds Object AModule.object(String)",
" @BindsInstance SubcomponentWithErrors.Builder"
+ " SubcomponentWithErrors.Builder.object(Object)",
" in component: [SubcomponentWithErrors]",
"",
"======================",
"Full classname legend:",
"======================",
"AModule: test.AModule",
"SubcomponentWithErrors: test.SubcomponentWithErrors",
"========================",
"End of classname legend:",
"========================");
private static final Pattern MODULE_WITH_SUBCOMPONENT_WITH_ERRORS_MESSAGE =
endsWithMessage(
"\033[1;31m[Dagger/DuplicateBindings]\033[0m Object is bound multiple times:",
" @Binds Object AModule.object(String)",
" @BindsInstance SubcomponentWithErrors.Builder"
+ " SubcomponentWithErrors.Builder.object(Object)",
" in component: [ModuleWithSubcomponentWithErrors → SubcomponentWithErrors]",
"",
"======================",
"Full classname legend:",
"======================",
"AModule: test.AModule",
"ModuleWithSubcomponentWithErrors: test.ModuleWithSubcomponentWithErrors",
"SubcomponentWithErrors: test.SubcomponentWithErrors",
"========================",
"End of classname legend:",
"========================");
@Test
public void subcomponentWithErrors_validationTypeNone() {
CompilerTests.daggerCompiler(SUBCOMPONENT_WITH_ERRORS, A_MODULE)
.compile(
subject -> {
subject.hasErrorCount(0);
subject.hasWarningCount(0);
});
}
@Test
public void subcomponentWithErrors_validationTypeError() {
CompilerTests.daggerCompiler(SUBCOMPONENT_WITH_ERRORS, A_MODULE)
.withProcessingOptions(ImmutableMap.of("dagger.fullBindingGraphValidation", "ERROR"))
.compile(
subject -> {
subject.hasErrorCount(1);
subject.hasErrorContainingMatch(SUBCOMPONENT_WITH_ERRORS_MESSAGE.pattern())
.onSource(SUBCOMPONENT_WITH_ERRORS)
.onLineContaining("
|
Builder
|
java
|
apache__camel
|
dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/CoapComponentBuilderFactory.java
|
{
"start": 1407,
"end": 1889
}
|
interface ____ {
/**
* CoAP (camel-coap)
* Send and receive messages to/from CoAP (Constrained Application Protocol)
* capable devices.
*
* Category: iot
* Since: 2.16
* Maven coordinates: org.apache.camel:camel-coap
*
* @return the dsl builder
*/
static CoapComponentBuilder coap() {
return new CoapComponentBuilderImpl();
}
/**
* Builder for the CoAP component.
*/
|
CoapComponentBuilderFactory
|
java
|
spring-projects__spring-framework
|
spring-core/src/main/java/org/springframework/util/ClassUtils.java
|
{
"start": 33496,
"end": 33669
}
|
class ____ the given interfaces.
* @param interfaces the interfaces to merge
* @param classLoader the ClassLoader to create the composite Class in
* @return the merged
|
for
|
java
|
apache__camel
|
components/camel-ftp/src/test/java/org/apache/camel/component/file/remote/FtpSoTimeoutTest.java
|
{
"start": 1649,
"end": 4950
}
|
class ____ extends CamelTestSupport {
private ServerSocket serverSocket;
// --- Set up
@Override
public void doPreSetup() throws Exception {
// the created server socket makes it possible for the FTP client to
// establish the socket connection.
// However, no message will ever be sent back, thus a read timeout
// should occur within FTPClient#__getReply()
serverSocket = new ServerSocket(0);
}
@Override
public void doPostTearDown() throws Exception {
if (serverSocket != null) {
serverSocket.close();
}
}
@Override
protected int getShutdownTimeout() {
return 5; // speedup graceful shutdown
}
@Override
protected RoutesBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:with").to("ftp://localhost:" + serverSocket.getLocalPort()
+ "?ftpClient=#myftpclient&connectTimeout=300&soTimeout=300&reconnectDelay=100");
from("direct:without").to("ftp://localhost:" + serverSocket.getLocalPort()
+ "?connectTimeout=300&soTimeout=300&reconnectDelay=100");
// using soTimeout=0 could potentially cause the ftp producer to dead-lock doing endless reconnection attempts
// this is a test to ensure we have fixed that; see CAMEL-8088
from("direct:soTimeoutZero").to("ftp://localhost:" + serverSocket.getLocalPort()
+ "?connectTimeout=300&soTimeout=0")
.to("mock:done")
.errorHandler(deadLetterChannel("mock:dead"));
}
};
}
@BindToRegistry("myftpclient")
public FTPClient createFtpClient() {
FTPClient ftpClient = new FTPClient();
ftpClient.setDefaultTimeout(300);
return ftpClient;
}
// --- Tests
@Test
@Timeout(value = 10, unit = TimeUnit.SECONDS)
public void testWithDefaultTimeout() {
assertThrows(CamelExecutionException.class, () -> {
// send exchange to the route using the custom FTPClient (with a
// default timeout)
// the soTimeout triggers in time and test is successful
template.sendBody("direct:with", "");
});
}
@Test
@Timeout(value = 10, unit = TimeUnit.SECONDS)
public void testWithoutDefaultTimeout() {
assertThrows(CamelExecutionException.class, () -> {
// send exchange to the route using the default FTPClient (without a
// default timeout)
// the soTimeout never triggers and test fails after its own timeout
template.sendBody("direct:without", "");
});
}
@Test
void testReConnectDeadlock() throws Exception {
// we should fail, but we are testing that we are not in a deadlock which could potentially happen
getMockEndpoint("mock:done").expectedMessageCount(0);
getMockEndpoint("mock:dead").expectedMessageCount(1);
template.sendBody("direct:soTimeoutZero", "test");
MockEndpoint.assertIsSatisfied(context);
}
}
|
FtpSoTimeoutTest
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/MinioEndpointBuilderFactory.java
|
{
"start": 54009,
"end": 62421
}
|
interface ____
extends
EndpointConsumerBuilder {
default MinioEndpointConsumerBuilder basic() {
return (MinioEndpointConsumerBuilder) this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedMinioEndpointConsumerBuilder bridgeErrorHandler(boolean bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedMinioEndpointConsumerBuilder bridgeErrorHandler(String bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option is a: <code>org.apache.camel.spi.ExceptionHandler</code>
* type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedMinioEndpointConsumerBuilder exceptionHandler(org.apache.camel.spi.ExceptionHandler exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option will be converted to a
* <code>org.apache.camel.spi.ExceptionHandler</code> type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedMinioEndpointConsumerBuilder exceptionHandler(String exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option is a: <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedMinioEndpointConsumerBuilder exchangePattern(org.apache.camel.ExchangePattern exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option will be converted to a
* <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedMinioEndpointConsumerBuilder exchangePattern(String exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
/**
* A pluggable org.apache.camel.PollingConsumerPollingStrategy allowing
* you to provide your custom implementation to control error handling
* usually occurred during the poll operation before an Exchange have
* been created and being routed in Camel.
*
* The option is a:
* <code>org.apache.camel.spi.PollingConsumerPollStrategy</code> type.
*
* Group: consumer (advanced)
*
* @param pollStrategy the value to set
* @return the dsl builder
*/
default AdvancedMinioEndpointConsumerBuilder pollStrategy(org.apache.camel.spi.PollingConsumerPollStrategy pollStrategy) {
doSetProperty("pollStrategy", pollStrategy);
return this;
}
/**
* A pluggable org.apache.camel.PollingConsumerPollingStrategy allowing
* you to provide your custom implementation to control error handling
* usually occurred during the poll operation before an Exchange have
* been created and being routed in Camel.
*
* The option will be converted to a
* <code>org.apache.camel.spi.PollingConsumerPollStrategy</code> type.
*
* Group: consumer (advanced)
*
* @param pollStrategy the value to set
* @return the dsl builder
*/
default AdvancedMinioEndpointConsumerBuilder pollStrategy(String pollStrategy) {
doSetProperty("pollStrategy", pollStrategy);
return this;
}
/**
* Set custom HTTP client for authenticated access.
*
* The option is a: <code>okhttp3.OkHttpClient</code> type.
*
* Group: advanced
*
* @param customHttpClient the value to set
* @return the dsl builder
*/
default AdvancedMinioEndpointConsumerBuilder customHttpClient(okhttp3.OkHttpClient customHttpClient) {
doSetProperty("customHttpClient", customHttpClient);
return this;
}
/**
* Set custom HTTP client for authenticated access.
*
* The option will be converted to a <code>okhttp3.OkHttpClient</code>
* type.
*
* Group: advanced
*
* @param customHttpClient the value to set
* @return the dsl builder
*/
default AdvancedMinioEndpointConsumerBuilder customHttpClient(String customHttpClient) {
doSetProperty("customHttpClient", customHttpClient);
return this;
}
}
/**
* Builder for endpoint producers for the Minio component.
*/
public
|
AdvancedMinioEndpointConsumerBuilder
|
java
|
apache__kafka
|
trogdor/src/main/java/org/apache/kafka/trogdor/rest/CoordinatorStatusResponse.java
|
{
"start": 1014,
"end": 1351
}
|
class ____ extends Message {
private final long serverStartMs;
@JsonCreator
public CoordinatorStatusResponse(@JsonProperty("serverStartMs") long serverStartMs) {
this.serverStartMs = serverStartMs;
}
@JsonProperty
public long serverStartMs() {
return serverStartMs;
}
}
|
CoordinatorStatusResponse
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundToDouble2Evaluator.java
|
{
"start": 1094,
"end": 4133
}
|
class ____ implements EvalOperator.ExpressionEvaluator {
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(RoundToDouble2Evaluator.class);
private final Source source;
private final EvalOperator.ExpressionEvaluator field;
private final double p0;
private final double p1;
private final DriverContext driverContext;
private Warnings warnings;
public RoundToDouble2Evaluator(Source source, EvalOperator.ExpressionEvaluator field, double p0,
double p1, DriverContext driverContext) {
this.source = source;
this.field = field;
this.p0 = p0;
this.p1 = p1;
this.driverContext = driverContext;
}
@Override
public Block eval(Page page) {
try (DoubleBlock fieldBlock = (DoubleBlock) field.eval(page)) {
DoubleVector fieldVector = fieldBlock.asVector();
if (fieldVector == null) {
return eval(page.getPositionCount(), fieldBlock);
}
return eval(page.getPositionCount(), fieldVector).asBlock();
}
}
@Override
public long baseRamBytesUsed() {
long baseRamBytesUsed = BASE_RAM_BYTES_USED;
baseRamBytesUsed += field.baseRamBytesUsed();
return baseRamBytesUsed;
}
public DoubleBlock eval(int positionCount, DoubleBlock fieldBlock) {
try(DoubleBlock.Builder result = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) {
position: for (int p = 0; p < positionCount; p++) {
switch (fieldBlock.getValueCount(p)) {
case 0:
result.appendNull();
continue position;
case 1:
break;
default:
warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value"));
result.appendNull();
continue position;
}
double field = fieldBlock.getDouble(fieldBlock.getFirstValueIndex(p));
result.appendDouble(RoundToDouble.process(field, this.p0, this.p1));
}
return result.build();
}
}
public DoubleVector eval(int positionCount, DoubleVector fieldVector) {
try(DoubleVector.FixedBuilder result = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) {
position: for (int p = 0; p < positionCount; p++) {
double field = fieldVector.getDouble(p);
result.appendDouble(p, RoundToDouble.process(field, this.p0, this.p1));
}
return result.build();
}
}
@Override
public String toString() {
return "RoundToDouble2Evaluator[" + "field=" + field + ", p0=" + p0 + ", p1=" + p1 + "]";
}
@Override
public void close() {
Releasables.closeExpectNoException(field);
}
private Warnings warnings() {
if (warnings == null) {
this.warnings = Warnings.createWarnings(
driverContext.warningsMode(),
source.source().getLineNumber(),
source.source().getColumnNumber(),
source.text()
);
}
return warnings;
}
static
|
RoundToDouble2Evaluator
|
java
|
apache__camel
|
tests/camel-itest/src/test/java/org/apache/camel/itest/tx/JmsToHttpRoute.java
|
{
"start": 1407,
"end": 3665
}
|
class ____ extends RouteBuilder {
protected static int counter;
protected int port;
@Resource(name = "PROPAGATION_REQUIRED")
protected SpringTransactionPolicy required;
@EndpointInject("ref:data")
protected Endpoint data;
protected String nok = "<?xml version=\"1.0\"?><reply><status>nok</status></reply>";
protected String ok = "<?xml version=\"1.0\"?><reply><status>ok</status></reply>";
@Override
public void configure() {
port = AvailablePortFinder.getNextAvailable();
// configure a global transacted error handler
errorHandler(transactionErrorHandler(required));
from(data)
// use transaction policy for this route
.policy(required)
// send a request to http and get the response
.to("http://localhost:" + port + "/sender")
// convert the response to String so we can work with it and avoid streams only be readable once
// as the http component will return data as a stream
.convertBodyTo(String.class)
// do a choice if the response is okay or not
.choice()
// do a xpath to compare if the status is NOT okay
.when().xpath("/reply/status != 'ok'")
// as this is based on an unit test we use mocks to verify how many times we did rollback
.to("mock:JmsToHttpRoute")
// response is not okay so force a rollback by throwing an exception
.process(exchange -> {
throw new IllegalArgumentException("Rollback please");
})
.otherwise()
// otherwise since its okay, the route ends and the response is sent back
// to the original caller
.end();
// this is our http route that will fail the first 2 attempts
// before it sends an ok response
from("jetty:http://localhost:" + port + "/sender").process(exchange -> {
if (counter++ < 2) {
exchange.getMessage().setBody(nok);
} else {
exchange.getMessage().setBody(ok);
}
});
}
}
|
JmsToHttpRoute
|
java
|
apache__kafka
|
metadata/src/test/java/org/apache/kafka/image/loader/MetadataBatchLoaderTest.java
|
{
"start": 4951,
"end": 21019
}
|
class ____ implements MetadataBatchLoader.MetadataUpdater {
MetadataImage latestImage = null;
MetadataDelta latestDelta = null;
LogDeltaManifest latestManifest = null;
int updates = 0;
@Override
public void update(MetadataDelta delta, MetadataImage image, LogDeltaManifest manifest) {
latestDelta = delta;
latestImage = image;
latestManifest = manifest;
updates++;
}
public void reset() {
latestImage = null;
latestDelta = null;
latestManifest = null;
updates = 0;
}
}
@Test
public void testAlignedTransactionBatches() {
Batch<ApiMessageAndVersion> batch1 = Batch.data(
10, 1, 0, 10, TOPIC_TXN_BATCH_1);
Batch<ApiMessageAndVersion> batch2 = Batch.data(
13, 2, 0, 10, noOpRecords(3));
Batch<ApiMessageAndVersion> batch3 = Batch.data(
16, 2, 0, 30, TOPIC_TXN_BATCH_2);
MockMetadataUpdater updater = new MockMetadataUpdater();
MetadataBatchLoader batchLoader = new MetadataBatchLoader(
new LogContext(),
new MockTime(),
new MockFaultHandler("testAlignedTransactionBatches"),
updater
);
batchLoader.resetToImage(MetadataImage.EMPTY);
batchLoader.loadBatch(batch1, LEADER_AND_EPOCH);
assertEquals(0, updater.updates);
batchLoader.loadBatch(batch2, LEADER_AND_EPOCH);
assertEquals(0, updater.updates);
batchLoader.loadBatch(batch3, LEADER_AND_EPOCH);
assertEquals(0, updater.updates);
batchLoader.maybeFlushBatches(LEADER_AND_EPOCH, true);
assertEquals(1, updater.updates);
assertNotNull(updater.latestImage.topics().getTopic("foo"));
assertEquals(18, updater.latestImage.provenance().lastContainedOffset());
assertEquals(2, updater.latestImage.provenance().lastContainedEpoch());
assertTrue(updater.latestImage.provenance().isOffsetBatchAligned());
}
@Test
public void testSingletonBeginAndEnd() {
Batch<ApiMessageAndVersion> batch1 = Batch.data(
13, 1, 0, 30, noOpRecords(3));
Batch<ApiMessageAndVersion> batch2 = Batch.data(
16, 2, 0, 30, TXN_BEGIN_SINGLETON);
Batch<ApiMessageAndVersion> batch3 = Batch.data(
17, 3, 0, 10, TOPIC_NO_TXN_BATCH);
Batch<ApiMessageAndVersion> batch4 = Batch.data(
20, 4, 0, 10, TXN_END_SINGLETON);
MockMetadataUpdater updater = new MockMetadataUpdater();
MetadataBatchLoader batchLoader = new MetadataBatchLoader(
new LogContext(),
new MockTime(),
new MockFaultHandler("testSingletonBeginAndEnd"),
updater
);
// All in one commit
batchLoader.resetToImage(MetadataImage.EMPTY);
batchLoader.loadBatch(batch1, LEADER_AND_EPOCH);
assertEquals(0, updater.updates);
// batch1 is flushed in this loadBatch call
batchLoader.loadBatch(batch2, LEADER_AND_EPOCH);
assertEquals(1, updater.updates);
assertTrue(updater.latestImage.provenance().isOffsetBatchAligned());
assertNull(updater.latestImage.topics().getTopic("bar"));
batchLoader.loadBatch(batch3, LEADER_AND_EPOCH);
assertEquals(1, updater.updates);
batchLoader.loadBatch(batch4, LEADER_AND_EPOCH);
assertEquals(1, updater.updates);
batchLoader.maybeFlushBatches(LEADER_AND_EPOCH, true);
assertNotNull(updater.latestImage.topics().getTopic("bar"));
assertEquals(20, updater.latestImage.provenance().lastContainedOffset());
assertEquals(4, updater.latestImage.provenance().lastContainedEpoch());
// Each batch in a separate commit
updater.reset();
batchLoader.resetToImage(MetadataImage.EMPTY);
batchLoader.loadBatch(batch1, LEADER_AND_EPOCH);
batchLoader.maybeFlushBatches(LEADER_AND_EPOCH, true);
assertEquals(1, updater.updates);
batchLoader.loadBatch(batch2, LEADER_AND_EPOCH);
batchLoader.maybeFlushBatches(LEADER_AND_EPOCH, true);
assertEquals(1, updater.updates);
batchLoader.loadBatch(batch3, LEADER_AND_EPOCH);
batchLoader.maybeFlushBatches(LEADER_AND_EPOCH, true);
assertEquals(1, updater.updates);
batchLoader.loadBatch(batch4, LEADER_AND_EPOCH);
batchLoader.maybeFlushBatches(LEADER_AND_EPOCH, true);
assertEquals(2, updater.updates);
}
@Test
public void testUnexpectedBeginTransaction() {
MockMetadataUpdater updater = new MockMetadataUpdater();
MockFaultHandler faultHandler = new MockFaultHandler("testUnexpectedBeginTransaction");
MetadataBatchLoader batchLoader = new MetadataBatchLoader(
new LogContext(),
new MockTime(),
faultHandler,
updater
);
Batch<ApiMessageAndVersion> batch1 = Batch.data(
10, 2, 0, 30, TOPIC_TXN_BATCH_1);
Batch<ApiMessageAndVersion> batch2 = Batch.data(
13, 2, 0, 30, TXN_BEGIN_SINGLETON);
batchLoader.resetToImage(MetadataImage.EMPTY);
batchLoader.loadBatch(batch1, LEADER_AND_EPOCH);
assertNull(faultHandler.firstException());
batchLoader.loadBatch(batch2, LEADER_AND_EPOCH);
assertEquals(RuntimeException.class, faultHandler.firstException().getCause().getClass());
assertEquals(
"Encountered BeginTransactionRecord while already in a transaction",
faultHandler.firstException().getCause().getMessage()
);
batchLoader.maybeFlushBatches(LEADER_AND_EPOCH, true);
assertEquals(0, updater.updates);
}
@Test
public void testUnexpectedEndTransaction() {
MockMetadataUpdater updater = new MockMetadataUpdater();
MockFaultHandler faultHandler = new MockFaultHandler("testUnexpectedAbortTransaction");
MetadataBatchLoader batchLoader = new MetadataBatchLoader(
new LogContext(),
new MockTime(),
faultHandler,
updater
);
// First batch gets loaded fine
Batch<ApiMessageAndVersion> batch1 = Batch.data(
10, 2, 0, 30, TOPIC_NO_TXN_BATCH);
// Second batch throws an error, but shouldn't interfere with prior batches
Batch<ApiMessageAndVersion> batch2 = Batch.data(
13, 2, 0, 30, TXN_END_SINGLETON);
batchLoader.resetToImage(MetadataImage.EMPTY);
batchLoader.loadBatch(batch1, LEADER_AND_EPOCH);
assertNull(faultHandler.firstException());
batchLoader.loadBatch(batch2, LEADER_AND_EPOCH);
assertEquals(RuntimeException.class, faultHandler.firstException().getCause().getClass());
assertEquals(
"Encountered EndTransactionRecord without having seen a BeginTransactionRecord",
faultHandler.firstException().getCause().getMessage()
);
batchLoader.maybeFlushBatches(LEADER_AND_EPOCH, true);
assertEquals(1, updater.updates);
assertNotNull(updater.latestImage.topics().getTopic("bar"));
}
@Test
public void testUnexpectedAbortTransaction() {
MockMetadataUpdater updater = new MockMetadataUpdater();
MockFaultHandler faultHandler = new MockFaultHandler("testUnexpectedAbortTransaction");
MetadataBatchLoader batchLoader = new MetadataBatchLoader(
new LogContext(),
new MockTime(),
faultHandler,
updater
);
// First batch gets loaded fine
Batch<ApiMessageAndVersion> batch1 = Batch.data(
10, 2, 0, 30, TOPIC_NO_TXN_BATCH);
// Second batch throws an error, but shouldn't interfere with prior batches
Batch<ApiMessageAndVersion> batch2 = Batch.data(
13, 2, 0, 30, TXN_ABORT_SINGLETON);
batchLoader.resetToImage(MetadataImage.EMPTY);
batchLoader.loadBatch(batch1, LEADER_AND_EPOCH);
assertNull(faultHandler.firstException());
batchLoader.loadBatch(batch2, LEADER_AND_EPOCH);
assertEquals(RuntimeException.class, faultHandler.firstException().getCause().getClass());
assertEquals(
"Encountered AbortTransactionRecord without having seen a BeginTransactionRecord",
faultHandler.firstException().getCause().getMessage()
);
batchLoader.maybeFlushBatches(LEADER_AND_EPOCH, true);
assertEquals(1, updater.updates);
assertNotNull(updater.latestImage.topics().getTopic("bar"));
}
private MetadataBatchLoader loadSingleBatch(
MockMetadataUpdater updater,
MockFaultHandler faultHandler,
List<ApiMessageAndVersion> batchRecords
) {
Batch<ApiMessageAndVersion> batch = Batch.data(
10, 42, 0, 100, batchRecords);
MetadataBatchLoader batchLoader = new MetadataBatchLoader(
new LogContext(),
new MockTime(),
faultHandler,
updater
);
batchLoader.resetToImage(MetadataImage.EMPTY);
batchLoader.loadBatch(batch, LEADER_AND_EPOCH);
return batchLoader;
}
@Test
public void testMultipleTransactionsInOneBatch() {
List<ApiMessageAndVersion> batchRecords = new ArrayList<>();
batchRecords.addAll(TOPIC_TXN_BATCH_1);
batchRecords.addAll(TOPIC_TXN_BATCH_2);
batchRecords.addAll(TXN_BEGIN_SINGLETON);
batchRecords.addAll(TOPIC_NO_TXN_BATCH);
batchRecords.addAll(TXN_END_SINGLETON);
MockMetadataUpdater updater = new MockMetadataUpdater();
MockFaultHandler faultHandler = new MockFaultHandler("testMultipleTransactionsInOneBatch");
MetadataBatchLoader batchLoader = loadSingleBatch(updater, faultHandler, batchRecords);
assertEquals(1, updater.updates);
assertEquals(0, updater.latestManifest.numBytes());
assertEquals(15, updater.latestImage.provenance().lastContainedOffset());
// The first transaction is flushed in the middle of the batch, the offset flushed is not batch-aligned
assertFalse(updater.latestImage.provenance().isOffsetBatchAligned());
assertEquals(42, updater.latestImage.provenance().lastContainedEpoch());
assertNotNull(updater.latestImage.topics().getTopic("foo"));
assertNull(updater.latestImage.topics().getTopic("bar"));
batchLoader.maybeFlushBatches(LEADER_AND_EPOCH, true);
assertEquals(2, updater.updates);
assertEquals(100, updater.latestManifest.numBytes());
assertEquals(20, updater.latestImage.provenance().lastContainedOffset());
assertEquals(42, updater.latestImage.provenance().lastContainedEpoch());
assertTrue(updater.latestImage.provenance().isOffsetBatchAligned());
assertNotNull(updater.latestImage.topics().getTopic("foo"));
assertNotNull(updater.latestImage.topics().getTopic("bar"));
}
@Test
public void testMultipleTransactionsInOneBatchesWithNoOp() {
List<ApiMessageAndVersion> batchRecords = new ArrayList<>();
batchRecords.addAll(noOpRecords(1));
batchRecords.addAll(TOPIC_TXN_BATCH_1);
batchRecords.addAll(noOpRecords(1));
batchRecords.addAll(TOPIC_TXN_BATCH_2);
// A batch with non-transactional records between two transactions causes a delta to get published
batchRecords.addAll(noOpRecords(1));
batchRecords.addAll(TXN_BEGIN_SINGLETON);
batchRecords.addAll(noOpRecords(1));
batchRecords.addAll(TOPIC_NO_TXN_BATCH);
batchRecords.addAll(noOpRecords(1));
batchRecords.addAll(TXN_END_SINGLETON);
batchRecords.addAll(noOpRecords(1));
MockMetadataUpdater updater = new MockMetadataUpdater();
MockFaultHandler faultHandler = new MockFaultHandler("testMultipleTransactionsInOneBatches");
MetadataBatchLoader batchLoader = loadSingleBatch(updater, faultHandler, batchRecords);
assertEquals(2, updater.updates);
assertEquals(0, updater.latestManifest.numBytes());
assertEquals(18, updater.latestImage.provenance().lastContainedOffset());
assertEquals(42, updater.latestImage.provenance().lastContainedEpoch());
assertFalse(updater.latestImage.provenance().isOffsetBatchAligned());
assertNotNull(updater.latestImage.topics().getTopic("foo"));
assertNull(updater.latestImage.topics().getTopic("bar"));
batchLoader.maybeFlushBatches(LEADER_AND_EPOCH, true);
assertEquals(3, updater.updates);
assertEquals(100, updater.latestManifest.numBytes());
assertEquals(26, updater.latestImage.provenance().lastContainedOffset());
assertEquals(42, updater.latestImage.provenance().lastContainedEpoch());
assertNotNull(updater.latestImage.topics().getTopic("foo"));
assertNotNull(updater.latestImage.topics().getTopic("bar"));
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testOneTransactionInMultipleBatches(boolean abortTxn) {
MockMetadataUpdater updater = new MockMetadataUpdater();
MetadataBatchLoader batchLoader = new MetadataBatchLoader(
new LogContext(),
new MockTime(),
new MockFaultHandler("testOneTransactionInMultipleBatches"),
updater
);
batchLoader.resetToImage(MetadataImage.EMPTY);
batchLoader.loadBatch(Batch.data(
16, 2, 0, 10, TXN_BEGIN_SINGLETON), LEADER_AND_EPOCH);
assertEquals(0, updater.updates);
batchLoader.loadBatch(Batch.data(
17, 3, 0, 30, TOPIC_NO_TXN_BATCH), LEADER_AND_EPOCH);
assertEquals(0, updater.updates);
if (abortTxn) {
batchLoader.loadBatch(Batch.data(
20, 4, 0, 10, TXN_ABORT_SINGLETON), LEADER_AND_EPOCH);
} else {
batchLoader.loadBatch(Batch.data(
20, 4, 0, 10, TXN_END_SINGLETON), LEADER_AND_EPOCH);
}
assertEquals(0, updater.updates);
batchLoader.maybeFlushBatches(LEADER_AND_EPOCH, true);
// Regardless of end/abort, we should publish an updated MetadataProvenance and manifest
assertEquals(50, updater.latestManifest.numBytes());
assertEquals(3, updater.latestManifest.numBatches());
assertEquals(20, updater.latestImage.provenance().lastContainedOffset());
assertEquals(4, updater.latestImage.provenance().lastContainedEpoch());
if (abortTxn) {
assertNull(updater.latestImage.topics().getTopic("bar"));
} else {
assertNotNull(updater.latestImage.topics().getTopic("bar"));
}
}
@Test
public void testTransactionAlignmentOnBatchBoundary() {
List<ApiMessageAndVersion> batchRecords = new ArrayList<>();
batchRecords.addAll(noOpRecords(3));
batchRecords.addAll(TOPIC_TXN_BATCH_1);
batchRecords.addAll(TOPIC_TXN_BATCH_2);
batchRecords.addAll(noOpRecords(3));
MockMetadataUpdater updater = new MockMetadataUpdater();
MockFaultHandler faultHandler = new MockFaultHandler("testMultipleTransactionsInOneBatch");
MetadataBatchLoader batchLoader = loadSingleBatch(updater, faultHandler, batchRecords);
assertEquals(1, updater.updates);
assertEquals(0, updater.latestManifest.numBytes());
assertEquals(12, updater.latestImage.provenance().lastContainedOffset());
assertFalse(updater.latestImage.provenance().isOffsetBatchAligned());
batchLoader.loadBatch(Batch.data(
22, 42, 0, 10, TXN_BEGIN_SINGLETON), LEADER_AND_EPOCH);
assertEquals(2, updater.updates);
assertEquals(100, updater.latestManifest.numBytes());
assertEquals(21, updater.latestImage.provenance().lastContainedOffset());
assertTrue(updater.latestImage.provenance().isOffsetBatchAligned());
}
}
|
MockMetadataUpdater
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ContainerLocalizationEvent.java
|
{
"start": 971,
"end": 1255
}
|
class ____ extends LocalizationEvent {
final Container container;
public ContainerLocalizationEvent(LocalizationEventType event, Container c) {
super(event);
this.container = c;
}
public Container getContainer() {
return container;
}
}
|
ContainerLocalizationEvent
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/interceptor/merge/MergeInterceptionTest.java
|
{
"start": 1856,
"end": 1981
}
|
class ____ {
@Id @GeneratedValue
private Long id;
private String name;
@ElementCollection
Set<String> names;
}
}
|
Thing
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractEtagTest.java
|
{
"start": 1558,
"end": 7009
}
|
class ____ extends
AbstractFSContractTestBase {
private static final Logger LOG =
LoggerFactory.getLogger(AbstractContractEtagTest.class);
/**
* basic consistency across operations, as well as being non-empty.
*/
@Test
public void testEtagConsistencyAcrossListAndHead() throws Throwable {
describe("Etag values must be non-empty and consistent across LIST and HEAD Calls.");
final Path path = methodPath();
final FileSystem fs = getFileSystem();
Assertions.assertThat(fs.hasPathCapability(path, ETAGS_AVAILABLE))
.describedAs("path capability %s of %s",
ETAGS_AVAILABLE, path)
.isTrue();
ContractTestUtils.touch(fs, path);
final FileStatus st = fs.getFileStatus(path);
final String etag = etagFromStatus(st);
LOG.info("etag of empty file is \"{}\"", etag);
final FileStatus[] statuses = fs.listStatus(path);
Assertions.assertThat(statuses)
.describedAs("List(%s)", path)
.hasSize(1);
final FileStatus lsStatus = statuses[0];
Assertions.assertThat(etagFromStatus(lsStatus))
.describedAs("etag of list status (%s) compared to HEAD value of %s", lsStatus, st)
.isEqualTo(etag);
}
/**
* Get an etag from a FileStatus which MUST BE
* an implementation of EtagSource and
* whose etag MUST NOT BE null/empty.
* @param st the status
* @return the etag
*/
String etagFromStatus(FileStatus st) {
Assertions.assertThat(st)
.describedAs("FileStatus %s", st)
.isInstanceOf(EtagSource.class);
final String etag = ((EtagSource) st).getEtag();
Assertions.assertThat(etag)
.describedAs("Etag of %s", st)
.isNotBlank();
return etag;
}
/**
* Overwritten data has different etags.
*/
@Test
public void testEtagsOfDifferentDataDifferent() throws Throwable {
describe("Verify that two different blocks of data written have different tags");
final Path path = methodPath();
final FileSystem fs = getFileSystem();
Path src = new Path(path, "src");
ContractTestUtils.createFile(fs, src, true,
"data1234".getBytes(StandardCharsets.UTF_8));
final FileStatus srcStatus = fs.getFileStatus(src);
final String srcTag = etagFromStatus(srcStatus);
LOG.info("etag of file 1 is \"{}\"", srcTag);
// now overwrite with data of same length
// (ensure that path or length aren't used exclusively as tag)
ContractTestUtils.createFile(fs, src, true,
"1234data".getBytes(StandardCharsets.UTF_8));
// validate
final String tag2 = etagFromStatus(fs.getFileStatus(src));
LOG.info("etag of file 2 is \"{}\"", tag2);
Assertions.assertThat(tag2)
.describedAs("etag of updated file")
.isNotEqualTo(srcTag);
}
/**
* If supported, rename preserves etags.
*/
@Test
public void testEtagConsistencyAcrossRename() throws Throwable {
describe("Verify that when a file is renamed, the etag remains unchanged");
final Path path = methodPath();
final FileSystem fs = getFileSystem();
assumeThat(fs.hasPathCapability(path, ETAGS_PRESERVED_IN_RENAME))
.withFailMessage("Filesystem does not declare that etags are preserved across renames")
.isTrue();
Path src = new Path(path, "src");
Path dest = new Path(path, "dest");
ContractTestUtils.createFile(fs, src, true,
"sample data".getBytes(StandardCharsets.UTF_8));
final FileStatus srcStatus = fs.getFileStatus(src);
LOG.info("located file status string value " + srcStatus);
final String srcTag = etagFromStatus(srcStatus);
LOG.info("etag of short file is \"{}\"", srcTag);
Assertions.assertThat(srcTag)
.describedAs("Etag of %s", srcStatus)
.isNotBlank();
// rename
fs.rename(src, dest);
// validate
FileStatus destStatus = fs.getFileStatus(dest);
final String destTag = etagFromStatus(destStatus);
Assertions.assertThat(destTag)
.describedAs("etag of list status (%s) compared to HEAD value of %s",
destStatus, srcStatus)
.isEqualTo(srcTag);
}
/**
* For effective use of etags, listLocatedStatus SHOULD return status entries
* with consistent values.
* This ensures that listing during query planning can collect and use the etags.
*/
@Test
public void testLocatedStatusAlsoHasEtag() throws Throwable {
describe("verify that listLocatedStatus() and listFiles() are etag sources");
final Path path = methodPath();
final FileSystem fs = getFileSystem();
Path src = new Path(path, "src");
ContractTestUtils.createFile(fs, src, true,
"sample data".getBytes(StandardCharsets.UTF_8));
final FileStatus srcStatus = fs.getFileStatus(src);
final String srcTag = etagFromStatus(srcStatus);
final LocatedFileStatus entry = fs.listLocatedStatus(path).next();
LOG.info("located file status string value " + entry);
final String listTag = etagFromStatus(entry);
Assertions.assertThat(listTag)
.describedAs("etag of listLocatedStatus (%s) compared to HEAD value of %s",
entry, srcStatus)
.isEqualTo(srcTag);
final LocatedFileStatus entry2 = fs.listFiles(path, false).next();
Assertions.assertThat(etagFromStatus(entry2))
.describedAs("etag of listFiles (%s) compared to HEAD value of %s",
entry, srcStatus)
.isEqualTo(srcTag);
}
}
|
AbstractContractEtagTest
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/common/settings/SettingsFilterTests.java
|
{
"start": 1247,
"end": 7391
}
|
class ____ extends ESTestCase {
public void testAddingAndRemovingFilters() {
HashSet<String> hashSet = new HashSet<>(Arrays.asList("foo", "bar", "baz"));
SettingsFilter settingsFilter = new SettingsFilter(hashSet);
assertEquals(settingsFilter.getPatterns(), hashSet);
}
public void testSettingsFiltering() throws IOException {
testFiltering(
Settings.builder()
.put("foo", "foo_test")
.put("foo1", "foo1_test")
.put("bar", "bar_test")
.put("bar1", "bar1_test")
.put("bar.2", "bar2_test")
.build(),
Settings.builder().put("foo1", "foo1_test").build(),
"foo",
"bar*"
);
testFiltering(
Settings.builder()
.put("foo", "foo_test")
.put("foo1", "foo1_test")
.put("bar", "bar_test")
.put("bar1", "bar1_test")
.put("bar.2", "bar2_test")
.build(),
Settings.builder().put("foo", "foo_test").put("foo1", "foo1_test").build(),
"bar*"
);
testFiltering(
Settings.builder()
.put("foo", "foo_test")
.put("foo1", "foo1_test")
.put("bar", "bar_test")
.put("bar1", "bar1_test")
.put("bar.2", "bar2_test")
.build(),
Settings.builder().build(),
"foo",
"bar*",
"foo*"
);
testFiltering(
Settings.builder().put("foo", "foo_test").put("bar", "bar_test").put("baz", "baz_test").build(),
Settings.builder().put("foo", "foo_test").put("bar", "bar_test").put("baz", "baz_test").build()
);
testFiltering(
Settings.builder().put("a.b.something.d", "foo_test").put("a.b.something.c", "foo1_test").build(),
Settings.builder().put("a.b.something.c", "foo1_test").build(),
"a.b.*.d"
);
}
public void testFilteredSettingIsNotLogged() throws Exception {
Settings oldSettings = Settings.builder().put("key", "old").build();
Settings newSettings = Settings.builder().put("key", "new").build();
Setting<String> filteredSetting = Setting.simpleString("key", Property.Filtered);
assertExpectedLogMessages(
(testLogger) -> Setting.logSettingUpdate(filteredSetting, newSettings, oldSettings, testLogger),
new MockLog.SeenEventExpectation("secure logging", "org.elasticsearch.test", Level.INFO, "updating [key]"),
new MockLog.UnseenEventExpectation("unwanted old setting name", "org.elasticsearch.test", Level.INFO, "*old*"),
new MockLog.UnseenEventExpectation("unwanted new setting name", "org.elasticsearch.test", Level.INFO, "*new*")
);
}
public void testIndexScopeSettingUpdateLoggedAsDebug() throws Exception {
Settings oldSettings = Settings.builder().put("key", "old").build();
Settings newSettings = Settings.builder().put("key", "new").build();
// With INFO log level nothing gets logged.
Setting<String> filteredSetting = Setting.simpleString("key", Property.IndexScope);
assertExpectedLogMessages((testLogger) -> Setting.logSettingUpdate(filteredSetting, newSettings, oldSettings, testLogger));
try {
// With DEBUG log level something gets logged
Configurator.setLevel("org.elasticsearch.test", Level.DEBUG);
assertExpectedLogMessages(
(logger) -> Setting.logSettingUpdate(filteredSetting, newSettings, oldSettings, logger),
new MockLog.SeenEventExpectation(
"regular logging",
"org.elasticsearch.test",
Level.DEBUG,
"updating [key] from [old] to [new]"
)
);
} finally {
Configurator.setLevel("org.elasticsearch.test", Level.INFO);
}
}
public void testRegularSettingUpdateIsFullyLogged() throws Exception {
Settings oldSettings = Settings.builder().put("key", "old").build();
Settings newSettings = Settings.builder().put("key", "new").build();
Setting<String> regularSetting = Setting.simpleString("key");
assertExpectedLogMessages(
(testLogger) -> Setting.logSettingUpdate(regularSetting, newSettings, oldSettings, testLogger),
new MockLog.SeenEventExpectation("regular logging", "org.elasticsearch.test", Level.INFO, "updating [key] from [old] to [new]")
);
}
private void assertExpectedLogMessages(Consumer<Logger> consumer, MockLog.LoggingExpectation... expectations) {
Logger testLogger = LogManager.getLogger("org.elasticsearch.test");
try (var mockLog = MockLog.capture("org.elasticsearch.test")) {
Arrays.stream(expectations).forEach(mockLog::addExpectation);
consumer.accept(testLogger);
mockLog.assertAllExpectationsMatched();
}
}
private void testFiltering(Settings source, Settings filtered, String... patterns) throws IOException {
SettingsFilter settingsFilter = new SettingsFilter(Arrays.asList(patterns));
// Test using direct filtering
Settings filteredSettings = settingsFilter.filter(source);
assertThat(filteredSettings, equalTo(filtered));
// Test using toXContent filtering
RestRequest request = new FakeRestRequest();
settingsFilter.addFilterSettingParams(request);
XContentBuilder xContentBuilder = XContentBuilder.builder(JsonXContent.jsonXContent);
xContentBuilder.startObject();
source.toXContent(xContentBuilder, request);
xContentBuilder.endObject();
String filteredSettingsString = Strings.toString(xContentBuilder);
filteredSettings = Settings.builder().loadFromSource(filteredSettingsString, xContentBuilder.contentType()).build();
assertThat(filteredSettings, equalTo(filtered));
}
}
|
SettingsFilterTests
|
java
|
quarkusio__quarkus
|
independent-projects/arc/runtime/src/main/java/io/quarkus/arc/impl/EventImpl.java
|
{
"start": 17865,
"end": 18705
}
|
class ____ implements Synchronization {
private List<DeferredEventNotification<?>> deferredEvents;
ArcSynchronization(List<DeferredEventNotification<?>> deferredEvents) {
this.deferredEvents = deferredEvents;
}
@Override
public void beforeCompletion() {
for (DeferredEventNotification<?> event : deferredEvents) {
if (event.isBeforeCompletion()) {
event.run();
}
}
}
@Override
public void afterCompletion(int i) {
for (DeferredEventNotification<?> event : deferredEvents) {
if (!event.isBeforeCompletion() && event.getStatus().matches(i)) {
event.run();
}
}
}
}
private static
|
ArcSynchronization
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/pool/TimeBetweenLogStatsMillisTest.java
|
{
"start": 166,
"end": 1296
}
|
class ____ extends TestCase {
private DruidDataSource dataSource;
protected void setUp() throws Exception {
dataSource = new DruidDataSource();
dataSource.setUrl("jdbc:mock:xxx");
dataSource.setTimeBetweenLogStatsMillis(1000);
// dataSource.setFilters("log4j");
}
protected void tearDown() throws Exception {
JdbcUtils.close(dataSource);
}
public void test_0() throws Exception {
assertEquals(true, dataSource.isResetStatEnable());
dataSource.init();
assertEquals(1000, dataSource.getTimeBetweenLogStatsMillis());
assertEquals(false, dataSource.isResetStatEnable());
dataSource.resetStat();
assertEquals(0, dataSource.getResetCount());
dataSource.setConnectionProperties("druid.resetStatEnable=true");
assertEquals(true, dataSource.isResetStatEnable());
dataSource.setConnectionProperties("druid.resetStatEnable=false");
assertEquals(false, dataSource.isResetStatEnable());
dataSource.setConnectionProperties("druid.resetStatEnable=xxx");
}
}
|
TimeBetweenLogStatsMillisTest
|
java
|
apache__flink
|
flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/MultipleInputStreamTaskTest.java
|
{
"start": 81021,
"end": 81963
}
|
class ____<T> extends AbstractStreamOperator<T>
implements OneInputStreamOperator<T, T>, BoundedOneInput {
public static final String OPEN = "LifeCycleTrackingMap#open";
public static final String CLOSE = "LifeCycleTrackingMap#close";
public static final String END_INPUT = "LifeCycleTrackingMap#endInput";
@Override
public void processElement(StreamRecord<T> element) throws Exception {
output.collect(element);
}
@Override
public void open() throws Exception {
LIFE_CYCLE_EVENTS.add(OPEN);
super.open();
}
@Override
public void close() throws Exception {
LIFE_CYCLE_EVENTS.add(CLOSE);
super.close();
}
@Override
public void endInput() throws Exception {
LIFE_CYCLE_EVENTS.add(END_INPUT);
}
}
private static
|
LifeCycleTrackingMap
|
java
|
apache__flink
|
flink-state-backends/flink-statebackend-rocksdb/src/test/java/org/apache/flink/state/rocksdb/sstmerge/CompactionTaskProducerTest.java
|
{
"start": 6474,
"end": 10096
}
|
class ____ {
private byte[] columnFamilyName;
private String fileName;
private int level;
private long size;
private byte[] smallestKey;
private boolean beingCompacted;
public SstFileMetaDataBuilder(ColumnFamilyHandle columnFamily) {
try {
this.columnFamilyName = columnFamily.getName();
} catch (RocksDBException e) {
throw new RuntimeException(e);
}
}
public SstFileMetaDataBuilder setColumnFamily(ColumnFamilyHandle columnFamily) {
try {
this.columnFamilyName = columnFamily.getName();
} catch (RocksDBException e) {
throw new RuntimeException(e);
}
return this;
}
public SstFileMetaDataBuilder setFileName(String fileName) {
this.fileName = fileName;
return this;
}
public SstFileMetaDataBuilder setLevel(int level) {
this.level = level;
return this;
}
public SstFileMetaDataBuilder setSize(long size) {
this.size = size;
return this;
}
public SstFileMetaDataBuilder setSmallestKey(byte[] smallestKey) {
this.smallestKey = smallestKey;
return this;
}
public SstFileMetaDataBuilder setBeingCompacted(boolean beingCompacted) {
this.beingCompacted = beingCompacted;
return this;
}
public SstFileMetaData build() {
return new SstFileMetaData(
columnFamilyName, fileName, level, size, smallestKey, beingCompacted);
}
}
private List<CompactionTask> produce(
RocksDBManualCompactionConfig config, SstFileMetaData... sst) {
return new CompactionTaskProducer(() -> Arrays.asList(sst), config, defaultCfLookup)
.produce();
}
private static RocksDBManualCompactionConfig.Builder configBuilder() {
return RocksDBManualCompactionConfig.builder()
.setMaxFilesToCompact(Integer.MAX_VALUE)
.setMaxAutoCompactions(Integer.MAX_VALUE)
.setMaxParallelCompactions(Integer.MAX_VALUE)
.setMaxOutputFileSize(MemorySize.MAX_VALUE)
.setMinFilesToCompact(1)
.setMinInterval(1L);
}
private SstFileMetaDataBuilder sstBuilder() {
byte[] bytes = new byte[128];
RANDOM.nextBytes(bytes);
return new SstFileMetaDataBuilder(rocksDBExtension.getDefaultColumnFamily())
.setFileName(RANDOM.nextInt() + ".sst")
.setLevel(1)
.setSize(4)
.setSmallestKey(bytes);
}
private SstFileMetaData[] buildSstFiles(int level, int fileSize, int numFiles) {
return IntStream.range(0, numFiles)
.mapToObj(
i ->
sstBuilder()
.setSmallestKey(new byte[] {(byte) i})
.setLevel(level)
.setSize(fileSize)
.build())
.toArray(SstFileMetaData[]::new);
}
private CompactionTask createTask(int level, SstFileMetaData... files) {
return new CompactionTask(
level,
Arrays.stream(files).map(SstFileMetaData::fileName).collect(Collectors.toList()),
rocksDBExtension.getDefaultColumnFamily());
}
}
|
SstFileMetaDataBuilder
|
java
|
elastic__elasticsearch
|
x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/planner/QueryFolder.java
|
{
"start": 4866,
"end": 5323
}
|
class ____ extends FoldingRule<EsQueryExec> {
@Override
protected PhysicalPlan rule(EsQueryExec exec) {
QueryContainer qContainer = exec.queryContainer();
for (Attribute attr : exec.output()) {
qContainer = qContainer.addColumn(attr);
}
// after all attributes have been resolved
return exec.with(qContainer);
}
}
abstract static
|
PlanOutputToQueryRef
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
|
{
"start": 47480,
"end": 69432
}
|
class ____ not be instantiated.
*/
private S3ATestUtils() {
}
/**
* Verify the core size, block size and timestamp values of a file.
* @param status status entry to check
* @param size file size
* @param blockSize block size
* @param modTime modified time
*/
public static void verifyFileStatus(FileStatus status, long size,
long blockSize, long modTime) {
verifyFileStatus(status, size, 0, modTime, 0, blockSize, null, null, null);
}
/**
* Verify the status entry of a file matches that expected.
* @param status status entry to check
* @param size file size
* @param replication replication factor (may be 0)
* @param modTime modified time
* @param accessTime access time (may be 0)
* @param blockSize block size
* @param owner owner (may be null)
* @param group user group (may be null)
* @param permission permission (may be null)
*/
public static void verifyFileStatus(FileStatus status,
long size,
int replication,
long modTime,
long accessTime,
long blockSize,
String owner,
String group,
FsPermission permission) {
String details = status.toString();
assertFalse(status.isDirectory(), "Not a dir: " + details);
assertEquals(modTime, status.getModificationTime(), "Mod time: " + details);
assertEquals(size, status.getLen(), "File size: " + details);
assertEquals(blockSize, status.getBlockSize(), "Block size: " + details);
if (replication > 0) {
assertEquals(replication, status.getReplication(),
"Replication value: " + details);
}
if (accessTime != 0) {
assertEquals(accessTime, status.getAccessTime(),
"Access time: " + details);
}
if (owner != null) {
assertEquals("Owner: " + details, owner, status.getOwner());
}
if (group != null) {
assertEquals("Group: " + details, group, status.getGroup());
}
if (permission != null) {
assertEquals(permission, status.getPermission(),
"Permission: " + details);
}
}
/**
* Verify the status entry of a directory matches that expected.
* @param status status entry to check
* @param replication replication factor
* @param owner owner
*/
public static void verifyDirStatus(S3AFileStatus status,
int replication,
String owner) {
String details = status.toString();
assertTrue(status.isDirectory(), "Is a dir: " + details);
assertEquals(0, status.getLen(), "zero length: " + details);
// S3AFileStatus always assigns modTime = System.currentTimeMillis()
assertTrue(status.getModificationTime() > 0, "Mod time: " + details);
assertEquals(replication, status.getReplication(),
"Replication value: " + details);
assertEquals(0, status.getAccessTime(),
"Access time: " + details);
assertEquals("Owner: " + details, owner, status.getOwner());
// S3AFileStatus always assigns group=owner
assertEquals("Group: " + details, owner, status.getGroup());
// S3AFileStatus always assigns permission = default
assertEquals(FsPermission.getDefault(), status.getPermission(),
"Permission: " + details);
}
/**
* Assert that a configuration option matches the expected value.
* @param conf configuration
* @param key option key
* @param expected expected value
*/
public static void assertOptionEquals(Configuration conf,
String key,
String expected) {
String actual = conf.get(key);
String origin = actual == null
? "(none)"
: "[" + StringUtils.join(conf.getPropertySources(key), ", ") + "]";
Assertions.assertThat(actual)
.describedAs("Value of %s with origin %s", key, origin)
.isEqualTo(expected);
}
/**
* Assume that a condition is met. If not: log at WARN and
* then throw an {@link TestAbortedException}.
* @param message
* @param condition
*/
public static void assume(String message, boolean condition) {
if (!condition) {
LOG.warn(message);
}
Assumptions.assumeThat(condition).
describedAs(message)
.isTrue();
}
/**
* Convert a throwable to an assumption failure.
* @param t thrown exception.
*/
public static void raiseAsAssumption(Throwable t) {
throw new TestAbortedException(t.toString(), t);
}
/**
* Get the statistics from a wrapped block output stream.
* @param out output stream
* @return the (active) stats of the write
*/
public static BlockOutputStreamStatistics
getOutputStreamStatistics(FSDataOutputStream out) {
S3ABlockOutputStream blockOutputStream
= (S3ABlockOutputStream) out.getWrappedStream();
return blockOutputStream.getStatistics();
}
/**
* Read in a file and convert to an ascii string.
* @param fs filesystem
* @param path path to read
* @return the bytes read and converted to a string
* @throws IOException IO problems
*/
public static String read(FileSystem fs,
Path path) throws IOException {
FileStatus status = fs.getFileStatus(path);
try (FSDataInputStream in = fs.open(path)) {
byte[] buf = new byte[(int)status.getLen()];
in.readFully(0, buf);
return new String(buf);
}
}
/**
* Read in a file and convert to an ascii string, using the openFile
* builder API and the file status.
* If the status is an S3A FileStatus, any etag or versionId used
* will be picked up.
* @param fs filesystem
* @param status file status, including path
* @return the bytes read and converted to a string
* @throws IOException IO problems
*/
public static String readWithStatus(
final FileSystem fs,
final FileStatus status) throws IOException {
final CompletableFuture<FSDataInputStream> future =
fs.openFile(status.getPath())
.withFileStatus(status)
.build();
try (FSDataInputStream in = FutureIO.awaitFuture(future)) {
byte[] buf = new byte[(int) status.getLen()];
in.readFully(0, buf);
return new String(buf);
}
}
/**
* List a directory/directory tree.
* @param fileSystem FS
* @param path path
* @param recursive do a recursive listing?
* @return the number of files found.
* @throws IOException failure.
*/
public static long lsR(FileSystem fileSystem, Path path, boolean recursive)
throws Exception {
if (path == null) {
// surfaces when someone calls getParent() on something at the top
// of the path
LOG.info("Empty path");
return 0;
}
return S3AUtils.applyLocatedFiles(fileSystem.listFiles(path, recursive),
(status) -> LOG.info("{}", status));
}
  /**
   * Date format used for mapping upload initiation time to human string.
   * NOTE(review): {@code SimpleDateFormat} is not thread-safe; this shared
   * static instance must not be used from multiple threads concurrently —
   * TODO confirm all callers format on a single thread.
   */
  public static final DateFormat LISTING_FORMAT = new SimpleDateFormat(
      "yyyy-MM-dd HH:mm:ss");
/**
* Probe for the configuration containing a specific credential provider.
* If the list is empty, there will be no match, even if the named provider
* is on the default list.
*
* @param conf configuration
* @param providerClassname provider class
* @return true if the configuration contains that classname.
*/
public static boolean authenticationContains(Configuration conf,
String providerClassname) {
return conf.getTrimmedStringCollection(AWS_CREDENTIALS_PROVIDER)
.contains(providerClassname);
}
public static void checkListingDoesNotContainPath(S3AFileSystem fs, Path filePath)
throws IOException {
final RemoteIterator<LocatedFileStatus> listIter =
fs.listFiles(filePath.getParent(), false);
while (listIter.hasNext()) {
final LocatedFileStatus lfs = listIter.next();
assertNotEquals(filePath, lfs.getPath(),
"Listing was not supposed to include " + filePath);
}
LOG.info("{}; file omitted from listFiles listing as expected.", filePath);
final FileStatus[] fileStatuses = fs.listStatus(filePath.getParent());
for (FileStatus fileStatus : fileStatuses) {
assertNotEquals(filePath, fileStatus.getPath(),
"Listing was not supposed to include " + filePath);
}
LOG.info("{}; file omitted from listStatus as expected.", filePath);
}
public static void checkListingContainsPath(S3AFileSystem fs, Path filePath)
throws IOException {
boolean listFilesHasIt = false;
boolean listStatusHasIt = false;
final RemoteIterator<LocatedFileStatus> listIter =
fs.listFiles(filePath.getParent(), false);
while (listIter.hasNext()) {
final LocatedFileStatus lfs = listIter.next();
if (filePath.equals(lfs.getPath())) {
listFilesHasIt = true;
}
}
final FileStatus[] fileStatuses = fs.listStatus(filePath.getParent());
for (FileStatus fileStatus : fileStatuses) {
if (filePath.equals(fileStatus.getPath())) {
listStatusHasIt = true;
}
}
assertTrue(listFilesHasIt,
"fs.listFiles didn't include " + filePath);
assertTrue(listStatusHasIt,
"fs.listStatus didn't include " + filePath);
}
/**
* This creates a set containing all current threads and some well-known
* thread names whose existence should not fail test runs.
* They are generally static cleaner threads created by various classes
* on instantiation.
* @return a set of threads to use in later assertions.
*/
public static Set<String> listInitialThreadsForLifecycleChecks() {
Set<String> threadSet = getCurrentThreadNames();
// static filesystem statistics cleaner
threadSet.add(
"org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner");
// AWS progress callbacks
threadSet.add("java-sdk-progress-listener-callback-thread");
// another AWS thread
threadSet.add("java-sdk-http-connection-reaper");
// java.lang.UNIXProcess. maybe if chmod is called?
threadSet.add("process reaper");
// once a quantile has been scheduled, the mutable quantile thread pool
// is initialized; it has a minimum thread size of 1.
threadSet.add("MutableQuantiles-0");
// IDE?
threadSet.add("Attach Listener");
return threadSet;
}
/**
* Get a set containing the names of all active threads,
* stripping out all test runner threads.
* @return the current set of threads.
*/
public static Set<String> getCurrentThreadNames() {
TreeSet<String> threads = Thread.getAllStackTraces().keySet()
.stream()
.map(Thread::getName)
.filter(n -> n.startsWith("JUnit"))
.filter(n -> n.startsWith("surefire"))
.collect(Collectors.toCollection(TreeSet::new));
return threads;
}
  /**
   * Call the package-private {@code innerGetFileStatus()} method
   * on the passed in FS.
   * @param fs filesystem
   * @param path path to probe
   * @param needEmptyDirectoryFlag look for empty directory
   * @param probes file status probes to perform
   * @return the status
   * @throws IOException failure of the underlying status probe
   */
  public static S3AFileStatus innerGetFileStatus(
      S3AFileSystem fs,
      Path path,
      boolean needEmptyDirectoryFlag,
      Set<StatusProbeEnum> probes) throws IOException {
    return fs.innerGetFileStatus(
        path,
        needEmptyDirectoryFlag,
        probes);
  }
/**
* Skip a test if encryption algorithm or encryption key is not set.
*
* @param configuration configuration to probe.
* @param s3AEncryptionMethods list of encryption algorithms to probe.
* @throws IOException if the secret lookup fails.
*/
public static void skipIfEncryptionNotSet(Configuration configuration,
S3AEncryptionMethods... s3AEncryptionMethods) throws IOException {
if (s3AEncryptionMethods == null || s3AEncryptionMethods.length == 0) {
throw new IllegalArgumentException("Specify at least one encryption method");
}
// if S3 encryption algorithm is not set to desired method or AWS encryption
// key is not set, then skip.
String bucket = getTestBucketName(configuration);
final EncryptionSecrets secrets = buildEncryptionSecrets(bucket, configuration);
boolean encryptionMethodMatching = Arrays.stream(s3AEncryptionMethods).anyMatch(
s3AEncryptionMethod -> s3AEncryptionMethod.getMethod()
.equals(secrets.getEncryptionMethod().getMethod()));
if (!encryptionMethodMatching || StringUtils.isBlank(secrets.getEncryptionKey())) {
skip(S3_ENCRYPTION_KEY + " is not set or " + S3_ENCRYPTION_ALGORITHM + " is not set to "
+ Arrays.stream(s3AEncryptionMethods).map(S3AEncryptionMethods::getMethod)
.collect(Collectors.toList()) + " in " + secrets);
}
}
/**
* Skip a test if encryption algorithm is not empty, or if it is set to
* anything other than AES256.
*
* @param configuration configuration
*/
public static void skipForAnyEncryptionExceptSSES3(Configuration configuration) {
String bucket = getTestBucketName(configuration);
try {
final EncryptionSecrets secrets = buildEncryptionSecrets(bucket, configuration);
S3AEncryptionMethods s3AEncryptionMethods = secrets.getEncryptionMethod();
if (s3AEncryptionMethods.getMethod().equals(SSE_S3.getMethod())
|| s3AEncryptionMethods.getMethod().isEmpty()) {
return;
}
skip("Encryption method is set to " + s3AEncryptionMethods.getMethod());
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
/**
* Get the input stream statistics of an input stream.
* Raises an exception if the inner stream is not an S3A input stream
* or prefetching input stream
* @param in wrapper
* @return the statistics for the inner stream
*/
public static S3AInputStreamStatistics getInputStreamStatistics(
FSDataInputStream in) {
InputStream inner = in.getWrappedStream();
if (inner instanceof S3AInputStream) {
return ((S3AInputStream) inner).getS3AStreamStatistics();
} else if (inner instanceof S3APrefetchingInputStream) {
return ((S3APrefetchingInputStream) inner).getS3AStreamStatistics();
} else {
throw new AssertionError("Not an S3AInputStream or S3APrefetchingInputStream: " + inner);
}
}
/**
* Get the inner stream of an input stream.
* Raises an exception if the inner stream is not an S3A input stream
* @param in wrapper
* @return the inner stream
* @throws AssertionError if the inner stream is of the wrong type
*/
public static S3AInputStream getS3AInputStream(
FSDataInputStream in) {
InputStream inner = in.getWrappedStream();
if (inner instanceof S3AInputStream) {
return (S3AInputStream) inner;
} else {
throw new AssertionError("Not an S3AInputStream: " + inner);
}
}
/**
* Get the inner stream of a FilterInputStream.
* Uses reflection to access a protected field.
* @param fis input stream.
* @return the inner stream.
*/
public static InputStream getInnerStream(FilterInputStream fis) {
try {
final Field field = FilterInputStream.class.getDeclaredField("in");
field.setAccessible(true);
return (InputStream) field.get(fis);
} catch (IllegalAccessException | NoSuchFieldException e) {
throw new AssertionError("Failed to get inner stream: " + e, e);
}
}
/**
* Get the innermost stream of a chain of FilterInputStreams.
* This allows tests into the internals of an AWS SDK stream chain.
* @param fis input stream.
* @return the inner stream.
*/
public static InputStream getInnermostStream(FilterInputStream fis) {
InputStream inner = fis;
while (inner instanceof FilterInputStream) {
inner = getInnerStream((FilterInputStream) inner);
}
return inner;
}
/**
* Verify that an s3a stream is not checksummed.
* The inner stream must be active.
*/
public static void assertStreamIsNotChecksummed(final S3AInputStream wrappedS3A) {
final ResponseInputStream<GetObjectResponse> wrappedStream =
wrappedS3A.getWrappedStream();
Assertions.assertThat(wrappedStream)
.describedAs("wrapped stream is not open: call read() on %s", wrappedS3A)
.isNotNull();
final InputStream inner = getInnermostStream(wrappedStream);
Assertions.assertThat(inner)
.describedAs("innermost stream of %s", wrappedS3A)
.isNotInstanceOf(ChecksumValidatingInputStream.class)
.isNotInstanceOf(S3ChecksumValidatingInputStream.class);
}
/**
* Disable Prefetching streams from S3AFileSystem in tests.
* @param conf Configuration to remove the prefetch property from.
* @return patched config
*/
public static Configuration disablePrefetching(Configuration conf) {
removeBaseAndBucketOverrides(conf,
PREFETCH_ENABLED_KEY,
INPUT_STREAM_TYPE);
return conf;
}
/**
*Enable Prefetching streams from S3AFileSystem in tests.
* @param conf Configuration to update
* @return patched config
*/
public static Configuration enablePrefetching(Configuration conf) {
removeBaseAndBucketOverrides(conf,
PREFETCH_ENABLED_KEY,
INPUT_STREAM_TYPE);
conf.setEnum(INPUT_STREAM_TYPE, Prefetch);
return conf;
}
/**
* Enable analytics stream for S3A S3AFileSystem in tests.
* @param conf Configuration to update
* @return patched config
*/
public static Configuration enableAnalyticsAccelerator(Configuration conf) {
removeBaseAndBucketOverrides(conf,
INPUT_STREAM_TYPE);
conf.setEnum(INPUT_STREAM_TYPE, Analytics);
return conf;
}
/**
* Disable analytics stream for S3A S3AFileSystem in tests.
* @param conf Configuration to update
* @return patched config
*/
public static Configuration disableAnalyticsAccelerator(Configuration conf) {
removeBaseAndBucketOverrides(conf,
INPUT_STREAM_TYPE);
conf.setEnum(INPUT_STREAM_TYPE, Classic);
return conf;
}
/**
* Probe for a filesystem having a specific stream type;
* this is done through filesystem capabilities.
* @param fs filesystem
* @param type stream type
* @return true if the fs has the specific type.
*/
public static boolean hasInputStreamType(FileSystem fs, InputStreamType type) {
return uncheckIOExceptions(() ->
fs.hasPathCapability(new Path("/"),
type.capability()));
}
  /**
   * What is the stream type of this filesystem?
   * Looked up via the filesystem's internal store object.
   * @param fs filesystem to probe
   * @return the stream type the store reports
   */
  public static InputStreamType streamType(S3AFileSystem fs) {
    return fs.getS3AInternals().getStore().streamType();
  }
  /**
   * Skip root tests if the system properties/config says so.
   * Probes {@code ROOT_TESTS_ENABLED}, falling back to
   * {@code DEFAULT_ROOT_TESTS_ENABLED}.
   * @param conf configuration to check
   */
  public static void maybeSkipRootTests(Configuration conf) {
    assume("Root tests disabled",
        getTestPropertyBool(conf, ROOT_TESTS_ENABLED, DEFAULT_ROOT_TESTS_ENABLED));
  }
/**
* Does this FS support multi object delete?
* @param fs filesystem
* @return true if multi-delete is enabled.
*/
public static boolean isBulkDeleteEnabled(FileSystem fs) {
return fs.getConf().getBoolean(Constants.ENABLE_MULTI_DELETE,
true);
}
  /**
   * Does this FS have create performance enabled?
   * Probed via a path capability on the FS root.
   * @param fs filesystem
   * @return true if create performance is enabled
   * @throws IOException IO problems
   */
  public static boolean isCreatePerformanceEnabled(FileSystem fs)
      throws IOException {
    return fs.hasPathCapability(new Path("/"), FS_S3A_CREATE_PERFORMANCE_ENABLED);
  }
  /**
   * Is the filesystem connector bonded to S3Express storage?
   * Probed via a path capability on the FS root.
   * @param fs filesystem.
   * @return true if the store has the relevant path capability.
   * @throws IOException IO failure
   */
  public static boolean isS3ExpressStorage(FileSystem fs) throws IOException {
    return fs.hasPathCapability(new Path("/"), STORE_CAPABILITY_S3_EXPRESS_STORAGE);
  }
/**
* Get an etag from a FileStatus which must implement
* the {@link EtagSource} interface -which S3AFileStatus does.
*
* @param status the status.
* @return the etag
*/
public static String etag(FileStatus status) {
Preconditions.checkArgument(status instanceof EtagSource,
"Not an EtagSource: %s", status);
return ((EtagSource) status).getEtag();
}
/**
* Create an SDK client exception.
* @param message message
* @param cause nullable cause
* @return the exception
*/
public static SdkClientException sdkClientException(
String message, Throwable cause) {
return SdkClientException.builder()
.message(message)
.cause(cause)
.build();
}
/**
* Create an SDK client exception using the string value of the cause
* as the message.
* @param cause nullable cause
* @return the exception
*/
public static SdkClientException sdkClientException(
Throwable cause) {
return SdkClientException.builder()
.message(cause.toString())
.cause(cause)
.build();
}
private static final String BYTES_PREFIX = "bytes=";
/**
* Given a range header, split into start and end.
* Based on AWSRequestAnalyzer.
* @param rangeHeader header string
* @return parse range, or (-1, -1) for problems
*/
public static Pair<Long, Long> requestRange(String rangeHeader) {
if (rangeHeader != null && rangeHeader.startsWith(BYTES_PREFIX)) {
String[] values = rangeHeader
.substring(BYTES_PREFIX.length())
.split("-");
if (values.length == 2) {
try {
long start = Long.parseUnsignedLong(values[0]);
long end = Long.parseUnsignedLong(values[1]);
return Pair.of(start, end);
} catch (NumberFormatException e) {
LOG.warn("Failed to parse range header {}", rangeHeader, e);
}
}
}
// error case
return Pair.of(-1L, -1L);
}
}
|
should
|
java
|
square__okhttp
|
regression-test/src/androidTest/java/okhttp/regression/compare/OkHttpClientTest.java
|
{
"start": 1020,
"end": 1424
}
|
class ____ {
@Test public void get() throws IOException {
OkHttpClient client = new OkHttpClient();
Request request = new Request.Builder()
.url("https://google.com/robots.txt")
.build();
try (Response response = client.newCall(request).execute()) {
assertEquals(200, response.code());
assertEquals(Protocol.HTTP_2, response.protocol());
}
}
}
|
OkHttpClientTest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/subquery/EntityA.java
|
{
"start": 182,
"end": 581
}
|
class ____ {
@Id
@Column(name = "id", nullable = false)
private int id;
private String name;
public EntityA() {
}
public EntityA(int id, String name) {
this.id = id;
this.name = name;
}
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
|
EntityA
|
java
|
apache__maven
|
impl/maven-core/src/main/java/org/apache/maven/classrealm/ClassRealmManager.java
|
{
"start": 3167,
"end": 3630
}
|
class ____ for the specified plugin.
*
* @param plugin The plugin for which to create a realm, must not be {@code null}.
* @param parent The parent realm for the new realm, may be {@code null}.
* @param parentImports The packages/types to import from the parent realm, may be {@code null}.
* @param foreignImports The packages/types to import from foreign realms, may be {@code null}.
* @param artifacts The artifacts to add to the
|
realm
|
java
|
spring-projects__spring-framework
|
spring-beans/src/test/java/org/springframework/beans/factory/config/ServiceLocatorFactoryBeanTests.java
|
{
"start": 13007,
"end": 13240
}
|
interface ____ {
TestService2 getTestService();
TestService2 getTestService(String serviceName, String defaultNotAllowedParameter);
}
@SuppressWarnings("serial")
public static
|
ServiceLocatorInterfaceWithExtraNonCompliantMethod
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/annotations/id/generators/pkg/PackageLevelGeneratorTest.java
|
{
"start": 2182,
"end": 2300
}
|
class ____ {
@Id
@GeneratedValue(strategy = GenerationType.TABLE)
long id;
}
}
|
EntityWithDefaultedPackageGenerator4
|
java
|
quarkusio__quarkus
|
extensions/grpc/deployment/src/test/java/io/quarkus/grpc/deployment/GrpcServerProcessorTest.java
|
{
"start": 7914,
"end": 8192
}
|
class ____ extends OverridingTransactionalRoot {
static final Set<String> EXPECTED = ImmutableSet.of("transactional", "another");
@NonBlocking
void method() {
}
void another() {
}
}
static
|
NonBlockingOverridingTransactional
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/PreferInstanceofOverGetKindTest.java
|
{
"start": 1206,
"end": 1563
}
|
class ____ {
boolean isMemberSelect(Tree tree) {
return tree.getKind() == Tree.Kind.MEMBER_SELECT;
}
}
""")
.addOutputLines(
"Test.java",
"""
import com.sun.source.tree.MemberSelectTree;
import com.sun.source.tree.Tree;
|
Test
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/collections/UnidirectionalOrderColumnListTest.java
|
{
"start": 1763,
"end": 2178
}
|
class ____ {
@Id
private Long id;
private String type;
@Column(name = "`number`")
private String number;
public Phone() {
}
public Phone(Long id, String type, String number) {
this.id = id;
this.type = type;
this.number = number;
}
public Long getId() {
return id;
}
public String getType() {
return type;
}
public String getNumber() {
return number;
}
}
}
|
Phone
|
java
|
quarkusio__quarkus
|
extensions/vertx-http/deployment/src/test/java/io/quarkus/vertx/http/security/FluentApiTlsConfigValidationFailureTest.java
|
{
"start": 1686,
"end": 1889
}
|
class ____ {
void configure(@Observes HttpSecurity httpSecurity) {
httpSecurity.mTLS("my-tls-config", new BaseTlsConfiguration() {
});
}
}
}
|
AuthMechanismConfig
|
java
|
mybatis__mybatis-3
|
src/test/java/org/apache/ibatis/submitted/generictypes/User.java
|
{
"start": 708,
"end": 1098
}
|
class ____<T> {
private Integer id;
private String name;
public User() {
}
public User(Integer id, String name) {
this.id = id;
this.name = name;
}
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
|
User
|
java
|
netty__netty
|
codec-base/src/main/java/io/netty/handler/codec/MessageToByteEncoder.java
|
{
"start": 1657,
"end": 6153
}
|
class ____<I> extends ChannelOutboundHandlerAdapter {
private final TypeParameterMatcher matcher;
private final boolean preferDirect;
/**
* see {@link #MessageToByteEncoder(boolean)} with {@code true} as boolean parameter.
*/
protected MessageToByteEncoder() {
this(true);
}
/**
* see {@link #MessageToByteEncoder(Class, boolean)} with {@code true} as boolean value.
*/
protected MessageToByteEncoder(Class<? extends I> outboundMessageType) {
this(outboundMessageType, true);
}
/**
* Create a new instance which will try to detect the types to match out of the type parameter of the class.
*
* @param preferDirect {@code true} if a direct {@link ByteBuf} should be tried to be used as target for
* the encoded messages. If {@code false} is used it will allocate a heap
* {@link ByteBuf}, which is backed by an byte array.
*/
protected MessageToByteEncoder(boolean preferDirect) {
matcher = TypeParameterMatcher.find(this, MessageToByteEncoder.class, "I");
this.preferDirect = preferDirect;
}
/**
* Create a new instance
*
* @param outboundMessageType The type of messages to match
* @param preferDirect {@code true} if a direct {@link ByteBuf} should be tried to be used as target for
* the encoded messages. If {@code false} is used it will allocate a heap
* {@link ByteBuf}, which is backed by an byte array.
*/
protected MessageToByteEncoder(Class<? extends I> outboundMessageType, boolean preferDirect) {
matcher = TypeParameterMatcher.get(outboundMessageType);
this.preferDirect = preferDirect;
}
/**
* Returns {@code true} if the given message should be handled. If {@code false} it will be passed to the next
* {@link ChannelOutboundHandler} in the {@link ChannelPipeline}.
*/
public boolean acceptOutboundMessage(Object msg) throws Exception {
return matcher.match(msg);
}
@Override
public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
ByteBuf buf = null;
try {
if (acceptOutboundMessage(msg)) {
@SuppressWarnings("unchecked")
I cast = (I) msg;
buf = allocateBuffer(ctx, cast, preferDirect);
try {
encode(ctx, cast, buf);
} finally {
ReferenceCountUtil.release(cast);
}
if (buf.isReadable()) {
ctx.write(buf, promise);
} else {
buf.release();
ctx.write(Unpooled.EMPTY_BUFFER, promise);
}
buf = null;
} else {
ctx.write(msg, promise);
}
} catch (EncoderException e) {
throw e;
} catch (Throwable e) {
throw new EncoderException(e);
} finally {
if (buf != null) {
buf.release();
}
}
}
/**
* Allocate a {@link ByteBuf} which will be used as argument of {@link #encode(ChannelHandlerContext, I, ByteBuf)}.
* Sub-classes may override this method to return {@link ByteBuf} with a perfect matching {@code initialCapacity}.
*/
protected ByteBuf allocateBuffer(ChannelHandlerContext ctx, @SuppressWarnings("unused") I msg,
boolean preferDirect) throws Exception {
if (preferDirect) {
return ctx.alloc().ioBuffer();
} else {
return ctx.alloc().heapBuffer();
}
}
/**
* Encode a message into a {@link ByteBuf}. This method will be called for each written message that can be handled
* by this encoder.
*
* @param ctx the {@link ChannelHandlerContext} which this {@link MessageToByteEncoder} belongs to
* @param msg the message to encode
* @param out the {@link ByteBuf} into which the encoded message will be written
* @throws Exception is thrown if an error occurs
*/
protected abstract void encode(ChannelHandlerContext ctx, I msg, ByteBuf out) throws Exception;
protected boolean isPreferDirect() {
return preferDirect;
}
}
|
MessageToByteEncoder
|
java
|
lettuce-io__lettuce-core
|
src/main/java/io/lettuce/core/masterreplica/ReplicaTopologyProvider.java
|
{
"start": 2080,
"end": 7672
}
|
enum ____ {
ROLE(Pattern.compile("^role\\:([a-z]+)$", Pattern.MULTILINE)),
SLAVE(Pattern.compile("^slave(\\d+)\\:([a-zA-Z\\,\\=\\d\\.\\:\\-]+)$", Pattern.MULTILINE)),
MASTER_HOST(Pattern.compile("^master_host\\:([a-zA-Z\\,\\=\\d\\.\\:\\-]+)$", Pattern.MULTILINE)),
MASTER_PORT(Pattern.compile("^master_port\\:(\\d+)$", Pattern.MULTILINE)),
IP(Pattern.compile("ip\\=([a-zA-Z\\d\\.\\:\\-]+)")),
PORT(Pattern.compile("port\\=([\\d]+)"));
private final Pattern pattern;
InfoPatterns(Pattern pattern) {
this.pattern = pattern;
}
public Pattern getPattern() {
return pattern;
}
public Matcher matcher(String input) {
return pattern.matcher(input);
}
}
private static final InternalLogger logger = InternalLoggerFactory.getInstance(ReplicaTopologyProvider.class);
private final StatefulRedisConnection<?, ?> connection;
private final RedisURI redisURI;
/**
* Creates a new {@link ReplicaTopologyProvider}.
*
* @param connection must not be {@code null}
* @param redisURI must not be {@code null}
*/
public ReplicaTopologyProvider(StatefulRedisConnection<?, ?> connection, RedisURI redisURI) {
LettuceAssert.notNull(connection, "Redis Connection must not be null");
LettuceAssert.notNull(redisURI, "RedisURI must not be null");
this.connection = connection;
this.redisURI = redisURI;
}
@Override
public List<RedisNodeDescription> getNodes() {
logger.debug("Performing topology lookup");
String info = connection.sync().info("replication");
try {
return getNodesFromInfo(info);
} catch (RuntimeException e) {
throw Exceptions.bubble(e);
}
}
@Override
public CompletableFuture<List<RedisNodeDescription>> getNodesAsync() {
logger.debug("Performing topology lookup");
RedisFuture<String> info = connection.async().info("replication");
try {
return Mono.fromCompletionStage(info).timeout(redisURI.getTimeout()).map(this::getNodesFromInfo).toFuture();
} catch (RuntimeException e) {
throw Exceptions.bubble(e);
}
}
protected List<RedisNodeDescription> getNodesFromInfo(String info) {
List<RedisNodeDescription> result = new ArrayList<>();
RedisNodeDescription currentNodeDescription = getCurrentNodeDescription(info);
result.add(currentNodeDescription);
if (currentNodeDescription.getRole().isUpstream()) {
result.addAll(getReplicasFromInfo(info));
} else {
result.add(getMasterFromInfo(info));
}
return result;
}
private RedisNodeDescription getCurrentNodeDescription(String info) {
Matcher matcher = InfoPatterns.ROLE.matcher(info);
if (!matcher.find()) {
throw new IllegalStateException("No role property in info " + info);
}
return getRedisNodeDescription(matcher);
}
private List<RedisNodeDescription> getReplicasFromInfo(String info) {
List<RedisNodeDescription> replicas = new ArrayList<>();
Matcher matcher = InfoPatterns.SLAVE.matcher(info);
while (matcher.find()) {
String group = matcher.group(2);
String ip = getNested(InfoPatterns.IP, group, 1);
String port = getNested(InfoPatterns.PORT, group, 1);
replicas.add(new RedisMasterReplicaNode(ip, Integer.parseInt(port), redisURI, RedisInstance.Role.SLAVE));
}
return replicas;
}
private RedisNodeDescription getMasterFromInfo(String info) {
Matcher masterHostMatcher = InfoPatterns.MASTER_HOST.matcher(info);
Matcher masterPortMatcher = InfoPatterns.MASTER_PORT.matcher(info);
boolean foundHost = masterHostMatcher.find();
boolean foundPort = masterPortMatcher.find();
if (!foundHost || !foundPort) {
throw new IllegalStateException("Cannot resolve master from info " + info);
}
String host = masterHostMatcher.group(1);
int port = Integer.parseInt(masterPortMatcher.group(1));
return new RedisMasterReplicaNode(host, port, redisURI, RedisInstance.Role.UPSTREAM);
}
private String getNested(InfoPatterns pattern, String string, int group) {
Matcher matcher = pattern.matcher(string);
if (matcher.find()) {
return matcher.group(group);
}
throw new IllegalArgumentException(
"Cannot extract group " + group + " with pattern " + pattern.getPattern() + " from " + string);
}
private RedisNodeDescription getRedisNodeDescription(Matcher matcher) {
String roleString = matcher.group(1);
RedisInstance.Role role = null;
if (RedisInstance.Role.MASTER.name().equalsIgnoreCase(roleString)) {
role = RedisInstance.Role.UPSTREAM;
}
if (RedisInstance.Role.SLAVE.name().equalsIgnoreCase(roleString)
| RedisInstance.Role.REPLICA.name().equalsIgnoreCase(roleString)) {
role = RedisInstance.Role.REPLICA;
}
if (role == null) {
throw new IllegalStateException("Cannot resolve role " + roleString + " to " + RedisInstance.Role.UPSTREAM + " or "
+ RedisInstance.Role.REPLICA);
}
return new RedisMasterReplicaNode(redisURI.getHost(), redisURI.getPort(), redisURI, role);
}
}
|
InfoPatterns
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.