language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | quarkusio__quarkus | extensions/smallrye-fault-tolerance/deployment/src/test/java/io/quarkus/smallrye/faulttolerance/test/asynchronous/additional/BlockingTest.java | {
"start": 352,
"end": 1427
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar.addClasses(BlockingService.class));
@Inject
BlockingService service;
@Test
public void threadOffloadAndFallback() throws Exception {
Thread mainThread = Thread.currentThread();
CompletionStage<String> future = service.hello();
assertThat(future.toCompletableFuture().get()).isEqualTo("hello");
assertThat(service.getHelloThreads()).allSatisfy(thread -> {
assertThat(thread).isNotSameAs(mainThread);
});
assertThat(service.getHelloStackTraces()).allSatisfy(stackTrace -> {
assertThat(stackTrace).anySatisfy(frame -> {
assertThat(frame.getClassName()).contains("io.smallrye.faulttolerance.core");
});
});
// 1 initial execution + 3 retries
assertThat(service.getInvocationCounter()).hasValue(4);
assertThat(service.getFallbackThread()).isNotSameAs(mainThread);
}
}
| BlockingTest |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/types/inference/strategies/ExtractInputTypeStrategy.java | {
"start": 1963,
"end": 4250
} | class ____ implements InputTypeStrategy {
@Override
public ArgumentCount getArgumentCount() {
return ConstantArgumentCount.of(2);
}
@Override
public Optional<List<DataType>> inferInputTypes(
CallContext callContext, boolean throwOnFailure) {
final List<DataType> args = callContext.getArgumentDataTypes();
final LogicalType temporalArg = args.get(1).getLogicalType();
if (!temporalArg.isAnyOf(LogicalTypeFamily.DATETIME, LogicalTypeFamily.INTERVAL)) {
return callContext.fail(
throwOnFailure,
"EXTRACT requires 2nd argument to be a temporal type, but type is %s",
temporalArg);
}
final Optional<TimeIntervalUnit> timeIntervalUnit =
callContext.getArgumentValue(0, TimeIntervalUnit.class);
if (!timeIntervalUnit.isPresent()) {
return callContext.fail(
throwOnFailure,
"EXTRACT requires 1st argument to be a TimeIntervalUnit literal");
}
switch (timeIntervalUnit.get()) {
case MILLENNIUM:
case CENTURY:
case DECADE:
case YEAR:
case QUARTER:
case MONTH:
case WEEK:
case DAY:
case EPOCH:
return Optional.of(args);
case HOUR:
case MINUTE:
case SECOND:
case MILLISECOND:
case MICROSECOND:
case NANOSECOND:
if (temporalArg.isAnyOf(LogicalTypeFamily.TIME, LogicalTypeFamily.TIMESTAMP)
|| temporalArg.is(INTERVAL_DAY_TIME)) {
return Optional.of(args);
}
}
return callContext.fail(
throwOnFailure,
"EXTRACT does not support TimeIntervalUnit %s for type %s",
timeIntervalUnit.get(),
temporalArg);
}
@Override
public List<Signature> getExpectedSignatures(FunctionDefinition definition) {
return Collections.singletonList(
Signature.of(
Argument.ofGroup(TimeIntervalUnit.class), Argument.ofGroup("TEMPORAL")));
}
}
| ExtractInputTypeStrategy |
java | bumptech__glide | library/src/main/java/com/bumptech/glide/util/pool/FactoryPools.java | {
"start": 5052,
"end": 5147
} | interface ____ {
@NonNull
StateVerifier getVerifier();
}
private static final | Poolable |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/additionalsupportedoptions/UnknownEnumMappingStrategyMapper.java | {
"start": 290,
"end": 481
} | interface ____ {
UnknownEnumMappingStrategyMapper INSTANCE = Mappers.getMapper( UnknownEnumMappingStrategyMapper.class );
PetWithMissing map(Pet pet);
}
| UnknownEnumMappingStrategyMapper |
java | elastic__elasticsearch | x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/OAuth2TokenAuthenticator.java | {
"start": 1166,
"end": 4001
} | class ____ implements Authenticator {
private static final Logger logger = LogManager.getLogger(OAuth2TokenAuthenticator.class);
private final SecurityMetrics<BearerToken> authenticationMetrics;
private final TokenService tokenService;
OAuth2TokenAuthenticator(TokenService tokenService, MeterRegistry meterRegistry) {
this(tokenService, meterRegistry, System::nanoTime);
}
OAuth2TokenAuthenticator(TokenService tokenService, MeterRegistry meterRegistry, LongSupplier nanoTimeSupplier) {
this.authenticationMetrics = new SecurityMetrics<>(
SecurityMetricType.AUTHC_OAUTH2_TOKEN,
meterRegistry,
token -> Map.of(),
nanoTimeSupplier
);
this.tokenService = tokenService;
}
@Override
public String name() {
return "oauth2 token";
}
@Override
public AuthenticationToken extractCredentials(Context context) {
final SecureString bearerString = context.getBearerString();
return bearerString == null ? null : new BearerToken(bearerString);
}
@Override
public void authenticate(Context context, ActionListener<AuthenticationResult<Authentication>> listener) {
final AuthenticationToken authenticationToken = context.getMostRecentAuthenticationToken();
if (false == authenticationToken instanceof BearerToken) {
listener.onResponse(AuthenticationResult.notHandled());
return;
}
final BearerToken bearerToken = (BearerToken) authenticationToken;
doAuthenticate(context, bearerToken, InstrumentedSecurityActionListener.wrapForAuthc(authenticationMetrics, bearerToken, listener));
}
private void doAuthenticate(Context context, BearerToken bearerToken, ActionListener<AuthenticationResult<Authentication>> listener) {
tokenService.tryAuthenticateToken(bearerToken.credentials(), ActionListener.wrap(userToken -> {
if (userToken != null) {
listener.onResponse(AuthenticationResult.success(userToken.getAuthentication()));
} else {
listener.onResponse(AuthenticationResult.unsuccessful("invalid token", null));
}
}, e -> {
logger.debug(() -> "Failed to validate token authentication for request [" + context.getRequest() + "]", e);
if (e instanceof ElasticsearchSecurityException
&& false == TokenService.isExpiredTokenException((ElasticsearchSecurityException) e)) {
// intentionally ignore the returned exception; we call this primarily
// for the auditing as we already have a purpose built exception
context.getRequest().tamperedRequest();
}
listener.onFailure(e);
}));
}
}
| OAuth2TokenAuthenticator |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/shard/DenseVectorStats.java | {
"start": 1141,
"end": 7442
} | class ____ implements Writeable, ToXContentFragment {
private static final TransportVersion DENSE_VECTOR_OFF_HEAP_STATS = TransportVersion.fromName("dense_vector_off_heap_stats");
private long valueCount = 0;
/** Per-field off-heap desired memory byte size, categorized by file extension. */
Map<String, Map<String, Long>> offHeapStats;
public DenseVectorStats() {}
public DenseVectorStats(long count) {
this(count, null);
}
public DenseVectorStats(long count, Map<String, Map<String, Long>> offHeapStats) {
this.valueCount = count;
this.offHeapStats = offHeapStats;
}
public DenseVectorStats(StreamInput in) throws IOException {
this.valueCount = in.readVLong();
if (in.getTransportVersion().supports(DENSE_VECTOR_OFF_HEAP_STATS)) {
this.offHeapStats = readOptionalOffHeapStats(in);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVLong(valueCount);
if (out.getTransportVersion().supports(DENSE_VECTOR_OFF_HEAP_STATS)) {
writeOptionalOffHeapStats(out);
}
}
private Map<String, Map<String, Long>> readOptionalOffHeapStats(StreamInput in) throws IOException {
if (in.readBoolean()) {
return in.readMap(v -> in.readMap(StreamInput::readLong));
} else {
return null;
}
}
private void writeOptionalOffHeapStats(StreamOutput out) throws IOException {
if (offHeapStats != null) {
out.writeBoolean(true);
out.writeMap(offHeapStats, StreamOutput::writeString, DenseVectorStats::writeFieldStatsMap);
} else {
out.writeBoolean(false);
}
}
static void writeFieldStatsMap(StreamOutput out, Map<String, Long> map) throws IOException {
out.writeMap(map, StreamOutput::writeString, StreamOutput::writeLong);
}
public void add(DenseVectorStats other) {
if (other == null) {
return;
}
this.valueCount += other.valueCount;
if (other.offHeapStats != null) {
if (this.offHeapStats == null) {
this.offHeapStats = other.offHeapStats;
} else {
this.offHeapStats = Stream.of(this.offHeapStats, other.offHeapStats)
.flatMap(map -> map.entrySet().stream())
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue, KnnVectorsReader::mergeOffHeapByteSizeMaps));
}
}
}
/** Returns the total number of dense vectors added in the index. */
public long getValueCount() {
return valueCount;
}
/** Returns a map of per-field off-heap stats. */
public Map<String, Map<String, Long>> offHeapStats() {
return offHeapStats;
}
private Map<String, Long> getTotalsByCategory() {
if (offHeapStats == null) {
return Map.of("veb", 0L, "vec", 0L, "veq", 0L, "vex", 0L, "cenivf", 0L, "clivf", 0L);
} else {
return offHeapStats.entrySet()
.stream()
.flatMap(map -> map.getValue().entrySet().stream())
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue, Long::sum));
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.NAME);
builder.field(Fields.VALUE_COUNT, valueCount);
if (params.paramAsBoolean(INCLUDE_OFF_HEAP, false)) {
toXContentWithFields(builder, params);
}
builder.endObject();
return builder;
}
private void toXContentWithFields(XContentBuilder builder, Params params) throws IOException {
var totals = getTotalsByCategory();
builder.startObject("off_heap");
builder.humanReadableField("total_size_bytes", "total_size", ofBytes(totals.values().stream().mapToLong(Long::longValue).sum()));
builder.humanReadableField("total_veb_size_bytes", "total_veb_size", ofBytes(totals.getOrDefault("veb", 0L)));
builder.humanReadableField("total_vec_size_bytes", "total_vec_size", ofBytes(totals.getOrDefault("vec", 0L)));
builder.humanReadableField("total_veq_size_bytes", "total_veq_size", ofBytes(totals.getOrDefault("veq", 0L)));
builder.humanReadableField("total_vex_size_bytes", "total_vex_size", ofBytes(totals.getOrDefault("vex", 0L)));
builder.humanReadableField("total_cenivf_size_bytes", "total_cenivf_size", ofBytes(totals.getOrDefault("cenivf", 0L)));
builder.humanReadableField("total_clivf_size_bytes", "total_clivf_size", ofBytes(totals.getOrDefault("clivf", 0L)));
if (params.paramAsBoolean(INCLUDE_PER_FIELD_STATS, false) && offHeapStats != null && offHeapStats.size() > 0) {
toXContentWithPerFieldStats(builder);
}
builder.endObject();
}
private void toXContentWithPerFieldStats(XContentBuilder builder) throws IOException {
builder.startObject(Fields.FIELDS);
for (var key : offHeapStats.keySet().stream().sorted().toList()) {
Map<String, Long> entry = offHeapStats.get(key);
if (entry.isEmpty() == false) {
builder.startObject(key);
for (var eKey : entry.keySet().stream().sorted().toList()) {
long value = entry.get(eKey);
assert value > 0L;
builder.humanReadableField(eKey + "_size_bytes", eKey + "_size", ofBytes(value));
}
builder.endObject();
}
}
builder.endObject();
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
DenseVectorStats that = (DenseVectorStats) o;
return valueCount == that.valueCount && Objects.equals(offHeapStats, that.offHeapStats);
}
@Override
public int hashCode() {
return Objects.hash(valueCount, offHeapStats);
}
public static final String INCLUDE_OFF_HEAP = "include_off_heap";
public static final String INCLUDE_PER_FIELD_STATS = "include_per_field_stats";
static final | DenseVectorStats |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/operators/single/SingleFlatMapIterableFlowable.java | {
"start": 1407,
"end": 1954
} | class ____<T, R> extends Flowable<R> {
final SingleSource<T> source;
final Function<? super T, ? extends Iterable<? extends R>> mapper;
public SingleFlatMapIterableFlowable(SingleSource<T> source,
Function<? super T, ? extends Iterable<? extends R>> mapper) {
this.source = source;
this.mapper = mapper;
}
@Override
protected void subscribeActual(Subscriber<? super R> s) {
source.subscribe(new FlatMapIterableObserver<>(s, mapper));
}
static final | SingleFlatMapIterableFlowable |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java | {
"start": 39544,
"end": 43114
} | class ____ extends RecoveryTarget {
private final CountDownLatch recoveryBlocked;
private final CountDownLatch releaseRecovery;
private final RecoveryState.Stage stageToBlock;
static final EnumSet<RecoveryState.Stage> SUPPORTED_STAGES = EnumSet.of(
RecoveryState.Stage.INDEX,
RecoveryState.Stage.TRANSLOG,
RecoveryState.Stage.FINALIZE
);
private final Logger logger;
public BlockingTarget(
RecoveryState.Stage stageToBlock,
CountDownLatch recoveryBlocked,
CountDownLatch releaseRecovery,
IndexShard shard,
DiscoveryNode sourceNode,
PeerRecoveryTargetService.RecoveryListener listener,
Logger logger
) {
super(shard, sourceNode, 0L, null, null, listener);
this.recoveryBlocked = recoveryBlocked;
this.releaseRecovery = releaseRecovery;
this.stageToBlock = stageToBlock;
this.logger = logger;
if (SUPPORTED_STAGES.contains(stageToBlock) == false) {
throw new UnsupportedOperationException(stageToBlock + " is not supported");
}
}
private boolean hasBlocked() {
return recoveryBlocked.getCount() == 0;
}
private void blockIfNeeded(RecoveryState.Stage currentStage) {
if (currentStage == stageToBlock) {
logger.info("--> blocking recovery on stage [{}]", currentStage);
recoveryBlocked.countDown();
try {
releaseRecovery.await();
logger.info("--> recovery continues from stage [{}]", currentStage);
} catch (InterruptedException e) {
throw new RuntimeException("blockage released");
}
}
}
@Override
public void indexTranslogOperations(
final List<Translog.Operation> operations,
final int totalTranslogOps,
final long maxAutoIdTimestamp,
final long maxSeqNoOfUpdates,
final RetentionLeases retentionLeases,
final long mappingVersion,
final ActionListener<Long> listener
) {
if (hasBlocked() == false) {
blockIfNeeded(RecoveryState.Stage.TRANSLOG);
}
super.indexTranslogOperations(
operations,
totalTranslogOps,
maxAutoIdTimestamp,
maxSeqNoOfUpdates,
retentionLeases,
mappingVersion,
listener
);
}
@Override
public void cleanFiles(
int totalTranslogOps,
long globalCheckpoint,
Store.MetadataSnapshot sourceMetadata,
ActionListener<Void> listener
) {
blockIfNeeded(RecoveryState.Stage.INDEX);
super.cleanFiles(totalTranslogOps, globalCheckpoint, sourceMetadata, listener);
}
@Override
public void finalizeRecovery(long globalCheckpoint, long trimAboveSeqNo, ActionListener<Void> listener) {
if (hasBlocked() == false) {
// it maybe that not ops have been transferred, block now
blockIfNeeded(RecoveryState.Stage.TRANSLOG);
}
blockIfNeeded(RecoveryState.Stage.FINALIZE);
super.finalizeRecovery(globalCheckpoint, trimAboveSeqNo, listener);
}
}
static | BlockingTarget |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestTwoColumnCssPage.java | {
"start": 1644,
"end": 2024
} | class ____ extends HtmlPage {
@Override
public void render(Page.HTML<__> html) {
html.
title($("title")).
h1($("title")).__();
}
}
@Test
void shouldNotThrow() {
WebAppTests.testPage(TwoColumnCssLayout.class);
}
public static void main(String[] args) {
WebApps.$for("test").at(8888).inDevMode().start().joinThread();
}
}
| TestView |
java | netty__netty | buffer/src/test/java/io/netty/buffer/BigEndianDirectByteBufTest.java | {
"start": 964,
"end": 1630
} | class ____ extends AbstractByteBufTest {
@Override
protected ByteBuf newBuffer(int length, int maxCapacity) {
ByteBuf buffer = newDirectBuffer(length, maxCapacity);
assertSame(ByteOrder.BIG_ENDIAN, buffer.order());
assertEquals(0, buffer.writerIndex());
return buffer;
}
protected ByteBuf newDirectBuffer(int length, int maxCapacity) {
return new UnpooledDirectByteBuf(UnpooledByteBufAllocator.DEFAULT, length, maxCapacity);
}
@Test
public void testIsContiguous() {
ByteBuf buf = newBuffer(4);
assertTrue(buf.isContiguous());
buf.release();
}
}
| BigEndianDirectByteBufTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/AutoValueBoxedValuesTest.java | {
"start": 1290,
"end": 2155
} | class ____ {
private final CompilationTestHelper compilationHelper =
CompilationTestHelper.newInstance(AutoValueBoxedValues.class, getClass())
.setArgs(ImmutableList.of("-processor", AutoValueProcessor.class.getName()));
;
private final BugCheckerRefactoringTestHelper refactoringHelper =
BugCheckerRefactoringTestHelper.newInstance(AutoValueBoxedValues.class, getClass())
.setArgs(ImmutableList.of("-processor", AutoValueProcessor.class.getName()));
@TestParameter private boolean withBuilder;
@Test
public void unnecessaryBoxedTypes_refactoring() {
refactoringHelper
.addInputLines(
"in/Test.java",
mergeLines(
lines(
"import com.google.auto.value.AutoValue;",
"@AutoValue",
"abstract | AutoValueBoxedValuesTest |
java | apache__maven | its/core-it-suite/src/test/resources/mng-3259/module4/src/main/java/mng/Module4Bean.java | {
"start": 957,
"end": 1282
} | class ____ implements SessionBean {
public void ejbCreate() throws CreateException {}
public void ejbRemove() {}
public void ejbActivate() {}
public void ejbPassivate() {}
public void setSessionContext(SessionContext sessionContext) {}
public boolean doIt() {
return true;
}
}
| Module4Bean |
java | elastic__elasticsearch | x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/profile/RestActivateProfileActionTests.java | {
"start": 648,
"end": 2658
} | class ____ extends ESTestCase {
public void testParseXContentForGrantApiKeyRequest() throws Exception {
final String grantType = randomAlphaOfLength(8);
final String username = randomAlphaOfLength(8);
final String password = randomAlphaOfLength(8);
final String accessToken = randomAlphaOfLength(8);
final String clientAuthenticationScheme = randomAlphaOfLength(8);
final String clientAuthenticationValue = randomAlphaOfLength(8);
try (
XContentParser content = createParser(
XContentFactory.jsonBuilder()
.startObject()
.field("grant_type", grantType)
.field("username", username)
.field("password", password)
.field("access_token", accessToken)
.startObject("client_authentication")
.field("scheme", clientAuthenticationScheme)
.field("value", clientAuthenticationValue)
.endObject()
.endObject()
)
) {
ActivateProfileRequest activateProfileRequest = RestActivateProfileAction.fromXContent(content);
assertThat(activateProfileRequest.getGrant().getType(), is(grantType));
assertThat(activateProfileRequest.getGrant().getUsername(), is(username));
assertThat(activateProfileRequest.getGrant().getPassword(), is(new SecureString(password.toCharArray())));
assertThat(activateProfileRequest.getGrant().getAccessToken(), is(new SecureString(accessToken.toCharArray())));
assertThat(activateProfileRequest.getGrant().getClientAuthentication().scheme(), is(clientAuthenticationScheme));
assertThat(
activateProfileRequest.getGrant().getClientAuthentication().value(),
is(new SecureString(clientAuthenticationValue.toCharArray()))
);
}
}
}
| RestActivateProfileActionTests |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/onetoone/hhh4851/Hardware.java | {
"start": 773,
"end": 1135
} | class ____ extends BaseEntity {
private Hardware parent = null;
protected Hardware() {
}
public Hardware(Hardware parent) {
this.parent = parent;
}
@OneToOne(fetch = FetchType.LAZY)
@JoinColumn(name = "parent_id")
public Hardware getParent() {
return this.parent;
}
public void setParent(Hardware parent) {
this.parent = parent;
}
}
| Hardware |
java | elastic__elasticsearch | x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RepeatTests.java | {
"start": 1026,
"end": 5450
} | class ____ extends AbstractScalarFunctionTestCase {
public RepeatTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
this.testCase = testCaseSupplier.get();
}
@ParametersFactory
public static Iterable<Object[]> parameters() {
List<TestCaseSupplier> cases = new ArrayList<>();
cases.add(new TestCaseSupplier("Repeat basic test", List.of(DataType.KEYWORD, DataType.INTEGER), () -> {
String text = randomAlphaOfLength(10);
int number = between(0, 10);
return new TestCaseSupplier.TestCase(
List.of(
new TestCaseSupplier.TypedData(new BytesRef(text), DataType.KEYWORD, "str"),
new TestCaseSupplier.TypedData(number, DataType.INTEGER, "number")
),
"RepeatEvaluator[str=Attribute[channel=0], number=Attribute[channel=1]]",
DataType.KEYWORD,
equalTo(new BytesRef(text.repeat(number)))
);
}));
cases.add(new TestCaseSupplier("Repeat basic test with text input", List.of(DataType.TEXT, DataType.INTEGER), () -> {
String text = randomAlphaOfLength(10);
int number = between(0, 10);
return new TestCaseSupplier.TestCase(
List.of(
new TestCaseSupplier.TypedData(new BytesRef(text), DataType.TEXT, "str"),
new TestCaseSupplier.TypedData(number, DataType.INTEGER, "number")
),
"RepeatEvaluator[str=Attribute[channel=0], number=Attribute[channel=1]]",
DataType.KEYWORD,
equalTo(new BytesRef(text.repeat(number)))
);
}));
cases.add(new TestCaseSupplier("Repeat with number zero", List.of(DataType.KEYWORD, DataType.INTEGER), () -> {
String text = randomAlphaOfLength(10);
int number = 0;
return new TestCaseSupplier.TestCase(
List.of(
new TestCaseSupplier.TypedData(new BytesRef(text), DataType.KEYWORD, "str"),
new TestCaseSupplier.TypedData(number, DataType.INTEGER, "number")
),
"RepeatEvaluator[str=Attribute[channel=0], number=Attribute[channel=1]]",
DataType.KEYWORD,
equalTo(new BytesRef(""))
);
}));
cases.add(new TestCaseSupplier("Repeat Unicode", List.of(DataType.KEYWORD, DataType.INTEGER), () -> {
String text = randomUnicodeOfLength(10);
int number = randomIntBetween(0, 10);
return new TestCaseSupplier.TestCase(
List.of(
new TestCaseSupplier.TypedData(new BytesRef(text), DataType.KEYWORD, "str"),
new TestCaseSupplier.TypedData(number, DataType.INTEGER, "number")
),
"RepeatEvaluator[str=Attribute[channel=0], number=Attribute[channel=1]]",
DataType.KEYWORD,
equalTo(new BytesRef(text.repeat(number)))
);
}));
cases.add(new TestCaseSupplier("Repeat Negative Number", List.of(DataType.KEYWORD, DataType.INTEGER), () -> {
String text = randomAlphaOfLength(10);
int number = randomIntBetween(-10, -1);
return new TestCaseSupplier.TestCase(
List.of(
new TestCaseSupplier.TypedData(new BytesRef(text), DataType.KEYWORD, "str"),
new TestCaseSupplier.TypedData(number, DataType.INTEGER, "number")
),
"RepeatEvaluator[str=Attribute[channel=0], number=Attribute[channel=1]]",
DataType.KEYWORD,
nullValue()
).withWarning("Line 1:1: evaluation of [source] failed, treating result as null. Only first 20 failures recorded.")
.withWarning("Line 1:1: java.lang.IllegalArgumentException: Number parameter cannot be negative, found [" + number + "]")
.withFoldingException(IllegalArgumentException.class, "Number parameter cannot be negative, found [" + number + "]");
}));
return parameterSuppliersFromTypedDataWithDefaultChecks(true, cases);
}
@Override
protected Expression build(Source source, List<Expression> args) {
return new Repeat(source, args.get(0), args.get(1));
}
}
| RepeatTests |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/creators/DelegatingCreatorsTest.java | {
"start": 811,
"end": 1076
} | class ____
{
protected Integer value;
public IntegerBean(Integer v) { value = v; }
@JsonCreator
protected static IntegerBean create(Integer value) {
return new IntegerBean(value);
}
}
static | IntegerBean |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/operators/coordination/CoordinatorEventsExactlyOnceITCase.java | {
"start": 29179,
"end": 33284
} | class ____ {
private static final Map<String, TestScript> MAP_FOR_OPERATOR = new HashMap<>();
public static TestScript getForOperator(String operatorName) {
return MAP_FOR_OPERATOR.computeIfAbsent(operatorName, (key) -> new TestScript());
}
public static void reset() {
MAP_FOR_OPERATOR.clear();
}
private final Collection<CountDownLatch> recoveredTaskRunning = new ArrayList<>();
private boolean failedBefore;
public void recordHasFailed() {
this.failedBefore = true;
}
public boolean hasAlreadyFailed() {
return failedBefore;
}
void registerHookToNotifyAfterTaskRecovered(CountDownLatch latch) {
synchronized (recoveredTaskRunning) {
recoveredTaskRunning.add(latch);
}
}
void signalRecoveredTaskReady() {
// We complete all latches that were registered. We may need to complete
// multiple ones here, because it can happen that after a previous failure, the next
// executions fails immediately again, before even registering at the coordinator.
// in that case, we have multiple latches from multiple failure notifications waiting
// to be completed.
synchronized (recoveredTaskRunning) {
for (CountDownLatch latch : recoveredTaskRunning) {
latch.countDown();
}
recoveredTaskRunning.clear();
}
}
}
// ------------------------------------------------------------------------
// serialization shenannigans
// ------------------------------------------------------------------------
static byte[] intToBytes(int value) {
final byte[] bytes = new byte[4];
ByteBuffer.wrap(bytes).order(ByteOrder.LITTLE_ENDIAN).putInt(0, value);
return bytes;
}
static int bytesToInt(byte[] bytes) {
assertThat(bytes).hasSize(4);
return ByteBuffer.wrap(bytes).order(ByteOrder.LITTLE_ENDIAN).getInt(0);
}
static ByteStreamStateHandle stateToHandle(List<Integer> state) throws IOException {
final byte[] bytes = InstantiationUtil.serializeObject(state);
return new ByteStreamStateHandle("state", bytes);
}
static List<Integer> handleToState(StreamStateHandle handle)
throws IOException, ClassNotFoundException {
final ByteStreamStateHandle byteHandle = (ByteStreamStateHandle) handle;
return InstantiationUtil.deserializeObject(
byteHandle.getData(), EventCollectingTask.class.getClassLoader());
}
static TaskStateSnapshot createSnapshot(StreamStateHandle handle, OperatorID operatorId) {
final OperatorStateHandle.StateMetaInfo metaInfo =
new OperatorStateHandle.StateMetaInfo(
new long[] {0}, OperatorStateHandle.Mode.SPLIT_DISTRIBUTE);
final OperatorStateHandle state =
new OperatorStreamStateHandle(
Collections.singletonMap("état_et_moi_:_ça_fait_deux", metaInfo), handle);
final OperatorSubtaskState oss =
OperatorSubtaskState.builder().setManagedOperatorState(state).build();
return new TaskStateSnapshot(Collections.singletonMap(operatorId, oss));
}
@Nullable
static StreamStateHandle readSnapshot(TaskStateManager stateManager, OperatorID operatorId) {
final PrioritizedOperatorSubtaskState poss =
stateManager.prioritizedOperatorState(operatorId);
if (!poss.isRestored()) {
return null;
}
final StateObjectCollection<OperatorStateHandle> opState =
stateManager
.prioritizedOperatorState(operatorId)
.getPrioritizedManagedOperatorState()
.get(0);
final OperatorStateHandle handle = Iterators.getOnlyElement(opState.iterator());
return handle.getDelegateStateHandle();
}
}
| TestScript |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/customizers/FooContextCustomizerFactory.java | {
"start": 948,
"end": 1262
} | class ____ implements ContextCustomizerFactory {
@Override
public ContextCustomizer createContextCustomizer(Class<?> testClass,
List<ContextConfigurationAttributes> configAttributes) {
return (context, mergedConfig) -> context.getBeanFactory().registerSingleton("foo", "bar");
}
}
| FooContextCustomizerFactory |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/common/hash/Murmur3Hasher.java | {
"start": 955,
"end": 4837
} | class ____ {
public static final String METHOD = "MurmurHash3";
private final long seed;
private final byte[] remainder = new byte[16];
private int remainderLength = 0;
private int length;
private long h1, h2;
public Murmur3Hasher(long seed) {
this.seed = seed;
h1 = h2 = seed;
}
/**
* Supplies some or all of the bytes to be hashed. Multiple calls to this method may
* be made to sequentially supply the bytes for hashing. Once all bytes have been supplied, either the
* {@link #digestHash} method (preferred) or the {@link #digest()} method should be called to complete the hash calculation.
*/
public void update(byte[] inputBytes) {
update(inputBytes, 0, inputBytes.length);
}
/**
* Similar to {@link #update(byte[])}, but processes a specific portion of the input bytes
* starting from the given {@code offset} for the specified {@code length}.
* @see #update(byte[])
*/
public void update(byte[] inputBytes, int offset, int length) {
if (remainderLength + length >= remainder.length) {
if (remainderLength > 0) {
// fill rest of remainder from inputBytes and hash remainder
int bytesToCopyFromInputToRemainder = remainder.length - remainderLength;
System.arraycopy(inputBytes, offset, remainder, remainderLength, bytesToCopyFromInputToRemainder);
offset = bytesToCopyFromInputToRemainder;
length = length - bytesToCopyFromInputToRemainder;
MurmurHash3.IntermediateResult result = MurmurHash3.intermediateHash(remainder, 0, remainder.length, h1, h2);
h1 = result.h1;
h2 = result.h2;
remainderLength = 0;
this.length += remainder.length;
}
// hash as many bytes as available in integer multiples of 16 as intermediateHash can only process multiples of 16
int numBytesToHash = length & 0xFFFFFFF0;
if (numBytesToHash > 0) {
MurmurHash3.IntermediateResult result = MurmurHash3.intermediateHash(inputBytes, offset, numBytesToHash, h1, h2);
h1 = result.h1;
h2 = result.h2;
this.length += numBytesToHash;
}
// save the remaining bytes, if any
if (length > numBytesToHash) {
this.remainderLength = length - numBytesToHash;
System.arraycopy(inputBytes, offset + numBytesToHash, remainder, 0, remainderLength);
}
} else {
System.arraycopy(inputBytes, offset, remainder, remainderLength, length);
remainderLength += length;
}
}
/**
* Clears all bytes previously passed to {@link #update(byte[])} and prepares for the calculation
* of a new hash.
*/
public void reset() {
length = 0;
remainderLength = 0;
h1 = h2 = seed;
}
/**
* Completes the hash of all bytes previously passed to {@link #update}.
*/
public byte[] digest() {
return digestHash().getBytes();
}
/**
* Completes the hash of all bytes previously passed to {@link #update}.
*/
public MurmurHash3.Hash128 digestHash() {
return digestHash(new MurmurHash3.Hash128());
}
/**
* Completes the hash of all bytes previously passed to {@link #update}.
* Allows passing in a re-usable {@link org.elasticsearch.common.hash.MurmurHash3.Hash128} instance to avoid allocations.
*/
public MurmurHash3.Hash128 digestHash(MurmurHash3.Hash128 hash) {
length += remainderLength;
MurmurHash3.finalizeHash(hash, remainder, 0, length, h1, h2);
return hash;
}
public static String getAlgorithm() {
return METHOD;
}
}
| Murmur3Hasher |
java | google__dagger | javatests/dagger/hilt/processor/internal/aliasof/AliasOfProcessorTest.java | {
"start": 3449,
"end": 4232
} | interface ____{}");
HiltCompilerTests.hiltCompiler(scope)
.compile(
subject -> {
subject.hasErrorCount(1);
subject.hasErrorContaining(
"AliasOf should only be used on scopes. However, it was found "
+ "annotating test.AliasScope");
});
}
@Test
public void succeeds_aliasOfJakartaScope() {
Source scope =
HiltCompilerTests.javaSource(
"test.AliasScope",
"package test;",
"",
"import jakarta.inject.Scope;",
"import javax.inject.Singleton;",
"import dagger.hilt.migration.AliasOf;",
"",
"@Scope",
"@AliasOf(Singleton.class)",
"public @ | AliasScope |
java | spring-projects__spring-security | web/src/main/java/org/springframework/security/web/session/HttpSessionCreatedEvent.java | {
"start": 997,
"end": 1213
} | class ____ extends SessionCreationEvent {
public HttpSessionCreatedEvent(HttpSession session) {
super(session);
}
public HttpSession getSession() {
return (HttpSession) getSource();
}
}
| HttpSessionCreatedEvent |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/error/ShouldSatisfy_create_Test.java | {
"start": 1379,
"end": 2593
} | class ____ {
@Test
void should_create_error_message_if_condition_is_not_satisfied() {
// GIVEN
ErrorMessageFactory factory = shouldSatisfy("Yoda", new TestCondition<>("green lightsaber bearer"));
// WHEN
String message = factory.create(new TextDescription("Test"), STANDARD_REPRESENTATION);
// THEN
then(message).isEqualTo(format("[Test] %n"
+ "Expecting actual:%n"
+ " \"Yoda\"%n"
+ "to satisfy:%n"
+ " green lightsaber bearer"));
}
@Test
void should_create_error_message_if_consumers_are_not_all_satisfied() {
// GIVEN
ErrorMessageFactory factory = shouldSatisfyExactlyInAnyOrder(newArrayList("Luke", "Leia", "Yoda"));
// WHEN
String message = factory.create(new TextDescription("Test"), STANDARD_REPRESENTATION);
// THEN
then(message).isEqualTo(format("[Test] %n"
+ "Expecting actual:%n"
+ " [\"Luke\", \"Leia\", \"Yoda\"]%n"
+ "to satisfy all the consumers in any order."));
}
}
| ShouldSatisfy_create_Test |
java | apache__camel | components/camel-salesforce/camel-salesforce-component/src/main/java/org/apache/camel/component/salesforce/internal/client/SyncResponseCallback.java | {
"start": 1191,
"end": 2136
} | class ____ implements RestClient.ResponseCallback {
private InputStream response;
private SalesforceException exception;
private Map<String, String> headers;
private CountDownLatch latch = new CountDownLatch(1);
@Override
public void onResponse(InputStream response, Map<String, String> headers, SalesforceException exception) {
this.response = response;
this.headers = headers;
this.exception = exception;
latch.countDown();
}
public void reset() {
latch = new CountDownLatch(1);
}
public boolean await(long duration, TimeUnit unit) throws InterruptedException {
return latch.await(duration, unit);
}
public InputStream getResponse() {
return response;
}
public SalesforceException getException() {
return exception;
}
public Map<String, String> getHeaders() {
return headers;
}
}
| SyncResponseCallback |
java | apache__flink | flink-streaming-java/src/main/java/org/apache/flink/streaming/api/functions/timestamps/AscendingTimestampExtractor.java | {
"start": 4570,
"end": 5194
} | class ____ implements MonotonyViolationHandler {
private static final long serialVersionUID = 1L;
@Override
public void handleViolation(long elementTimestamp, long lastTimestamp) {
throw new RuntimeException(
"Ascending timestamps condition violated. Element timestamp "
+ elementTimestamp
+ " is smaller than last timestamp "
+ lastTimestamp);
}
}
/** Handler that only logs violations of timestamp monotony, on WARN log level. */
public static final | FailingHandler |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/fgl/FSNLockBenchmarkThroughput.java | {
"start": 1604,
"end": 2150
} | class ____ the throughput of NN for both global-lock and fine-grained lock.
* Using some common used RPCs, such as create,addBlock,complete,append,rename,
* delete,setPermission,setOwner,setReplication,getFileInfo,getListing,getBlockLocation,
* to build some tasks according to readWrite ratio and testing count.
* Then create a thread pool with a concurrency of the numClient to perform these tasks.
* The performance difference between Global Lock and Fine-grained Lock can be
* obtained according to the execution time.
*/
public | benchmarks |
java | apache__camel | components/camel-groovy/src/main/java/org/apache/camel/language/groovy/GroovyLanguage.java | {
"start": 7417,
"end": 7779
} | class ____ {
private final Map<String, GroovyClassService> cache = new HashMap<>();
public void addScript(String content, Class<Script> scriptClass) {
cache.put(content, new GroovyClassService(scriptClass));
}
public GroovyLanguage build() {
return new GroovyLanguage(cache, false);
}
}
}
| Builder |
java | quarkusio__quarkus | extensions/spring-data-jpa/deployment/src/test/java/io/quarkus/spring/data/devmode/RepositoryReloadTest.java | {
"start": 278,
"end": 1587
} | class ____ {
@RegisterExtension
static QuarkusDevModeTest TEST = new QuarkusDevModeTest()
.withApplicationRoot((jar) -> jar
.addAsResource("application.properties")
.addAsResource("import_books.sql", "import.sql")
.addClasses(Book.class, BookRepository.class, BookResource.class));
@Test
public void testRepositoryIsReloaded() {
RestAssured.get("/book").then()
.statusCode(200)
.body(containsString("Strangers"), containsString("Ascent"), containsString("Everything"));
TEST.modifySourceFile("BookRepository.java", s -> s.replace("// <placeholder>",
"java.util.Optional<Book> findById(Integer id);"));
TEST.modifySourceFile("BookResource.java", s -> s.replace("// <placeholder>",
"@GET @Path(\"/{id}\") @Produces(MediaType.APPLICATION_JSON)\n" +
" public java.util.Optional<Book> findById(@jakarta.ws.rs.PathParam(\"id\") Integer id) {\n" +
" return bookRepository.findById(id);\n" +
" }"));
RestAssured.get("/book/1").then()
.statusCode(200)
.body(containsString("Strangers"));
}
}
| RepositoryReloadTest |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/core/codec/AbstractDecoder.java | {
"start": 1626,
"end": 2490
} | class ____.
* @param logger the logger to use
* @since 5.1
*/
public void setLogger(Log logger) {
this.logger = logger;
}
/**
* Return the currently configured Logger.
* @since 5.1
*/
public Log getLogger() {
return logger;
}
@Override
public List<MimeType> getDecodableMimeTypes() {
return this.decodableMimeTypes;
}
@Override
public boolean canDecode(ResolvableType elementType, @Nullable MimeType mimeType) {
if (mimeType == null) {
return true;
}
for (MimeType candidate : this.decodableMimeTypes) {
if (candidate.isCompatibleWith(mimeType)) {
return true;
}
}
return false;
}
@Override
public Mono<T> decodeToMono(Publisher<DataBuffer> inputStream, ResolvableType elementType,
@Nullable MimeType mimeType, @Nullable Map<String, Object> hints) {
throw new UnsupportedOperationException();
}
}
| name |
java | micronaut-projects__micronaut-core | websocket/src/main/java/io/micronaut/websocket/bind/WebSocketStateBinder.java | {
"start": 841,
"end": 921
} | interface ____<T> extends ArgumentBinder<T, WebSocketState> {
}
| WebSocketStateBinder |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/cdi/bcextensions/ArrayTypeTest.java | {
"start": 3480,
"end": 3585
} | interface ____ {
}
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE_USE)
@ | MyAnn1 |
java | grpc__grpc-java | interop-testing/src/main/java/io/grpc/testing/integration/CustomBackendMetricsLoadBalancerProvider.java | {
"start": 1381,
"end": 1972
} | class ____ extends LoadBalancerProvider {
static final String TEST_ORCA_LB_POLICY_NAME = "test_backend_metrics_load_balancer";
private volatile TestOrcaReport latestOobReport;
@Override
public LoadBalancer newLoadBalancer(LoadBalancer.Helper helper) {
return new CustomBackendMetricsLoadBalancer(helper);
}
@Override
public boolean isAvailable() {
return true;
}
@Override
public int getPriority() {
return 0;
}
@Override
public String getPolicyName() {
return TEST_ORCA_LB_POLICY_NAME;
}
private final | CustomBackendMetricsLoadBalancerProvider |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/annotation/MergedAnnotationsTests.java | {
"start": 120132,
"end": 120203
} | interface ____ {
@Transactional
void doIt();
}
| TransactionalService |
java | apache__camel | components/camel-cxf/camel-cxf-transport/src/main/java/org/apache/camel/component/cxf/transport/message/CxfMessageMapper.java | {
"start": 1093,
"end": 1613
} | interface ____ {
/**
* Create a CXF {@link Message} from a Camel exchange.
*/
Message createCxfMessageFromCamelExchange(
Exchange camelExchange,
HeaderFilterStrategy headerFilterStrategy);
/**
* Given a CXF out/response Message, this method propagates response headers to a Camel exchange.
*/
void propagateResponseHeadersToCamel(
Message cxfMessage, Exchange camelExchange,
HeaderFilterStrategy headerFilterStrategy);
}
| CxfMessageMapper |
java | apache__kafka | metadata/src/test/java/org/apache/kafka/metadata/storage/FormatterTest.java | {
"start": 3340,
"end": 3604
} | class ____ {
private static final Logger LOG = LoggerFactory.getLogger(FormatterTest.class);
private static final int DEFAULT_NODE_ID = 1;
private static final Uuid DEFAULT_CLUSTER_ID = Uuid.fromString("b3dGE68sQQKzfk80C_aLZw");
static | FormatterTest |
java | google__error-prone | check_api/src/test/java/com/google/errorprone/util/FindIdentifiersTest.java | {
"start": 2150,
"end": 2857
} | class ____ extends BugChecker implements MethodInvocationTreeMatcher {
@Override
public Description matchMethodInvocation(MethodInvocationTree tree, VisitorState state) {
if (MethodMatchers.staticMethod()
.onClass("java.lang.String")
.named("format")
.matches(tree, state)) {
return buildDescription(tree)
.setMessage(FindIdentifiers.findAllIdents(state).toString())
.build();
}
return Description.NO_MATCH;
}
}
@Test
public void findAllIdentsLocals() {
CompilationTestHelper.newInstance(PrintIdents.class, getClass())
.addSourceLines(
"Test.java",
"""
| PrintIdents |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/spi/NavigablePath.java | {
"start": 7397,
"end": 8500
} | class ____ {
private boolean matchedBase;
private StringBuilder buffer;
public void collectPath(String path) {
if ( matchedBase ) {
if ( buffer == null ) {
buffer = new StringBuilder();
}
else {
buffer.append( '.' );
}
buffer.append( path );
}
}
public @Nullable String resolve() {
if ( buffer == null ) {
// Return an empty string instead of null in case the two navigable paths are equal
return matchedBase ? "" : null;
}
else {
return buffer.toString();
}
}
}
protected void relativize(NavigablePath base, RelativePathCollector collector) {
if ( this.equals( base ) ) {
collector.matchedBase = true;
}
else {
if ( !collector.matchedBase ) {
if ( parent != null ) {
parent.relativize( base, collector );
}
}
collector.collectPath( getLocalName() );
}
}
@Override
public String getFullPath() {
return alias == null
? identifierForTableGroup
: identifierForTableGroup + "(" + alias + ")";
}
@Override
public String toString() {
return getFullPath();
}
}
| RelativePathCollector |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/LiteProtoToStringTest.java | {
"start": 2625,
"end": 3444
} | class ____ {
private String test(EnumLite e) {
// BUG: Diagnostic contains:
return e.toString();
}
private String testImplicit(EnumLite e) {
// BUG: Diagnostic contains:
return "" + e;
}
private String test2(ProtocolMessageEnum e) {
return e.toString();
}
private String test3(ProtocolMessageEnum e) {
return e.getValueDescriptor().toString();
}
}
""")
.doTest();
}
@Test
public void nestedLogStatement() {
compilationHelper
.addSourceLines(
"Test.java",
"""
import com.google.protobuf.GeneratedMessageLite;
| Test |
java | micronaut-projects__micronaut-core | http-client-core/src/main/java/io/micronaut/http/client/multipart/StringPart.java | {
"start": 844,
"end": 1412
} | class ____ extends Part<String> {
protected final String value;
/**
* @param name parameter name
* @param value String value
*/
StringPart(String name, String value) {
super(name);
if (value == null) {
this.value = "";
} else {
this.value = value;
}
}
@Override
String getContent() {
return value;
}
@NonNull
@Override
<T> T getData(@NonNull MultipartDataFactory<T> factory) {
return factory.createAttribute(name, value);
}
}
| StringPart |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/manytoone/lazy/AddressVersion.java | {
"start": 547,
"end": 1529
} | class ____ extends BaseDomainEntityVersion {
private static final long serialVersionUID = 1100389518057335117L;
@Id
@ManyToOne(optional = false, fetch = FetchType.LAZY)
@JoinColumn(name = "id", referencedColumnName = "id", updatable = false, nullable = false)
private Address id;
@Column(name = "description", updatable = false)
private String description;
AddressVersion() {
}
AddressVersion(Instant when, String who, Address id, long version, String description) {
setCreatedAt( when );
setCreatedBy( who );
setVersion( version );
this.id = Objects.requireNonNull(id );
this.description = description;
}
@Override
public Address getId() {
return id;
}
public String getDescription() {
return description;
}
public AddressVersion update(Instant when, String who, String description) {
AddressVersion version = new AddressVersion( when, who, id, getVersion() + 1, description );
id.versions.add( version );
return version;
}
}
| AddressVersion |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/junit/jupiter/FailingBeforeAndAfterMethodsSpringExtensionTests.java | {
"start": 7811,
"end": 8078
} | class ____ {
@Test
void testNothing() {
}
@BeforeTransaction
void beforeTransaction() {
fail("always failing beforeTransaction()");
}
}
@FailingTestCase
@SpringJUnitConfig(DatabaseConfig.class)
@Transactional
static | FailingBeforeTransactionTestCase |
java | eclipse-vertx__vert.x | vertx-core/src/main/java/io/vertx/core/http/impl/HttpClientRequestPushPromise.java | {
"start": 794,
"end": 5081
} | class ____ extends HttpClientRequestBase {
private final HttpClientStream stream;
private final MultiMap headers;
public HttpClientRequestPushPromise(
HttpConnection connection,
HttpClientStream stream,
HttpMethod method,
String uri,
MultiMap headers) {
super(connection, stream, stream.connection().context().promise(), method, uri);
this.stream = stream;
this.headers = headers;
}
@Override
public HttpVersion version() {
return stream.version();
}
@Override
void handleResponse(Promise<HttpClientResponse> promise, HttpClientResponse resp, long timeoutMs) {
promise.complete(resp);
}
@Override
public HttpClientRequest exceptionHandler(Handler<Throwable> handler) {
return this;
}
@Override
public boolean isChunked() {
return false;
}
@Override
public MultiMap headers() {
return headers;
}
@Override
public Future<Void> write(Buffer data) {
throw new IllegalStateException();
}
@Override
public HttpClientRequest setWriteQueueMaxSize(int maxSize) {
throw new IllegalStateException();
}
@Override
public HttpClientRequest drainHandler(Handler<Void> handler) {
throw new IllegalStateException();
}
@Override
public HttpClientRequest setFollowRedirects(boolean followRedirect) {
throw new IllegalStateException();
}
@Override
public boolean isFollowRedirects() {
return false;
}
@Override
public HttpClientRequest setMaxRedirects(int maxRedirects) {
throw new IllegalStateException();
}
@Override
public int getMaxRedirects() {
return 0;
}
@Override
public int numberOfRedirections() {
return 0;
}
@Override
public HttpClientRequest redirectHandler(@Nullable Function<HttpClientResponse, Future<HttpClientRequest>> handler) {
throw new IllegalStateException();
}
@Override
public HttpClientRequest setChunked(boolean chunked) {
throw new IllegalStateException();
}
@Override
public HttpClientRequest putHeader(String name, String value) {
throw new IllegalStateException();
}
@Override
public HttpClientRequest putHeader(CharSequence name, CharSequence value) {
throw new IllegalStateException();
}
@Override
public HttpClientRequest putHeader(String name, Iterable<String> values) {
throw new IllegalStateException();
}
@Override
public HttpClientRequest putHeader(CharSequence name, Iterable<CharSequence> values) {
throw new IllegalStateException();
}
@Override
public HttpClientRequest traceOperation(String op) {
throw new IllegalStateException();
}
@Override
public String traceOperation() {
throw new IllegalStateException();
}
@Override
public Future<Void> write(String chunk) {
throw new IllegalStateException();
}
@Override
public Future<Void> write(String chunk, String enc) {
throw new IllegalStateException();
}
@Override
public HttpClientRequest continueHandler(@Nullable Handler<Void> handler) {
throw new IllegalStateException();
}
@Override
public HttpClientRequest earlyHintsHandler(@Nullable Handler<MultiMap> handler) {
throw new IllegalStateException();
}
@Override
public Future<Void> sendHead() {
throw new IllegalStateException();
}
@Override
public Future<HttpClientResponse> send(ClientForm form) {
throw new IllegalStateException();
}
@Override
public Future<HttpClientResponse> connect() {
throw new IllegalStateException();
}
@Override
public Future<Void> end(String chunk) {
throw new IllegalStateException();
}
@Override
public Future<Void> end(String chunk, String enc) {
throw new IllegalStateException();
}
@Override
public Future<Void> end(Buffer chunk) {
throw new IllegalStateException();
}
@Override
public Future<Void> end() {
throw new IllegalStateException();
}
@Override
public boolean writeQueueFull() {
throw new IllegalStateException();
}
@Override
public StreamPriority getStreamPriority() {
return stream.priority();
}
@Override
public Future<Void> writeCustomFrame(int type, int flags, Buffer payload) {
throw new UnsupportedOperationException("Cannot write frame with HTTP/1.x ");
}
}
| HttpClientRequestPushPromise |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanationTests.java | {
"start": 740,
"end": 2077
} | class ____ extends AbstractXContentSerializingTestCase<QueryExplanation> {
static QueryExplanation createRandomQueryExplanation(boolean isValid) {
String index = "index_" + randomInt(1000);
int shard = randomInt(100);
Boolean valid = isValid;
String errorField = null;
if (valid == false) {
errorField = randomAlphaOfLength(randomIntBetween(10, 100));
}
String explanation = randomAlphaOfLength(randomIntBetween(10, 100));
return new QueryExplanation(index, shard, valid, explanation, errorField);
}
static QueryExplanation createRandomQueryExplanation() {
return createRandomQueryExplanation(randomBoolean());
}
@Override
protected QueryExplanation doParseInstance(XContentParser parser) throws IOException {
return QueryExplanation.fromXContent(parser);
}
@Override
protected QueryExplanation createTestInstance() {
return createRandomQueryExplanation();
}
@Override
protected QueryExplanation mutateInstance(QueryExplanation instance) {
return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929
}
@Override
protected Writeable.Reader<QueryExplanation> instanceReader() {
return QueryExplanation::new;
}
}
| QueryExplanationTests |
java | google__error-prone | core/src/main/java/com/google/errorprone/refaster/UDoWhileLoop.java | {
"start": 1081,
"end": 2108
} | class ____ extends USimpleStatement implements DoWhileLoopTree {
public static UDoWhileLoop create(UStatement body, UExpression condition) {
return new AutoValue_UDoWhileLoop((USimpleStatement) body, condition);
}
@Override
public abstract USimpleStatement getStatement();
@Override
public abstract UExpression getCondition();
@Override
public @Nullable Choice<Unifier> visitDoWhileLoop(
DoWhileLoopTree loop, @Nullable Unifier unifier) {
return getStatement()
.unify(loop.getStatement(), unifier)
.flatMap(unifications(getCondition(), loop.getCondition()));
}
@Override
public <R, D> R accept(TreeVisitor<R, D> visitor, D data) {
return visitor.visitDoWhileLoop(this, data);
}
@Override
public Kind getKind() {
return Kind.DO_WHILE_LOOP;
}
@Override
public JCDoWhileLoop inline(Inliner inliner) throws CouldNotResolveImportException {
return inliner.maker().DoLoop(getStatement().inline(inliner), getCondition().inline(inliner));
}
}
| UDoWhileLoop |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/rpc/TestingRpcGateway.java | {
"start": 979,
"end": 1680
} | class ____ implements RpcGateway {
private final Supplier<String> addressSupplier;
private final Supplier<String> hostnameSupplier;
private TestingRpcGateway(Supplier<String> addressSupplier, Supplier<String> hostnameSupplier) {
this.addressSupplier = addressSupplier;
this.hostnameSupplier = hostnameSupplier;
}
@Override
public String getAddress() {
return addressSupplier.get();
}
@Override
public String getHostname() {
return hostnameSupplier.get();
}
public static Builder newBuilder() {
return new Builder();
}
/** {@code Builder} for {@code TestingRpcGateway}. */
public static | TestingRpcGateway |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_1600/Issue_for_gaorui.java | {
"start": 161,
"end": 692
} | class ____ extends TestCase {
public void test_for_issue() throws Exception {
String json = "{\"@type\":\"java.util.HashMap\",\"COUPON\":[{\"@type\":\"com.alibaba.json.bvt.issue_1600.Issue_for_gaorui.PromotionTermDetail\",\"activityId\":\"1584034\",\"choose\":true,\"couponId\":1251068987,\"couponType\":\"limitp\",\"match\":true,\"realPrice\":{\"amount\":0.6,\"currency\":\"USD\"}}],\"grayTrade\":\"true\"}";
JSON.parseObject(json, Object.class, Feature.SupportAutoType);
}
public static | Issue_for_gaorui |
java | netty__netty | transport-native-io_uring/src/test/java/io/netty/channel/uring/IoUringBufferRingSocketFileRegionTest.java | {
"start": 1120,
"end": 2199
} | class ____ extends SocketFileRegionTest {
@BeforeAll
public static void loadJNI() {
assumeTrue(IoUring.isAvailable());
assumeTrue(IoUring.isRegisterBufferRingSupported());
}
@Override
protected List<TestsuitePermutation.BootstrapComboFactory<ServerBootstrap, Bootstrap>> newFactories() {
return IoUringSocketTestPermutation.INSTANCE.socket();
}
@Override
protected boolean supportsCustomFileRegion() {
return false;
}
//@Disabled("Fix me")
@Test
public void testFileRegionCountLargerThenFile(TestInfo testInfo) throws Throwable {
super.testFileRegionCountLargerThenFile(testInfo);
}
@Override
protected void configure(ServerBootstrap sb, Bootstrap cb, ByteBufAllocator allocator) {
super.configure(sb, cb, allocator);
sb.childOption(IoUringChannelOption.IO_URING_BUFFER_GROUP_ID, IoUringSocketTestPermutation.BGID);
cb.option(IoUringChannelOption.IO_URING_BUFFER_GROUP_ID, IoUringSocketTestPermutation.BGID);
}
}
| IoUringBufferRingSocketFileRegionTest |
java | spring-projects__spring-framework | spring-websocket/src/test/java/org/springframework/web/socket/sockjs/transport/handler/SockJsWebSocketHandlerTests.java | {
"start": 1422,
"end": 2635
} | class ____ {
@Test
void getSubProtocols() {
SubscribableChannel channel = mock();
SubProtocolWebSocketHandler handler = new SubProtocolWebSocketHandler(channel, channel);
StompSubProtocolHandler stompHandler = new StompSubProtocolHandler();
handler.addProtocolHandler(stompHandler);
TaskScheduler scheduler = mock();
DefaultSockJsService service = new DefaultSockJsService(scheduler);
WebSocketServerSockJsSession session = new WebSocketServerSockJsSession("1", service, handler, null);
SockJsWebSocketHandler sockJsHandler = new SockJsWebSocketHandler(service, handler, session);
assertThat(sockJsHandler.getSubProtocols()).isEqualTo(stompHandler.getSupportedProtocols());
}
@Test
void getSubProtocolsNone() {
WebSocketHandler handler = new TextWebSocketHandler();
TaskScheduler scheduler = mock();
DefaultSockJsService service = new DefaultSockJsService(scheduler);
WebSocketServerSockJsSession session = new WebSocketServerSockJsSession("1", service, handler, null);
SockJsWebSocketHandler sockJsHandler = new SockJsWebSocketHandler(service, handler, session);
assertThat(sockJsHandler.getSubProtocols()).isEqualTo(Collections.emptyList());
}
}
| SockJsWebSocketHandlerTests |
java | mockito__mockito | mockito-core/src/test/java/org/mockitousage/junitrule/JUnitTestRuleIntegratesWithRuleChainTest.java | {
"start": 726,
"end": 1307
} | class ____ {
JUnitCore runner = new JUnitCore();
@Test
public void rule_can_be_changed_to_strict() {
// when
Result result = runner.run(StrictByDefault.class);
// then
JUnitResultAssert.assertThat(result).succeeds(1).fails(1, RuntimeException.class);
}
@Test
public void rule_can_be_changed_to_lenient() {
// when
Result result = runner.run(LenientByDefault.class);
// then
JUnitResultAssert.assertThat(result).isSuccessful();
}
public static | JUnitTestRuleIntegratesWithRuleChainTest |
java | spring-projects__spring-security | messaging/src/main/java/org/springframework/security/messaging/access/expression/MessageAuthorizationContextSecurityExpressionHandler.java | {
"start": 1314,
"end": 2674
} | class ____
implements SecurityExpressionHandler<MessageAuthorizationContext<?>> {
private final SecurityExpressionHandler<Message<?>> delegate;
@SuppressWarnings("rawtypes")
public MessageAuthorizationContextSecurityExpressionHandler() {
this(new DefaultMessageSecurityExpressionHandler());
}
public MessageAuthorizationContextSecurityExpressionHandler(
SecurityExpressionHandler<Message<?>> expressionHandler) {
this.delegate = expressionHandler;
}
@Override
public ExpressionParser getExpressionParser() {
return this.delegate.getExpressionParser();
}
@Override
public EvaluationContext createEvaluationContext(@Nullable Authentication authentication,
MessageAuthorizationContext<?> message) {
return createEvaluationContext(() -> authentication, message);
}
@Override
public EvaluationContext createEvaluationContext(Supplier<? extends @Nullable Authentication> authentication,
MessageAuthorizationContext<?> message) {
EvaluationContext context = this.delegate.createEvaluationContext(authentication, message.getMessage());
Map<String, String> variables = message.getVariables();
if (variables != null) {
for (Map.Entry<String, String> entry : variables.entrySet()) {
context.setVariable(entry.getKey(), entry.getValue());
}
}
return context;
}
}
| MessageAuthorizationContextSecurityExpressionHandler |
java | elastic__elasticsearch | test/framework/src/main/java/org/elasticsearch/snapshots/SnapshotInfoUtils.java | {
"start": 2392,
"end": 5864
} | class ____ {
private SnapshotInfoUtils() {/* no instances */}
static final ConstructingObjectParser<CreateSnapshotResponse, Void> CREATE_SNAPSHOT_RESPONSE_PARSER = new ConstructingObjectParser<>(
CreateSnapshotResponse.class.getName(),
true,
args -> new CreateSnapshotResponse(((SnapshotInfoBuilder) args[0]).build())
);
static final ObjectParser<SnapshotInfoBuilder, Void> SNAPSHOT_INFO_PARSER = new ObjectParser<>(
SnapshotInfoBuilder.class.getName(),
true,
SnapshotInfoBuilder::new
);
static final ConstructingObjectParser<ShardStatsBuilder, Void> SHARD_STATS_PARSER = new ConstructingObjectParser<>(
ShardStatsBuilder.class.getName(),
true,
args -> new ShardStatsBuilder((int) Objects.requireNonNullElse(args[0], 0), (int) Objects.requireNonNullElse(args[1], 0))
);
static {
SHARD_STATS_PARSER.declareInt(optionalConstructorArg(), new ParseField(TOTAL));
SHARD_STATS_PARSER.declareInt(optionalConstructorArg(), new ParseField(SUCCESSFUL));
SNAPSHOT_INFO_PARSER.declareString(SnapshotInfoBuilder::setSnapshotName, new ParseField(SNAPSHOT));
SNAPSHOT_INFO_PARSER.declareString(SnapshotInfoBuilder::setSnapshotUUID, new ParseField(UUID));
SNAPSHOT_INFO_PARSER.declareString(SnapshotInfoBuilder::setRepository, new ParseField(REPOSITORY));
SNAPSHOT_INFO_PARSER.declareString(SnapshotInfoBuilder::setState, new ParseField(STATE));
SNAPSHOT_INFO_PARSER.declareString(SnapshotInfoBuilder::setReason, new ParseField(REASON));
SNAPSHOT_INFO_PARSER.declareStringArray(SnapshotInfoBuilder::setIndices, new ParseField(INDICES));
SNAPSHOT_INFO_PARSER.declareStringArray(SnapshotInfoBuilder::setDataStreams, new ParseField(DATA_STREAMS));
SNAPSHOT_INFO_PARSER.declareObjectArray(
SnapshotInfoBuilder::setFeatureStates,
SnapshotFeatureInfo.SNAPSHOT_FEATURE_INFO_PARSER,
new ParseField(FEATURE_STATES)
);
SNAPSHOT_INFO_PARSER.declareObject(
SnapshotInfoBuilder::setIndexSnapshotDetails,
(p, c) -> p.map(HashMap::new, p2 -> SnapshotInfo.IndexSnapshotDetails.PARSER.parse(p2, c)),
new ParseField(INDEX_DETAILS)
);
SNAPSHOT_INFO_PARSER.declareLong(SnapshotInfoBuilder::setStartTime, new ParseField(START_TIME_IN_MILLIS));
SNAPSHOT_INFO_PARSER.declareLong(SnapshotInfoBuilder::setEndTime, new ParseField(END_TIME_IN_MILLIS));
SNAPSHOT_INFO_PARSER.declareObject(SnapshotInfoBuilder::setShardStatsBuilder, SHARD_STATS_PARSER, new ParseField(SHARDS));
SNAPSHOT_INFO_PARSER.declareBoolean(SnapshotInfoBuilder::setIncludeGlobalState, new ParseField(INCLUDE_GLOBAL_STATE));
SNAPSHOT_INFO_PARSER.declareObject(SnapshotInfoBuilder::setUserMetadata, (p, c) -> p.map(), new ParseField(USER_METADATA));
SNAPSHOT_INFO_PARSER.declareInt(SnapshotInfoBuilder::setVersion, new ParseField(VERSION_ID));
SNAPSHOT_INFO_PARSER.declareObjectArray(
SnapshotInfoBuilder::setShardFailures,
SnapshotShardFailure.SNAPSHOT_SHARD_FAILURE_PARSER,
new ParseField(FAILURES)
);
CREATE_SNAPSHOT_RESPONSE_PARSER.declareObject(optionalConstructorArg(), SNAPSHOT_INFO_PARSER, new ParseField("snapshot"));
}
private record ShardStatsBuilder(int totalShards, int successfulShards) {}
public static final | SnapshotInfoUtils |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/longarray/LongArrayAssert_containsOnly_Test.java | {
"start": 965,
"end": 1314
} | class ____ extends LongArrayAssertBaseTest {
@Override
protected LongArrayAssert invoke_api_method() {
return assertions.containsOnly(6L, 8L);
}
@Override
protected void verify_internal_effects() {
verify(arrays).assertContainsOnly(getInfo(assertions), getActual(assertions), arrayOf(6L, 8L));
}
}
| LongArrayAssert_containsOnly_Test |
java | apache__kafka | clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslClientAuthenticator.java | {
"start": 4370,
"end": 31697
} | enum ____ {
SEND_APIVERSIONS_REQUEST, // Initial state for authentication: client sends ApiVersionsRequest in this state when authenticating
RECEIVE_APIVERSIONS_RESPONSE, // Awaiting ApiVersionsResponse from server
SEND_HANDSHAKE_REQUEST, // Received ApiVersionsResponse, send SaslHandshake request
RECEIVE_HANDSHAKE_RESPONSE, // Awaiting SaslHandshake response from server when authenticating
INITIAL, // Initial authentication state starting SASL token exchange for configured mechanism, send first token
INTERMEDIATE, // Intermediate state during SASL token exchange, process challenges and send responses
CLIENT_COMPLETE, // Sent response to last challenge. If using SaslAuthenticate, wait for authentication status from server, else COMPLETE
COMPLETE, // Authentication sequence complete. If using SaslAuthenticate, this state implies successful authentication.
FAILED, // Failed authentication due to an error at some stage
REAUTH_PROCESS_ORIG_APIVERSIONS_RESPONSE, // Initial state for re-authentication: process ApiVersionsResponse from original authentication
REAUTH_SEND_HANDSHAKE_REQUEST, // Processed original ApiVersionsResponse, send SaslHandshake request as part of re-authentication
REAUTH_RECEIVE_HANDSHAKE_OR_OTHER_RESPONSE, // Awaiting SaslHandshake response from server when re-authenticating, and may receive other, in-flight responses sent prior to start of re-authentication as well
REAUTH_INITIAL, // Initial re-authentication state starting SASL token exchange for configured mechanism, send first token
}
private static final short DISABLE_KAFKA_SASL_AUTHENTICATE_HEADER = -1;
private static final Random RNG = new Random();
/**
* the reserved range of correlation id for Sasl requests.
*
* Noted: there is a story about reserved range. The response of LIST_OFFSET is compatible to response of SASL_HANDSHAKE.
* Hence, we could miss the schema error when using schema of SASL_HANDSHAKE to parse response of LIST_OFFSET.
* For example: the IllegalStateException caused by mismatched correlation id is thrown if following steps happens.
* 1) sent LIST_OFFSET
* 2) sent SASL_HANDSHAKE
* 3) receive response of LIST_OFFSET
* 4) succeed to use schema of SASL_HANDSHAKE to parse response of LIST_OFFSET
* 5) throw IllegalStateException due to mismatched correlation id
* As a simple approach, we force Sasl requests to use a reserved correlation id which is separated from those
* used in NetworkClient for Kafka requests. Hence, we can guarantee that every SASL request will throw
* SchemaException due to correlation id mismatch during reauthentication
*/
public static final int MAX_RESERVED_CORRELATION_ID = Integer.MAX_VALUE;
/**
* We only expect one request in-flight a time during authentication so the small range is fine.
*/
public static final int MIN_RESERVED_CORRELATION_ID = MAX_RESERVED_CORRELATION_ID - 7;
/**
* @return true if the correlation id is reserved for SASL request. otherwise, false
*/
public static boolean isReserved(int correlationId) {
    // Reserved range is the top of the int space: [MIN_RESERVED_CORRELATION_ID, MAX_RESERVED_CORRELATION_ID].
    return correlationId >= MIN_RESERVED_CORRELATION_ID;
}
private final Subject subject;
private final String servicePrincipal;
private final String host;
private final String node;
private final String mechanism;
private final TransportLayer transportLayer;
private final SaslClient saslClient;
private final Map<String, ?> configs;
private final String clientPrincipalName;
private final AuthenticateCallbackHandler callbackHandler;
private final Time time;
private final Logger log;
private final ReauthInfo reauthInfo;
// buffers used in `authenticate`
private NetworkReceive netInBuffer;
private Send netOutBuffer;
// Current SASL state
private SaslState saslState;
// Next SASL state to be set when outgoing writes associated with the current SASL state complete
private SaslState pendingSaslState;
// Correlation ID for the next request
private int correlationId;
// Request header for which response from the server is pending
private RequestHeader currentRequestHeader;
// Version of SaslAuthenticate request/responses
private short saslAuthenticateVersion;
// Version of SaslHandshake request/responses
private short saslHandshakeVersion;
/**
 * Creates the client-side authenticator for one connection.
 * <p>
 * The initial state is {@code SEND_APIVERSIONS_REQUEST}: the client first discovers the
 * broker's supported SaslHandshake/SaslAuthenticate versions before starting the SASL
 * token exchange. The {@link SaslClient} itself is created eagerly here; any failure to
 * do so is treated as a fatal {@link SaslAuthenticationException}.
 */
@SuppressWarnings("this-escape")
public SaslClientAuthenticator(Map<String, ?> configs,
                               AuthenticateCallbackHandler callbackHandler,
                               String node,
                               Subject subject,
                               String servicePrincipal,
                               String host,
                               String mechanism,
                               TransportLayer transportLayer,
                               Time time,
                               LogContext logContext) {
    this.node = node;
    this.subject = subject;
    this.callbackHandler = callbackHandler;
    this.host = host;
    this.servicePrincipal = servicePrincipal;
    this.mechanism = mechanism;
    this.correlationId = 0;
    this.transportLayer = transportLayer;
    this.configs = configs;
    // Disabled until the broker's ApiVersionsResponse proves SaslAuthenticate is supported.
    this.saslAuthenticateVersion = DISABLE_KAFKA_SASL_AUTHENTICATE_HEADER;
    this.time = time;
    this.log = logContext.logger(getClass());
    this.reauthInfo = new ReauthInfo();
    try {
        setSaslState(SaslState.SEND_APIVERSIONS_REQUEST);
        // determine client principal from subject for Kerberos to use as authorization id for the SaslClient.
        // For other mechanisms, the authenticated principal (username for PLAIN and SCRAM) is used as
        // authorization id. Hence the principal is not specified for creating the SaslClient.
        if (mechanism.equals(SaslConfigs.GSSAPI_MECHANISM))
            this.clientPrincipalName = firstPrincipal(subject);
        else
            this.clientPrincipalName = null;
        saslClient = createSaslClient();
    } catch (Exception e) {
        throw new SaslAuthenticationException("Failed to configure SaslClientAuthenticator", e);
    }
}
// visible for testing
// Creates the JCA SaslClient for the configured mechanism, executing as the JAAS Subject
// so mechanisms such as GSSAPI can pick up the subject's credentials.
SaslClient createSaslClient() {
    try {
        return SecurityManagerCompatibility.get().callAs(subject, () -> {
            String[] mechs = {mechanism};
            log.debug("Creating SaslClient: client={};service={};serviceHostname={};mechs={}",
                clientPrincipalName, servicePrincipal, host, Arrays.toString(mechs));
            // Sasl.createSaslClient may return null if no provider supports the mechanism;
            // surface that as an authentication failure rather than an NPE later.
            SaslClient retvalSaslClient = Sasl.createSaslClient(mechs, clientPrincipalName, servicePrincipal, host, configs, callbackHandler);
            if (retvalSaslClient == null) {
                throw new SaslAuthenticationException("Failed to create SaslClient with mechanism " + mechanism);
            }
            return retvalSaslClient;
        });
    } catch (CompletionException e) {
        // callAs wraps checked failures; unwrap the cause for a clearer error chain.
        throw new SaslAuthenticationException("Failed to create SaslClient with mechanism " + mechanism, e.getCause());
    }
}
/**
* Sends an empty message to the server to initiate the authentication process. It then evaluates server challenges
* via `SaslClient.evaluateChallenge` and returns client responses until authentication succeeds or fails.
*
* The messages are sent and received as size delimited bytes that consists of a 4 byte network-ordered size N
* followed by N bytes representing the opaque payload.
*/
@SuppressWarnings("fallthrough")
public void authenticate() throws IOException {
    // Finish flushing any partially-written request before advancing the state machine.
    if (netOutBuffer != null && !flushNetOutBufferAndUpdateInterestOps())
        return;
    // NOTE: several cases deliberately fall through so that one invocation can both
    // process a received response and send the next request without waiting for
    // another selector wakeup.
    switch (saslState) {
        case SEND_APIVERSIONS_REQUEST:
            // Always use version 0 request since brokers treat requests with schema exceptions as GSSAPI tokens
            ApiVersionsRequest apiVersionsRequest = new ApiVersionsRequest.Builder().build((short) 0);
            send(apiVersionsRequest.toSend(nextRequestHeader(ApiKeys.API_VERSIONS, apiVersionsRequest.version())));
            setSaslState(SaslState.RECEIVE_APIVERSIONS_RESPONSE);
            break;
        case RECEIVE_APIVERSIONS_RESPONSE:
            ApiVersionsResponse apiVersionsResponse = (ApiVersionsResponse) receiveKafkaResponse();
            if (apiVersionsResponse == null)
                break;  // response not fully read yet; retry on next invocation
            else {
                setSaslAuthenticateAndHandshakeVersions(apiVersionsResponse);
                // Saved so a later re-authentication can skip the ApiVersions round trip.
                reauthInfo.apiVersionsResponseReceivedFromBroker = apiVersionsResponse;
                setSaslState(SaslState.SEND_HANDSHAKE_REQUEST);
                // Fall through to send handshake request with the latest supported version
            }
        case SEND_HANDSHAKE_REQUEST:
            sendHandshakeRequest(saslHandshakeVersion);
            setSaslState(SaslState.RECEIVE_HANDSHAKE_RESPONSE);
            break;
        case RECEIVE_HANDSHAKE_RESPONSE:
            SaslHandshakeResponse handshakeResponse = (SaslHandshakeResponse) receiveKafkaResponse();
            if (handshakeResponse == null)
                break;
            else {
                handleSaslHandshakeResponse(handshakeResponse);
                setSaslState(SaslState.INITIAL);
                // Fall through and start SASL authentication using the configured client mechanism
            }
        case INITIAL:
            sendInitialToken();
            setSaslState(SaslState.INTERMEDIATE);
            break;
        case REAUTH_PROCESS_ORIG_APIVERSIONS_RESPONSE:
            // Re-authentication reuses the ApiVersionsResponse from the original authentication.
            setSaslAuthenticateAndHandshakeVersions(reauthInfo.apiVersionsResponseFromOriginalAuthentication);
            setSaslState(SaslState.REAUTH_SEND_HANDSHAKE_REQUEST); // Will set immediately
            // Fall through to send handshake request with the latest supported version
        case REAUTH_SEND_HANDSHAKE_REQUEST:
            sendHandshakeRequest(saslHandshakeVersion);
            setSaslState(SaslState.REAUTH_RECEIVE_HANDSHAKE_OR_OTHER_RESPONSE);
            break;
        case REAUTH_RECEIVE_HANDSHAKE_OR_OTHER_RESPONSE:
            handshakeResponse = (SaslHandshakeResponse) receiveKafkaResponse();
            if (handshakeResponse == null)
                break;
            handleSaslHandshakeResponse(handshakeResponse);
            setSaslState(SaslState.REAUTH_INITIAL); // Will set immediately
            /*
             * Fall through and start SASL authentication using the configured client
             * mechanism. Note that we have to either fall through or add a loop to enter
             * the switch statement again. We will fall through to avoid adding the loop and
             * therefore minimize the changes to authentication-related code due to the
             * changes related to re-authentication.
             */
        case REAUTH_INITIAL:
            sendInitialToken();
            setSaslState(SaslState.INTERMEDIATE);
            break;
        case INTERMEDIATE:
            byte[] serverToken = receiveToken();
            // noResponsesPending == true when a token was received but no client response was required.
            boolean noResponsesPending = serverToken != null && !sendSaslClientToken(serverToken, false);
            // For versions without SASL_AUTHENTICATE header, SASL exchange may be complete after a token is sent to server.
            // For versions with SASL_AUTHENTICATE header, server always sends a response to each SASL_AUTHENTICATE request.
            if (saslClient.isComplete()) {
                if (saslAuthenticateVersion == DISABLE_KAFKA_SASL_AUTHENTICATE_HEADER || noResponsesPending)
                    setSaslState(SaslState.COMPLETE);
                else
                    setSaslState(SaslState.CLIENT_COMPLETE);
            }
            break;
        case CLIENT_COMPLETE:
            byte[] serverResponse = receiveToken();
            if (serverResponse != null)
                setSaslState(SaslState.COMPLETE);
            break;
        case COMPLETE:
            break;
        case FAILED:
            // Should never get here since exception would have been propagated earlier
            throw new IllegalStateException("SASL handshake has already failed");
    }
}
// Sends a SaslHandshake request advertising the client's configured mechanism.
private void sendHandshakeRequest(short version) throws IOException {
    SaslHandshakeRequest handshakeRequest = createSaslHandshakeRequest(version);
    send(handshakeRequest.toSend(nextRequestHeader(ApiKeys.SASL_HANDSHAKE, handshakeRequest.version())));
}

// Starts the SASL exchange; the empty token is passed through unchanged by
// createSaslToken when the mechanism has no initial response.
private void sendInitialToken() throws IOException {
    sendSaslClientToken(new byte[0], true);
}
@Override
public void reauthenticate(ReauthenticationContext reauthenticationContext) throws IOException {
    SaslClientAuthenticator previousSaslClientAuthenticator = (SaslClientAuthenticator) Objects
        .requireNonNull(reauthenticationContext).previousAuthenticator();
    // Carry over the broker's ApiVersionsResponse so we can skip the ApiVersions round trip.
    ApiVersionsResponse apiVersionsResponseFromOriginalAuthentication = previousSaslClientAuthenticator.reauthInfo
        .apiVersionsResponse();
    // Dispose the previous SaslClient before starting a fresh exchange on this one.
    previousSaslClientAuthenticator.close();
    reauthInfo.reauthenticating(apiVersionsResponseFromOriginalAuthentication,
        reauthenticationContext.reauthenticationBeginNanos());
    // Adopt any partially-read receive from the connection so no bytes are lost.
    netInBuffer = reauthenticationContext.networkReceive();
    setSaslState(SaslState.REAUTH_PROCESS_ORIG_APIVERSIONS_RESPONSE); // Will set immediately
    authenticate();
}
@Override
public Optional<NetworkReceive> pollResponseReceivedDuringReauthentication() {
    // Drains one response that arrived for an unrelated in-flight request while re-authenticating.
    return reauthInfo.pollResponseReceivedDuringReauthentication();
}

@Override
public Long clientSessionReauthenticationTimeNanos() {
    // Nanosecond deadline at which the client should re-authenticate; null if none was negotiated.
    return reauthInfo.clientSessionReauthenticationTimeNanos;
}

@Override
public Long reauthenticationLatencyMs() {
    return reauthInfo.reauthenticationLatencyMs();
}
// visible for testing
int nextCorrelationId() {
    // Jump into the reserved id range the first time an id is handed out,
    // then keep counting upward within it (wrap-around leaves the range and
    // triggers another reset).
    if (correlationId < MIN_RESERVED_CORRELATION_ID)
        correlationId = MIN_RESERVED_CORRELATION_ID;
    return correlationId++;
}
// Builds the header for the next SASL-related request, using a correlation id from the
// reserved range, and remembers it in currentRequestHeader so the matching response can
// be parsed by receiveKafkaResponse.
private RequestHeader nextRequestHeader(ApiKeys apiKey, short version) {
    String clientId = (String) configs.get(CommonClientConfigs.CLIENT_ID_CONFIG);
    short requestApiKey = apiKey.id;
    currentRequestHeader = new RequestHeader(
        new RequestHeaderData().
            setRequestApiKey(requestApiKey).
            setRequestApiVersion(version).
            setClientId(clientId).
            setCorrelationId(nextCorrelationId()),
        apiKey.requestHeaderVersion(version));
    return currentRequestHeader;
}
// Visible to override for testing
protected SaslHandshakeRequest createSaslHandshakeRequest(short version) {
    // The handshake advertises the client's configured SASL mechanism to the broker.
    return new SaslHandshakeRequest.Builder(
        new SaslHandshakeRequestData().setMechanism(mechanism)).build(version);
}
// Visible to override for testing
protected void setSaslAuthenticateAndHandshakeVersions(ApiVersionsResponse apiVersionsResponse) {
    // Negotiate each API down to the highest version supported by both sides.
    ApiVersion authenticateVersion = apiVersionsResponse.apiVersion(ApiKeys.SASL_AUTHENTICATE.id);
    if (authenticateVersion != null)
        this.saslAuthenticateVersion = negotiatedVersion(authenticateVersion, ApiKeys.SASL_AUTHENTICATE);
    ApiVersion handshakeVersion = apiVersionsResponse.apiVersion(ApiKeys.SASL_HANDSHAKE.id);
    if (handshakeVersion != null)
        this.saslHandshakeVersion = negotiatedVersion(handshakeVersion, ApiKeys.SASL_HANDSHAKE);
}

// Highest version supported by both the broker (per its advertised max) and this client.
private static short negotiatedVersion(ApiVersion brokerVersion, ApiKeys api) {
    return (short) Math.min(brokerVersion.maxVersion(), api.latestVersion());
}
// Transitions the SASL state machine. If an outgoing write is still in flight the
// transition is deferred (pendingSaslState) and applied once the write completes,
// so state never advances ahead of the bytes that justify it.
private void setSaslState(SaslState saslState) {
    if (netOutBuffer != null && !netOutBuffer.completed())
        pendingSaslState = saslState;
    else {
        this.pendingSaslState = null;
        this.saslState = saslState;
        log.debug("Set SASL client state to {}", saslState);
        if (saslState == SaslState.COMPLETE) {
            reauthInfo.setAuthenticationEndAndSessionReauthenticationTimes(time.nanoseconds());
            if (!reauthInfo.reauthenticating())
                transportLayer.removeInterestOps(SelectionKey.OP_WRITE);
            else
                /*
                 * Re-authentication is triggered by a write, so we have to make sure that
                 * pending write is actually sent.
                 */
                transportLayer.addInterestOps(SelectionKey.OP_WRITE);
        }
    }
}
/**
* Sends a SASL client token to server if required. This may be an initial token to start
* SASL token exchange or response to a challenge from the server.
* @return true if a token was sent to the server
*/
private boolean sendSaslClientToken(byte[] serverToken, boolean isInitial) throws IOException {
    if (!saslClient.isComplete()) {
        byte[] saslToken = createSaslToken(serverToken, isInitial);
        if (saslToken != null) {
            ByteBuffer tokenBuf = ByteBuffer.wrap(saslToken);
            Send send;
            if (saslAuthenticateVersion == DISABLE_KAFKA_SASL_AUTHENTICATE_HEADER) {
                // Legacy brokers: raw size-prefixed token with no Kafka request framing.
                send = ByteBufferSend.sizePrefixed(tokenBuf);
            } else {
                // Modern brokers: wrap the token in a SaslAuthenticate request.
                SaslAuthenticateRequestData data = new SaslAuthenticateRequestData()
                    .setAuthBytes(tokenBuf.array());
                SaslAuthenticateRequest request = new SaslAuthenticateRequest.Builder(data).build(saslAuthenticateVersion);
                send = request.toSend(nextRequestHeader(ApiKeys.SASL_AUTHENTICATE, saslAuthenticateVersion));
            }
            send(send);
            return true;
        }
    }
    // Nothing to send: exchange already complete or the SaslClient produced no token.
    return false;
}
// Stages the given send as the current outgoing buffer and attempts to flush it;
// an IOException marks authentication FAILED before propagating to the caller.
private void send(Send send) throws IOException {
    try {
        netOutBuffer = send;
        flushNetOutBufferAndUpdateInterestOps();
    } catch (IOException e) {
        setSaslState(SaslState.FAILED);
        throw e;
    }
}
// Flushes the outgoing buffer and keeps the selector's write-interest in sync:
// still-pending data keeps OP_WRITE registered; a completed flush drops it and
// applies any state transition that was deferred behind the write.
private boolean flushNetOutBufferAndUpdateInterestOps() throws IOException {
    boolean done = flushNetOutBuffer();
    if (!done) {
        transportLayer.addInterestOps(SelectionKey.OP_WRITE);
        return false;
    }
    transportLayer.removeInterestOps(SelectionKey.OP_WRITE);
    if (pendingSaslState != null)
        setSaslState(pendingSaslState);
    return true;
}
// Reads one size-delimited packet from the transport. Returns the payload bytes when a
// complete packet has arrived, or null if more reads are needed (non-blocking I/O).
private byte[] receiveResponseOrToken() throws IOException {
    if (netInBuffer == null) netInBuffer = new NetworkReceive(node);
    netInBuffer.readFrom(transportLayer);
    byte[] serverPacket = null;
    if (netInBuffer.complete()) {
        netInBuffer.payload().rewind();
        serverPacket = new byte[netInBuffer.payload().remaining()];
        netInBuffer.payload().get(serverPacket, 0, serverPacket.length);
        netInBuffer = null; // reset the networkReceive as we read all the data.
    }
    return serverPacket;
}
// Principal for this client. Note: clientPrincipalName is only populated for GSSAPI
// (taken from the JAAS Subject in the constructor); for other mechanisms it is null.
public KafkaPrincipal principal() {
    return new KafkaPrincipal(KafkaPrincipal.USER_TYPE, clientPrincipalName);
}

@Override
public Optional<KafkaPrincipalSerde> principalSerde() {
    return Optional.empty();
}

// True once the state machine has reached COMPLETE (authentication succeeded).
public boolean complete() {
    return saslState == SaslState.COMPLETE;
}

// Releases mechanism resources held by the underlying SaslClient.
public void close() throws IOException {
    if (saslClient != null)
        saslClient.dispose();
}
// Receives the next SASL token from the broker. For legacy brokers this is a raw
// size-delimited token; otherwise it is extracted from a SaslAuthenticate response,
// whose error code (if any) fails authentication. Returns null if no complete
// packet is available yet.
private byte[] receiveToken() throws IOException {
    if (saslAuthenticateVersion == DISABLE_KAFKA_SASL_AUTHENTICATE_HEADER) {
        return receiveResponseOrToken();
    } else {
        SaslAuthenticateResponse response = (SaslAuthenticateResponse) receiveKafkaResponse();
        if (response != null) {
            Errors error = response.error();
            if (error != Errors.NONE) {
                setSaslState(SaslState.FAILED);
                String errMsg = response.errorMessage();
                throw errMsg == null ? error.exception() : error.exception(errMsg);
            }
            // A positive session lifetime means the broker expects periodic re-authentication.
            long sessionLifetimeMs = response.sessionLifetimeMs();
            if (sessionLifetimeMs > 0L)
                reauthInfo.positiveSessionLifetimeMs = sessionLifetimeMs;
            return Utils.copyArray(response.saslAuthBytes());
        } else
            return null;
    }
}
// Evaluates the server's challenge (as the JAAS Subject) and produces the client's next
// token. Transient Kerberos failures become retriable SaslExceptions; everything else
// becomes a fatal SaslAuthenticationException.
private byte[] createSaslToken(final byte[] saslToken, boolean isInitial) throws SaslException {
    if (saslToken == null)
        throw new IllegalSaslStateException("Error authenticating with the Kafka Broker: received a `null` saslToken.");
    try {
        // Mechanisms without an initial response send the (empty) token unchanged.
        if (isInitial && !saslClient.hasInitialResponse())
            return saslToken;
        else
            return SecurityManagerCompatibility.get().callAs(subject, () -> saslClient.evaluateChallenge(saslToken));
    } catch (CompletionException e) {
        String error = "An error: (" + e + ") occurred when evaluating SASL token received from the Kafka Broker.";
        KerberosError kerberosError = KerberosError.fromException(e);
        // Try to provide hints to use about what went wrong so they can fix their configuration.
        if (kerberosError == KerberosError.SERVER_NOT_FOUND) {
            error += " This may be caused by Java's being unable to resolve the Kafka Broker's" +
                " hostname correctly. You may want to try to adding" +
                " '-Dsun.net.spi.nameservice.provider.1=dns,sun' to your client's JVMFLAGS environment." +
                " Users must configure FQDN of kafka brokers when authenticating using SASL and" +
                " `socketChannel.socket().getInetAddress().getHostName()` must match the hostname in `principal/hostname@realm`";
        }
        //Unwrap the SaslException
        Throwable cause = e.getCause();
        // Treat transient Kerberos errors as non-fatal SaslExceptions that are processed as I/O exceptions
        // and all other failures as fatal SaslAuthenticationException.
        if ((kerberosError != null && kerberosError.retriable()) || (kerberosError == null && KerberosError.isRetriableClientGssException(e))) {
            error += " Kafka Client will retry.";
            throw new SaslException(error, cause);
        } else {
            error += " Kafka Client will go to AUTHENTICATION_FAILED state.";
            throw new SaslAuthenticationException(error, cause);
        }
    }
}
// Attempts one (possibly partial) write of the outgoing buffer; returns whether it
// has been fully flushed.
private boolean flushNetOutBuffer() throws IOException {
    if (netOutBuffer.completed())
        return true;
    netOutBuffer.writeTo(transportLayer);
    return netOutBuffer.completed();
}
// Receives and parses one Kafka response using the header of the last request sent
// (currentRequestHeader). Returns null if the response is not yet fully read. Parse
// failures during re-authentication are treated as responses to older, unrelated
// requests and are stashed for later delivery instead of failing the connection.
private AbstractResponse receiveKafkaResponse() throws IOException {
    if (netInBuffer == null)
        netInBuffer = new NetworkReceive(node);
    // Keep a handle: receiveResponseOrToken nulls out netInBuffer once the packet completes.
    NetworkReceive receive = netInBuffer;
    try {
        byte[] responseBytes = receiveResponseOrToken();
        if (responseBytes == null)
            return null;
        else {
            AbstractResponse response = NetworkClient.parseResponse(ByteBuffer.wrap(responseBytes), currentRequestHeader);
            currentRequestHeader = null;
            return response;
        }
    } catch (BufferUnderflowException | SchemaException | IllegalArgumentException e) {
        /*
         * Account for the fact that during re-authentication there may be responses
         * arriving for requests that were sent in the past.
         */
        if (reauthInfo.reauthenticating()) {
            /*
             * It didn't match the current request header, so it must be unrelated to
             * re-authentication. Save it so it can be processed later.
             */
            receive.payload().rewind();
            reauthInfo.pendingAuthenticatedReceives.add(receive);
            return null;
        }
        log.debug("Invalid SASL mechanism response, server may be expecting only GSSAPI tokens");
        setSaslState(SaslState.FAILED);
        throw new IllegalSaslStateException("Invalid SASL mechanism response, server may be expecting a different protocol", e);
    }
}
// Validates the broker's SaslHandshake response. Any error moves the state machine to
// FAILED before the specific exception is thrown so the authenticator cannot be reused.
private void handleSaslHandshakeResponse(SaslHandshakeResponse response) {
    Errors error = response.error();
    if (error != Errors.NONE)
        setSaslState(SaslState.FAILED);
    switch (error) {
        case NONE:
            break;
        case UNSUPPORTED_SASL_MECHANISM:
            throw new UnsupportedSaslMechanismException(String.format("Client SASL mechanism '%s' not enabled in the server, enabled mechanisms are %s",
                mechanism, response.enabledMechanisms()));
        case ILLEGAL_SASL_STATE:
            throw new IllegalSaslStateException(String.format("Unexpected handshake request with client mechanism %s, enabled mechanisms are %s",
                mechanism, response.enabledMechanisms()));
        default:
            throw new IllegalSaslStateException(String.format("Unknown error code %s, client mechanism is %s, enabled mechanisms are %s",
                response.error(), mechanism, response.enabledMechanisms()));
    }
}
/**
* Returns the first Principal from Subject.
* @throws KafkaException if there are no Principals in the Subject.
* During Kerberos re-login, principal is reset on Subject. An exception is
* thrown so that the connection is retried after any configured backoff.
*/
public static String firstPrincipal(Subject subject) {
    Set<Principal> principals = subject.getPrincipals();
    // Iterate under the set's own lock to avoid racing with a concurrent
    // Kerberos re-login mutating the Subject's principals.
    synchronized (principals) {
        for (Principal principal : principals)
            return principal.getName();
    }
    throw new KafkaException("Principal could not be determined from Subject, this may be a transient failure due to Kerberos re-login");
}
/**
* Information related to re-authentication
*/
private | SaslState |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/jsontype/TestWithGenerics.java | {
"start": 1381,
"end": 1634
} | class ____ {
public ContainerWithField<?> animalContainer;
}
// Beans for [JACKSON-387], [JACKSON-430]
@JsonTypeInfo(use=JsonTypeInfo.Id.CLASS, include=JsonTypeInfo.As.PROPERTY, property="@classAttr1")
static | WrappedContainerWithField |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/util/LeaderRetrievalUtils.java | {
"start": 4386,
"end": 4547
} | class ____ is used by the retrieveLeaderInformation method to retrieve the leader's
* rpc URL and the current leader session ID.
*/
public static | which |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/jaxb/mapping/spi/JaxbEmbeddedMapping.java | {
"start": 363,
"end": 480
} | interface ____ extends JaxbSingularAttribute {
String getTarget();
void setTarget(String target);
}
| JaxbEmbeddedMapping |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldScriptTests.java | {
"start": 1030,
"end": 2902
} | class ____ extends FieldScriptTestCase<BooleanFieldScript.Factory> {
public static final BooleanFieldScript.Factory DUMMY = (fieldName, params, lookup, onScriptError) -> ctx -> new BooleanFieldScript(
fieldName,
params,
lookup,
OnScriptError.FAIL,
ctx
) {
@Override
public void execute() {
emit(false);
}
};
@Override
protected ScriptContext<BooleanFieldScript.Factory> context() {
return BooleanFieldScript.CONTEXT;
}
@Override
protected BooleanFieldScript.Factory dummyScript() {
return DUMMY;
}
@Override
protected BooleanFieldScript.Factory fromSource() {
return BooleanFieldScript.PARSE_FROM_SOURCE;
}
public void testTooManyValues() throws IOException {
try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) {
iw.addDocument(List.of(new StoredField("_source", new BytesRef("{}"))));
try (DirectoryReader reader = iw.getReader()) {
BooleanFieldScript script = new BooleanFieldScript(
"test",
Map.of(),
new SearchLookup(field -> null, (ft, lookup, fdt) -> null, (ctx, doc) -> null),
OnScriptError.FAIL,
reader.leaves().get(0)
) {
@Override
public void execute() {
for (int i = 0; i <= AbstractFieldScript.MAX_VALUES * 1000; i++) {
new Emit(this).value(i % 2 == 0);
}
}
};
// There isn't a limit to the number of values so this won't throw
script.execute();
}
}
}
}
| BooleanFieldScriptTests |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/metrics/MetricRegistryImpl.java | {
"start": 29001,
"end": 29437
} | class ____ may become garbage-collectible, whereas with an anonymous inner class, the
* timer thread (which is a GC root) will hold a reference via the timer task and its enclosing
* instance pointer. Making the MetricsRegistry garbage collectible makes the java.util.Timer
* garbage collectible, which acts as a fail-safe to stop the timer thread and prevents resource
* leaks.
*/
private static final | instance |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/reattachment/CollectionReattachmentTest.java | {
"start": 576,
"end": 1812
} | class ____ {
@AfterEach
public void tearDown(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
}
@Test
public void testUpdateOwnerAfterClear(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
Parent p = new Parent( "p" );
p.getChildren().add( new Child( "c" ) );
session.persist( p );
}
);
Parent parent = scope.fromTransaction(
session -> {
Parent p = session.get( Parent.class, "p" );
// clear...
session.clear();
// now try to reattach...
return session.merge( p );
}
);
scope.inTransaction(
session ->
session.remove( parent )
);
}
@Test
public void testUpdateOwnerAfterEvict(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
Parent p = new Parent( "p" );
p.getChildren().add( new Child( "c" ) );
session.persist( p );
}
);
Parent parent = scope.fromTransaction(
session -> {
Parent p = session.get( Parent.class, "p" );
// evict...
session.evict( p );
// now try to reattach...
return session.merge( p );
}
);
scope.inTransaction(
session ->
session.remove( parent )
);
}
}
| CollectionReattachmentTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/time/DateCheckerTest.java | {
"start": 11921,
"end": 12497
} | class ____ {
public void foo(Date date) {
// BUG: Diagnostic contains: The minutes value (-1) is out of bounds [0..59].
date.setMinutes(-1);
// BUG: Diagnostic contains: The minutes value (60) is out of bounds [0..59].
date.setMinutes(60);
}
}
""")
.doTest();
}
@Test
public void setters_badSeconds() {
helper
.addSourceLines(
"TestClass.java",
"""
import java.util.Date;
public | TestClass |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java | {
"start": 84200,
"end": 148812
} | class ____ {
private String index;
private State state = State.OPEN;
private long version = 1;
private TransportVersion transportVersion = TransportVersion.fromId(0);
private long mappingVersion = 1;
private long settingsVersion = 1;
private long aliasesVersion = 1;
private long[] primaryTerms = null;
private Settings settings = Settings.EMPTY;
private MappingMetadata mapping;
private IndexVersion mappingsUpdatedVersion = IndexVersion.current();
private final ImmutableOpenMap.Builder<String, InferenceFieldMetadata> inferenceFields;
private final ImmutableOpenMap.Builder<String, AliasMetadata> aliases;
private final ImmutableOpenMap.Builder<String, DiffableStringMap> customMetadata;
private final Map<Integer, Set<String>> inSyncAllocationIds;
private final ImmutableOpenMap.Builder<String, RolloverInfo> rolloverInfos;
private Integer routingNumShards;
private boolean isSystem;
private IndexLongFieldRange timestampRange = IndexLongFieldRange.NO_SHARDS;
private IndexLongFieldRange eventIngestedRange = IndexLongFieldRange.NO_SHARDS;
private LifecycleExecutionState lifecycleExecutionState = LifecycleExecutionState.EMPTY_STATE;
private IndexMetadataStats stats = null;
private Double indexWriteLoadForecast = null;
private Long shardSizeInBytesForecast = null;
private IndexReshardingMetadata reshardingMetadata = null;
public Builder(String index) {
this.index = index;
this.inferenceFields = ImmutableOpenMap.builder();
this.aliases = ImmutableOpenMap.builder();
this.customMetadata = ImmutableOpenMap.builder();
this.inSyncAllocationIds = new HashMap<>();
this.rolloverInfos = ImmutableOpenMap.builder();
this.isSystem = false;
}
public Builder(IndexMetadata indexMetadata) {
this.index = indexMetadata.getIndex().getName();
this.state = indexMetadata.state;
this.version = indexMetadata.version;
this.transportVersion = indexMetadata.transportVersion;
this.mappingVersion = indexMetadata.mappingVersion;
this.settingsVersion = indexMetadata.settingsVersion;
this.aliasesVersion = indexMetadata.aliasesVersion;
this.settings = indexMetadata.getSettings();
this.primaryTerms = indexMetadata.primaryTerms.clone();
this.mapping = indexMetadata.mapping;
this.inferenceFields = ImmutableOpenMap.builder(indexMetadata.inferenceFields);
this.aliases = ImmutableOpenMap.builder(indexMetadata.aliases);
this.customMetadata = ImmutableOpenMap.builder(indexMetadata.customData);
this.routingNumShards = indexMetadata.routingNumShards;
this.inSyncAllocationIds = new HashMap<>(indexMetadata.inSyncAllocationIds);
this.mappingsUpdatedVersion = indexMetadata.mappingsUpdatedVersion;
this.rolloverInfos = ImmutableOpenMap.builder(indexMetadata.rolloverInfos);
this.isSystem = indexMetadata.isSystem;
this.timestampRange = indexMetadata.timestampRange;
this.eventIngestedRange = indexMetadata.eventIngestedRange;
this.lifecycleExecutionState = indexMetadata.lifecycleExecutionState;
this.stats = indexMetadata.stats;
this.indexWriteLoadForecast = indexMetadata.writeLoadForecast;
this.shardSizeInBytesForecast = indexMetadata.shardSizeInBytesForecast;
this.reshardingMetadata = indexMetadata.reshardingMetadata;
}
public Builder index(String index) {
this.index = index;
return this;
}
public Builder numberOfShards(int numberOfShards) {
settings = Settings.builder().put(settings).put(SETTING_NUMBER_OF_SHARDS, numberOfShards).build();
return this;
}
/**
* Builder to create IndexMetadata that has an increased shard count (used for re-shard).
* The new shard count must be a multiple of the original shardcount as well as a factor
* of routingNumShards.
* We do not support shrinking the shard count.
* @param targetShardCount target shard count after resharding
*/
public Builder reshardAddShards(int targetShardCount) {
    final int sourceNumShards = numberOfShards();
    // Target must be a whole multiple of the current count so each source shard
    // splits into the same integral number of target shards.
    if (targetShardCount % sourceNumShards != 0) {
        throw new IllegalArgumentException(
            "New shard count ["
                + targetShardCount
                + "] should be a multiple"
                + " of current shard count ["
                + sourceNumShards
                + "] for ["
                + index
                + "]"
        );
    }
    settings = Settings.builder().put(settings).put(SETTING_NUMBER_OF_SHARDS, targetShardCount).build();
    // Grow the primary-term array: existing shards keep their terms, new shards
    // start UNASSIGNED.
    // NOTE(review): assumes primaryTerms is already initialized (non-null) — confirm
    // callers guarantee this, otherwise this line throws NPE.
    var newPrimaryTerms = new long[targetShardCount];
    Arrays.fill(newPrimaryTerms, this.primaryTerms.length, newPrimaryTerms.length, SequenceNumbers.UNASSIGNED_PRIMARY_TERM);
    System.arraycopy(primaryTerms, 0, newPrimaryTerms, 0, this.primaryTerms.length);
    primaryTerms = newPrimaryTerms;
    routingNumShards = MetadataCreateIndexService.getIndexNumberOfRoutingShards(settings, sourceNumShards, this.routingNumShards);
    return this;
}
/**
* Sets the number of shards that should be used for routing. This should only be used if the number of shards in
* an index has changed ie if the index is shrunk.
*/
public Builder setRoutingNumShards(int routingNumShards) {
this.routingNumShards = routingNumShards;
return this;
}
/**
* Returns number of shards that should be used for routing. By default this method will return the number of shards
* for this index.
*
* @see #setRoutingNumShards(int)
* @see #numberOfShards()
*/
public int getRoutingNumShards() {
return routingNumShards == null ? numberOfShards() : routingNumShards;
}
/**
* Returns the number of shards.
*
* @return the provided value or -1 if it has not been set.
*/
public int numberOfShards() {
return settings.getAsInt(SETTING_NUMBER_OF_SHARDS, -1);
}
public Builder numberOfReplicas(int numberOfReplicas) {
settings = Settings.builder().put(settings).put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas).build();
return this;
}
public Builder routingPartitionSize(int routingPartitionSize) {
settings = Settings.builder().put(settings).put(SETTING_ROUTING_PARTITION_SIZE, routingPartitionSize).build();
return this;
}
public Builder creationDate(long creationDate) {
settings = Settings.builder().put(settings).put(SETTING_CREATION_DATE, creationDate).build();
return this;
}
public Builder settings(Settings.Builder settings) {
return settings(settings.build());
}
public Builder settings(Settings settings) {
this.settings = settings;
return this;
}
public MappingMetadata mapping() {
return mapping;
}
public Builder putMapping(String source) {
putMapping(
new MappingMetadata(
MapperService.SINGLE_MAPPING_NAME,
XContentHelper.convertToMap(XContentFactory.xContent(source), source, true)
)
);
return this;
}
public Builder putMapping(MappingMetadata mappingMd) {
mapping = mappingMd;
return this;
}
public Builder mappingsUpdatedVersion(IndexVersion indexVersion) {
this.mappingsUpdatedVersion = indexVersion;
return this;
}
public Builder state(State state) {
this.state = state;
return this;
}
public Builder putAlias(AliasMetadata aliasMetadata) {
aliases.put(aliasMetadata.alias(), aliasMetadata);
return this;
}
public Builder putAlias(AliasMetadata.Builder aliasMetadata) {
aliases.put(aliasMetadata.alias(), aliasMetadata.build());
return this;
}
public Builder removeAlias(String alias) {
aliases.remove(alias);
return this;
}
public Builder removeAllAliases() {
aliases.clear();
return this;
}
public Builder putCustom(Map<String, ? extends Map<String, String>> customMetadata) {
customMetadata.forEach(this::putCustom);
return this;
}
public Builder putCustom(String type, Map<String, String> customIndexMetadata) {
this.customMetadata.put(type, new DiffableStringMap(customIndexMetadata));
return this;
}
public Map<String, String> removeCustom(String type) {
return this.customMetadata.remove(type);
}
public Set<String> getInSyncAllocationIds(int shardId) {
return inSyncAllocationIds.get(shardId);
}
public Builder putInSyncAllocationIds(int shardId, Set<String> allocationIds) {
inSyncAllocationIds.put(shardId, Set.copyOf(allocationIds));
return this;
}
public Builder putRolloverInfo(RolloverInfo rolloverInfo) {
rolloverInfos.put(rolloverInfo.getAlias(), rolloverInfo);
return this;
}
public Builder putRolloverInfos(Map<String, RolloverInfo> rolloverInfos) {
this.rolloverInfos.clear();
this.rolloverInfos.putAllFromMap(rolloverInfos);
return this;
}
/** Returns the index metadata version currently set on this builder. */
public long version() {
return this.version;
}
/** Sets the index metadata version; bumped on every metadata change. */
public Builder version(long version) {
this.version = version;
return this;
}
/** Returns the transport version currently set on this builder. */
public TransportVersion transportVersion() {
return this.transportVersion;
}
/** Sets the transport version associated with this metadata. */
public Builder transportVersion(TransportVersion transportVersion) {
this.transportVersion = transportVersion;
return this;
}
/** Returns the mapping version currently set on this builder. */
public long mappingVersion() {
return mappingVersion;
}
/** Sets the mapping version; bumped whenever the mapping changes. */
public Builder mappingVersion(final long mappingVersion) {
this.mappingVersion = mappingVersion;
return this;
}
/** Returns the settings version currently set on this builder. */
public long settingsVersion() {
return settingsVersion;
}
/** Sets the settings version; bumped whenever the index settings change. */
public Builder settingsVersion(final long settingsVersion) {
this.settingsVersion = settingsVersion;
return this;
}
/** Sets the aliases version; bumped whenever the aliases change. */
public Builder aliasesVersion(final long aliasesVersion) {
this.aliasesVersion = aliasesVersion;
return this;
}
/**
* returns the primary term for the given shard.
* See {@link IndexMetadata#primaryTerm(int)} for more information.
*/
public long primaryTerm(int shardId) {
// lazily allocate the per-shard array; requires number_of_shards to be set first
if (primaryTerms == null) {
initializePrimaryTerms();
}
return this.primaryTerms[shardId];
}
/**
* sets the primary term for the given shard.
* See {@link IndexMetadata#primaryTerm(int)} for more information.
*/
public Builder primaryTerm(int shardId, long primaryTerm) {
// lazily allocate the per-shard array; requires number_of_shards to be set first
if (primaryTerms == null) {
initializePrimaryTerms();
}
this.primaryTerms[shardId] = primaryTerm;
return this;
}
/** Replaces all primary terms with a defensive copy of the given array (one entry per shard). */
private void primaryTerms(long[] primaryTerms) {
this.primaryTerms = primaryTerms.clone();
}
/** Allocates the primary-term array sized by number_of_shards, filled with the unassigned sentinel. */
private void initializePrimaryTerms() {
assert primaryTerms == null;
if (numberOfShards() < 0) {
throw new IllegalStateException("you must set the number of shards before setting/reading primary terms");
}
primaryTerms = new long[numberOfShards()];
Arrays.fill(primaryTerms, SequenceNumbers.UNASSIGNED_PRIMARY_TERM);
}
/** Marks (or unmarks) this index as a system index. */
public Builder system(boolean system) {
this.isSystem = system;
return this;
}
/** Returns whether this builder is configured as a system index. */
public boolean isSystem() {
return isSystem;
}
/** Sets the known range of {@code @timestamp} values in this index. */
public Builder timestampRange(IndexLongFieldRange timestampRange) {
this.timestampRange = timestampRange;
return this;
}
/** Sets the known range of {@code event.ingested} values; must not be null. */
public Builder eventIngestedRange(IndexLongFieldRange eventIngestedRange) {
assert eventIngestedRange != null : "eventIngestedRange cannot be null";
this.eventIngestedRange = eventIngestedRange;
return this;
}
/** Sets the observed index stats (e.g. write load), or null if unknown. */
public Builder stats(IndexMetadataStats stats) {
this.stats = stats;
return this;
}
/** Sets the forecasted write load for this index, or null if no forecast is available. */
public Builder indexWriteLoadForecast(Double indexWriteLoadForecast) {
this.indexWriteLoadForecast = indexWriteLoadForecast;
return this;
}
/** Sets the forecasted shard size in bytes, or null if no forecast is available. */
public Builder shardSizeInBytesForecast(Long shardSizeInBytesForecast) {
this.shardSizeInBytesForecast = shardSizeInBytesForecast;
return this;
}
/** Adds (or replaces) inference field metadata, keyed by field name. */
public Builder putInferenceField(InferenceFieldMetadata value) {
this.inferenceFields.put(value.getName(), value);
return this;
}
/** Adds all entries of the given map as inference field metadata. */
public Builder putInferenceFields(Map<String, InferenceFieldMetadata> values) {
this.inferenceFields.putAllFromMap(values);
return this;
}
/** Sets the in-progress resharding metadata, or null if no resharding is underway. */
public Builder reshardingMetadata(IndexReshardingMetadata reshardingMetadata) {
this.reshardingMetadata = reshardingMetadata;
return this;
}
/** Builds the immutable {@link IndexMetadata} without attempting any legacy repairs. */
public IndexMetadata build() {
return build(false);
}
// package private for testing
/**
* Validates the accumulated state and builds the immutable {@link IndexMetadata}.
*
* @param repair if true, attempt to repair known corruptions written by older versions
*               (currently: an 8.5.0 alias whose name self-conflicts with the index name)
*               instead of throwing
* @throws IllegalArgumentException if required settings are missing or inconsistent
*/
IndexMetadata build(boolean repair) {
/*
* We expect that the metadata has been properly built to set the number of shards and the number of replicas, and do not rely
* on the default values here. Those must have been set upstream.
*/
if (INDEX_NUMBER_OF_SHARDS_SETTING.exists(settings) == false) {
throw new IllegalArgumentException("must specify number of shards for index [" + index + "]");
}
final int numberOfShards = INDEX_NUMBER_OF_SHARDS_SETTING.get(settings);
if (INDEX_NUMBER_OF_REPLICAS_SETTING.exists(settings) == false) {
throw new IllegalArgumentException("must specify number of replicas for index [" + index + "]");
}
final int numberOfReplicas = INDEX_NUMBER_OF_REPLICAS_SETTING.get(settings);
int routingPartitionSize = INDEX_ROUTING_PARTITION_SIZE_SETTING.get(settings);
if (routingPartitionSize != 1 && routingPartitionSize >= getRoutingNumShards()) {
throw new IllegalArgumentException(
"routing partition size ["
+ routingPartitionSize
+ "] should be a positive number"
+ " less than the number of routing shards ["
+ getRoutingNumShards()
+ "] for ["
+ index
+ "]"
);
}
// fill missing slots in inSyncAllocationIds with empty set if needed and make all entries immutable
@SuppressWarnings({ "unchecked", "rawtypes" })
Map.Entry<Integer, Set<String>> denseInSyncAllocationIds[] = new Map.Entry[numberOfShards];
for (int i = 0; i < numberOfShards; i++) {
Set<String> allocIds = inSyncAllocationIds.getOrDefault(i, Set.of());
denseInSyncAllocationIds[i] = Map.entry(i, allocIds);
}
// build the allocation-filter decoders from their settings; null means "no filter configured"
var requireMap = INDEX_ROUTING_REQUIRE_GROUP_SETTING.getAsMap(settings);
final DiscoveryNodeFilters requireFilters;
if (requireMap.isEmpty()) {
requireFilters = null;
} else {
requireFilters = DiscoveryNodeFilters.buildFromKeyValues(AND, requireMap);
}
var includeMap = INDEX_ROUTING_INCLUDE_GROUP_SETTING.getAsMap(settings);
final DiscoveryNodeFilters includeFilters;
if (includeMap.isEmpty()) {
includeFilters = null;
} else {
includeFilters = DiscoveryNodeFilters.buildFromKeyValues(OR, includeMap);
}
var excludeMap = INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getAsMap(settings);
final DiscoveryNodeFilters excludeFilters;
if (excludeMap.isEmpty()) {
excludeFilters = null;
} else {
excludeFilters = DiscoveryNodeFilters.buildFromKeyValues(OR, excludeMap);
}
var initialRecoveryMap = INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING.getAsMap(settings);
final DiscoveryNodeFilters initialRecoveryFilters;
if (initialRecoveryMap.isEmpty()) {
initialRecoveryFilters = null;
} else {
initialRecoveryFilters = DiscoveryNodeFilters.buildFromKeyValues(OR, initialRecoveryMap);
}
IndexVersion indexCreatedVersion = indexCreatedVersion(settings);
if (primaryTerms == null) {
initializePrimaryTerms();
} else if (primaryTerms.length != numberOfShards) {
throw new IllegalStateException(
"primaryTerms length is ["
+ primaryTerms.length
+ "] but should be equal to number of shards ["
+ numberOfShards()
+ "]"
);
}
final ActiveShardCount waitForActiveShards = SETTING_WAIT_FOR_ACTIVE_SHARDS.get(settings);
if (waitForActiveShards.validate(numberOfReplicas) == false) {
throw new IllegalArgumentException(
"invalid "
+ SETTING_WAIT_FOR_ACTIVE_SHARDS.getKey()
+ "["
+ waitForActiveShards
+ "]: cannot be greater than "
+ "number of shard copies ["
+ (numberOfReplicas + 1)
+ "]"
);
}
final List<String> routingPaths = INDEX_ROUTING_PATH.get(settings);
final List<String> dimensions = INDEX_DIMENSIONS.get(settings);
final String uuid = settings.get(SETTING_INDEX_UUID, INDEX_UUID_NA_VALUE);
List<String> tierPreference;
try {
tierPreference = DataTier.parseTierList(DataTier.TIER_PREFERENCE_SETTING.get(settings));
} catch (Exception e) {
assert e instanceof IllegalArgumentException : e;
// BwC hack: the setting failed validation but it will be fixed in
// #IndexMetadataVerifier#convertSharedCacheTierPreference(IndexMetadata)} later so we just store a null
// to be able to build a temporary instance
tierPreference = null;
}
ImmutableOpenMap<String, DiffableStringMap> newCustomMetadata = customMetadata.build();
// derive ILM execution state from the custom metadata section, if present
Map<String, String> custom = newCustomMetadata.get(LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY);
if (custom != null && custom.isEmpty() == false) {
lifecycleExecutionState = LifecycleExecutionState.fromCustomMetadata(custom);
} else {
lifecycleExecutionState = LifecycleExecutionState.EMPTY_STATE;
}
if (stats != null && stats.writeLoad().numberOfShards() != numberOfShards) {
assert false;
throw new IllegalArgumentException(
"The number of write load shards ["
+ stats.writeLoad().numberOfShards()
+ "] is different than the number of index shards ["
+ numberOfShards
+ "]"
);
}
var aliasesMap = aliases.build();
for (AliasMetadata alias : aliasesMap.values()) {
if (alias.alias().equals(index)) {
// 8.5.0 could write an alias with the same name as its index; optionally rename it instead of failing
if (repair && indexCreatedVersion.equals(IndexVersions.V_8_5_0)) {
var updatedBuilder = ImmutableOpenMap.builder(aliasesMap);
final var brokenAlias = updatedBuilder.remove(index);
final var fixedAlias = AliasMetadata.newAliasMetadata(brokenAlias, index + "-alias-corrupted-by-8-5");
aliasesMap = updatedBuilder.fPut(fixedAlias.getAlias(), fixedAlias).build();
logger.warn("Repaired corrupted alias with the same name as its index for [{}]", index);
break;
} else {
throw new IllegalArgumentException("alias name [" + index + "] self-conflicts with index name");
}
}
}
assert eventIngestedRange != null : "eventIngestedRange must be set (non-null) when building IndexMetadata";
final boolean isSearchableSnapshot = SearchableSnapshotsSettings.isSearchableSnapshotStore(settings);
String indexModeString = settings.get(IndexSettings.MODE.getKey());
final IndexMode indexMode = indexModeString != null ? IndexMode.fromString(indexModeString.toLowerCase(Locale.ROOT)) : null;
final boolean isTsdb = indexMode == IndexMode.TIME_SERIES;
// synthetic _id for TSDB is gated on a feature flag, an index-version floor, and an explicit setting
boolean useTimeSeriesSyntheticId = false;
if (isTsdb
&& IndexSettings.TSDB_SYNTHETIC_ID_FEATURE_FLAG
&& indexCreatedVersion.onOrAfter(IndexVersions.TIME_SERIES_USE_SYNTHETIC_ID)) {
var setting = settings.get(IndexSettings.USE_SYNTHETIC_ID.getKey());
if (setting != null && setting.equalsIgnoreCase(Boolean.TRUE.toString())) {
assert IndexSettings.TSDB_SYNTHETIC_ID_FEATURE_FLAG;
useTimeSeriesSyntheticId = true;
}
}
return new IndexMetadata(
new Index(index, uuid),
version,
transportVersion,
mappingVersion,
settingsVersion,
aliasesVersion,
primaryTerms,
state,
numberOfShards,
numberOfReplicas,
settings,
mapping,
inferenceFields.build(),
aliasesMap,
newCustomMetadata,
Map.ofEntries(denseInSyncAllocationIds),
requireFilters,
initialRecoveryFilters,
includeFilters,
excludeFilters,
indexCreatedVersion,
mappingsUpdatedVersion,
getRoutingNumShards(),
routingPartitionSize,
routingPaths,
dimensions,
waitForActiveShards,
rolloverInfos.build(),
isSystem,
INDEX_HIDDEN_SETTING.get(settings),
timestampRange,
eventIngestedRange,
IndexMetadata.INDEX_PRIORITY_SETTING.get(settings),
settings.getAsLong(SETTING_CREATION_DATE, -1L),
DiskThresholdDecider.SETTING_IGNORE_DISK_WATERMARKS.get(settings),
tierPreference,
ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get(settings),
settings.get(IndexMetadata.LIFECYCLE_NAME), // n.b. lookup by name to get null-if-not-present semantics
lifecycleExecutionState,
AutoExpandReplicas.SETTING.get(settings),
isSearchableSnapshot,
isSearchableSnapshot && settings.getAsBoolean(SEARCHABLE_SNAPSHOT_PARTIAL_SETTING_KEY, false),
indexMode,
isTsdb ? IndexSettings.TIME_SERIES_START_TIME.get(settings) : null,
isTsdb ? IndexSettings.TIME_SERIES_END_TIME.get(settings) : null,
SETTING_INDEX_VERSION_COMPATIBILITY.get(settings),
stats,
indexWriteLoadForecast,
shardSizeInBytesForecast,
reshardingMetadata,
useTimeSeriesSyntheticId
);
}
@SuppressWarnings("unchecked")
/**
* Serializes {@code indexMetadata} as XContent. The exact shape depends on the
* {@code CONTEXT_MODE_PARAM} (API vs. gateway/snapshot context): non-API contexts write
* flat settings, mappings as an array (optionally binary), full alias bodies, and a
* primary-terms array, while the API context writes expanded settings, mappings as an
* object, alias names only, and primary terms keyed by shard id.
*/
public static void toXContent(IndexMetadata indexMetadata, XContentBuilder builder, ToXContent.Params params) throws IOException {
Metadata.XContentContext context = Metadata.XContentContext.valueOf(
params.param(CONTEXT_MODE_PARAM, Metadata.CONTEXT_MODE_API)
);
builder.startObject(indexMetadata.getIndex().getName());
builder.field(KEY_VERSION, indexMetadata.getVersion());
builder.field(KEY_TRANSPORT_VERSION, indexMetadata.getTransportVersion().toString());
builder.field(KEY_MAPPING_VERSION, indexMetadata.getMappingVersion());
builder.field(KEY_SETTINGS_VERSION, indexMetadata.getSettingsVersion());
builder.field(KEY_ALIASES_VERSION, indexMetadata.getAliasesVersion());
builder.field(KEY_ROUTING_NUM_SHARDS, indexMetadata.getRoutingNumShards());
builder.field(KEY_STATE, indexMetadata.getState().toString().toLowerCase(Locale.ENGLISH));
boolean binary = params.paramAsBoolean("binary", false);
builder.startObject(KEY_SETTINGS);
if (context != Metadata.XContentContext.API) {
indexMetadata.getSettings().toXContent(builder, Settings.FLAT_SETTINGS_TRUE);
} else {
indexMetadata.getSettings().toXContent(builder, params);
}
builder.endObject();
// mappings: hash-only when deduplicating in gateway context, array form otherwise (non-API), object form for the API
if (context == Metadata.XContentContext.GATEWAY && params.paramAsBoolean(DEDUPLICATED_MAPPINGS_PARAM, false)) {
MappingMetadata mmd = indexMetadata.mapping();
if (mmd != null) {
builder.field(KEY_MAPPINGS_HASH, mmd.source().getSha256());
}
} else if (context != Metadata.XContentContext.API) {
builder.startArray(KEY_MAPPINGS);
MappingMetadata mmd = indexMetadata.mapping();
if (mmd != null) {
if (binary) {
builder.value(mmd.source().compressed());
} else {
mmd.source().copyTo(builder);
}
}
builder.endArray();
} else {
builder.startObject(KEY_MAPPINGS);
MappingMetadata mmd = indexMetadata.mapping();
if (mmd != null) {
Map<String, Object> mapping = XContentHelper.convertToMap(mmd.source().uncompressed(), false).v2();
if (mapping.size() == 1 && mapping.containsKey(mmd.type())) {
// the type name is the root value, reduce it
mapping = (Map<String, Object>) mapping.get(mmd.type());
}
builder.field(mmd.type());
builder.map(mapping);
}
builder.endObject();
}
for (Map.Entry<String, DiffableStringMap> cursor : indexMetadata.customData.entrySet()) {
builder.stringStringMap(cursor.getKey(), cursor.getValue());
}
if (context != Metadata.XContentContext.API) {
builder.startObject(KEY_ALIASES);
for (AliasMetadata aliasMetadata : indexMetadata.getAliases().values()) {
AliasMetadata.Builder.toXContent(aliasMetadata, builder, params);
}
builder.endObject();
builder.startArray(KEY_PRIMARY_TERMS);
for (int i = 0; i < indexMetadata.getNumberOfShards(); i++) {
builder.value(indexMetadata.primaryTerm(i));
}
builder.endArray();
} else {
builder.startArray(KEY_ALIASES);
for (Map.Entry<String, AliasMetadata> cursor : indexMetadata.getAliases().entrySet()) {
builder.value(cursor.getKey());
}
builder.endArray();
builder.startObject(IndexMetadata.KEY_PRIMARY_TERMS);
for (int shard = 0; shard < indexMetadata.getNumberOfShards(); shard++) {
builder.field(Integer.toString(shard), indexMetadata.primaryTerm(shard));
}
builder.endObject();
}
builder.startObject(KEY_IN_SYNC_ALLOCATIONS);
for (Map.Entry<Integer, Set<String>> cursor : indexMetadata.inSyncAllocationIds.entrySet()) {
builder.startArray(String.valueOf(cursor.getKey()));
for (String allocationId : cursor.getValue()) {
builder.value(allocationId);
}
builder.endArray();
}
builder.endObject();
builder.startObject(KEY_ROLLOVER_INFOS);
for (RolloverInfo rolloverInfo : indexMetadata.getRolloverInfos().values()) {
rolloverInfo.toXContent(builder, params);
}
builder.endObject();
builder.field(KEY_MAPPINGS_UPDATED_VERSION, indexMetadata.mappingsUpdatedVersion);
builder.field(KEY_SYSTEM, indexMetadata.isSystem);
builder.startObject(KEY_TIMESTAMP_RANGE);
indexMetadata.timestampRange.toXContent(builder, params);
builder.endObject();
builder.startObject(KEY_EVENT_INGESTED_RANGE);
indexMetadata.eventIngestedRange.toXContent(builder, params);
builder.endObject();
// optional sections: only written when present/non-empty
if (indexMetadata.stats != null) {
builder.startObject(KEY_STATS);
indexMetadata.stats.toXContent(builder, params);
builder.endObject();
}
if (indexMetadata.writeLoadForecast != null) {
builder.field(KEY_WRITE_LOAD_FORECAST, indexMetadata.writeLoadForecast);
}
if (indexMetadata.shardSizeInBytesForecast != null) {
builder.field(KEY_SHARD_SIZE_FORECAST, indexMetadata.shardSizeInBytesForecast);
}
if (indexMetadata.getInferenceFields().isEmpty() == false) {
builder.startObject(KEY_INFERENCE_FIELDS);
for (InferenceFieldMetadata field : indexMetadata.getInferenceFields().values()) {
field.toXContent(builder, params);
}
builder.endObject();
}
if (indexMetadata.reshardingMetadata != null) {
builder.startObject(KEY_RESHARDING);
indexMetadata.reshardingMetadata.toXContent(builder, params);
builder.endObject();
}
builder.endObject();
}
/** Parses {@link IndexMetadata} from XContent without mapping deduplication support. */
public static IndexMetadata fromXContent(XContentParser parser) throws IOException {
return fromXContent(parser, null);
}
/**
* Parses {@link IndexMetadata} from XContent.
*
* @param mappingsByHash deduplicated mappings keyed by SHA-256 hash, used to resolve a
*                       {@code mappings_hash} field; may be null if deduplication is not in use
* @throws IllegalArgumentException on unexpected fields/tokens or an unresolvable mappings hash
*/
public static IndexMetadata fromXContent(XContentParser parser, Map<String, MappingMetadata> mappingsByHash) throws IOException {
if (parser.currentToken() == null) { // fresh parser? move to the first token
parser.nextToken();
}
if (parser.currentToken() == XContentParser.Token.START_OBJECT) { // on a start object move to next token
parser.nextToken();
}
XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser);
Builder builder = new Builder(parser.currentName());
// default to UNKNOWN so that reading 'event.ingested' range content works in older versions
builder.eventIngestedRange(IndexLongFieldRange.UNKNOWN);
String currentFieldName;
XContentParser.Token token = parser.nextToken();
XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser);
// track presence of per-section versions so the trailing assertions can verify them
boolean mappingVersion = false;
boolean settingsVersion = false;
boolean aliasesVersion = false;
while ((currentFieldName = parser.nextFieldName()) != null) {
token = parser.nextToken();
if (token == XContentParser.Token.START_OBJECT) {
switch (currentFieldName) {
case KEY_SETTINGS:
builder.settings(Settings.fromXContent(parser));
break;
case KEY_MAPPINGS:
// object form: one entry per mapping type (API-context output)
while ((currentFieldName = parser.nextFieldName()) != null) {
token = parser.nextToken();
XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser);
builder.putMapping(new MappingMetadata(currentFieldName, Map.of(currentFieldName, parser.mapOrdered())));
}
break;
case KEY_ALIASES:
while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
builder.putAlias(AliasMetadata.Builder.fromXContent(parser));
}
break;
case KEY_IN_SYNC_ALLOCATIONS:
while ((currentFieldName = parser.nextFieldName()) != null) {
token = parser.nextToken();
XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_ARRAY, token, parser);
final int shardId = Integer.parseInt(currentFieldName);
Set<String> allocationIds = new HashSet<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token == XContentParser.Token.VALUE_STRING) {
allocationIds.add(parser.text());
}
}
builder.putInSyncAllocationIds(shardId, allocationIds);
}
break;
case KEY_ROLLOVER_INFOS:
while ((currentFieldName = parser.nextFieldName()) != null) {
token = parser.nextToken();
XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser);
builder.putRolloverInfo(RolloverInfo.parse(parser, currentFieldName));
}
break;
case "warmers":
// TODO: do this in 6.0:
// throw new IllegalArgumentException("Warmers are not supported anymore - are you upgrading from 1.x?");
// ignore: warmers have been removed in 5.0 and are
// simply ignored when upgrading from 2.x
assert Version.CURRENT.major <= 5;
parser.skipChildren();
break;
case KEY_TIMESTAMP_RANGE:
builder.timestampRange(IndexLongFieldRange.fromXContent(parser));
break;
case KEY_EVENT_INGESTED_RANGE:
builder.eventIngestedRange(IndexLongFieldRange.fromXContent(parser));
break;
case KEY_STATS:
builder.stats(IndexMetadataStats.fromXContent(parser));
break;
case KEY_INFERENCE_FIELDS:
while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
builder.putInferenceField(InferenceFieldMetadata.fromXContent(parser));
}
break;
case KEY_RESHARDING:
builder.reshardingMetadata(IndexReshardingMetadata.fromXContent(parser));
break;
default:
// assume it's custom index metadata
builder.putCustom(currentFieldName, parser.mapStrings());
break;
}
} else if (token == XContentParser.Token.START_ARRAY) {
switch (currentFieldName) {
case KEY_MAPPINGS:
// array form: binary (compressed) or expanded mapping source (gateway-context output)
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) {
builder.putMapping(new MappingMetadata(new CompressedXContent(parser.binaryValue())));
} else {
Map<String, Object> mapping = parser.mapOrdered();
if (mapping.size() == 1) {
String mappingType = mapping.keySet().iterator().next();
builder.putMapping(new MappingMetadata(mappingType, mapping));
}
}
}
break;
case KEY_PRIMARY_TERMS:
ArrayList<Long> list = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
XContentParserUtils.ensureExpectedToken(XContentParser.Token.VALUE_NUMBER, token, parser);
list.add(parser.longValue());
}
builder.primaryTerms(list.stream().mapToLong(i -> i).toArray());
break;
default:
throw new IllegalArgumentException("Unexpected field for an array " + currentFieldName);
}
} else if (token.isValue()) {
switch (currentFieldName) {
case KEY_STATE -> builder.state(State.fromString(parser.text()));
case KEY_VERSION -> builder.version(parser.longValue());
case KEY_TRANSPORT_VERSION -> builder.transportVersion(TransportVersion.fromString(parser.text()));
case KEY_MAPPING_VERSION -> {
mappingVersion = true;
builder.mappingVersion(parser.longValue());
}
case KEY_SETTINGS_VERSION -> {
settingsVersion = true;
builder.settingsVersion(parser.longValue());
}
case KEY_ALIASES_VERSION -> {
aliasesVersion = true;
builder.aliasesVersion(parser.longValue());
}
case KEY_ROUTING_NUM_SHARDS -> builder.setRoutingNumShards(parser.intValue());
case KEY_SYSTEM -> builder.system(parser.booleanValue());
case KEY_MAPPINGS_HASH -> {
assert mappingsByHash != null : "no deduplicated mappings given";
if (mappingsByHash.containsKey(parser.text()) == false) {
throw new IllegalArgumentException(
"mapping of index [" + builder.index + "] with hash [" + parser.text() + "] not found"
);
}
builder.putMapping(mappingsByHash.get(parser.text()));
}
case KEY_MAPPINGS_UPDATED_VERSION -> builder.mappingsUpdatedVersion(IndexVersion.fromId(parser.intValue()));
case KEY_WRITE_LOAD_FORECAST -> builder.indexWriteLoadForecast(parser.doubleValue());
case KEY_SHARD_SIZE_FORECAST -> builder.shardSizeInBytesForecast(parser.longValue());
default -> throw new IllegalArgumentException("Unexpected field [" + currentFieldName + "]");
}
} else {
throw new IllegalArgumentException("Unexpected token " + token);
}
}
XContentParserUtils.ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser);
assert mappingVersion : "mapping version should be present for indices created on or after 6.5.0";
assert settingsVersion : "settings version should be present for indices created on or after 6.5.0";
assert indexCreatedVersion(builder.settings).before(IndexVersions.V_7_2_0) || aliasesVersion
: "aliases version should be present for indices created on or after 7.2.0";
return builder.build(true);
}
/**
* Used to load legacy metadata from ES versions that are no longer index-compatible.
* Returns information on best-effort basis.
* Throws an exception if the metadata is index-compatible with the current version (in that case,
* {@link #fromXContent} should be used to load the content.
*/
public static IndexMetadata legacyFromXContent(XContentParser parser) throws IOException {
if (parser.currentToken() == null) { // fresh parser? move to the first token
parser.nextToken();
}
if (parser.currentToken() == XContentParser.Token.START_OBJECT) { // on a start object move to next token
parser.nextToken();
}
XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser);
Builder builder = new Builder(parser.currentName());
String currentFieldName = null;
XContentParser.Token token = parser.nextToken();
XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser);
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if ("settings".equals(currentFieldName)) {
Settings settings = Settings.fromXContent(parser);
// this loader is only for legacy (no longer index-compatible) versions; reject anything newer
if (SETTING_INDEX_VERSION_COMPATIBILITY.get(settings).isLegacyIndexVersion() == false) {
throw new IllegalStateException(
"this method should only be used to parse older incompatible index metadata versions "
+ "but got "
+ SETTING_INDEX_VERSION_COMPATIBILITY.get(settings).toReleaseVersion()
);
}
builder.settings(settings);
} else if ("mappings".equals(currentFieldName)) {
// object form: {type: mapping source, ...}
Map<String, Object> mappingSourceBuilder = new HashMap<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
String mappingType = currentFieldName;
mappingSourceBuilder.put(mappingType, parser.mapOrdered());
} else {
throw new IllegalArgumentException("Unexpected token: " + token);
}
}
handleLegacyMapping(builder, mappingSourceBuilder);
} else if ("in_sync_allocations".equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_ARRAY) {
String shardId = currentFieldName;
Set<String> allocationIds = new HashSet<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token == XContentParser.Token.VALUE_STRING) {
allocationIds.add(parser.text());
}
}
builder.putInSyncAllocationIds(Integer.parseInt(shardId), allocationIds);
} else {
throw new IllegalArgumentException("Unexpected token: " + token);
}
}
} else {
// assume it's custom index metadata
parser.skipChildren();
}
} else if (token == XContentParser.Token.START_ARRAY) {
if ("mappings".equals(currentFieldName)) {
// array form: binary (compressed) or expanded mapping sources, merged into one map
Map<String, Object> mappingSourceBuilder = new HashMap<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
Map<String, Object> mapping;
if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) {
CompressedXContent compressedXContent = new CompressedXContent(parser.binaryValue());
mapping = XContentHelper.convertToMap(compressedXContent.compressedReference(), true).v2();
} else {
mapping = parser.mapOrdered();
}
mappingSourceBuilder.putAll(mapping);
}
handleLegacyMapping(builder, mappingSourceBuilder);
} else {
parser.skipChildren();
}
} else if (token.isValue()) {
if ("state".equals(currentFieldName)) {
builder.state(State.fromString(parser.text()));
} else if ("version".equals(currentFieldName)) {
builder.version(parser.longValue());
} else if ("mapping_version".equals(currentFieldName)) {
builder.mappingVersion(parser.longValue());
} else if ("settings_version".equals(currentFieldName)) {
builder.settingsVersion(parser.longValue());
} else if ("routing_num_shards".equals(currentFieldName)) {
builder.setRoutingNumShards(parser.intValue());
} else {
// unknown, ignore
}
} else {
XContentParserUtils.throwUnknownToken(token, parser);
}
}
XContentParserUtils.ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser);
if (builder.mapping() == null) {
builder.putMapping(MappingMetadata.EMPTY_MAPPINGS); // just make sure it's not empty so that _source can be read
}
IndexMetadata indexMetadata = builder.build(true);
assert indexMetadata.getCreationVersion().isLegacyIndexVersion();
assert indexMetadata.getCompatibilityVersion().isLegacyIndexVersion();
return indexMetadata;
}
/**
 * Installs a legacy (pre-7.x style) mapping on the builder. A single-entry map is treated
 * as {@code {type -> mapping source}}; a multi-entry map is registered as the mapping
 * source under the single mapping name. An empty map installs nothing.
 */
private static void handleLegacyMapping(Builder builder, Map<String, Object> mapping) {
    final int entryCount = mapping.size();
    if (entryCount == 0) {
        return; // no mapping present
    }
    final String type = entryCount == 1 ? mapping.keySet().iterator().next() : MapperService.SINGLE_MAPPING_NAME;
    builder.putMapping(new MappingMetadata(type, mapping));
}
}
/**
 * Reads the {@link IndexVersion} of Elasticsearch that created an index from its settings.
 *
 * @throws IllegalArgumentException if the given index settings don't contain a value for the
 *                                  key {@value IndexMetadata#SETTING_VERSION_CREATED}
 */
private static IndexVersion indexCreatedVersion(Settings indexSettings) {
    final IndexVersion created = IndexMetadata.SETTING_INDEX_VERSION_CREATED.get(indexSettings);
    if (created.equals(IndexVersions.ZERO) == false) {
        return created;
    }
    // ZERO is the sentinel "not present" value
    throw new IllegalArgumentException(
        String.format(
            Locale.ROOT,
            "[%s] is not present in the index settings for index with UUID [%s]",
            IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(),
            indexSettings.get(IndexMetadata.SETTING_INDEX_UUID)
        )
    );
}
/**
 * Returns a copy of {@code settings} augmented with human readable variants of the index
 * creation version and creation date. Used to display settings in a readable form in the
 * REST API.
 */
public static Settings addHumanReadableSettings(Settings settings) {
    final Settings.Builder augmented = Settings.builder().put(settings);
    final IndexVersion createdVersion = SETTING_INDEX_VERSION_CREATED.get(settings);
    if (createdVersion.equals(IndexVersions.ZERO) == false) {
        augmented.put(SETTING_VERSION_CREATED_STRING, createdVersion.toReleaseVersion());
    }
    final Long creationMillis = settings.getAsLong(SETTING_CREATION_DATE, null);
    if (creationMillis != null) {
        augmented.put(
            SETTING_CREATION_DATE_STRING,
            ZonedDateTime.ofInstant(Instant.ofEpochMilli(creationMillis), ZoneOffset.UTC).toString()
        );
    }
    return augmented.build();
}
private static final ToXContent.Params FORMAT_PARAMS;
static {
Map<String, String> params = Maps.newMapWithExpectedSize(2);
params.put("binary", "true");
params.put(Metadata.CONTEXT_MODE_PARAM, Metadata.CONTEXT_MODE_GATEWAY);
FORMAT_PARAMS = new MapParams(params);
}
/**
* State format for {@link IndexMetadata} to write to and load from disk.
* Serialization always uses {@code FORMAT_PARAMS} (binary mappings, gateway context).
*/
public static final MetadataStateFormat<IndexMetadata> FORMAT = new MetadataStateFormat<IndexMetadata>(INDEX_STATE_FILE_PREFIX) {
@Override
public void toXContent(XContentBuilder builder, IndexMetadata state) throws IOException {
Builder.toXContent(state, builder, FORMAT_PARAMS);
}
@Override
public IndexMetadata fromXContent(XContentParser parser) throws IOException {
return Builder.fromXContent(parser);
}
};
/**
* Returns the number of shards that should be used for routing. This basically defines the hash space we use in
* {@link IndexRouting#indexShard} to route documents
* to shards based on their ID or their specific routing value. The default value is {@link #getNumberOfShards()}. This value only
* changes if an index is shrunk.
*/
public int getRoutingNumShards() {
return routingNumShards;
}
/**
* Returns the routing factor for this index, i.e. routingNumShards / numberOfShards. The default is {@code 1}.
*
* @see #getRoutingFactor(int, int) for details
*/
public int getRoutingFactor() {
return routingFactor;
}
/**
 * Returns the source shard ID to split the given target shard off.
 *
 * @param shardId             the id of the target shard to split into
 * @param sourceIndexMetadata the source index metadata
 * @param numTargetShards     the total number of shards in the target index
 * @return the source shard ID to split off from
 */
public static ShardId selectSplitShard(int shardId, IndexMetadata sourceIndexMetadata, int numTargetShards) {
    final int numSourceShards = sourceIndexMetadata.getNumberOfShards();
    if (shardId >= numTargetShards) {
        throw new IllegalArgumentException(
            "the number of target shards (" + numTargetShards + ") must be greater than the shard id: " + shardId
        );
    }
    // compute the factor first (it validates divisibility), then verify the split metadata
    final int factor = getRoutingFactor(numSourceShards, numTargetShards);
    assertSplitMetadata(numSourceShards, numTargetShards, sourceIndexMetadata);
    return new ShardId(sourceIndexMetadata.getIndex(), shardId / factor);
}
/**
 * Returns the source shard ID to clone the given target shard off.
 *
 * @param shardId             the id of the target shard to clone into
 * @param sourceIndexMetadata the source index metadata
 * @param numTargetShards     the total number of shards in the target index
 * @return the source shard ID to clone from
 * @throws IllegalArgumentException if the source and target shard counts differ
 */
public static ShardId selectCloneShard(int shardId, IndexMetadata sourceIndexMetadata, int numTargetShards) {
    int numSourceShards = sourceIndexMetadata.getNumberOfShards();
    if (numSourceShards != numTargetShards) {
        // note: message previously rendered as "... number of  source shards ( 5)" due to stray
        // spaces in the concatenated fragments; fixed to produce well-formed output
        throw new IllegalArgumentException(
            "the number of target shards ("
                + numTargetShards
                + ") must be the same as the number of source shards ("
                + numSourceShards
                + ")"
        );
    }
    // a clone maps each target shard 1:1 onto the source shard with the same id
    return new ShardId(sourceIndexMetadata.getIndex(), shardId);
}
/**
 * Validates that {@code sourceIndexMetadata} can be split from {@code numSourceShards}
 * into {@code numTargetShards} shards: the shard count must not shrink and the routing
 * shard count must be a multiple of the target shard count.
 *
 * @throws IllegalArgumentException if the source has more shards than the target
 * @throws IllegalStateException    if the routing shards are not a multiple of the target shards
 */
public static void assertSplitMetadata(int numSourceShards, int numTargetShards, IndexMetadata sourceIndexMetadata) {
    if (numSourceShards > numTargetShards) {
        // fixed typo in message: "less that" -> "less than"
        throw new IllegalArgumentException(
            "the number of source shards ["
                + numSourceShards
                + "] must be less than the number of target shards ["
                + numTargetShards
                + "]"
        );
    }
    // now we verify that the numRoutingShards is valid in the source index
    // note: if the number of shards is 1 in the source index we can just assume it's correct since from 1 we can split into anything
    // this is important to special case here since we use this to validate this in various places in the code but allow to split form
    // 1 to N but we never modify the sourceIndexMetadata to accommodate for that
    int routingNumShards = numSourceShards == 1 ? numTargetShards : sourceIndexMetadata.getRoutingNumShards();
    if (routingNumShards % numTargetShards != 0) {
        throw new IllegalStateException(
            "the number of routing shards [" + routingNumShards + "] must be a multiple of the target shards [" + numTargetShards + "]"
        );
    }
    // this is just an additional assertion that ensures we are a factor of the routing num shards.
    assert sourceIndexMetadata.getNumberOfShards() == 1 // special case - we can split into anything from 1 shard
        || getRoutingFactor(numTargetShards, routingNumShards) >= 0;
}
/**
 * Selects the source shards for a local shard recovery. Depending on the relative shard counts this is
 * a shrink, a split or a clone operation.
 *
 * @param shardId the target shard ID to select the source shards for
 * @param sourceIndexMetadata the source metadata
 * @param numTargetShards the number of target shards
 * @return the set of source shard IDs the given target shard recovers from
 */
public static Set<ShardId> selectRecoverFromShards(int shardId, IndexMetadata sourceIndexMetadata, int numTargetShards) {
    final int numSourceShards = sourceIndexMetadata.getNumberOfShards();
    if (numSourceShards > numTargetShards) {
        // Shrink: several source shards collapse into one target shard.
        return selectShrinkShards(shardId, sourceIndexMetadata, numTargetShards);
    }
    // Split and clone both map a target shard to exactly one source shard.
    final ShardId source = numSourceShards < numTargetShards
        ? selectSplitShard(shardId, sourceIndexMetadata, numTargetShards)
        : selectCloneShard(shardId, sourceIndexMetadata, numTargetShards);
    return Collections.singleton(source);
}
/**
 * Returns the source shard ids to shrink into the given shard id.
 *
 * @param shardId the id of the target shard to shrink to
 * @param sourceIndexMetadata the source index metadata
 * @param numTargetShards the total number of shards in the target index
 * @return a set of shard IDs to shrink into the given shard ID
 * @throws IllegalArgumentException if {@code shardId} is out of range or the source index has fewer
 *         shards than the target index
 */
public static Set<ShardId> selectShrinkShards(int shardId, IndexMetadata sourceIndexMetadata, int numTargetShards) {
    if (shardId >= numTargetShards) {
        throw new IllegalArgumentException(
            "the number of target shards (" + numTargetShards + ") must be greater than the shard id: " + shardId
        );
    }
    if (sourceIndexMetadata.getNumberOfShards() < numTargetShards) {
        // Message fixed: "less that" -> "less than".
        throw new IllegalArgumentException(
            "the number of target shards ["
                + numTargetShards
                + "] must be less than the number of source shards ["
                + sourceIndexMetadata.getNumberOfShards()
                + "]"
        );
    }
    int routingFactor = getRoutingFactor(sourceIndexMetadata.getNumberOfShards(), numTargetShards);
    Set<ShardId> shards = Sets.newHashSetWithExpectedSize(routingFactor);
    // Target shard i absorbs the contiguous source shard range [i * factor, (i + 1) * factor).
    for (int i = shardId * routingFactor; i < routingFactor * shardId + routingFactor; i++) {
        shards.add(new ShardId(sourceIndexMetadata.getIndex(), i));
    }
    return shards;
}
/**
 * Returns the routing factor for a resized (shrunk or split) index with the given number of target
 * shards. This factor is used in the hash function in {@link IndexRouting#indexShard} to guarantee
 * consistent hashing / routing of documents even if the number of shards changed (ie. a shrunk index).
 *
 * @param sourceNumberOfShards the total number of shards in the source index
 * @param targetNumberOfShards the total number of shards in the target index
 * @return the routing factor for a resized index with the given number of target shards
 * @throws IllegalArgumentException if the number of source shards is less than the number of target shards
 *         and not a factor of it, or greater and not a multiple of it
 */
public static int getRoutingFactor(int sourceNumberOfShards, int targetNumberOfShards) {
    if (sourceNumberOfShards == targetNumberOfShards) {
        return 1; // clone: shard counts are identical
    }
    if (sourceNumberOfShards < targetNumberOfShards) {
        // split: every source shard expands into `factor` target shards
        final int factor = targetNumberOfShards / sourceNumberOfShards;
        if (factor * sourceNumberOfShards != targetNumberOfShards || factor <= 1) {
            throw new IllegalArgumentException(
                "the number of source shards [" + sourceNumberOfShards + "] must be a factor of [" + targetNumberOfShards + "]"
            );
        }
        return factor;
    }
    // shrink: every target shard absorbs `factor` source shards
    final int factor = sourceNumberOfShards / targetNumberOfShards;
    if (factor * targetNumberOfShards != sourceNumberOfShards || factor <= 1) {
        throw new IllegalArgumentException(
            "the number of source shards [" + sourceNumberOfShards + "] must be a multiple of [" + targetNumberOfShards + "]"
        );
    }
    return factor;
}
/**
 * Parses the number from the rolled over index name. It also supports the date-math format
 * (ie. index name is wrapped in &lt; and &gt;).
 * E.g.
 * - For ".ds-logs-000002" it will return 2
 * - For "&lt;logs-{now/d}-3&gt;" it'll return 3
 *
 * @param indexName the rolled over index name to extract the counter from
 * @return the counter encoded after the last "-" separator
 * @throws IllegalArgumentException if the index doesn't contain a "-" separator or if the last token
 *         after the separator is not a number
 */
public static int parseIndexNameCounter(String indexName) {
    final int separatorAt = indexName.lastIndexOf('-');
    if (separatorAt < 0) {
        throw new IllegalArgumentException("no - separator found in index name [" + indexName + "]");
    }
    // For date-math names the trailing '>' is not part of the counter and must be stripped.
    final int counterEnd = indexName.endsWith(">") ? indexName.length() - 1 : indexName.length();
    final String counterToken = indexName.substring(separatorAt + 1, counterEnd);
    try {
        return Integer.parseInt(counterToken);
    } catch (NumberFormatException e) {
        throw new IllegalArgumentException("unable to parse the index name [" + indexName + "] to extract the counter", e);
    }
}
/**
 * An overload of {@link #getMatchingInferenceFields(Map, boolean, boolean)}, where the inference field
 * metadata map to match against is provided by the caller. {@code useDefaultFields} is unavailable
 * because the index's {@link IndexSettings#DEFAULT_FIELD_SETTING} is out of scope.
 *
 * @param inferenceFieldMetadataMap The inference field metadata map to match against.
 * @param fieldMap The field pattern map, where the key is the field pattern and the value is the pattern weight.
 * @param resolveWildcards If {@code true}, wildcards in field patterns will be resolved. Otherwise, only
 *                         explicit matches will be returned.
 * @return A map of inference field matches
 */
public static Map<InferenceFieldMetadata, Float> getMatchingInferenceFields(
    Map<String, InferenceFieldMetadata> inferenceFieldMetadataMap,
    Map<String, Float> fieldMap,
    boolean resolveWildcards
) {
    Map<InferenceFieldMetadata, Float> matches = new HashMap<>();
    for (var patternEntry : fieldMap.entrySet()) {
        final String pattern = patternEntry.getKey();
        final Float weight = patternEntry.getValue();
        if (inferenceFieldMetadataMap.containsKey(pattern)) {
            // Exact field name match, no wildcard handling required.
            addToMatchingInferenceFieldsMap(matches, inferenceFieldMetadataMap.get(pattern), weight);
        } else if (resolveWildcards) {
            if (Regex.isMatchAllPattern(pattern)) {
                // "*" matches every inference field.
                for (var candidate : inferenceFieldMetadataMap.values()) {
                    addToMatchingInferenceFieldsMap(matches, candidate, weight);
                }
            } else if (Regex.isSimpleMatchPattern(pattern)) {
                // Simple wildcard pattern: keep only the candidates whose name matches.
                for (var candidate : inferenceFieldMetadataMap.values()) {
                    if (Regex.simpleMatch(pattern, candidate.getName())) {
                        addToMatchingInferenceFieldsMap(matches, candidate, weight);
                    }
                }
            }
        }
    }
    return matches;
}
/**
 * Records a match for {@code inferenceFieldMetadata}: the first weight is stored as-is, and any
 * subsequent weight for the same field is multiplied into the existing value.
 * {@link Map#compute} is used deliberately to keep its exact null-result semantics.
 */
private static void addToMatchingInferenceFieldsMap(
    Map<InferenceFieldMetadata, Float> matches,
    InferenceFieldMetadata inferenceFieldMetadata,
    Float weight
) {
    matches.compute(inferenceFieldMetadata, (field, existingWeight) -> existingWeight == null ? weight : existingWeight * weight);
}
}
| Builder |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/context/annotation/ConfigurationClassWithConditionTests.java | {
"start": 8729,
"end": 9017
} | class ____ implements ImportBeanDefinitionRegistrar {
static {
if (true) {
throw new RuntimeException();
}
}
@Override
public void registerBeanDefinitions(AnnotationMetadata importingClassMetadata,
BeanDefinitionRegistry registry) {
}
}
static | RegistrarNotCreated |
java | google__guice | extensions/throwingproviders/test/com/google/inject/throwingproviders/TestScope.java | {
"start": 1124,
"end": 1682
} | interface ____ {}
private Map<Key<?>, Object> inScopeObjectsMap = new HashMap<>();
@Override
public <T> Provider<T> scope(final Key<T> key, final Provider<T> provider) {
return new Provider<T>() {
@Override
@SuppressWarnings({"unchecked"})
public T get() {
T t = (T) inScopeObjectsMap.get(key);
if (t == null) {
t = provider.get();
inScopeObjectsMap.put(key, t);
}
return t;
}
};
}
public void beginNewScope() {
inScopeObjectsMap = new HashMap<>();
}
}
| Scoped |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/slot/ThreadSafeTaskSlotTable.java | {
"start": 1981,
"end": 2121
} | class ____ a given {@link TaskSlotTable},
* guarantees all the accesses are invoked on the given {@link MainThreadExecutable}.
*/
public | wraps |
java | apache__flink | flink-table/flink-table-runtime/src/test/java/org/apache/flink/table/runtime/generated/CompileUtilsTest.java | {
"start": 1279,
"end": 1367
} | class ____ {
@BeforeEach
void before() {
// cleanup cached | CompileUtilsTest |
java | apache__camel | components/camel-ftp/src/test/java/org/apache/camel/component/file/remote/integration/FromFtpRemoteFileSortByExpressionIT.java | {
"start": 1063,
"end": 2750
} | class ____ extends FtpServerTestSupport {
private String getFtpUrl() {
return "ftp://admin@localhost:{{ftp.server.port}}/sortby?password=admin&delay=5000";
}
@Override
public void doPostSetup() throws Exception {
prepareFtpServer();
}
@Test
public void testSortFiles() throws Exception {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
from(getFtpUrl() + "&sortBy=file:ext").to("mock:result");
}
});
context.start();
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("Hello Paris", "Hello London", "Hello Copenhagen");
MockEndpoint.assertIsSatisfied(context);
}
@Test
public void testSortFilesReverse() throws Exception {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
from(getFtpUrl() + "&sortBy=reverse:file:ext").to("mock:reverse");
}
});
context.start();
MockEndpoint mock = getMockEndpoint("mock:reverse");
mock.expectedBodiesReceived("Hello Copenhagen", "Hello London", "Hello Paris");
MockEndpoint.assertIsSatisfied(context);
}
private void prepareFtpServer() {
// prepares the FTP Server by creating files on the server that we want
// to unit
// test that we can pool
sendFile(getFtpUrl(), "Hello Paris", "paris.dat");
sendFile(getFtpUrl(), "Hello London", "london.txt");
sendFile(getFtpUrl(), "Hello Copenhagen", "copenhagen.xml");
}
}
| FromFtpRemoteFileSortByExpressionIT |
java | google__guava | android/guava/src/com/google/common/collect/ContiguousSet.java | {
"start": 2013,
"end": 3400
} | class ____<C extends Comparable> extends ImmutableSortedSet<C> {
/**
* Returns a {@code ContiguousSet} containing the same values in the given domain {@linkplain
* Range#contains contained} by the range.
*
* @throws IllegalArgumentException if neither range nor the domain has a lower bound, or if
* neither has an upper bound
* @since 13.0
*/
public static <C extends Comparable> ContiguousSet<C> create(
Range<C> range, DiscreteDomain<C> domain) {
checkNotNull(range);
checkNotNull(domain);
Range<C> effectiveRange = range;
try {
if (!range.hasLowerBound()) {
effectiveRange = effectiveRange.intersection(Range.atLeast(domain.minValue()));
}
if (!range.hasUpperBound()) {
effectiveRange = effectiveRange.intersection(Range.atMost(domain.maxValue()));
}
} catch (NoSuchElementException e) {
throw new IllegalArgumentException(e);
}
boolean empty;
if (effectiveRange.isEmpty()) {
empty = true;
} else {
/*
* requireNonNull is safe because the effectiveRange operations above would have thrown or
* effectiveRange.isEmpty() would have returned true.
*/
C afterLower = requireNonNull(range.lowerBound.leastValueAbove(domain));
C beforeUpper = requireNonNull(range.upperBound.greatestValueBelow(domain));
// Per | ContiguousSet |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/issues/StopCamelFromRouteTest.java | {
"start": 1332,
"end": 3097
} | class ____ {
// START SNIPPET: e1
// use a latch as signal when to stop Camel
private final CountDownLatch latch = new CountDownLatch(1);
@Test
public void testStopCamelFromRoute() throws Exception {
// create camel, add routes, and start camel
CamelContext context = new DefaultCamelContext();
context.addRoutes(createMyRoutes());
context.start();
// setup mock expectations for unit test
MockEndpoint start = context.getEndpoint("mock:start", MockEndpoint.class);
start.expectedMessageCount(1);
MockEndpoint done = context.getEndpoint("mock:done", MockEndpoint.class);
done.expectedMessageCount(1);
// send a message to the route
ProducerTemplate template = context.createProducerTemplate();
template.sendBody("direct:start", "Hello Camel");
// wait for the latch (use 1 minute as fail safe, due unit test)
assertTrue(latch.await(1, TimeUnit.MINUTES));
// stop camel
context.stop();
// unit test assertions
start.assertIsSatisfied();
done.assertIsSatisfied();
}
// END SNIPPET: e1
// START SNIPPET: e2
public RouteBuilder createMyRoutes() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start").routeId("myRoute").to("mock:start").process(new Processor() {
@Override
public void process(Exchange exchange) {
// stop Camel by signalling to the latch
latch.countDown();
}
}).to("mock:done");
}
};
}
// END SNIPPET: e2
}
| StopCamelFromRouteTest |
java | google__guava | android/guava/src/com/google/common/collect/Iterables.java | {
"start": 2623,
"end": 3596
} | class ____ {
private Iterables() {}
/** Returns an unmodifiable view of {@code iterable}. */
public static <T extends @Nullable Object> Iterable<T> unmodifiableIterable(
Iterable<? extends T> iterable) {
checkNotNull(iterable);
if (iterable instanceof UnmodifiableIterable || iterable instanceof ImmutableCollection) {
@SuppressWarnings("unchecked") // Since it's unmodifiable, the covariant cast is safe
Iterable<T> result = (Iterable<T>) iterable;
return result;
}
return new UnmodifiableIterable<>(iterable);
}
/**
* Simply returns its argument.
*
* @deprecated no need to use this
* @since 10.0
*/
@InlineMe(
replacement = "checkNotNull(iterable)",
staticImports = "com.google.common.base.Preconditions.checkNotNull")
@Deprecated
public static <E> Iterable<E> unmodifiableIterable(ImmutableCollection<E> iterable) {
return checkNotNull(iterable);
}
private static final | Iterables |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/MethodCanBeStaticTest.java | {
"start": 9947,
"end": 10288
} | class ____ {
static void foo() {
new Object() {
private void foo() {}
};
}
}
""")
.doTest();
}
@Test
public void negativeLocal() {
testHelper
.addSourceLines(
"Test.java",
"""
| Test |
java | spring-cloud__spring-cloud-gateway | spring-cloud-gateway-server-webflux/src/main/java/org/springframework/cloud/gateway/route/RouteDefinitionMetrics.java | {
"start": 1174,
"end": 2280
} | class ____ implements ApplicationListener<RefreshRoutesEvent> {
private static final Log log = LogFactory.getLog(GatewayMetricsFilter.class);
private final RouteDefinitionLocator routeLocator;
private final AtomicInteger routeDefinitionCount;
private final String metricsPrefix;
public RouteDefinitionMetrics(MeterRegistry meterRegistry, RouteDefinitionLocator routeLocator,
String metricsPrefix) {
this.routeLocator = routeLocator;
if (metricsPrefix.endsWith(".")) {
this.metricsPrefix = metricsPrefix.substring(0, metricsPrefix.length() - 1);
}
else {
this.metricsPrefix = metricsPrefix;
}
routeDefinitionCount = meterRegistry.gauge(this.metricsPrefix + ".routes.count", new AtomicInteger(0));
}
public String getMetricsPrefix() {
return metricsPrefix;
}
@Override
public void onApplicationEvent(RefreshRoutesEvent event) {
routeLocator.getRouteDefinitions().count().subscribe(count -> {
routeDefinitionCount.set(count.intValue());
if (log.isDebugEnabled()) {
log.debug("New routes count: " + routeDefinitionCount);
}
});
}
}
| RouteDefinitionMetrics |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-guava-tests/src/test/java/org/assertj/tests/guava/api/Assertions_sync_with_InstanceOfAssertFactories_Test.java | {
"start": 1877,
"end": 7099
} | class ____ {
private static final Class<?>[] FIELD_FACTORIES_IGNORED_TYPES = {
// There can be no Range field factory with a base type.
RangeAssert.class,
// There can be no RangeMap field factory with a base type.
RangeMapAssert.class,
// There can be no RangeSet field factory with a base type.
RangeSetAssert.class,
};
private static final Class<?>[] METHOD_FACTORIES_IGNORED_TYPES = {
};
@Test
void each_guava_assertion_should_have_an_instance_of_assert_factory_static_field() {
// GIVEN
Map<Type, Type> assertThatMethods = findAssertThatParameterAndReturnTypes();
// WHEN
Map<Type, Type> fieldFactories = findFieldFactoryTypes();
// THEN
then(fieldFactories).containsAllEntriesOf(assertThatMethods)
.hasSameSizeAs(assertThatMethods);
}
@Test
void each_guava_assertion_with_type_parameters_should_have_an_instance_of_assert_factory_static_method() {
// GIVEN
Map<Type, Type> assertThatMethods = findTypedAssertThatParameterAndReturnTypes();
// WHEN
Map<Type, Type> methodFactories = findMethodFactoryTypes();
// THEN
then(methodFactories).containsAllEntriesOf(assertThatMethods)
.hasSameSizeAs(assertThatMethods);
}
private Map<Type, Type> findAssertThatParameterAndReturnTypes() {
return Stream.of(findAssertThatMethods(FIELD_FACTORIES_IGNORED_TYPES))
.map(this::toParameterAndReturnTypeEntry)
.filter(not(this::isPrimitiveTypeKey))
.collect(toMap(Entry::getKey, Entry::getValue));
}
private <K, V> boolean isPrimitiveTypeKey(Entry<K, V> entry) {
if (entry.getKey() instanceof Class) {
return ((Class<?>) entry.getKey()).isPrimitive();
}
return false;
}
private Map<Type, Type> findTypedAssertThatParameterAndReturnTypes() {
return Stream.of(findAssertThatMethods(METHOD_FACTORIES_IGNORED_TYPES))
.filter(this::hasTypeParameters)
.map(this::toParameterAndReturnTypeEntry)
.collect(toMap(Entry::getKey, Entry::getValue));
}
private static Method[] findAssertThatMethods(Class<?>... ignoredReturnTypes) {
Set<Class<?>> ignoredReturnTypesSet = newLinkedHashSet(ignoredReturnTypes);
return Arrays.stream(Assertions.class.getMethods())
.filter(method -> method.getName().equals("assertThat"))
.filter(method -> !ignoredReturnTypesSet.contains(method.getReturnType()))
.toArray(Method[]::new);
}
private boolean hasTypeParameters(Method method) {
return method.getTypeParameters().length != 0;
}
private Entry<Type, Type> toParameterAndReturnTypeEntry(Method method) {
return entry(normalize(genericParameterType(method)), normalize(method.getGenericReturnType()));
}
private Type genericParameterType(Method method) {
Type[] parameterTypes = method.getGenericParameterTypes();
assertThat(parameterTypes).hasSize(1);
return parameterTypes[0];
}
private Map<Type, Type> findFieldFactoryTypes() {
return Stream.of(InstanceOfAssertFactories.class.getFields())
.filter(not(Field::isSynthetic)) // Exclude $jacocoData - see #590 and jacoco/jacoco#168
.map(Field::getGenericType)
.map(this::extractTypeParameters)
.filter(not(this::isIgnoredFieldFactory))
.collect(toMap(Entry::getKey, Entry::getValue));
}
private boolean isIgnoredFieldFactory(Entry<Type, Type> e) {
return isIgnoredFactory(e, FIELD_FACTORIES_IGNORED_TYPES);
}
private Map<Type, Type> findMethodFactoryTypes() {
return Stream.of(InstanceOfAssertFactories.class.getMethods())
.map(Method::getGenericReturnType)
.map(this::extractTypeParameters)
.filter(not(this::isIgnoredMethodFactory))
.collect(toMap(Entry::getKey, Entry::getValue));
}
private boolean isIgnoredMethodFactory(Entry<Type, Type> e) {
return isIgnoredFactory(e, METHOD_FACTORIES_IGNORED_TYPES);
}
private boolean isIgnoredFactory(Entry<Type, Type> e, Class<?>... ignoredTypes) {
return Stream.of(ignoredTypes).anyMatch(type -> e.getValue().equals(type));
}
private Entry<Type, Type> extractTypeParameters(Type type) {
assertThat(type).asInstanceOf(type(ParameterizedType.class))
.returns(InstanceOfAssertFactory.class, from(ParameterizedType::getRawType))
.extracting(ParameterizedType::getActualTypeArguments)
.asInstanceOf(ARRAY)
.hasSize(2);
Type[] typeArguments = ((ParameterizedType) type).getActualTypeArguments();
return entry(normalize(typeArguments[0]), normalize(typeArguments[1]));
}
private Type normalize(Type type) {
if (type instanceof ParameterizedType parameterizedType) {
return parameterizedType.getRawType();
} else if (type instanceof TypeVariable<?> typeVariable) {
Type[] bounds = typeVariable.getBounds();
assertThat(bounds).hasSize(1);
return normalize(bounds[0]);
}
return type;
}
}
| Assertions_sync_with_InstanceOfAssertFactories_Test |
java | apache__camel | components/camel-javascript/src/test/java/org/apache/camel/language/js/JavaScriptTest.java | {
"start": 1085,
"end": 1404
} | class ____ extends LanguageTestSupport {
@Test
public void testJavaScriptExpression() {
assertExpression("2 + 3", 5);
exchange.getMessage().setBody(7);
assertExpression("2 + body", 9);
}
@Override
protected String getLanguageName() {
return "js";
}
}
| JavaScriptTest |
java | elastic__elasticsearch | client/rest/src/test/java/org/elasticsearch/client/PreferHasAttributeNodeSelectorTests.java | {
"start": 1245,
"end": 3246
} | class ____ extends RestClientTestCase {
public void testFoundPreferHasAttribute() {
Node hasAttributeValue = dummyNode(singletonMap("attr", singletonList("val")));
Node hasAttributeButNotValue = dummyNode(singletonMap("attr", singletonList("notval")));
Node hasAttributeValueInList = dummyNode(singletonMap("attr", Arrays.asList("val", "notval")));
Node notHasAttribute = dummyNode(singletonMap("notattr", singletonList("val")));
List<Node> nodes = new ArrayList<>();
nodes.add(hasAttributeValue);
nodes.add(hasAttributeButNotValue);
nodes.add(hasAttributeValueInList);
nodes.add(notHasAttribute);
List<Node> expected = new ArrayList<>();
expected.add(hasAttributeValue);
expected.add(hasAttributeValueInList);
new PreferHasAttributeNodeSelector("attr", "val").select(nodes);
assertEquals(expected, nodes);
}
public void testNotFoundPreferHasAttribute() {
Node notHasAttribute = dummyNode(singletonMap("notattr", singletonList("val")));
List<Node> nodes = new ArrayList<>();
nodes.add(notHasAttribute);
List<Node> expected = new ArrayList<>();
expected.add(notHasAttribute);
new PreferHasAttributeNodeSelector("attr", "val").select(nodes);
assertEquals(expected, nodes);
}
private static Node dummyNode(Map<String, List<String>> attributes) {
final Set<String> roles = new TreeSet<>();
if (randomBoolean()) {
roles.add("master");
}
if (randomBoolean()) {
roles.add("data");
}
if (randomBoolean()) {
roles.add("ingest");
}
return new Node(
new HttpHost("dummy"),
Collections.<HttpHost>emptySet(),
randomAsciiAlphanumOfLength(5),
randomAsciiAlphanumOfLength(5),
new Roles(roles),
attributes
);
}
}
| PreferHasAttributeNodeSelectorTests |
java | google__guava | guava/src/com/google/common/io/MoreFiles.java | {
"start": 2789,
"end": 3442
} | class ____ {
private MoreFiles() {}
/**
* Returns a view of the given {@code path} as a {@link ByteSource}.
*
* <p>Any {@linkplain OpenOption open options} provided are used when opening streams to the file
* and may affect the behavior of the returned source and the streams it provides. See {@link
* StandardOpenOption} for the standard options that may be provided. Providing no options is
* equivalent to providing the {@link StandardOpenOption#READ READ} option.
*/
public static ByteSource asByteSource(Path path, OpenOption... options) {
return new PathByteSource(path, options);
}
private static final | MoreFiles |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/web/servlet/samples/client/standalone/RedirectTests.java | {
"start": 2028,
"end": 4858
} | class ____ {
private final WebTestClient testClient =
MockMvcWebTestClient.bindToController(new PersonController()).build();
@Test
public void save() throws Exception {
EntityExchangeResult<Void> exchangeResult =
testClient.post().uri("/persons?name=Andy")
.exchange()
.expectStatus().isFound()
.expectHeader().location("/persons/Joe")
.expectBody().isEmpty();
// Further assertions on the server response
MockMvcWebTestClient.resultActionsFor(exchangeResult)
.andExpect(model().size(1))
.andExpect(model().attributeExists("name"))
.andExpect(flash().attributeCount(1))
.andExpect(flash().attribute("message", "success!"));
}
@Test
public void saveSpecial() throws Exception {
EntityExchangeResult<Void> result =
testClient.post().uri("/people?name=Andy")
.exchange()
.expectStatus().isFound()
.expectHeader().location("/persons/Joe")
.expectBody().isEmpty();
// Further assertions on the server response
MockMvcWebTestClient.resultActionsFor(result)
.andExpect(model().size(1))
.andExpect(model().attributeExists("name"))
.andExpect(flash().attributeCount(1))
.andExpect(flash().attribute("message", "success!"));
}
@Test
public void saveWithErrors() throws Exception {
EntityExchangeResult<Void> result =
testClient.post().uri("/persons").exchange().expectStatus().isOk().expectBody().isEmpty();
MockMvcWebTestClient.resultActionsFor(result)
.andExpect(forwardedUrl("persons/add"))
.andExpect(model().size(1))
.andExpect(model().attributeExists("person"))
.andExpect(flash().attributeCount(0));
}
@Test
public void saveSpecialWithErrors() throws Exception {
EntityExchangeResult<Void> result =
testClient.post().uri("/people").exchange().expectStatus().isOk().expectBody().isEmpty();
MockMvcWebTestClient.resultActionsFor(result)
.andExpect(forwardedUrl("persons/add"))
.andExpect(model().size(1))
.andExpect(model().attributeExists("person"))
.andExpect(flash().attributeCount(0));
}
@Test
public void getPerson() throws Exception {
EntityExchangeResult<Void> result =
MockMvcWebTestClient.bindToController(new PersonController())
.defaultRequest(get("/").flashAttr("message", "success!"))
.build()
.get().uri("/persons/Joe")
.exchange()
.expectStatus().isOk()
.expectBody().isEmpty();
// Further assertions on the server response
MockMvcWebTestClient.resultActionsFor(result)
.andDo(MockMvcResultHandlers.print())
.andExpect(forwardedUrl("persons/index"))
.andExpect(model().size(2))
.andExpect(model().attribute("person", new Person("Joe")))
.andExpect(model().attribute("message", "success!"))
.andExpect(flash().attributeCount(0));
}
@Controller
private static | RedirectTests |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/benckmark/pool/Case0.java | {
"start": 1070,
"end": 6719
} | class ____ extends TestCase {
private String jdbcUrl;
private String user;
private String password;
private String driverClass;
private int initialSize = 1;
private int minIdle = 3;
private int maxIdle = 8;
private int maxActive = 8;
private String validationQuery = "SELECT 1";
private boolean testOnBorrow;
private long minEvictableIdleTimeMillis = 3000;
public final int LOOP_COUNT = 5;
public final int COUNT = 1000 * 1000 * 1;
protected void setUp() throws Exception {
jdbcUrl = "jdbc:fake:dragoon_v25masterdb";
user = "dragoon25";
password = "dragoon25";
driverClass = "com.alibaba.druid.mock.MockDriver";
// jdbcUrl = "jdbc:mysql://a.b.c.d:3306/masterdb";
// user = "x";
// password = "x";
}
public void test_druid() throws Exception {
DruidDataSource dataSource = new DruidDataSource();
dataSource.setInitialSize(initialSize);
dataSource.setMaxActive(maxActive);
dataSource.setMinIdle(minIdle);
dataSource.setMaxIdle(maxIdle);
dataSource.setPoolPreparedStatements(true);
dataSource.setDriverClassName(driverClass);
dataSource.setUrl(jdbcUrl);
dataSource.setPoolPreparedStatements(true);
dataSource.setUsername(user);
dataSource.setPassword(password);
dataSource.setValidationQuery(validationQuery);
dataSource.setTestOnBorrow(testOnBorrow);
dataSource.setMinEvictableIdleTimeMillis(minEvictableIdleTimeMillis);
for (int i = 0; i < LOOP_COUNT; ++i) {
p0(dataSource, "druid");
}
System.out.println();
}
public void f_test_1() throws Exception {
final BasicDataSource dataSource = new BasicDataSource();
dataSource.setInitialSize(initialSize);
dataSource.setMaxActive(maxActive);
dataSource.setMinIdle(minIdle);
dataSource.setMaxIdle(maxIdle);
dataSource.setPoolPreparedStatements(true);
dataSource.setDriverClassName(driverClass);
dataSource.setUrl(jdbcUrl);
dataSource.setPoolPreparedStatements(true);
dataSource.setUsername(user);
dataSource.setPassword(password);
dataSource.setValidationQuery(validationQuery);
dataSource.setTestOnBorrow(testOnBorrow);
dataSource.setMinEvictableIdleTimeMillis(minEvictableIdleTimeMillis);
for (int i = 0; i < LOOP_COUNT; ++i) {
p0(dataSource, "dbcp");
}
System.out.println();
}
public void f_test_2() throws Exception {
BoneCPDataSource dataSource = new BoneCPDataSource();
// dataSource.(10);
// dataSource.setMaxActive(50);
dataSource.setMinConnectionsPerPartition(minIdle);
dataSource.setMaxConnectionsPerPartition(maxIdle);
dataSource.setDriverClass(driverClass);
dataSource.setJdbcUrl(jdbcUrl);
// dataSource.setPoolPreparedStatements(true);
// dataSource.setMaxOpenPreparedStatements(100);
dataSource.setUsername(user);
dataSource.setPassword(password);
dataSource.setConnectionTestStatement(validationQuery);
dataSource.setPartitionCount(1);
for (int i = 0; i < LOOP_COUNT; ++i) {
p0(dataSource, "boneCP");
}
System.out.println();
}
public void f_test_c3p0() throws Exception {
ComboPooledDataSource dataSource = new ComboPooledDataSource();
// dataSource.(10);
// dataSource.setMaxActive(50);
dataSource.setMinPoolSize(minIdle);
dataSource.setMaxPoolSize(maxIdle);
dataSource.setDriverClass(driverClass);
dataSource.setJdbcUrl(jdbcUrl);
// dataSource.setPoolPreparedStatements(true);
// dataSource.setMaxOpenPreparedStatements(100);
dataSource.setUser(user);
dataSource.setPassword(password);
for (int i = 0; i < LOOP_COUNT; ++i) {
p0(dataSource, "c3p0");
}
System.out.println();
}
public void f_test_tomcat_jdbc() throws Exception {
org.apache.tomcat.jdbc.pool.DataSource dataSource = new org.apache.tomcat.jdbc.pool.DataSource();
// dataSource.(10);
// dataSource.setMaxActive(50);
dataSource.setMinIdle(minIdle);
dataSource.setMaxActive(maxIdle);
dataSource.setDriverClassName(driverClass);
dataSource.setUrl(jdbcUrl);
// dataSource.setPoolPreparedStatements(true);
// dataSource.setMaxOpenPreparedStatements(100);
dataSource.setUsername(user);
dataSource.setPassword(password);
for (int i = 0; i < LOOP_COUNT; ++i) {
p0(dataSource, "tomcat-jdbc");
}
System.out.println();
}
private void p0(DataSource dataSource, String name) throws SQLException {
long startMillis = System.currentTimeMillis();
long startYGC = TestUtil.getYoungGC();
long startFullGC = TestUtil.getFullGC();
for (int i = 0; i < COUNT; ++i) {
Connection conn = dataSource.getConnection();
Statement stmt = conn.createStatement();
// ResultSet rs = stmt.executeQuery("SELECT 1");
// rs.close();
// stmt.close();
conn.close();
}
long millis = System.currentTimeMillis() - startMillis;
long ygc = TestUtil.getYoungGC() - startYGC;
long fullGC = TestUtil.getFullGC() - startFullGC;
System.out.println(name + " millis : " + NumberFormat.getInstance().format(millis) + ", YGC " + ygc + " FGC "
+ fullGC);
}
}
| Case0 |
java | apache__kafka | storage/src/main/java/org/apache/kafka/storage/internals/log/UnifiedLog.java | {
"start": 4861,
"end": 5103
} | class ____ state and behavior specific to tiered segments as well as any behavior combining both tiered
* and local segments. The state and behavior specific to local segments are handled by the encapsulated LocalLog instance.
*/
public | handles |
java | apache__flink | flink-state-backends/flink-statebackend-forst/src/test/java/org/apache/flink/state/forst/datatransfer/ForStStateDataTransferTest.java | {
"start": 3038,
"end": 18069
} | class ____ extends TestLogger {
@TempDir private java.nio.file.Path temporaryFolder;
private ForStPathContainer createPathContainer() throws IOException {
Path localJobPath = Path.fromLocalFile(TempDirUtils.newFolder(temporaryFolder));
Path localBasePath = new Path(localJobPath, "base");
Path remoteJobPath = Path.fromLocalFile(TempDirUtils.newFolder(temporaryFolder));
Path remoteBasePath = new Path(remoteJobPath, "base");
return ForStPathContainer.of(localJobPath, localBasePath, remoteJobPath, remoteBasePath);
}
/** Test that the exception arose in the thread pool will rethrow to the main thread. */
@Test
void testMultiThreadTransferThreadPoolExceptionRethrow() throws IOException {
SpecifiedException expectedException =
new SpecifiedException("throw exception while multi thread transfer states.");
CheckpointStateOutputStream outputStream =
createFailingCheckpointStateOutputStream(expectedException);
CheckpointStreamFactory checkpointStreamFactory =
new CheckpointStreamFactory() {
@Override
public CheckpointStateOutputStream createCheckpointStateOutputStream(
CheckpointedStateScope scope) {
return outputStream;
}
@Override
public boolean canFastDuplicate(
StreamStateHandle stateHandle, CheckpointedStateScope scope) {
return false;
}
@Override
public List<StreamStateHandle> duplicate(
List<StreamStateHandle> stateHandles, CheckpointedStateScope scope) {
return null;
}
};
File file = TempDirUtils.newFile(temporaryFolder, String.valueOf(UUID.randomUUID()));
generateRandomFileContent(file.getPath(), 20);
List<Path> filePaths = new ArrayList<>(1);
filePaths.add(Path.fromLocalFile(file));
try (ForStStateDataTransfer stateTransfer = new ForStStateDataTransfer(5)) {
assertThatThrownBy(
() ->
stateTransfer.transferFilesToCheckpointFs(
SnapshotType.SharingFilesStrategy.FORWARD_BACKWARD,
filePaths,
checkpointStreamFactory,
CheckpointedStateScope.SHARED,
new CloseableRegistry(),
new CloseableRegistry(),
false))
.isEqualTo(expectedException);
}
}
@Test
void testTransferredSstCanBeCleanedUp() throws Exception {
SpecifiedException expectedException =
new SpecifiedException("throw exception while multi thread transfer states.");
File checkpointPrivateFolder = TempDirUtils.newFolder(temporaryFolder, "private");
Path checkpointPrivateDirectory = Path.fromLocalFile(checkpointPrivateFolder);
File checkpointSharedFolder = TempDirUtils.newFolder(temporaryFolder, "shared");
Path checkpointSharedDirectory = Path.fromLocalFile(checkpointSharedFolder);
FileSystem fileSystem = checkpointPrivateDirectory.getFileSystem();
int sstFileCount = 6;
int fileStateSizeThreshold = 1024;
int writeBufferSize = 4096;
CheckpointStreamFactory checkpointStreamFactory =
new FsCheckpointStreamFactory(
fileSystem,
checkpointPrivateDirectory,
checkpointSharedDirectory,
fileStateSizeThreshold,
writeBufferSize);
String localFolder = "local";
TempDirUtils.newFolder(temporaryFolder, localFolder);
List<Path> filePaths =
generateRandomSstFiles(localFolder, sstFileCount, fileStateSizeThreshold);
CloseableRegistry tmpResourcesRegistry = new CloseableRegistry();
try (ForStStateDataTransfer stateTransfer = new ForStStateDataTransfer(1)) {
stateTransfer.transferFilesToCheckpointFs(
SnapshotType.SharingFilesStrategy.FORWARD_BACKWARD,
filePaths,
checkpointStreamFactory,
CheckpointedStateScope.SHARED,
new CloseableRegistry(),
tmpResourcesRegistry,
false);
assertThatThrownBy(
() ->
stateTransfer.transferFilesToCheckpointFs(
SnapshotType.SharingFilesStrategy.FORWARD_BACKWARD,
filePaths,
new LastFailingCheckpointStateOutputStreamFactory(
checkpointStreamFactory,
sstFileCount,
expectedException),
CheckpointedStateScope.SHARED,
new CloseableRegistry(),
tmpResourcesRegistry,
false))
.isEqualTo(expectedException);
assertThat(checkpointPrivateFolder.list()).isEmpty();
assertThat(checkpointSharedFolder.list()).isNotEmpty();
tmpResourcesRegistry.close();
// Check whether the temporary file before the exception can be cleaned up
assertThat(checkpointPrivateFolder.list()).isEmpty();
assertThat(checkpointSharedFolder.list()).isEmpty();
Path first = filePaths.stream().findFirst().get();
assertThatThrownBy(
() ->
stateTransfer.transferFilesToCheckpointFs(
SnapshotType.SharingFilesStrategy.FORWARD_BACKWARD,
Collections.singletonList(first),
checkpointStreamFactory,
CheckpointedStateScope.SHARED,
new CloseableRegistry(),
tmpResourcesRegistry,
false))
.as("Cannot register Closeable, registry is already closed. Closing argument.")
.isInstanceOf(IOException.class);
// Check whether the temporary file after the exception can be cleaned up.
assertThat(checkpointPrivateFolder.list()).isEmpty();
assertThat(checkpointSharedFolder.list()).isEmpty();
}
}
/** Test that transfer file head part correctly. */
@Test
void testTransferHeadPartCorrectly() throws Exception {
File checkpointPrivateFolder = TempDirUtils.newFolder(temporaryFolder, "private");
Path checkpointPrivateDirectory = Path.fromLocalFile(checkpointPrivateFolder);
File checkpointSharedFolder = TempDirUtils.newFolder(temporaryFolder, "shared");
Path checkpointSharedDirectory = Path.fromLocalFile(checkpointSharedFolder);
FileSystem fileSystem = checkpointPrivateDirectory.getFileSystem();
int fileStateSizeThreshold = 1024;
int headBytes = 512; // make sure just a part of origin state file
int writeBufferSize = 4096;
FsCheckpointStreamFactory checkpointStreamFactory =
new FsCheckpointStreamFactory(
fileSystem,
checkpointPrivateDirectory,
checkpointSharedDirectory,
fileStateSizeThreshold,
writeBufferSize);
String localFolder = "local";
TempDirUtils.newFolder(temporaryFolder, localFolder);
Path sstFile = generateRandomSstFile(localFolder, 1, fileStateSizeThreshold);
try (ForStStateDataTransfer stateTransfer = new ForStStateDataTransfer(5)) {
HandleAndLocalPath handleAndLocalPath =
stateTransfer.transferFileToCheckpointFs(
SnapshotType.SharingFilesStrategy.FORWARD_BACKWARD,
sstFile,
headBytes,
checkpointStreamFactory,
CheckpointedStateScope.SHARED,
new CloseableRegistry(),
new CloseableRegistry(),
false);
assertStateContentEqual(
sstFile, headBytes, handleAndLocalPath.getHandle().openInputStream());
}
}
/** Test that transfer files with multi-thread correctly. */
@Test
void testMultiThreadTransferCorrectly() throws Exception {
File checkpointPrivateFolder = TempDirUtils.newFolder(temporaryFolder, "private");
Path checkpointPrivateDirectory = Path.fromLocalFile(checkpointPrivateFolder);
File checkpointSharedFolder = TempDirUtils.newFolder(temporaryFolder, "shared");
Path checkpointSharedDirectory = Path.fromLocalFile(checkpointSharedFolder);
FileSystem fileSystem = checkpointPrivateDirectory.getFileSystem();
int fileStateSizeThreshold = 1024;
int writeBufferSize = 4096;
FsCheckpointStreamFactory checkpointStreamFactory =
new FsCheckpointStreamFactory(
fileSystem,
checkpointPrivateDirectory,
checkpointSharedDirectory,
fileStateSizeThreshold,
writeBufferSize);
String localFolder = "local";
TempDirUtils.newFolder(temporaryFolder, localFolder);
int sstFileCount = 6;
List<Path> sstFilePaths =
generateRandomSstFiles(localFolder, sstFileCount, fileStateSizeThreshold);
try (ForStStateDataTransfer stateTransfer = new ForStStateDataTransfer(5)) {
List<HandleAndLocalPath> sstFiles =
stateTransfer.transferFilesToCheckpointFs(
SnapshotType.SharingFilesStrategy.FORWARD_BACKWARD,
sstFilePaths,
checkpointStreamFactory,
CheckpointedStateScope.SHARED,
new CloseableRegistry(),
new CloseableRegistry(),
false);
for (Path path : sstFilePaths) {
assertStateContentEqual(
path,
-1,
sstFiles.stream()
.filter(e -> e.getLocalPath().equals(path.getName()))
.findFirst()
.get()
.getHandle()
.openInputStream());
}
}
}
private static CheckpointStateOutputStream createFailingCheckpointStateOutputStream(
IOException failureException) {
return new CheckpointStateOutputStream() {
@Nullable
@Override
public StreamStateHandle closeAndGetHandle() {
return new ByteStreamStateHandle("testHandle", "testHandle".getBytes());
}
@Override
public void close() {}
@Override
public long getPos() {
return 0;
}
@Override
public void flush() {}
@Override
public void sync() {}
@Override
public void write(int b) throws IOException {
throw failureException;
}
};
}
private List<Path> generateRandomSstFiles(
String localFolder, int fileCount, int fileStateSizeThreshold) throws IOException {
List<Path> sstFilePaths = new ArrayList<>(fileCount);
for (int i = 0; i < fileCount; ++i) {
sstFilePaths.add(generateRandomSstFile(localFolder, i, fileStateSizeThreshold));
}
return sstFilePaths;
}
private Path generateRandomSstFile(String localFolder, int fileNum, int fileStateSizeThreshold)
throws IOException {
ThreadLocalRandom random = ThreadLocalRandom.current();
File file =
TempDirUtils.newFile(
temporaryFolder, String.format("%s/%d.sst", localFolder, fileNum));
generateRandomFileContent(
file.getPath(), random.nextInt(1_000_000) + fileStateSizeThreshold);
return Path.fromLocalFile(file);
}
private void generateRandomFileContent(String filePath, int fileLength) throws IOException {
FileOutputStream fileStream = new FileOutputStream(filePath);
byte[] contents = new byte[fileLength];
ThreadLocalRandom.current().nextBytes(contents);
fileStream.write(contents);
fileStream.close();
}
private void assertStateContentEqual(
Path stateFilePath, long headBytes, FSDataInputStream inputStream) throws IOException {
byte[] excepted = readHeadBytes(stateFilePath, headBytes);
byte[] actual = new byte[excepted.length];
IOUtils.readFully(inputStream, actual, 0, actual.length);
// make sure there is no more bytes in inputStream
assertThat(inputStream.read()).isEqualTo(-1);
assertThat(actual).isEqualTo(excepted);
inputStream.close();
}
private byte[] readHeadBytes(Path path, long headBytes) throws IOException {
FileSystem fileSystem = path.getFileSystem();
FileStatus fileStatus = fileSystem.getFileStatus(path);
Preconditions.checkNotNull(fileStatus);
long len = fileStatus.getLen();
Preconditions.checkState(len >= headBytes);
try (FSDataInputStream inputStream = fileSystem.open(path)) {
int toRead = (int) (headBytes > 0 ? headBytes : len);
byte[] content = new byte[toRead];
int offset = 0;
final int singleReadSize = 16 * 1024;
while (toRead > 0) {
int num = inputStream.read(content, offset, Math.min(toRead, singleReadSize));
if (num == -1) {
break;
}
offset += num;
toRead -= num;
}
return content;
}
}
private static | ForStStateDataTransferTest |
java | apache__camel | components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/util/spring/SecureRandomParametersFactoryBeanTest.java | {
"start": 1286,
"end": 1549
} | class ____ {
@Resource
SecureRandomParameters srp;
@Test
public void testKeyStoreParameters() {
assertEquals("algorithm", srp.getAlgorithm());
assertEquals("provider", srp.getProvider());
}
}
| SecureRandomParametersFactoryBeanTest |
java | apache__maven | impl/maven-logging/src/main/java/org/apache/maven/logging/api/LogLevelRecorder.java | {
"start": 888,
"end": 1129
} | enum ____ {
DEBUG,
INFO,
WARN,
ERROR
}
boolean hasReachedMaxLevel();
Level getMaxLevelReached();
Level getMaxLevelAllowed();
void setMaxLevelAllowed(Level level);
void reset();
}
| Level |
java | google__dagger | javatests/dagger/internal/codegen/DuplicateBindingsValidationTest.java | {
"start": 45645,
"end": 46082
} | interface ____ {",
" @Provides",
" @Singleton",
" @Foo(bar = String.class)",
" static String foo(",
" @SuppressWarnings(\"unused\") int a,",
" @SuppressWarnings(\"unused\") ImmutableList<Boolean> blah) {",
" return \"\";",
" }",
" }",
"",
" @Module",
" | Module1 |
java | apache__kafka | clients/src/main/java/org/apache/kafka/common/security/scram/internals/ScramExtensions.java | {
"start": 1088,
"end": 1552
} | class ____ extends SaslExtensions {
public ScramExtensions() {
this(Collections.emptyMap());
}
public ScramExtensions(String extensions) {
this(Utils.parseMap(extensions, "=", ","));
}
public ScramExtensions(Map<String, String> extensionMap) {
super(extensionMap);
}
public boolean tokenAuthenticated() {
return Boolean.parseBoolean(map().get(ScramLoginModule.TOKEN_AUTH_CONFIG));
}
}
| ScramExtensions |
java | apache__avro | lang/java/mapred/src/main/java/org/apache/avro/hadoop/io/AvroSerialization.java | {
"start": 1739,
"end": 3048
} | class ____<T> extends Configured implements Serialization<AvroWrapper<T>> {
/**
* Conf key for the writer schema of the AvroKey datum being
* serialized/deserialized.
*/
private static final String CONF_KEY_WRITER_SCHEMA = "avro.serialization.key.writer.schema";
/**
* Conf key for the reader schema of the AvroKey datum being
* serialized/deserialized.
*/
private static final String CONF_KEY_READER_SCHEMA = "avro.serialization.key.reader.schema";
/**
* Conf key for the writer schema of the AvroValue datum being
* serialized/deserialized.
*/
private static final String CONF_VALUE_WRITER_SCHEMA = "avro.serialization.value.writer.schema";
/**
* Conf key for the reader schema of the AvroValue datum being
* serialized/deserialized.
*/
private static final String CONF_VALUE_READER_SCHEMA = "avro.serialization.value.reader.schema";
/** Conf key for the data model implementation class. */
private static final String CONF_DATA_MODEL = "avro.serialization.data.model";
/** {@inheritDoc} */
@Override
public boolean accept(Class<?> c) {
return AvroKey.class.isAssignableFrom(c) || AvroValue.class.isAssignableFrom(c);
}
/**
* Gets an object capable of deserializing the output from a Mapper.
*
* @param c The | AvroSerialization |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/filter/wall/sqlserver/SQLServerWallTest_5.java | {
"start": 901,
"end": 1352
} | class ____ extends TestCase {
public void test_true() throws Exception {
WallProvider provider = new SQLServerWallProvider();
provider.getConfig().setSelectHavingAlwayTrueCheck(true);
assertFalse(provider.checkValid(//
"delete t where LEN(HOST_NAME()) > 0"));
assertEquals(1, provider.getTableStats().size());
assertTrue(provider.getTableStats().containsKey("t"));
}
}
| SQLServerWallTest_5 |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/rpc/FencedRpcEndpointTest.java | {
"start": 6194,
"end": 7060
} | class ____ extends FencedRpcEndpoint<UUID>
implements FencedTestingGateway {
private final OneShotLatch computationLatch;
private final String value;
protected FencedTestingEndpoint(
RpcService rpcService, String value, UUID initialFencingToken) {
super(rpcService, initialFencingToken);
computationLatch = new OneShotLatch();
this.value = value;
}
@Override
public CompletableFuture<String> foobar(Duration timeout) {
return CompletableFuture.completedFuture(value);
}
@Override
public CompletableFuture<Acknowledge> triggerComputationLatch(Duration timeout) {
computationLatch.trigger();
return CompletableFuture.completedFuture(Acknowledge.get());
}
}
}
| FencedTestingEndpoint |
java | spring-projects__spring-framework | framework-docs/src/main/java/org/springframework/docs/integration/observability/applicationevents/EmailNotificationListener.java | {
"start": 971,
"end": 1411
} | class ____ {
private final Log logger = LogFactory.getLog(EmailNotificationListener.class);
@EventListener(EmailReceivedEvent.class)
@Async("propagatingContextExecutor")
public void emailReceived(EmailReceivedEvent event) {
// asynchronously process the received event
// this logging statement will contain the expected MDC entries from the propagated context
logger.info("email has been received");
}
}
| EmailNotificationListener |
java | apache__camel | components/camel-jms/src/test/java/org/apache/camel/component/jms/integration/JmsRoutingSlipInOutIT.java | {
"start": 1669,
"end": 3632
} | class ____ extends AbstractJMSTest {
@Order(2)
@RegisterExtension
public static CamelContextExtension camelContextExtension = new DefaultCamelContextExtension();
protected CamelContext context;
protected ProducerTemplate template;
protected ConsumerTemplate consumer;
@BindToRegistry("myBean")
private final MyBean bean = new MyBean();
@Test
public void testInOutRoutingSlip() throws Exception {
getMockEndpoint("mock:result").expectedBodiesReceived("Result-Done-B-A-Hello");
template.sendBody("activemq:queue:JmsRoutingSlipInOutTest.start", "Hello");
MockEndpoint.assertIsSatisfied(context, 20, TimeUnit.SECONDS);
}
@Override
protected String getComponentName() {
return "activemq";
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("activemq:queue:JmsRoutingSlipInOutTest.start").to("direct:start").to("bean:myBean?method=doResult")
.to("mock:result");
from("direct:start").to("bean:myBean?method=createSlip").setExchangePattern(ExchangePattern.InOut)
.routingSlip(header("mySlip"))
.to("bean:myBean?method=backFromSlip");
from("activemq:queue:JmsRoutingSlipInOutTest.a").to("bean:myBean?method=doA");
from("activemq:queue:JmsRoutingSlipInOutTest.b").to("bean:myBean?method=doB");
}
};
}
@Override
public CamelContextExtension getCamelContextExtension() {
return camelContextExtension;
}
@BeforeEach
void setUpRequirements() {
context = camelContextExtension.getContext();
template = camelContextExtension.getProducerTemplate();
consumer = camelContextExtension.getConsumerTemplate();
}
public static final | JmsRoutingSlipInOutIT |
java | apache__dubbo | dubbo-common/src/main/java/org/apache/dubbo/common/deploy/Deployer.java | {
"start": 934,
"end": 2367
} | interface ____<E extends ScopeModel> {
/**
* Initialize the component
*/
void initialize() throws IllegalStateException;
/**
* Starts the component.
* @return
*/
Future start() throws IllegalStateException;
/**
* Stops the component.
*/
void stop() throws IllegalStateException;
/**
* @return true if the component is added and waiting to start
*/
boolean isPending();
/**
* @return true if the component is starting or has been started.
*/
boolean isRunning();
/**
* @return true if the component has been started.
* @see #start()
* @see #isStarting()
*/
boolean isStarted();
boolean isCompletion();
/**
* @return true if the component is starting.
* @see #isStarted()
*/
boolean isStarting();
/**
* @return true if the component is stopping.
* @see #isStopped()
*/
boolean isStopping();
/**
* @return true if the component is stopping.
* @see #isStopped()
*/
boolean isStopped();
/**
* @return true if the component has failed to start or has failed to stop.
*/
boolean isFailed();
/**
* @return current state
*/
DeployState getState();
void addDeployListener(DeployListener<E> listener);
void removeDeployListener(DeployListener<E> listener);
Throwable getError();
}
| Deployer |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/common/io/ParseException.java | {
"start": 1059,
"end": 1484
} | class ____ extends RuntimeException {
private static final long serialVersionUID = -6721968786653128017L;
public ParseException() {
super();
}
public ParseException(String message) {
super(message);
}
public ParseException(Throwable cause) {
super(cause);
}
public ParseException(String message, Throwable cause) {
super(message, cause);
}
}
| ParseException |
java | spring-projects__spring-boot | module/spring-boot-jdbc/src/main/java/org/springframework/boot/jdbc/init/DatabaseInitializationProperties.java | {
"start": 903,
"end": 1047
} | class ____ performing SQL database initialization.
*
* @author Yanming Zhou
* @since 4.0.0
*/
@ConfigurationPropertiesSource
public abstract | for |
java | eclipse-vertx__vert.x | vertx-core/src/test/java/io/vertx/test/fakemetrics/FakeHttpServerMetrics.java | {
"start": 1012,
"end": 3786
} | class ____ extends FakeTCPMetrics implements HttpServerMetrics<HttpServerMetric, WebSocketMetric, SocketMetric> {
private final ConcurrentMap<String, WebSocketMetric> webSockets = new ConcurrentHashMap<>();
private final Set<HttpServerMetric> requests = ConcurrentHashMap.newKeySet();
public WebSocketMetric getWebSocketMetric(ServerWebSocket ws) {
return webSockets.get(ws.path());
}
public HttpServerMetric getRequestMetric(HttpServerRequest request) {
return requests.stream().filter(m -> m.uri.equals(request.uri())).findFirst().orElse(null);
}
public HttpServerMetric getResponseMetric(String uri) {
return requests.stream().filter(m -> m.uri.equals(uri)).findFirst().orElse(null);
}
@Override
public HttpServerMetric requestBegin(SocketMetric socketMetric, HttpRequest request) {
HttpServerMetric metric = new HttpServerMetric(request, socketMetric);
requests.add(metric);
return metric;
}
@Override
public void requestEnd(HttpServerMetric requestMetric, HttpRequest request, long bytesRead) {
requestMetric.requestEnded.set(true);
requestMetric.bytesRead.set(bytesRead);
}
@Override
public HttpServerMetric responsePushed(SocketMetric socketMetric, HttpMethod method, String uri, HttpResponse response) {
HttpServerMetric requestMetric = new HttpServerMetric(uri, socketMetric);
requestMetric.response.set(response);
requests.add(requestMetric);
return requestMetric;
}
@Override
public void requestReset(HttpServerMetric requestMetric) {
requestMetric.failed.set(true);
requests.remove(requestMetric);
}
@Override
public void responseBegin(HttpServerMetric requestMetric, HttpResponse response) {
requestMetric.response.set(response);
}
@Override
public void responseEnd(HttpServerMetric requestMetric, HttpResponse response, long bytesWritten) {
requests.remove(requestMetric);
requestMetric.responseEnded.set(true);
requestMetric.bytesWritten.set(bytesWritten);
}
@Override
public WebSocketMetric connected(SocketMetric socketMetric, HttpServerMetric requestMetric, ServerWebSocket serverWebSocket) {
WebSocketMetric metric = new WebSocketMetric(serverWebSocket);
if (webSockets.put(serverWebSocket.path(), metric) != null) {
throw new AssertionError();
}
return metric;
}
@Override
public void disconnected(WebSocketMetric serverWebSocketMetric) {
webSockets.remove(((ServerWebSocket)serverWebSocketMetric.ws).path());
}
@Override
public void exceptionOccurred(SocketMetric socketMetric, SocketAddress remoteAddress, Throwable t) {
}
@Override
public void requestRouted(HttpServerMetric requestMetric, String route) {
requestMetric.route.set(route);
}
}
| FakeHttpServerMetrics |
java | apache__camel | components/camel-vertx/camel-vertx/src/test/java/org/apache/camel/component/vertx/VertxBufferConverterTest.java | {
"start": 1165,
"end": 3469
} | class ____ extends CamelTestSupport {
private static final String BODY = "Hello World";
@Test
public void testStringToBuffer() {
Buffer buffer = context.getTypeConverter().convertTo(Buffer.class, BODY);
Assertions.assertEquals(BODY, buffer.toString());
}
@Test
public void testStringToBufferWithEncoding() {
Exchange exchange = ExchangeBuilder.anExchange(context)
.withHeader(Exchange.CONTENT_TYPE, "text/html; charset=iso-8859-4").build();
context.getTypeConverter().convertTo(Buffer.class, exchange, BODY);
Buffer buffer = context.getTypeConverter().convertTo(Buffer.class, BODY);
Assertions.assertEquals(BODY, buffer.toString());
}
@Test
public void testByteArrayToBuffer() {
Buffer buffer = context.getTypeConverter().convertTo(Buffer.class, BODY.getBytes());
Assertions.assertEquals(BODY, buffer.toString());
}
@Test
public void testByteBufToBuffer() {
Buffer buffer = context.getTypeConverter().convertTo(Buffer.class, Unpooled.wrappedBuffer(BODY.getBytes()));
Assertions.assertEquals(BODY, buffer.toString());
}
@Test
public void testInputStreamToBuffer() {
InputStream inputStream = context.getTypeConverter().convertTo(InputStream.class, BODY);
Buffer buffer = context.getTypeConverter().convertTo(Buffer.class, inputStream);
Assertions.assertEquals(BODY, buffer.toString());
}
@Test
public void testBufferToString() {
String result = context.getTypeConverter().convertTo(String.class, Buffer.buffer(BODY));
Assertions.assertEquals(BODY, result);
}
@Test
public void testBufferToStringWithEncoding() {
Exchange exchange = ExchangeBuilder.anExchange(context)
.withHeader(Exchange.CONTENT_TYPE, "text/html; charset=iso-8859-4").build();
String result = context.getTypeConverter().convertTo(String.class, exchange, Buffer.buffer(BODY));
Assertions.assertEquals(BODY, result);
}
@Test
public void testBufferToByteArray() {
byte[] result = context.getTypeConverter().convertTo(byte[].class, Buffer.buffer(BODY.getBytes()));
Assertions.assertEquals(BODY, new String(result));
}
}
| VertxBufferConverterTest |
java | apache__kafka | connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/SynchronizationTest.java | {
"start": 9560,
"end": 10000
} | class ____'t loaded during plugin discovery
String t2Class = Mockito.class.getName();
// PluginClassLoader breakpoint will only trigger on this thread
pclBreakpoint.set(t2Class::equals);
Runnable thread2 = () -> {
// Use the PluginClassLoader as the current context loader
try (LoaderSwap loaderSwap = plugins.withClassLoader(connectorLoader)) {
// Load a non-isolated | isn |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/id/enhanced/OptimizerLogger.java | {
"start": 1069,
"end": 2747
} | interface ____ extends BasicLogger {
String NAME = SubSystemLogging.BASE + ".id.optimizer";
OptimizerLogger OPTIMIZER_MESSAGE_LOGGER = Logger.getMessageLogger(
MethodHandles.lookup(),
OptimizerLogger.class,
NAME
);
@LogMessage(level = TRACE)
@Message(value = "Creating hilo optimizer with [incrementSize=%s, returnClass=%s]", id = 90401)
void creatingHiLoOptimizer(int incrementSize, String returnClassName);
@LogMessage(level = TRACE)
@Message(value = "Creating hilo optimizer (legacy) with [incrementSize=%s, returnClass=%s]", id = 90402)
void creatingLegacyHiLoOptimizer(int incrementSize, String returnClassName);
@LogMessage(level = TRACE)
@Message(value = "Creating pooled optimizer with [incrementSize=%s, returnClass=%s]", id = 90403)
void creatingPooledOptimizer(int incrementSize, String returnClassName);
@LogMessage(level = DEBUG)
@Message(value = "Creating pooled optimizer (lo) with [incrementSize=%s, returnClass=%s]", id = 90404)
void creatingPooledLoOptimizer(int incrementSize, String returnClassName);
@LogMessage(level = INFO)
@Message(value = "Pooled optimizer source reported [%s] as the initial value; use of 1 or greater highly recommended", id = 90405)
void pooledOptimizerReportedInitialValue(IntegralDataTypeHolder value);
@LogMessage(level = WARN)
@Message(value = "Unable to interpret specified optimizer [%s], falling back to noop optimizer", id = 90406)
void unableToLocateCustomOptimizerClass(String type);
@LogMessage(level = WARN)
@Message(value = "Unable to instantiate specified optimizer [%s], falling back to noop optimizer", id = 90407)
void unableToInstantiateOptimizer(String type);
}
| OptimizerLogger |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/schemaupdate/SchemaDropTest.java | {
"start": 1751,
"end": 3660
} | class ____ implements ExceptionHandler {
@BeforeEach
public void setUp(DomainModelScope modelScope) throws Exception {
var model = modelScope.getDomainModel();
model.orderColumns( false );
model.validate();
}
@Test
public void testDropSequence(ServiceRegistryScope registryScope, DomainModelScope modelScope) {
getSchemaDropper( registryScope ).doDrop(
modelScope.getDomainModel(),
options( registryScope ),
ContributableMatcher.ALL,
getSourceDescriptor(),
getTargetDescriptor()
);
}
private SchemaDropper getSchemaDropper(ServiceRegistryScope registryScope) {
return registryScope.getRegistry().requireService( SchemaManagementTool.class ).getSchemaDropper( null );
}
private ExecutionOptions options(ServiceRegistryScope registryScope) {
return new ExecutionOptions() {
@Override
public Map<String, Object> getConfigurationValues() {
return registryScope.getRegistry().requireService( ConfigurationService.class ).getSettings();
}
@Override
public boolean shouldManageNamespaces() {
return false;
}
@Override
public ExceptionHandler getExceptionHandler() {
return SchemaDropTest.this;
}
};
}
private TargetDescriptor getTargetDescriptor() {
return new TargetDescriptor() {
@Override
public EnumSet<TargetType> getTargetTypes() {
return EnumSet.of( TargetType.DATABASE );
}
@Override
public ScriptTargetOutput getScriptTargetOutput() {
return null;
}
};
}
private SourceDescriptor getSourceDescriptor() {
return new SourceDescriptor() {
@Override
public SourceType getSourceType() {
return SourceType.METADATA;
}
@Override
public ScriptSourceInput getScriptSourceInput() {
return null;
}
};
}
@Override
public void handleException(CommandAcceptanceException exception) {
throw exception;
}
@Entity(name = "MyEntity")
public static | SchemaDropTest |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/ClassUtils.java | {
"start": 43233,
"end": 44618
} | class ____ - it is not checked. The string has to be formatted the way as the
* JDK method {@code Class.getName()} returns it, and not the usual way as we write it, for example in import
* statements, or as it is formatted by {@code Class.getCanonicalName()}.
* </p>
*
* <p>
* The difference is significant only in case of classes that are inner classes of some other classes. In this case
* the separator between the outer and inner class (possibly on multiple hierarchy level) has to be {@code $} (dollar
* sign) and not {@code .} (dot), as it is returned by {@code Class.getName()}
* </p>
*
* <p>
* Note that this method is called from the {@link #getShortClassName(Class)} method using the string returned by
* {@code Class.getName()}.
* </p>
*
* <p>
* Note that this method differs from {@link #getSimpleName(Class)} in that this will return, for example
* {@code "Map.Entry"} whilst the {@link Class} variant will simply return {@code "Entry"}. In this example
* the argument {@code className} is the string {@code java.util.Map$Entry} (note the {@code $} sign).
* </p>
*
* @param className the className to get the short name for. It has to be formatted as returned by
* {@code Class.getName()} and not {@code Class.getCanonicalName()}.
* @return the | name |
java | spring-projects__spring-boot | module/spring-boot-jackson/src/main/java/org/springframework/boot/jackson/JacksonComponentModule.java | {
"start": 6519,
"end": 7468
} | class ____
implements BeanFactoryInitializationAotProcessor {
@Override
public @Nullable BeanFactoryInitializationAotContribution processAheadOfTime(
ConfigurableListableBeanFactory beanFactory) {
String[] jacksonComponents = beanFactory.getBeanNamesForAnnotation(JacksonComponent.class);
Map<Class<?>, List<Class<?>>> innerComponents = new HashMap<>();
for (String jacksonComponent : jacksonComponents) {
Class<?> type = beanFactory.getType(jacksonComponent, true);
Assert.state(type != null, "'type' must not be null");
for (Class<?> declaredClass : type.getDeclaredClasses()) {
if (isSuitableInnerClass(declaredClass)) {
innerComponents.computeIfAbsent(type, (t) -> new ArrayList<>()).add(declaredClass);
}
}
}
return innerComponents.isEmpty() ? null : new JacksonComponentAotContribution(innerComponents);
}
}
private static final | JacksonComponentBeanFactoryInitializationAotProcessor |
java | jhy__jsoup | src/test/java/org/jsoup/integration/ConnectIT.java | {
"start": 942,
"end": 11826
} | class ____ {
@BeforeAll
public static void setUp() {
TestServer.start();
System.setProperty(SharedConstants.UseHttpClient, "false"); // use the default UrlConnection. See HttpClientConnectIT for other version
}
// Slow Rider tests.
@Test
public void canInterruptBodyStringRead() throws InterruptedException {
final String[] body = new String[1];
Thread runner = new Thread(() -> {
try {
Connection.Response res = Jsoup.connect(SlowRider.Url)
.timeout(15 * 1000)
.execute();
body[0] = res.body();
} catch (IOException e) {
throw new RuntimeException(e);
}
});
runner.start();
Thread.sleep(1000 * 3);
runner.interrupt();
assertTrue(runner.isInterrupted());
runner.join();
assertTrue(body[0].length() > 0);
assertTrue(body[0].contains("<p>Are you still there?"));
}
@Test
public void canInterruptDocumentRead() throws InterruptedException {
long start = System.currentTimeMillis();
final String[] body = new String[1];
Thread runner = new Thread(() -> {
try {
Connection.Response res = Jsoup.connect(SlowRider.Url)
.timeout(15 * 1000)
.execute();
body[0] = res.parse().text();
} catch (IOException e) {
throw new RuntimeException(e);
}
});
runner.start();
Thread.sleep(3 * 1000);
runner.interrupt();
assertTrue(runner.isInterrupted());
runner.join();
long end = System.currentTimeMillis();
// check we are between 3 and connect timeout seconds (should be just over 3; but allow some slack for slow CI runners)
assertTrue(end - start > 3 * 1000);
assertTrue(end - start < 10 * 1000);
}
@Test public void canInterruptThenJoinASpawnedThread() throws InterruptedException {
// https://github.com/jhy/jsoup/issues/1991
AtomicBoolean ioException = new AtomicBoolean();
Thread runner = new Thread(() -> {
try {
while (!Thread.currentThread().isInterrupted()) {
Document doc = Jsoup.connect(SlowRider.Url)
.timeout(30000)
.get();
}
} catch (IOException e) {
ioException.set(true); // don't expect to catch, because the outer sleep will complete before this timeout
}
});
runner.start();
Thread.sleep(2 * 1000);
runner.interrupt();
runner.join();
assertFalse(ioException.get());
}
@Test
public void totalTimeout() throws IOException {
int timeout = 3 * 1000;
long start = System.currentTimeMillis();
boolean threw = false;
try {
Jsoup.connect(SlowRider.Url).timeout(timeout).get();
} catch (SocketTimeoutException e) {
long end = System.currentTimeMillis();
long took = end - start;
assertTrue(took > timeout, ("Time taken was " + took));
assertTrue(took < timeout * 1.8, ("Time taken was " + took));
threw = true;
}
assertTrue(threw);
}
@Test
public void slowReadOk() throws IOException {
// make sure that a slow read that is under the request timeout is still OK
Document doc = Jsoup.connect(SlowRider.Url)
.data(SlowRider.MaxTimeParam, "2000") // the request completes in 2 seconds
.get();
Element h1 = doc.selectFirst("h1");
assertEquals("outatime", h1.text());
}
@Test void readFullyThrowsOnTimeout() throws IOException {
// tests that response.readFully excepts on timeout
boolean caught = false;
Connection.Response res = Jsoup.connect(SlowRider.Url).timeout(3000).execute();
try {
res.readFully();
} catch (IOException e) {
caught = true;
}
assertTrue(caught);
}
@Test void readBodyThrowsOnTimeout() throws IOException {
// tests that response.readBody excepts on timeout
boolean caught = false;
Connection.Response res = Jsoup.connect(SlowRider.Url).timeout(3000).execute();
try {
res.readBody();
} catch (IOException e) {
caught = true;
}
assertTrue(caught);
}
@Test void bodyThrowsUncheckedOnTimeout() throws IOException {
// tests that response.body unchecked excepts on timeout
boolean caught = false;
Connection.Response res = Jsoup.connect(SlowRider.Url).timeout(3000).execute();
try {
res.body();
} catch (UncheckedIOException e) {
caught = true;
}
assertTrue(caught);
}
@Test
public void infiniteReadSupported() throws IOException {
Document doc = Jsoup.connect(SlowRider.Url)
.timeout(0)
.data(SlowRider.MaxTimeParam, "2000")
.get();
Element h1 = doc.selectFirst("h1");
assertEquals("outatime", h1.text());
}
@Test void streamParserUncheckedExceptionOnTimeoutInStream() throws IOException {
boolean caught = false;
try (StreamParser streamParser = Jsoup.connect(SlowRider.Url)
.data(SlowRider.MaxTimeParam, "10000")
.data(SlowRider.IntroSizeParam, "8000") // 8K to pass first buffer, or the timeout would occur in execute or streamparser()
.timeout(4000) // has a 1000 sleep at the start
.execute()
.streamParser()) {
// we should expect to timeout while in stream
try {
long count = streamParser.stream().count();
} catch (Exception e) {
caught = true;
UncheckedIOException ioe = (UncheckedIOException) e;
IOException cause = ioe.getCause();
//assertInstanceOf(SocketTimeoutException.class, cause); // different JDKs seem to wrap this differently
assertInstanceOf(IOException.class, cause);
}
}
assertTrue(caught);
}
@Test void streamParserCheckedExceptionOnTimeoutInSelect() throws IOException {
boolean caught = false;
try (StreamParser streamParser = Jsoup.connect(SlowRider.Url)
.data(SlowRider.MaxTimeParam, "10000")
.data(SlowRider.IntroSizeParam, "8000") // 8K to pass first buffer, or the timeout would occur in execute or streamparser()
.timeout(4000) // has a 1000 sleep at the start
.execute()
.streamParser()) {
// we should expect to timeout while in stream
try {
long count = 0;
while (streamParser.selectNext("p") != null) {
count++;
}
} catch (IOException e) {
caught = true;
}
}
assertTrue(caught);
}
private static final int LargeHtmlSize = 280735;
@Test
public void remainingAfterFirstRead() throws IOException {
int bufferSize = 5 * 1024;
int capSize = 100 * 1024;
String url = FileServlet.urlTo("/htmltests/large.html"); // 280 K
try (BufferedInputStream stream = Jsoup.connect(url).maxBodySize(capSize)
.execute().bodyStream()) {
// simulates parse which does a limited read first
stream.mark(bufferSize);
ByteBuffer firstBytes = DataUtil.readToByteBuffer(stream, bufferSize);
byte[] array = firstBytes.array();
String firstText = new String(array, StandardCharsets.UTF_8);
assertTrue(firstText.startsWith("<html><head><title>Large"));
assertEquals(bufferSize, array.length);
boolean fullyRead = stream.read() == -1;
assertFalse(fullyRead);
// reset and read again
stream.reset();
ByteBuffer fullRead = DataUtil.readToByteBuffer(stream, 0);
byte[] fullArray = fullRead.array();
// bodyStream is not capped to body size - only for jsoup consumed stream
assertTrue(fullArray.length > capSize);
assertEquals(LargeHtmlSize, fullRead.limit());
String fullText = new String(fullRead.array(), 0, fullRead.limit(), StandardCharsets.UTF_8);
assertTrue(fullText.startsWith(firstText));
assertEquals(LargeHtmlSize, fullText.length());
}
}
@Test
public void noLimitAfterFirstRead() throws IOException {
int firstMaxRead = 5 * 1024;
String url = FileServlet.urlTo("/htmltests/large.html"); // 280 K
try (BufferedInputStream stream = Jsoup.connect(url).execute().bodyStream()) {
// simulates parse which does a limited read first
stream.mark(firstMaxRead);
ByteBuffer firstBytes = DataUtil.readToByteBuffer(stream, firstMaxRead);
byte[] array = firstBytes.array();
String firstText = new String(array, StandardCharsets.UTF_8);
assertTrue(firstText.startsWith("<html><head><title>Large"));
assertEquals(firstMaxRead, array.length);
// reset and read fully
stream.reset();
ByteBuffer fullRead = DataUtil.readToByteBuffer(stream, 0);
assertEquals(LargeHtmlSize, fullRead.limit());
String fullText = new String(fullRead.array(), 0, fullRead.limit(), StandardCharsets.UTF_8);
assertTrue(fullText.startsWith(firstText));
assertEquals(LargeHtmlSize, fullText.length());
}
}
@Test public void bodyStreamConstrainedViaReadFully() throws IOException {
int cap = 5 * 1024;
String url = FileServlet.urlTo("/htmltests/large.html"); // 280 K
try (BufferedInputStream stream = Jsoup
.connect(url)
.maxBodySize(cap)
.execute()
.readFully()
.bodyStream()) {
ByteBuffer cappedRead = DataUtil.readToByteBuffer(stream, 0);
assertEquals(cap, cappedRead.limit());
}
}
@Test public void bodyStreamConstrainedViaBufferUp() throws IOException {
int cap = 5 * 1024;
String url = FileServlet.urlTo("/htmltests/large.html"); // 280 K
try (BufferedInputStream stream = Jsoup
.connect(url)
.maxBodySize(cap)
.execute()
.bufferUp()
.bodyStream()) {
ByteBuffer cappedRead = DataUtil.readToByteBuffer(stream, 0);
assertEquals(cap, cappedRead.limit());
}
}
}
| ConnectIT |
java | apache__maven | impl/maven-cli/src/main/java/org/apache/maven/cling/invoker/mvnup/goals/CompatibilityFixStrategy.java | {
"start": 12252,
"end": 22769
} | class ____ {
final Element element;
final String sectionName;
BuildContainer(Element element, String sectionName) {
this.element = element;
this.sectionName = sectionName;
}
}
/**
* Fixes unsupported repository URL expressions.
*/
private boolean fixUnsupportedRepositoryExpressions(Document pomDocument, UpgradeContext context) {
Element root = pomDocument.root();
// Collect all repository containers to process
Stream<Element> repositoryContainers = Stream.concat(
// Root level repositories
Stream.of(
root.child(REPOSITORIES).orElse(null),
root.child(PLUGIN_REPOSITORIES).orElse(null))
.filter(Objects::nonNull),
// Profile repositories
root.child(PROFILES).stream()
.flatMap(profiles -> profiles.children(PROFILE))
.flatMap(profile -> Stream.of(
profile.child(REPOSITORIES).orElse(null),
profile.child(PLUGIN_REPOSITORIES).orElse(null))
.filter(Objects::nonNull)));
return repositoryContainers
.map(container -> fixRepositoryExpressions(container, pomDocument, context))
.reduce(false, Boolean::logicalOr);
}
/**
* Fixes incorrect parent relative paths.
*/
private boolean fixIncorrectParentRelativePaths(
Document pomDocument, Path pomPath, Map<Path, Document> pomMap, UpgradeContext context) {
Element root = pomDocument.root();
Element parentElement = root.child(PARENT).orElse(null);
if (parentElement == null) {
return false; // No parent to fix
}
Element relativePathElement = parentElement.child(RELATIVE_PATH).orElse(null);
String currentRelativePath =
relativePathElement != null ? relativePathElement.textContent().trim() : DEFAULT_PARENT_RELATIVE_PATH;
// Try to find the correct parent POM
String parentGroupId = parentElement.childText(MavenPomElements.Elements.GROUP_ID);
String parentArtifactId = parentElement.childText(MavenPomElements.Elements.ARTIFACT_ID);
String parentVersion = parentElement.childText(MavenPomElements.Elements.VERSION);
Path correctParentPath = findParentPomInMap(context, parentGroupId, parentArtifactId, parentVersion, pomMap);
if (correctParentPath != null) {
try {
Path correctRelativePath = pomPath.getParent().relativize(correctParentPath);
String correctRelativePathStr = correctRelativePath.toString().replace('\\', '/');
if (!correctRelativePathStr.equals(currentRelativePath)) {
// Update or create relativePath element using DomUtils convenience method
DomUtils.updateOrCreateChildElement(parentElement, RELATIVE_PATH, correctRelativePathStr);
context.detail("Fixed: " + "relativePath corrected from '" + currentRelativePath + "' to '"
+ correctRelativePathStr + "'");
return true;
}
} catch (Exception e) {
context.failure("Failed to compute correct relativePath" + ": " + e.getMessage());
}
}
return false;
}
/**
* Recursively finds all elements with a specific attribute value.
*/
private Stream<Element> findElementsWithAttribute(Element element, String attributeName, String attributeValue) {
return Stream.concat(
// Check current element
Stream.of(element).filter(e -> {
String attr = e.attribute(attributeName);
return attr != null && attributeValue.equals(attr);
}),
// Recursively check children
element.children().flatMap(child -> findElementsWithAttribute(child, attributeName, attributeValue)));
}
/**
* Helper methods extracted from BaseUpgradeGoal for compatibility fixes.
*/
private boolean fixDuplicateDependenciesInSection(
Element dependenciesElement, UpgradeContext context, String sectionName) {
List<Element> dependencies = dependenciesElement.children(DEPENDENCY).toList();
Map<String, Element> seenDependencies = new HashMap<>();
List<Element> duplicates = dependencies.stream()
.filter(dependency -> {
String key = createDependencyKey(dependency);
if (seenDependencies.containsKey(key)) {
context.detail("Fixed: Removed duplicate dependency: " + key + " in " + sectionName);
return true; // This is a duplicate
} else {
seenDependencies.put(key, dependency);
return false; // This is the first occurrence
}
})
.toList();
// Remove duplicates while preserving formatting
duplicates.forEach(DomUtils::removeElement);
return !duplicates.isEmpty();
}
private String createDependencyKey(Element dependency) {
String groupId = dependency.childText(MavenPomElements.Elements.GROUP_ID);
String artifactId = dependency.childText(MavenPomElements.Elements.ARTIFACT_ID);
String type = dependency.childText(MavenPomElements.Elements.TYPE);
String classifier = dependency.childText(MavenPomElements.Elements.CLASSIFIER);
return groupId + ":" + artifactId + ":" + (type != null ? type : "jar") + ":"
+ (classifier != null ? classifier : "");
}
private boolean fixPluginsInBuildElement(Element buildElement, UpgradeContext context, String sectionName) {
boolean fixed = false;
Element pluginsElement = buildElement.child(PLUGINS).orElse(null);
if (pluginsElement != null) {
fixed |= fixDuplicatePluginsInSection(pluginsElement, context, sectionName + "/" + PLUGINS);
}
Element pluginManagementElement = buildElement.child(PLUGIN_MANAGEMENT).orElse(null);
if (pluginManagementElement != null) {
Element managedPluginsElement =
pluginManagementElement.child(PLUGINS).orElse(null);
if (managedPluginsElement != null) {
fixed |= fixDuplicatePluginsInSection(
managedPluginsElement, context, sectionName + "/" + PLUGIN_MANAGEMENT + "/" + PLUGINS);
}
}
return fixed;
}
/**
* Fixes duplicate plugins within a specific plugins section.
*/
private boolean fixDuplicatePluginsInSection(Element pluginsElement, UpgradeContext context, String sectionName) {
List<Element> plugins = pluginsElement.children(PLUGIN).toList();
Map<String, Element> seenPlugins = new HashMap<>();
List<Element> duplicates = plugins.stream()
.filter(plugin -> {
String key = createPluginKey(plugin);
if (key != null) {
if (seenPlugins.containsKey(key)) {
context.detail("Fixed: Removed duplicate plugin: " + key + " in " + sectionName);
return true; // This is a duplicate
} else {
seenPlugins.put(key, plugin);
}
}
return false; // This is the first occurrence or invalid plugin
})
.toList();
// Remove duplicates while preserving formatting
duplicates.forEach(DomUtils::removeElement);
return !duplicates.isEmpty();
}
private String createPluginKey(Element plugin) {
String groupId = plugin.childText(MavenPomElements.Elements.GROUP_ID);
String artifactId = plugin.childText(MavenPomElements.Elements.ARTIFACT_ID);
// Default groupId for Maven plugins
if (groupId == null && artifactId != null && artifactId.startsWith(MAVEN_PLUGIN_PREFIX)) {
groupId = DEFAULT_MAVEN_PLUGIN_GROUP_ID;
}
return (groupId != null && artifactId != null) ? groupId + ":" + artifactId : null;
}
private boolean fixRepositoryExpressions(
Element repositoriesElement, Document pomDocument, UpgradeContext context) {
if (repositoriesElement == null) {
return false;
}
boolean fixed = false;
String elementType = repositoriesElement.name().equals(REPOSITORIES) ? REPOSITORY : PLUGIN_REPOSITORY;
List<Element> repositories = repositoriesElement.children(elementType).toList();
for (Element repository : repositories) {
Element urlElement = repository.child("url").orElse(null);
if (urlElement != null) {
String url = urlElement.textContent().trim();
if (url.contains("${")) {
// Allow repository URL interpolation; do not disable.
// Keep a gentle warning to help users notice unresolved placeholders at build time.
String repositoryId = repository.childText("id");
context.info("Detected interpolated expression in " + elementType + " URL (id: " + repositoryId
+ "): " + url);
}
}
}
return fixed;
}
private Path findParentPomInMap(
UpgradeContext context, String groupId, String artifactId, String version, Map<Path, Document> pomMap) {
return pomMap.entrySet().stream()
.filter(entry -> {
Coordinates gav = AbstractUpgradeStrategy.extractArtifactCoordinatesWithParentResolution(
context, entry.getValue());
return gav != null
&& Objects.equals(gav.groupId(), groupId)
&& Objects.equals(gav.artifactId(), artifactId)
&& (version == null || Objects.equals(gav.version(), version));
})
.findFirst()
.map(Map.Entry::getKey)
.orElse(null);
}
}
| BuildContainer |
java | spring-projects__spring-boot | module/spring-boot-data-neo4j/src/test/java/org/springframework/boot/data/neo4j/autoconfigure/DataNeo4jRepositoriesAutoConfigurationTests.java | {
"start": 4571,
"end": 4772
} | class ____ {
}
@Configuration(proxyBeanMethods = false)
@EnableNeo4jRepositories("foo.bar")
@TestAutoConfigurationPackage(DataNeo4jRepositoriesAutoConfigurationTests.class)
static | EmptyConfiguration |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.