language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/internal/operators/observable/ObservableSingleMaybe.java
|
{
"start": 886,
"end": 1234
}
|
class ____<T> extends Maybe<T> {
final ObservableSource<T> source;
public ObservableSingleMaybe(ObservableSource<T> source) {
this.source = source;
}
@Override
public void subscribeActual(MaybeObserver<? super T> t) {
source.subscribe(new SingleElementObserver<>(t));
}
static final
|
ObservableSingleMaybe
|
java
|
google__auto
|
value/src/test/java/com/google/auto/value/processor/AutoValueCompilationTest.java
|
{
"start": 135037,
"end": 135946
}
|
class ____ {",
" @SuppressWarnings({UNDEFINED, \"mutable\"})",
" public abstract int[] buh();",
"}");
Compilation compilation2 =
javac()
.withOptions("-Xlint:-processing")
.withProcessors(new AutoValueProcessor())
.compile(bazFileObject);
assertThat(compilation2).hadErrorCount(1);
assertThat(compilation2)
.hadErrorContaining("UNDEFINED")
.inFile(bazFileObject)
.onLineContaining("UNDEFINED");
assertThat(compilation2).hadWarningCount(0);
}
@Test
public void packagePrivateAnnotationFromOtherPackage() {
JavaFileObject bazFileObject =
JavaFileObjects.forSourceLines(
"foo.bar.Baz",
"package foo.bar;",
"",
"import com.google.auto.value.AutoValue;",
"",
"@AutoValue",
"public abstract
|
Baz
|
java
|
apache__spark
|
common/tags/src/main/java/org/apache/spark/annotation/Private.java
|
{
"start": 881,
"end": 1773
}
|
class ____ is considered private to the internals of Spark -- there is a high-likelihood
* they will be changed in future versions of Spark.
*
* This should be used only when the standard Scala / Java means of protecting classes are
* insufficient. In particular, Java has no equivalent of private[spark], so we use this annotation
* in its place.
*
* NOTE: If there exists a Scaladoc comment that immediately precedes this annotation, the first
* line of the comment must be ":: Private ::" with no trailing blank line. This is because
* of the known issue that Scaladoc displays only either the annotation or the comment, whichever
* comes first.
*/
@Documented
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.TYPE, ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER,
ElementType.CONSTRUCTOR, ElementType.LOCAL_VARIABLE, ElementType.PACKAGE})
public @
|
that
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/transport/TransportService.java
|
{
"start": 69878,
"end": 72804
}
|
class ____ extends AbstractRefCounted {
// To handle a response we (i) remove the handler from responseHandlers and then (ii) enqueue an action to complete the handler on
// the target executor. Once step (i) succeeds then the handler won't be completed by any other mechanism, but if the target
// executor is stopped then step (ii) will fail with an EsRejectedExecutionException which means the handler leaks.
//
// We wait for all transport threads to finish before stopping any executors, so a transport thread will never fail at step (ii).
// Remote responses are always delivered on transport threads so there's no problem there, but direct responses may be delivered on
// a non-transport thread which runs concurrently to the stopping of the transport service. This means we need this explicit
// mechanism to block the shutdown of the transport service while there are direct handlers in between steps (i) and (ii).
private final CountDownLatch countDownLatch = new CountDownLatch(1);
@Override
protected void closeInternal() {
countDownLatch.countDown();
}
void stop() {
decRef();
try {
final boolean completed = countDownLatch.await(30, TimeUnit.SECONDS);
assert completed : "timed out waiting for all direct handlers to be enqueued";
} catch (InterruptedException e) {
assert false : e;
Thread.currentThread().interrupt();
}
}
@Nullable
Releasable withRef() {
if (tryIncRef()) {
return this::decRef;
} else {
return null;
}
}
}
private record UnregisterChildTransportResponseHandler<T extends TransportResponse>(
Releasable unregisterChildNode,
TransportResponseHandler<T> handler,
String action,
TransportRequest childRequest,
Transport.Connection childConnection,
TaskManager taskManager
) implements TransportResponseHandler<T> {
@Override
public void handleResponse(T response) {
unregisterChildNode.close();
handler.handleResponse(response);
}
@Override
public void handleException(TransportException exp) {
assert childRequest.getParentTask().isSet();
taskManager.cancelChildRemote(childRequest.getParentTask(), childRequest.getRequestId(), childConnection, exp.toString());
unregisterChildNode.close();
handler.handleException(exp);
}
@Override
public Executor executor() {
return handler.executor();
}
@Override
public T read(StreamInput in) throws IOException {
return handler.read(in);
}
}
}
|
PendingDirectHandlers
|
java
|
apache__camel
|
components/camel-zip-deflater/src/generated/java/org/apache/camel/dataformat/deflater/ZipDeflaterDataFormatConfigurer.java
|
{
"start": 730,
"end": 2335
}
|
class ____ extends org.apache.camel.support.component.PropertyConfigurerSupport implements GeneratedPropertyConfigurer, ExtendedPropertyConfigurerGetter {
private static final Map<String, Object> ALL_OPTIONS;
static {
Map<String, Object> map = new CaseInsensitiveMap();
map.put("CompressionLevel", int.class);
ALL_OPTIONS = map;
}
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
ZipDeflaterDataFormat target = (ZipDeflaterDataFormat) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "compressionlevel":
case "compressionLevel": target.setCompressionLevel(property(camelContext, int.class, value)); return true;
default: return false;
}
}
@Override
public Map<String, Object> getAllOptions(Object target) {
return ALL_OPTIONS;
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "compressionlevel":
case "compressionLevel": return int.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
ZipDeflaterDataFormat target = (ZipDeflaterDataFormat) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "compressionlevel":
case "compressionLevel": return target.getCompressionLevel();
default: return null;
}
}
}
|
ZipDeflaterDataFormatConfigurer
|
java
|
elastic__elasticsearch
|
modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java
|
{
"start": 2530,
"end": 18787
}
|
class ____ extends ESSingleNodeTestCase {
@Override
protected Collection<Class<? extends Plugin>> getPlugins() {
return List.of(MustachePlugin.class, DummyQueryParserPlugin.class);
}
@Override
protected Settings nodeSettings() {
return Settings.builder().put(SearchService.CCS_VERSION_CHECK_SETTING.getKey(), "true").build();
}
@Before
public void setup() throws IOException {
createIndex("test");
prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("text", "value1").endObject()).get();
prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("text", "value2").endObject()).get();
indicesAdmin().prepareRefresh().get();
}
// Relates to #6318
public void testSearchRequestFail() throws Exception {
String query = """
{ "query": {"match_all": {}}, "size" : "{{my_size}}" }""";
SearchRequest searchRequest = new SearchRequest();
searchRequest.indices("_all");
expectThrows(
Exception.class,
() -> new SearchTemplateRequestBuilder(client()).setRequest(searchRequest)
.setScript(query)
.setScriptType(ScriptType.INLINE)
.setScriptParams(randomBoolean() ? null : Collections.emptyMap())
.get()
);
assertResponse(
new SearchTemplateRequestBuilder(client()).setRequest(searchRequest)
.setScript(query)
.setScriptType(ScriptType.INLINE)
.setScriptParams(Collections.singletonMap("my_size", 1)),
searchResponse -> assertThat(searchResponse.getResponse().getHits().getHits().length, equalTo(1))
);
}
/**
* Test that template can be expressed as a single escaped string.
*/
public void testTemplateQueryAsEscapedString() throws Exception {
SearchRequest searchRequest = new SearchRequest();
searchRequest.indices("_all");
String query = """
{
"source": "{ \\"size\\": \\"{{size}}\\", \\"query\\":{\\"match_all\\":{}}}",
"params": {
"size": 1
}
}""";
SearchTemplateRequest request = SearchTemplateRequest.fromXContent(createParser(JsonXContent.jsonXContent, query));
request.setRequest(searchRequest);
assertResponse(
client().execute(MustachePlugin.SEARCH_TEMPLATE_ACTION, request),
searchResponse -> assertThat(searchResponse.getResponse().getHits().getHits().length, equalTo(1))
);
}
/**
* Test that template can contain conditional clause. In this case it is at
* the beginning of the string.
*/
public void testTemplateQueryAsEscapedStringStartingWithConditionalClause() throws Exception {
SearchRequest searchRequest = new SearchRequest();
searchRequest.indices("_all");
String templateString = """
{
"source": "{ {{#use_size}} \\"size\\": \\"{{size}}\\", {{/use_size}} \\"query\\":{\\"match_all\\":{}}}",
"params": {
"size": 1,
"use_size": true
}
}""";
SearchTemplateRequest request = SearchTemplateRequest.fromXContent(createParser(JsonXContent.jsonXContent, templateString));
request.setRequest(searchRequest);
assertResponse(
client().execute(MustachePlugin.SEARCH_TEMPLATE_ACTION, request),
searchResponse -> assertThat(searchResponse.getResponse().getHits().getHits().length, equalTo(1))
);
}
/**
* Test that template can contain conditional clause. In this case it is at
* the end of the string.
*/
public void testTemplateQueryAsEscapedStringWithConditionalClauseAtEnd() throws Exception {
SearchRequest searchRequest = new SearchRequest();
searchRequest.indices("_all");
String templateString = """
{
"source": "{ \\"query\\":{\\"match_all\\":{}} {{#use_size}}, \\"size\\": \\"{{size}}\\" {{/use_size}} }",
"params": {
"size": 1,
"use_size": true
}
}""";
SearchTemplateRequest request = SearchTemplateRequest.fromXContent(createParser(JsonXContent.jsonXContent, templateString));
request.setRequest(searchRequest);
assertResponse(
client().execute(MustachePlugin.SEARCH_TEMPLATE_ACTION, request),
searchResponse -> assertThat(searchResponse.getResponse().getHits().getHits().length, equalTo(1))
);
}
public void testIndexedTemplateClient() {
putJsonStoredScript("testTemplate", """
{
"script": {
"lang": "mustache",
"source": {
"query": {
"match": {
"theField": "{{fieldParam}}"
}
}
}
}
}""");
GetStoredScriptResponse getResponse = safeExecute(
GetStoredScriptAction.INSTANCE,
new GetStoredScriptRequest(TEST_REQUEST_TIMEOUT, "testTemplate")
);
assertNotNull(getResponse.getSource());
BulkRequestBuilder bulkRequestBuilder = client().prepareBulk();
bulkRequestBuilder.add(prepareIndex("test").setId("1").setSource("{\"theField\":\"foo\"}", XContentType.JSON));
bulkRequestBuilder.add(prepareIndex("test").setId("2").setSource("{\"theField\":\"foo 2\"}", XContentType.JSON));
bulkRequestBuilder.add(prepareIndex("test").setId("3").setSource("{\"theField\":\"foo 3\"}", XContentType.JSON));
bulkRequestBuilder.add(prepareIndex("test").setId("4").setSource("{\"theField\":\"foo 4\"}", XContentType.JSON));
bulkRequestBuilder.add(prepareIndex("test").setId("5").setSource("{\"theField\":\"bar\"}", XContentType.JSON));
bulkRequestBuilder.get();
indicesAdmin().prepareRefresh().get();
Map<String, Object> templateParams = new HashMap<>();
templateParams.put("fieldParam", "foo");
assertHitCount(
new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest("test"))
.setScript("testTemplate")
.setScriptType(ScriptType.STORED)
.setScriptParams(templateParams),
4
);
assertAcked(
safeExecute(
TransportDeleteStoredScriptAction.TYPE,
new DeleteStoredScriptRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "testTemplate")
)
);
getResponse = safeExecute(GetStoredScriptAction.INSTANCE, new GetStoredScriptRequest(TEST_REQUEST_TIMEOUT, "testTemplate"));
assertNull(getResponse.getSource());
}
public void testBadTemplate() {
// This template will produce badly formed json if given a multi-valued `text_fields` parameter,
// as it does not add commas between the entries. We test that it produces a 400 json parsing
// error both when used directly and when used in a render template request.
String script = """
{
"query": {
"multi_match": {
"query": "{{query_string}}",
"fields": [{{#text_fields}}"{{name}}^{{boost}}"{{/text_fields}}]
}
},
"from": "{{from}}",
"size": "{{size}}"
}""";
Map<String, Object> params = Map.of(
"text_fields",
List.of(Map.of("name", "title", "boost", 10), Map.of("name", "description", "boost", 2)),
"from",
0,
"size",
0
);
{
XContentParseException e = expectThrows(XContentParseException.class, () -> {
new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest())
.setScript(script)
.setScriptParams(params)
.setScriptType(ScriptType.INLINE)
.get();
});
assertThat(e.getMessage(), containsString("Unexpected character"));
}
{
XContentParseException e = expectThrows(XContentParseException.class, () -> {
new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest())
.setScript(script)
.setScriptParams(params)
.setScriptType(ScriptType.INLINE)
.setSimulate(true)
.get();
});
assertThat(e.getMessage(), containsString("Unexpected character"));
}
}
public void testIndexedTemplate() {
String script = """
{
"script": {
"lang": "mustache",
"source": {
"query": {
"match": {
"theField": "{{fieldParam}}"
}
}
}
}
}
""";
putJsonStoredScript("1a", script);
putJsonStoredScript("2", script);
putJsonStoredScript("3", script);
BulkRequestBuilder bulkRequestBuilder = client().prepareBulk();
bulkRequestBuilder.add(prepareIndex("test").setId("1").setSource("{\"theField\":\"foo\"}", XContentType.JSON));
bulkRequestBuilder.add(prepareIndex("test").setId("2").setSource("{\"theField\":\"foo 2\"}", XContentType.JSON));
bulkRequestBuilder.add(prepareIndex("test").setId("3").setSource("{\"theField\":\"foo 3\"}", XContentType.JSON));
bulkRequestBuilder.add(prepareIndex("test").setId("4").setSource("{\"theField\":\"foo 4\"}", XContentType.JSON));
bulkRequestBuilder.add(prepareIndex("test").setId("5").setSource("{\"theField\":\"bar\"}", XContentType.JSON));
bulkRequestBuilder.get();
indicesAdmin().prepareRefresh().get();
Map<String, Object> templateParams = new HashMap<>();
templateParams.put("fieldParam", "foo");
assertHitCount(
new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest().indices("test"))
.setScript("1a")
.setScriptType(ScriptType.STORED)
.setScriptParams(templateParams),
4
);
expectThrows(
ResourceNotFoundException.class,
() -> new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest().indices("test"))
.setScript("1000")
.setScriptType(ScriptType.STORED)
.setScriptParams(templateParams)
.get()
);
templateParams.put("fieldParam", "bar");
assertHitCount(
new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest("test"))
.setScript("2")
.setScriptType(ScriptType.STORED)
.setScriptParams(templateParams),
1
);
}
// Relates to #10397
public void testIndexedTemplateOverwrite() throws Exception {
createIndex("testindex");
ensureGreen("testindex");
prepareIndex("testindex").setId("1").setSource(jsonBuilder().startObject().field("searchtext", "dev1").endObject()).get();
indicesAdmin().prepareRefresh().get();
int iterations = randomIntBetween(2, 11);
String query = """
{
"script": {
"lang": "mustache",
"source": {
"query": {
"match_phrase_prefix": {
"searchtext": {
"query": "{{P_Keyword1}}",
"slop": "{{slop}}"
}
}
}
}
}
}""";
for (int i = 1; i < iterations; i++) {
putJsonStoredScript("git01", query.replace("{{slop}}", Integer.toString(-1)));
GetStoredScriptResponse getResponse = safeExecute(
GetStoredScriptAction.INSTANCE,
new GetStoredScriptRequest(TEST_REQUEST_TIMEOUT, "git01")
);
assertNotNull(getResponse.getSource());
Map<String, Object> templateParams = new HashMap<>();
templateParams.put("P_Keyword1", "dev");
IllegalArgumentException e = expectThrows(
IllegalArgumentException.class,
() -> new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest("testindex"))
.setScript("git01")
.setScriptType(ScriptType.STORED)
.setScriptParams(templateParams)
.get()
);
assertThat(e.getMessage(), containsString("No negative slop allowed"));
putJsonStoredScript("git01", query.replace("{{slop}}", Integer.toString(0)));
assertHitCount(
new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest("testindex"))
.setScript("git01")
.setScript("git01")
.setScriptType(ScriptType.STORED)
.setScriptParams(templateParams),
1
);
}
}
public void testIndexedTemplateWithArray() {
putJsonStoredScript("4", """
{
"script": {
"lang": "mustache",
"source": {
"query": {
"terms": {
"theField": [
"{{#fieldParam}}",
"{{.}}",
"{{/fieldParam}}"
]
}
}
}
}
}""");
BulkRequestBuilder bulkRequestBuilder = client().prepareBulk();
bulkRequestBuilder.add(prepareIndex("test").setId("1").setSource("{\"theField\":\"foo\"}", XContentType.JSON));
bulkRequestBuilder.add(prepareIndex("test").setId("2").setSource("{\"theField\":\"foo 2\"}", XContentType.JSON));
bulkRequestBuilder.add(prepareIndex("test").setId("3").setSource("{\"theField\":\"foo 3\"}", XContentType.JSON));
bulkRequestBuilder.add(prepareIndex("test").setId("4").setSource("{\"theField\":\"foo 4\"}", XContentType.JSON));
bulkRequestBuilder.add(prepareIndex("test").setId("5").setSource("{\"theField\":\"bar\"}", XContentType.JSON));
bulkRequestBuilder.get();
indicesAdmin().prepareRefresh().get();
Map<String, Object> arrayTemplateParams = new HashMap<>();
String[] fieldParams = { "foo", "bar" };
arrayTemplateParams.put("fieldParam", fieldParams);
assertHitCount(
new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest("test"))
.setScript("4")
.setScriptType(ScriptType.STORED)
.setScriptParams(arrayTemplateParams),
5
);
}
/**
* Test that triggering the CCS compatibility check with a query that shouldn't go to the minor before
* TransportVersion.minimumCCSVersion() works
*/
public void testCCSCheckCompatibility() throws Exception {
String templateString = """
{
"source": "{ \\"query\\":{\\"fail_before_current_version\\":{}} }"
}""";
SearchTemplateRequest request = SearchTemplateRequest.fromXContent(createParser(JsonXContent.jsonXContent, templateString));
request.setRequest(new SearchRequest());
ExecutionException ex = expectThrows(
ExecutionException.class,
() -> client().execute(MustachePlugin.SEARCH_TEMPLATE_ACTION, request).get()
);
Throwable primary = ex.getCause();
assertNotNull(primary);
Throwable underlying = primary.getCause();
assertNotNull(underlying);
assertThat(
primary.getMessage(),
containsString("[
|
SearchTemplateIT
|
java
|
apache__flink
|
flink-libraries/flink-state-processing-api/src/test/java/org/apache/flink/state/api/input/KeyedStateInputFormatTest.java
|
{
"start": 13483,
"end": 14033
}
|
class ____ extends KeyedStateReaderFunction<Integer, Integer> {
@Override
public void open(OpenContext openContext) {
getRuntimeContext().getState(stateDescriptor);
}
@Override
public void readKey(
Integer key, KeyedStateReaderFunction.Context ctx, Collector<Integer> out)
throws Exception {
ValueState<Integer> state = getRuntimeContext().getState(stateDescriptor);
out.collect(state.value());
}
}
static
|
InvalidReaderFunction
|
java
|
spring-projects__spring-framework
|
spring-jms/src/test/java/org/springframework/jms/annotation/EnableJmsTests.java
|
{
"start": 10193,
"end": 10580
}
|
class ____ implements JmsListenerConfigurer {
@Override
public void configureJmsListeners(JmsListenerEndpointRegistrar registrar) {
registrar.setContainerFactory(simpleFactory());
}
@Bean
public JmsListenerContainerTestFactory simpleFactory() {
return new JmsListenerContainerTestFactory();
}
}
@Configuration
@EnableJms
static
|
EnableJmsCustomContainerFactoryConfig
|
java
|
google__auto
|
value/src/it/functional/src/test/java/com/google/auto/value/AutoValueJava8Test.java
|
{
"start": 36922,
"end": 37140
}
|
class ____ {
public abstract Bar.Builder toBuilder();
public static Bar.Builder builder() {
return new AutoValue_AutoValueJava8Test_Bar.Builder();
}
@AutoValue.Builder
public abstract static
|
Bar
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsInputStreamStatistics.java
|
{
"start": 969,
"end": 1915
}
|
class ____ extends AbstractAbfsIntegrationTest {
private static final int OPERATIONS = 100;
public TestAbfsInputStreamStatistics() throws Exception {
}
/**
* Test to check the bytesReadFromBuffer statistic value from AbfsInputStream.
*/
@Test
public void testBytesReadFromBufferStatistic() {
describe("Testing bytesReadFromBuffer statistics value in AbfsInputStream");
AbfsInputStreamStatisticsImpl abfsInputStreamStatistics =
new AbfsInputStreamStatisticsImpl();
//Increment the bytesReadFromBuffer value.
for (int i = 0; i < OPERATIONS; i++) {
abfsInputStreamStatistics.bytesReadFromBuffer(1);
}
/*
* Since we incremented the bytesReadFromBuffer OPERATIONS times, this
* should be the expected value.
*/
assertEquals(OPERATIONS, abfsInputStreamStatistics.getBytesReadFromBuffer(),
"Mismatch in bytesReadFromBuffer value");
}
}
|
TestAbfsInputStreamStatistics
|
java
|
quarkusio__quarkus
|
integration-tests/hibernate-search-orm-elasticsearch-tenancy/src/test/java/io/quarkus/it/hibernate/search/orm/elasticsearch/multitenancy/fruit/HibernateSearchTenancyFunctionalityTest.java
|
{
"start": 644,
"end": 3497
}
|
class ____ {
public static final TypeRef<List<Fruit>> FRUIT_LIST_TYPE_REF = new TypeRef<>() {
};
private static RestAssuredConfig config;
@Test
public void test() {
String tenant1Id = "company1";
String tenant2Id = "company2";
String fruitName = "myFruit";
// Check the indexes are empty
assertThat(search(tenant1Id, fruitName)).isEmpty();
assertThat(search(tenant2Id, fruitName)).isEmpty();
// Create fruit for tenant 1
Fruit fruit1 = new Fruit(fruitName);
create(tenant1Id, fruit1);
assertThat(search(tenant1Id, fruitName)).hasSize(1);
assertThat(search(tenant2Id, fruitName)).isEmpty();
// Create fruit for tenant 2
Fruit fruit2 = new Fruit(fruitName);
create(tenant2Id, fruit2);
assertThat(search(tenant1Id, fruitName)).hasSize(1);
assertThat(search(tenant2Id, fruitName)).hasSize(1);
// Update fruit for tenant 1
fruit1 = search(tenant1Id, fruitName).get(0);
fruit1.setName("newName");
update(tenant1Id, fruit1);
assertThat(search(tenant1Id, fruitName)).isEmpty();
assertThat(search(tenant1Id, "newName")).hasSize(1);
assertThat(search(tenant2Id, fruitName)).hasSize(1);
assertThat(search(tenant2Id, "newName")).isEmpty();
// Delete fruit for tenant 2
fruit2 = search(tenant2Id, fruitName).get(0);
delete(tenant2Id, fruit2);
assertThat(search(tenant1Id, fruitName)).isEmpty();
assertThat(search(tenant2Id, fruitName)).isEmpty();
}
private void create(String tenantId, Fruit fruit) {
given().config(config).with().body(fruit).contentType(ContentType.JSON)
.when().post("/" + tenantId + "/fruits")
.then()
.statusCode(is(Status.CREATED.getStatusCode()));
}
private void update(String tenantId, Fruit fruit) {
given().config(config).with().body(fruit).contentType(ContentType.JSON)
.when().put("/" + tenantId + "/fruits/" + fruit.getId())
.then()
.statusCode(is(Status.OK.getStatusCode()));
}
private void delete(String tenantId, Fruit fruit) {
given().config(config)
.when().delete("/" + tenantId + "/fruits/" + fruit.getId())
.then()
.statusCode(is(Status.NO_CONTENT.getStatusCode()));
}
private List<Fruit> search(String tenantId, String terms) {
Response response = given().config(config)
.when().get("/" + tenantId + "/fruits/search?terms={terms}", terms);
if (response.getStatusCode() == Status.OK.getStatusCode()) {
return response.as(FRUIT_LIST_TYPE_REF);
}
return List.of();
}
}
|
HibernateSearchTenancyFunctionalityTest
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/MembersInjectionTest.java
|
{
"start": 51389,
"end": 52023
}
|
class ____ {",
" @Inject Bar() {}",
"}");
CompilerTests.daggerCompiler(component, foo, bar)
.withProcessingOptions(compilerMode.processorOptions())
.compile(
subject -> {
subject.hasErrorCount(0);
subject.generatedSource(goldenFileRule.goldenSource("test/DaggerMyComponent"));
});
}
@Test
public void kotlinNullableFieldInjection() {
Source file =
CompilerTests.kotlinSource(
"MyClass.kt",
"package test;",
"",
"import javax.inject.Inject;",
"",
"
|
Bar
|
java
|
google__dagger
|
dagger-android/main/java/dagger/android/DispatchingAndroidInjector.java
|
{
"start": 5205,
"end": 5925
}
|
class ____ not
* inject instances of that class
* @throws IllegalArgumentException if no {@link AndroidInjector.Factory} is bound for {@code
* instance}
*/
@Override
public void inject(T instance) {
boolean wasInjected = maybeInject(instance);
if (!wasInjected) {
throw new IllegalArgumentException(errorMessageSuggestions(instance));
}
}
/**
* Exception thrown if an incorrect binding is made for a {@link AndroidInjector.Factory}. If you
* see this exception, make sure the value in your {@code @ActivityKey(YourActivity.class)} or
* {@code @FragmentKey(YourFragment.class)} matches the type argument of the injector factory.
*/
@Beta
public static final
|
does
|
java
|
apache__flink
|
flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/protobuf/PatchedProtoWriteSupport.java
|
{
"start": 8031,
"end": 9865
}
|
class ____ descriptor not specified.";
String hint =
" Please use method ProtoParquetOutputFormat.setProtobufClass(...) or other similar method.";
throw new BadConfigurationException(msg + hint);
}
}
descriptor = Protobufs.getMessageDescriptor(protoMessage);
extraMetaData.put(ProtoReadSupport.PB_CLASS, protoMessage.getName());
}
unwrapProtoWrappers =
configuration.getBoolean(PB_UNWRAP_PROTO_WRAPPERS, unwrapProtoWrappers);
writeSpecsCompliant =
configuration.getBoolean(PB_SPECS_COMPLIANT_WRITE, writeSpecsCompliant);
MessageType rootSchema = new PatchedProtoSchemaConverter(configuration).convert(descriptor);
validatedMapping(descriptor, rootSchema);
this.messageWriter = new MessageWriter(descriptor, rootSchema);
extraMetaData.put(ProtoReadSupport.PB_DESCRIPTOR, descriptor.toProto().toString());
extraMetaData.put(PB_SPECS_COMPLIANT_WRITE, String.valueOf(writeSpecsCompliant));
extraMetaData.put(PB_UNWRAP_PROTO_WRAPPERS, String.valueOf(unwrapProtoWrappers));
return new WriteContext(rootSchema, extraMetaData);
}
@Override
public FinalizedWriteContext finalizeWrite() {
Map<String, String> protoMetadata = enumMetadata();
return new FinalizedWriteContext(protoMetadata);
}
private Map<String, String> enumMetadata() {
Map<String, String> enumMetadata = new HashMap<>();
for (Map.Entry<String, Map<String, Integer>> enumNameNumberMapping :
protoEnumBookKeeper.entrySet()) {
StringBuilder nameNumberPairs = new StringBuilder();
if (enumNameNumberMapping.getValue().isEmpty()) {
// No
|
or
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java
|
{
"start": 1473,
"end": 2900
}
|
class ____ extends HtmlBlock {
private Configuration conf;
@Inject
public NavBlock(Configuration conf) {
this.conf = conf;
}
@Override public void render(Block html) {
boolean addErrorsAndWarningsLink = false;
if (isLog4jLogger(NavBlock.class)) {
Log4jWarningErrorMetricsAppender appender =
Log4jWarningErrorMetricsAppender.findAppender();
if (appender != null) {
addErrorsAndWarningsLink = true;
}
}
UL<DIV<Hamlet>> mainList = html.
div("#nav").
h3("Cluster").
ul().
li().a(url("cluster"), "About").__().
li().a(url("nodes"), "Nodes").__().
li().a(url("nodelabels"), "Node Labels").__();
UL<LI<UL<DIV<Hamlet>>>> subAppsList = mainList.
li().a(url("apps"), "Applications").
ul();
subAppsList.li().__();
for (YarnApplicationState state : YarnApplicationState.values()) {
subAppsList.
li().a(url("apps", state.toString()), state.toString()).__();
}
subAppsList.__().__();
DIV<Hamlet> sectionBefore = mainList.
li().a(url("scheduler"), "Scheduler").__().__();
UL<DIV<Hamlet>> tools = WebPageUtils.appendToolSection(sectionBefore, conf);
if (tools == null) {
return;
}
if (addErrorsAndWarningsLink) {
tools.li().a(url("errors-and-warnings"), "Errors/Warnings").__();
}
tools.__().__();
}
}
|
NavBlock
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/AssistedFactoryErrorsTest.java
|
{
"start": 3860,
"end": 4115
}
|
class ____ {}");
Source noAssistedParam =
CompilerTests.javaSource(
"test.NoAssistedParam",
"package test;",
"",
"import dagger.assisted.AssistedInject;",
"",
"final
|
NoInject
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestFileSystem.java
|
{
"start": 5825,
"end": 8586
}
|
class ____ extends Configured
implements Mapper<Text, LongWritable, Text, LongWritable> {
private Random random = new Random();
private byte[] buffer = new byte[BUFFER_SIZE];
private FileSystem fs;
private boolean fastCheck;
// a random suffix per task
private String suffix = "-"+random.nextLong();
{
try {
fs = FileSystem.get(conf);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public WriteMapper() { super(null); }
public WriteMapper(Configuration conf) { super(conf); }
public void configure(JobConf job) {
setConf(job);
fastCheck = job.getBoolean("fs.test.fastCheck", false);
}
public void map(Text key, LongWritable value,
OutputCollector<Text, LongWritable> collector,
Reporter reporter)
throws IOException {
String name = key.toString();
long size = value.get();
long seed = Long.parseLong(name);
random.setSeed(seed);
reporter.setStatus("creating " + name);
// write to temp file initially to permit parallel execution
Path tempFile = new Path(DATA_DIR, name+suffix);
OutputStream out = fs.create(tempFile);
long written = 0;
try {
while (written < size) {
if (fastCheck) {
Arrays.fill(buffer, (byte)random.nextInt(Byte.MAX_VALUE));
} else {
random.nextBytes(buffer);
}
long remains = size - written;
int length = (remains<=buffer.length) ? (int)remains : buffer.length;
out.write(buffer, 0, length);
written += length;
reporter.setStatus("writing "+name+"@"+written+"/"+size);
}
} finally {
out.close();
}
// rename to final location
fs.rename(tempFile, new Path(DATA_DIR, name));
collector.collect(new Text("bytes"), new LongWritable(written));
reporter.setStatus("wrote " + name);
}
public void close() {
}
}
public static void writeTest(FileSystem fs, boolean fastCheck)
throws Exception {
fs.delete(DATA_DIR, true);
fs.delete(WRITE_DIR, true);
JobConf job = new JobConf(conf, TestFileSystem.class);
job.setBoolean("fs.test.fastCheck", fastCheck);
FileInputFormat.setInputPaths(job, CONTROL_DIR);
job.setInputFormat(SequenceFileInputFormat.class);
job.setMapperClass(WriteMapper.class);
job.setReducerClass(LongSumReducer.class);
FileOutputFormat.setOutputPath(job, WRITE_DIR);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(LongWritable.class);
job.setNumReduceTasks(1);
JobClient.runJob(job);
}
public static
|
WriteMapper
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java
|
{
"start": 1242,
"end": 4684
}
|
class ____<Request extends InstanceShardOperationRequest<Request>> extends LegacyActionRequest
implements
IndicesRequest {
public static final TimeValue DEFAULT_TIMEOUT = TimeValue.timeValueMinutes(1);
protected TimeValue timeout = DEFAULT_TIMEOUT;
protected String index;
// null means its not set, allows to explicitly direct a request to a specific shard
protected ShardId shardId = null;
private String concreteIndex;
protected InstanceShardOperationRequest() {}
protected InstanceShardOperationRequest(@Nullable ShardId shardId, StreamInput in) throws IOException {
super(in);
// Do a full read if no shard id is given (indicating that this instance isn't read as part of a BulkShardRequest or that `in` is of
// an older version) and is in the format used by #writeTo.
if (shardId == null) {
index = in.readString();
this.shardId = in.readOptionalWriteable(ShardId::new);
} else {
// We know a shard id so we read the format given by #writeThin
this.shardId = shardId;
if (in.readBoolean()) {
index = in.readString();
} else {
index = shardId.getIndexName();
}
}
timeout = in.readTimeValue();
concreteIndex = in.readOptionalString();
}
public InstanceShardOperationRequest(String index) {
this.index = index;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (index == null) {
validationException = ValidateActions.addValidationError("index is missing", validationException);
}
return validationException;
}
public String index() {
return index;
}
@Override
public String[] indices() {
return new String[] { index };
}
@Override
public IndicesOptions indicesOptions() {
return IndicesOptions.strictSingleIndexNoExpandForbidClosed();
}
@SuppressWarnings("unchecked")
public final Request index(String index) {
this.index = index;
return (Request) this;
}
public TimeValue timeout() {
return timeout;
}
/**
* A timeout to wait if the index operation can't be performed immediately. Defaults to {@code 1m}.
*/
@SuppressWarnings("unchecked")
public final Request timeout(TimeValue timeout) {
this.timeout = timeout;
return (Request) this;
}
public String concreteIndex() {
return concreteIndex;
}
void concreteIndex(String concreteIndex) {
this.concreteIndex = concreteIndex;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(index);
out.writeOptionalWriteable(shardId);
out.writeTimeValue(timeout);
out.writeOptionalString(concreteIndex);
}
public void writeThin(StreamOutput out) throws IOException {
super.writeTo(out);
if (shardId != null && index.equals(shardId.getIndexName())) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
out.writeString(index);
}
out.writeTimeValue(timeout);
out.writeOptionalString(concreteIndex);
}
}
|
InstanceShardOperationRequest
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/api/long_/LongAssert_isZero_Test.java
|
{
"start": 874,
"end": 1166
}
|
class ____ extends LongAssertBaseTest {
@Override
protected LongAssert invoke_api_method() {
return assertions.isZero();
}
@Override
protected void verify_internal_effects() {
verify(longs).assertIsZero(getInfo(assertions), getActual(assertions));
}
}
|
LongAssert_isZero_Test
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/sql/mysql/grant/MySqlGrantTest_25.java
|
{
"start": 969,
"end": 2362
}
|
class ____ extends MysqlTest {
public void test_0() throws Exception {
String sql = "GRANT REFERENCES ON mydb.* TO 'someuser'@'somehost';";
MySqlStatementParser parser = new MySqlStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement stmt = statementList.get(0);
// print(statementList);
assertEquals(1, statementList.size());
MySqlSchemaStatVisitor visitor = new MySqlSchemaStatVisitor();
stmt.accept(visitor);
String output = SQLUtils.toMySqlString(stmt);
assertEquals("GRANT REFERENCES ON mydb.* TO 'someuser'@'somehost';", //
output);
// System.out.println("Tables : " + visitor.getTables());
// System.out.println("fields : " + visitor.getColumns());
// System.out.println("coditions : " + visitor.getConditions());
// System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(1, visitor.getTables().size());
assertEquals(0, visitor.getColumns().size());
assertEquals(0, visitor.getConditions().size());
// assertTrue(visitor.getTables().containsKey(new TableStat.Name("City")));
// assertTrue(visitor.getTables().containsKey(new TableStat.Name("t2")));
// assertTrue(visitor.getColumns().contains(new Column("t2", "id")));
}
}
|
MySqlGrantTest_25
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/asm/ASMUtilsTest.java
|
{
"start": 260,
"end": 997
}
|
class ____ extends TestCase {
public void test_isAnroid() throws Exception {
Assert.assertTrue(ASMUtils.isAndroid("Dalvik"));
}
public void test_getDescs() throws Exception {
Assert.assertEquals("Lcom/alibaba/fastjson/parser/ParseContext;", ASMUtils.desc(ParseContext.class));
}
public void test_getType_null() throws Exception {
Assert.assertNull(ASMUtils.getMethodType(ParseContext.class, "XX"));
}
public static Type getMethodType(Class<?> clazz, String methodName) {
try {
Method method = clazz.getMethod(methodName);
return method.getGenericReturnType();
} catch (Exception ex) {
return null;
}
}
}
|
ASMUtilsTest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/joinedsubclassbatch/IdentityJoinedSubclassBatchingTest.java
|
{
"start": 4039,
"end": 4566
}
|
class ____ implements Serializable {
public String address;
public String zip;
public String country;
public String getAddress() {
return address;
}
public void setAddress(String address) {
this.address = address;
}
public String getZip() {
return zip;
}
public void setZip(String zip) {
this.zip = zip;
}
public String getCountry() {
return country;
}
public void setCountry(String country) {
this.country = country;
}
}
@Entity(name = "Customer")
public static
|
Address
|
java
|
apache__flink
|
flink-core/src/main/java/org/apache/flink/api/common/serialization/AbstractDeserializationSchema.java
|
{
"start": 1802,
"end": 1932
}
|
class ____. Flink will reflectively determine the type and create the proper
* TypeInformation:
*
* <pre>{@code
* public
|
signature
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/NMNotYetReadyException.java
|
{
"start": 1188,
"end": 1362
}
|
class ____ extends YarnException {
private static final long serialVersionUID = 1L;
public NMNotYetReadyException(String msg) {
super(msg);
}
}
|
NMNotYetReadyException
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/query/sqm/mutation/internal/temptable/ColumnReferenceCheckingSqlAstWalker.java
|
{
"start": 551,
"end": 1840
}
|
class ____ extends AbstractSqlAstWalker {
private final String identificationVariable;
private boolean allColumnReferencesFromIdentificationVariable = true;
public ColumnReferenceCheckingSqlAstWalker(String identificationVariable) {
this.identificationVariable = identificationVariable;
}
public boolean isAllColumnReferencesFromIdentificationVariable() {
return allColumnReferencesFromIdentificationVariable;
}
@Override
public void visitSelectStatement(SelectStatement statement) {
// Ignore subquery
}
@Override
public void visitColumnReference(ColumnReference columnReference) {
if ( allColumnReferencesFromIdentificationVariable && !identificationVariable.equals( columnReference.getQualifier() ) ) {
allColumnReferencesFromIdentificationVariable = false;
}
}
@Override
public void visitFilterPredicate(FilterPredicate filterPredicate) {
allColumnReferencesFromIdentificationVariable = false;
}
@Override
public void visitFilterFragmentPredicate(FilterPredicate.FilterFragmentPredicate fragmentPredicate) {
allColumnReferencesFromIdentificationVariable = false;
}
@Override
public void visitSqlFragmentPredicate(SqlFragmentPredicate predicate) {
allColumnReferencesFromIdentificationVariable = false;
}
}
|
ColumnReferenceCheckingSqlAstWalker
|
java
|
spring-projects__spring-framework
|
spring-aop/src/main/java/org/springframework/aop/support/DelegatePerTargetObjectIntroductionInterceptor.java
|
{
"start": 1808,
"end": 1931
}
|
class ____
* {@link DelegatingIntroductionInterceptor} that suggest a possible refactoring
* to extract a common ancestor
|
and
|
java
|
apache__flink
|
flink-core/src/main/java/org/apache/flink/api/common/typeutils/base/EnumSerializer.java
|
{
"start": 8014,
"end": 8246
}
|
enum ____ was removed.");
}
}
this.enums = previousEnums;
}
@Override
public TypeSerializer<T> restoreSerializer() {
checkState(enumClass != null, "Enum
|
value
|
java
|
spring-projects__spring-framework
|
framework-docs/src/main/java/org/springframework/docs/web/webmvc/mvcconfig/mvcconfigviewresolvers/WebConfiguration.java
|
{
"start": 1056,
"end": 1291
}
|
class ____ implements WebMvcConfigurer {
@Override
public void configureViewResolvers(ViewResolverRegistry registry) {
registry.enableContentNegotiation(new JacksonJsonView());
registry.jsp();
}
}
// end::snippet[]
|
WebConfiguration
|
java
|
apache__camel
|
core/camel-api/src/main/java/org/apache/camel/ProducerTemplate.java
|
{
"start": 30522,
"end": 31219
}
|
class ____)
*/
Exchange request(Endpoint endpoint, Processor processor);
/**
* Sends an exchange to an endpoint using a supplied processor Uses an {@link ExchangePattern#InOut} message
* exchange pattern. <br/>
* <br/>
* <p/>
* <b>Notice:</b> that if the processing of the exchange failed with an Exception it is <b>not</b> thrown from this
* method, but you can access it from the returned exchange using {@link org.apache.camel.Exchange#getException()}.
*
* @param endpointUri the endpoint URI to send to
* @param processor the processor which will populate the exchange before sending
* @return the result (see
|
javadoc
|
java
|
google__dagger
|
javatests/dagger/spi/SpiPluginTest.java
|
{
"start": 13108,
"end": 13912
}
|
interface ____");
}
// SpiDiagnosticReporter uses a shortest path algorithm to determine a dependency trace to a
// binding. Without modifications, this would produce a strange error if a shorter path exists
// from one entrypoint, through a @Module.subcomponents builder binding edge, and to the binding
// usage within the subcomponent. Therefore, when scanning for the shortest path, we only consider
// BindingNodes so we don't cross component boundaries. This test exhibits this case.
@Test
public void shortestPathToBindingExistsThroughSubcomponentBuilder() {
JavaFileObject chain1 =
JavaFileObjects.forSourceLines(
"test.Chain1",
"package test;",
"",
"import javax.inject.Inject;",
"",
"
|
TestComponent
|
java
|
apache__kafka
|
tools/src/main/java/org/apache/kafka/tools/TransactionsCommand.java
|
{
"start": 11129,
"end": 14607
}
|
class ____ extends TransactionsCommand {
static final List<String> HEADERS = List.of(
"ProducerId",
"ProducerEpoch",
"LatestCoordinatorEpoch",
"LastSequence",
"LastTimestamp",
"CurrentTransactionStartOffset"
);
DescribeProducersCommand(Time time) {
super(time);
}
@Override
public String name() {
return "describe-producers";
}
@Override
public void addSubparser(Subparsers subparsers) {
Subparser subparser = subparsers.addParser(name())
.help("describe the states of active producers for a topic partition");
subparser.addArgument("--broker-id")
.help("optional broker id to describe the producer state on a specific replica")
.action(store())
.type(Integer.class)
.required(false);
subparser.addArgument("--topic")
.help("topic name")
.action(store())
.type(String.class)
.required(true);
subparser.addArgument("--partition")
.help("partition number")
.action(store())
.type(Integer.class)
.required(true);
}
@Override
public void execute(Admin admin, Namespace ns, PrintStream out) throws Exception {
DescribeProducersOptions options = new DescribeProducersOptions();
Optional.ofNullable(ns.getInt("broker_id")).ifPresent(options::brokerId);
String topicName = ns.getString("topic");
Integer partitionId = ns.getInt("partition");
TopicPartition topicPartition = new TopicPartition(topicName, partitionId);
final DescribeProducersResult.PartitionProducerState result;
try {
result = admin.describeProducers(Set.of(topicPartition), options)
.partitionResult(topicPartition)
.get();
} catch (ExecutionException e) {
String brokerClause = options.brokerId().isPresent() ?
"broker " + options.brokerId().getAsInt() :
"leader";
printErrorAndExit("Failed to describe producers for partition " +
topicPartition + " on " + brokerClause, e.getCause());
return;
}
List<List<String>> rows = result.activeProducers().stream().map(producerState -> {
String currentTransactionStartOffsetColumnValue =
producerState.currentTransactionStartOffset().isPresent() ?
String.valueOf(producerState.currentTransactionStartOffset().getAsLong()) :
"None";
return List.of(
String.valueOf(producerState.producerId()),
String.valueOf(producerState.producerEpoch()),
String.valueOf(producerState.coordinatorEpoch().orElse(-1)),
String.valueOf(producerState.lastSequence()),
String.valueOf(producerState.lastTimestamp()),
currentTransactionStartOffsetColumnValue
);
}).collect(Collectors.toList());
ToolsUtils.prettyPrintTable(HEADERS, rows, out);
}
}
static
|
DescribeProducersCommand
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/boot/model/source/spi/AttributeRole.java
|
{
"start": 394,
"end": 917
}
|
class ____ extends AbstractAttributeKey {
public static final char DELIMITER = '.';
public AttributeRole(String base) {
super( base );
}
@Override
protected char getDelimiter() {
return DELIMITER;
}
@Override
public AttributeRole append(String property) {
return new AttributeRole( this, property );
}
@Override
public AttributeRole getParent() {
return (AttributeRole) super.getParent();
}
private AttributeRole(AttributeRole parent, String property) {
super( parent, property );
}
}
|
AttributeRole
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/TransportTLSBootstrapCheckTests.java
|
{
"start": 756,
"end": 4053
}
|
class ____ extends AbstractBootstrapCheckTestCase {
public void testBootstrapCheckOnEmptyMetadata() {
assertTrue(new TransportTLSBootstrapCheck().check(emptyContext).isFailure());
assertTrue(
new TransportTLSBootstrapCheck().check(
createTestContext(Settings.builder().put("xpack.security.transport.ssl.enabled", false).build(), Metadata.EMPTY_METADATA)
).isFailure()
);
assertTrue(
new TransportTLSBootstrapCheck().check(
createTestContext(Settings.builder().put("xpack.security.transport.ssl.enabled", true).build(), Metadata.EMPTY_METADATA)
).isSuccess()
);
}
public void testBootstrapCheckFailureOnAnyLicense() throws Exception {
final OperationMode mode = randomFrom(
OperationMode.ENTERPRISE,
OperationMode.PLATINUM,
OperationMode.GOLD,
OperationMode.STANDARD,
OperationMode.BASIC,
OperationMode.TRIAL
);
final Settings.Builder settings = Settings.builder();
if (randomBoolean()) {
// randomise between default-false & explicit-false
settings.put("xpack.security.transport.ssl.enabled", false);
}
if (randomBoolean()) {
// randomise between default-true & explicit-true
settings.put("xpack.security.enabled", true);
}
final BootstrapCheck.BootstrapCheckResult result = runBootstrapCheck(mode, settings);
assertTrue("Expected bootstrap failure", result.isFailure());
assertEquals(
"Transport SSL must be enabled if security is enabled. Please set "
+ "[xpack.security.transport.ssl.enabled] to [true] or disable security by setting "
+ "[xpack.security.enabled] to [false]",
result.getMessage()
);
}
public void testBootstrapCheckSucceedsWithTlsEnabledOnAnyLicense() throws Exception {
final OperationMode mode = randomFrom(
OperationMode.ENTERPRISE,
OperationMode.PLATINUM,
OperationMode.GOLD,
OperationMode.STANDARD,
OperationMode.BASIC,
OperationMode.TRIAL
);
final Settings.Builder settings = Settings.builder().put("xpack.security.transport.ssl.enabled", true);
final BootstrapCheck.BootstrapCheckResult result = runBootstrapCheck(mode, settings);
assertSuccess(result);
}
public BootstrapCheck.BootstrapCheckResult runBootstrapCheck(OperationMode mode, Settings.Builder settings) throws Exception {
final License license = TestUtils.generateSignedLicense(mode.description(), TimeValue.timeValueHours(24));
Metadata.Builder builder = Metadata.builder();
TestUtils.putLicense(builder, license);
Metadata metadata = builder.build();
final BootstrapContext context = createTestContext(settings.build(), metadata);
return new TransportTLSBootstrapCheck().check(context);
}
public void assertSuccess(BootstrapCheck.BootstrapCheckResult result) {
if (result.isFailure()) {
fail("Bootstrap check failed unexpectedly: " + result.getMessage());
}
}
}
|
TransportTLSBootstrapCheckTests
|
java
|
google__error-prone
|
check_api/src/test/java/com/google/errorprone/util/FindIdentifiersTest.java
|
{
"start": 3919,
"end": 4402
}
|
class ____ {
private void doIt() {
String s1 = "";
while (true) {
// BUG: Diagnostic contains: [s1]
String.format(s1);
}
}
}
""")
.doTest();
}
@Test
public void findAllIdentsParams() {
CompilationTestHelper.newInstance(PrintIdents.class, getClass())
.addSourceLines(
"Test.java",
"""
|
Test
|
java
|
spring-projects__spring-boot
|
core/spring-boot/src/test/java/org/springframework/boot/logging/structured/StructuredLogFormatterFactoryTests.java
|
{
"start": 7201,
"end": 7609
}
|
class ____
implements StructuredLoggingJsonMembersCustomizer<Object> {
@Override
public void customize(Members<Object> members) {
UnaryOperator<@Nullable String> toUpperCase = (string) -> (string != null) ? string.toUpperCase(Locale.ROOT)
: null;
members.applyingValueProcessor(ValueProcessor.of(String.class, toUpperCase));
}
}
static
|
ObjectMembersStructuredLoggingJsonMembersCustomizer
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest-client/runtime/src/main/java/io/quarkus/rest/client/reactive/ClientFormParam.java
|
{
"start": 1552,
"end": 2274
}
|
interface ____ {
*
* static AtomicInteger counter = new AtomicInteger(1);
*
* default String determineFormParamValue(String name) {
* if ("SomeParam".equals(name)) {
* return "InvokedCount " + counter.getAndIncrement();
* }
* throw new UnsupportedOperationException("unknown name");
* }
*
* {@literal @}ClientFormParam(name="SomeName", value="ExplicitlyDefinedValue")
* {@literal @}GET
* Response useExplicitFormParamValue();
*
* {@literal @}ClientFormParam(name="SomeName", value="{determineFormParamValue}")
* {@literal @}DELETE
* Response useComputedFormParamValue();
* }
* </pre>
*
* The implementation should fail to deploy a client
|
MyClient
|
java
|
apache__kafka
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerHeartbeatRequestManager.java
|
{
"start": 2251,
"end": 9241
}
|
class ____ extends AbstractHeartbeatRequestManager<ConsumerGroupHeartbeatResponse> {
/**
* Membership manager for consumer groups
*/
private final ConsumerMembershipManager membershipManager;
/**
* HeartbeatState manages building the heartbeat requests correctly
*/
private final HeartbeatState heartbeatState;
public ConsumerHeartbeatRequestManager(
final LogContext logContext,
final Time time,
final ConsumerConfig config,
final CoordinatorRequestManager coordinatorRequestManager,
final SubscriptionState subscriptions,
final ConsumerMembershipManager membershipManager,
final BackgroundEventHandler backgroundEventHandler,
final Metrics metrics) {
super(logContext, time, config, coordinatorRequestManager, backgroundEventHandler,
new HeartbeatMetricsManager(metrics));
this.membershipManager = membershipManager;
this.heartbeatState = new HeartbeatState(subscriptions, membershipManager, maxPollIntervalMs);
}
// Visible for testing
ConsumerHeartbeatRequestManager(
final LogContext logContext,
final Timer timer,
final ConsumerConfig config,
final CoordinatorRequestManager coordinatorRequestManager,
final ConsumerMembershipManager membershipManager,
final HeartbeatState heartbeatState,
final HeartbeatRequestState heartbeatRequestState,
final BackgroundEventHandler backgroundEventHandler,
final Metrics metrics) {
super(logContext, timer, config, coordinatorRequestManager, heartbeatRequestState, backgroundEventHandler,
new HeartbeatMetricsManager(metrics));
this.membershipManager = membershipManager;
this.heartbeatState = heartbeatState;
}
/**
* {@inheritDoc}
*/
@Override
public boolean handleSpecificFailure(Throwable exception) {
boolean errorHandled = false;
String errorMessage = exception.getMessage();
if (exception instanceof UnsupportedVersionException) {
String message = CONSUMER_PROTOCOL_NOT_SUPPORTED_MSG;
if (errorMessage.equals(REGEX_RESOLUTION_NOT_SUPPORTED_MSG)) {
message = REGEX_RESOLUTION_NOT_SUPPORTED_MSG;
logger.error("{} regex resolution not supported: {}", heartbeatRequestName(), message);
} else {
logger.error("{} failed due to unsupported version while sending request: {}", heartbeatRequestName(), errorMessage);
}
handleFatalFailure(new UnsupportedVersionException(message, exception));
errorHandled = true;
}
return errorHandled;
}
/**
* {@inheritDoc}
*/
@Override
public boolean handleSpecificExceptionInResponse(final ConsumerGroupHeartbeatResponse response, final long currentTimeMs) {
Errors error = errorForResponse(response);
String errorMessage = errorMessageForResponse(response);
boolean errorHandled;
switch (error) {
// Broker responded with HB not supported, meaning the new protocol is not enabled, so propagate
// custom message for it. Note that the case where the protocol is not supported at all should fail
// on the client side when building the request and checking supporting APIs (handled on onFailure).
case UNSUPPORTED_VERSION:
logger.error("{} failed due to unsupported version response on broker side: {}",
heartbeatRequestName(), CONSUMER_PROTOCOL_NOT_SUPPORTED_MSG);
handleFatalFailure(error.exception(CONSUMER_PROTOCOL_NOT_SUPPORTED_MSG));
errorHandled = true;
break;
case UNRELEASED_INSTANCE_ID:
logger.error("{} failed due to unreleased instance id {}: {}",
heartbeatRequestName(), membershipManager.groupInstanceId().orElse("null"), errorMessage);
handleFatalFailure(error.exception(errorMessage));
errorHandled = true;
break;
case FENCED_INSTANCE_ID:
logger.error("{} failed due to fenced instance id {}: {}. " +
"This is expected in the case that the member was removed from the group " +
"by an admin client, and another member joined using the same group instance id.",
heartbeatRequestName(), membershipManager.groupInstanceId().orElse("null"), errorMessage);
handleFatalFailure(error.exception(errorMessage));
errorHandled = true;
break;
default:
errorHandled = false;
}
return errorHandled;
}
/**
* {@inheritDoc}
*/
@Override
public void resetHeartbeatState() {
heartbeatState.reset();
}
/**
* {@inheritDoc}
*/
@Override
public NetworkClientDelegate.UnsentRequest buildHeartbeatRequest() {
return new NetworkClientDelegate.UnsentRequest(
new ConsumerGroupHeartbeatRequest.Builder(this.heartbeatState.buildRequestData()),
coordinatorRequestManager.coordinator());
}
/**
* {@inheritDoc}
*/
@Override
public String heartbeatRequestName() {
return "ConsumerGroupHeartbeatRequest";
}
/**
* {@inheritDoc}
*/
@Override
public Errors errorForResponse(ConsumerGroupHeartbeatResponse response) {
return Errors.forCode(response.data().errorCode());
}
/**
* {@inheritDoc}
*/
@Override
public String errorMessageForResponse(ConsumerGroupHeartbeatResponse response) {
return response.data().errorMessage();
}
/**
* {@inheritDoc}
*/
@Override
public long heartbeatIntervalForResponse(ConsumerGroupHeartbeatResponse response) {
return response.data().heartbeatIntervalMs();
}
/**
* {@inheritDoc}
*/
@Override
public ConsumerMembershipManager membershipManager() {
return membershipManager;
}
@Override
protected boolean shouldSendLeaveHeartbeatNow() {
// If the consumer has dynamic membership,
// we should skip the leaving heartbeat when leaveGroupOperation is REMAIN_IN_GROUP
if (membershipManager.groupInstanceId().isEmpty() && REMAIN_IN_GROUP == membershipManager.leaveGroupOperation())
return false;
return membershipManager().state() == MemberState.LEAVING;
}
/**
* Builds the heartbeat requests correctly, ensuring that all information is sent according to
* the protocol, but subsequent requests do not send information which has not changed. This
* is important to ensure that reconciliation completes successfully.
*/
static
|
ConsumerHeartbeatRequestManager
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/hql/fetchAndJoin/Child.java
|
{
"start": 564,
"end": 1270
}
|
class ____ {
@Id
@GeneratedValue
private long id;
@OneToMany(fetch = FetchType.LAZY, cascade = CascadeType.ALL)
@JoinColumn
private Set<GrandChild> grandChildren = new HashSet<GrandChild>();
public Child() {
}
public Child(String value) {
this.value = value;
}
@Column(name = "val")
private String value;
public long getId() {
return id;
}
public void setId(long id) {
this.id = id;
}
public Set<GrandChild> getGrandChildren() {
return grandChildren;
}
public void setGrandChildren(Set<GrandChild> grandChildren) {
this.grandChildren = grandChildren;
}
public String getValue() {
return value;
}
public void setValue(String value) {
this.value = value;
}
}
|
Child
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/matchers/MatchersTest.java
|
{
"start": 14705,
"end": 15033
}
|
class ____ {}
""")
.doTest();
CompilationTestHelper.newInstance(PackageNameChecker.class, getClass())
.addSourceLines(
"test/foo/ClassName.java",
"package testyfoo;",
// No match, the "." is escaped correctly in the regex "test.foo".
"public
|
ClassName
|
java
|
google__auto
|
value/src/it/functional/src/test/java/com/google/auto/value/AutoBuilderTest.java
|
{
"start": 17931,
"end": 18291
}
|
class ____<E> extends SingletonSet<E> {
private final Class<?> type;
<T extends E> TypedSingletonSet(T element, Class<T> type) {
super(element);
this.type = type;
}
@Override
public String toString() {
return type.getName() + super.toString();
}
}
@AutoBuilder(ofClass = TypedSingletonSet.class)
|
TypedSingletonSet
|
java
|
apache__camel
|
components/camel-test/camel-test-spring-junit5/src/main/java/org/apache/camel/test/spring/junit5/CamelAnnotationsHandler.java
|
{
"start": 4049,
"end": 5635
}
|
class ____ executed
*/
public static void handleDisableJmx(Class<?> testClass) {
boolean coverage = isRouteCoverageEnabled(testClass.isAnnotationPresent(EnableRouteCoverage.class));
if (isCamelDebugPresent()) {
LOGGER.info("Enabling Camel JMX as camel-debug has been found in the classpath.");
DefaultCamelContext.setDisableJmx(false);
} else if (testClass.isAnnotationPresent(DisableJmx.class)) {
if (testClass.getAnnotation(DisableJmx.class).value()) {
LOGGER.info("Disabling Camel JMX globally as DisableJmx annotation was found and disableJmx is set to true.");
DefaultCamelContext.setDisableJmx(true);
} else {
LOGGER.info("Enabling Camel JMX as DisableJmx annotation was found and disableJmx is set to false.");
DefaultCamelContext.setDisableJmx(false);
}
} else if (!coverage) {
// route coverage need JMX so do not disable it by default
LOGGER.info(
"Disabling Camel JMX globally for tests by default. Use the DisableJMX annotation to override the default setting.");
DefaultCamelContext.setDisableJmx(true);
} else {
LOGGER.info("Enabling Camel JMX as EnableRouteCoverage is used.");
DefaultCamelContext.setDisableJmx(false);
}
}
/**
* Handles enabling route coverage based on {@link EnableRouteCoverage}.
*
* @param context the initialized Spring context
* @param testClass the test
|
being
|
java
|
elastic__elasticsearch
|
build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/JarHellPrecommitPlugin.java
|
{
"start": 820,
"end": 1689
}
|
class ____ extends PrecommitPlugin {
@Override
public TaskProvider<? extends Task> createTask(Project project) {
project.getPluginManager().apply(JarHellPlugin.class);
if (project.getPath().equals(":libs:core") == false) {
// ideally we would configure this as a default dependency. But Default dependencies do not work correctly
// with gradle project dependencies as they're resolved to late in the build and don't setup according task
// dependencies properly
var elasticsearchCoreProject = project.findProject(":libs:core");
if (elasticsearchCoreProject != null) {
project.getDependencies().add("jarHell", elasticsearchCoreProject);
}
}
return project.getTasks().withType(JarHellTask.class).named("jarHell");
}
}
|
JarHellPrecommitPlugin
|
java
|
elastic__elasticsearch
|
modules/lang-painless/src/main/java/org/elasticsearch/painless/ir/LoadListShortcutNode.java
|
{
"start": 616,
"end": 1131
}
|
class ____ extends ExpressionNode {
/* ---- begin visitor ---- */
@Override
public <Scope> void visit(IRTreeVisitor<Scope> irTreeVisitor, Scope scope) {
irTreeVisitor.visitLoadListShortcut(this, scope);
}
@Override
public <Scope> void visitChildren(IRTreeVisitor<Scope> irTreeVisitor, Scope scope) {
// do nothing; terminal node
}
/* ---- end visitor ---- */
public LoadListShortcutNode(Location location) {
super(location);
}
}
|
LoadListShortcutNode
|
java
|
apache__kafka
|
streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamMapValues.java
|
{
"start": 1188,
"end": 1641
}
|
class ____<KIn, VIn, VOut> implements FixedKeyProcessorSupplier<KIn, VIn, VOut> {
private final ValueMapperWithKey<? super KIn, ? super VIn, ? extends VOut> mapper;
public KStreamMapValues(final ValueMapperWithKey<? super KIn, ? super VIn, ? extends VOut> mapper) {
this.mapper = mapper;
}
@Override
public FixedKeyProcessor<KIn, VIn, VOut> get() {
return new KStreamMapProcessor();
}
private
|
KStreamMapValues
|
java
|
spring-projects__spring-boot
|
module/spring-boot-actuator-autoconfigure/src/main/java/org/springframework/boot/actuate/autoconfigure/endpoint/condition/ConditionalOnAvailableEndpoint.java
|
{
"start": 3838,
"end": 4595
}
|
interface ____ {
/**
* Alias for {@link #endpoint()}.
* @return the endpoint type to check
* @since 3.4.0
*/
@AliasFor(attribute = "endpoint")
Class<?> value() default Void.class;
/**
* The endpoint type that should be checked. Inferred when the return type of the
* {@code @Bean} method is either an {@link Endpoint @Endpoint} or an
* {@link EndpointExtension @EndpointExtension}.
* @return the endpoint type to check
*/
@AliasFor(attribute = "value")
Class<?> endpoint() default Void.class;
/**
* Technologies to check the exposure of the endpoint on while considering it to be
* available.
* @return the technologies to check
* @since 2.6.0
*/
EndpointExposure[] exposure() default {};
}
|
ConditionalOnAvailableEndpoint
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/annotations/fetchprofile/MoreFetchProfileTest.java
|
{
"start": 695,
"end": 2168
}
|
class ____ {
@Test
public void testFetchWithTwoOverrides(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
session.enableFetchProfile( "customer-with-orders-and-country" );
Country ctry = new Country();
ctry.setName( "France" );
Order o = new Order();
o.setCountry( ctry );
o.setDeliveryDate( new Date() );
o.setOrderNumber( 1 );
Order o2 = new Order();
o2.setCountry( ctry );
o2.setDeliveryDate( new Date() );
o2.setOrderNumber( 2 );
Customer c = new Customer();
c.setCustomerNumber( 1 );
c.setName( "Emmanuel" );
c.getOrders().add( o );
c.setLastOrder( o2 );
session.persist( ctry );
session.persist( o );
session.persist( o2 );
session.persist( c );
session.flush();
session.clear();
c = session.find( Customer.class, c.getId() );
assertThat( Hibernate.isInitialized( c.getLastOrder() ) ).isTrue();
assertThat( Hibernate.isInitialized( c.getOrders() ) ).isTrue();
for ( Order so : c.getOrders() ) {
assertThat( Hibernate.isInitialized( so.getCountry() ) ).isTrue();
}
final Order order = c.getOrders().iterator().next();
c.getOrders().remove( order );
session.remove( c );
final Order lastOrder = c.getLastOrder();
c.setLastOrder( null );
session.remove( order.getCountry() );
session.remove( lastOrder );
session.remove( order );
}
);
}
}
|
MoreFetchProfileTest
|
java
|
apache__camel
|
components/camel-kubernetes/src/test/java/org/apache/camel/component/kubernetes/consumer/integration/deployments/KubernetesDeploymentsConsumerClusterwideLabelsIT.java
|
{
"start": 1981,
"end": 3114
}
|
class ____ extends KubernetesConsumerTestSupport {
@Test
public void clusterWideLabelsTest() {
createDeployment(ns1, "d1", LABELS);
createDeployment(ns2, "d2", LABELS);
createDeployment(ns2, "d3", Map.of("otherKey", "otherValue"));
Awaitility.await().atMost(5, TimeUnit.SECONDS).untilAsserted(() -> {
final List<String> list = result.getExchanges().stream().map(ex -> ex.getIn().getBody(String.class)).toList();
assertThat(list, allOf(
hasItem(containsString("d1")), hasItem(containsString("d2")), not(hasItem(containsString("d3")))));
});
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
fromF("kubernetes-deployments://%s?oauthToken=%s&labelKey=%s&labelValue=%s",
host, authToken, "testkey", "testvalue")
.process(new KubernetesProcessor())
.to(result);
}
};
}
}
|
KubernetesDeploymentsConsumerClusterwideLabelsIT
|
java
|
apache__camel
|
components/camel-spring-parent/camel-spring-ai/camel-spring-ai-chat/src/main/java/org/apache/camel/component/springai/chat/SpringAiChatConstants.java
|
{
"start": 5046,
"end": 6900
}
|
class ____ use for entity response conversion", javaType = "Class<?>")
public static final String ENTITY_CLASS = "CamelSpringAiChatEntityClass";
@Metadata(description = "Metadata to attach to user messages", javaType = "java.util.Map<String, Object>")
public static final String USER_METADATA = "CamelSpringAiChatUserMetadata";
@Metadata(description = "Metadata to attach to system messages", javaType = "java.util.Map<String, Object>")
public static final String SYSTEM_METADATA = "CamelSpringAiChatSystemMetadata";
@Metadata(description = "Conversation ID for managing separate conversation contexts in chat memory", javaType = "String")
public static final String CONVERSATION_ID = "CamelSpringAiChatConversationId";
@Metadata(description = "Maximum file size in bytes for multimodal content. Overrides endpoint configuration.",
javaType = "Long")
public static final String MAX_FILE_SIZE = "CamelSpringAiChatMaxFileSize";
@Metadata(description = "The reason why the chat response generation stopped (e.g., STOP, LENGTH, TOOL_CALLS)",
javaType = "String")
public static final String FINISH_REASON = "CamelSpringAiChatFinishReason";
@Metadata(description = "The name of the AI model used to generate the response", javaType = "String")
public static final String MODEL_NAME = "CamelSpringAiChatModelName";
@Metadata(description = "The unique ID of the chat response", javaType = "String")
public static final String RESPONSE_ID = "CamelSpringAiChatResponseId";
@Metadata(description = "Full response metadata as a Map containing all available metadata fields",
javaType = "java.util.Map<String, Object>")
public static final String RESPONSE_METADATA = "CamelSpringAiChatResponseMetadata";
private SpringAiChatConstants() {
}
}
|
to
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/metamodel/mapping/internal/DiscriminatedAssociationMapping.java
|
{
"start": 1865,
"end": 12292
}
|
class ____ implements MappingType, FetchOptions {
public static DiscriminatedAssociationMapping from(
NavigableRole containerRole,
JavaType<?> baseAssociationJtd,
DiscriminatedAssociationModelPart declaringModelPart,
AnyType anyType,
Any bootValueMapping,
MappingModelCreationProcess creationProcess) {
final var dialect = creationProcess.getCreationContext().getDialect();
final String tableName = MappingModelCreationHelper.getTableIdentifierExpression(
bootValueMapping.getTable(),
creationProcess
);
assert bootValueMapping.getColumnSpan() == 2;
final var columnIterator = bootValueMapping.getSelectables().iterator();
assert columnIterator.hasNext();
final var metaSelectable = columnIterator.next();
assert columnIterator.hasNext();
final var keySelectable = columnIterator.next();
assert !columnIterator.hasNext();
assert !metaSelectable.isFormula();
assert !keySelectable.isFormula();
final var metaColumn = (Column) metaSelectable;
final var keyColumn = (Column) keySelectable;
final SelectablePath parentSelectablePath =
declaringModelPart.asAttributeMapping() != null
? getSelectablePath( declaringModelPart.asAttributeMapping().getDeclaringType() )
: null;
final var metaType = (MetaType) anyType.getDiscriminatorType();
final var discriminatorPart = new AnyDiscriminatorPart(
containerRole.append( AnyDiscriminatorPart.ROLE_NAME ),
declaringModelPart,
tableName,
metaColumn.getText( dialect ),
parentSelectablePath != null ? parentSelectablePath.append( metaColumn.getQuotedName( dialect ) )
: new SelectablePath( metaColumn.getQuotedName( dialect ) ),
metaColumn.getCustomReadExpression(),
metaColumn.getCustomWriteExpression(),
metaColumn.getSqlType(),
metaColumn.getLength(),
metaColumn.getArrayLength(),
metaColumn.getPrecision(),
metaColumn.getScale(),
bootValueMapping.isColumnInsertable( 0 ),
bootValueMapping.isColumnUpdateable( 0 ),
bootValueMapping.isPartitionKey(),
(BasicType<?>) metaType.getBaseType(),
metaType.getDiscriminatorValuesToEntityNameMap(),
metaType.getImplicitValueStrategy(),
creationProcess.getCreationContext().getSessionFactory().getMappingMetamodel()
);
final var keyType = (BasicType<?>) anyType.getIdentifierType();
final var keyPart = new AnyKeyPart(
containerRole.append( AnyKeyPart.KEY_NAME ),
declaringModelPart,
tableName,
keyColumn.getText( dialect ),
parentSelectablePath != null ? parentSelectablePath.append( keyColumn.getQuotedName( dialect ) )
: new SelectablePath( keyColumn.getQuotedName( dialect ) ),
keyColumn.getCustomReadExpression(),
keyColumn.getCustomWriteExpression(),
keyColumn.getSqlType(),
keyColumn.getLength(),
keyColumn.getArrayLength(),
keyColumn.getPrecision(),
keyColumn.getScale(),
bootValueMapping.isNullable(),
bootValueMapping.isColumnInsertable( 1 ),
bootValueMapping.isColumnUpdateable( 1 ),
bootValueMapping.isPartitionKey(),
keyType
);
return new DiscriminatedAssociationMapping(
declaringModelPart,
discriminatorPart,
keyPart,
baseAssociationJtd,
bootValueMapping.isLazy()
? FetchTiming.DELAYED
: FetchTiming.IMMEDIATE,
creationProcess.getCreationContext().getSessionFactory()
);
}
private final DiscriminatedAssociationModelPart modelPart;
private final AnyDiscriminatorPart discriminatorPart;
private final BasicValuedModelPart keyPart;
private final JavaType<?> baseAssociationJtd;
private final FetchTiming fetchTiming;
private final SessionFactoryImplementor sessionFactory;
public DiscriminatedAssociationMapping(
DiscriminatedAssociationModelPart modelPart,
AnyDiscriminatorPart discriminatorPart,
BasicValuedModelPart keyPart,
JavaType<?> baseAssociationJtd,
FetchTiming fetchTiming,
SessionFactoryImplementor sessionFactory) {
this.modelPart = modelPart;
this.discriminatorPart = discriminatorPart;
this.keyPart = keyPart;
this.baseAssociationJtd = baseAssociationJtd;
this.fetchTiming = fetchTiming;
this.sessionFactory = sessionFactory;
}
public DiscriminatedAssociationModelPart getModelPart() {
return modelPart;
}
public DiscriminatorMapping getDiscriminatorPart() {
return discriminatorPart;
}
public BasicValuedModelPart getKeyPart() {
return keyPart;
}
public Object resolveDiscriminatorValueToEntityMapping(EntityMappingType entityMappingType) {
final DiscriminatorValueDetails details =
discriminatorPart.getValueConverter()
.getDetailsForEntityName( entityMappingType.getEntityName() );
return details != null
? details.getValue()
: null;
}
public EntityMappingType resolveDiscriminatorValueToEntityMapping(Object discriminatorValue) {
final var details =
discriminatorPart.getValueConverter().
getDetailsForDiscriminatorValue( discriminatorValue );
return details == null ? null : details.getIndicatedEntity();
}
public <X, Y> int breakDownJdbcValues(
int offset,
X x,
Y y,
Object domainValue,
ModelPart.JdbcValueBiConsumer<X, Y> valueConsumer,
SharedSessionContractImplementor session) {
if ( domainValue == null ) {
valueConsumer.consume( offset, x, y, null, getDiscriminatorPart() );
valueConsumer.consume( offset + 1, x, y, null, getKeyPart() );
return getDiscriminatorPart().getJdbcTypeCount() + getKeyPart().getJdbcTypeCount();
}
else {
final var concreteMappingType = determineConcreteType( domainValue, session );
final Object discriminator = getModelPart().resolveDiscriminatorForEntityType( concreteMappingType );
final Object disassembledDiscriminator = getDiscriminatorPart().disassemble( discriminator, session );
valueConsumer.consume( offset, x, y, disassembledDiscriminator, getDiscriminatorPart() );
final var identifierMapping = concreteMappingType.getIdentifierMapping();
final Object identifier = identifierMapping.getIdentifier( domainValue );
final Object disassembledKey = getKeyPart().disassemble( identifier, session );
valueConsumer.consume( offset + 1, x, y, disassembledKey, getKeyPart() );
}
return getDiscriminatorPart().getJdbcTypeCount() + getKeyPart().getJdbcTypeCount();
}
public <X, Y> int decompose(
int offset,
X x,
Y y,
Object domainValue,
ModelPart.JdbcValueBiConsumer<X, Y> valueConsumer,
SharedSessionContractImplementor session) {
if ( domainValue == null ) {
valueConsumer.consume( offset, x, y, null, getDiscriminatorPart() );
valueConsumer.consume( offset + 1, x, y, null, getKeyPart() );
}
else {
final var concreteMappingType = determineConcreteType( domainValue, session );
final Object discriminator = getModelPart().resolveDiscriminatorForEntityType( concreteMappingType );
getDiscriminatorPart().decompose( discriminator, offset, x, y, valueConsumer, session );
final var identifierMapping = concreteMappingType.getIdentifierMapping();
final Object identifier = identifierMapping.getIdentifier( domainValue );
getKeyPart().decompose( identifier, offset + 1, x, y, valueConsumer, session );
}
return getDiscriminatorPart().getJdbcTypeCount() + getKeyPart().getJdbcTypeCount();
}
private EntityMappingType determineConcreteType(Object entity, SharedSessionContractImplementor session) {
final String entityName =
session == null
? sessionFactory.bestGuessEntityName( entity )
: session.bestGuessEntityName( entity );
return sessionFactory.getMappingMetamodel()
.getEntityDescriptor( entityName );
}
public ModelPart findSubPart(String name, EntityMappingType treatTarget) {
if ( AnyDiscriminatorPart.ROLE_NAME.equals( name ) ) {
return getDiscriminatorPart();
}
if ( AnyKeyPart.KEY_NAME.equals( name ) ) {
return getKeyPart();
}
if ( treatTarget != null ) {
// make sure the treat-target is one of the mapped entities
ensureMapped( treatTarget );
return resolveAssociatedSubPart( name, treatTarget );
}
return discriminatorPart.getValueConverter().fromValueDetails( (detail) -> {
try {
final var subPart = resolveAssociatedSubPart( name, detail.getIndicatedEntity() );
if ( subPart != null ) {
return subPart;
}
}
catch (Exception ignore) {
}
return null;
} );
}
private ModelPart resolveAssociatedSubPart(String name, EntityMappingType entityMapping) {
final var identifierMapping = entityMapping.getIdentifierMapping();
if ( identifierMapping.getPartName().equals( name ) ) {
return getKeyPart();
}
if ( identifierMapping instanceof SingleAttributeIdentifierMapping ) {
final String idAttrName = identifierMapping.getAttributeName();
if ( idAttrName.equals( name ) ) {
return getKeyPart();
}
}
return entityMapping.findSubPart( name );
}
private void ensureMapped(EntityMappingType treatTarget) {
assert treatTarget != null;
final DiscriminatorValueDetails details = discriminatorPart.getValueConverter().getDetailsForEntityName( treatTarget.getEntityName() );
if ( details == null ) {
throw new IllegalArgumentException(
String.format(
Locale.ROOT,
"Treat-target [`%s`] is not not an entity mapped by ANY value : %s",
treatTarget.getEntityName(),
modelPart.getNavigableRole()
)
);
}
}
public MappingType getPartMappingType() {
return this;
}
public JavaType<?> getJavaType() {
return baseAssociationJtd;
}
@Override
public JavaType<?> getMappedJavaType() {
return baseAssociationJtd;
}
@Override
public FetchStyle getStyle() {
return FetchStyle.SELECT;
}
@Override
public FetchTiming getTiming() {
return fetchTiming;
}
public Fetch generateFetch(
FetchParent fetchParent,
NavigablePath fetchablePath,
FetchTiming fetchTiming,
boolean selected,
String resultVariable,
DomainResultCreationState creationState) {
return new DiscriminatedEntityFetch(
fetchablePath,
baseAssociationJtd,
modelPart,
fetchTiming,
fetchParent,
creationState
);
}
public <T> DomainResult<T> createDomainResult(
NavigablePath navigablePath,
TableGroup tableGroup,
String resultVariable,
DomainResultCreationState creationState) {
return new DiscriminatedEntityResult<>(
navigablePath,
baseAssociationJtd,
modelPart,
resultVariable,
creationState
);
}
}
|
DiscriminatedAssociationMapping
|
java
|
junit-team__junit5
|
junit-jupiter-params/src/main/java/org/junit/jupiter/params/provider/CsvFileSources.java
|
{
"start": 1396,
"end": 1545
}
|
interface ____ {
/**
* An array of one or more {@link CsvFileSource @CsvFileSource}
* annotations.
*/
CsvFileSource[] value();
}
|
CsvFileSources
|
java
|
grpc__grpc-java
|
core/src/test/java/io/grpc/internal/ReschedulerTest.java
|
{
"start": 3173,
"end": 3294
}
|
class ____ implements Runnable {
boolean ran;
@Override
public void run() {
ran = true;
}
}
}
|
Runner
|
java
|
spring-projects__spring-boot
|
module/spring-boot-jdbc/src/test/java/org/springframework/boot/jdbc/metrics/DataSourcePoolMetricsTests.java
|
{
"start": 1439,
"end": 2032
}
|
class ____ {
@Test
void dataSourceIsInstrumented() {
new ApplicationContextRunner().withUserConfiguration(DataSourceConfig.class, MetricsApp.class)
.withConfiguration(AutoConfigurations.of(DataSourceAutoConfiguration.class))
.withPropertyValues("spring.datasource.generate-unique-name=true", "metrics.use-global-registry=false")
.run((context) -> {
context.getBean(DataSource.class).getConnection().getMetaData();
context.getBean(MeterRegistry.class).get("jdbc.connections.max").meter();
});
}
@Configuration(proxyBeanMethods = false)
static
|
DataSourcePoolMetricsTests
|
java
|
apache__flink
|
flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamTaskMultipleInputSelectiveReadingTest.java
|
{
"start": 2253,
"end": 10523
}
|
class ____ {
private static final StreamRecord<String>[] INPUT1 =
new StreamRecord[] {
new StreamRecord<>("Hello-1"),
new StreamRecord<>("Hello-2"),
new StreamRecord<>("Hello-3")
};
private static final StreamRecord<Integer>[] INPUT2 =
new StreamRecord[] {
new StreamRecord<>(1),
new StreamRecord<>(2),
new StreamRecord<>(3),
new StreamRecord<>(4)
};
@Test
void testAnyOrderedReading() throws Exception {
ArrayDeque<Object> expectedOutput = new ArrayDeque<>();
expectedOutput.add(new StreamRecord<>("[1]: Hello-1"));
expectedOutput.add(new StreamRecord<>("[2]: 1"));
expectedOutput.add(new StreamRecord<>("[1]: Hello-2"));
expectedOutput.add(new StreamRecord<>("[2]: 2"));
expectedOutput.add(new StreamRecord<>("[1]: Hello-3"));
expectedOutput.add(new StreamRecord<>("[2]: 3"));
expectedOutput.add(new StreamRecord<>("[2]: 4"));
testInputSelection(
new TestAnyModeMultipleInputStreamOperator.Factory(), false, expectedOutput, true);
}
@Test
void testAnyUnorderedReading() throws Exception {
ArrayDeque<Object> expectedOutput = new ArrayDeque<>();
expectedOutput.add(new StreamRecord<>("[1]: Hello-1"));
expectedOutput.add(new StreamRecord<>("[2]: 1"));
expectedOutput.add(new StreamRecord<>("[1]: Hello-2"));
expectedOutput.add(new StreamRecord<>("[2]: 2"));
expectedOutput.add(new StreamRecord<>("[1]: Hello-3"));
expectedOutput.add(new StreamRecord<>("[2]: 3"));
expectedOutput.add(new StreamRecord<>("[2]: 4"));
testInputSelection(
new TestAnyModeMultipleInputStreamOperator.Factory(), true, expectedOutput, false);
}
@Test
void testSequentialReading() throws Exception {
ArrayDeque<Object> expectedOutput = new ArrayDeque<>();
expectedOutput.add(new StreamRecord<>("[1]: Hello-1"));
expectedOutput.add(new StreamRecord<>("[1]: Hello-2"));
expectedOutput.add(new StreamRecord<>("[1]: Hello-3"));
expectedOutput.add(new StreamRecord<>("[2]: 1"));
expectedOutput.add(new StreamRecord<>("[2]: 2"));
expectedOutput.add(new StreamRecord<>("[2]: 3"));
expectedOutput.add(new StreamRecord<>("[2]: 4"));
testInputSelection(
new TestSequentialMultipleInputStreamOperator.Factory(),
true,
expectedOutput,
true);
}
@Test
void testSpecialRuleReading() throws Exception {
ArrayDeque<Object> expectedOutput = new ArrayDeque<>();
expectedOutput.add(new StreamRecord<>("[1]: Hello-1"));
expectedOutput.add(new StreamRecord<>("[1]: Hello-2"));
expectedOutput.add(new StreamRecord<>("[2]: 1"));
expectedOutput.add(new StreamRecord<>("[2]: 2"));
expectedOutput.add(new StreamRecord<>("[1]: Hello-3"));
expectedOutput.add(new StreamRecord<>("[2]: 3"));
expectedOutput.add(new StreamRecord<>("[2]: 4"));
testInputSelection(
new SpecialRuleReadingStreamOperatorFactory(3, 4, 2), true, expectedOutput, true);
}
@Test
void testReadFinishedInput() throws Exception {
assertThatThrownBy(
() ->
testInputSelection(
new TestReadFinishedInputStreamOperatorFactory(),
true,
new ArrayDeque<>(),
true))
.isInstanceOf(IOException.class)
.hasMessageContaining(
"Can not make a progress: all selected inputs are already finished");
}
private void testInputSelection(
StreamOperatorFactory<String> streamOperatorFactory,
boolean autoProcess,
ArrayDeque<Object> expectedOutput,
boolean orderedCheck)
throws Exception {
try (StreamTaskMailboxTestHarness<String> testHarness =
new StreamTaskMailboxTestHarnessBuilder<>(
MultipleInputStreamTask::new, BasicTypeInfo.STRING_TYPE_INFO)
.addInput(BasicTypeInfo.STRING_TYPE_INFO)
.addInput(BasicTypeInfo.INT_TYPE_INFO)
.setupOutputForSingletonOperatorChain(streamOperatorFactory)
.build()) {
testHarness.setAutoProcess(autoProcess);
for (StreamRecord<String> record : INPUT1) {
testHarness.processElement(record, 0);
}
for (StreamRecord<Integer> record : INPUT2) {
testHarness.processElement(record, 1);
}
testHarness.endInput();
if (!autoProcess) {
testHarness.processAll();
}
testHarness.waitForTaskCompletion();
if (orderedCheck) {
assertThat(testHarness.getOutput()).containsExactlyElementsOf(expectedOutput);
} else {
assertThat(testHarness.getOutput())
.containsExactlyInAnyOrderElementsOf(expectedOutput);
}
}
}
/**
* Setup three inputs only two selected and make sure that neither of the two inputs is starved,
* when one has some data all the time, but the other only rarely.
*/
@Test
void testInputStarvation() throws Exception {
try (StreamTaskMailboxTestHarness<String> testHarness =
new StreamTaskMailboxTestHarnessBuilder<>(
MultipleInputStreamTask::new, BasicTypeInfo.STRING_TYPE_INFO)
.addInput(BasicTypeInfo.STRING_TYPE_INFO)
.addInput(BasicTypeInfo.STRING_TYPE_INFO)
.addInput(BasicTypeInfo.STRING_TYPE_INFO)
.setupOutputForSingletonOperatorChain(
new TestInputStarvationMultipleInputOperatorFactory())
.build()) {
testHarness.processAll(); // request partitions
Queue<StreamRecord> expectedOutput = new ArrayDeque<>();
testHarness.setAutoProcess(false);
// StreamMultipleInputProcessor starts with all inputs available. Let it rotate and
// refresh properly.
testHarness.processSingleStep();
assertThat(testHarness.getOutput()).isEmpty();
testHarness.processElement(new StreamRecord<>("NOT_SELECTED"), 0);
testHarness.processElement(new StreamRecord<>("1"), 1);
testHarness.processElement(new StreamRecord<>("2"), 1);
testHarness.processElement(new StreamRecord<>("3"), 1);
testHarness.processElement(new StreamRecord<>("4"), 1);
testHarness.processSingleStep();
expectedOutput.add(new StreamRecord<>("[2]: 1"));
testHarness.processSingleStep();
expectedOutput.add(new StreamRecord<>("[2]: 2"));
assertThat(testHarness.getOutput()).containsExactlyElementsOf(expectedOutput);
// InputGate 2 was not available in previous steps, so let's check if we are not
// starving it
testHarness.processElement(new StreamRecord<>("1"), 2);
testHarness.processSingleStep();
testHarness.processSingleStep();
// One of those processing single step should pick up InputGate 2, however it's not
// important which one. We just must avoid starvation.
expectedOutput.add(new StreamRecord<>("[3]: 1"));
expectedOutput.add(new StreamRecord<>("[2]: 3"));
assertThat(testHarness.getOutput()).containsExactlyInAnyOrderElementsOf(expectedOutput);
}
}
// ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
private static
|
StreamTaskMultipleInputSelectiveReadingTest
|
java
|
google__guava
|
android/guava-tests/test/com/google/common/util/concurrent/JSR166TestCase.java
|
{
"start": 5564,
"end": 22041
}
|
class ____ extends TestCase {
protected static final boolean expensiveTests = Boolean.getBoolean("jsr166.expensiveTests");
/**
* If true, report on stdout all "slow" tests, that is, ones that take more than profileThreshold
* milliseconds to execute.
*/
private static final boolean profileTests = Boolean.getBoolean("jsr166.profileTests");
/**
* The number of milliseconds that tests are permitted for execution without being reported, when
* profileTests is set.
*/
private static final long profileThreshold = Long.getLong("jsr166.profileThreshold", 100);
@Override
protected void runTest() throws Throwable {
if (profileTests) runTestProfiled();
else super.runTest();
}
protected void runTestProfiled() throws Throwable {
long t0 = System.nanoTime();
try {
super.runTest();
} finally {
long elapsedMillis = (System.nanoTime() - t0) / (1000L * 1000L);
if (elapsedMillis >= profileThreshold)
System.out.printf("%n%s: %d%n", toString(), elapsedMillis);
}
}
// /**
// * Runs all JSR166 unit tests using junit.textui.TestRunner
// */
// public static void main(String[] args) {
// if (useSecurityManager) {
// System.err.println("Setting a permissive security manager");
// Policy.setPolicy(permissivePolicy());
// System.setSecurityManager(new SecurityManager());
// }
// int iters = (args.length == 0) ? 1 : Integer.parseInt(args[0]);
// Test s = suite();
// for (int i = 0; i < iters; ++i) {
// junit.textui.TestRunner.run(s);
// System.gc();
// System.runFinalization();
// }
// System.exit(0);
// }
// public static TestSuite newTestSuite(Object... suiteOrClasses) {
// TestSuite suite = new TestSuite();
// for (Object suiteOrClass : suiteOrClasses) {
// if (suiteOrClass instanceof TestSuite)
// suite.addTest((TestSuite) suiteOrClass);
// else if (suiteOrClass instanceof Class)
// suite.addTest(new TestSuite((Class<?>) suiteOrClass));
// else
// throw new ClassCastException("not a test suite or class");
// }
// return suite;
// }
// /**
// * Collects all JSR166 unit tests as one suite.
// */
// public static Test suite() {
// return newTestSuite(
// ForkJoinPoolTest.suite(),
// ForkJoinTaskTest.suite(),
// RecursiveActionTest.suite(),
// RecursiveTaskTest.suite(),
// LinkedTransferQueueTest.suite(),
// PhaserTest.suite(),
// ThreadLocalRandomTest.suite(),
// AbstractExecutorServiceTest.suite(),
// AbstractQueueTest.suite(),
// AbstractQueuedSynchronizerTest.suite(),
// AbstractQueuedLongSynchronizerTest.suite(),
// ArrayBlockingQueueTest.suite(),
// ArrayDequeTest.suite(),
// AtomicBooleanTest.suite(),
// AtomicIntegerArrayTest.suite(),
// AtomicIntegerFieldUpdaterTest.suite(),
// AtomicIntegerTest.suite(),
// AtomicLongArrayTest.suite(),
// AtomicLongFieldUpdaterTest.suite(),
// AtomicLongTest.suite(),
// AtomicMarkableReferenceTest.suite(),
// AtomicReferenceArrayTest.suite(),
// AtomicReferenceFieldUpdaterTest.suite(),
// AtomicReferenceTest.suite(),
// AtomicStampedReferenceTest.suite(),
// ConcurrentHashMapTest.suite(),
// ConcurrentLinkedDequeTest.suite(),
// ConcurrentLinkedQueueTest.suite(),
// ConcurrentSkipListMapTest.suite(),
// ConcurrentSkipListSubMapTest.suite(),
// ConcurrentSkipListSetTest.suite(),
// ConcurrentSkipListSubSetTest.suite(),
// CopyOnWriteArrayListTest.suite(),
// CopyOnWriteArraySetTest.suite(),
// CountDownLatchTest.suite(),
// CyclicBarrierTest.suite(),
// DelayQueueTest.suite(),
// EntryTest.suite(),
// ExchangerTest.suite(),
// ExecutorsTest.suite(),
// ExecutorCompletionServiceTest.suite(),
// FutureTaskTest.suite(),
// LinkedBlockingDequeTest.suite(),
// LinkedBlockingQueueTest.suite(),
// LinkedListTest.suite(),
// LockSupportTest.suite(),
// PriorityBlockingQueueTest.suite(),
// PriorityQueueTest.suite(),
// ReentrantLockTest.suite(),
// ReentrantReadWriteLockTest.suite(),
// ScheduledExecutorTest.suite(),
// ScheduledExecutorSubclassTest.suite(),
// SemaphoreTest.suite(),
// SynchronousQueueTest.suite(),
// SystemTest.suite(),
// ThreadLocalTest.suite(),
// ThreadPoolExecutorTest.suite(),
// ThreadPoolExecutorSubclassTest.suite(),
// ThreadTest.suite(),
// TimeUnitTest.suite(),
// TreeMapTest.suite(),
// TreeSetTest.suite(),
// TreeSubMapTest.suite(),
// TreeSubSetTest.suite());
// }
public static long SHORT_DELAY_MS;
public static long SMALL_DELAY_MS;
public static long MEDIUM_DELAY_MS;
public static long LONG_DELAY_MS;
/**
* Returns the shortest timed delay. This could be reimplemented to use for example a Property.
*/
protected long getShortDelay() {
return 50;
}
/** Sets delays as multiples of SHORT_DELAY. */
protected void setDelays() {
SHORT_DELAY_MS = getShortDelay();
SMALL_DELAY_MS = SHORT_DELAY_MS * 5;
MEDIUM_DELAY_MS = SHORT_DELAY_MS * 10;
LONG_DELAY_MS = SHORT_DELAY_MS * 200;
}
/**
* Returns a timeout in milliseconds to be used in tests that verify that operations block or time
* out.
*/
long timeoutMillis() {
return SHORT_DELAY_MS / 4;
}
/** Returns a new Date instance representing a time delayMillis milliseconds in the future. */
Date delayedDate(long delayMillis) {
return new Date(System.currentTimeMillis() + delayMillis);
}
/** The first exception encountered if any threadAssertXXX method fails. */
private final AtomicReference<Throwable> threadFailure = new AtomicReference<>(null);
/**
* Records an exception so that it can be rethrown later in the test harness thread, triggering a
* test case failure. Only the first failure is recorded; subsequent calls to this method from
* within the same test have no effect.
*/
public void threadRecordFailure(Throwable t) {
threadFailure.compareAndSet(null, t);
}
@Override
public void setUp() {
setDelays();
}
/**
* Extra checks that get done for all test cases.
*
* <p>Triggers test case failure if any thread assertions have failed, by rethrowing, in the test
* harness thread, any exception recorded earlier by threadRecordFailure.
*
* <p>Triggers test case failure if interrupt status is set in the main thread.
*/
@Override
public void tearDown() throws Exception {
Throwable t = threadFailure.getAndSet(null);
if (t != null) {
if (t instanceof Error) throw (Error) t;
else if (t instanceof RuntimeException) throw (RuntimeException) t;
else if (t instanceof Exception) throw (Exception) t;
else {
AssertionFailedError afe = new AssertionFailedError(t.toString());
afe.initCause(t);
throw afe;
}
}
if (Thread.interrupted()) throw new AssertionFailedError("interrupt status set in main thread");
}
/**
* Just like fail(reason), but additionally recording (using threadRecordFailure) any
* AssertionFailedError thrown, so that the current testcase will fail.
*/
public void threadFail(String reason) {
try {
fail(reason);
} catch (AssertionFailedError t) {
threadRecordFailure(t);
fail(reason);
}
}
/**
* Just like assertTrue(b), but additionally recording (using threadRecordFailure) any
* AssertionFailedError thrown, so that the current testcase will fail.
*/
public void threadAssertTrue(boolean b) {
try {
assertTrue(b);
} catch (AssertionFailedError t) {
threadRecordFailure(t);
throw t;
}
}
/**
* Just like assertFalse(b), but additionally recording (using threadRecordFailure) any
* AssertionFailedError thrown, so that the current testcase will fail.
*/
public void threadAssertFalse(boolean b) {
try {
assertFalse(b);
} catch (AssertionFailedError t) {
threadRecordFailure(t);
throw t;
}
}
/**
* Just like assertNull(x), but additionally recording (using threadRecordFailure) any
* AssertionFailedError thrown, so that the current testcase will fail.
*/
public void threadAssertNull(Object x) {
try {
assertThat(x).isNull();
} catch (AssertionFailedError t) {
threadRecordFailure(t);
throw t;
}
}
/**
* Just like assertEquals(x, y), but additionally recording (using threadRecordFailure) any
* AssertionFailedError thrown, so that the current testcase will fail.
*/
public void threadAssertEquals(long x, long y) {
try {
assertEquals(x, y);
} catch (AssertionFailedError t) {
threadRecordFailure(t);
throw t;
}
}
/**
* Just like assertEquals(x, y), but additionally recording (using threadRecordFailure) any
* AssertionFailedError thrown, so that the current testcase will fail.
*/
public void threadAssertEquals(Object x, Object y) {
try {
assertEquals(x, y);
} catch (AssertionFailedError t) {
threadRecordFailure(t);
throw t;
} catch (Throwable t) {
threadUnexpectedException(t);
}
}
/**
* Just like assertSame(x, y), but additionally recording (using threadRecordFailure) any
* AssertionFailedError thrown, so that the current testcase will fail.
*/
public void threadAssertSame(Object x, Object y) {
try {
assertSame(x, y);
} catch (AssertionFailedError t) {
threadRecordFailure(t);
throw t;
}
}
/** Calls threadFail with message "should throw exception". */
public void threadShouldThrow() {
threadFail("should throw exception");
}
/** Calls threadFail with message "should throw" + exceptionName. */
public void threadShouldThrow(String exceptionName) {
threadFail("should throw " + exceptionName);
}
/**
* Records the given exception using {@link #threadRecordFailure}, then rethrows the exception,
* wrapping it in an AssertionFailedError if necessary.
*/
public void threadUnexpectedException(Throwable t) {
threadRecordFailure(t);
t.printStackTrace();
if (t instanceof RuntimeException) throw (RuntimeException) t;
else if (t instanceof Error) throw (Error) t;
else {
AssertionFailedError afe = new AssertionFailedError("unexpected exception: " + t);
afe.initCause(t);
throw afe;
}
}
/**
* Delays, via Thread.sleep, for the given millisecond delay, but if the sleep is shorter than
* specified, may re-sleep or yield until time elapses.
*/
@SuppressWarnings("ThreadPriorityCheck") // TODO: b/175898629 - Consider onSpinWait?
static void delay(long millis) throws InterruptedException {
long startTime = System.nanoTime();
long ns = millis * 1000 * 1000;
for (; ; ) {
if (millis > 0L) Thread.sleep(millis);
else // too short to sleep
Thread.yield();
long d = ns - (System.nanoTime() - startTime);
if (d > 0L) millis = d / (1000 * 1000);
else break;
}
}
/** Waits out termination of a thread pool or fails doing so. */
void joinPool(ExecutorService exec) throws InterruptedException {
try {
exec.shutdown();
assertTrue(
"ExecutorService did not terminate in a timely manner",
exec.awaitTermination(2 * LONG_DELAY_MS, MILLISECONDS));
} catch (SecurityException ok) {
// Allowed in case test doesn't have privs
}
}
/**
* Checks that thread does not terminate within the default millisecond delay of {@code
* timeoutMillis()}.
*/
void assertThreadStaysAlive(Thread thread) throws InterruptedException {
assertThreadStaysAlive(thread, timeoutMillis());
}
/** Checks that thread does not terminate within the given millisecond delay. */
void assertThreadStaysAlive(Thread thread, long millis) throws InterruptedException {
// No need to optimize the failing case via Thread.join.
delay(millis);
assertTrue(thread.isAlive());
}
/**
* Checks that the threads do not terminate within the default millisecond delay of {@code
* timeoutMillis()}.
*/
void assertThreadsStayAlive(Thread... threads) throws InterruptedException {
assertThreadsStayAlive(timeoutMillis(), threads);
}
/** Checks that the threads do not terminate within the given millisecond delay. */
void assertThreadsStayAlive(long millis, Thread... threads) throws InterruptedException {
// No need to optimize the failing case via Thread.join.
delay(millis);
for (Thread thread : threads) {
assertTrue(thread.isAlive());
}
}
/** Checks that future.get times out, with the default timeout of {@code timeoutMillis()}. */
void assertFutureTimesOut(Future<?> future) {
assertFutureTimesOut(future, timeoutMillis());
}
/** Checks that future.get times out, with the given millisecond timeout. */
void assertFutureTimesOut(Future<?> future, long timeoutMillis) {
long startTime = System.nanoTime();
try {
future.get(timeoutMillis, MILLISECONDS);
fail("Should throw exception");
} catch (TimeoutException success) {
} catch (Exception e) {
threadUnexpectedException(e);
} finally {
future.cancel(true);
}
assertThat(millisElapsedSince(startTime)).isAtLeast(timeoutMillis);
}
  /** The number of elements to place in collections, arrays, etc. */
  public static final int SIZE = 20;
  // Some convenient Integer constants, boxed once so tests can compare by equals or identity.
  public static final Integer zero = 0;
  public static final Integer one = 1;
  public static final Integer two = 2;
  public static final Integer three = 3;
  public static final Integer four = 4;
  public static final Integer five = 5;
  public static final Integer six = 6;
  public static final Integer seven = 7;
  public static final Integer eight = 8;
  public static final Integer nine = 9;
  // Negative counterparts ("m" prefix = minus).
  public static final Integer m1 = -1;
  public static final Integer m2 = -2;
  public static final Integer m3 = -3;
  public static final Integer m4 = -4;
  public static final Integer m5 = -5;
  public static final Integer m6 = -6;
  public static final Integer m10 = -10;
  /**
   * Runs Runnable r with a security policy that permits precisely the specified permissions. If
   * there is no current security manager, the runnable is run twice, both with and without a
   * security manager. We require that any security manager permit getPolicy/setPolicy.
   */
  public void runWithPermissions(Runnable r, Permission... permissions) {
    SecurityManager sm = System.getSecurityManager();
    if (sm == null) {
      // No manager installed: first run unrestricted...
      r.run();
      Policy savedPolicy = Policy.getPolicy();
      try {
        // ...then install a permissive policy BEFORE the manager (so the install itself is
        // permitted) and recurse into the sm != null branch below.
        Policy.setPolicy(permissivePolicy());
        System.setSecurityManager(new SecurityManager());
        runWithPermissions(r, permissions);
      } finally {
        // Restore order matters: remove the manager first so resetting the policy is allowed.
        System.setSecurityManager(null);
        Policy.setPolicy(savedPolicy);
      }
    } else {
      // A manager is already installed: swap in a policy granting exactly `permissions`.
      Policy savedPolicy = Policy.getPolicy();
      AdjustablePolicy policy = new AdjustablePolicy(permissions);
      Policy.setPolicy(policy);
      try {
        r.run();
      } finally {
        // Re-grant setPolicy so restoring the saved policy is permitted under the
        // restricted policy that is still in effect.
        policy.addPermission(new SecurityPermission("setPolicy"));
        Policy.setPolicy(savedPolicy);
      }
    }
  }
/** Runs a runnable without any permissions. */
public void runWithoutPermissions(Runnable r) {
runWithPermissions(r);
}
/** A security policy where new permissions can be dynamically added or all cleared. */
public static
|
JSR166TestCase
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/OptionalOfRedundantMethodTest.java
|
{
"start": 13262,
"end": 13531
}
|
class ____ {
String f() {
return Optional.of("test").or("");
}
}
""")
.addOutputLines(
"Test.java",
"""
import com.google.common.base.Optional;
|
Test
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
|
{
"start": 33273,
"end": 33468
}
|
class ____ extends Options.IntegerOption
implements Option {
BufferSizeOption(int value) {
super(value);
}
}
static
|
BufferSizeOption
|
java
|
grpc__grpc-java
|
xds/src/main/java/io/grpc/xds/LazyLoadBalancer.java
|
{
"start": 935,
"end": 1426
}
|
class ____ extends ForwardingLoadBalancer {
private LoadBalancer delegate;
public LazyLoadBalancer(Helper helper, LoadBalancer.Factory delegateFactory) {
this.delegate = new LazyDelegate(helper, delegateFactory);
}
@Override
protected LoadBalancer delegate() {
return delegate;
}
@Override
public Status acceptResolvedAddresses(ResolvedAddresses resolvedAddresses) {
return delegate.acceptResolvedAddresses(resolvedAddresses);
}
private final
|
LazyLoadBalancer
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntimeConstants.java
|
{
"start": 982,
"end": 1395
}
|
class ____ {
/* Switch container runtimes. Work in progress: These
* parameters may be changed/removed in the future. */
@Private
public static final String ENV_CONTAINER_TYPE =
"YARN_CONTAINER_RUNTIME_TYPE";
@Private
public static final String CONTAINER_RUNTIME_DOCKER =
"docker";
@Private
public static final String CONTAINER_RUNTIME_RUNC =
"runc";
}
|
ContainerRuntimeConstants
|
java
|
quarkusio__quarkus
|
extensions/websockets-next/deployment/src/test/java/io/quarkus/websockets/next/test/upgrade/RejectingHttpUpgradeCheck.java
|
{
"start": 141,
"end": 642
}
|
class ____ implements HttpUpgradeCheck {
static final String REJECT_HEADER = "reject";
@Override
public Uni<CheckResult> perform(HttpUpgradeContext context) {
if (shouldCheckUpgrade(context)) {
return CheckResult.rejectUpgrade(403);
}
return CheckResult.permitUpgrade();
}
protected boolean shouldCheckUpgrade(HttpUpgradeContext context) {
return context.httpRequest().headers().contains(REJECT_HEADER);
}
}
|
RejectingHttpUpgradeCheck
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/MisformattedTestDataTest.java
|
{
"start": 7059,
"end": 7460
}
|
class ____ {
void method() {
var foo
= "foo\\\\nbar";
}
}
\""");
}
}
""")
.addOutputLines(
"Test.java",
"""
import com.google.errorprone.BugCheckerRefactoringTestHelper;
|
Test
|
java
|
netty__netty
|
common/src/main/java/io/netty/util/internal/logging/InternalLoggerFactory.java
|
{
"start": 1583,
"end": 5125
}
|
class ____ {
private static volatile InternalLoggerFactory defaultFactory;
@SuppressWarnings("UnusedCatchParameter")
private static InternalLoggerFactory newDefaultFactory(String name) {
InternalLoggerFactory f = useSlf4JLoggerFactory(name);
if (f != null) {
return f;
}
f = useLog4J2LoggerFactory(name);
if (f != null) {
return f;
}
f = useLog4JLoggerFactory(name);
if (f != null) {
return f;
}
return useJdkLoggerFactory(name);
}
private static InternalLoggerFactory useSlf4JLoggerFactory(String name) {
try {
InternalLoggerFactory f = Slf4JLoggerFactory.getInstanceWithNopCheck();
f.newInstance(name).debug("Using SLF4J as the default logging framework");
return f;
} catch (LinkageError ignore) {
return null;
} catch (Exception ignore) {
// We catch Exception and not ReflectiveOperationException as we still support java 6
return null;
}
}
private static InternalLoggerFactory useLog4J2LoggerFactory(String name) {
try {
InternalLoggerFactory f = Log4J2LoggerFactory.INSTANCE;
f.newInstance(name).debug("Using Log4J2 as the default logging framework");
return f;
} catch (LinkageError ignore) {
return null;
} catch (Exception ignore) {
// We catch Exception and not ReflectiveOperationException as we still support java 6
return null;
}
}
private static InternalLoggerFactory useLog4JLoggerFactory(String name) {
try {
InternalLoggerFactory f = Log4JLoggerFactory.INSTANCE;
f.newInstance(name).debug("Using Log4J as the default logging framework");
return f;
} catch (LinkageError ignore) {
return null;
} catch (Exception ignore) {
// We catch Exception and not ReflectiveOperationException as we still support java 6
return null;
}
}
private static InternalLoggerFactory useJdkLoggerFactory(String name) {
InternalLoggerFactory f = JdkLoggerFactory.INSTANCE;
f.newInstance(name).debug("Using java.util.logging as the default logging framework");
return f;
}
/**
* Returns the default factory. The initial default factory is
* {@link JdkLoggerFactory}.
*/
public static InternalLoggerFactory getDefaultFactory() {
if (defaultFactory == null) {
defaultFactory = newDefaultFactory(InternalLoggerFactory.class.getName());
}
return defaultFactory;
}
/**
* Changes the default factory.
*/
public static void setDefaultFactory(InternalLoggerFactory defaultFactory) {
InternalLoggerFactory.defaultFactory = ObjectUtil.checkNotNull(defaultFactory, "defaultFactory");
}
/**
* Creates a new logger instance with the name of the specified class.
*/
public static InternalLogger getInstance(Class<?> clazz) {
return getInstance(clazz.getName());
}
/**
* Creates a new logger instance with the specified name.
*/
public static InternalLogger getInstance(String name) {
return getDefaultFactory().newInstance(name);
}
/**
* Creates a new logger instance with the specified name.
*/
protected abstract InternalLogger newInstance(String name);
}
|
InternalLoggerFactory
|
java
|
quarkusio__quarkus
|
extensions/websockets-next/runtime/src/main/java/io/quarkus/websockets/next/runtime/WebSocketConnectionBase.java
|
{
"start": 921,
"end": 6257
}
|
class ____ implements Connection {
private static final Logger LOG = Logger.getLogger(WebSocketConnectionBase.class);
protected final String identifier;
protected final Map<String, String> pathParams;
protected final Codecs codecs;
protected final HandshakeRequest handshakeRequest;
protected final Instant creationTime;
protected final TrafficLogger trafficLogger;
private final UserData userData;
private final SendingInterceptor sendingInterceptor;
WebSocketConnectionBase(Map<String, String> pathParams, Codecs codecs, HandshakeRequest handshakeRequest,
TrafficLogger trafficLogger, UserData userData, SendingInterceptor sendingInterceptor) {
this.identifier = UUID.randomUUID().toString();
this.pathParams = pathParams;
this.codecs = codecs;
this.handshakeRequest = handshakeRequest;
this.creationTime = Instant.now();
this.trafficLogger = trafficLogger;
this.userData = userData;
this.sendingInterceptor = sendingInterceptor;
}
abstract WebSocketBase webSocket();
@Override
public String id() {
return identifier;
}
@Override
public String pathParam(String name) {
return pathParams.get(name);
}
@Override
public Uni<Void> sendText(String message) {
Uni<Void> uni = Uni.createFrom().completionStage(() -> webSocket().writeTextMessage(message).toCompletionStage());
if (sendingInterceptor != null) {
uni = uni.invoke(() -> sendingInterceptor.onSend(message));
}
return trafficLogger == null ? uni : uni.invoke(() -> {
trafficLogger.textMessageSent(this, message);
});
}
@Override
public Uni<Void> sendBinary(Buffer message) {
Uni<Void> uni = Uni.createFrom().completionStage(() -> webSocket().writeBinaryMessage(message).toCompletionStage());
if (sendingInterceptor != null) {
uni = uni.invoke(() -> sendingInterceptor.onSend(message));
}
return trafficLogger == null ? uni : uni.invoke(() -> trafficLogger.binaryMessageSent(this, message));
}
@Override
public <M> Uni<Void> sendText(M message) {
String text;
// Use the same conversion rules as defined for the OnTextMessage
if (message instanceof JsonObject || message instanceof JsonArray || message instanceof BufferImpl
|| message instanceof NoBoundChecksBuffer) {
text = message.toString();
} else if (message.getClass().isArray() && message.getClass().arrayType().equals(byte.class)) {
text = Buffer.buffer((byte[]) message).toString();
} else {
text = codecs.textEncode(message, null);
}
return sendText(text);
}
@Override
public Uni<Void> sendPing(Buffer data) {
return Uni.createFrom().completionStage(() -> webSocket().writePing(data).toCompletionStage());
}
void sendAutoPing() {
webSocket().writePing(Buffer.buffer("ping")).onComplete(r -> {
if (r.failed()) {
LOG.warnf("Unable to send auto-ping for %s: %s", this, r.cause().toString());
}
});
}
@Override
public Uni<Void> sendPong(Buffer data) {
return Uni.createFrom().completionStage(() -> webSocket().writePong(data).toCompletionStage());
}
@Override
public Uni<Void> close() {
return close(CloseReason.NORMAL);
}
@Override
public Uni<Void> close(CloseReason reason) {
if (isClosed()) {
LOG.warnf("Connection already closed: %s", this);
return Uni.createFrom().voidItem();
}
return Uni.createFrom()
.completionStage(() -> webSocket().close((short) reason.getCode(), reason.getMessage()).toCompletionStage());
}
@Override
public boolean isSecure() {
return webSocket().isSsl();
}
@Override
public SSLSession sslSession() {
return webSocket().sslSession();
}
@Override
public boolean isClosed() {
return webSocket().isClosed();
}
@Override
public HandshakeRequest handshakeRequest() {
return handshakeRequest;
}
@Override
public String subprotocol() {
return webSocket().subProtocol();
}
@Override
public Instant creationTime() {
return creationTime;
}
public BroadcastSender broadcast() {
throw new UnsupportedOperationException();
}
@Override
public CloseReason closeReason() {
WebSocketBase ws = webSocket();
if (ws.isClosed()) {
Short code = ws.closeStatusCode();
if (code == null || code == WebSocketCloseStatus.EMPTY.code()) {
// This could happen if the connection is terminated abruptly
return CloseReason.EMPTY;
}
if (code == WebSocketCloseStatus.ABNORMAL_CLOSURE.code()) {
// This could happen if a close frame is never received
return CloseReason.ABNORMAL;
}
return new CloseReason(code, ws.closeReason());
}
return null;
}
@Override
public UserData userData() {
return userData;
}
protected static
|
WebSocketConnectionBase
|
java
|
apache__camel
|
components/camel-aws/camel-aws-cloudtrail/src/main/java/org/apache/camel/component/aws/cloudtrail/CloudtrailConsumer.java
|
{
"start": 1417,
"end": 4548
}
|
class ____ extends ScheduledBatchPollingConsumer {
private static Instant lastTime;
public CloudtrailConsumer(CloudtrailEndpoint endpoint, Processor processor) {
super(endpoint, processor);
}
@Override
protected int poll() throws Exception {
LookupEventsRequest.Builder eventsRequestBuilder
= LookupEventsRequest.builder().maxResults(getEndpoint().getConfiguration().getMaxResults());
List<LookupAttribute> attributes = new ArrayList<>();
if (ObjectHelper.isNotEmpty(getEndpoint().getConfiguration().getEventSource())) {
LookupAttribute eventSource = LookupAttribute.builder().attributeKey(LookupAttributeKey.EVENT_SOURCE)
.attributeValue(getEndpoint().getConfiguration().getEventSource()).build();
attributes.add(eventSource);
}
if (!attributes.isEmpty()) {
eventsRequestBuilder.lookupAttributes(attributes);
}
if (lastTime != null) {
eventsRequestBuilder.startTime(lastTime.plusMillis(1000));
}
LookupEventsResponse response = getClient().lookupEvents(eventsRequestBuilder.build());
// okay we have some response from aws so lets mark the consumer as ready
forceConsumerAsReady();
if (!response.events().isEmpty()) {
lastTime = response.events().get(0).eventTime();
}
Queue<Exchange> exchanges = createExchanges(response.events());
return processBatch(CastUtils.cast(exchanges));
}
@Override
public int processBatch(Queue<Object> exchanges) throws Exception {
int processedExchanges = 0;
while (!exchanges.isEmpty()) {
final Exchange exchange = ObjectHelper.cast(Exchange.class, exchanges.poll());
// use default consumer callback
AsyncCallback cb = defaultConsumerCallback(exchange, true);
getAsyncProcessor().process(exchange, cb);
processedExchanges++;
}
return processedExchanges;
}
private CloudTrailClient getClient() {
return getEndpoint().getClient();
}
@Override
public CloudtrailEndpoint getEndpoint() {
return (CloudtrailEndpoint) super.getEndpoint();
}
private Queue<Exchange> createExchanges(List<Event> events) {
Queue<Exchange> exchanges = new ArrayDeque<>();
for (Event event : events) {
exchanges.add(createExchange(event));
}
return exchanges;
}
protected Exchange createExchange(Event event) {
Exchange exchange = createExchange(true);
exchange.getMessage().setBody(event.cloudTrailEvent().getBytes(StandardCharsets.UTF_8));
exchange.getMessage().setHeader(CloudtrailConstants.EVENT_ID, event.eventId());
exchange.getMessage().setHeader(CloudtrailConstants.EVENT_NAME, event.eventName());
exchange.getMessage().setHeader(CloudtrailConstants.EVENT_SOURCE, event.eventSource());
exchange.getMessage().setHeader(CloudtrailConstants.USERNAME, event.username());
return exchange;
}
}
|
CloudtrailConsumer
|
java
|
micronaut-projects__micronaut-core
|
http/src/main/java/io/micronaut/http/annotation/RouteCondition.java
|
{
"start": 1820,
"end": 2005
}
|
interface ____ {
/**
* An expression that evaluates to {@code true} or {@code false}.
* @return The expression
* @since 4.0.0
*/
String value();
}
|
RouteCondition
|
java
|
apache__dubbo
|
dubbo-metadata/dubbo-metadata-report-nacos/src/test/java/org/apache/dubbo/metadata/store/nacos/RetryTest.java
|
{
"start": 1453,
"end": 4805
}
|
class ____ {
@Test
void testRetryCreate() {
try (MockedStatic<NacosFactory> nacosFactoryMockedStatic = Mockito.mockStatic(NacosFactory.class)) {
AtomicInteger atomicInteger = new AtomicInteger(0);
ConfigService mock = new MockConfigService() {
@Override
public String getServerStatus() {
return atomicInteger.incrementAndGet() > 10 ? UP : DOWN;
}
};
nacosFactoryMockedStatic
.when(() -> NacosFactory.createConfigService((Properties) any()))
.thenReturn(mock);
URL url = URL.valueOf("nacos://127.0.0.1:8848")
.addParameter("nacos.retry", 5)
.addParameter("nacos.retry-wait", 10);
Assertions.assertThrows(IllegalStateException.class, () -> new NacosMetadataReport(url));
try {
new NacosMetadataReport(url);
} catch (Throwable t) {
Assertions.fail(t);
}
}
}
@Test
void testDisable() {
try (MockedStatic<NacosFactory> nacosFactoryMockedStatic = Mockito.mockStatic(NacosFactory.class)) {
ConfigService mock = new MockConfigService() {
@Override
public String getServerStatus() {
return DOWN;
}
};
nacosFactoryMockedStatic
.when(() -> NacosFactory.createConfigService((Properties) any()))
.thenReturn(mock);
URL url = URL.valueOf("nacos://127.0.0.1:8848")
.addParameter("nacos.retry", 5)
.addParameter("nacos.retry-wait", 10)
.addParameter("nacos.check", "false");
try {
new NacosMetadataReport(url);
} catch (Throwable t) {
Assertions.fail(t);
}
}
}
@Test
void testRequest() {
try (MockedStatic<NacosFactory> nacosFactoryMockedStatic = Mockito.mockStatic(NacosFactory.class)) {
AtomicInteger atomicInteger = new AtomicInteger(0);
ConfigService mock = new MockConfigService() {
@Override
public String getConfig(String dataId, String group, long timeoutMs) throws NacosException {
if (atomicInteger.incrementAndGet() > 10) {
return "";
} else {
throw new NacosException();
}
}
@Override
public String getServerStatus() {
return UP;
}
};
nacosFactoryMockedStatic
.when(() -> NacosFactory.createConfigService((Properties) any()))
.thenReturn(mock);
URL url = URL.valueOf("nacos://127.0.0.1:8848")
.addParameter("nacos.retry", 5)
.addParameter("nacos.retry-wait", 10);
Assertions.assertThrows(IllegalStateException.class, () -> new NacosMetadataReport(url));
try {
new NacosMetadataReport(url);
} catch (Throwable t) {
Assertions.fail(t);
}
}
}
}
|
RetryTest
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/state/StateBackendTestBase.java
|
{
"start": 235605,
"end": 237251
}
|
class ____ implements Serializable {
private Double doubleField;
private Integer intField;
public TestNestedPojoClassA() {}
public TestNestedPojoClassA(Double doubleField, Integer intField) {
this.doubleField = doubleField;
this.intField = intField;
}
public Double getDoubleField() {
return doubleField;
}
public void setDoubleField(Double doubleField) {
this.doubleField = doubleField;
}
public Integer getIntField() {
return intField;
}
public void setIntField(Integer intField) {
this.intField = intField;
}
@Override
public String toString() {
return "TestNestedPojoClassA{"
+ "doubleField='"
+ doubleField
+ '\''
+ ", intField="
+ intField
+ '}';
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
TestNestedPojoClassA testNestedPojoClassA = (TestNestedPojoClassA) o;
if (!doubleField.equals(testNestedPojoClassA.doubleField)) return false;
return intField.equals(testNestedPojoClassA.intField);
}
@Override
public int hashCode() {
int result = doubleField.hashCode();
result = 31 * result + intField.hashCode();
return result;
}
}
public static
|
TestNestedPojoClassA
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounter.java
|
{
"start": 1110,
"end": 1206
}
|
interface ____ both mapred and mapreduce packages.
*/
@InterfaceAudience.Private
public abstract
|
in
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/cache/config/EnableCachingIntegrationTests.java
|
{
"start": 6872,
"end": 7051
}
|
class ____ {
@Autowired
Environment env;
@Bean
public Bar bar() {
return new Bar(Boolean.parseBoolean(env.getProperty("bar.enabled")));
}
static
|
BeanConditionConfig
|
java
|
spring-projects__spring-security
|
core/src/main/java/org/springframework/security/authentication/event/AuthenticationFailureProviderNotFoundEvent.java
|
{
"start": 1035,
"end": 1372
}
|
class ____ extends AbstractAuthenticationFailureEvent {
@Serial
private static final long serialVersionUID = 9122219669183263487L;
public AuthenticationFailureProviderNotFoundEvent(Authentication authentication,
AuthenticationException exception) {
super(authentication, exception);
}
}
|
AuthenticationFailureProviderNotFoundEvent
|
java
|
spring-cloud__spring-cloud-gateway
|
spring-cloud-gateway-server-webflux/src/main/java/org/springframework/cloud/gateway/route/CompositeRouteLocator.java
|
{
"start": 775,
"end": 1281
}
|
class ____ implements RouteLocator {
private final Flux<RouteLocator> delegates;
public CompositeRouteLocator(Flux<RouteLocator> delegates) {
this.delegates = delegates;
}
@Override
public Flux<Route> getRoutes() {
return this.delegates.flatMapSequential(RouteLocator::getRoutes);
}
@Override
public Flux<Route> getRoutesByMetadata(Map<String, Object> metadata) {
return this.delegates.flatMapSequential(routeLocator -> routeLocator.getRoutesByMetadata(metadata));
}
}
|
CompositeRouteLocator
|
java
|
apache__flink
|
flink-table/flink-sql-gateway/src/main/java/org/apache/flink/table/gateway/rest/message/materializedtable/RefreshMaterializedTableRequestBody.java
|
{
"start": 1386,
"end": 3649
}
|
class ____ implements RequestBody {
private static final String FIELD_NAME_IS_PERIODIC = "isPeriodic";
private static final String FIELD_NAME_SCHEDULE_TIME = "scheduleTime";
private static final String FIELD_NAME_DYNAMIC_OPTIONS = "dynamicOptions";
private static final String FIELD_NAME_STATIC_PARTITIONS = "staticPartitions";
private static final String FIELD_NAME_EXECUTION_CONFIG = "executionConfig";
@JsonProperty(FIELD_NAME_IS_PERIODIC)
private final boolean isPeriodic;
@JsonProperty(FIELD_NAME_SCHEDULE_TIME)
@Nullable
private final String scheduleTime;
@JsonProperty(FIELD_NAME_DYNAMIC_OPTIONS)
@Nullable
private final Map<String, String> dynamicOptions;
@JsonProperty(FIELD_NAME_STATIC_PARTITIONS)
@Nullable
private final Map<String, String> staticPartitions;
@JsonProperty(FIELD_NAME_EXECUTION_CONFIG)
@Nullable
private final Map<String, String> executionConfig;
@JsonCreator
public RefreshMaterializedTableRequestBody(
@JsonProperty(FIELD_NAME_IS_PERIODIC) boolean isPeriodic,
@Nullable @JsonProperty(FIELD_NAME_SCHEDULE_TIME) String scheduleTime,
@Nullable @JsonProperty(FIELD_NAME_DYNAMIC_OPTIONS) Map<String, String> dynamicOptions,
@Nullable @JsonProperty(FIELD_NAME_STATIC_PARTITIONS)
Map<String, String> staticPartitions,
@Nullable @JsonProperty(FIELD_NAME_EXECUTION_CONFIG)
Map<String, String> executionConfig) {
this.isPeriodic = isPeriodic;
this.scheduleTime = scheduleTime;
this.dynamicOptions = dynamicOptions;
this.staticPartitions = staticPartitions;
this.executionConfig = executionConfig;
}
public boolean isPeriodic() {
return isPeriodic;
}
@Nullable
public String getScheduleTime() {
return scheduleTime;
}
@Nullable
public Map<String, String> getDynamicOptions() {
return dynamicOptions;
}
@Nullable
public Map<String, String> getStaticPartitions() {
return staticPartitions;
}
@Nullable
public Map<String, String> getExecutionConfig() {
return executionConfig;
}
}
|
RefreshMaterializedTableRequestBody
|
java
|
quarkusio__quarkus
|
independent-projects/resteasy-reactive/server/runtime/src/main/java/org/jboss/resteasy/reactive/server/ServerResponseFilter.java
|
{
"start": 2366,
"end": 2531
}
|
interface ____ {
/**
* The priority with which this response filter will be executed
*/
int priority() default Priorities.USER;
}
|
ServerResponseFilter
|
java
|
apache__camel
|
core/camel-core-model/src/main/java/org/apache/camel/model/dataformat/FhirDataformat.java
|
{
"start": 8457,
"end": 22357
}
|
class ____ can be separated by comma.
*
* @param preferTypes The preferred types, or <code>null</code>
*/
public void setPreferTypes(String preferTypes) {
this.preferTypes = preferTypes;
}
public String getForceResourceId() {
return forceResourceId;
}
/**
* When encoding, force this resource ID to be encoded as the resource ID.
*
* Reference to object of type org.hl7.fhir.instance.model.api.IIdType
*/
public void setForceResourceId(String forceResourceId) {
this.forceResourceId = forceResourceId;
}
public String getServerBaseUrl() {
return serverBaseUrl;
}
/**
* Sets the server's base URL used by this parser. If a value is set, resource references will be turned into
* relative references if they are provided as absolute URLs but have a base matching the given base.
*
* @param serverBaseUrl The base URL, e.g. "http://example.com/base"
*/
public void setServerBaseUrl(String serverBaseUrl) {
this.serverBaseUrl = serverBaseUrl;
}
public String getOmitResourceId() {
return omitResourceId;
}
/**
* If set to <code>true</code> (default is <code>false</code>) the ID of any resources being encoded will not be
* included in the output. Note that this does not apply to contained resources, only to root resources. In other
* words, if this is set to <code>true</code>, contained resources will still have local IDs but the
* outer/containing ID will not have an ID.
*
* @param omitResourceId Should resource IDs be omitted
*/
public void setOmitResourceId(String omitResourceId) {
this.omitResourceId = omitResourceId;
}
public String getEncodeElementsAppliesToChildResourcesOnly() {
return encodeElementsAppliesToChildResourcesOnly;
}
/**
* If set to <code>true</code> (default is false), the values supplied to {@link #setEncodeElements(Set)} will not
* be applied to the root resource (typically a Bundle), but will be applied to any sub-resources contained within
* it (i.e. search result resources in that bundle)
*/
public void setEncodeElementsAppliesToChildResourcesOnly(String encodeElementsAppliesToChildResourcesOnly) {
this.encodeElementsAppliesToChildResourcesOnly = encodeElementsAppliesToChildResourcesOnly;
}
public String getEncodeElements() {
return encodeElements;
}
/**
* If provided, specifies the elements which should be encoded, to the exclusion of all others. Multiple elements
* can be separated by comma when using String parameter.
*
* Valid values for this field would include:
* <ul>
* <li><b>Patient</b> - Encode patient and all its children</li>
* <li><b>Patient.name</b> - Encode only the patient's name</li>
* <li><b>Patient.name.family</b> - Encode only the patient's family name</li>
* <li><b>*.text</b> - Encode the text element on any resource (only the very first position may contain a
* wildcard)</li>
* <li><b>*.(mandatory)</b> - This is a special case which causes any mandatory fields (min > 0) to be encoded</li>
* </ul>
*
* @param encodeElements The elements to encode
* @see #setDontEncodeElements(Set)
*/
public void setEncodeElements(Set<String> encodeElements) {
this.encodeElements = String.join(",", encodeElements);
}
/**
* If provided, specifies the elements which should be encoded, to the exclusion of all others. Multiple elements
* can be separated by comma when using String parameter.
*
* Valid values for this field would include:
* <ul>
* <li><b>Patient</b> - Encode patient and all its children</li>
* <li><b>Patient.name</b> - Encode only the patient's name</li>
* <li><b>Patient.name.family</b> - Encode only the patient's family name</li>
* <li><b>*.text</b> - Encode the text element on any resource (only the very first position may contain a
* wildcard)</li>
* <li><b>*.(mandatory)</b> - This is a special case which causes any mandatory fields (min > 0) to be encoded</li>
* </ul>
*
* @param encodeElements The elements to encode
* @see #setDontEncodeElements(Set)
*/
public void setEncodeElements(String encodeElements) {
this.encodeElements = encodeElements;
}
public String getDontEncodeElements() {
return dontEncodeElements;
}
/**
* If provided, specifies the elements which should NOT be encoded. Multiple elements can be separated by comma when
* using String parameter.
*
* Valid values for this field would include:
* <ul>
* <li><b>Patient</b> - Don't encode patient and all its children</li>
* <li><b>Patient.name</b> - Don't encode the patient's name</li>
* <li><b>Patient.name.family</b> - Don't encode the patient's family name</li>
* <li><b>*.text</b> - Don't encode the text element on any resource (only the very first position may contain a
* wildcard)</li>
* </ul>
* <p>
* DSTU2 note: Note that values including meta, such as <code>Patient.meta</code> will work for DSTU2 parsers, but
* values with subelements on meta such as <code>Patient.meta.lastUpdated</code> will only work in DSTU3+ mode.
* </p>
*
* @param dontEncodeElements The elements to NOT encode
* @see #setEncodeElements(Set)
*/
public void setDontEncodeElements(Set<String> dontEncodeElements) {
this.dontEncodeElements = String.join(",", dontEncodeElements);
}
/**
* If provided, specifies the elements which should NOT be encoded. Multiple elements can be separated by comma when
* using String parameter.
*
* Valid values for this field would include:
* <ul>
* <li><b>Patient</b> - Don't encode patient and all its children</li>
* <li><b>Patient.name</b> - Don't encode the patient's name</li>
* <li><b>Patient.name.family</b> - Don't encode the patient's family name</li>
* <li><b>*.text</b> - Don't encode the text element on any resource (only the very first position may contain a
* wildcard)</li>
* </ul>
* <p>
* DSTU2 note: Note that values including meta, such as <code>Patient.meta</code> will work for DSTU2 parsers, but
* values with subelements on meta such as <code>Patient.meta.lastUpdated</code> will only work in DSTU3+ mode.
* </p>
*
* @param dontEncodeElements The elements to NOT encode
* @see #setEncodeElements(Set)
*/
public void setDontEncodeElements(String dontEncodeElements) {
this.dontEncodeElements = dontEncodeElements;
}
public String getStripVersionsFromReferences() {
return stripVersionsFromReferences;
}
/**
* If set to <code>true<code> (which is the default), resource references containing a version
* will have the version removed when the resource is encoded. This is generally good behaviour because
* in most situations, references from one resource to another should be to the resource by ID, not
* by ID and version. In some cases though, it may be desirable to preserve the version in resource
* links. In that case, this value should be set to <code>false</code>.
* <p>
* This method provides the ability to globally disable reference encoding. If finer-grained control is needed, use
* {@link #setDontStripVersionsFromReferencesAtPaths(List)}
* </p>
*
* @param stripVersionsFromReferences Set this to
* <code>false<code> to prevent the parser from removing resource versions
* from references (or <code>null</code> to apply the default setting from the
* parser options
*/
public void setStripVersionsFromReferences(String stripVersionsFromReferences) {
this.stripVersionsFromReferences = stripVersionsFromReferences;
}
public String getOverrideResourceIdWithBundleEntryFullUrl() {
return overrideResourceIdWithBundleEntryFullUrl;
}
/**
* If set to <code>true</code> (which is the default), the Bundle.entry.fullUrl will override the
* Bundle.entry.resource's resource id if the fullUrl is defined. This behavior happens when parsing the source data
* into a Bundle object. Set this to <code>false</code> if this is not the desired behavior (e.g. the client code
* wishes to perform additional validation checks between the fullUrl and the resource id).
*
* @param overrideResourceIdWithBundleEntryFullUrl Set this to <code>false</code> to prevent the parser from
* overriding resource ids with the Bundle.entry.fullUrl (or
* <code>null</code> to apply the default setting from the parser
* options
*/
public void setOverrideResourceIdWithBundleEntryFullUrl(String overrideResourceIdWithBundleEntryFullUrl) {
this.overrideResourceIdWithBundleEntryFullUrl = overrideResourceIdWithBundleEntryFullUrl;
}
public String getSummaryMode() {
return summaryMode;
}
/**
* If set to <code>true</code> (default is <code>false</code>) only elements marked by the FHIR specification as
* being "summary elements" will be included.
*/
public void setSummaryMode(String summaryMode) {
this.summaryMode = summaryMode;
}
/**
 * @return the raw value of the {@code suppressNarratives} option (may be {@code null})
 */
public String getSuppressNarratives() {
    return suppressNarratives;
}
/**
 * If set to <code>true</code> (default is <code>false</code>), narratives will not be included in the encoded
 * values.
 *
 * @param suppressNarratives the raw option value (a boolean-like String, or <code>null</code>)
 */
public void setSuppressNarratives(String suppressNarratives) {
    this.suppressNarratives = suppressNarratives;
}
/**
 * @return the configured paths as a single comma-separated String (may be {@code null})
 */
public String getDontStripVersionsFromReferencesAtPaths() {
    return dontStripVersionsFromReferencesAtPaths;
}
/**
 * If supplied value(s), any resource references at the specified paths will have their resource versions encoded
 * instead of being automatically stripped during the encoding process. This setting has no effect on the parsing
 * process. Multiple elements can be separated by comma when using String parameter.
 * <p>
 * This method provides a finer-grained level of control than {@link #setStripVersionsFromReferences(String)} and
 * any paths specified by this method will be encoded even if {@link #setStripVersionsFromReferences(String)} has
 * been set to <code>true</code> (which is the default)
 * </p>
 *
 * @param dontStripVersionsFromReferencesAtPaths A collection of paths for which the resource versions will not be
 *                                               removed automatically when serializing, e.g.
 *                                               "Patient.managingOrganization" or "AuditEvent.object.reference".
 *                                               Note that only resource name and field names with dots separating
 *                                               is allowed here (no repetition indicators, FluentPath expressions,
 *                                               etc.). Set to <code>null</code> to use the value set in the parser
 *                                               options
 */
public void setDontStripVersionsFromReferencesAtPaths(List<String> dontStripVersionsFromReferencesAtPaths) {
    // The Javadoc documents null as "use the parser options default", but String.join throws
    // NullPointerException on a null iterable — map null to null explicitly.
    this.dontStripVersionsFromReferencesAtPaths = dontStripVersionsFromReferencesAtPaths == null
        ? null
        : String.join(",", dontStripVersionsFromReferencesAtPaths);
}
/**
 * If supplied value(s), any resource references at the specified paths will have their resource versions encoded
 * instead of being automatically stripped during the encoding process. This setting has no effect on the parsing
 * process. Multiple elements can be separated by comma when using String parameter.
 * <p>
 * This method provides a finer-grained level of control than {@link #setStripVersionsFromReferences(String)} and
 * any paths specified by this method will be encoded even if {@link #setStripVersionsFromReferences(String)} has
 * been set to <code>true</code> (which is the default)
 * </p>
 *
 * @param dontStripVersionsFromReferencesAtPaths A comma-separated list of paths for which the resource versions
 *                                               will not be removed automatically when serializing, e.g.
 *                                               "Patient.managingOrganization" or "AuditEvent.object.reference".
 *                                               Note that only resource name and field names with dots separating
 *                                               is allowed here (no repetition indicators, FluentPath expressions,
 *                                               etc.). Set to <code>null</code> to use the value set in the parser
 *                                               options
 */
public void setDontStripVersionsFromReferencesAtPaths(String dontStripVersionsFromReferencesAtPaths) {
    this.dontStripVersionsFromReferencesAtPaths = dontStripVersionsFromReferencesAtPaths;
}
/**
 * @return the configured {@code contentTypeHeader} option value (may be {@code null})
 */
public String getContentTypeHeader() {
    return contentTypeHeader;
}
/**
 * @param contentTypeHeader the raw value for the {@code contentTypeHeader} option
 *                          (semantics defined by the consuming component — TODO confirm)
 */
public void setContentTypeHeader(String contentTypeHeader) {
    this.contentTypeHeader = contentTypeHeader;
}
/**
* {@code AbstractBuilder} is the base builder for {@link FhirDataformat}.
*/
@XmlTransient
@SuppressWarnings("unchecked")
protected abstract static
|
names
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java
|
{
"start": 5496,
"end": 110999
}
|
class ____ extends ESTestCase {
/**
 * Convenience overload that randomizes the multi-project support flag.
 */
private PersistedClusterStateService newPersistedClusterStateService(NodeEnvironment nodeEnvironment) {
    return newPersistedClusterStateService(nodeEnvironment, () -> randomBoolean());
}
/**
 * Builds a {@link PersistedClusterStateService} for the given node environment, sometimes
 * randomizing the document page size setting to exercise small page sizes as well as the default.
 */
private PersistedClusterStateService newPersistedClusterStateService(
    NodeEnvironment nodeEnvironment,
    BooleanSupplier supportsMultipleProjects
) {
    final Settings.Builder settingsBuilder = Settings.builder();
    if (randomBoolean()) {
        settingsBuilder.put(
            PersistedClusterStateService.DOCUMENT_PAGE_SIZE.getKey(),
            ByteSizeValue.ofBytes(randomLongBetween(1, 1024))
        );
    }
    final ClusterSettings clusterSettings = new ClusterSettings(settingsBuilder.build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
    return new PersistedClusterStateService(nodeEnvironment, xContentRegistry(), clusterSettings, () -> 0L, supportsMultipleProjects);
}
/**
 * Verifies that a term written via {@code writeFullStateAndCommit} is reported by
 * {@code loadBestOnDiskState} both while the writer is still open and after it is closed.
 */
public void testPersistsAndReloadsTerm() throws IOException {
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
        final PersistedClusterStateService persistedClusterStateService = newPersistedClusterStateService(nodeEnvironment);
        final long newTerm = randomNonNegativeLong();
        // a fresh on-disk state starts at term 0
        assertThat(persistedClusterStateService.loadBestOnDiskState().currentTerm, equalTo(0L));
        try (Writer writer = persistedClusterStateService.createWriter()) {
            writer.writeFullStateAndCommit(newTerm, ClusterState.EMPTY_STATE);
            // loadBestOnDiskState(false): the flag is defined elsewhere; presumably relaxes a
            // consistency check so the state can be read while the writer is still open — TODO confirm
            assertThat(persistedClusterStateService.loadBestOnDiskState(false).currentTerm, equalTo(newTerm));
        }
        assertThat(persistedClusterStateService.loadBestOnDiskState().currentTerm, equalTo(newTerm));
    }
}
/**
 * Verifies that the cluster UUID, its committed flag and the metadata version survive a full
 * write/reload round trip, and that a second write with a bumped version is reloaded correctly.
 */
public void testPersistsAndReloadsGlobalMetadata() throws IOException {
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
        final PersistedClusterStateService persistedClusterStateService = newPersistedClusterStateService(nodeEnvironment);
        final String clusterUUID = UUIDs.randomBase64UUID(random());
        final long version = randomLongBetween(1L, Long.MAX_VALUE);
        ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
        try (Writer writer = persistedClusterStateService.createWriter()) {
            writer.writeFullStateAndCommit(
                0L,
                ClusterState.builder(clusterState)
                    .metadata(
                        Metadata.builder(clusterState.metadata()).clusterUUID(clusterUUID).clusterUUIDCommitted(true).version(version)
                    )
                    .incrementVersion()
                    .build()
            );
            // reload while the writer is still open and check the global metadata round-tripped
            clusterState = loadPersistedClusterState(persistedClusterStateService);
            assertThat(clusterState.metadata().clusterUUID(), equalTo(clusterUUID));
            assertTrue(clusterState.metadata().clusterUUIDCommitted());
            assertThat(clusterState.metadata().version(), equalTo(version));
        }
        try (Writer writer = persistedClusterStateService.createWriter()) {
            // write again with a higher metadata version
            writer.writeFullStateAndCommit(
                0L,
                ClusterState.builder(clusterState)
                    .metadata(
                        Metadata.builder(clusterState.metadata())
                            .clusterUUID(clusterUUID)
                            .clusterUUIDCommitted(true)
                            .version(version + 1)
                    )
                    .incrementVersion()
                    .build()
            );
        }
        clusterState = loadPersistedClusterState(persistedClusterStateService);
        assertThat(clusterState.metadata().clusterUUID(), equalTo(clusterUUID));
        assertTrue(clusterState.metadata().clusterUUIDCommitted());
        assertThat(clusterState.metadata().version(), equalTo(version + 1));
    }
}
/**
 * Writes {@code clusterState} either as a full snapshot or as an increment on top of
 * {@code previousState}, chosen at random. A full write is forced when the term changed or when
 * no full state has been written yet (an incremental write requires a prior full write).
 */
private static void writeState(Writer writer, long currentTerm, ClusterState clusterState, ClusterState previousState)
    throws IOException {
    if (randomBoolean() || clusterState.term() != previousState.term() || writer.fullStateWritten == false) {
        writer.writeFullStateAndCommit(currentTerm, clusterState);
    } else {
        writer.writeIncrementalStateAndCommit(currentTerm, previousState, clusterState);
    }
}
/**
 * Writes a stale (term, version) state to all data paths, then a fresher state to just one path,
 * optionally deletes one of the stale-only paths, and asserts that loading picks the freshest
 * state across the surviving paths.
 */
public void testLoadsFreshestState() throws IOException {
    final Path[] dataPaths = createDataPaths();
    final long freshTerm = randomLongBetween(1L, Long.MAX_VALUE);
    final long staleTerm = randomBoolean() ? freshTerm : randomLongBetween(1L, freshTerm);
    final long freshVersion = randomLongBetween(2L, Long.MAX_VALUE);
    // if the terms are equal the stale version must be strictly lower; otherwise any version will do
    final long staleVersion = staleTerm == freshTerm ? randomLongBetween(1L, freshVersion - 1) : randomLongBetween(1L, Long.MAX_VALUE);
    final HashSet<Path> unimportantPaths = Arrays.stream(dataPaths).collect(Collectors.toCollection(HashSet::new));
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths)) {
        final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            // stale state goes to every data path
            writeState(
                writer,
                staleTerm,
                ClusterState.builder(clusterState)
                    .version(staleVersion)
                    .metadata(
                        Metadata.builder(clusterState.metadata())
                            .coordinationMetadata(
                                CoordinationMetadata.builder(clusterState.coordinationMetadata()).term(staleTerm).build()
                            )
                    )
                    .build(),
                clusterState
            );
        }
    }
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(new Path[] { randomFrom(dataPaths) })) {
        // the fresh state goes to one randomly-chosen path, which therefore must not be deleted below
        unimportantPaths.remove(nodeEnvironment.nodeDataPaths()[0]);
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            writeState(
                writer,
                freshTerm,
                ClusterState.builder(clusterState)
                    .version(freshVersion)
                    .metadata(
                        Metadata.builder(clusterState.metadata())
                            .coordinationMetadata(
                                CoordinationMetadata.builder(clusterState.coordinationMetadata()).term(freshTerm).build()
                            )
                    )
                    .build(),
                clusterState
            );
        }
    }
    if (randomBoolean() && unimportantPaths.isEmpty() == false) {
        IOUtils.rm(randomFrom(unimportantPaths));
    }
    // verify that the freshest state is chosen
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths)) {
        final PersistedClusterStateService.OnDiskState onDiskState = newPersistedClusterStateService(nodeEnvironment)
            .loadBestOnDiskState();
        final ClusterState clusterState = clusterStateFromMetadata(onDiskState.lastAcceptedVersion, onDiskState.metadata);
        assertThat(clusterState.term(), equalTo(freshTerm));
        assertThat(clusterState.version(), equalTo(freshVersion));
    }
}
/**
 * Writes state under two different node IDs to two sets of data paths, then verifies that both
 * {@code NodeEnvironment} construction and {@code loadBestOnDiskState} reject the combined paths
 * with a {@code CorruptStateException} naming both node IDs and the offending path.
 */
public void testFailsOnMismatchedNodeIds() throws IOException {
    final Path[] dataPaths1 = createDataPaths();
    final Path[] dataPaths2 = createDataPaths();
    final String[] nodeIds = new String[2];
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths1)) {
        nodeIds[0] = nodeEnvironment.nodeId();
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            writer.writeFullStateAndCommit(
                0L,
                ClusterState.builder(clusterState).version(randomLongBetween(1L, Long.MAX_VALUE)).build()
            );
        }
    }
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths2)) {
        nodeIds[1] = nodeEnvironment.nodeId();
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            writer.writeFullStateAndCommit(
                0L,
                ClusterState.builder(clusterState).version(randomLongBetween(1L, Long.MAX_VALUE)).build()
            );
        }
    }
    // remove the legacy node metadata files so the mismatch is detected via the persisted cluster state
    NodeMetadata.FORMAT.cleanupOldFiles(Long.MAX_VALUE, dataPaths2);
    final Path[] combinedPaths = Stream.concat(Arrays.stream(dataPaths1), Arrays.stream(dataPaths2)).toArray(Path[]::new);
    final String failure = expectThrows(CorruptStateException.class, () -> newNodeEnvironment(combinedPaths)).getMessage();
    assertThat(
        failure,
        allOf(containsString("unexpected node ID in metadata"), containsString(nodeIds[0]), containsString(nodeIds[1]))
    );
    assertTrue(
        "[" + failure + "] should match " + Arrays.toString(dataPaths2),
        Arrays.stream(dataPaths2).anyMatch(p -> failure.contains(p.toString()))
    );
    // verify that loadBestOnDiskState has same check
    final String message = expectThrows(
        CorruptStateException.class,
        () -> new PersistedClusterStateService(
            combinedPaths,
            nodeIds[0],
            xContentRegistry(),
            new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
            () -> 0L,
            ESTestCase::randomBoolean
        ).loadBestOnDiskState()
    ).getMessage();
    assertThat(message, allOf(containsString("belongs to a node with ID"), containsString(nodeIds[0]), containsString(nodeIds[1])));
    assertTrue(
        "[" + message + "] should match " + Arrays.toString(dataPaths2),
        Arrays.stream(dataPaths2).anyMatch(p -> message.contains(p.toString()))
    );
}
/**
 * Commits two different cluster UUIDs to two sets of data paths (sharing one node ID) and
 * verifies that loading the combined paths fails with a {@code CorruptStateException} that names
 * both UUIDs and both sets of paths.
 */
public void testFailsOnMismatchedCommittedClusterUUIDs() throws IOException {
    final Path[] dataPaths1 = createDataPaths();
    final Path[] dataPaths2 = createDataPaths();
    final Path[] combinedPaths = Stream.concat(Arrays.stream(dataPaths1), Arrays.stream(dataPaths2)).toArray(Path[]::new);
    final String clusterUUID1 = UUIDs.randomBase64UUID(random());
    final String clusterUUID2 = UUIDs.randomBase64UUID(random());
    // first establish consistent node IDs and write initial metadata
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(combinedPaths)) {
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            assertFalse(clusterState.metadata().clusterUUIDCommitted());
            writer.writeFullStateAndCommit(0L, clusterState);
        }
    }
    // commit clusterUUID1 on the first set of paths ...
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths1)) {
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            assertFalse(clusterState.metadata().clusterUUIDCommitted());
            writer.writeFullStateAndCommit(
                0L,
                ClusterState.builder(clusterState)
                    .metadata(Metadata.builder(clusterState.metadata()).clusterUUID(clusterUUID1).clusterUUIDCommitted(true).version(1))
                    .incrementVersion()
                    .build()
            );
        }
    }
    // ... and clusterUUID2 on the second set
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths2)) {
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            assertFalse(clusterState.metadata().clusterUUIDCommitted());
            writer.writeFullStateAndCommit(
                0L,
                ClusterState.builder(clusterState)
                    .metadata(Metadata.builder(clusterState.metadata()).clusterUUID(clusterUUID2).clusterUUIDCommitted(true).version(1))
                    .incrementVersion()
                    .build()
            );
        }
    }
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(combinedPaths)) {
        final String message = expectThrows(
            CorruptStateException.class,
            () -> newPersistedClusterStateService(nodeEnvironment).loadBestOnDiskState()
        ).getMessage();
        assertThat(
            message,
            allOf(containsString("mismatched cluster UUIDs in metadata"), containsString(clusterUUID1), containsString(clusterUUID2))
        );
        assertTrue(
            "[" + message + "] should match " + Arrays.toString(dataPaths1),
            Arrays.stream(dataPaths1).anyMatch(p -> message.contains(p.toString()))
        );
        assertTrue(
            "[" + message + "] should match " + Arrays.toString(dataPaths2),
            Arrays.stream(dataPaths2).anyMatch(p -> message.contains(p.toString()))
        );
    }
}
/**
 * Writes the freshest accepted state under a stale current term on one set of paths while another
 * set carries a fresher current term, and verifies that loading the combined paths fails with
 * "inconsistent terms found" naming both current terms and both sets of paths.
 */
public void testFailsIfFreshestStateIsInStaleTerm() throws IOException {
    final Path[] dataPaths1 = createDataPaths();
    final Path[] dataPaths2 = createDataPaths();
    final Path[] combinedPaths = Stream.concat(Arrays.stream(dataPaths1), Arrays.stream(dataPaths2)).toArray(Path[]::new);
    final long staleCurrentTerm = randomLongBetween(1L, Long.MAX_VALUE - 1);
    final long freshCurrentTerm = randomLongBetween(staleCurrentTerm + 1, Long.MAX_VALUE);
    final long freshTerm = randomLongBetween(1L, Long.MAX_VALUE);
    final long staleTerm = randomBoolean() ? freshTerm : randomLongBetween(1L, freshTerm);
    final long freshVersion = randomLongBetween(2L, Long.MAX_VALUE);
    final long staleVersion = staleTerm == freshTerm ? randomLongBetween(1L, freshVersion - 1) : randomLongBetween(1L, Long.MAX_VALUE);
    // seed all paths with the stale current term and a stale accepted state
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(combinedPaths)) {
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            assertFalse(clusterState.metadata().clusterUUIDCommitted());
            writeState(
                writer,
                staleCurrentTerm,
                ClusterState.builder(clusterState)
                    .metadata(
                        Metadata.builder(clusterState.metadata())
                            .version(1)
                            .coordinationMetadata(
                                CoordinationMetadata.builder(clusterState.coordinationMetadata()).term(staleTerm).build()
                            )
                    )
                    .version(staleVersion)
                    .build(),
                clusterState
            );
        }
    }
    // bump the current term on the first set of paths only
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths1)) {
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            writeState(writer, freshCurrentTerm, clusterState, clusterState);
        }
    }
    // write the freshest accepted state on the second set, still under the stale current term
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths2)) {
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final PersistedClusterStateService.OnDiskState onDiskState = newPersistedClusterStateService(nodeEnvironment)
                .loadBestOnDiskState(false);
            final ClusterState clusterState = clusterStateFromMetadata(onDiskState.lastAcceptedVersion, onDiskState.metadata);
            writeState(
                writer,
                onDiskState.currentTerm,
                ClusterState.builder(clusterState)
                    .metadata(
                        Metadata.builder(clusterState.metadata())
                            .version(2)
                            .coordinationMetadata(
                                CoordinationMetadata.builder(clusterState.coordinationMetadata()).term(freshTerm).build()
                            )
                    )
                    .version(freshVersion)
                    .build(),
                clusterState
            );
        }
    }
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(combinedPaths)) {
        final String message = expectThrows(
            CorruptStateException.class,
            () -> newPersistedClusterStateService(nodeEnvironment).loadBestOnDiskState()
        ).getMessage();
        assertThat(
            message,
            allOf(
                containsString("inconsistent terms found"),
                containsString(Long.toString(staleCurrentTerm)),
                containsString(Long.toString(freshCurrentTerm))
            )
        );
        assertTrue(
            "[" + message + "] should match " + Arrays.toString(dataPaths1),
            Arrays.stream(dataPaths1).anyMatch(p -> message.contains(p.toString()))
        );
        assertTrue(
            "[" + message + "] should match " + Arrays.toString(dataPaths2),
            Arrays.stream(dataPaths2).anyMatch(p -> message.contains(p.toString()))
        );
    }
}
/**
 * Injects an {@code IOException} from {@code Directory#createOutput} (i.e. during flush, before
 * the commit point) and verifies that the write fails with the simulated message rather than
 * crashing the process.
 */
public void testFailsGracefullyOnExceptionDuringFlush() throws IOException {
    final AtomicBoolean throwException = new AtomicBoolean();
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
        final PersistedClusterStateService persistedClusterStateService = new PersistedClusterStateService(
            nodeEnvironment,
            xContentRegistry(),
            new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
            () -> 0L,
            ESTestCase::randomBoolean
        ) {
            @Override
            protected Directory createDirectory(Path path) throws IOException {
                return new FilterDirectory(super.createDirectory(path)) {
                    @Override
                    public IndexOutput createOutput(String name, IOContext context) throws IOException {
                        // only fail once the test has armed the flag below
                        if (throwException.get()) {
                            throw new IOException("simulated");
                        }
                        return super.createOutput(name, context);
                    }
                };
            }
        };
        try (Writer writer = persistedClusterStateService.createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
            final long newTerm = randomNonNegativeLong();
            final ClusterState newState = ClusterState.builder(clusterState)
                .metadata(
                    Metadata.builder(clusterState.metadata())
                        .clusterUUID(UUIDs.randomBase64UUID(random()))
                        .clusterUUIDCommitted(true)
                        .version(randomLongBetween(1L, Long.MAX_VALUE))
                )
                .incrementVersion()
                .build();
            throwException.set(true);
            assertThat(
                expectThrows(IllegalStateException.class, IOException.class, () -> writeState(writer, newTerm, newState, clusterState))
                    .getMessage(),
                containsString("simulated")
            );
        }
    }
}
/**
 * Verifies that a fatal {@link OutOfMemoryError} thrown from the underlying directory during a
 * commit closes the writer, and that a fresh writer can subsequently be created.
 */
public void testClosesWriterOnFatalError() throws IOException {
    final AtomicBoolean throwException = new AtomicBoolean();
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
        final PersistedClusterStateService persistedClusterStateService = new PersistedClusterStateService(
            nodeEnvironment,
            xContentRegistry(),
            new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
            () -> 0L,
            ESTestCase::randomBoolean
        ) {
            @Override
            protected Directory createDirectory(Path path) throws IOException {
                return new FilterDirectory(super.createDirectory(path)) {
                    @Override
                    public void sync(Collection<String> names) throws IOException {
                        // Fail only once the test has armed the flag, consistent with the other
                        // fault-injection tests; previously throwException was set but never read
                        // here, so the error fired unconditionally and syncs were never delegated.
                        if (throwException.get()) {
                            throw new OutOfMemoryError("simulated");
                        }
                        super.sync(names);
                    }
                };
            }
        };
        try (Writer writer = persistedClusterStateService.createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
            final long newTerm = randomNonNegativeLong();
            final ClusterState newState = ClusterState.builder(clusterState)
                .metadata(
                    Metadata.builder(clusterState.metadata())
                        .clusterUUID(UUIDs.randomBase64UUID(random()))
                        .clusterUUIDCommitted(true)
                        .version(randomLongBetween(1L, Long.MAX_VALUE))
                )
                .incrementVersion()
                .build();
            throwException.set(true);
            // either write path must surface the simulated fatal error ...
            assertThat(expectThrows(OutOfMemoryError.class, () -> {
                if (randomBoolean()) {
                    writeState(writer, newTerm, newState, clusterState);
                } else {
                    writer.commit(
                        newTerm,
                        newState.version(),
                        newState.metadata().getProject().oldestIndexVersion(),
                        newState.metadata().clusterUUID(),
                        newState.metadata().clusterUUIDCommitted()
                    );
                }
            }).getMessage(), containsString("simulated"));
            // ... and the writer must have closed itself in response
            assertFalse(writer.isOpen());
        }
        // noinspection EmptyTryBlock - we are just checking that opening the writer again doesn't throw any exceptions
        try (Writer ignored = persistedClusterStateService.createWriter()) {}
    }
}
/**
 * Verifies that an {@link IOException} thrown while renaming the segments file (i.e. at the point
 * the commit becomes visible) escalates to an {@link IOError}, closes the writer, and still
 * permits a fresh writer to be created.
 */
public void testCrashesWithIOErrorOnCommitFailure() throws IOException {
    final AtomicBoolean throwException = new AtomicBoolean();
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
        final PersistedClusterStateService persistedClusterStateService = new PersistedClusterStateService(
            nodeEnvironment,
            xContentRegistry(),
            new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
            () -> 0L,
            ESTestCase::randomBoolean
        ) {
            @Override
            protected Directory createDirectory(Path path) throws IOException {
                return new FilterDirectory(super.createDirectory(path)) {
                    @Override
                    public void rename(String source, String dest) throws IOException {
                        if (throwException.get() && dest.startsWith("segments")) {
                            throw new IOException("simulated");
                        }
                        // Delegate when not injecting a failure; without this the wrapped
                        // directory silently dropped every rename, not just the faulty ones.
                        super.rename(source, dest);
                    }
                };
            }
        };
        try (Writer writer = persistedClusterStateService.createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
            final long newTerm = randomNonNegativeLong();
            final ClusterState newState = ClusterState.builder(clusterState)
                .metadata(
                    Metadata.builder(clusterState.metadata())
                        .clusterUUID(UUIDs.randomBase64UUID(random()))
                        .clusterUUIDCommitted(true)
                        .version(randomLongBetween(1L, Long.MAX_VALUE))
                )
                .incrementVersion()
                .build();
            throwException.set(true);
            // a failure at the segments rename stage is treated as fatal: IOError, not IOException
            assertThat(expectThrows(IOError.class, () -> {
                if (randomBoolean()) {
                    writeState(writer, newTerm, newState, clusterState);
                } else {
                    writer.commit(
                        newTerm,
                        newState.version(),
                        newState.metadata().getProject().oldestIndexVersion(),
                        newState.metadata().clusterUUID(),
                        newState.metadata().clusterUUIDCommitted()
                    );
                }
            }).getMessage(), containsString("simulated"));
            assertFalse(writer.isOpen());
        }
        // noinspection EmptyTryBlock - we are just checking that opening the writer again doesn't throw any exceptions
        try (Writer ignored = persistedClusterStateService.createWriter()) {}
    }
}
/**
 * Simulates hand-surgery on the metadata Lucene index (recreating it empty on one path) and
 * verifies that loading fails with "no global metadata found" naming the broken path.
 */
public void testFailsIfGlobalMetadataIsMissing() throws IOException {
    // if someone attempted surgery on the metadata index by hand, e.g. deleting broken segments, then maybe the global metadata
    // isn't there any more
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            writeState(
                writer,
                0L,
                ClusterState.builder(clusterState).version(randomLongBetween(1L, Long.MAX_VALUE)).build(),
                clusterState
            );
        }
        final Path brokenPath = randomFrom(nodeEnvironment.nodeDataPaths());
        // recreate the Lucene index on the chosen path as empty, wiping the global metadata document
        try (Directory directory = newFSDirectory(brokenPath.resolve(METADATA_DIRECTORY_NAME))) {
            final IndexWriterConfig indexWriterConfig = new IndexWriterConfig();
            indexWriterConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
            try (IndexWriter indexWriter = new IndexWriter(directory, indexWriterConfig)) {
                indexWriter.commit();
            }
        }
        final String message = expectThrows(
            CorruptStateException.class,
            () -> newPersistedClusterStateService(nodeEnvironment).loadBestOnDiskState()
        ).getMessage();
        assertThat(message, allOf(containsString("no global metadata found"), containsString(brokenPath.toString())));
    }
}
/**
 * Simulates hand-surgery that copies one path's metadata index into another (duplicating the
 * global metadata document) and verifies that loading fails with "duplicate global metadata
 * found" naming the broken path.
 */
public void testFailsIfGlobalMetadataIsDuplicated() throws IOException {
    // if someone attempted surgery on the metadata index by hand, e.g. deleting broken segments, then maybe the global metadata
    // is duplicated
    final Path[] dataPaths1 = createDataPaths();
    final Path[] dataPaths2 = createDataPaths();
    final Path[] combinedPaths = Stream.concat(Arrays.stream(dataPaths1), Arrays.stream(dataPaths2)).toArray(Path[]::new);
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(combinedPaths)) {
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            writeState(
                writer,
                0L,
                ClusterState.builder(clusterState).version(randomLongBetween(1L, Long.MAX_VALUE)).build(),
                clusterState
            );
        }
        final Path brokenPath = randomFrom(nodeEnvironment.nodeDataPaths());
        final Path dupPath = randomValueOtherThan(brokenPath, () -> randomFrom(nodeEnvironment.nodeDataPaths()));
        // append the documents from dupPath's index into brokenPath's index, duplicating everything
        try (
            Directory directory = newFSDirectory(brokenPath.resolve(METADATA_DIRECTORY_NAME));
            Directory dupDirectory = newFSDirectory(dupPath.resolve(METADATA_DIRECTORY_NAME))
        ) {
            try (IndexWriter indexWriter = new IndexWriter(directory, new IndexWriterConfig())) {
                indexWriter.addIndexes(dupDirectory);
                indexWriter.commit();
            }
        }
        final String message = expectThrows(
            CorruptStateException.class,
            () -> newPersistedClusterStateService(nodeEnvironment).loadBestOnDiskState()
        ).getMessage();
        assertThat(message, allOf(containsString("duplicate global metadata found"), containsString(brokenPath.toString())));
    }
}
/**
 * Simulates hand-surgery that duplicates an index-metadata document (while explicitly keeping
 * the global metadata and mapping documents unique) and verifies that loading fails with
 * "duplicate metadata found" naming the broken path, the index name and its UUID.
 */
public void testFailsIfIndexMetadataIsDuplicated() throws IOException {
    // if someone attempted surgery on the metadata index by hand, e.g. deleting broken segments, then maybe some index metadata
    // is duplicated
    final Path[] dataPaths1 = createDataPaths();
    final Path[] dataPaths2 = createDataPaths();
    final Path[] combinedPaths = Stream.concat(Arrays.stream(dataPaths1), Arrays.stream(dataPaths2)).toArray(Path[]::new);
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(combinedPaths)) {
        final String indexUUID = UUIDs.randomBase64UUID(random());
        final String indexName = randomAlphaOfLength(10);
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            writeState(
                writer,
                0L,
                ClusterState.builder(clusterState)
                    .metadata(
                        Metadata.builder(clusterState.metadata())
                            .version(1L)
                            .coordinationMetadata(CoordinationMetadata.builder(clusterState.coordinationMetadata()).term(1L).build())
                            .put(
                                IndexMetadata.builder(indexName)
                                    .version(1L)
                                    .putMapping(randomMappingMetadataOrNull())
                                    .settings(
                                        indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
                                            .put(IndexMetadata.SETTING_INDEX_UUID, indexUUID)
                                    )
                            )
                    )
                    .incrementVersion()
                    .build(),
                clusterState
            );
        }
        final Path brokenPath = randomFrom(nodeEnvironment.nodeDataPaths());
        final Path dupPath = randomValueOtherThan(brokenPath, () -> randomFrom(nodeEnvironment.nodeDataPaths()));
        try (
            Directory directory = newFSDirectory(brokenPath.resolve(METADATA_DIRECTORY_NAME));
            Directory dupDirectory = newFSDirectory(dupPath.resolve(METADATA_DIRECTORY_NAME))
        ) {
            try (IndexWriter indexWriter = new IndexWriter(directory, new IndexWriterConfig())) {
                indexWriter.deleteDocuments(new Term(TYPE_FIELD_NAME, GLOBAL_TYPE_NAME)); // do not duplicate global metadata
                indexWriter.deleteDocuments(new Term(TYPE_FIELD_NAME, MAPPING_TYPE_NAME)); // do not duplicate mappings
                indexWriter.addIndexes(dupDirectory);
                indexWriter.commit();
            }
        }
        final String message = expectThrows(
            CorruptStateException.class,
            () -> newPersistedClusterStateService(nodeEnvironment).loadBestOnDiskState()
        ).getMessage();
        assertThat(
            message,
            allOf(
                containsString("duplicate metadata found"),
                containsString(brokenPath.toString()),
                containsString(indexName),
                containsString(indexUUID)
            )
        );
    }
}
/**
 * Verifies that index metadata is re-persisted if and only if its version or the coordination
 * term changes: an update with an unchanged index-metadata version is not written (the stale
 * on-disk copy is reloaded), while a version bump or a term change is persisted.
 */
public void testPersistsAndReloadsIndexMetadataIffVersionOrTermChanges() throws IOException {
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
        final PersistedClusterStateService persistedClusterStateService = newPersistedClusterStateService(nodeEnvironment, () -> false);
        final long globalVersion = randomLongBetween(1L, Long.MAX_VALUE);
        final String indexUUID = UUIDs.randomBase64UUID(random());
        final long indexMetadataVersion = randomLongBetween(1L, Long.MAX_VALUE);
        final long oldTerm = randomLongBetween(1L, Long.MAX_VALUE - 1);
        final long newTerm = randomLongBetween(oldTerm + 1, Long.MAX_VALUE);
        try (Writer writer = persistedClusterStateService.createWriter()) {
            ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
            writeState(
                writer,
                0L,
                ClusterState.builder(clusterState)
                    .metadata(
                        Metadata.builder(clusterState.metadata())
                            .version(globalVersion)
                            .coordinationMetadata(
                                CoordinationMetadata.builder(clusterState.coordinationMetadata()).term(oldTerm).build()
                            )
                            .put(
                                IndexMetadata.builder("test")
                                    .version(indexMetadataVersion - 1) // -1 because it's incremented in .put()
                                    .putMapping(randomMappingMetadataOrNull())
                                    .settings(
                                        indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
                                            .put(IndexMetadata.SETTING_INDEX_UUID, indexUUID)
                                    )
                            )
                    )
                    .incrementVersion()
                    .build(),
                clusterState
            );
            clusterState = loadPersistedClusterState(persistedClusterStateService);
            IndexMetadata indexMetadata = clusterState.metadata().getProject().index("test");
            assertThat(indexMetadata.getIndexUUID(), equalTo(indexUUID));
            assertThat(indexMetadata.getVersion(), equalTo(indexMetadataVersion));
            assertThat(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(indexMetadata.getSettings()), equalTo(0));
            // ensure we do not wastefully persist the same index metadata version by making a bad update with the same version
            writer.writeIncrementalStateAndCommit(
                0L,
                clusterState,
                ClusterState.builder(clusterState)
                    .metadata(
                        Metadata.builder(clusterState.metadata())
                            .put(
                                IndexMetadata.builder(indexMetadata)
                                    .putMapping(indexMetadata.mapping())
                                    .settings(
                                        Settings.builder()
                                            .put(indexMetadata.getSettings())
                                            .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
                                    )
                                    .build(),
                                false
                            )
                    )
                    .incrementVersion()
                    .build()
            );
            // same index-metadata version => the replica-count change must NOT have been persisted
            clusterState = loadPersistedClusterState(persistedClusterStateService);
            indexMetadata = clusterState.metadata().getProject().index("test");
            assertThat(indexMetadata.getIndexUUID(), equalTo(indexUUID));
            assertThat(indexMetadata.getVersion(), equalTo(indexMetadataVersion));
            assertThat(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(indexMetadata.getSettings()), equalTo(0));
            // ensure that we do persist the same index metadata version by making an update with a higher version
            writeState(
                writer,
                0L,
                ClusterState.builder(clusterState)
                    .metadata(
                        Metadata.builder(clusterState.metadata())
                            .put(
                                IndexMetadata.builder(indexMetadata)
                                    .putMapping(randomMappingMetadataOrNull())
                                    .settings(
                                        Settings.builder()
                                            .put(indexMetadata.getSettings())
                                            .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2)
                                    )
                                    .build(),
                                true
                            )
                    )
                    .incrementVersion()
                    .build(),
                clusterState
            );
            clusterState = loadPersistedClusterState(persistedClusterStateService);
            indexMetadata = clusterState.metadata().getProject().index("test");
            assertThat(indexMetadata.getVersion(), equalTo(indexMetadataVersion + 1));
            assertThat(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(indexMetadata.getSettings()), equalTo(2));
            // ensure that we also persist the index metadata when the term changes
            writeState(
                writer,
                0L,
                ClusterState.builder(clusterState)
                    .metadata(
                        Metadata.builder(clusterState.metadata())
                            .coordinationMetadata(
                                CoordinationMetadata.builder(clusterState.coordinationMetadata()).term(newTerm).build()
                            )
                            .put(
                                IndexMetadata.builder(indexMetadata)
                                    .putMapping(randomMappingMetadataOrNull())
                                    .settings(
                                        Settings.builder()
                                            .put(indexMetadata.getSettings())
                                            .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 3)
                                    )
                                    .build(),
                                false
                            )
                    )
                    .incrementVersion()
                    .build(),
                clusterState
            );
        }
        final ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
        final IndexMetadata indexMetadata = clusterState.metadata().getProject().index("test");
        assertThat(indexMetadata.getIndexUUID(), equalTo(indexUUID));
        assertThat(indexMetadata.getVersion(), equalTo(indexMetadataVersion + 1));
        assertThat(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(indexMetadata.getSettings()), equalTo(3));
    }
}
    /**
     * Verifies that index metadata for several indices survives a write/reload cycle, and that a subsequent
     * incremental write correctly applies an index addition, an index update, and an index removal.
     */
    public void testPersistsAndReloadsIndexMetadataForMultipleIndices() throws IOException {
        try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
            final PersistedClusterStateService persistedClusterStateService = newPersistedClusterStateService(nodeEnvironment);
            final long term = randomLongBetween(1L, Long.MAX_VALUE);
            final String addedIndexUuid = UUIDs.randomBase64UUID(random());
            final String updatedIndexUuid = UUIDs.randomBase64UUID(random());
            final String deletedIndexUuid = UUIDs.randomBase64UUID(random());
            // Phase 1: write a state containing the "updated" and "deleted" indices.
            try (Writer writer = persistedClusterStateService.createWriter()) {
                final ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
                writeState(
                    writer,
                    0L,
                    ClusterState.builder(clusterState)
                        .metadata(
                            Metadata.builder(clusterState.metadata())
                                .version(clusterState.metadata().version() + 1)
                                .coordinationMetadata(CoordinationMetadata.builder(clusterState.coordinationMetadata()).term(term).build())
                                .put(
                                    IndexMetadata.builder("updated")
                                        .putMapping(randomMappingMetadataOrNull())
                                        .version(randomLongBetween(0L, Long.MAX_VALUE - 1) - 1) // -1 because it's incremented in .put()
                                        .settings(
                                            indexSettings(1, 1).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
                                                .put(IndexMetadata.SETTING_INDEX_UUID, updatedIndexUuid)
                                        )
                                )
                                .put(
                                    IndexMetadata.builder("deleted")
                                        .putMapping(randomMappingMetadataOrNull())
                                        .version(randomLongBetween(0L, Long.MAX_VALUE - 1) - 1) // -1 because it's incremented in .put()
                                        .settings(
                                            indexSettings(1, 1).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
                                                .put(IndexMetadata.SETTING_INDEX_UUID, deletedIndexUuid)
                                        )
                                )
                        )
                        .incrementVersion()
                        .build(),
                    clusterState
                );
            }
            // Phase 2: reload, verify phase 1, then remove "deleted", bump replicas on "updated", and add "added".
            try (Writer writer = persistedClusterStateService.createWriter()) {
                final ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
                assertThat(clusterState.metadata().getProject().indices().size(), equalTo(2));
                assertThat(clusterState.metadata().getProject().index("updated").getIndexUUID(), equalTo(updatedIndexUuid));
                assertThat(
                    IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(clusterState.metadata().getProject().index("updated").getSettings()),
                    equalTo(1)
                );
                assertThat(clusterState.metadata().getProject().index("deleted").getIndexUUID(), equalTo(deletedIndexUuid));
                writeState(
                    writer,
                    0L,
                    ClusterState.builder(clusterState)
                        .metadata(
                            Metadata.builder(clusterState.metadata())
                                .version(clusterState.metadata().version() + 1)
                                .remove("deleted")
                                .put(
                                    IndexMetadata.builder("updated")
                                        .putMapping(randomMappingMetadataOrNull())
                                        .settings(
                                            Settings.builder()
                                                .put(clusterState.metadata().getProject().index("updated").getSettings())
                                                .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2)
                                        )
                                )
                                .put(
                                    IndexMetadata.builder("added")
                                        .version(randomLongBetween(0L, Long.MAX_VALUE - 1) - 1) // -1 because it's incremented in .put()
                                        .putMapping(randomMappingMetadataOrNull())
                                        .settings(
                                            indexSettings(1, 1).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
                                                .put(IndexMetadata.SETTING_INDEX_UUID, addedIndexUuid)
                                        )
                                )
                        )
                        .incrementVersion()
                        .build(),
                    clusterState
                );
            }
            // Final reload: the incremental write's add/update/remove must all be visible.
            final ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
            assertThat(clusterState.metadata().getProject().indices().size(), equalTo(2));
            assertThat(clusterState.metadata().getProject().index("updated").getIndexUUID(), equalTo(updatedIndexUuid));
            assertThat(
                IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(clusterState.metadata().getProject().index("updated").getSettings()),
                equalTo(2)
            );
            assertThat(clusterState.metadata().getProject().index("added").getIndexUUID(), equalTo(addedIndexUuid));
            assertThat(clusterState.metadata().getProject().index("deleted"), nullValue());
        }
    }
    /**
     * Same add/update/remove round-trip as {@code testPersistsAndReloadsIndexMetadataForMultipleIndices},
     * but with the indices spread across several randomly-generated projects, each of which must be
     * persisted and reloaded independently.
     */
    public void testPersistsAndReloadsIndexMetadataForMultipleIndicesInMultipleProjects() throws IOException {
        try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
            final PersistedClusterStateService persistedClusterStateService = newPersistedClusterStateService(nodeEnvironment);
            final long term = randomLongBetween(1L, Long.MAX_VALUE);
            final List<ProjectId> projectIds = randomList(1, 5, ESTestCase::randomUniqueProjectId);
            // Phase 1: every project gets an "updated" and a "deleted" index, UUIDs derived from the project id.
            try (Writer writer = persistedClusterStateService.createWriter()) {
                final ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
                var builder = Metadata.builder(clusterState.metadata())
                    .version(clusterState.metadata().version() + 1)
                    .coordinationMetadata(CoordinationMetadata.builder(clusterState.coordinationMetadata()).term(term).build());
                for (ProjectId projectId : projectIds) {
                    builder.put(
                        ProjectMetadata.builder(projectId)
                            .put(
                                IndexMetadata.builder("updated")
                                    .putMapping(randomMappingMetadataOrNull())
                                    .version(randomLongBetween(0L, Long.MAX_VALUE - 1) - 1) // -1 because it's incremented in .put()
                                    .settings(
                                        indexSettings(1, 1).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
                                            .put(IndexMetadata.SETTING_INDEX_UUID, projectId.id() + "-updated")
                                    )
                            )
                            .put(
                                IndexMetadata.builder("deleted")
                                    .putMapping(randomMappingMetadataOrNull())
                                    .version(randomLongBetween(0L, Long.MAX_VALUE - 1) - 1) // -1 because it's incremented in .put()
                                    .settings(
                                        indexSettings(1, 1).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
                                            .put(IndexMetadata.SETTING_INDEX_UUID, projectId.id() + "-deleted")
                                    )
                            )
                            .build()
                    );
                }
                writeState(
                    writer,
                    0L,
                    ClusterState.builder(clusterState).metadata(builder.build()).incrementVersion().build(),
                    clusterState
                );
            }
            // Phase 2: verify phase 1 per project, then in each project remove "deleted", update "updated", add "added".
            try (Writer writer = persistedClusterStateService.createWriter()) {
                final ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
                var builder = Metadata.builder(clusterState.metadata()).version(clusterState.metadata().version() + 1);
                // +1 for default project
                assertThat(clusterState.metadata().projects().size(), equalTo(projectIds.size() + 1));
                for (ProjectId projectId : projectIds) {
                    ProjectMetadata project = clusterState.metadata().getProject(projectId);
                    assertThat(project.indices().size(), equalTo(2));
                    assertThat(project.index("updated").getIndexUUID(), equalTo(projectId.id() + "-updated"));
                    assertThat(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(project.index("updated").getSettings()), equalTo(1));
                    assertThat(project.index("deleted").getIndexUUID(), equalTo(projectId.id() + "-deleted"));
                    builder.put(
                        ProjectMetadata.builder(clusterState.metadata().getProject(projectId))
                            .remove("deleted")
                            .put(
                                IndexMetadata.builder("updated")
                                    .putMapping(randomMappingMetadataOrNull())
                                    .settings(
                                        Settings.builder()
                                            .put(project.index("updated").getSettings())
                                            .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2)
                                    )
                            )
                            .put(
                                IndexMetadata.builder("added")
                                    .version(randomLongBetween(0L, Long.MAX_VALUE - 1) - 1) // -1 because it's incremented in .put()
                                    .putMapping(randomMappingMetadataOrNull())
                                    .settings(
                                        indexSettings(1, 1).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
                                            .put(IndexMetadata.SETTING_INDEX_UUID, projectId.id() + "-added")
                                    )
                            )
                    );
                }
                writeState(
                    writer,
                    0L,
                    ClusterState.builder(clusterState).metadata(builder.build()).incrementVersion().build(),
                    clusterState
                );
            }
            // Final reload: each project must reflect its own add/update/remove independently.
            final ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
            for (ProjectId projectId : projectIds) {
                var project = clusterState.metadata().getProject(projectId);
                assertThat(project.indices().size(), equalTo(2));
                assertThat(project.index("updated").getIndexUUID(), equalTo(projectId.id() + "-updated"));
                assertThat(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(project.index("updated").getSettings()), equalTo(2));
                assertThat(project.index("added").getIndexUUID(), equalTo(projectId.id() + "-added"));
                assertThat(project.index("deleted"), nullValue());
            }
        }
    }
public void testReloadsMetadataAcrossMultipleSegments() throws IOException {
try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
final PersistedClusterStateService persistedClusterStateService = newPersistedClusterStateService(nodeEnvironment);
final int writes = between(5, 20);
final List<Index> indices = new ArrayList<>(writes);
try (Writer writer = persistedClusterStateService.createWriter()) {
for (int i = 0; i < writes; i++) {
final Index index = new Index("test-" + i, UUIDs.randomBase64UUID(random()));
indices.add(index);
final ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
writeState(
writer,
0L,
ClusterState.builder(clusterState)
.metadata(
Metadata.builder(clusterState.metadata())
.version(i + 2)
.put(
IndexMetadata.builder(index.getName())
.putMapping(randomMappingMetadataOrNull())
.settings(
indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID())
)
)
)
.incrementVersion()
.build(),
clusterState
);
}
}
final ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
for (Index index : indices) {
final IndexMetadata indexMetadata = clusterState.metadata().getProject().index(index.getName());
assertThat(indexMetadata.getIndexUUID(), equalTo(index.getUUID()));
}
}
}
    /**
     * Checks two properties of the on-disk document format: (1) reloading succeeds even if the Lucene
     * documents are rewritten in a shuffled order, and (2) breaking the pagination invariants of a single
     * document (wrong page number, wrong last-page flag, or a missing page) is detected as corruption.
     */
    public void testHandlesShuffledDocuments() throws IOException {
        final Path dataPath = createTempDir();
        try (NodeEnvironment nodeEnvironment = newNodeEnvironment(new Path[] { dataPath })) {
            final PersistedClusterStateService persistedClusterStateService = newPersistedClusterStateService(nodeEnvironment);
            final Metadata.Builder metadata = Metadata.builder();
            for (int i = between(5, 20); i >= 0; i--) {
                metadata.put(
                    IndexMetadata.builder("test-" + i)
                        .putMapping(randomMappingMetadataOrNull())
                        .settings(
                            indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
                                .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random()))
                        )
                );
            }
            final Settings.Builder persistentSettings = Settings.builder();
            persistentSettings.put(
                PersistedClusterStateService.SLOW_WRITE_LOGGING_THRESHOLD.getKey(),
                TimeValue.timeValueMillis(randomLongBetween(0, 10000))
            );
            metadata.persistentSettings(persistentSettings.build());
            final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).build();
            try (Writer writer = persistedClusterStateService.createWriter()) {
                writer.writeFullStateAndCommit(0L, clusterState);
            }
            // Read back the raw Lucene documents plus the commit user data so we can rewrite them ourselves.
            final List<Document> documents = new ArrayList<>();
            final Map<String, String> commitUserData;
            try (
                Directory directory = new NIOFSDirectory(dataPath.resolve(METADATA_DIRECTORY_NAME));
                DirectoryReader reader = DirectoryReader.open(directory)
            ) {
                commitUserData = reader.getIndexCommit().getUserData();
                forEachDocument(reader, Set.of(GLOBAL_TYPE_NAME, MAPPING_TYPE_NAME, INDEX_TYPE_NAME), documents::add);
            }
            // Shuffled document order must not affect the reloaded state.
            Randomness.shuffle(documents);
            writeDocumentsAndCommit(dataPath.resolve(METADATA_DIRECTORY_NAME), commitUserData, documents);
            final ClusterState loadedState = loadPersistedClusterState(persistedClusterStateService);
            assertEquals(clusterState.metadata().getProject().indices(), loadedState.metadata().getProject().indices());
            assertEquals(clusterState.metadata().persistentSettings(), loadedState.metadata().persistentSettings());
            // Now corrupt one of the docs, breaking pagination invariants, and ensure it yields a CorruptStateException
            final int corruptIndex = between(0, documents.size() - 1);
            final Document corruptDocument = documents.get(corruptIndex);
            final int corruptDocPage = corruptDocument.getField(PAGE_FIELD_NAME).numericValue().intValue();
            final boolean corruptDocIsLastPage = corruptDocument.getField(LAST_PAGE_FIELD_NAME).numericValue().intValue() == IS_LAST_PAGE;
            final boolean isOnlyPageForIndex = corruptDocument.getField(TYPE_FIELD_NAME).stringValue().equals(INDEX_TYPE_NAME)
                && corruptDocPage == 0
                && corruptDocIsLastPage;
            final boolean isOnlyPageForMapping = corruptDocument.getField(TYPE_FIELD_NAME).stringValue().equals(MAPPING_TYPE_NAME)
                && corruptDocPage == 0
                && corruptDocIsLastPage;
            if (isOnlyPageForIndex == false // don't remove the only doc for an index, this just loses the index and doesn't corrupt
                && isOnlyPageForMapping == false // similarly, don't remove the only doc for a mapping, this causes an AssertionError
                && rarely()) {
                documents.remove(corruptIndex);
            } else {
                // Corrupt either the page number or the last-page flag of the chosen document.
                if (randomBoolean()) {
                    corruptDocument.removeFields(PAGE_FIELD_NAME);
                    corruptDocument.add(
                        new StoredField(PAGE_FIELD_NAME, randomValueOtherThan(corruptDocPage, () -> between(0, corruptDocPage + 10)))
                    );
                } else {
                    corruptDocument.removeFields(LAST_PAGE_FIELD_NAME);
                    corruptDocument.add(new StoredField(LAST_PAGE_FIELD_NAME, corruptDocIsLastPage ? IS_NOT_LAST_PAGE : IS_LAST_PAGE));
                }
            }
            writeDocumentsAndCommit(dataPath.resolve(METADATA_DIRECTORY_NAME), commitUserData, documents);
            expectThrows(CorruptStateException.class, () -> loadPersistedClusterState(persistedClusterStateService));
        }
    }
    /**
     * Exercises the slow-write warning logic: a WARN message must be emitted when a (simulated) write takes
     * at least the configured threshold, must be suppressed below it, and must respect dynamic updates to
     * the threshold setting. Also checks the distinct message shapes for full vs. incremental writes.
     * Time is simulated via an injected clock ({@code currentTime}/{@code writeDurationMillis}).
     */
    @TestLogging(value = "org.elasticsearch.gateway:WARN", reason = "to ensure that we log gateway events on WARN level")
    public void testSlowLogging() throws IOException, IllegalAccessException {
        final long slowWriteLoggingThresholdMillis;
        final Settings settings;
        if (randomBoolean()) {
            // Use the default threshold half the time, an explicit random one otherwise.
            slowWriteLoggingThresholdMillis = PersistedClusterStateService.SLOW_WRITE_LOGGING_THRESHOLD.get(Settings.EMPTY).millis();
            settings = Settings.EMPTY;
        } else {
            slowWriteLoggingThresholdMillis = randomLongBetween(2, 100000);
            settings = Settings.builder()
                .put(PersistedClusterStateService.SLOW_WRITE_LOGGING_THRESHOLD.getKey(), slowWriteLoggingThresholdMillis + "ms")
                .build();
        }
        final DiscoveryNode localNode = DiscoveryNodeUtils.create("node");
        final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
            .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()))
            .build();
        final long startTimeMillis = randomLongBetween(0L, Long.MAX_VALUE - slowWriteLoggingThresholdMillis * 10);
        final AtomicLong currentTime = new AtomicLong(startTimeMillis);
        final AtomicLong writeDurationMillis = new AtomicLong(slowWriteLoggingThresholdMillis);
        final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
        try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
            PersistedClusterStateService persistedClusterStateService = new PersistedClusterStateService(
                nodeEnvironment,
                xContentRegistry(),
                clusterSettings,
                // Each clock read advances time by the configured simulated write duration.
                () -> currentTime.getAndAdd(writeDurationMillis.get()),
                ESTestCase::randomBoolean
            );
            try (Writer writer = persistedClusterStateService.createWriter()) {
                // Exactly at the threshold: warning expected.
                assertExpectedLogs(
                    1L,
                    null,
                    clusterState,
                    writer,
                    new MockLog.SeenEventExpectation(
                        "should see warning at threshold",
                        PersistedClusterStateService.class.getCanonicalName(),
                        Level.WARN,
                        """
                            writing full cluster state took [*] which is above the warn threshold of [*]; \
                            wrote global metadata, [0] mappings, and metadata for [0] indices"""
                    )
                );
                // Above the threshold: warning expected.
                writeDurationMillis.set(randomLongBetween(slowWriteLoggingThresholdMillis, slowWriteLoggingThresholdMillis * 2));
                assertExpectedLogs(
                    1L,
                    null,
                    clusterState,
                    writer,
                    new MockLog.SeenEventExpectation(
                        "should see warning above threshold",
                        PersistedClusterStateService.class.getCanonicalName(),
                        Level.WARN,
                        """
                            writing full cluster state took [*] which is above the warn threshold of [*]; \
                            wrote global metadata, [0] mappings, and metadata for [0] indices"""
                    )
                );
                // Below the threshold: no warning.
                writeDurationMillis.set(randomLongBetween(1, slowWriteLoggingThresholdMillis - 1));
                assertExpectedLogs(
                    1L,
                    null,
                    clusterState,
                    writer,
                    new MockLog.UnseenEventExpectation(
                        "should not see warning below threshold",
                        PersistedClusterStateService.class.getCanonicalName(),
                        Level.WARN,
                        "*"
                    )
                );
                // Dynamically lower the threshold to the current write duration: warning expected again.
                clusterSettings.applySettings(
                    Settings.builder()
                        .put(PersistedClusterStateService.SLOW_WRITE_LOGGING_THRESHOLD.getKey(), writeDurationMillis.get() + "ms")
                        .build()
                );
                assertExpectedLogs(
                    1L,
                    null,
                    clusterState,
                    writer,
                    new MockLog.SeenEventExpectation(
                        "should see warning at reduced threshold",
                        PersistedClusterStateService.class.getCanonicalName(),
                        Level.WARN,
                        """
                            writing full cluster state took [*] which is above the warn threshold of [*]; \
                            wrote global metadata, [0] mappings, and metadata for [0] indices"""
                    )
                );
                // An incremental write (previous state supplied) produces the incremental message shape.
                final ClusterState newClusterState = ClusterState.builder(clusterState)
                    .metadata(
                        Metadata.builder(clusterState.metadata())
                            .version(clusterState.version())
                            .put(
                                IndexMetadata.builder("test")
                                    .putMapping(randomMappingMetadata())
                                    .settings(
                                        indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
                                            .put(IndexMetadata.SETTING_INDEX_UUID, "test-uuid")
                                    )
                            )
                    )
                    .incrementVersion()
                    .build();
                assertExpectedLogs(
                    1L,
                    clusterState,
                    newClusterState,
                    writer,
                    new MockLog.SeenEventExpectation(
                        "should see warning at threshold",
                        PersistedClusterStateService.class.getCanonicalName(),
                        Level.WARN,
                        """
                            writing cluster state took [*] which is above the warn threshold of [*]; [skipped writing] global metadata, \
                            wrote [1] new mappings, removed [0] mappings and skipped [0] unchanged mappings, \
                            wrote metadata for [1] new indices and [0] existing indices, removed metadata for [0] indices and \
                            skipped [0] unchanged indices"""
                    )
                );
                // force a full write, so that the next write is an actual incremental write from clusterState->newClusterState
                writeDurationMillis.set(randomLongBetween(0, writeDurationMillis.get() - 1));
                assertExpectedLogs(
                    1L,
                    null,
                    clusterState,
                    writer,
                    new MockLog.UnseenEventExpectation(
                        "should not see warning below threshold",
                        PersistedClusterStateService.class.getCanonicalName(),
                        Level.WARN,
                        "*"
                    )
                );
                assertExpectedLogs(
                    1L,
                    clusterState,
                    newClusterState,
                    writer,
                    new MockLog.UnseenEventExpectation(
                        "should not see warning below threshold",
                        PersistedClusterStateService.class.getCanonicalName(),
                        Level.WARN,
                        "*"
                    )
                );
                assertThat(currentTime.get(), lessThan(startTimeMillis + 16 * slowWriteLoggingThresholdMillis)); // ensure no overflow
            }
        }
    }
public void testFailsIfCorrupt() throws IOException {
try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
final PersistedClusterStateService persistedClusterStateService = newPersistedClusterStateService(nodeEnvironment);
try (Writer writer = persistedClusterStateService.createWriter()) {
writer.writeFullStateAndCommit(1, ClusterState.EMPTY_STATE);
}
Path pathToCorrupt = randomFrom(nodeEnvironment.nodeDataPaths());
try (DirectoryStream<Path> directoryStream = Files.newDirectoryStream(pathToCorrupt.resolve("_state"))) {
CorruptionUtils.corruptFile(random(), randomFrom(StreamSupport.stream(directoryStream.spliterator(), false).filter(p -> {
final String filename = p.getFileName().toString();
return ExtrasFS.isExtra(filename) == false && filename.equals(WRITE_LOCK_NAME) == false;
}).toList()));
}
assertThat(
expectThrows(CorruptStateException.class, persistedClusterStateService::loadBestOnDiskState).getMessage(),
allOf(
startsWith("the index containing the cluster metadata under the data path ["),
endsWith("] has been changed by an external force after it was last written by Elasticsearch and is now unreadable")
)
);
}
}
    /**
     * Writes many indices one commit at a time and asserts after every commit that the number of files in
     * the metadata directory stays bounded, i.e. that segment merging keeps the on-disk file count from
     * growing without limit.
     */
    public void testLimitsFileCount() throws IOException {
        try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
            final PersistedClusterStateService persistedClusterStateService = newPersistedClusterStateService(nodeEnvironment);
            try (Writer writer = persistedClusterStateService.createWriter()) {
                ClusterState clusterState = ClusterState.EMPTY_STATE;
                writer.writeFullStateAndCommit(1, ClusterState.EMPTY_STATE);
                final int indexCount = between(2, usually() ? 20 : 1000);
                final int maxSegmentCount = (indexCount / 100) + 100; // only expect to have two tiers, each with max 100 segments
                final int filesPerSegment = 3; // .cfe, .cfs, .si
                final int extraFiles = 2; // segments_*, write.lock
                final int maxFileCount = (maxSegmentCount * filesPerSegment) + extraFiles;
                logger.info("--> adding [{}] indices one-by-one, verifying file count does not exceed [{}]", indexCount, maxFileCount);
                for (int i = 0; i < indexCount; i++) {
                    final ClusterState previousClusterState = clusterState;
                    clusterState = ClusterState.builder(clusterState)
                        .metadata(
                            Metadata.builder(clusterState.metadata())
                                .version(i + 2)
                                .put(
                                    IndexMetadata.builder("index-" + i)
                                        .putMapping(randomMappingMetadataOrNull())
                                        .settings(
                                            indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
                                                .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random()))
                                        )
                                )
                        )
                        .incrementVersion()
                        .build();
                    writer.writeIncrementalStateAndCommit(1, previousClusterState, clusterState);
                    // Check the invariant on every data path after every single commit.
                    for (Path dataPath : nodeEnvironment.nodeDataPaths()) {
                        try (DirectoryStream<Path> files = Files.newDirectoryStream(dataPath.resolve(METADATA_DIRECTORY_NAME))) {
                            int fileCount = 0;
                            final List<String> fileNames = new ArrayList<>();
                            for (Path filePath : files) {
                                final String fileName = filePath.getFileName().toString();
                                if (ExtrasFS.isExtra(fileName) == false) {
                                    fileNames.add(fileName);
                                    fileCount += 1;
                                }
                            }
                            if (maxFileCount < fileCount) {
                                // don't bother preparing the description unless we are failing
                                fileNames.sort(Comparator.naturalOrder());
                                fail(
                                    "after "
                                        + indexCount
                                        + " indices have "
                                        + fileCount
                                        + " files vs max of "
                                        + maxFileCount
                                        + ": "
                                        + fileNames
                                );
                            }
                        }
                    }
                }
            }
        }
    }
public void testOverrideNodeVersion() throws IOException {
try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
final PersistedClusterStateService persistedClusterStateService = newPersistedClusterStateService(nodeEnvironment);
final String clusterUUID = UUIDs.randomBase64UUID(random());
final long version = randomLongBetween(1L, Long.MAX_VALUE);
ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
try (Writer writer = persistedClusterStateService.createWriter()) {
writer.writeFullStateAndCommit(
0L,
ClusterState.builder(clusterState)
.metadata(
Metadata.builder(clusterState.metadata()).clusterUUID(clusterUUID).clusterUUIDCommitted(true).version(version)
)
.incrementVersion()
.build()
);
clusterState = loadPersistedClusterState(persistedClusterStateService);
assertThat(clusterState.metadata().clusterUUID(), equalTo(clusterUUID));
assertTrue(clusterState.metadata().clusterUUIDCommitted());
assertThat(clusterState.metadata().version(), equalTo(version));
}
BuildVersion overrideVersion = BuildVersion.fromVersionId(Version.V_8_0_0.id);
NodeMetadata prevMetadata = PersistedClusterStateService.nodeMetadata(persistedClusterStateService.getDataPaths());
assertEquals(BuildVersion.current(), prevMetadata.nodeVersion());
PersistedClusterStateService.overrideVersion(overrideVersion, persistedClusterStateService.getDataPaths());
NodeMetadata metadata = PersistedClusterStateService.nodeMetadata(persistedClusterStateService.getDataPaths());
assertEquals(overrideVersion, metadata.nodeVersion());
for (Path p : persistedClusterStateService.getDataPaths()) {
NodeMetadata individualMetadata = PersistedClusterStateService.nodeMetadata(p);
assertEquals(overrideVersion, individualMetadata.nodeVersion());
}
}
}
public void testDeleteAllPaths() throws IOException {
try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
final PersistedClusterStateService persistedClusterStateService = newPersistedClusterStateService(nodeEnvironment);
final String clusterUUID = UUIDs.randomBase64UUID(random());
final long version = randomLongBetween(1L, Long.MAX_VALUE);
ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
try (Writer writer = persistedClusterStateService.createWriter()) {
writer.writeFullStateAndCommit(
0L,
ClusterState.builder(clusterState)
.metadata(
Metadata.builder(clusterState.metadata()).clusterUUID(clusterUUID).clusterUUIDCommitted(true).version(version)
)
.incrementVersion()
.build()
);
clusterState = loadPersistedClusterState(persistedClusterStateService);
assertThat(clusterState.metadata().clusterUUID(), equalTo(clusterUUID));
assertTrue(clusterState.metadata().clusterUUIDCommitted());
assertThat(clusterState.metadata().version(), equalTo(version));
}
for (Path dataPath : persistedClusterStateService.getDataPaths()) {
assertTrue(findSegmentInDirectory(dataPath));
}
PersistedClusterStateService.deleteAll(persistedClusterStateService.getDataPaths());
for (Path dataPath : persistedClusterStateService.getDataPaths()) {
assertFalse(findSegmentInDirectory(dataPath));
}
}
}
    /**
     * Writes indices with old, current, and future index versions spread across several projects and
     * verifies that the oldest index version is correctly serialized into the node metadata and the
     * reloaded on-disk state.
     */
    public void testOldestIndexVersionIsCorrectlySerialized() throws IOException {
        final Path[] dataPaths1 = createDataPaths();
        final Path[] dataPaths2 = createDataPaths();
        final Path[] combinedPaths = Stream.concat(Arrays.stream(dataPaths1), Arrays.stream(dataPaths2)).toArray(Path[]::new);
        // One version just below the minimum compatible, the current one, and one just above current.
        IndexVersion oldVersion = IndexVersion.fromId(IndexVersions.MINIMUM_COMPATIBLE.id() - 1);
        final IndexVersion[] indexVersions = new IndexVersion[] {
            oldVersion,
            IndexVersion.current(),
            IndexVersion.fromId(IndexVersion.current().id() + 1) };
        int lastIndexNum = randomIntBetween(9, 50);
        Metadata.Builder b = Metadata.builder();
        List<ProjectMetadata.Builder> projects = randomList(1, 3, () -> ProjectMetadata.builder(randomUniqueProjectId()));
        projects.forEach(b::put);
        // Assign each versioned index to a random project, with strictly increasing backing-index numbers.
        for (IndexVersion indexVersion : indexVersions) {
            String indexUUID = UUIDs.randomBase64UUID(random());
            IndexMetadata im = IndexMetadata.builder(DataStream.getDefaultBackingIndexName("index", lastIndexNum))
                .putMapping(randomMappingMetadataOrNull())
                .settings(settings(indexVersion).put(IndexMetadata.SETTING_INDEX_UUID, indexUUID))
                .numberOfShards(1)
                .numberOfReplicas(1)
                .build();
            randomFrom(projects).put(im, false);
            lastIndexNum = randomIntBetween(lastIndexNum + 1, lastIndexNum + 50);
        }
        Metadata metadata = b.build();
        try (NodeEnvironment nodeEnvironment = newNodeEnvironment(combinedPaths)) {
            try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
                final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
                writeState(
                    writer,
                    0L,
                    ClusterState.builder(clusterState)
                        .metadata(metadata)
                        .version(randomLongBetween(1L, Long.MAX_VALUE))
                        .routingTable(GlobalRoutingTableTestHelper.buildRoutingTable(metadata, RoutingTable.Builder::addAsNew))
                        .build(),
                    clusterState
                );
            }
            PersistedClusterStateService.OnDiskState fromDisk = newPersistedClusterStateService(nodeEnvironment).loadBestOnDiskState();
            NodeMetadata nodeMetadata = PersistedClusterStateService.nodeMetadata(nodeEnvironment.nodeDataPaths());
            // Both the node metadata and the reloaded state must report the oldest version that was written.
            assertEquals(oldVersion, nodeMetadata.oldestIndexVersion());
            assertEquals(
                oldVersion,
                fromDisk.metadata.projects()
                    .values()
                    .stream()
                    .map(ProjectMetadata::oldestIndexVersion)
                    .min(IndexVersion::compareTo)
                    .orElse(null)
            );
        }
    }
    /**
     * Verifies that loading the best on-disk state emits the expected DEBUG-level diagnostics:
     * the integrity check, the commit details (with timestamps), the commit user data, and the
     * per-segment load message.
     */
    @TestLogging(value = "org.elasticsearch.gateway.PersistedClusterStateService:DEBUG", reason = "testing contents of DEBUG log")
    public void testDebugLogging() throws IOException, IllegalAccessException {
        try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
            final PersistedClusterStateService persistedClusterStateService = newPersistedClusterStateService(nodeEnvironment);
            try (Writer writer = persistedClusterStateService.createWriter()) {
                writer.writeFullStateAndCommit(randomNonNegativeLong(), ClusterState.EMPTY_STATE);
            }
            try (var mockLog = MockLog.capture(PersistedClusterStateService.class)) {
                mockLog.addExpectation(
                    new MockLog.SeenEventExpectation(
                        "should see checkindex message",
                        PersistedClusterStateService.class.getCanonicalName(),
                        Level.DEBUG,
                        "checking cluster state integrity"
                    )
                );
                mockLog.addExpectation(
                    new MockLog.SeenEventExpectation(
                        "should see commit message including timestamps",
                        PersistedClusterStateService.class.getCanonicalName(),
                        Level.DEBUG,
                        "loading cluster state from commit [*] in [*creationTime*"
                    )
                );
                mockLog.addExpectation(
                    new MockLog.SeenEventExpectation(
                        "should see user data",
                        PersistedClusterStateService.class.getCanonicalName(),
                        Level.DEBUG,
                        "cluster state commit user data: *" + PersistedClusterStateService.NODE_VERSION_KEY + "*"
                    )
                );
                mockLog.addExpectation(
                    new MockLog.SeenEventExpectation(
                        "should see segment message including timestamp",
                        PersistedClusterStateService.class.getCanonicalName(),
                        Level.DEBUG,
                        "loading cluster state from segment: *timestamp=*"
                    )
                );
                persistedClusterStateService.loadBestOnDiskState();
                mockLog.assertAllExpectationsMatched();
            }
        }
    }
    /**
     * Duplicates the Lucene documents holding a mapping and verifies that loading the on-disk state then
     * fails with a {@code CorruptStateException} naming the duplicated mapping hash.
     */
    public void testFailsIfMappingIsDuplicated() throws IOException {
        final Path dataPath = createTempDir();
        try (NodeEnvironment nodeEnvironment = newNodeEnvironment(new Path[] { dataPath })) {
            final PersistedClusterStateService persistedClusterStateService = newPersistedClusterStateService(nodeEnvironment);
            ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
                .metadata(
                    Metadata.builder()
                        .put(
                            IndexMetadata.builder("test-1")
                                .putMapping(randomMappingMetadata())
                                .settings(
                                    indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
                                        .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random()))
                                )
                        )
                )
                .build();
            // The single index yields exactly one mapping hash to target.
            String hash = clusterState.metadata().getProject().getMappingsByHash().keySet().iterator().next();
            try (Writer writer = persistedClusterStateService.createWriter()) {
                writer.writeFullStateAndCommit(0L, clusterState);
            }
            final List<Document> documents = new ArrayList<>();
            final Map<String, String> commitUserData;
            try (
                Directory directory = new NIOFSDirectory(dataPath.resolve(METADATA_DIRECTORY_NAME));
                DirectoryReader reader = DirectoryReader.open(directory)
            ) {
                commitUserData = reader.getIndexCommit().getUserData();
                forEachDocument(reader, Set.of(GLOBAL_TYPE_NAME, MAPPING_TYPE_NAME, INDEX_TYPE_NAME), documents::add);
            }
            // duplicate all documents associated with the mapping in question
            for (Document document : new ArrayList<>(documents)) { // iterating a copy
                IndexableField mappingHash = document.getField("mapping_hash");
                if (mappingHash != null && mappingHash.stringValue().equals(hash)) {
                    documents.add(document);
                }
            }
            writeDocumentsAndCommit(dataPath.resolve(METADATA_DIRECTORY_NAME), commitUserData, documents);
            final String message = expectThrows(CorruptStateException.class, () -> persistedClusterStateService.loadBestOnDiskState())
                .getMessage();
            assertEquals("duplicate metadata found for mapping hash [" + hash + "] in project [default]", message);
        }
    }
public void testFailsIfMappingIsMissing() throws IOException {
final Path dataPath = createTempDir();
try (NodeEnvironment nodeEnvironment = newNodeEnvironment(new Path[] { dataPath })) {
final PersistedClusterStateService persistedClusterStateService = newPersistedClusterStateService(nodeEnvironment);
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
.metadata(
Metadata.builder()
.put(
IndexMetadata.builder("test-1")
.putMapping(randomMappingMetadata())
.settings(
indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random()))
)
)
)
.build();
String hash = clusterState.metadata().getProject().getMappingsByHash().keySet().iterator().next();
try (Writer writer = persistedClusterStateService.createWriter()) {
writer.writeFullStateAndCommit(0L, clusterState);
}
final List<Document> documents = new ArrayList<>();
final Map<String, String> commitUserData;
try (
Directory directory = new NIOFSDirectory(dataPath.resolve(METADATA_DIRECTORY_NAME));
DirectoryReader reader = DirectoryReader.open(directory)
) {
commitUserData = reader.getIndexCommit().getUserData();
forEachDocument(reader, Set.of(GLOBAL_TYPE_NAME, MAPPING_TYPE_NAME, INDEX_TYPE_NAME), documents::add);
}
// remove all documents associated with the mapping in question
for (Document document : new ArrayList<>(documents)) { // iterating a copy
IndexableField mappingHash = document.getField("mapping_hash");
if (mappingHash != null && mappingHash.stringValue().equals(hash)) {
documents.remove(document);
}
}
writeDocumentsAndCommit(dataPath.resolve(METADATA_DIRECTORY_NAME), commitUserData, documents);
final String message = expectThrows(CorruptStateException.class, () -> persistedClusterStateService.loadBestOnDiskState())
.getCause()
.getMessage();
assertEquals("java.lang.IllegalArgumentException: mapping of index [test-1] with hash [" + hash + "] not found", message);
}
}
public void testDeduplicatedMappings() throws IOException {
final var supportsMultipleProjects = randomBoolean();
final ProjectId projectId = supportsMultipleProjects ? randomUniqueProjectId() : Metadata.DEFAULT_PROJECT_ID;
final ProjectId anotherProjectId = supportsMultipleProjects ? randomUniqueProjectId() : null;
final Path dataPath = createTempDir();
try (NodeEnvironment nodeEnvironment = newNodeEnvironment(new Path[] { dataPath })) {
final PersistedClusterStateService persistedClusterStateService = newPersistedClusterStateService(
nodeEnvironment,
() -> supportsMultipleProjects
);
try (Writer writer = persistedClusterStateService.createWriter()) {
Map<String, Set<String>> hashes;
ProjectMetadata.Builder metadata;
ClusterState clusterState;
ClusterState previousState;
// generate two mappings
MappingMetadata mapping1 = randomMappingMetadata();
MappingMetadata mapping2 = randomValueOtherThan(mapping1, () -> randomMappingMetadata());
// build and write a cluster state with metadata that has all indices using a single mapping
metadata = ProjectMetadata.builder(projectId);
for (int i = between(5, 20); i >= 0; i--) {
metadata.put(
IndexMetadata.builder("test-" + i)
.putMapping(mapping1)
.settings(
indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random()))
)
);
}
clusterState = ClusterState.builder(ClusterName.DEFAULT).putProjectMetadata(metadata).build();
// If the cluster supports multi-projects, add another project with the same mappings which should not be affected at all
if (anotherProjectId != null) {
var anotherMetadata = ProjectMetadata.builder(anotherProjectId);
for (int i = between(2, 10); i >= 0; i--) {
anotherMetadata.put(
IndexMetadata.builder("test-" + i)
.putMapping(mapping1)
.settings(
indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random()))
)
);
}
for (int i = between(11, 20); i >= 10; i--) {
anotherMetadata.put(
IndexMetadata.builder("test-" + i)
.putMapping(mapping2)
.settings(
indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random()))
)
);
}
clusterState = ClusterState.builder(clusterState).putProjectMetadata(anotherMetadata).build();
}
assertThat(clusterState.metadata().getProject(projectId).getMappingsByHash().size(), equalTo(1));
if (anotherProjectId != null) {
assertThat(clusterState.metadata().getProject(anotherProjectId).getMappingsByHash().size(), equalTo(2));
}
writer.writeFullStateAndCommit(0L, clusterState);
// verify that the on-disk state reflects 1 mapping
hashes = loadPersistedMappingHashes(dataPath.resolve(METADATA_DIRECTORY_NAME));
Set<String> projectHashes = hashes.get(projectId.id());
assertThat(projectHashes.size(), equalTo(1));
assertThat(clusterState.metadata().getProject(projectId).getMappingsByHash().keySet(), equalTo(projectHashes));
maybeCheckForAnotherProject(anotherProjectId, hashes, clusterState);
previousState = clusterState;
metadata = ProjectMetadata.builder(previousState.metadata().getProject(projectId));
// add a second mapping -- either by adding a new index or changing an existing one
if (randomBoolean()) {
// add another index with a different mapping
metadata.put(
IndexMetadata.builder("test-" + 99)
.putMapping(mapping2)
.settings(
indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random()))
)
);
} else {
// change an existing index to a different mapping
String index = randomFrom(previousState.metadata().getProject(projectId).indices().keySet());
metadata.put(IndexMetadata.builder(metadata.get(index)).putMapping(mapping2));
}
clusterState = ClusterState.builder(previousState).putProjectMetadata(metadata).build();
assertThat(clusterState.metadata().getProject(projectId).getMappingsByHash().size(), equalTo(2));
writer.writeIncrementalStateAndCommit(0L, previousState, clusterState);
// verify that the on-disk state reflects 2 mappings
hashes = loadPersistedMappingHashes(dataPath.resolve(METADATA_DIRECTORY_NAME));
projectHashes = hashes.get(projectId.id());
assertThat(projectHashes.size(), equalTo(2));
assertThat(clusterState.metadata().getProject(projectId).getMappingsByHash().keySet(), equalTo(projectHashes));
maybeCheckForAnotherProject(anotherProjectId, hashes, clusterState);
previousState = clusterState;
metadata = ProjectMetadata.builder(previousState.metadata().getProject(projectId));
// update all indices to use the second mapping
for (String index : previousState.metadata().getProject(projectId).indices().keySet()) {
metadata.put(IndexMetadata.builder(metadata.get(index)).putMapping(mapping2));
}
clusterState = ClusterState.builder(previousState).putProjectMetadata(metadata).build();
assertThat(clusterState.metadata().getProject(projectId).getMappingsByHash().size(), equalTo(1));
writer.writeIncrementalStateAndCommit(0L, previousState, clusterState);
// verify that the on-disk reflects 1 mapping
hashes = loadPersistedMappingHashes(dataPath.resolve(METADATA_DIRECTORY_NAME));
projectHashes = hashes.get(projectId.id());
assertThat(projectHashes.size(), equalTo(1));
assertThat(clusterState.metadata().getProject(projectId).getMappingsByHash().keySet(), equalTo(projectHashes));
maybeCheckForAnotherProject(anotherProjectId, hashes, clusterState);
}
}
}
private static void maybeCheckForAnotherProject(
ProjectId anotherProjectId,
Map<String, Set<String>> hashes,
ClusterState clusterState
) {
if (anotherProjectId != null) {
assertThat(hashes, aMapWithSize(2));
final Set<String> anotherProjectHashes = hashes.get(anotherProjectId.id());
assertThat(anotherProjectHashes.size(), equalTo(2));
assertThat(clusterState.metadata().getProject(anotherProjectId).getMappingsByHash().keySet(), equalTo(anotherProjectHashes));
} else {
assertThat(hashes, aMapWithSize(1));
}
}
public void testClusterUUIDIsStoredInCommitUserData() throws Exception {
final Path dataPath = createTempDir();
try (NodeEnvironment nodeEnvironment = newNodeEnvironment(new Path[] { dataPath })) {
final PersistedClusterStateService persistedClusterStateService = newPersistedClusterStateService(nodeEnvironment);
String clusterUUID = UUIDs.randomBase64UUID();
boolean clusterUUIDCommitted = randomBoolean();
try (Writer writer = persistedClusterStateService.createWriter()) {
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
.metadata(Metadata.builder().clusterUUID(clusterUUID).clusterUUIDCommitted(clusterUUIDCommitted))
.build();
writer.writeFullStateAndCommit(0, clusterState);
}
var onDiskState = persistedClusterStateService.loadBestOnDiskState();
assertThat(onDiskState.clusterUUID, is(equalTo(clusterUUID)));
assertThat(onDiskState.clusterUUIDCommitted, is(clusterUUIDCommitted));
}
}
/**
* Utility method for applying a consumer to each document (of the given types) associated with a DirectoryReader.
*/
private static void forEachDocument(DirectoryReader reader, Set<String> types, Consumer<Document> consumer) throws IOException {
final IndexSearcher indexSearcher = newSearcher(reader);
indexSearcher.setQueryCache(null);
for (String typeName : types) {
final Query query = new TermQuery(new Term(TYPE_FIELD_NAME, typeName));
final Weight weight = indexSearcher.createWeight(query, ScoreMode.COMPLETE_NO_SCORES, 0.0f);
for (LeafReaderContext leafReaderContext : indexSearcher.getIndexReader().leaves()) {
final Scorer scorer = weight.scorer(leafReaderContext);
if (scorer != null) {
final Bits liveDocs = leafReaderContext.reader().getLiveDocs();
final IntPredicate isLiveDoc = liveDocs == null ? i -> true : liveDocs::get;
final DocIdSetIterator docIdSetIterator = scorer.iterator();
StoredFields storedFields = leafReaderContext.reader().storedFields();
while (docIdSetIterator.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
if (isLiveDoc.test(docIdSetIterator.docID())) {
final Document document = storedFields.document(docIdSetIterator.docID());
document.add(new StringField(TYPE_FIELD_NAME, typeName, Field.Store.NO));
consumer.accept(document);
}
}
}
}
}
}
/**
* Utility method writing documents back to a directory.
*/
private static void writeDocumentsAndCommit(Path metadataDirectory, Map<String, String> commitUserData, List<Document> documents)
throws IOException {
try (Directory directory = new NIOFSDirectory(metadataDirectory)) {
final IndexWriterConfig indexWriterConfig = new IndexWriterConfig(new KeywordAnalyzer());
indexWriterConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
try (IndexWriter indexWriter = new IndexWriter(directory, indexWriterConfig)) {
for (Document document : documents) {
indexWriter.addDocument(document);
}
indexWriter.setLiveCommitData(commitUserData.entrySet());
indexWriter.commit();
}
}
}
/**
* Search the underlying persisted state indices for non-deleted mapping_hash documents that represent the
* first page of data, collecting and returning the distinct mapping_hashes themselves.
*/
private static Map<String, Set<String>> loadPersistedMappingHashes(Path metadataDirectory) throws IOException {
Map<String, Set<String>> hashes = new HashMap<>();
try (Directory directory = new NIOFSDirectory(metadataDirectory); DirectoryReader reader = DirectoryReader.open(directory)) {
forEachDocument(reader, Set.of(MAPPING_TYPE_NAME), document -> {
int page = document.getField("page").numericValue().intValue();
if (page == 0) {
String projectId = document.getField("project_id").stringValue();
String hash = document.getField("mapping_hash").stringValue();
assertTrue(hashes.computeIfAbsent(projectId, ignored -> new HashSet<>()).add(hash));
}
});
}
return hashes;
}
private boolean findSegmentInDirectory(Path dataPath) throws IOException {
Directory d = new NIOFSDirectory(dataPath.resolve(METADATA_DIRECTORY_NAME));
for (final String file : d.listAll()) {
if (file.startsWith(IndexFileNames.SEGMENTS)) {
return true;
}
}
return false;
}
private void assertExpectedLogs(
long currentTerm,
ClusterState previousState,
ClusterState clusterState,
PersistedClusterStateService.Writer writer,
MockLog.LoggingExpectation expectation
) throws IOException {
try (var mockLog = MockLog.capture(PersistedClusterStateService.class)) {
mockLog.addExpectation(expectation);
if (previousState == null) {
writer.writeFullStateAndCommit(currentTerm, clusterState);
} else {
writer.writeIncrementalStateAndCommit(currentTerm, previousState, clusterState);
}
mockLog.assertAllExpectationsMatched();
}
}
@Override
public Settings buildEnvSettings(Settings settings) {
assertTrue(settings.hasValue(Environment.PATH_DATA_SETTING.getKey()));
return Settings.builder().put(settings).put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath()).build();
}
public static Path[] createDataPaths() {
final Path[] dataPaths = new Path[randomIntBetween(1, 4)];
for (int i = 0; i < dataPaths.length; i++) {
dataPaths[i] = createTempDir();
}
return dataPaths;
}
private NodeEnvironment newNodeEnvironment(Path[] dataPaths) throws IOException {
return newNodeEnvironment(
Settings.builder()
.putList(Environment.PATH_DATA_SETTING.getKey(), Arrays.stream(dataPaths).map(Path::toString).toList())
.build()
);
}
private static MappingMetadata randomMappingMetadata() {
int i = randomIntBetween(1, 4);
return new MappingMetadata(MapperService.SINGLE_MAPPING_NAME, Map.of("_doc", Map.of("properties", Map.of("field" + i, "text"))));
}
private static MappingMetadata randomMappingMetadataOrNull() {
int i = randomIntBetween(0, 4);
if (i == 0) {
return null;
} else {
return randomMappingMetadata();
}
}
private static ClusterState loadPersistedClusterState(PersistedClusterStateService persistedClusterStateService) throws IOException {
final PersistedClusterStateService.OnDiskState onDiskState = persistedClusterStateService.loadBestOnDiskState(false);
return clusterStateFromMetadata(onDiskState.lastAcceptedVersion, onDiskState.metadata);
}
private static ClusterState clusterStateFromMetadata(long version, Metadata metadata) {
return ClusterState.builder(ClusterName.DEFAULT).version(version).metadata(metadata).build();
}
}
|
PersistedClusterStateServiceTests
|
java
|
elastic__elasticsearch
|
x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiChatCompletionResponseHandler.java
|
{
"start": 723,
"end": 1480
}
|
class ____ extends OpenAiResponseHandler {
public OpenAiChatCompletionResponseHandler(String requestType, ResponseParser parseFunction) {
super(requestType, parseFunction, true);
}
public OpenAiChatCompletionResponseHandler(
String requestType,
ResponseParser parseFunction,
Function<HttpResult, ErrorResponse> errorParseFunction
) {
super(requestType, parseFunction, errorParseFunction, true);
}
@Override
protected RetryException buildExceptionHandling429(Request request, HttpResult result) {
// We don't retry, if the chat completion input is too large
return new RetryException(false, buildError(RATE_LIMIT, request, result));
}
}
|
OpenAiChatCompletionResponseHandler
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/inheritance/tableperclass/MixedInheritanceTest.java
|
{
"start": 5794,
"end": 6159
}
|
class ____ extends Person {
private String name;
public Customer() {
}
public Customer(Integer id, String name) {
super( id );
this.name = name;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
@Entity(name = "DomesticCustomer")
@DiscriminatorValue("dc")
public static
|
Customer
|
java
|
apache__dubbo
|
dubbo-cluster/src/main/java/org/apache/dubbo/rpc/cluster/router/condition/config/model/MultiDestConditionRouterRule.java
|
{
"start": 1134,
"end": 2181
}
|
class ____ extends AbstractRouterRule {
private List<MultiDestCondition> conditions;
public static AbstractRouterRule parseFromMap(Map<String, Object> map) {
MultiDestConditionRouterRule multiDestConditionRouterRule = new MultiDestConditionRouterRule();
multiDestConditionRouterRule.parseFromMap0(map);
List<Map<String, String>> conditions = (List<Map<String, String>>) map.get(CONDITIONS_KEY);
List<MultiDestCondition> multiDestConditions = new ArrayList<>();
for (Map<String, String> condition : conditions) {
multiDestConditions.add((MultiDestCondition) JsonUtils.convertObject(condition, MultiDestCondition.class));
}
multiDestConditionRouterRule.setConditions(multiDestConditions);
return multiDestConditionRouterRule;
}
public List<MultiDestCondition> getConditions() {
return conditions;
}
public void setConditions(List<MultiDestCondition> conditions) {
this.conditions = conditions;
}
}
|
MultiDestConditionRouterRule
|
java
|
spring-projects__spring-security
|
oauth2/oauth2-core/src/main/java/org/springframework/security/oauth2/core/http/converter/OAuth2ErrorHttpMessageConverter.java
|
{
"start": 5907,
"end": 6525
}
|
class ____ implements Converter<OAuth2Error, Map<String, String>> {
@Override
public Map<String, String> convert(OAuth2Error oauth2Error) {
Map<String, String> parameters = new HashMap<>();
parameters.put(OAuth2ParameterNames.ERROR, oauth2Error.getErrorCode());
if (StringUtils.hasText(oauth2Error.getDescription())) {
parameters.put(OAuth2ParameterNames.ERROR_DESCRIPTION, oauth2Error.getDescription());
}
if (StringUtils.hasText(oauth2Error.getUri())) {
parameters.put(OAuth2ParameterNames.ERROR_URI, oauth2Error.getUri());
}
return parameters;
}
}
}
|
OAuth2ErrorParametersConverter
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/rest/client/reactive/proxy/HttpProxyDevServicesSingleCustomProviderTest.java
|
{
"start": 1046,
"end": 3837
}
|
class ____ {
@RegisterExtension
static final QuarkusUnitTest TEST = new QuarkusUnitTest()
.withApplicationRoot(
jar -> jar.addClasses(Resource.class, Client.class, CustomDevServicesRestClientProxyProvider.class))
.overrideConfigKey(
"quarkus.rest-client.\"io.quarkus.rest.client.reactive.proxy.HttpProxyDevServicesSingleCustomProviderTest$Client\".enable-local-proxy",
"true")
.overrideConfigKey(
"quarkus.rest-client.\"io.quarkus.rest.client.reactive.proxy.HttpProxyDevServicesSingleCustomProviderTest$Client\".url",
"http://localhost:${quarkus.http.test-port:8081}")
.setLogRecordPredicate(record -> record.getLevel().equals(Level.INFO))
.assertLogRecords(new Consumer<>() {
@Override
public void accept(List<LogRecord> logRecords) {
assertThat(logRecords).extracting(LogRecord::getMessage)
.anyMatch(message -> message.startsWith("Started custom HTTP proxy server") && message.endsWith(
"REST Client 'io.quarkus.rest.client.reactive.proxy.HttpProxyDevServicesSingleCustomProviderTest$Client'"));
}
})
.addBuildChainCustomizer(new Consumer<>() {
@Override
public void accept(BuildChainBuilder buildChainBuilder) {
buildChainBuilder.addBuildStep(new BuildStep() {
@Override
public void execute(BuildContext context) {
context.produce(
new DevServicesRestClientProxyProvider.BuildItem(
new CustomDevServicesRestClientProxyProvider()));
}
}).produces(DevServicesRestClientProxyProvider.BuildItem.class).build();
}
});
@ConfigProperty(name = "quarkus.rest-client.\"io.quarkus.rest.client.reactive.proxy.HttpProxyDevServicesSingleCustomProviderTest$Client\".override-uri")
String proxyUrl;
@Test
public void test() {
Client client = QuarkusRestClientBuilder.newBuilder().baseUri(URI.create("http://unused.dev")).build(Client.class);
// test that the proxy works as expected
given()
.baseUri(proxyUrl)
.get("test/count")
.then()
.statusCode(200)
.body(equalTo("10"));
// test that the client works as expected
long result = client.count();
assertEquals(10, result);
}
@Path("test")
public
|
HttpProxyDevServicesSingleCustomProviderTest
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/aop/framework/ProxyFactoryBeanTests.java
|
{
"start": 27499,
"end": 28086
}
|
class ____ extends DefaultPointcutAdvisor {
public static List<String> methodNames = new ArrayList<>();
public static void reset() {
methodNames.clear();
}
public PointcutForVoid() {
setAdvice((MethodInterceptor) invocation -> {
methodNames.add(invocation.getMethod().getName());
return invocation.proceed();
});
setPointcut(new DynamicMethodMatcherPointcut() {
@Override
public boolean matches(Method m, @Nullable Class<?> targetClass, Object... args) {
return m.getReturnType() == void.class;
}
});
}
}
public static
|
PointcutForVoid
|
java
|
junit-team__junit5
|
jupiter-tests/src/test/java/org/junit/jupiter/api/DisplayNameGenerationTests.java
|
{
"start": 19409,
"end": 19504
}
|
class ____ {
@Test
void some_nested_test() {
}
}
private static
|
Nested_Class_Template
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/api/EntryPointAssertions_within_Test.java
|
{
"start": 1440,
"end": 7349
}
|
class ____ extends EntryPointAssertionsBaseTest {
@ParameterizedTest
@MethodSource("bigDecimalOffsetFactories")
void should_create_BigDecimal_offset(Function<BigDecimal, Offset<BigDecimal>> offsetFactory) {
// GIVEN
BigDecimal offsetValue = BigDecimal.ONE;
// WHEN
Offset<BigDecimal> offset = offsetFactory.apply(offsetValue);
// THEN
then(offset).isEqualTo(offset(offsetValue));
}
private static Stream<Function<BigDecimal, Offset<BigDecimal>>> bigDecimalOffsetFactories() {
return Stream.of(Assertions::within, BDDAssertions::within, withAssertions::within);
}
@ParameterizedTest
@MethodSource("bigIntegerOffsetFactories")
void should_create_BigInteger_offset(Function<BigInteger, Offset<BigInteger>> offsetFactory) {
// GIVEN
BigInteger offsetValue = BigInteger.ONE;
// WHEN
Offset<BigInteger> offset = offsetFactory.apply(offsetValue);
// THEN
then(offset).isEqualTo(offset(offsetValue));
}
private static Stream<Function<BigInteger, Offset<BigInteger>>> bigIntegerOffsetFactories() {
return Stream.of(Assertions::within, BDDAssertions::within, withAssertions::within);
}
@ParameterizedTest
@MethodSource("byteOffsetFactories")
void should_create_Byte_offset(Function<Byte, Offset<Byte>> offsetFactory) {
// GIVEN
Byte offsetValue = Byte.MAX_VALUE;
// WHEN
Offset<Byte> offset = offsetFactory.apply(offsetValue);
// THEN
then(offset).isEqualTo(offset(offsetValue));
}
private static Stream<Function<Byte, Offset<Byte>>> byteOffsetFactories() {
return Stream.of(Assertions::within, BDDAssertions::within, withAssertions::within);
}
@ParameterizedTest
@MethodSource("doubleOffsetFactories")
void should_create_Double_offset(Function<Double, Offset<Double>> offsetFactory) {
// GIVEN
Double offsetValue = Double.MAX_VALUE;
// WHEN
Offset<Double> offset = offsetFactory.apply(offsetValue);
// THEN
then(offset).isEqualTo(offset(offsetValue));
}
private static Stream<Function<Double, Offset<Double>>> doubleOffsetFactories() {
return Stream.of(Assertions::within, BDDAssertions::within, withAssertions::within);
}
@ParameterizedTest
@MethodSource("floatOffsetFactories")
void should_create_Float_offset(Function<Float, Offset<Float>> offsetFactory) {
// GIVEN
Float offsetValue = Float.MAX_VALUE;
// WHEN
Offset<Float> offset = offsetFactory.apply(offsetValue);
// THEN
then(offset).isEqualTo(offset(offsetValue));
}
private static Stream<Function<Float, Offset<Float>>> floatOffsetFactories() {
return Stream.of(Assertions::within, BDDAssertions::within, withAssertions::within);
}
@ParameterizedTest
@MethodSource("integerOffsetFactories")
void should_create_Integer_offset(Function<Integer, Offset<Integer>> offsetFactory) {
// GIVEN
Integer offsetValue = Integer.MAX_VALUE;
// WHEN
Offset<Integer> offset = offsetFactory.apply(offsetValue);
// THEN
then(offset).isEqualTo(offset(offsetValue));
}
private static Stream<Function<Integer, Offset<Integer>>> integerOffsetFactories() {
return Stream.of(Assertions::within, BDDAssertions::within, withAssertions::within);
}
@ParameterizedTest
@MethodSource("longOffsetFactories")
void should_create_Long_offset(Function<Long, Offset<Long>> offsetFactory) {
// GIVEN
Long offsetValue = Long.MAX_VALUE;
// WHEN
Offset<Long> offset = offsetFactory.apply(offsetValue);
// THEN
then(offset).isEqualTo(offset(offsetValue));
}
private static Stream<Function<Long, Offset<Long>>> longOffsetFactories() {
return Stream.of(Assertions::within, BDDAssertions::within, withAssertions::within);
}
@ParameterizedTest
@MethodSource("temporalOffsetFactories")
void should_create_temporal_offset(BiFunction<Long, TemporalUnit, TemporalUnitOffset> offsetFactory) {
// GIVEN
long value = Long.MAX_VALUE;
TemporalUnit temporalUnit = ChronoUnit.MINUTES;
// WHEN
TemporalUnitOffset offset = offsetFactory.apply(value, temporalUnit);
// THEN
then(offset).isEqualTo(new TemporalUnitWithinOffset(value, temporalUnit));
}
private static Stream<BiFunction<Long, TemporalUnit, TemporalUnitOffset>> temporalOffsetFactories() {
return Stream.of(Assertions::within, BDDAssertions::within, withAssertions::within);
}
@ParameterizedTest
@MethodSource("temporal_offset_from_duration")
void should_create_temporal_strictOffset_from_duration(Function<Duration, TemporalUnitOffset> offsetFactory) {
// GIVEN
Duration duration = Duration.ofNanos(123);
// WHEN
TemporalUnitOffset offset = offsetFactory.apply(duration);
// THEN
then(offset).isEqualTo(new TemporalUnitWithinOffset(123, ChronoUnit.NANOS));
}
@ParameterizedTest
@MethodSource("temporal_offset_from_duration")
void should_fail_if_duration_is_null(Function<Duration, TemporalUnitOffset> offsetFactory) {
// GIVEN
Duration duration = null;
// WHEN
NullPointerException npe = catchNullPointerException(() -> offsetFactory.apply(duration));
// THEN
then(npe).hasMessage("non null duration expected");
}
private static Stream<Function<Duration, TemporalUnitOffset>> temporal_offset_from_duration() {
return Stream.of(Assertions::within, BDDAssertions::within, withAssertions::within);
}
@ParameterizedTest
@MethodSource("shortOffsetFactories")
void should_create_Short_offset(Function<Short, Offset<Short>> offsetFactory) {
// GIVEN
Short offsetValue = Short.MAX_VALUE;
// WHEN
Offset<Short> offset = offsetFactory.apply(offsetValue);
// THEN
then(offset).isEqualTo(offset(offsetValue));
}
private static Stream<Function<Short, Offset<Short>>> shortOffsetFactories() {
return Stream.of(Assertions::within, BDDAssertions::within, withAssertions::within);
}
}
|
EntryPointAssertions_within_Test
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/ext/javatime/ser/TestLocalTimeSerializationWithCustomFormatter.java
|
{
"start": 534,
"end": 2067
}
|
class ____
{
@ParameterizedTest
@MethodSource("customFormatters")
void testSerialization(DateTimeFormatter formatter) throws Exception {
LocalTime dateTime = LocalTime.now();
assertTrue(serializeWith(dateTime, formatter).contains(dateTime.format(formatter)));
}
private String serializeWith(LocalTime dateTime, DateTimeFormatter f) throws Exception {
ObjectMapper mapper = JsonMapper.builder()
.addModule(new SimpleModule()
.addSerializer(new LocalTimeSerializer(f)))
.build();
return mapper.writeValueAsString(dateTime);
}
@ParameterizedTest
@MethodSource("customFormatters")
void testDeserialization(DateTimeFormatter formatter) throws Exception {
LocalTime dateTime = LocalTime.now();
assertEquals(dateTime, deserializeWith(dateTime.format(formatter), formatter));
}
private LocalTime deserializeWith(String json, DateTimeFormatter f) throws Exception {
ObjectMapper mapper = JsonMapper.builder()
.addModule(new SimpleModule()
.addDeserializer(LocalTime.class, new LocalTimeDeserializer(f)))
.build();
return mapper.readValue("\"" + json + "\"", LocalTime.class);
}
static Stream<DateTimeFormatter> customFormatters() {
return Stream.of(
DateTimeFormatter.ISO_LOCAL_TIME,
DateTimeFormatter.ISO_TIME
);
}
}
|
TestLocalTimeSerializationWithCustomFormatter
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamOperatorWrapperTest.java
|
{
"start": 9924,
"end": 12847
}
|
class ____ extends AbstractStreamOperator<String>
implements OneInputStreamOperator<String, String>, BoundedOneInput {
private static final long serialVersionUID = 1L;
private final String name;
private final ConcurrentLinkedQueue<Object> output;
private final ProcessingTimeService processingTimeService;
private final MailboxExecutor mailboxExecutor;
private final TimerMailController timerMailController;
TestOneInputStreamOperator(
String name,
ConcurrentLinkedQueue<Object> output,
ProcessingTimeService processingTimeService,
MailboxExecutor mailboxExecutor,
TimerMailController timerMailController) {
this.name = name;
this.output = output;
this.processingTimeService = processingTimeService;
this.mailboxExecutor = mailboxExecutor;
this.timerMailController = timerMailController;
processingTimeService.registerTimer(
Long.MAX_VALUE, t2 -> output.add("[" + name + "]: Timer not triggered"));
super.setProcessingTimeService(processingTimeService);
}
public String getName() {
return name;
}
@Override
public void processElement(StreamRecord<String> element) {}
@Override
public void endInput() throws InterruptedException {
output.add("[" + name + "]: End of input");
ProcessingTimeCallback callback =
t1 ->
output.add(
"["
+ name
+ "]: Timer that was in mailbox before closing operator");
processingTimeService.registerTimer(0, callback);
timerMailController.getInMailboxLatch(callback).await();
}
@Override
public void finish() throws Exception {
ProcessingTimeCallback callback =
t1 ->
output.add(
"["
+ name
+ "]: Timer to put in mailbox when finishing operator");
assertThat((Future<?>) processingTimeService.registerTimer(0, callback)).isNotNull();
assertThat(timerMailController.getPuttingLatch(callback)).isNull();
mailboxExecutor.execute(
() ->
output.add(
"["
+ name
+ "]: Mail to put in mailbox when finishing operator"),
"");
output.add("[" + name + "]: Bye");
}
}
}
|
TestOneInputStreamOperator
|
java
|
apache__kafka
|
clients/src/main/java/org/apache/kafka/common/protocol/types/NullableSchema.java
|
{
"start": 979,
"end": 3178
}
|
class ____ extends Schema {
private static final String NULLABLE_STRUCT_TYPE_NAME = "NULLABLE_STRUCT";
public NullableSchema(Schema schema) {
super(schema.tolerateMissingFieldsWithDefaults(), Arrays.stream(schema.fields()).map(field -> field.def).toArray(Field[]::new));
}
@Override
public boolean isNullable() {
return true;
}
/**
* Write a struct to the buffer with special handling for null values
* If the input object is null, writes a byte value of -1 to the buffer as a null indicator.
*/
@Override
public void write(ByteBuffer buffer, Object o) {
if (o == null) {
buffer.put((byte) -1);
return;
}
buffer.put((byte) 1);
super.write(buffer, o);
}
@Override
public Struct read(ByteBuffer buffer) {
byte nullIndicator = buffer.get();
if (nullIndicator < 0)
return null;
return super.read(buffer);
}
@Override
public int sizeOf(Object o) {
if (o == null)
return 1;
return 1 + super.sizeOf(o);
}
@Override
public Struct validate(Object item) {
if (item == null)
return null;
return super.validate(item);
}
@Override
public String typeName() {
return NULLABLE_STRUCT_TYPE_NAME;
}
@Override
public String leftBracket() {
return "?{";
}
@Override
public String rightBracket() {
return "}";
}
@Override
public String documentation() {
return "A nullable struct is named by a string with a capitalized first letter and consists of one or more fields. " +
"It represents a composite object or null. " +
"For non-null values, the first byte has value 1, " +
"followed by the serialization of each field in the order they are defined. " +
"A null value is encoded as a byte with value -1 and there are no following bytes." +
"In protocol documentation a nullable struct containing multiple fields is enclosed by " +
leftBracket() + " and " + rightBracket() + ".";
}
}
|
NullableSchema
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeId.java
|
{
"start": 1276,
"end": 1652
}
|
class ____ extends SequentialNumber {
/**
* The last reserved inode id. InodeIDs are allocated from LAST_RESERVED_ID +
* 1.
*/
public static final long LAST_RESERVED_ID = 1 << 14; // 16384
public static final long ROOT_INODE_ID = LAST_RESERVED_ID + 1; // 16385
public static final long INVALID_INODE_ID = -1;
INodeId() {
super(ROOT_INODE_ID);
}
}
|
INodeId
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/TestingRetrievableStateStorageHelper.java
|
{
"start": 1166,
"end": 1447
}
|
class ____<T extends Serializable>
implements RetrievableStateStorageHelper<T> {
@Override
public RetrievableStateHandle<T> store(T state) {
return new TestingRetrievableStateHandle<>(state);
}
private static final
|
TestingRetrievableStateStorageHelper
|
java
|
apache__flink
|
flink-table/flink-sql-client/src/main/java/org/apache/flink/table/client/cli/Printer.java
|
{
"start": 3141,
"end": 3647
}
|
class ____ implements Printer {
private static final HelpCommandPrinter INSTANCE = new HelpCommandPrinter();
@Override
public boolean isQuitCommand() {
return false;
}
@Override
public void print(Terminal terminal) {
terminal.writer().println(CliStrings.MESSAGE_HELP);
terminal.flush();
}
@Override
public void close() {}
}
/** Printer to print the QUIT messages. */
|
HelpCommandPrinter
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/dialect/function/array/PostgreSQLArrayPositionFunction.java
|
{
"start": 579,
"end": 1570
}
|
class ____ extends AbstractArrayPositionFunction {
public PostgreSQLArrayPositionFunction(TypeConfiguration typeConfiguration) {
super( typeConfiguration );
}
@Override
public void render(
SqlAppender sqlAppender,
List<? extends SqlAstNode> sqlAstArguments,
ReturnableType<?> returnType,
SqlAstTranslator<?> walker) {
final Expression arrayExpression = (Expression) sqlAstArguments.get( 0 );
final Expression elementExpression = (Expression) sqlAstArguments.get( 1 );
sqlAppender.append( "case when " );
arrayExpression.accept( walker );
sqlAppender.append( " is not null then coalesce(array_position(" );
walker.render( arrayExpression, SqlAstNodeRenderingMode.DEFAULT );
sqlAppender.append( ',' );
walker.render( elementExpression, SqlAstNodeRenderingMode.DEFAULT );
if ( sqlAstArguments.size() > 2 ) {
sqlAppender.append( ',' );
sqlAstArguments.get( 2 ).accept( walker );
}
sqlAppender.append( "),0) end" );
}
}
|
PostgreSQLArrayPositionFunction
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/ext/javatime/ser/PeriodSerTest.java
|
{
"start": 995,
"end": 1850
}
|
class ____ extends DateTimeTestBase
{
private final ObjectMapper MAPPER = newMapper();
@Test
public void testSerialization01() throws Exception
{
assertEquals(q("P1Y6M15D"), MAPPER.writeValueAsString(Period.of(1, 6, 15)));
}
@Test
public void testSerialization02() throws Exception
{
assertEquals(q("P21D"), MAPPER.writeValueAsString(Period.of(0, 0, 21)));
}
@Test
public void testSerializationWithTypeInfo01() throws Exception
{
Period period = Period.of(5, 1, 12);
final ObjectMapper mapper = mapperBuilder()
.addMixIn(TemporalAmount.class, MockObjectConfiguration.class)
.build();
String value = mapper.writeValueAsString(period);
assertEquals("[" + q(Period.class.getName()) + ",\"P5Y1M12D\"]", value);
}
}
|
PeriodSerTest
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/jsontype/BaseTypeAsDefaultTest.java
|
{
"start": 563,
"end": 721
}
|
class ____ extends Parent {
}
@JsonTypeInfo(use = JsonTypeInfo.Id.CLASS, property = "@class", defaultImpl = ChildOfChild.class)
static abstract
|
Child
|
java
|
apache__logging-log4j2
|
log4j-1.2-api/src/main/java/org/apache/log4j/bridge/AppenderWrapper.java
|
{
"start": 1431,
"end": 4548
}
|
class ____ implements Appender {
private static final Logger LOGGER = StatusLogger.getLogger();
private final org.apache.logging.log4j.core.Appender appender;
/**
* Adapts a Log4j 2.x appender into a Log4j 1.x appender. Applying this method
* on the result of {@link AppenderAdapter#adapt(Appender)} should return the
* original Log4j 1.x appender.
*
* @param appender a Log4j 2.x appender
* @return a Log4j 1.x appender or {@code null} if the parameter is {@code null}
*/
public static Appender adapt(final org.apache.logging.log4j.core.Appender appender) {
if (appender instanceof Appender) {
return (Appender) appender;
}
if (appender instanceof Adapter) {
final Adapter adapter = (Adapter) appender;
// Don't unwrap an appender with filters
if (!adapter.hasFilter()) {
return adapter.getAppender();
}
}
if (appender != null) {
return new AppenderWrapper(appender);
}
return null;
}
/**
* Constructs a new instance for a Core Appender.
*
* @param appender a Core Appender.
*/
public AppenderWrapper(final org.apache.logging.log4j.core.Appender appender) {
this.appender = appender;
}
/**
* Gets the wrapped Core Appender.
*
* @return the wrapped Core Appender.
*/
public org.apache.logging.log4j.core.Appender getAppender() {
return appender;
}
@Override
public void addFilter(final Filter newFilter) {
if (appender instanceof AbstractFilterable) {
((AbstractFilterable) appender).addFilter(FilterAdapter.adapt(newFilter));
} else {
LOGGER.warn("Unable to add filter to appender {}, it does not support filters", appender.getName());
}
}
@Override
public Filter getFilter() {
return null;
}
@Override
public void clearFilters() {
// noop
}
@Override
public void close() {
// Not supported with Log4j 2.
}
@Override
public void doAppend(final LoggingEvent event) {
if (event instanceof LogEventAdapter) {
appender.append(((LogEventAdapter) event).getEvent());
}
}
@Override
public String getName() {
return appender.getName();
}
@Override
public void setErrorHandler(final ErrorHandler errorHandler) {
appender.setHandler(new ErrorHandlerAdapter(errorHandler));
}
@Override
public ErrorHandler getErrorHandler() {
return ((ErrorHandlerAdapter) appender.getHandler()).getHandler();
}
@Override
public void setLayout(final Layout layout) {
// Log4j 2 doesn't support this.
}
@Override
public Layout getLayout() {
return new LayoutWrapper(appender.getLayout());
}
@Override
public void setName(final String name) {
// Log4j 2 doesn't support this.
}
@Override
public boolean requiresLayout() {
return false;
}
}
|
AppenderWrapper
|
java
|
elastic__elasticsearch
|
x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/JreHttpUrlConnection.java
|
{
"start": 10306,
"end": 12433
}
|
class ____<R> {
private final R response;
private final SQLException exception;
private ResponseOrException(R response) {
this.response = response;
this.exception = null;
}
private ResponseOrException(SQLException exception) {
this.response = null;
this.exception = exception;
}
public R getResponseOrThrowException() throws SQLException {
if (exception != null) {
throw exception;
}
assert response != null;
return response;
}
}
private static InputStream getStream(HttpURLConnection con, InputStream stream) throws IOException {
if (GZIP.equals(con.getContentEncoding())) {
return new GZIPInputStream(stream);
}
return stream;
}
public void connect() {
if (closed) {
throw new ClientException("Connection cannot be reused");
}
try {
con.connect();
} catch (IOException ex) {
throw new ClientException("Cannot open connection to " + url + " (" + ex.getMessage() + ")", ex);
}
}
@Override
public void close() {
if (closed == false) {
closed = true;
// consume streams
consumeStreams();
}
}
public void disconnect() {
try {
connect();
} finally {
con.disconnect();
}
}
// http://docs.oracle.com/javase/7/docs/technotes/guides/net/http-keepalive.html
private void consumeStreams() {
try (InputStream in = con.getInputStream()) {
while (in != null && in.read() > -1) {
}
} catch (IOException ex) {
// ignore
} finally {
try (InputStream ein = con.getErrorStream()) {
while (ein != null && ein.read() > -1) {
}
} catch (IOException ex) {
// keep on ignoring
}
}
}
/**
* Exception type.
*/
public
|
ResponseOrException
|
java
|
junit-team__junit5
|
platform-tests/src/test/java/org/junit/platform/suite/engine/testcases/StatefulTestCase.java
|
{
"start": 840,
"end": 963
}
|
class ____ {
@Test
void statefulTest() {
callSequence.add("test2");
fail("This is a failing test");
}
}
}
|
Test2
|
java
|
apache__rocketmq
|
common/src/main/java/org/apache/rocketmq/common/TopicQueueId.java
|
{
"start": 886,
"end": 1849
}
|
class ____ {
private final String topic;
private final int queueId;
private final int hash;
public TopicQueueId(String topic, int queueId) {
this.topic = topic;
this.queueId = queueId;
this.hash = Objects.hashCode(topic, queueId);
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
TopicQueueId broker = (TopicQueueId) o;
return queueId == broker.queueId && Objects.equal(topic, broker.topic);
}
@Override
public int hashCode() {
return hash;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("MessageQueueInBroker{");
sb.append("topic='").append(topic).append('\'');
sb.append(", queueId=").append(queueId);
sb.append('}');
return sb.toString();
}
}
|
TopicQueueId
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/parser/stream/JSONReaderTest_error2.java
|
{
"start": 1126,
"end": 1530
}
|
class ____ {
public VO() {
try {
stateField.set(context, -1);
} catch (IllegalArgumentException e) {
// TODO Auto-generated catch block
e.printStackTrace();
} catch (IllegalAccessException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
}
}
|
VO
|
java
|
quarkusio__quarkus
|
integration-tests/mailer/src/test/java/io/quarkus/it/mailer/mailpit/Message.java
|
{
"start": 231,
"end": 765
}
|
class ____ {
@JsonProperty("Attachments")
public int attachmentCount;
@JsonProperty("Bcc")
public List<Recipient> bcc;
@JsonProperty("Cc")
public List<Recipient> cc;
@JsonProperty("To")
public List<Recipient> to;
@JsonProperty("From")
public Recipient from;
@JsonProperty("ID")
public String id;
@JsonProperty("MessageID")
public String messageId;
@JsonProperty("Subject")
public String subject;
public Recipient to() {
return to.get(0);
}
}
|
Message
|
java
|
spring-projects__spring-security
|
core/src/test/java/org/springframework/security/authentication/AuthenticationTrustResolverImplTests.java
|
{
"start": 1036,
"end": 5039
}
|
class ____ {
@Test
public void testCorrectOperationIsAnonymous() {
AuthenticationTrustResolverImpl trustResolver = new AuthenticationTrustResolverImpl();
assertThat(trustResolver.isAnonymous(
new AnonymousAuthenticationToken("ignored", "ignored", AuthorityUtils.createAuthorityList("ignored"))))
.isTrue();
assertThat(trustResolver.isAnonymous(
new TestingAuthenticationToken("ignored", "ignored", AuthorityUtils.createAuthorityList("ignored"))))
.isFalse();
}
@Test
public void testCorrectOperationIsRememberMe() {
AuthenticationTrustResolverImpl trustResolver = new AuthenticationTrustResolverImpl();
assertThat(trustResolver.isRememberMe(
new RememberMeAuthenticationToken("ignored", "ignored", AuthorityUtils.createAuthorityList("ignored"))))
.isTrue();
assertThat(trustResolver.isAnonymous(
new TestingAuthenticationToken("ignored", "ignored", AuthorityUtils.createAuthorityList("ignored"))))
.isFalse();
}
@Test
public void testGettersSetters() {
AuthenticationTrustResolverImpl trustResolver = new AuthenticationTrustResolverImpl();
assertThat(AnonymousAuthenticationToken.class).isEqualTo(trustResolver.getAnonymousClass());
trustResolver.setAnonymousClass(TestingAuthenticationToken.class);
assertThat(trustResolver.getAnonymousClass()).isEqualTo(TestingAuthenticationToken.class);
assertThat(RememberMeAuthenticationToken.class).isEqualTo(trustResolver.getRememberMeClass());
trustResolver.setRememberMeClass(TestingAuthenticationToken.class);
assertThat(trustResolver.getRememberMeClass()).isEqualTo(TestingAuthenticationToken.class);
}
@Test
void isAuthenticatedWhenAuthenticationNullThenFalse() {
AuthenticationTrustResolver trustResolver = new AuthenticationTrustResolverImpl();
Authentication authentication = null;
assertThat(trustResolver.isAuthenticated(authentication)).isFalse();
}
@Test
void isAuthenticatedWhenAuthenticationNotAuthenticatedThenFalse() {
AuthenticationTrustResolver trustResolver = new AuthenticationTrustResolverImpl();
TestingAuthenticationToken authentication = new TestingAuthenticationToken("user", "password");
assertThat(trustResolver.isAuthenticated(authentication)).isFalse();
}
@Test
void isAuthenticatedWhenAnonymousThenFalse() {
AuthenticationTrustResolver trustResolver = new AuthenticationTrustResolverImpl();
AnonymousAuthenticationToken authentication = new AnonymousAuthenticationToken("key", "principal",
AuthorityUtils.createAuthorityList("ROLE_ANONYMOUS"));
assertThat(trustResolver.isAuthenticated(authentication)).isFalse();
}
@Test
void isFullyAuthenticatedWhenAuthenticationNullThenFalse() {
AuthenticationTrustResolver trustResolver = new AuthenticationTrustResolverImpl();
Authentication authentication = null;
assertThat(trustResolver.isFullyAuthenticated(authentication)).isFalse();
}
@Test
void isFullyAuthenticatedWhenAuthenticationNotAuthenticatedThenFalse() {
AuthenticationTrustResolver trustResolver = new AuthenticationTrustResolverImpl();
TestingAuthenticationToken authentication = new TestingAuthenticationToken("user", "password");
assertThat(trustResolver.isFullyAuthenticated(authentication)).isFalse();
}
@Test
void isFullyAuthenticatedWhenAnonymousThenFalse() {
AuthenticationTrustResolver trustResolver = new AuthenticationTrustResolverImpl();
AnonymousAuthenticationToken authentication = new AnonymousAuthenticationToken("key", "principal",
AuthorityUtils.createAuthorityList("ROLE_ANONYMOUS"));
assertThat(trustResolver.isFullyAuthenticated(authentication)).isFalse();
}
@Test
void isFullyAuthenticatedWhenRememberMeThenFalse() {
AuthenticationTrustResolver trustResolver = new AuthenticationTrustResolverImpl();
RememberMeAuthenticationToken authentication = new RememberMeAuthenticationToken("key", "user",
AuthorityUtils.createAuthorityList("ROLE_USER"));
assertThat(trustResolver.isFullyAuthenticated(authentication)).isFalse();
}
}
|
AuthenticationTrustResolverImplTests
|
java
|
apache__flink
|
flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/handlers/ParallelismQueryParameterTest.java
|
{
"start": 1009,
"end": 1461
}
|
class ____ {
private final ParallelismQueryParameter parallelismQueryParameter =
new ParallelismQueryParameter();
@Test
void testConvertStringToValue() {
assertThat(parallelismQueryParameter.convertValueToString(42)).isEqualTo("42");
}
@Test
void testConvertValueFromString() {
assertThat((int) parallelismQueryParameter.convertStringToValue("42")).isEqualTo(42);
}
}
|
ParallelismQueryParameterTest
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java
|
{
"start": 18489,
"end": 18607
}
|
class ____ local Plugin class ["
+ TestPlugin.class.getName()
+ "] (
|
loader
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.