language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | grpc__grpc-java | core/src/main/java/io/grpc/internal/TimeProvider.java | {
"start": 753,
"end": 937
} | interface ____ {
/** Returns the current nano time. */
long currentTimeNanos();
TimeProvider SYSTEM_TIME_PROVIDER = TimeProviderResolverFactory.resolveTimeProvider();
}
| TimeProvider |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/events/ListenerTest.java | {
"start": 4900,
"end": 5009
} | class ____ {
public static boolean isAuthorized(String clazz, Object id) {
return false;
}
}
}
| Principal |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/injection/HeaderFieldInSuperClassNoScopeTest.java | {
"start": 1346,
"end": 1569
} | class ____ extends AbstractAbstractResource {
@GET
@Produces(MediaType.TEXT_PLAIN)
public String hello() {
return "foo: " + foo + ", bar: " + bar;
}
}
public static | Resource |
java | spring-projects__spring-framework | spring-oxm/src/main/java/org/springframework/oxm/jaxb/Jaxb2Marshaller.java | {
"start": 4326,
"end": 5011
} | interface ____ JAXB 2.2.
*
* <p>The typical usage will be to set either the "contextPath" or the "classesToBeBound"
* property on this bean, possibly customize the marshaller and unmarshaller by setting
* properties, schemas, adapters, and listeners, and to refer to it.
*
* @author Arjen Poutsma
* @author Juergen Hoeller
* @author Rossen Stoyanchev
* @author Sam Brannen
* @since 3.0
* @see #setContextPath
* @see #setClassesToBeBound
* @see #setJaxbContextProperties
* @see #setMarshallerProperties
* @see #setUnmarshallerProperties
* @see #setSchema
* @see #setSchemas
* @see #setMarshallerListener
* @see #setUnmarshallerListener
* @see #setAdapters
*/
public | for |
java | reactor__reactor-core | reactor-core/src/test/java/reactor/core/publisher/FluxGenerateTest.java | {
"start": 1158,
"end": 12432
} | class ____ {
@Test
public void stateSupplierNull() {
assertThatExceptionOfType(NullPointerException.class).isThrownBy(() -> {
Flux.generate(null, (s, o) -> s, s -> {
});
});
}
@Test
public void generatorNull() {
assertThatExceptionOfType(NullPointerException.class).isThrownBy(() -> {
Flux.generate(() -> 1, null, s -> {
});
});
}
@Test
public void stateConsumerNull() {
assertThatExceptionOfType(NullPointerException.class).isThrownBy(() -> {
Flux.generate(() -> 1, (s, o) -> s, null);
});
}
@Test
public void sinkNotUsed() {
StepVerifier.create(Flux.generate(sink -> {}))
.expectFusion(Fuseable.NONE)
.verifyErrorMessage("The generator didn't call any of the SynchronousSink method");
}
@Test
public void sinkNotUsedFusion() {
StepVerifier.create(Flux.generate(sink -> {}))
.expectFusion(Fuseable.SYNC)
.verifyErrorMessage("The generator didn't call any of the SynchronousSink method");
}
@Test
public void generateEmpty() {
AssertSubscriber<Integer> ts = AssertSubscriber.create();
Flux.<Integer>generate(o -> {
o.complete();
}).subscribe(ts);
ts.assertNoValues()
.assertNoError()
.assertComplete();
}
@Test
public void generateJust() {
AssertSubscriber<Integer> ts = AssertSubscriber.create();
Flux.<Integer>generate(o -> {
o.next(1);
o.complete();
}).subscribe(ts);
ts.assertValues(1)
.assertNoError()
.assertComplete();
}
@Test
public void generateError() {
AssertSubscriber<Integer> ts = AssertSubscriber.create();
Flux.<Integer>generate(o -> {
o.error(new RuntimeException("forced failure"));
}).subscribe(ts);
ts.assertNoValues()
.assertNotComplete()
.assertError(RuntimeException.class)
.assertErrorMessage("forced failure");
}
@Test
public void generateJustBackpressured() {
AssertSubscriber<Integer> ts = AssertSubscriber.create(0);
Flux.<Integer>generate(o -> {
o.next(1);
o.complete();
}).subscribe(ts);
ts.assertNoValues()
.assertNoError()
.assertNotComplete();
ts.request(2);
ts.assertValues(1)
.assertNoError()
.assertComplete();
}
@Test
public void generateRange() {
AssertSubscriber<Integer> ts = AssertSubscriber.create();
Flux.<Integer, Integer>generate(() -> 1, (s, o) -> {
if (s < 11) {
o.next(s);
}
else {
o.complete();
}
return s + 1;
}).subscribe(ts);
ts.assertValues(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
.assertNoError()
.assertComplete();
}
@Test
public void generateRangeBackpressured() {
AssertSubscriber<Integer> ts = AssertSubscriber.create(0);
Flux.<Integer, Integer>generate(() -> 1, (s, o) -> {
if (s < 11) {
o.next(s);
}
else {
o.complete();
}
return s + 1;
}).subscribe(ts);
ts.assertNoValues()
.assertNoError()
.assertNotComplete();
ts.request(2);
ts.assertValues(1, 2)
.assertNoError()
.assertNotComplete();
ts.request(10);
ts.assertValues(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
.assertNoError()
.assertComplete();
}
@Test
public void stateSupplierThrows() {
AssertSubscriber<Integer> ts = AssertSubscriber.create();
Flux.<Integer, Integer>generate(() -> {
throw new RuntimeException("forced failure");
}, (s, o) -> {
o.next(1);
return s;
}).subscribe(ts);
ts.assertNoValues()
.assertNotComplete()
.assertError(RuntimeException.class);
}
@Test
public void generatorThrows() {
AssertSubscriber<Integer> ts = AssertSubscriber.create();
Flux.<Integer>generate(o -> {
throw new RuntimeException("forced failure");
}).subscribe(ts);
ts.assertNoValues()
.assertNotComplete()
.assertError(RuntimeException.class)
.assertErrorMessage("forced failure");
}
@Test
public void generatorThrowsFusion() {
StepVerifier.create(
Flux.<Integer>generate(o -> { throw new IllegalStateException("forced failure"); }))
.expectFusion(Fuseable.SYNC)
.verifyErrorSatisfies(e -> assertThat(e).isInstanceOf(IllegalStateException.class)
.hasMessage("forced failure"));
}
@Test
public void generatorMultipleOnErrors() {
AssertSubscriber<Integer> ts = AssertSubscriber.create();
Flux.<Integer>generate(o -> {
o.error(new RuntimeException("forced failure"));
o.error(new RuntimeException("forced failure"));
}).subscribe(ts);
ts.assertNoValues()
.assertNotComplete()
.assertError(RuntimeException.class)
.assertErrorMessage("forced failure");
}
@Test
public void generatorMultipleOnCompletes() {
AssertSubscriber<Integer> ts = AssertSubscriber.create();
Flux.<Integer>generate(o -> {
o.complete();
o.complete();
}).subscribe(ts);
ts.assertNoValues()
.assertComplete()
.assertNoError();
}
@Test
public void generatorMultipleOnNexts() {
AssertSubscriber<Integer> ts = AssertSubscriber.create();
Flux.<Integer>generate(o -> {
o.next(1);
o.next(1);
}).subscribe(ts);
ts.assertValues(1)
.assertNotComplete()
.assertError(IllegalStateException.class);
}
@Test
public void stateConsumerCalled() {
AssertSubscriber<Integer> ts = AssertSubscriber.create();
AtomicInteger stateConsumer = new AtomicInteger();
Flux.<Integer, Integer>generate(() -> 1, (s, o) -> {
o.complete();
return s;
}, stateConsumer::set).subscribe(ts);
ts.assertNoValues()
.assertComplete()
.assertNoError();
assertThat(stateConsumer).hasValue(1);
}
@Test
public void iterableSource() {
AssertSubscriber<Integer> ts = AssertSubscriber.create();
List<Integer> list = Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
Flux.<Integer, Iterator<Integer>>generate(list::iterator, (s, o) -> {
if (s.hasNext()) {
o.next(s.next());
}
else {
o.complete();
}
return s;
}).subscribe(ts);
ts.assertValueSequence(list)
.assertComplete()
.assertNoError();
}
@Test
public void iterableSourceBackpressured() {
AssertSubscriber<Integer> ts = AssertSubscriber.create(0);
List<Integer> list = Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
Flux.<Integer, Iterator<Integer>>generate(list::iterator, (s, o) -> {
if (s.hasNext()) {
o.next(s.next());
}
else {
o.complete();
}
return s;
}).subscribe(ts);
ts.assertNoValues()
.assertNoError()
.assertNotComplete();
ts.request(2);
ts.assertValues(1, 2)
.assertNoError()
.assertNotComplete();
ts.request(5);
ts.assertValues(1, 2, 3, 4, 5, 6, 7)
.assertNoError()
.assertNotComplete();
ts.request(10);
ts.assertValueSequence(list)
.assertComplete()
.assertNoError();
}
@Test
public void fusion() {
AssertSubscriber<Integer> ts = AssertSubscriber.create();
ts.requestedFusionMode(Fuseable.ANY);
List<Integer> list = Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
Flux.<Integer, Iterator<Integer>>generate(() -> list.iterator(), (s, o) -> {
if (s.hasNext()) {
o.next(s.next());
}
else {
o.complete();
}
return s;
}).subscribe(ts);
ts.assertFuseableSource()
.assertFusionMode(Fuseable.SYNC)
.assertValues(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
}
@Test
public void fusionBoundary() {
AssertSubscriber<Integer> ts = AssertSubscriber.create();
ts.requestedFusionMode(Fuseable.ANY | Fuseable.THREAD_BARRIER);
List<Integer> list = Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
Flux.<Integer, Iterator<Integer>>generate(list::iterator, (s, o) -> {
if (s.hasNext()) {
o.next(s.next());
}
else {
o.complete();
}
return s;
}).subscribe(ts);
ts.assertFuseableSource()
.assertFusionMode(Fuseable.NONE)
.assertValues(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
}
@Test
public void scanOperator(){
FluxGenerate<Object, Object> test = new FluxGenerate<>(o -> {
o.next(1);
o.complete();
});
assertThat(test.scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.SYNC);
}
@Test
public void scanSubscription() {
CoreSubscriber<Integer> subscriber = new LambdaSubscriber<>(null, e -> {}, null, null);
FluxGenerate.GenerateSubscription<Integer, Integer> test =
new FluxGenerate.GenerateSubscription<>(subscriber, 1, (s, o) -> null, s -> {});
assertThat(test.scan(Scannable.Attr.TERMINATED)).isFalse();
assertThat(test.scan(Scannable.Attr.CANCELLED)).isFalse();
assertThat(test.scan(Scannable.Attr.ACTUAL)).isSameAs(subscriber);
test.request(5);
assertThat(test.scan(Scannable.Attr.REQUESTED_FROM_DOWNSTREAM)).isEqualTo(5L);
assertThat(test.scan(Scannable.Attr.ERROR)).isNull();
assertThat(test.scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.SYNC);
}
@Test
public void scanSubscriptionError() {
CoreSubscriber<Integer> subscriber = new LambdaSubscriber<>(null, e -> {}, null, null);
FluxGenerate.GenerateSubscription<Integer, Integer> test =
new FluxGenerate.GenerateSubscription<>(subscriber, 1, (s, o) -> null, s -> {});
test.error(new IllegalStateException("boom"));
assertThat(test.scan(Scannable.Attr.TERMINATED)).isTrue();
assertThat(test.scan(Scannable.Attr.ERROR)).isNull();
}
@Test
public void scanSubscriptionCancelled() {
CoreSubscriber<Integer> subscriber = new LambdaSubscriber<>(null, e -> {}, null, null);
FluxGenerate.GenerateSubscription<Integer, Integer> test =
new FluxGenerate.GenerateSubscription<>(subscriber, 1, (s, o) -> null, s -> {});
assertThat(test.scan(Scannable.Attr.CANCELLED)).isFalse();
test.cancel();
assertThat(test.scan(Scannable.Attr.CANCELLED)).isTrue();
}
@Test
void contextIsReadable() {
StepVerifier.create(Flux.generate(s -> s.next(s.contextView()
.get(AtomicInteger.class)
.incrementAndGet()))
.take(10, false)
.contextWrite(ctx -> ctx.put(AtomicInteger.class,
new AtomicInteger())))
.expectNext(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
.verifyComplete();
}
//see https://github.com/reactor/reactor-core/issues/1685
@Test
public void fusedGeneratedNextAndErrorPropagateException() {
StepVerifier.create(
Flux.<String>generate(sink -> {
sink.next("foo");
sink.error(new IllegalStateException("boom"));
}))
.expectFusion(Fuseable.SYNC)
.expectNext("foo")
.verifyErrorSatisfies(e -> assertThat(e)
.isInstanceOf(IllegalStateException.class)
.hasMessage("boom")
);
}
//see https://github.com/reactor/reactor-core/issues/1685
@Test
public void fusedGenerateErrorThrowsPropagateException() {
StepVerifier.create(
Flux.<String>generate(sink -> {
sink.error(new IllegalStateException("boom"));
}))
.expectFusion(Fuseable.SYNC)
.verifyErrorSatisfies(e -> assertThat(e)
.isInstanceOf(IllegalStateException.class)
.hasMessage("boom")
);
}
}
| FluxGenerateTest |
java | quarkusio__quarkus | independent-projects/arc/runtime/src/main/java/io/quarkus/arc/impl/Qualifiers.java | {
"start": 495,
"end": 6235
} | class ____ {
public static final Set<Annotation> DEFAULT_QUALIFIERS = Set.of(Default.Literal.INSTANCE, Any.Literal.INSTANCE);
public static final Set<Annotation> IP_DEFAULT_QUALIFIERS = Set.of(Default.Literal.INSTANCE);
final Set<String> allQualifiers;
// custom qualifier -> non-binding members (can be empty but never null)
final Map<String, Set<String>> qualifierNonbindingMembers;
Qualifiers(Set<String> qualifiers, Map<String, Set<String>> qualifierNonbindingMembers) {
this.allQualifiers = qualifiers;
this.qualifierNonbindingMembers = qualifierNonbindingMembers;
}
boolean isRegistered(Class<? extends Annotation> annotationType) {
return allQualifiers.contains(annotationType.getName());
}
void verify(Collection<Annotation> qualifiers) {
if (qualifiers.isEmpty()) {
return;
}
if (qualifiers.size() == 1) {
verifyQualifier(qualifiers.iterator().next().annotationType());
} else {
Map<Class<? extends Annotation>, Integer> timesQualifierWasSeen = new HashMap<>();
for (Annotation qualifier : qualifiers) {
verifyQualifier(qualifier.annotationType());
timesQualifierWasSeen.compute(qualifier.annotationType(), TimesSeenBiFunction.INSTANCE);
}
checkQualifiersForDuplicates(timesQualifierWasSeen);
}
}
void verify(Annotation[] qualifiers) {
if (qualifiers.length == 0) {
return;
}
if (qualifiers.length == 1) {
verifyQualifier(qualifiers[0].annotationType());
} else {
Map<Class<? extends Annotation>, Integer> timesQualifierWasSeen = new HashMap<>();
for (Annotation qualifier : qualifiers) {
verifyQualifier(qualifier.annotationType());
timesQualifierWasSeen.compute(qualifier.annotationType(), TimesSeenBiFunction.INSTANCE);
}
checkQualifiersForDuplicates(timesQualifierWasSeen);
}
}
// in various cases, specification requires to check qualifiers for duplicates and throw IAE
private static void checkQualifiersForDuplicates(Map<Class<? extends Annotation>, Integer> timesQualifierSeen) {
timesQualifierSeen.forEach(Qualifiers::checkQualifiersForDuplicates);
}
private static void checkQualifiersForDuplicates(Class<? extends Annotation> aClass, Integer times) {
if (times > 1 && (aClass.getAnnotation(Repeatable.class) == null)) {
throw new IllegalArgumentException("The qualifier " + aClass + " was used repeatedly " +
"but it is not annotated with @java.lang.annotation.Repeatable");
}
}
boolean hasQualifiers(Set<Annotation> beanQualifiers, Annotation... requiredQualifiers) {
for (Annotation qualifier : requiredQualifiers) {
if (!hasQualifier(beanQualifiers, qualifier)) {
return false;
}
}
return true;
}
boolean hasQualifier(Iterable<Annotation> qualifiers, Annotation requiredQualifier) {
Class<? extends Annotation> requiredQualifierClass = requiredQualifier.annotationType();
Method[] members = requiredQualifierClass.getDeclaredMethods();
for (Annotation qualifier : qualifiers) {
Class<? extends Annotation> qualifierClass = qualifier.annotationType();
if (!qualifierClass.equals(requiredQualifierClass)) {
continue;
}
boolean matches = true;
for (Method value : members) {
if (value.isAnnotationPresent(Nonbinding.class)) {
continue;
}
if (!qualifierNonbindingMembers.isEmpty()) {
Set<String> nonbindingMembers = qualifierNonbindingMembers.get(qualifierClass.getName());
if (nonbindingMembers != null && nonbindingMembers.contains(value.getName())) {
continue;
}
}
Object val1 = invoke(value, requiredQualifier);
Object val2 = invoke(value, qualifier);
if (val1.getClass().isArray()) {
if (!val2.getClass().isArray() || !Arrays.equals((Object[]) val1, (Object[]) val2)) {
matches = false;
break;
}
} else if (!val1.equals(val2)) {
matches = false;
break;
}
}
if (matches) {
return true;
}
}
return false;
}
boolean isSubset(Set<Annotation> observedQualifiers, Set<Annotation> eventQualifiers) {
for (Annotation required : observedQualifiers) {
if (!hasQualifier(eventQualifiers, required)) {
return false;
}
}
return true;
}
private static Object invoke(Method method, Object instance) {
try {
method.setAccessible(true);
return method.invoke(instance);
} catch (IllegalArgumentException | IllegalAccessException | InvocationTargetException e) {
throw new RuntimeException(
"Error checking value of member method " + method.getName() + " on " + method.getDeclaringClass(), e);
}
}
private void verifyQualifier(Class<? extends Annotation> annotationType) {
if (!allQualifiers.contains(annotationType.getName())) {
throw new IllegalArgumentException("Annotation is not a registered qualifier: " + annotationType);
}
}
private static | Qualifiers |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java | {
"start": 4489,
"end": 5021
} | class ____ {
public static final String NAME = SourceFieldMapper.NAME;
public static final FieldType FIELD_TYPE;
static {
FieldType ft = new FieldType();
ft.setIndexOptions(IndexOptions.NONE); // not indexed
ft.setStored(true);
ft.setOmitNorms(true);
FIELD_TYPE = freezeAndDeduplicateFieldType(ft);
}
}
private static SourceFieldMapper toType(FieldMapper in) {
return (SourceFieldMapper) in;
}
public static | Defaults |
java | elastic__elasticsearch | x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/completion/GoogleVertexAiChatCompletionServiceSettings.java | {
"start": 2509,
"end": 14079
} | class ____ extends FilteredXContentObject
implements
ServiceSettings,
GoogleVertexAiRateLimitServiceSettings {
public static final String NAME = "google_vertex_ai_chatcompletion_service_settings";
private static final TransportVersion ML_INFERENCE_VERTEXAI_CHATCOMPLETION_ADDED = TransportVersion.fromName(
"ml_inference_vertexai_chatcompletion_added"
);
private final String location;
private final String modelId;
private final String projectId;
private final URI uri;
private final URI streamingUri;
private final GoogleModelGardenProvider provider;
private final RateLimitSettings rateLimitSettings;
// https://cloud.google.com/vertex-ai/docs/quotas#eval-quotas
private static final RateLimitSettings DEFAULT_RATE_LIMIT_SETTINGS = new RateLimitSettings(1000);
public GoogleVertexAiChatCompletionServiceSettings(StreamInput in) throws IOException {
var version = in.getTransportVersion();
String projectIdFromStreamInput;
String locationFromStreamInput;
String modelIdFromStreamInput;
URI uriFromStreamInput = null;
URI streamingUriFromStreamInput = null;
GoogleModelGardenProvider providerFromStreamInput = null;
if (GoogleVertexAiUtils.supportsModelGarden(version)) {
projectIdFromStreamInput = in.readOptionalString();
locationFromStreamInput = in.readOptionalString();
modelIdFromStreamInput = in.readOptionalString();
uriFromStreamInput = ServiceUtils.createOptionalUri(in.readOptionalString());
streamingUriFromStreamInput = ServiceUtils.createOptionalUri(in.readOptionalString());
providerFromStreamInput = in.readOptionalEnum(GoogleModelGardenProvider.class);
} else {
projectIdFromStreamInput = in.readString();
locationFromStreamInput = in.readString();
modelIdFromStreamInput = in.readString();
}
RateLimitSettings rateLimitSettingsFromStreamInput = new RateLimitSettings(in);
this.projectId = Strings.isNullOrEmpty(projectIdFromStreamInput) ? null : projectIdFromStreamInput;
this.location = Strings.isNullOrEmpty(locationFromStreamInput) ? null : locationFromStreamInput;
this.modelId = Strings.isNullOrEmpty(modelIdFromStreamInput) ? null : modelIdFromStreamInput;
this.uri = uriFromStreamInput;
this.streamingUri = streamingUriFromStreamInput;
// Default to GOOGLE if not set
this.provider = Objects.requireNonNullElse(providerFromStreamInput, GoogleModelGardenProvider.GOOGLE);
this.rateLimitSettings = rateLimitSettingsFromStreamInput;
}
@Override
protected XContentBuilder toXContentFragmentOfExposedFields(XContentBuilder builder, ToXContent.Params params) throws IOException {
if (Strings.isNullOrEmpty(projectId) == false) {
builder.field(PROJECT_ID, projectId);
}
if (Strings.isNullOrEmpty(location) == false) {
builder.field(LOCATION, location);
}
if (Strings.isNullOrEmpty(modelId) == false) {
builder.field(MODEL_ID, modelId);
}
if (uri != null) {
builder.field(URL_SETTING_NAME, uri.toString());
}
if (streamingUri != null) {
builder.field(STREAMING_URL_SETTING_NAME, streamingUri.toString());
}
if (provider != null) {
builder.field(PROVIDER_SETTING_NAME, provider.name());
}
rateLimitSettings.toXContent(builder, params);
return builder;
}
public static GoogleVertexAiChatCompletionServiceSettings fromMap(Map<String, Object> map, ConfigurationParseContext context) {
ValidationException validationException = new ValidationException();
// Extract Google Vertex AI fields
String projectId = ServiceUtils.extractOptionalString(map, PROJECT_ID, ModelConfigurations.SERVICE_SETTINGS, validationException);
String location = ServiceUtils.extractOptionalString(map, LOCATION, ModelConfigurations.SERVICE_SETTINGS, validationException);
String modelId = ServiceUtils.extractOptionalString(map, MODEL_ID, ModelConfigurations.SERVICE_SETTINGS, validationException);
// Extract Google Model Garden fields
URI uri = ServiceUtils.extractOptionalUri(map, URL_SETTING_NAME, validationException);
URI streamingUri = ServiceUtils.extractOptionalUri(map, STREAMING_URL_SETTING_NAME, validationException);
GoogleModelGardenProvider provider = ServiceUtils.extractOptionalEnum(
map,
PROVIDER_SETTING_NAME,
ModelConfigurations.SERVICE_SETTINGS,
GoogleModelGardenProvider::fromString,
EnumSet.allOf(GoogleModelGardenProvider.class),
validationException
);
// Extract rate limit settings
RateLimitSettings rateLimitSettings = RateLimitSettings.of(
map,
DEFAULT_RATE_LIMIT_SETTINGS,
validationException,
GoogleVertexAiService.NAME,
context
);
validateServiceSettings(provider, uri, streamingUri, projectId, location, modelId, validationException);
if (validationException.validationErrors().isEmpty() == false) {
throw validationException;
}
return new GoogleVertexAiChatCompletionServiceSettings(
projectId,
location,
modelId,
uri,
streamingUri,
provider,
rateLimitSettings
);
}
private static void validateServiceSettings(
GoogleModelGardenProvider provider,
URI uri,
URI streamingUri,
String projectId,
String location,
String modelId,
ValidationException validationException
) {
// GOOGLE is the default provider, so if provider is null, we treat it as GOOGLE
boolean isNonGoogleProvider = provider != null && provider != GoogleModelGardenProvider.GOOGLE;
// If using a non-Google provider, at least one URL must be provided
boolean hasAnyUrl = uri != null || streamingUri != null;
// If using Google Vertex AI, all three fields must be provided
boolean hasAllVertexFields = projectId != null && location != null && modelId != null;
if (isNonGoogleProvider) {
if (hasAnyUrl == false) {
// Non-Google (Model Garden endpoint mode): must have at least one URL. Google Vertex AI fields are allowed.
validationException.addValidationError(
String.format(
Locale.ROOT,
"Google Model Garden provider=%s selected. Either 'uri' or 'streaming_uri' must be provided",
provider
)
);
}
} else if (hasAnyUrl) {
// If using Google Vertex AI, URLs must not be provided
validationException.addValidationError(String.format(Locale.ROOT, """
'provider' is either GOOGLE or null. For Google Vertex AI models 'uri' and 'streaming_uri' must not be provided. \
Remove 'url' and 'streaming_url' fields. Provided values: uri=%s, streaming_uri=%s""", uri, streamingUri));
} else if (hasAllVertexFields == false) {
// If using Google Vertex AI, all fields must be provided
validationException.addValidationError(String.format(Locale.ROOT, """
For Google Vertex AI models, you must provide 'location', 'project_id', and 'model_id'. \
Provided values: location=%s, project_id=%s, model_id=%s""", location, projectId, modelId));
}
}
public GoogleVertexAiChatCompletionServiceSettings(
@Nullable String projectId,
@Nullable String location,
@Nullable String modelId,
@Nullable URI uri,
@Nullable URI streamingUri,
@Nullable GoogleModelGardenProvider provider,
@Nullable RateLimitSettings rateLimitSettings
) {
this.projectId = projectId;
this.location = location;
this.modelId = modelId;
this.uri = uri;
this.streamingUri = streamingUri;
this.provider = Objects.requireNonNullElse(provider, GoogleModelGardenProvider.GOOGLE);
this.rateLimitSettings = Objects.requireNonNullElse(rateLimitSettings, DEFAULT_RATE_LIMIT_SETTINGS);
}
public String location() {
return location;
}
@Override
public String modelId() {
return modelId;
}
@Override
public String projectId() {
return projectId;
}
public URI uri() {
return uri;
}
public URI streamingUri() {
return streamingUri;
}
public GoogleModelGardenProvider provider() {
return provider;
}
@Override
public String getWriteableName() {
return NAME;
}
@Override
public RateLimitSettings rateLimitSettings() {
return rateLimitSettings;
}
@Override
public TransportVersion getMinimalSupportedVersion() {
assert false : "should never be called when supportsVersion is used";
return ML_INFERENCE_VERTEXAI_CHATCOMPLETION_ADDED;
}
@Override
public boolean supportsVersion(TransportVersion version) {
return version.supports(ML_INFERENCE_VERTEXAI_CHATCOMPLETION_ADDED);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
TransportVersion version = out.getTransportVersion();
if (GoogleVertexAiUtils.supportsModelGarden(version)) {
out.writeOptionalString(projectId);
out.writeOptionalString(location);
out.writeOptionalString(modelId);
out.writeOptionalString(uri != null ? uri.toString() : null);
out.writeOptionalString(streamingUri != null ? streamingUri.toString() : null);
out.writeOptionalEnum(provider);
} else {
out.writeString(Objects.requireNonNullElse(projectId, ""));
out.writeString(Objects.requireNonNullElse(location, ""));
out.writeString(Objects.requireNonNullElse(modelId, ""));
}
rateLimitSettings.writeTo(out);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
toXContentFragmentOfExposedFields(builder, params);
builder.endObject();
return builder;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
GoogleVertexAiChatCompletionServiceSettings that = (GoogleVertexAiChatCompletionServiceSettings) o;
return Objects.equals(location, that.location)
&& Objects.equals(modelId, that.modelId)
&& Objects.equals(projectId, that.projectId)
&& Objects.equals(uri, that.uri)
&& Objects.equals(streamingUri, that.streamingUri)
&& Objects.equals(provider, that.provider)
&& Objects.equals(rateLimitSettings, that.rateLimitSettings);
}
@Override
public int hashCode() {
return Objects.hash(location, modelId, projectId, uri, streamingUri, provider, rateLimitSettings);
}
@Override
public String toString() {
return Strings.toString(this);
}
}
| GoogleVertexAiChatCompletionServiceSettings |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/simple/MyParameter.java | {
"start": 65,
"end": 363
} | class ____ {
private String value;
public MyParameter(String str) {
this.value = "WRONG CONSTRUCTOR";
}
public MyParameter(String str, String str2) {
this.value = str + str2;
}
@Override
public String toString() {
return value;
}
}
| MyParameter |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/type/OracleArrayTest.java | {
"start": 1210,
"end": 1873
} | class ____ {
@AfterEach
void tearDown(SessionFactoryScope factoryScope) {
factoryScope.dropData();
}
@Test
public void test(SessionFactoryScope scope) {
scope.inTransaction( session -> {
ArrayHolder expected = new ArrayHolder( 1, new Integer[] { 1, 2, 3 }, new String[] { "abc", "def" } );
session.persist( expected );
session.flush();
session.clear();
ArrayHolder arrayHolder = session.find( ArrayHolder.class, 1 );
assertArrayEquals( expected.getIntArray(), arrayHolder.getIntArray() );
assertArrayEquals( expected.getTextArray(), arrayHolder.getTextArray() );
} );
}
@Entity(name = "ArrayHolder")
public static | OracleArrayTest |
java | elastic__elasticsearch | x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwkSetLoader.java | {
"start": 2004,
"end": 2388
} | class ____ instantiated. Subsequent reloading is triggered
* by invoking the {@link #reload(ActionListener)} method. The updated JWK set can be retrieved with
* the {@link #getContentAndJwksAlgs()} method once loading or reloading is completed. Additionally,
* {@link JwtRealm} settings can specify reloading parameters to enable periodic background reloading
* of the JWK set.
*/
| is |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/mapper/blockloader/docvalues/DenseVectorBlockLoaderProcessor.java | {
"start": 2887,
"end": 4109
} | class ____ implements DenseVectorBlockLoaderProcessor<BlockLoader.DoubleBuilder> {
private final DenseVectorFieldMapper.VectorSimilarityFunctionConfig config;
public DenseVectorSimilarityProcessor(DenseVectorFieldMapper.VectorSimilarityFunctionConfig config) {
this.config = config;
}
@Override
public BlockLoader.DoubleBuilder createBuilder(BlockLoader.BlockFactory factory, int expectedCount, int dimensions) {
return factory.doubles(expectedCount);
}
@Override
public void process(float[] vector, BlockLoader.DoubleBuilder builder) {
double similarity = config.similarityFunction().calculateSimilarity(vector, config.vector());
builder.appendDouble(similarity);
}
@Override
public void process(byte[] vector, BlockLoader.DoubleBuilder builder) {
double similarity = config.similarityFunction().calculateSimilarity(vector, config.vectorAsBytes());
builder.appendDouble(similarity);
}
@Override
public String name() {
return config.similarityFunction().function().toString();
}
}
}
| DenseVectorSimilarityProcessor |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/generics/GenericMappedSuperclassAssociationTest.java | {
"start": 3577,
"end": 4015
} | class ____<P extends Parent<?>> {
@Id
@GeneratedValue
private Long id;
private String name;
@ManyToOne
private P parent;
public Child() {
}
public Child(String name, P parent) {
this.name = name;
this.parent = parent;
}
public Long getId() {
return id;
}
public String getName() {
return name;
}
public P getParent() {
return parent;
}
}
@MappedSuperclass
public static abstract | Child |
java | elastic__elasticsearch | x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/ItemSetTraverser.java | {
"start": 1087,
"end": 6169
} | class ____ {
// start size and size increment for array holding items
private static final int SIZE_INCREMENT = 100;
private final TransactionStore.TopItemIds topItemIds;
// stack implementation: to avoid object churn this is not implemented as classical stack, but optimized for re-usage
// non-optimized: Stack<TransactionStore.TopItemIds.IdIterator> itemIterators = new Stack<>();
private final List<TransactionStore.TopItemIds.IdIterator> itemIterators = new ArrayList<>();
private LongsRef itemIdStack = new LongsRef(SIZE_INCREMENT);
private final ItemSetBitSet itemPositionsVector;
private final ItemSetBitSet itemPositionsVectorParent;
private IntsRef itemPositionsStack = new IntsRef(SIZE_INCREMENT);
private int stackPosition = 0;
ItemSetTraverser(TransactionStore.TopItemIds topItemIds) {
this.topItemIds = topItemIds;
// push the first iterator
itemIterators.add(topItemIds.iterator());
// create a bit vector that corresponds to the number of items
itemPositionsVector = new ItemSetBitSet((int) topItemIds.size());
// create a bit vector that corresponds to the item set
itemPositionsVectorParent = new ItemSetBitSet((int) topItemIds.size());
}
/**
* Return true if the iterator is at a leaf, which means it would backtrack on next()
*
* @return true if on a leaf
*/
public boolean atLeaf() {
// check if we are already exhausted
// non-optimized: itemIterators.isEmpty()
if (stackPosition == -1) {
return false;
}
return itemIterators.get(stackPosition).hasNext() == false;
}
public boolean next() {
// check if we are already exhausted
// non-optimized: itemIterators.isEmpty()
if (stackPosition == -1) {
return false;
}
long itemId;
for (;;) {
if (itemIterators.get(stackPosition).hasNext()) {
itemId = itemIterators.get(stackPosition).next();
break;
} else {
// non-optimized: itemIterators.pop();
--stackPosition;
// non-optimized: itemIterators.isEmpty()
if (stackPosition == -1) {
return false;
}
itemIdStack.length--;
itemPositionsStack.length--;
itemPositionsVectorParent.clear(itemPositionsStack.ints[itemPositionsStack.length]);
itemPositionsVector.clear(itemPositionsStack.ints[itemPositionsStack.length]);
}
}
// push a new iterator on the stack
int itemPosition = itemIterators.get(stackPosition).getIndex();
// non-optimized: itemIterators.add(topItemIds.iterator(itemIteratorStack.peek().getIndex()));
if (itemIterators.size() == stackPosition + 1) {
itemIterators.add(topItemIds.iterator(itemPosition));
} else {
itemIterators.get(stackPosition + 1).reset(itemPosition);
}
growStacksIfNecessary();
itemIdStack.longs[itemIdStack.length++] = itemId;
// set the position from the previous step
if (itemPositionsStack.length > 0) {
itemPositionsVectorParent.set(itemPositionsStack.ints[itemPositionsStack.length - 1]);
}
// set the position from the this step
itemPositionsStack.ints[itemPositionsStack.length++] = itemPosition;
itemPositionsVector.set(itemPosition);
++stackPosition;
return true;
}
public long getItemId() {
return itemIdStack.longs[itemIdStack.length - 1];
}
public LongsRef getItemSet() {
return itemIdStack;
}
public ItemSetBitSet getItemSetBitSet() {
return itemPositionsVector;
}
public ItemSetBitSet getParentItemSetBitSet() {
return itemPositionsVectorParent;
}
public int getNumberOfItems() {
return stackPosition;
}
public void prune() {
// already empty
// non-optimized: itemIterators.isEmpty()
if (stackPosition == -1) {
return;
}
// non-optimized: itemIterators.pop();
--stackPosition;
// the id stack has 1 item less
if (stackPosition == -1) {
return;
}
itemIdStack.length--;
itemPositionsStack.length--;
itemPositionsVectorParent.clear(itemPositionsStack.ints[itemPositionsStack.length]);
itemPositionsVector.clear(itemPositionsStack.ints[itemPositionsStack.length]);
}
private void growStacksIfNecessary() {
if (itemIdStack.longs.length == itemIdStack.length) {
itemIdStack.longs = ArrayUtil.grow(itemIdStack.longs, itemIdStack.length + SIZE_INCREMENT);
}
if (itemPositionsStack.ints.length == itemPositionsStack.length) {
itemPositionsStack.ints = ArrayUtil.grow(itemPositionsStack.ints, itemPositionsStack.length + SIZE_INCREMENT);
}
}
}
| ItemSetTraverser |
java | mapstruct__mapstruct | core/src/main/java/org/mapstruct/ObjectFactory.java | {
"start": 1720,
"end": 2482
} | class ____ {
*
* @PersistenceContext
* private EntityManager em;
*
* @ObjectFactory
* public <T extends AbstractEntity> T resolve(AbstractDto sourceDto, @TargetType Class<T> type) {
* T entity = em.find( type, sourceDto.getId() );
* return entity != null ? entity : type.newInstance();
* }
* }
* </code>
* </pre>
* <p>
* If there are two factory methods, both serving the same type, one with no parameters and one taking sources as input,
* then the one with the source parameters is favored. If there are multiple such factories, an ambiguity error is
* shown.
*
* @author Remo Meier
* @since 1.2
*/
@Retention(RetentionPolicy.CLASS)
@Target(ElementType.METHOD)
public @ | ReferenceMapper |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/internal/LettuceClassUtils.java | {
"start": 2555,
"end": 3736
} | class ____ the {@link #getDefaultClassLoader()}.
*
* @param className
* @return
* @throws ClassNotFoundException
*/
public static Class<?> forName(String className) throws ClassNotFoundException {
return forName(className, getDefaultClassLoader());
}
private static Class<?> forName(String className, ClassLoader classLoader) throws ClassNotFoundException {
try {
return classLoader.loadClass(className);
} catch (ClassNotFoundException ex) {
int lastDotIndex = className.lastIndexOf('.');
if (lastDotIndex != -1) {
String innerClassName = className.substring(0, lastDotIndex) + '$' + className.substring(lastDotIndex + 1);
try {
return classLoader.loadClass(innerClassName);
} catch (ClassNotFoundException ex2) {
// swallow - let original exception get through
}
}
throw ex;
}
}
/**
* Return the default ClassLoader to use: typically the thread context ClassLoader, if available; the ClassLoader that
* loaded the ClassUtils | using |
java | spring-cloud__spring-cloud-gateway | spring-cloud-gateway-server-webmvc/src/main/java/org/springframework/cloud/gateway/server/mvc/filter/CircuitBreakerFilterFunctions.java | {
"start": 7105,
"end": 7244
} | class ____ extends SimpleFilterSupplier {
public FilterSupplier() {
super(CircuitBreakerFilterFunctions.class);
}
}
}
| FilterSupplier |
java | google__guava | android/guava-tests/test/com/google/common/util/concurrent/InterruptionUtil.java | {
"start": 1223,
"end": 1441
} | class ____ {
private static final Logger logger = Logger.getLogger(InterruptionUtil.class.getName());
/** Runnable which will interrupt the target thread repeatedly when run. */
private static final | InterruptionUtil |
java | quarkusio__quarkus | extensions/grpc/deployment/src/test/java/io/quarkus/grpc/client/interceptors/ClientInterceptorConstructorRegistrationTest.java | {
"start": 1031,
"end": 2701
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest().setArchiveProducer(
() -> ShrinkWrap.create(JavaArchive.class)
.addClasses(MutinyHelloService.class, MyThirdClientInterceptor.class, MyLastClientInterceptor.class,
Calls.class,
GreeterGrpc.class, Greeter.class, GreeterBean.class, HelloRequest.class, HelloReply.class,
MutinyGreeterGrpc.class,
HelloRequestOrBuilder.class, HelloReplyOrBuilder.class))
.withConfigurationResource("hello-config.properties");
private static final Logger log = LoggerFactory.getLogger(ClientInterceptorConstructorRegistrationTest.class);
private GreeterGrpc.GreeterBlockingStub client;
public ClientInterceptorConstructorRegistrationTest(
@RegisterClientInterceptor(MyLastClientInterceptor.class) @RegisterClientInterceptor(MyThirdClientInterceptor.class) @GrpcClient("hello-service") GreeterGrpc.GreeterBlockingStub client) {
this.client = client;
}
@Test
public void testInterceptorRegistration() {
Calls.LIST.clear();
HelloReply reply = client
.sayHello(HelloRequest.newBuilder().setName("neo").build());
assertThat(reply.getMessage()).isEqualTo("Hello neo");
List<String> calls = Calls.LIST;
assertEquals(2, calls.size());
assertEquals(MyThirdClientInterceptor.class.getName(), calls.get(0));
assertEquals(MyLastClientInterceptor.class.getName(), calls.get(1));
}
}
| ClientInterceptorConstructorRegistrationTest |
java | redisson__redisson | redisson/src/main/java/org/redisson/client/NettyHook.java | {
"start": 868,
"end": 1299
} | interface ____ {
/**
* Invoked when Redis client created and initialized Netty Bootstrap object.
*
* @param bootstrap - Netty Bootstrap object
*/
void afterBoostrapInitialization(Bootstrap bootstrap);
/**
* Invoked when Netty Channel object was created and initialized.
*
* @param channel - Netty Channel object
*/
void afterChannelInitialization(Channel channel);
}
| NettyHook |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/webapp/GPGOverviewBlock.java | {
"start": 1392,
"end": 3415
} | class ____ extends HtmlBlock {
private GlobalPolicyGenerator globalPolicyGenerator;
@Inject
GPGOverviewBlock(GlobalPolicyGenerator gpg, ViewContext ctx) {
super(ctx);
this.globalPolicyGenerator = gpg;
}
@Override
protected void render(Block html) {
Configuration config = this.globalPolicyGenerator.getConfig();
String appCleaner = "disable";
long appCleanerIntervalMs = config.getTimeDuration(YarnConfiguration.GPG_APPCLEANER_INTERVAL_MS,
YarnConfiguration.DEFAULT_GPG_APPCLEANER_INTERVAL_MS, TimeUnit.MILLISECONDS);
if (appCleanerIntervalMs > 0) {
appCleaner = "enable, interval : " + appCleanerIntervalMs + " ms";
}
String scCleaner = "disable";
long scCleanerIntervalMs = config.getTimeDuration(
YarnConfiguration.GPG_SUBCLUSTER_CLEANER_INTERVAL_MS,
YarnConfiguration.DEFAULT_GPG_SUBCLUSTER_CLEANER_INTERVAL_MS, TimeUnit.MILLISECONDS);
if (scCleanerIntervalMs > 0) {
scCleaner = "enable, interval : " + scCleanerIntervalMs + " ms";
}
String pgGenerator = "disable";
long policyGeneratorIntervalMillis = config.getTimeDuration(
YarnConfiguration.GPG_POLICY_GENERATOR_INTERVAL,
YarnConfiguration.DEFAULT_GPG_POLICY_GENERATOR_INTERVAL, TimeUnit.MILLISECONDS);
if (policyGeneratorIntervalMillis > 0) {
pgGenerator = "enable, interval : " + policyGeneratorIntervalMillis + " ms";
}
String policy = config.get(YarnConfiguration.GPG_GLOBAL_POLICY_CLASS,
YarnConfiguration.DEFAULT_GPG_GLOBAL_POLICY_CLASS);
info("GPG Details")
.__("GPG started on", new Date(GlobalPolicyGenerator.getGPGStartupTime()))
.__("GPG application cleaner", appCleaner)
.__("GPG subcluster cleaner", scCleaner)
.__("GPG policy generator", pgGenerator)
.__("GPG policy generator class", policy)
.__("GPG Version", YarnVersionInfo.getVersion())
.__("Hadoop Version", VersionInfo.getVersion());
html.__(InfoBlock.class);
}
}
| GPGOverviewBlock |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/mysql/visitor/MySqlSchemaStatVisitorTest3.java | {
"start": 973,
"end": 2369
} | class ____ extends TestCase {
public void test_0() throws Exception {
String sql = "insert into users2 (id2, name2) select id, name FROM users where loginCount > 1";
MySqlStatementParser parser = new MySqlStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement statemen = statementList.get(0);
assertEquals(1, statementList.size());
MySqlSchemaStatVisitor visitor = new MySqlSchemaStatVisitor();
statemen.accept(visitor);
System.out.println(sql);
System.out.println("Tables : " + visitor.getTables());
System.out.println("fields : " + visitor.getColumns());
assertEquals(2, visitor.getTables().size());
assertEquals(true, visitor.containsTable("users"));
assertEquals(true, visitor.containsTable("users2"));
assertEquals(5, visitor.getColumns().size());
assertEquals(true, visitor.getColumns().contains(new Column("users", "id")));
assertEquals(true, visitor.getColumns().contains(new Column("users", "name")));
assertEquals(true, visitor.getColumns().contains(new Column("users", "loginCount")));
assertEquals(true, visitor.getColumns().contains(new Column("users2", "name2")));
assertEquals(true, visitor.getColumns().contains(new Column("users2", "id2")));
}
}
| MySqlSchemaStatVisitorTest3 |
java | processing__processing4 | core/src/processing/data/DoubleList.java | {
"start": 586,
"end": 19783
} | class ____ implements Iterable<Double> {
int count;
double[] data;
@SuppressWarnings("unused")
public DoubleList() {
data = new double[10];
}
/**
* @nowebref
*/
public DoubleList(int length) {
data = new double[length];
}
/**
* @nowebref
*/
public DoubleList(double[] list) {
count = list.length;
data = new double[count];
System.arraycopy(list, 0, data, 0, count);
}
/**
* Construct an FloatList from an iterable pile of objects.
* For instance, a double array, an array of strings, who knows.
* Un-parsable or null values will be set to NaN.
* @nowebref
*/
@SuppressWarnings("unused")
public DoubleList(Iterable<Object> iterator) {
this(10);
for (Object o : iterator) {
if (o == null) {
append(Double.NaN);
} else if (o instanceof Number) {
append(((Number) o).doubleValue());
} else {
append(PApplet.parseFloat(o.toString().trim()));
}
}
crop();
}
/**
* Construct an FloatList from a random pile of objects.
* Un-parseable or null values will be set to NaN.
*/
@SuppressWarnings("unused")
public DoubleList(Object... items) {
// nuts, no good way to pass missingValue to this fn (varargs must be last)
final double missingValue = Double.NaN;
count = items.length;
data = new double[count];
int index = 0;
for (Object o : items) {
double value = missingValue;
if (o != null) {
if (o instanceof Number) {
value = ((Number) o).doubleValue();
} else {
try {
value = Double.parseDouble(o.toString().trim());
} catch (NumberFormatException ignored) { }
}
}
data[index++] = value;
}
}
/**
* Improve efficiency by removing allocated but unused entries from the
* internal array used to store the data. Set to private, though it could
* be useful to have this public if lists are frequently making drastic
* size changes (from very large to very small).
*/
private void crop() {
if (count != data.length) {
data = PApplet.subset(data, 0, count);
}
}
/**
* Get the length of the list.
*
* @webref doublelist:method
* @brief Get the length of the list
*/
public int size() {
return count;
}
public void resize(int length) {
if (length > data.length) {
double[] temp = new double[length];
System.arraycopy(data, 0, temp, 0, count);
data = temp;
} else if (length > count) {
Arrays.fill(data, count, length, 0);
}
count = length;
}
/**
* Remove all entries from the list.
*
* @webref doublelist:method
* @brief Remove all entries from the list
*/
public void clear() {
count = 0;
}
/**
* Get an entry at a particular index.
*
* @webref doublelist:method
* @brief Get an entry at a particular index
*/
public double get(int index) {
if (index >= count) {
throw new ArrayIndexOutOfBoundsException(index);
}
return data[index];
}
/**
* Set the entry at a particular index. If the index is past the length of
* the list, it'll expand the list to accommodate, and fill the intermediate
* entries with 0s.
*
* @webref doublelist:method
* @brief Set the entry at a particular index
*/
public void set(int index, double what) {
if (index >= count) {
data = PApplet.expand(data, index+1);
for (int i = count; i < index; i++) {
data[i] = 0;
}
count = index+1;
}
data[index] = what;
}
/** Just an alias for append(), but matches pop() */
public void push(double value) {
append(value);
}
public double pop() {
if (count == 0) {
throw new RuntimeException("Can't call pop() on an empty list");
}
double value = get(count-1);
count--;
return value;
}
/**
* Remove an element from the specified index.
*
* @webref doublelist:method
* @brief Remove an element from the specified index
*/
public double remove(int index) {
if (index < 0 || index >= count) {
throw new ArrayIndexOutOfBoundsException(index);
}
double entry = data[index];
// int[] outgoing = new int[count - 1];
// System.arraycopy(data, 0, outgoing, 0, index);
// count--;
// System.arraycopy(data, index + 1, outgoing, 0, count - index);
// data = outgoing;
// For most cases, this actually appears to be faster
// than arraycopy() on an array copying into itself.
for (int i = index; i < count-1; i++) {
data[i] = data[i+1];
}
count--;
return entry;
}
// Remove the first instance of a particular value,
// and return the index at which it was found.
@SuppressWarnings("unused")
public int removeValue(int value) {
int index = index(value);
if (index != -1) {
remove(index);
return index;
}
return -1;
}
// Remove all instances of a particular value,
// and return the number of values found and removed
@SuppressWarnings("unused")
public int removeValues(double value) {
int ii = 0;
if (Double.isNaN(value)) {
for (int i = 0; i < count; i++) {
if (!Double.isNaN(data[i])) {
data[ii++] = data[i];
}
}
} else {
for (int i = 0; i < count; i++) {
if (data[i] != value) {
data[ii++] = data[i];
}
}
}
int removed = count - ii;
count = ii;
return removed;
}
/** Replace the first instance of a particular value */
@SuppressWarnings("unused")
public boolean replaceValue(double value, double newValue) {
if (Double.isNaN(value)) {
for (int i = 0; i < count; i++) {
if (Double.isNaN(data[i])) {
data[i] = newValue;
return true;
}
}
} else {
int index = index(value);
if (index != -1) {
data[index] = newValue;
return true;
}
}
return false;
}
/** Replace all instances of a particular value */
@SuppressWarnings("unused")
public boolean replaceValues(double value, double newValue) {
boolean changed = false;
if (Double.isNaN(value)) {
for (int i = 0; i < count; i++) {
if (Double.isNaN(data[i])) {
data[i] = newValue;
changed = true;
}
}
} else {
for (int i = 0; i < count; i++) {
if (data[i] == value) {
data[i] = newValue;
changed = true;
}
}
}
return changed;
}
/**
* Add a new entry to the list.
*
* @webref doublelist:method
* @brief Add a new entry to the list
*/
public void append(double value) {
if (count == data.length) {
data = PApplet.expand(data);
}
data[count++] = value;
}
public void append(double[] values) {
for (double v : values) {
append(v);
}
}
public void append(DoubleList list) {
for (double v : list.values()) { // will concat the list...
append(v);
}
}
/** Add this value, but only if it's not already in the list. */
@SuppressWarnings("unused")
public void appendUnique(double value) {
if (!hasValue(value)) {
append(value);
}
}
public void insert(int index, double value) {
insert(index, new double[] { value });
}
// same as splice
public void insert(int index, double[] values) {
if (index < 0) {
throw new IllegalArgumentException("insert() index cannot be negative: it was " + index);
}
if (index >= data.length) {
throw new IllegalArgumentException("insert() index " + index + " is past the end of this list");
}
double[] temp = new double[count + values.length];
// Copy the old values, but not more than already exist
System.arraycopy(data, 0, temp, 0, Math.min(count, index));
// Copy the new values into the proper place
System.arraycopy(values, 0, temp, index, values.length);
// if (index < count) {
// The index was inside count, so it's a true splice/insert
System.arraycopy(data, index, temp, index+values.length, count - index);
count = count + values.length;
// } else {
// // The index was past 'count', so the new count is weirder
// count = index + values.length;
// }
data = temp;
}
public void insert(int index, DoubleList list) {
insert(index, list.values());
}
/** Return the first index of a particular value. */
public int index(double what) {
for (int i = 0; i < count; i++) {
if (data[i] == what) {
return i;
}
}
return -1;
}
/**
* @webref doublelist:method
* @brief Check if a number is a part of the list
*/
public boolean hasValue(double value) {
if (Double.isNaN(value)) {
for (int i = 0; i < count; i++) {
if (Double.isNaN(data[i])) {
return true;
}
}
} else {
for (int i = 0; i < count; i++) {
if (data[i] == value) {
return true;
}
}
}
return false;
}
private void boundsProblem(int index, String method) {
final String msg = String.format("The list size is %d. " +
"You cannot %s() to element %d.", count, method, index);
throw new ArrayIndexOutOfBoundsException(msg);
}
/**
* @webref doublelist:method
* @brief Add to a value
*/
public void add(int index, double amount) {
if (index < count) {
data[index] += amount;
} else {
boundsProblem(index, "add");
}
}
/**
* @webref doublelist:method
* @brief Subtract from a value
*/
public void sub(int index, double amount) {
if (index < count) {
data[index] -= amount;
} else {
boundsProblem(index, "sub");
}
}
/**
* @webref doublelist:method
* @brief Multiply a value
*/
public void mult(int index, double amount) {
if (index < count) {
data[index] *= amount;
} else {
boundsProblem(index, "mult");
}
}
/**
* @webref doublelist:method
* @brief Divide a value
*/
public void div(int index, double amount) {
if (index < count) {
data[index] /= amount;
} else {
boundsProblem(index, "div");
}
}
private void checkMinMax(String functionName) {
if (count == 0) {
String msg =
String.format("Cannot use %s() on an empty %s.",
functionName, getClass().getSimpleName());
throw new RuntimeException(msg);
}
}
/**
* @webref doublelist:method
* @brief Return the smallest value
*/
public double min() {
checkMinMax("min");
int index = minIndex();
return index == -1 ? Double.NaN : data[index];
}
public int minIndex() {
checkMinMax("minIndex");
double m;
int mi = -1;
for (int i = 0; i < count; i++) {
// find one good value to start
if (data[i] == data[i]) {
m = data[i];
mi = i;
// calculate the rest
for (int j = i+1; j < count; j++) {
double d = data[j];
if (!Double.isNaN(d) && (d < m)) {
m = data[j];
mi = j;
}
}
break;
}
}
return mi;
}
/**
* @webref doublelist:method
* @brief Return the largest value
*/
public double max() {
checkMinMax("max");
int index = maxIndex();
return index == -1 ? Double.NaN : data[index];
}
public int maxIndex() {
checkMinMax("maxIndex");
double m;
int mi = -1;
for (int i = 0; i < count; i++) {
// find one good value to start
if (data[i] == data[i]) {
m = data[i];
mi = i;
// calculate the rest
for (int j = i+1; j < count; j++) {
double d = data[j];
if (!Double.isNaN(d) && (d > m)) {
m = data[j];
mi = j;
}
}
break;
}
}
return mi;
}
public double sum() {
double sum = 0;
for (int i = 0; i < count; i++) {
sum += data[i];
}
return sum;
}
/**
* Sorts the array in place.
*
* @webref doublelist:method
* @brief Sorts an array, lowest to highest
*/
public void sort() {
Arrays.sort(data, 0, count);
}
/**
* Reverse sort, orders values from highest to lowest
*
* @webref doublelist:method
* @brief Reverse sort, orders values from highest to lowest
*/
public void sortReverse() {
new Sort() {
@Override
public int size() {
// if empty, don't even mess with the NaN check, it'll AIOOBE
if (count == 0) {
return 0;
}
// move NaN values to the end of the list and don't sort them
int right = count - 1;
while (data[right] != data[right]) {
right--;
if (right == -1) { // all values are NaN
return 0;
}
}
for (int i = right; i >= 0; --i) {
double v = data[i];
if (v != v) {
data[i] = data[right];
data[right] = v;
--right;
}
}
return right + 1;
}
@Override
public int compare(int a, int b) {
double diff = data[b] - data[a];
return diff == 0 ? 0 : (diff < 0 ? -1 : 1);
}
@Override
public void swap(int a, int b) {
double temp = data[a];
data[a] = data[b];
data[b] = temp;
}
}.run();
}
/**
* @webref doublelist:method
* @brief Reverse the order of the list elements
*/
public void reverse() {
int ii = count - 1;
for (int i = 0; i < count/2; i++) {
double t = data[i];
data[i] = data[ii];
data[ii] = t;
--ii;
}
}
/**
* Randomize the order of the list elements. Note that this does not
* obey the randomSeed() function in PApplet.
*
* @webref doublelist:method
* @brief Randomize the order of the list elements
*/
@SuppressWarnings("unused")
public void shuffle() {
Random r = new Random();
int num = count;
while (num > 1) {
int value = r.nextInt(num);
num--;
double temp = data[num];
data[num] = data[value];
data[value] = temp;
}
}
/**
* Randomize the list order using the random() function from the specified
* sketch, allowing shuffle() to use its current randomSeed() setting.
*/
@SuppressWarnings("unused")
public void shuffle(PApplet sketch) {
int num = count;
while (num > 1) {
int value = (int) sketch.random(num);
num--;
double temp = data[num];
data[num] = data[value];
data[value] = temp;
}
}
/**
* Return a random value from the list.
*/
public double random() {
if (count == 0) {
throw new ArrayIndexOutOfBoundsException("No entries in this DoubleList");
}
return data[(int) (Math.random() * count)];
}
// see notes in StringList
// /**
// * Return a random value from the list, using the
// * randomSeed() from the specified sketch object.
// */
// public double random(PApplet sketch) {
// if (count == 0) {
// throw new ArrayIndexOutOfBoundsException("No entries in this DoubleList");
// }
// return data[(int) sketch.random(count)];
// }
public double removeChoice() {
if (count == 0) {
throw new ArrayIndexOutOfBoundsException("No entries in this DoubleList");
}
int index = (int) (Math.random() * count);
return remove(index);
}
public DoubleList copy() {
DoubleList outgoing = new DoubleList(data);
outgoing.count = count;
return outgoing;
}
/**
* Returns the actual array being used to store the data. For advanced users,
* this is the fastest way to access a large list. Suitable for iterating
* with a for() loop, but modifying the list will have terrible consequences.
*/
public double[] values() {
crop();
return data;
}
/** Implemented this way so that we can use a FloatList in a for loop. */
@Override
public Iterator<Double> iterator() {
return new Iterator<>() {
int index = -1;
public void remove() {
DoubleList.this.remove(index);
index--;
}
public Double next() {
return data[++index];
}
public boolean hasNext() {
return index+1 < count;
}
};
}
@Deprecated
public double[] array() {
return toArray();
}
/**
* Create a new array with a copy of all the values.
* @return an array sized by the length of the list with each of the values.
* @webref doublelist:method
* @brief Create a new array with a copy of all the values
*/
public double[] toArray() {
return toArray(null);
}
@Deprecated
public double[] array(double[] array) {
return toArray(array);
}
/**
* Copy values into the specified array. If the specified array is
* null or not the same size, a new array will be allocated.
*/
public double[] toArray(double[] array) {
if (array == null || array.length != count) {
array = new double[count];
}
System.arraycopy(data, 0, array, 0, count);
return array;
}
/**
* Returns a normalized version of this array. Called getPercent() for
* consistency with the Dict classes. It's a getter method because it needs
* to return a new list (because IntList/Dict can't do percentages or
* normalization in place on int values).
*/
@SuppressWarnings("unused")
public DoubleList getPercent() {
double sum = 0;
for (double value : array()) {
sum += value;
}
DoubleList outgoing = new DoubleList(count);
for (int i = 0; i < count; i++) {
double percent = data[i] / sum;
outgoing.set(i, percent);
}
return outgoing;
}
@SuppressWarnings("unused")
public DoubleList getSubset(int start) {
return getSubset(start, count - start);
}
public DoubleList getSubset(int start, int num) {
double[] subset = new double[num];
System.arraycopy(data, start, subset, 0, num);
return new DoubleList(subset);
}
public String join(String separator) {
if (count == 0) {
return "";
}
StringBuilder sb = new StringBuilder();
sb.append(data[0]);
for (int i = 1; i < count; i++) {
sb.append(separator);
sb.append(data[i]);
}
return sb.toString();
}
public void print() {
for (int i = 0; i < count; i++) {
System.out.format("[%d] %f%n", i, data[i]);
}
}
/**
* Save tab-delimited entries to a file (TSV format, UTF-8 encoding)
*/
public void save(File file) {
PrintWriter writer = PApplet.createWriter(file);
write(writer);
writer.close();
}
/**
* Write entries to a PrintWriter, one per line
*/
public void write(PrintWriter writer) {
for (int i = 0; i < count; i++) {
writer.println(data[i]);
}
writer.flush();
}
/**
* Return this dictionary as a String in JSON format.
*/
public String toJSON() {
return "[ " + join(", ") + " ]";
}
@Override
public String toString() {
return getClass().getSimpleName() + " size=" + size() + " " + toJSON();
}
}
| DoubleList |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/ser/bean/UnrolledBeanAsArraySerializer.java | {
"start": 750,
"end": 8219
} | class ____
extends BeanSerializerBase
{
/**
* Serializer that would produce JSON Object version; used in
* cases where array output cannot be used.
*/
protected final BeanSerializerBase _defaultSerializer;
public static final int MAX_PROPS = 6;
protected final int _propCount;
// // // We store separate references in form more easily accessed
// // // from switch statement
protected BeanPropertyWriter _prop1;
protected BeanPropertyWriter _prop2;
protected BeanPropertyWriter _prop3;
protected BeanPropertyWriter _prop4;
protected BeanPropertyWriter _prop5;
protected BeanPropertyWriter _prop6;
/*
/**********************************************************************
/* Life-cycle: constructors
/**********************************************************************
*/
public UnrolledBeanAsArraySerializer(BeanSerializerBase src) {
super(src, (ObjectIdWriter) null);
_defaultSerializer = src;
_propCount = _props.length;
_calcUnrolled();
}
protected UnrolledBeanAsArraySerializer(BeanSerializerBase src,
Set<String> toIgnore, Set<String> toInclude) {
super(src, toIgnore, toInclude);
_defaultSerializer = src;
_propCount = _props.length;
_calcUnrolled();
}
private void _calcUnrolled() {
BeanPropertyWriter[] oProps = new BeanPropertyWriter[6];
int offset = 6 - _propCount;
System.arraycopy(_props, 0, oProps, offset, _propCount);
_prop1 = oProps[0];
_prop2 = oProps[1];
_prop3 = oProps[2];
_prop4 = oProps[3];
_prop5 = oProps[4];
_prop6 = oProps[5];
}
/**
* Factory method that will construct optimized instance if all the constraints
* are obeyed; or, if not, return `null` to indicate that instance cannot be
* created.
*/
public static UnrolledBeanAsArraySerializer tryConstruct(BeanSerializerBase src)
{
if ((src.propertyCount() > MAX_PROPS)
|| (src.getFilterId() != null)
|| src.hasViewProperties()) {
return null;
}
return new UnrolledBeanAsArraySerializer(src);
}
/*
/**********************************************************************
/* Life-cycle: factory methods, fluent factories
/**********************************************************************
*/
@Override
public ValueSerializer<Object> unwrappingSerializer(NameTransformer transformer) {
// If this gets called, we will just need delegate to the default
// serializer, to "undo" as-array serialization
return _defaultSerializer.unwrappingSerializer(transformer);
}
@Override
public boolean isUnwrappingSerializer() {
return false;
}
@Override
public BeanSerializerBase withObjectIdWriter(ObjectIdWriter objectIdWriter) {
// can't handle Object Ids, for now, so:
return _defaultSerializer.withObjectIdWriter(objectIdWriter);
}
@Override
public BeanSerializerBase withFilterId(Object filterId) {
// Revert to Vanilla variant, if so:
return new BeanAsArraySerializer(_defaultSerializer,
_objectIdWriter, filterId);
}
@Override
protected UnrolledBeanAsArraySerializer withByNameInclusion(Set<String> toIgnore,
Set<String> toInclude) {
return new UnrolledBeanAsArraySerializer(this, toIgnore, toInclude);
}
@Override
protected BeanSerializerBase withProperties(BeanPropertyWriter[] properties,
BeanPropertyWriter[] filteredProperties) {
// Similar to regular as-array-serializer, let's NOT reorder properties
return this;
}
@Override
protected BeanSerializerBase asArraySerializer() {
return this; // already is one...
}
@Override
public void resolve(SerializationContext provider)
{
super.resolve(provider);
_calcUnrolled();
}
/*
/**********************************************************************
/* ValueSerializer implementation that differs between impls
/**********************************************************************
*/
// Re-defined from base class, due to differing prefixes
@Override
public void serializeWithType(Object bean, JsonGenerator gen,
SerializationContext ctxt, TypeSerializer typeSer)
throws JacksonException
{
WritableTypeId typeIdDef = _typeIdDef(typeSer, bean, JsonToken.START_ARRAY);
typeSer.writeTypePrefix(gen, ctxt, typeIdDef);
// NOTE: instances NOT constructed if view-processing available
serializeNonFiltered(bean, gen, ctxt);
typeSer.writeTypeSuffix(gen, ctxt, typeIdDef);
}
/**
* Main serialization method that will delegate actual output to
* configured
* {@link BeanPropertyWriter} instances.
*/
@Override
public final void serialize(Object bean, JsonGenerator gen, SerializationContext provider)
throws JacksonException
{
if (provider.isEnabled(SerializationFeature.WRITE_SINGLE_ELEM_ARRAYS_UNWRAPPED)
&& hasSingleElement(provider)) {
serializeNonFiltered(bean, gen, provider);
return;
}
// note: it is assumed here that limitations (type id, object id,
// any getter, filtering) have already been checked; so code here
// is trivial.
gen.writeStartArray(bean, _props.length);
// NOTE: instances NOT constructed if view-processing available
serializeNonFiltered(bean, gen, provider);
gen.writeEndArray();
}
/*
/**********************************************************************
/* Property serialization methods
/**********************************************************************
*/
private boolean hasSingleElement(SerializationContext provider) {
return _props.length == 1;
}
protected final void serializeNonFiltered(Object bean, JsonGenerator gen,
SerializationContext provider)
throws JacksonException
{
BeanPropertyWriter prop = null;
try {
switch (_propCount) {
default:
//case 6:
prop = _prop1;
prop.serializeAsElement(bean, gen, provider);
// fall through
case 5:
prop = _prop2;
prop.serializeAsElement(bean, gen, provider);
case 4:
prop = _prop3;
prop.serializeAsElement(bean, gen, provider);
case 3:
prop = _prop4;
prop.serializeAsElement(bean, gen, provider);
case 2:
prop = _prop5;
prop.serializeAsElement(bean, gen, provider);
case 1:
prop = _prop6;
prop.serializeAsElement(bean, gen, provider);
case 0:
}
// NOTE: any getters cannot be supported either
} catch (Exception e) {
wrapAndThrow(provider, e, bean, prop.getName());
} catch (StackOverflowError e) {
throw DatabindException.from(gen, "Infinite recursion (StackOverflowError)", e)
.prependPath(bean, prop.getName());
}
}
}
| UnrolledBeanAsArraySerializer |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/filter/wall/mysql/MySqlWallTest139.java | {
"start": 804,
"end": 2005
} | class ____ extends TestCase {
public void test_false() throws Exception {
WallProvider provider = new MySqlWallProvider();
String sql = "select count(1) as total "//
+ " from ("
+ " select '' buyer_nick from dual "
+ " where 1=0 "//
+ " union "
+ " select distinct buyer_nick "//
+ " from sys_info.orders "//
+ " where 1=1 and receiver_district in ('平谷区')"
+ ") a "
+ "inner join ("
+ " select buyer_nick from ("//
+ " select distinct buyer_nick "//
+ " from sys_info.orders "
+ " where 1=1 and created > '2013-07-28' "//
+ " ) recent_days "
+ "inner join ("
+ " select distinct buyer_nick "
+ " from sys_info.orders "
+ " where 1=1 and seller_nick in ('创维官方旗舰店') "
+ " ) seller_nick using(buyer_nick) "//
+ ") b using(buyer_nick)";
assertTrue(provider.checkValid(sql));
}
}
| MySqlWallTest139 |
java | spring-projects__spring-framework | spring-websocket/src/main/java/org/springframework/web/socket/server/standard/SpringConfigurator.java | {
"start": 1751,
"end": 4217
} | class ____ extends Configurator {
private static final String NO_VALUE = ObjectUtils.identityToString(new Object());
private static final Log logger = LogFactory.getLog(SpringConfigurator.class);
private static final Map<String, Map<Class<?>, String>> cache =
new ConcurrentHashMap<>();
@SuppressWarnings("unchecked")
@Override
public <T> T getEndpointInstance(Class<T> endpointClass) throws InstantiationException {
WebApplicationContext wac = ContextLoader.getCurrentWebApplicationContext();
if (wac == null) {
String message = "Failed to find the root WebApplicationContext. Was ContextLoaderListener not used?";
logger.error(message);
throw new IllegalStateException(message);
}
String beanName = ClassUtils.getShortNameAsProperty(endpointClass);
if (wac.containsBean(beanName)) {
T endpoint = wac.getBean(beanName, endpointClass);
if (logger.isTraceEnabled()) {
logger.trace("Using @ServerEndpoint singleton " + endpoint);
}
return endpoint;
}
Component ann = AnnotationUtils.findAnnotation(endpointClass, Component.class);
if (ann != null && wac.containsBean(ann.value())) {
T endpoint = wac.getBean(ann.value(), endpointClass);
if (logger.isTraceEnabled()) {
logger.trace("Using @ServerEndpoint singleton " + endpoint);
}
return endpoint;
}
beanName = getBeanNameByType(wac, endpointClass);
if (beanName != null) {
return (T) wac.getBean(beanName);
}
if (logger.isTraceEnabled()) {
logger.trace("Creating new @ServerEndpoint instance of type " + endpointClass);
}
return wac.getAutowireCapableBeanFactory().createBean(endpointClass);
}
private @Nullable String getBeanNameByType(WebApplicationContext wac, Class<?> endpointClass) {
String wacId = wac.getId();
Map<Class<?>, String> beanNamesByType = cache.computeIfAbsent(wacId, k -> new ConcurrentHashMap<>());
if (!beanNamesByType.containsKey(endpointClass)) {
String[] names = wac.getBeanNamesForType(endpointClass);
if (names.length == 1) {
beanNamesByType.put(endpointClass, names[0]);
}
else {
beanNamesByType.put(endpointClass, NO_VALUE);
if (names.length > 1) {
throw new IllegalStateException("Found multiple @ServerEndpoint's of type [" +
endpointClass.getName() + "]: bean names " + Arrays.toString(names));
}
}
}
String beanName = beanNamesByType.get(endpointClass);
return (NO_VALUE.equals(beanName) ? null : beanName);
}
}
| SpringConfigurator |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/jsontype/CustomTypeIdResolverTest.java | {
"start": 1241,
"end": 1753
} | class ____ extends TestCustomResolverBase {
private static final long serialVersionUID = 1L;
// yes, static: just for test purposes, not real use
static List<JavaType> initTypes;
public CustomResolver() {
super(CustomBean.class, CustomBeanImpl.class);
}
@Override
public void init(JavaType baseType) {
if (initTypes != null) {
initTypes.add(baseType);
}
}
}
static abstract | CustomResolver |
java | apache__kafka | clients/src/main/java/org/apache/kafka/clients/admin/DescribeClusterResult.java | {
"start": 1109,
"end": 2784
} | class ____ {
private final KafkaFuture<Collection<Node>> nodes;
private final KafkaFuture<Node> controller;
private final KafkaFuture<String> clusterId;
private final KafkaFuture<Set<AclOperation>> authorizedOperations;
DescribeClusterResult(KafkaFuture<Collection<Node>> nodes,
KafkaFuture<Node> controller,
KafkaFuture<String> clusterId,
KafkaFuture<Set<AclOperation>> authorizedOperations) {
this.nodes = nodes;
this.controller = controller;
this.clusterId = clusterId;
this.authorizedOperations = authorizedOperations;
}
/**
* Returns a future which yields a collection of nodes.
*/
public KafkaFuture<Collection<Node>> nodes() {
return nodes;
}
/**
* Returns a future which yields the current controller node.
* <p>
* When using {@link AdminClientConfig#BOOTSTRAP_SERVERS_CONFIG}, the controller refer to a random broker.
* When using {@link AdminClientConfig#BOOTSTRAP_CONTROLLERS_CONFIG}, it refers to the current voter leader.
*/
public KafkaFuture<Node> controller() {
return controller;
}
/**
* Returns a future which yields the current cluster id.
*/
public KafkaFuture<String> clusterId() {
return clusterId;
}
/**
* Returns a future which yields authorized operations. The future value will be non-null if the
* broker supplied this information, and null otherwise.
*/
public KafkaFuture<Set<AclOperation>> authorizedOperations() {
return authorizedOperations;
}
}
| DescribeClusterResult |
java | google__dagger | javatests/dagger/internal/codegen/ComponentProcessorTest.java | {
"start": 67157,
"end": 67776
} | interface ____ {",
" @Provides",
" static String unqualified() {",
" return new String();",
" }",
"",
" @Provides",
" @GeneratedQualifier",
" static String qualified() {",
" return new String();",
" }",
"}");
Source component =
CompilerTests.javaSource(
"test.TestComponent",
"package test;",
"",
"import dagger.Component;",
"",
"@Component(modules = TestModule.class)",
" | TestModule |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/SumDoubleAggregatorFunctionTests.java | {
"start": 1126,
"end": 6106
} | class ____ extends AggregatorFunctionTestCase {
@Override
protected SourceOperator simpleInput(BlockFactory blockFactory, int size) {
return new SequenceDoubleBlockSourceOperator(blockFactory, LongStream.range(0, size).mapToDouble(l -> ESTestCase.randomDouble()));
}
@Override
protected AggregatorFunctionSupplier aggregatorFunction() {
return new SumDoubleAggregatorFunctionSupplier();
}
@Override
protected String expectedDescriptionOfAggregator() {
return "sum of doubles";
}
@Override
protected void assertSimpleOutput(List<Page> input, Block result) {
double sum = input.stream().flatMapToDouble(p -> allDoubles(p.getBlock(0))).sum();
assertThat(((DoubleBlock) result).getDouble(0), closeTo(sum, .0001));
}
public void testOverflowSucceeds() {
DriverContext driverContext = driverContext();
List<Page> results = new ArrayList<>();
try (
Driver d = TestDriverFactory.create(
driverContext,
new SequenceDoubleBlockSourceOperator(driverContext.blockFactory(), DoubleStream.of(Double.MAX_VALUE - 1, 2)),
List.of(simple().get(driverContext)),
new TestResultPageSinkOperator(results::add)
)
) {
runDriver(d);
}
assertThat(results.get(0).<DoubleBlock>getBlock(0).getDouble(0), equalTo(Double.MAX_VALUE + 1));
assertDriverContext(driverContext);
}
public void testSummationAccuracy() {
DriverContext driverContext = driverContext();
List<Page> results = new ArrayList<>();
try (
Driver d = TestDriverFactory.create(
driverContext,
new SequenceDoubleBlockSourceOperator(
driverContext.blockFactory(),
DoubleStream.of(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7)
),
List.of(simple().get(driverContext)),
new TestResultPageSinkOperator(results::add)
)
) {
runDriver(d);
}
assertEquals(15.3, results.get(0).<DoubleBlock>getBlock(0).getDouble(0), Double.MIN_NORMAL);
assertDriverContext(driverContext);
// Summing up an array which contains NaN and infinities and expect a result same as naive summation
results.clear();
int n = randomIntBetween(5, 10);
double[] values = new double[n];
double sum = 0;
for (int i = 0; i < n; i++) {
values[i] = frequently()
? randomFrom(Double.NaN, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY)
: randomDoubleBetween(Double.MIN_VALUE, Double.MAX_VALUE, true);
sum += values[i];
}
driverContext = driverContext();
try (
Driver d = TestDriverFactory.create(
driverContext,
new SequenceDoubleBlockSourceOperator(driverContext.blockFactory(), DoubleStream.of(values)),
List.of(simple().get(driverContext)),
new TestResultPageSinkOperator(results::add)
)
) {
runDriver(d);
}
assertEquals(sum, results.get(0).<DoubleBlock>getBlock(0).getDouble(0), 1e-10);
assertDriverContext(driverContext);
// Summing up some big double values and expect infinity result
results.clear();
n = randomIntBetween(5, 10);
double[] largeValues = new double[n];
for (int i = 0; i < n; i++) {
largeValues[i] = Double.MAX_VALUE;
}
driverContext = driverContext();
try (
Driver d = TestDriverFactory.create(
driverContext,
new SequenceDoubleBlockSourceOperator(driverContext.blockFactory(), DoubleStream.of(largeValues)),
List.of(simple().get(driverContext)),
new TestResultPageSinkOperator(results::add)
)
) {
runDriver(d);
}
assertEquals(Double.POSITIVE_INFINITY, results.get(0).<DoubleBlock>getBlock(0).getDouble(0), 0d);
assertDriverContext(driverContext);
results.clear();
for (int i = 0; i < n; i++) {
largeValues[i] = -Double.MAX_VALUE;
}
driverContext = driverContext();
try (
Driver d = TestDriverFactory.create(
driverContext,
new SequenceDoubleBlockSourceOperator(driverContext.blockFactory(), DoubleStream.of(largeValues)),
List.of(simple().get(driverContext)),
new TestResultPageSinkOperator(results::add)
)
) {
runDriver(d);
}
assertEquals(Double.NEGATIVE_INFINITY, results.get(0).<DoubleBlock>getBlock(0).getDouble(0), 0d);
assertDriverContext(driverContext);
}
}
| SumDoubleAggregatorFunctionTests |
java | elastic__elasticsearch | x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/TimingStatsReporter.java | {
"start": 689,
"end": 3930
} | class ____ {
/** Persisted timing stats. May be stale. */
private TimingStats persistedTimingStats;
/** Current timing stats. */
private volatile TimingStats currentTimingStats;
/** Object used to persist current timing stats. */
private final JobResultsPersister.Builder bulkResultsPersister;
public TimingStatsReporter(TimingStats timingStats, JobResultsPersister.Builder jobResultsPersister) {
Objects.requireNonNull(timingStats);
this.persistedTimingStats = new TimingStats(timingStats);
this.currentTimingStats = new TimingStats(timingStats);
this.bulkResultsPersister = Objects.requireNonNull(jobResultsPersister);
}
public TimingStats getCurrentTimingStats() {
return new TimingStats(currentTimingStats);
}
public void reportBucket(Bucket bucket) {
currentTimingStats.updateStats(bucket.getProcessingTimeMs());
currentTimingStats.setLatestRecordTimestamp(bucket.getTimestamp().toInstant().plusSeconds(bucket.getBucketSpan()));
if (differSignificantly(currentTimingStats, persistedTimingStats)) {
flush();
}
}
public void finishReporting() {
// Don't flush if current timing stats are identical to the persisted ones
if (currentTimingStats.equals(persistedTimingStats)) {
return;
}
flush();
}
private void flush() {
persistedTimingStats = new TimingStats(currentTimingStats);
bulkResultsPersister.persistTimingStats(persistedTimingStats);
}
/**
* Returns true if given stats objects differ from each other by more than 10% for at least one of the statistics.
*/
public static boolean differSignificantly(TimingStats stats1, TimingStats stats2) {
return differSignificantly(stats1.getMinBucketProcessingTimeMs(), stats2.getMinBucketProcessingTimeMs())
|| differSignificantly(stats1.getMaxBucketProcessingTimeMs(), stats2.getMaxBucketProcessingTimeMs())
|| differSignificantly(stats1.getAvgBucketProcessingTimeMs(), stats2.getAvgBucketProcessingTimeMs())
|| differSignificantly(stats1.getExponentialAvgBucketProcessingTimeMs(), stats2.getExponentialAvgBucketProcessingTimeMs());
}
/**
* Returns {@code true} if one of the ratios { value1 / value2, value2 / value1 } is smaller than MIN_VALID_RATIO.
* This can be interpreted as values { value1, value2 } differing significantly from each other.
* This method also returns:
* - {@code true} in case one value is {@code null} while the other is not.
* - {@code false} in case both values are {@code null}.
*/
static boolean differSignificantly(Double value1, Double value2) {
if (value1 != null && value2 != null) {
return (value2 / value1 < MIN_VALID_RATIO) || (value1 / value2 < MIN_VALID_RATIO);
}
return (value1 != null) || (value2 != null);
}
/**
* Minimum ratio of values that is interpreted as values being similar.
* If the values ratio is less than MIN_VALID_RATIO, the values are interpreted as significantly different.
*/
private static final double MIN_VALID_RATIO = 0.9;
}
| TimingStatsReporter |
java | elastic__elasticsearch | x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/LiteralsOnTheRightTests.java | {
"start": 994,
"end": 1491
} | class ____ extends ESTestCase {
public void testLiteralsOnTheRight() {
Alias a = new Alias(EMPTY, "a", new Literal(EMPTY, 10, INTEGER));
Expression result = new LiteralsOnTheRight().rule(equalsOf(FIVE, a), unboundLogicalOptimizerContext());
assertTrue(result instanceof Equals);
Equals eq = (Equals) result;
assertEquals(a, eq.left());
assertEquals(FIVE, eq.right());
// Note: Null Equals test removed here
}
}
| LiteralsOnTheRightTests |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/jsontype/JsonTypeInfoIgnored2968Test.java | {
"start": 1346,
"end": 1386
} | class ____ extends Animal {}
static | Cat |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/mysql/select/MySqlSelectTest_156.java | {
"start": 492,
"end": 2255
} | class ____ extends MysqlTest {
public void test_0() throws Exception {
String sql = "SELECT SQL_SMALL_RESULT ((NULL) is not FALSE) \n" +
"FROM corona_select_multi_db_one_tb AS layer_0_left_tb \n" +
"RIGHT JOIN corona_select_one_db_multi_tb AS layer_0_right_tb \n" +
" ON layer_0_right_tb.smallint_test=layer_0_right_tb.date_test \n" +
"WHERE layer_0_right_tb.time_test='x6' NOT BETWEEN 96 AND layer_0_right_tb.bigint_test;\n";
//
List<SQLStatement> statementList = SQLUtils.parseStatements(sql, JdbcConstants.MYSQL, SQLParserFeature.TDDLHint);
SQLSelectStatement stmt = (SQLSelectStatement) statementList.get(0);
assertEquals(1, statementList.size());
assertEquals("SELECT SQL_SMALL_RESULT (NULL IS NOT false)\n" +
"FROM corona_select_multi_db_one_tb layer_0_left_tb\n" +
"\tRIGHT JOIN corona_select_one_db_multi_tb layer_0_right_tb ON layer_0_right_tb.smallint_test = layer_0_right_tb.date_test\n" +
"WHERE layer_0_right_tb.time_test = 'x6' NOT BETWEEN 96 AND layer_0_right_tb.bigint_test;", stmt.toString());
assertEquals("SELECT SQL_SMALL_RESULT (NULL IS NOT ?)\n" +
"FROM corona_select_multi_db_one_tb layer_0_left_tb\n" +
"\tRIGHT JOIN corona_select_one_db_multi_tb layer_0_right_tb ON layer_0_right_tb.smallint_test = layer_0_right_tb.date_test\n" +
"WHERE layer_0_right_tb.time_test = ? NOT BETWEEN ? AND layer_0_right_tb.bigint_test;",
ParameterizedOutputVisitorUtils.parameterize(sql, JdbcConstants.MYSQL, VisitorFeature.OutputParameterizedZeroReplaceNotUseOriginalSql));
}
}
| MySqlSelectTest_156 |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/fetching/DirectVsQueryFetchingTest.java | {
"start": 2158,
"end": 2420
} | class ____ {
@Id
private Long id;
@NaturalId
private String username;
@ManyToOne(fetch = FetchType.EAGER)
private Department department;
//Getters and setters omitted for brevity
}
//end::fetching-direct-vs-query-domain-model-example[]
}
| Employee |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestJobCounters.java | {
"start": 29486,
"end": 29652
} | class ____ extends
org.apache.hadoop.mapreduce.Reducer<Text, IntWritable, Text, IntWritable>{
/**
* Test customer counter.
*/
public | IntSumReducer |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/MultiValueMode.java | {
"start": 1844,
"end": 34671
} | enum ____ implements Writeable {
/**
* Pick the sum of all the values.
*/
SUM {
@Override
protected long pick(SortedNumericLongValues values) throws IOException {
final int count = values.docValueCount();
long total = 0;
for (int index = 0; index < count; ++index) {
total += values.nextValue();
}
return total;
}
@Override
protected long pick(
SortedNumericLongValues values,
long missingValue,
DocIdSetIterator docItr,
int startDoc,
int endDoc,
int maxChildren
) throws IOException {
int totalCount = 0;
long totalValue = 0;
int count = 0;
for (int doc = startDoc; doc < endDoc; doc = docItr.nextDoc()) {
if (values.advanceExact(doc)) {
if (++count > maxChildren) {
break;
}
final int docCount = values.docValueCount();
for (int index = 0; index < docCount; ++index) {
totalValue += values.nextValue();
}
totalCount += docCount;
}
}
return totalCount > 0 ? totalValue : missingValue;
}
@Override
protected double pick(SortedNumericDoubleValues values) throws IOException {
final int count = values.docValueCount();
double total = 0;
for (int index = 0; index < count; ++index) {
total += values.nextValue();
}
return total;
}
@Override
protected double pick(
SortedNumericDoubleValues values,
double missingValue,
DocIdSetIterator docItr,
int startDoc,
int endDoc,
int maxChildren
) throws IOException {
int totalCount = 0;
double totalValue = 0;
int count = 0;
for (int doc = startDoc; doc < endDoc; doc = docItr.nextDoc()) {
if (values.advanceExact(doc)) {
if (++count > maxChildren) {
break;
}
final int docCount = values.docValueCount();
for (int index = 0; index < docCount; ++index) {
totalValue += values.nextValue();
}
totalCount += docCount;
}
}
return totalCount > 0 ? totalValue : missingValue;
}
},
/**
* Pick the average of all the values.
*/
AVG {
@Override
protected long pick(SortedNumericLongValues values) throws IOException {
final int count = values.docValueCount();
long total = 0;
for (int index = 0; index < count; ++index) {
total += values.nextValue();
}
return count > 1 ? Math.round((double) total / (double) count) : total;
}
@Override
protected long pick(
SortedNumericLongValues values,
long missingValue,
DocIdSetIterator docItr,
int startDoc,
int endDoc,
int maxChildren
) throws IOException {
int totalCount = 0;
long totalValue = 0;
int count = 0;
for (int doc = startDoc; doc < endDoc; doc = docItr.nextDoc()) {
if (values.advanceExact(doc)) {
if (++count > maxChildren) {
break;
}
final int docCount = values.docValueCount();
for (int index = 0; index < docCount; ++index) {
totalValue += values.nextValue();
}
totalCount += docCount;
}
}
if (totalCount < 1) {
return missingValue;
}
return totalCount > 1 ? Math.round((double) totalValue / (double) totalCount) : totalValue;
}
@Override
protected double pick(SortedNumericDoubleValues values) throws IOException {
final int count = values.docValueCount();
double total = 0;
for (int index = 0; index < count; ++index) {
total += values.nextValue();
}
return total / count;
}
@Override
protected double pick(
SortedNumericDoubleValues values,
double missingValue,
DocIdSetIterator docItr,
int startDoc,
int endDoc,
int maxChildren
) throws IOException {
int totalCount = 0;
double totalValue = 0;
int count = 0;
for (int doc = startDoc; doc < endDoc; doc = docItr.nextDoc()) {
if (values.advanceExact(doc)) {
if (++count > maxChildren) {
break;
}
final int docCount = values.docValueCount();
for (int index = 0; index < docCount; ++index) {
totalValue += values.nextValue();
}
totalCount += docCount;
}
}
if (totalCount < 1) {
return missingValue;
}
return totalValue / totalCount;
}
},
/**
* Pick the median of the values.
*/
MEDIAN {
@Override
protected long pick(SortedNumericLongValues values) throws IOException {
int count = values.docValueCount();
for (int i = 0; i < (count - 1) / 2; ++i) {
values.nextValue();
}
if (count % 2 == 0) {
return Math.round(((double) values.nextValue() + values.nextValue()) / 2);
} else {
return values.nextValue();
}
}
@Override
protected double pick(SortedNumericDoubleValues values) throws IOException {
int count = values.docValueCount();
for (int i = 0; i < (count - 1) / 2; ++i) {
values.nextValue();
}
if (count % 2 == 0) {
return (values.nextValue() + values.nextValue()) / 2;
} else {
return values.nextValue();
}
}
},
/**
* Pick the lowest value.
*/
MIN {
@Override
protected long pick(SortedNumericLongValues values) throws IOException {
return values.nextValue();
}
@Override
protected long pick(
SortedNumericLongValues values,
long missingValue,
DocIdSetIterator docItr,
int startDoc,
int endDoc,
int maxChildren
) throws IOException {
boolean hasValue = false;
long minValue = Long.MAX_VALUE;
int count = 0;
for (int doc = startDoc; doc < endDoc; doc = docItr.nextDoc()) {
if (values.advanceExact(doc)) {
if (++count > maxChildren) {
break;
}
minValue = Math.min(minValue, values.nextValue());
hasValue = true;
}
}
return hasValue ? minValue : missingValue;
}
@Override
protected double pick(SortedNumericDoubleValues values) throws IOException {
return values.nextValue();
}
@Override
protected double pick(
SortedNumericDoubleValues values,
double missingValue,
DocIdSetIterator docItr,
int startDoc,
int endDoc,
int maxChildren
) throws IOException {
boolean hasValue = false;
double minValue = Double.POSITIVE_INFINITY;
int count = 0;
for (int doc = startDoc; doc < endDoc; doc = docItr.nextDoc()) {
if (values.advanceExact(doc)) {
if (++count > maxChildren) {
break;
}
minValue = Math.min(minValue, values.nextValue());
hasValue = true;
}
}
return hasValue ? minValue : missingValue;
}
@Override
protected BytesRef pick(SortedBinaryDocValues values) throws IOException {
return values.nextValue();
}
@Override
protected BytesRef pick(
BinaryDocValues values,
BytesRefBuilder builder,
DocIdSetIterator docItr,
int startDoc,
int endDoc,
int maxChildren
) throws IOException {
BytesRefBuilder bytesRefBuilder = null;
int count = 0;
for (int doc = startDoc; doc < endDoc; doc = docItr.nextDoc()) {
if (values.advanceExact(doc)) {
if (++count > maxChildren) {
break;
}
final BytesRef innerValue = values.binaryValue();
if (bytesRefBuilder == null) {
builder.copyBytes(innerValue);
bytesRefBuilder = builder;
} else {
final BytesRef min = bytesRefBuilder.get().compareTo(innerValue) <= 0 ? bytesRefBuilder.get() : innerValue;
if (min == innerValue) {
bytesRefBuilder.copyBytes(min);
}
}
}
}
return bytesRefBuilder == null ? null : bytesRefBuilder.get();
}
@Override
protected int pick(SortedSetDocValues values) throws IOException {
return Math.toIntExact(values.nextOrd());
}
@Override
protected int pick(SortedDocValues values, DocIdSetIterator docItr, int startDoc, int endDoc, int maxChildren) throws IOException {
int ord = Integer.MAX_VALUE;
boolean hasValue = false;
int count = 0;
for (int doc = startDoc; doc < endDoc; doc = docItr.nextDoc()) {
if (values.advanceExact(doc)) {
if (++count > maxChildren) {
break;
}
final int innerOrd = values.ordValue();
ord = Math.min(ord, innerOrd);
hasValue = true;
}
}
return hasValue ? ord : -1;
}
},
/**
* Pick the highest value.
*/
MAX {
@Override
protected long pick(SortedNumericLongValues values) throws IOException {
final int count = values.docValueCount();
for (int i = 0; i < count - 1; ++i) {
values.nextValue();
}
return values.nextValue();
}
@Override
protected long pick(
SortedNumericLongValues values,
long missingValue,
DocIdSetIterator docItr,
int startDoc,
int endDoc,
int maxChildren
) throws IOException {
boolean hasValue = false;
long maxValue = Long.MIN_VALUE;
int count = 0;
for (int doc = startDoc; doc < endDoc; doc = docItr.nextDoc()) {
if (values.advanceExact(doc)) {
if (++count > maxChildren) {
break;
}
final int docCount = values.docValueCount();
for (int i = 0; i < docCount - 1; ++i) {
values.nextValue();
}
maxValue = Math.max(maxValue, values.nextValue());
hasValue = true;
}
}
return hasValue ? maxValue : missingValue;
}
@Override
protected double pick(SortedNumericDoubleValues values) throws IOException {
final int count = values.docValueCount();
for (int i = 0; i < count - 1; ++i) {
values.nextValue();
}
return values.nextValue();
}
@Override
protected double pick(
SortedNumericDoubleValues values,
double missingValue,
DocIdSetIterator docItr,
int startDoc,
int endDoc,
int maxChildren
) throws IOException {
boolean hasValue = false;
double maxValue = Double.NEGATIVE_INFINITY;
int count = 0;
for (int doc = startDoc; doc < endDoc; doc = docItr.nextDoc()) {
if (values.advanceExact(doc)) {
if (++count > maxChildren) {
break;
}
final int docCount = values.docValueCount();
for (int i = 0; i < docCount - 1; ++i) {
values.nextValue();
}
maxValue = Math.max(maxValue, values.nextValue());
hasValue = true;
}
}
return hasValue ? maxValue : missingValue;
}
@Override
protected BytesRef pick(SortedBinaryDocValues values) throws IOException {
int count = values.docValueCount();
for (int i = 0; i < count - 1; ++i) {
values.nextValue();
}
return values.nextValue();
}
@Override
protected BytesRef pick(
BinaryDocValues values,
BytesRefBuilder builder,
DocIdSetIterator docItr,
int startDoc,
int endDoc,
int maxChildren
) throws IOException {
BytesRefBuilder bytesRefBuilder = null;
int count = 0;
for (int doc = startDoc; doc < endDoc; doc = docItr.nextDoc()) {
if (values.advanceExact(doc)) {
if (++count > maxChildren) {
break;
}
final BytesRef innerValue = values.binaryValue();
if (bytesRefBuilder == null) {
builder.copyBytes(innerValue);
bytesRefBuilder = builder;
} else {
final BytesRef max = bytesRefBuilder.get().compareTo(innerValue) > 0 ? bytesRefBuilder.get() : innerValue;
if (max == innerValue) {
bytesRefBuilder.copyBytes(max);
}
}
}
}
return bytesRefBuilder == null ? null : bytesRefBuilder.get();
}
@Override
protected int pick(SortedSetDocValues values) throws IOException {
int count = values.docValueCount();
for (int i = 0; i < count - 1; ++i) {
values.nextOrd();
}
return Math.toIntExact(values.nextOrd());
}
@Override
protected int pick(SortedDocValues values, DocIdSetIterator docItr, int startDoc, int endDoc, int maxChildren) throws IOException {
int ord = -1;
int count = 0;
for (int doc = startDoc; doc < endDoc; doc = docItr.nextDoc()) {
if (values.advanceExact(doc)) {
if (++count > maxChildren) {
break;
}
ord = Math.max(ord, values.ordValue());
}
}
return ord;
}
};
/**
* A case insensitive version of {@link #valueOf(String)}
*
* @throws IllegalArgumentException if the given string doesn't match a sort mode or is <code>null</code>.
*/
public static MultiValueMode fromString(String sortMode) {
try {
return valueOf(sortMode.toUpperCase(Locale.ROOT));
} catch (Exception e) {
throw new IllegalArgumentException("Illegal sort mode: " + sortMode);
}
}
/**
* Return a {@link NumericDocValues} instance that can be used to sort documents
* with this mode and the provided values. When a document has no value,
* <code>missingValue</code> is returned.
*
* Allowed Modes: SUM, AVG, MEDIAN, MIN, MAX
*/
public LongValues select(final SortedNumericLongValues values) {
final LongValues singleton = SortedNumericLongValues.unwrapSingleton(values);
if (singleton != null) {
return singleton;
} else {
return new LongValues() {
private long value;
@Override
public boolean advanceExact(int target) throws IOException {
if (values.advanceExact(target)) {
value = pick(values);
return true;
}
return false;
}
@Override
public long longValue() {
return value;
}
};
}
}
protected long pick(SortedNumericLongValues values) throws IOException {
throw new IllegalArgumentException("Unsupported sort mode: " + this);
}
/**
* Return a {@link NumericDocValues} instance that can be used to sort root documents
* with this mode, the provided values and filters for root/inner documents.
*
* For every root document, the values of its inner documents will be aggregated.
* If none of the inner documents has a value, then <code>missingValue</code> is returned.
*
* Allowed Modes: SUM, AVG, MIN, MAX
*
* NOTE: Calling the returned instance on docs that are not root docs is illegal
* The returned instance can only be evaluate the current and upcoming docs
*/
public DenseLongValues select(
final SortedNumericLongValues values,
final long missingValue,
final BitSet parentDocs,
final DocIdSetIterator childDocs,
int maxChildren
) throws IOException {
if (parentDocs == null || childDocs == null) {
return FieldData.replaceMissing(FieldData.EMPTY, missingValue);
}
return new DenseLongValues() {
int lastSeenParentDoc = -1;
long lastEmittedValue = missingValue;
@Override
public void doAdvanceExact(int parentDoc) throws IOException {
assert parentDoc >= lastSeenParentDoc : "can only evaluate current and upcoming parent docs";
if (parentDoc == lastSeenParentDoc) {
return;
} else if (parentDoc == 0) {
lastEmittedValue = missingValue;
return;
}
final int prevParentDoc = parentDocs.prevSetBit(parentDoc - 1);
final int firstChildDoc = getFirstChildDoc(prevParentDoc, childDocs);
lastSeenParentDoc = parentDoc;
lastEmittedValue = pick(values, missingValue, childDocs, firstChildDoc, parentDoc, maxChildren);
}
@Override
public long longValue() {
return lastEmittedValue;
}
};
}
protected long pick(
SortedNumericLongValues values,
long missingValue,
DocIdSetIterator docItr,
int startDoc,
int endDoc,
int maxChildren
) throws IOException {
throw new IllegalArgumentException("Unsupported sort mode: " + this);
}
/**
* Return a {@link DoubleValues} instance that can be used to sort documents
* with this mode and the provided values. When a document has no value,
* <code>missingValue</code> is returned.
*
* Allowed Modes: SUM, AVG, MEDIAN, MIN, MAX
*/
public DoubleValues select(final SortedNumericDoubleValues values) {
final DoubleValues singleton = FieldData.unwrapSingleton(values);
if (singleton != null) {
return singleton;
} else {
return new DoubleValues() {
private double value;
@Override
public boolean advanceExact(int target) throws IOException {
if (values.advanceExact(target)) {
value = pick(values);
return true;
}
return false;
}
@Override
public double doubleValue() {
return this.value;
}
};
}
}
protected double pick(SortedNumericDoubleValues values) throws IOException {
throw new IllegalArgumentException("Unsupported sort mode: " + this);
}
/**
* Return a {@link DoubleValues} instance that can be used to sort root documents
* with this mode, the provided values and filters for root/inner documents.
*
* For every root document, the values of its inner documents will be aggregated.
* If none of the inner documents has a value, then <code>missingValue</code> is returned.
*
* Allowed Modes: SUM, AVG, MIN, MAX
*
* NOTE: Calling the returned instance on docs that are not root docs is illegal
* The returned instance can only be evaluate the current and upcoming docs
*/
public DenseDoubleValues select(
final SortedNumericDoubleValues values,
final double missingValue,
final BitSet parentDocs,
final DocIdSetIterator childDocs,
int maxChildren
) throws IOException {
if (parentDocs == null || childDocs == null) {
return FieldData.replaceMissing(DoubleValues.EMPTY, missingValue);
}
return new DenseDoubleValues() {
int lastSeenParentDoc = 0;
double lastEmittedValue = missingValue;
@Override
public void doAdvanceExact(int parentDoc) throws IOException {
assert parentDoc >= lastSeenParentDoc : "can only evaluate current and upcoming parent docs";
if (parentDoc == lastSeenParentDoc) {
return;
}
final int prevParentDoc = parentDocs.prevSetBit(parentDoc - 1);
final int firstChildDoc = getFirstChildDoc(prevParentDoc, childDocs);
lastSeenParentDoc = parentDoc;
lastEmittedValue = pick(values, missingValue, childDocs, firstChildDoc, parentDoc, maxChildren);
}
@Override
public double doubleValue() {
return lastEmittedValue;
}
};
}
private static int getFirstChildDoc(int prevParentDoc, DocIdSetIterator childDocs) throws IOException {
final int firstChildDoc;
if (childDocs.docID() > prevParentDoc) {
firstChildDoc = childDocs.docID();
} else {
firstChildDoc = childDocs.advance(prevParentDoc + 1);
}
return firstChildDoc;
}
protected double pick(
SortedNumericDoubleValues values,
double missingValue,
DocIdSetIterator docItr,
int startDoc,
int endDoc,
int maxChildren
) throws IOException {
throw new IllegalArgumentException("Unsupported sort mode: " + this);
}
/**
* Return a {@link BinaryDocValues} instance that can be used to sort documents
* with this mode and the provided values. When a document has no value,
* <code>missingValue</code> is returned.
*
* Allowed Modes: MIN, MAX
*/
public BinaryDocValues select(final SortedBinaryDocValues values, final BytesRef missingValue) {
final BinaryDocValues singleton = FieldData.unwrapSingleton(values);
if (singleton != null) {
if (missingValue == null) {
return singleton;
}
return new AbstractBinaryDocValues() {
private BytesRef value;
@Override
public boolean advanceExact(int target) throws IOException {
this.value = singleton.advanceExact(target) ? singleton.binaryValue() : missingValue;
return true;
}
@Override
public BytesRef binaryValue() {
return this.value;
}
};
} else {
return new AbstractBinaryDocValues() {
private BytesRef value;
@Override
public boolean advanceExact(int target) throws IOException {
if (values.advanceExact(target)) {
value = pick(values);
return true;
}
value = missingValue;
return missingValue != null;
}
@Override
public BytesRef binaryValue() {
return value;
}
};
}
}
protected BytesRef pick(SortedBinaryDocValues values) throws IOException {
throw new IllegalArgumentException("Unsupported sort mode: " + this);
}
/**
* Return a {@link BinaryDocValues} instance that can be used to sort root documents
* with this mode, the provided values and filters for root/inner documents.
*
* For every root document, the values of its inner documents will be aggregated.
* If none of the inner documents has a value, then <code>missingValue</code> is returned.
*
* Allowed Modes: MIN, MAX
*
* NOTE: Calling the returned instance on docs that are not root docs is illegal
* The returned instance can only be evaluate the current and upcoming docs
*/
public BinaryDocValues select(
final SortedBinaryDocValues values,
final BytesRef missingValue,
final BitSet parentDocs,
final DocIdSetIterator childDocs,
int maxChildren
) throws IOException {
if (parentDocs == null || childDocs == null) {
return select(FieldData.emptySortedBinary(), missingValue);
}
final BinaryDocValues selectedValues = select(values, null);
return new AbstractBinaryDocValues() {
final BytesRefBuilder builder = new BytesRefBuilder();
int lastSeenParentDoc = 0;
BytesRef lastEmittedValue = missingValue;
@Override
public boolean advanceExact(int parentDoc) throws IOException {
assert parentDoc >= lastSeenParentDoc : "can only evaluate current and upcoming root docs";
if (parentDoc == lastSeenParentDoc) {
return true;
}
final int prevParentDoc = parentDocs.prevSetBit(parentDoc - 1);
final int firstChildDoc = getFirstChildDoc(prevParentDoc, childDocs);
lastSeenParentDoc = parentDoc;
lastEmittedValue = pick(selectedValues, builder, childDocs, firstChildDoc, parentDoc, maxChildren);
if (lastEmittedValue == null) {
lastEmittedValue = missingValue;
}
return true;
}
@Override
public BytesRef binaryValue() {
return lastEmittedValue;
}
};
}
protected BytesRef pick(
BinaryDocValues values,
BytesRefBuilder builder,
DocIdSetIterator docItr,
int startDoc,
int endDoc,
int maxChildren
) throws IOException {
throw new IllegalArgumentException("Unsupported sort mode: " + this);
}
/**
* Return a {@link SortedDocValues} instance that can be used to sort documents
* with this mode and the provided values.
*
* Allowed Modes: MIN, MAX
*/
public SortedDocValues select(final SortedSetDocValues values) {
if (values.getValueCount() >= Integer.MAX_VALUE) {
throw new UnsupportedOperationException(
"fields containing more than " + (Integer.MAX_VALUE - 1) + " unique terms are unsupported"
);
}
final SortedDocValues singleton = DocValues.unwrapSingleton(values);
if (singleton != null) {
return singleton;
} else {
return new AbstractSortedDocValues() {
int ord;
@Override
public boolean advanceExact(int target) throws IOException {
if (values.advanceExact(target)) {
ord = pick(values);
return true;
} else {
ord = -1;
return false;
}
}
@Override
public int docID() {
return values.docID();
}
@Override
public int ordValue() {
assert ord != -1;
return ord;
}
@Override
public BytesRef lookupOrd(int ord) throws IOException {
return values.lookupOrd(ord);
}
@Override
public int getValueCount() {
return (int) values.getValueCount();
}
};
}
}
protected int pick(SortedSetDocValues values) throws IOException {
throw new IllegalArgumentException("Unsupported sort mode: " + this);
}
/**
* Return a {@link SortedDocValues} instance that can be used to sort root documents
* with this mode, the provided values and filters for root/inner documents.
*
* For every root document, the values of its inner documents will be aggregated.
*
* Allowed Modes: MIN, MAX
*
* NOTE: Calling the returned instance on docs that are not root docs is illegal
* The returned instance can only be evaluate the current and upcoming docs
*/
public SortedDocValues select(
final SortedSetDocValues values,
final BitSet parentDocs,
final DocIdSetIterator childDocs,
int maxChildren
) throws IOException {
if (parentDocs == null || childDocs == null) {
return select(DocValues.emptySortedSet());
}
final SortedDocValues selectedValues = select(values);
return new AbstractSortedDocValues() {
int docID = -1;
int lastSeenParentDoc = 0;
int lastEmittedOrd = -1;
@Override
public BytesRef lookupOrd(int ord) throws IOException {
return selectedValues.lookupOrd(ord);
}
@Override
public int getValueCount() {
return selectedValues.getValueCount();
}
@Override
public boolean advanceExact(int parentDoc) throws IOException {
assert parentDoc >= lastSeenParentDoc : "can only evaluate current and upcoming root docs";
if (parentDoc == lastSeenParentDoc) {
return lastEmittedOrd != -1;
}
final int prevParentDoc = parentDocs.prevSetBit(parentDoc - 1);
final int firstChildDoc = getFirstChildDoc(prevParentDoc, childDocs);
docID = lastSeenParentDoc = parentDoc;
lastEmittedOrd = pick(selectedValues, childDocs, firstChildDoc, parentDoc, maxChildren);
return lastEmittedOrd != -1;
}
@Override
public int docID() {
return docID;
}
@Override
public int ordValue() {
return lastEmittedOrd;
}
};
}
protected int pick(SortedDocValues values, DocIdSetIterator docItr, int startDoc, int endDoc, int maxChildren) throws IOException {
throw new IllegalArgumentException("Unsupported sort mode: " + this);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeEnum(this);
}
public static MultiValueMode readMultiValueModeFrom(StreamInput in) throws IOException {
return in.readEnum(MultiValueMode.class);
}
}
| MultiValueMode |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/models/annotations/internal/CollectionIdJdbcTypeAnnotation.java | {
"start": 530,
"end": 1525
} | class ____ implements CollectionIdJdbcType {
private Class<? extends JdbcType> value;
/**
* Used in creating dynamic annotation instances (e.g. from XML)
*/
public CollectionIdJdbcTypeAnnotation(ModelsContext modelContext) {
}
/**
* Used in creating annotation instances from JDK variant
*/
public CollectionIdJdbcTypeAnnotation(CollectionIdJdbcType annotation, ModelsContext modelContext) {
this.value = annotation.value();
}
/**
* Used in creating annotation instances from Jandex variant
*/
public CollectionIdJdbcTypeAnnotation(Map<String, Object> attributeValues, ModelsContext modelContext) {
this.value = (Class<? extends JdbcType>) attributeValues.get( "value" );
}
@Override
public Class<? extends Annotation> annotationType() {
return CollectionIdJdbcType.class;
}
@Override
public Class<? extends JdbcType> value() {
return value;
}
public void value(Class<? extends JdbcType> value) {
this.value = value;
}
}
| CollectionIdJdbcTypeAnnotation |
java | alibaba__nacos | config/src/main/java/com/alibaba/nacos/config/server/service/ConfigFuzzyWatchContextService.java | {
"start": 1789,
"end": 14507
} | class ____ {
/**
* groupKeyPattern -> watched client id set.
*/
private final Map<String, Set<String>> watchedClientsMap = new ConcurrentHashMap<>();
/**
* groupKeyPattern -> matched groupKeys set.
*/
private final Map<String, Set<String>> matchedGroupKeysMap = new ConcurrentHashMap<>();
public ConfigFuzzyWatchContextService() {
}
@PostConstruct
public void init() {
GlobalExecutor.scheduleWithFixDelayByCommon(() -> trimFuzzyWatchContext(), 30000);
}
/**
* trim fuzzy watch context. <br/> 1.remove watchedClients if watched client is empty. 2.remove matchedServiceKeys
* if watchedClients is null. pattern matchedServiceKeys will be removed in second period to avoid frequently
* matchedServiceKeys init.
*/
void trimFuzzyWatchContext() {
try {
Iterator<Map.Entry<String, Set<String>>> iterator = matchedGroupKeysMap.entrySet().iterator();
while (iterator.hasNext()) {
Map.Entry<String, Set<String>> matchedGroupKeys = iterator.next();
Set<String> watchedClients = this.watchedClientsMap.get(matchedGroupKeys.getKey());
if (watchedClients == null) {
iterator.remove();
LogUtil.DEFAULT_LOG.info(
"[fuzzy-watch] no watchedClients context for pattern {},remove matchedGroupKeys context",
matchedGroupKeys.getKey());
} else if (watchedClients.isEmpty()) {
LogUtil.DEFAULT_LOG.info("[fuzzy-watch] no client watched pattern {},remove watchedClients context",
matchedGroupKeys.getKey());
this.watchedClientsMap.remove(matchedGroupKeys.getKey());
} else if (reachToUpLimit(matchedGroupKeys.getValue().size())) {
LogUtil.DEFAULT_LOG.warn(
"[fuzzy-watch] pattern {} matched config count has reached to upper limit {}, fuzzy watch has been suppressed ",
matchedGroupKeys.getKey(), matchedGroupKeys.getValue().size());
} else if (reachToUpLimit((int) (matchedGroupKeys.getValue().size() * 1.25))) {
LogUtil.DEFAULT_LOG.warn(
"[fuzzy-watch] pattern {} matched config count has reached to 80% of the upper limit {} "
+ ",it may has a risk of notify suppressed in the near further",
matchedGroupKeys.getKey(), matchedGroupKeys.getValue().size());
}
}
} catch (Throwable throwable) {
LogUtil.DEFAULT_LOG.warn("[fuzzy-watch] trim fuzzy watch context fail", throwable);
}
}
/**
* get matched exist group keys with the groupKeyPattern. return null if not matched.
*
* @param groupKeyPattern groupKeyPattern.
* @return
*/
public Set<String> matchGroupKeys(String groupKeyPattern) {
Set<String> stringSet = matchedGroupKeysMap.get(groupKeyPattern);
return stringSet == null ? new HashSet<>() : new HashSet<>(matchedGroupKeysMap.get(groupKeyPattern));
}
/**
* sync group key change to fuzzy context.
*
* @param groupKey groupKey.
* @param changedType changedType.
* @return need notify ot not.
*/
public boolean syncGroupKeyContext(String groupKey, String changedType) {
boolean needNotify = false;
String[] groupKeyItems = GroupKey.parseKey(groupKey);
String dataId = groupKeyItems[0];
String group = groupKeyItems[1];
String namespace = groupKeyItems[2];
boolean tryAdd = changedType.equals(ADD_CONFIG) || changedType.equals(CONFIG_CHANGED);
boolean tryRemove = changedType.equals(DELETE_CONFIG);
Iterator<Map.Entry<String, Set<String>>> iterator = matchedGroupKeysMap.entrySet().iterator();
while (iterator.hasNext()) {
Map.Entry<String, Set<String>> entry = iterator.next();
if (FuzzyGroupKeyPattern.matchPattern(entry.getKey(), dataId, group, namespace)) {
boolean containsAlready = entry.getValue().contains(groupKey);
boolean reachToUpLimit = reachToUpLimit(entry.getValue().size());
if (tryAdd && !containsAlready && reachToUpLimit) {
LogUtil.DEFAULT_LOG.warn("[fuzzy-watch] pattern matched config count is over limit , "
+ "current config will be ignored for pattern {} ,current count is {}", entry.getKey(),
entry.getValue().size());
continue;
}
if (tryAdd && !containsAlready && entry.getValue().add(groupKey)) {
needNotify = true;
}
if (tryRemove && containsAlready && entry.getValue().remove(groupKey)) {
needNotify = true;
if (reachToUpLimit) {
makeupMatchedGroupKeys(entry.getKey());
}
}
}
}
return needNotify;
}
/**
* make matched group key when deleted configs on loa protection model.
*
* @param groupKeyPattern group key pattern.
*/
public void makeupMatchedGroupKeys(String groupKeyPattern) {
Set<String> matchedGroupKeys = matchedGroupKeysMap.get(groupKeyPattern);
if (matchedGroupKeys == null || reachToUpLimit(matchedGroupKeys.size())) {
return;
}
for (String groupKey : ConfigCacheService.CACHE.keySet()) {
String[] groupKeyItems = GroupKey.parseKey(groupKey);
if (FuzzyGroupKeyPattern.matchPattern(groupKeyPattern, groupKeyItems[0], groupKeyItems[1], groupKeyItems[2])
&& !matchedGroupKeys.contains(groupKey)) {
matchedGroupKeys.add(groupKey);
LogUtil.DEFAULT_LOG.info("[fuzzy-watch] pattern {} makeup group key {}", groupKeyPattern, groupKey);
if (reachToUpLimit(matchedGroupKeys.size())) {
LogUtil.DEFAULT_LOG.warn(
"[fuzzy-watch] pattern {] matched config count is over limit ,makeup group keys skip.",
groupKeyPattern);
return;
}
}
}
}
private boolean reachToUpLimit(int size) {
return size >= ConfigCommonConfig.getInstance().getMaxMatchedConfigCount();
}
public boolean reachToUpLimit(String groupKeyPattern) {
Set<String> strings = matchedGroupKeysMap.get(groupKeyPattern);
return strings != null && (reachToUpLimit(strings.size()));
}
/**
* Matches the client effective group keys based on the specified group key pattern, client IP, and tag.
*
* @param groupKeyPattern The pattern to match group keys.
*/
private void initMatchGroupKeys(String groupKeyPattern) throws NacosException {
if (matchedGroupKeysMap.containsKey(groupKeyPattern)) {
return;
}
if (matchedGroupKeysMap.size() >= ConfigCommonConfig.getInstance().getMaxPatternCount()) {
LogUtil.DEFAULT_LOG.warn(
"[fuzzy-watch] pattern count is over limit ,pattern {} init fail,current count is {}",
groupKeyPattern, matchedGroupKeysMap.size());
throw new NacosException(FUZZY_WATCH_PATTERN_OVER_LIMIT.getCode(), FUZZY_WATCH_PATTERN_OVER_LIMIT.getMsg());
}
matchedGroupKeysMap.computeIfAbsent(groupKeyPattern, k -> new HashSet<>());
Set<String> matchedGroupKeys = this.matchedGroupKeysMap.get(groupKeyPattern);
long matchBeginTime = System.currentTimeMillis();
boolean overMatchCount = false;
for (String groupKey : ConfigCacheService.CACHE.keySet()) {
String[] groupKeyItems = GroupKey.parseKey(groupKey);
if (FuzzyGroupKeyPattern.matchPattern(groupKeyPattern, groupKeyItems[0], groupKeyItems[1],
groupKeyItems[2])) {
if (reachToUpLimit(matchedGroupKeys.size())) {
LogUtil.DEFAULT_LOG.warn("[fuzzy-watch] pattern matched service count is over limit , "
+ "other services will stop notify for pattern {} ,current count is {}", groupKeyPattern,
matchedGroupKeys.size());
overMatchCount = true;
break;
}
matchedGroupKeys.add(groupKey);
}
}
LogUtil.DEFAULT_LOG.info("[fuzzy-watch] pattern {} match {} group keys,overMatchCount={}, cost {}ms",
groupKeyPattern, matchedGroupKeys.size(), overMatchCount, System.currentTimeMillis() - matchBeginTime);
}
/**
* Adds a fuzzy listen connection ID associated with the specified group key pattern. If the key pattern does not
* exist in the context, a new entry will be created. If the key pattern already exists, the connection ID will be
* added to the existing set.
*
* @param groupKeyPattern The group key pattern to associate with the listen connection.
* @param connectId The connection ID to be added.
* @throws NacosException over max pattern count.
*/
public synchronized void addFuzzyWatch(String groupKeyPattern, String connectId) throws NacosException {
watchedClientsMap.computeIfAbsent(groupKeyPattern, k -> new HashSet<>());
initMatchGroupKeys(groupKeyPattern);
// Add the connection ID to the set associated with the key pattern in keyPatternContext
watchedClientsMap.get(groupKeyPattern).add(connectId);
}
/**
* Removes a fuzzy listen connection ID associated with the specified group key pattern. If the group key pattern
* exists in the context and the connection ID is found in the associated set, the connection ID will be removed
* from the set. If the set becomes empty after removal, the entry for the group key pattern will be removed from
* the context.
*
* @param groupKeyPattern The group key pattern associated with the listen connection to be removed.
* @param connectionId The connection ID to be removed.
*/
public synchronized void removeFuzzyListen(String groupKeyPattern, String connectionId) {
// Retrieve the set of connection IDs associated with the group key pattern
Set<String> connectIds = watchedClientsMap.get(groupKeyPattern);
if (CollectionUtils.isNotEmpty(connectIds)) {
// Remove the connection ID from the set if it exists
connectIds.remove(connectionId);
}
}
/**
* remove watch context for connection id.
*
* @param connectionId connection id.
*/
public void clearFuzzyWatchContext(String connectionId) {
for (Map.Entry<String, Set<String>> keyPatternContextEntry : watchedClientsMap.entrySet()) {
Set<String> connectionIds = keyPatternContextEntry.getValue();
if (CollectionUtils.isNotEmpty(connectionIds)) {
connectionIds.remove(connectionId);
}
}
}
/**
* Retrieves the set of connection IDs matched with the specified group key.
*
* @param groupKey The group key to match with the key patterns.
* @return The set of connection IDs matched with the group key.
*/
public Set<String> getMatchedClients(String groupKey) {
// Initialize a set to store the matched connection IDs
Set<String> connectIds = new HashSet<>();
// Iterate over each key pattern in the context
Iterator<Map.Entry<String, Set<String>>> watchClientIterator = watchedClientsMap.entrySet().iterator();
String[] groupItems = GroupKey2.parseKey(groupKey);
while (watchClientIterator.hasNext()) {
Map.Entry<String, Set<String>> watchClientEntry = watchClientIterator.next();
String keyPattern = watchClientEntry.getKey();
if (FuzzyGroupKeyPattern.matchPattern(keyPattern, groupItems[0], groupItems[1], groupItems[2])) {
if (CollectionUtils.isNotEmpty(watchClientEntry.getValue())) {
connectIds.addAll(watchClientEntry.getValue());
}
}
}
return connectIds;
}
}
| ConfigFuzzyWatchContextService |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfig.java | {
"start": 3243,
"end": 27100
} | class ____ implements ToXContentObject, Writeable {
public static final String NAME = "trained_model_config";
public static final int CURRENT_DEFINITION_COMPRESSION_VERSION = 1;
public static final String DECOMPRESS_DEFINITION = "decompress_definition";
public static final String TOTAL_FEATURE_IMPORTANCE = "total_feature_importance";
public static final String FEATURE_IMPORTANCE_BASELINE = "feature_importance_baseline";
public static final String HYPERPARAMETERS = "hyperparameters";
public static final String MODEL_ALIASES = "model_aliases";
public static final String DEFINITION_STATUS = "definition_status";
private static final String ESTIMATED_HEAP_MEMORY_USAGE_HUMAN = "estimated_heap_memory_usage";
private static final String MODEL_SIZE_HUMAN = "model_size";
public static final ParseField MODEL_ID = new ParseField("model_id");
public static final ParseField MODEL_TYPE = new ParseField("model_type");
public static final ParseField CREATED_BY = new ParseField("created_by");
public static final ParseField VERSION = new ParseField("version");
public static final ParseField DESCRIPTION = new ParseField("description");
public static final ParseField CREATE_TIME = new ParseField("create_time");
public static final ParseField DEFINITION = new ParseField("definition");
public static final ParseField COMPRESSED_DEFINITION = new ParseField("compressed_definition");
public static final ParseField TAGS = new ParseField("tags");
public static final ParseField METADATA = new ParseField("metadata");
public static final ParseField INPUT = new ParseField("input");
public static final ParseField MODEL_SIZE_BYTES = new ParseField("model_size_bytes");
public static final ParseField MODEL_SIZE_BYTES_WITH_DEPRECATION = new ParseField(
"model_size_bytes",
"estimated_heap_memory_usage_bytes"
);
public static final ParseField DEPRECATED_ESTIMATED_HEAP_MEMORY_USAGE_BYTES = new ParseField("estimated_heap_memory_usage_bytes");
public static final ParseField ESTIMATED_OPERATIONS = new ParseField("estimated_operations");
public static final ParseField LICENSE_LEVEL = new ParseField("license_level");
public static final ParseField DEFAULT_FIELD_MAP = new ParseField("default_field_map");
public static final ParseField INFERENCE_CONFIG = new ParseField("inference_config");
public static final ParseField LOCATION = new ParseField("location");
public static final ParseField MODEL_PACKAGE = new ParseField("model_package");
public static final ParseField PREFIX_STRINGS = new ParseField("prefix_strings");
public static final ParseField PER_DEPLOYMENT_MEMORY_BYTES = new ParseField("per_deployment_memory_bytes");
public static final ParseField PER_ALLOCATION_MEMORY_BYTES = new ParseField("per_allocation_memory_bytes");
public static final ParseField PLATFORM_ARCHITECTURE = new ParseField("platform_architecture");
public static final TransportVersion VERSION_ALLOCATION_MEMORY_ADDED = TransportVersions.V_8_11_X;
// These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly
public static final ObjectParser<TrainedModelConfig.Builder, Void> LENIENT_PARSER = createParser(true);
public static final ObjectParser<TrainedModelConfig.Builder, Void> STRICT_PARSER = createParser(false);
private static ObjectParser<TrainedModelConfig.Builder, Void> createParser(boolean ignoreUnknownFields) {
ObjectParser<TrainedModelConfig.Builder, Void> parser = new ObjectParser<>(
NAME,
ignoreUnknownFields,
TrainedModelConfig.Builder::new
);
parser.declareString(TrainedModelConfig.Builder::setModelId, MODEL_ID);
parser.declareString(TrainedModelConfig.Builder::setModelType, MODEL_TYPE);
parser.declareString(TrainedModelConfig.Builder::setCreatedBy, CREATED_BY);
parser.declareString(TrainedModelConfig.Builder::setVersion, VERSION);
parser.declareString(TrainedModelConfig.Builder::setDescription, DESCRIPTION);
parser.declareField(
TrainedModelConfig.Builder::setCreateTime,
(p, c) -> TimeUtils.parseTimeFieldToInstant(p, CREATE_TIME.getPreferredName()),
CREATE_TIME,
ObjectParser.ValueType.VALUE
);
parser.declareStringArray(TrainedModelConfig.Builder::setTags, TAGS);
parser.declareObject(TrainedModelConfig.Builder::setMetadata, (p, c) -> p.map(), METADATA);
parser.declareString((trainedModelConfig, s) -> {}, InferenceIndexConstants.DOC_TYPE);
parser.declareObject(TrainedModelConfig.Builder::setInput, (p, c) -> TrainedModelInput.fromXContent(p, ignoreUnknownFields), INPUT);
if (ignoreUnknownFields) {
// On reading from the index, we automatically translate to the new field, no need have a deprecation warning
parser.declareLong(TrainedModelConfig.Builder::setModelSize, DEPRECATED_ESTIMATED_HEAP_MEMORY_USAGE_BYTES);
parser.declareLong(TrainedModelConfig.Builder::setModelSize, MODEL_SIZE_BYTES);
} else {
// If this is a new PUT, we should indicate that `estimated_heap_memory_usage_bytes` is deprecated
parser.declareLong(TrainedModelConfig.Builder::setModelSize, MODEL_SIZE_BYTES_WITH_DEPRECATION);
}
parser.declareLong(TrainedModelConfig.Builder::setEstimatedOperations, ESTIMATED_OPERATIONS);
parser.declareObject(
TrainedModelConfig.Builder::setLazyDefinition,
(p, c) -> TrainedModelDefinition.fromXContent(p, ignoreUnknownFields),
DEFINITION
);
parser.declareString(TrainedModelConfig.Builder::setLazyDefinition, COMPRESSED_DEFINITION);
parser.declareString(TrainedModelConfig.Builder::setLicenseLevel, LICENSE_LEVEL);
parser.declareObject(TrainedModelConfig.Builder::setDefaultFieldMap, (p, c) -> p.mapStrings(), DEFAULT_FIELD_MAP);
parser.declareNamedObject(
TrainedModelConfig.Builder::setInferenceConfig,
(p, c, n) -> ignoreUnknownFields
? p.namedObject(LenientlyParsedInferenceConfig.class, n, null)
: p.namedObject(StrictlyParsedInferenceConfig.class, n, null),
INFERENCE_CONFIG
);
parser.declareNamedObject(
TrainedModelConfig.Builder::setLocation,
(p, c, n) -> ignoreUnknownFields
? p.namedObject(LenientlyParsedTrainedModelLocation.class, n, null)
: p.namedObject(StrictlyParsedTrainedModelLocation.class, n, null),
LOCATION
);
parser.declareObject(
TrainedModelConfig.Builder::setModelPackageConfig,
(p, c) -> ignoreUnknownFields ? ModelPackageConfig.fromXContentLenient(p) : ModelPackageConfig.fromXContentStrict(p),
MODEL_PACKAGE
);
parser.declareString(TrainedModelConfig.Builder::setPlatformArchitecture, PLATFORM_ARCHITECTURE);
parser.declareObject(
TrainedModelConfig.Builder::setPrefixStrings,
(p, c) -> TrainedModelPrefixStrings.fromXContent(p, ignoreUnknownFields),
PREFIX_STRINGS
);
return parser;
}
public static TrainedModelConfig.Builder fromXContent(XContentParser parser, boolean lenient) throws IOException {
return lenient ? LENIENT_PARSER.parse(parser, null) : STRICT_PARSER.parse(parser, null);
}
private final String modelId;
private final String createdBy;
private final MlConfigVersion version;
private final String description;
private final Instant createTime;
private final TrainedModelType modelType;
private final List<String> tags;
private final Map<String, Object> metadata;
private final TrainedModelInput input;
private final long modelSize;
private final long estimatedOperations;
private final License.OperationMode licenseLevel;
private final Map<String, String> defaultFieldMap;
private final InferenceConfig inferenceConfig;
private final LazyModelDefinition definition;
private final TrainedModelLocation location;
private final ModelPackageConfig modelPackageConfig;
private Boolean fullDefinition;
private String platformArchitecture;
private TrainedModelPrefixStrings prefixStrings;
TrainedModelConfig(
String modelId,
TrainedModelType modelType,
String createdBy,
MlConfigVersion version,
String description,
Instant createTime,
LazyModelDefinition definition,
List<String> tags,
Map<String, Object> metadata,
TrainedModelInput input,
Long modelSize,
Long estimatedOperations,
String licenseLevel,
Map<String, String> defaultFieldMap,
InferenceConfig inferenceConfig,
TrainedModelLocation location,
ModelPackageConfig modelPackageConfig,
String platformArchitecture,
TrainedModelPrefixStrings prefixStrings
) {
this.modelId = ExceptionsHelper.requireNonNull(modelId, MODEL_ID);
this.modelType = modelType;
this.createdBy = ExceptionsHelper.requireNonNull(createdBy, CREATED_BY);
this.version = ExceptionsHelper.requireNonNull(version, VERSION);
this.createTime = Instant.ofEpochMilli(ExceptionsHelper.requireNonNull(createTime, CREATE_TIME).toEpochMilli());
this.definition = definition;
this.description = description;
this.tags = Collections.unmodifiableList(ExceptionsHelper.requireNonNull(tags, TAGS));
this.metadata = metadata == null ? null : Collections.unmodifiableMap(metadata);
this.input = ExceptionsHelper.requireNonNull(handleDefaultInput(input, inferenceConfig, modelType), INPUT);
if (ExceptionsHelper.requireNonNull(modelSize, MODEL_SIZE_BYTES) < 0) {
throw new IllegalArgumentException("[" + MODEL_SIZE_BYTES.getPreferredName() + "] must be greater than or equal to 0");
}
this.modelSize = modelSize;
if (ExceptionsHelper.requireNonNull(estimatedOperations, ESTIMATED_OPERATIONS) < 0) {
throw new IllegalArgumentException("[" + ESTIMATED_OPERATIONS.getPreferredName() + "] must be greater than or equal to 0");
}
this.estimatedOperations = estimatedOperations;
this.licenseLevel = License.OperationMode.parse(ExceptionsHelper.requireNonNull(licenseLevel, LICENSE_LEVEL));
assert this.licenseLevel.equals(License.OperationMode.PLATINUM) || this.licenseLevel.equals(License.OperationMode.BASIC)
: "[" + LICENSE_LEVEL.getPreferredName() + "] only [platinum] or [basic] is supported";
this.defaultFieldMap = defaultFieldMap == null ? null : Collections.unmodifiableMap(defaultFieldMap);
this.inferenceConfig = inferenceConfig;
this.location = location;
this.modelPackageConfig = modelPackageConfig;
this.platformArchitecture = platformArchitecture;
this.prefixStrings = prefixStrings;
}
private static TrainedModelInput handleDefaultInput(
TrainedModelInput input,
InferenceConfig inferenceConfig,
TrainedModelType modelType
) {
return input == null && inferenceConfig != null ? inferenceConfig.getDefaultInput(modelType) : input;
}
public TrainedModelConfig(StreamInput in) throws IOException {
modelId = in.readString();
createdBy = in.readString();
version = MlConfigVersion.readVersion(in);
description = in.readOptionalString();
createTime = in.readInstant();
definition = in.readOptionalWriteable(LazyModelDefinition::fromStreamInput);
tags = in.readCollectionAsImmutableList(StreamInput::readString);
metadata = in.readGenericMap();
input = new TrainedModelInput(in);
modelSize = in.readVLong();
estimatedOperations = in.readVLong();
licenseLevel = License.OperationMode.parse(in.readString());
this.defaultFieldMap = in.readBoolean() ? in.readImmutableMap(StreamInput::readString) : null;
this.inferenceConfig = in.readOptionalNamedWriteable(InferenceConfig.class);
this.modelType = in.readOptionalEnum(TrainedModelType.class);
this.location = in.readOptionalNamedWriteable(TrainedModelLocation.class);
if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
modelPackageConfig = in.readOptionalWriteable(ModelPackageConfig::new);
fullDefinition = in.readOptionalBoolean();
} else {
modelPackageConfig = null;
fullDefinition = null;
}
if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_11_X)) {
platformArchitecture = in.readOptionalString();
} else {
platformArchitecture = null;
}
if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) {
prefixStrings = in.readOptionalWriteable(TrainedModelPrefixStrings::new);
}
}
public boolean isPackagedModel() {
return modelId.startsWith(".");
}
public String getModelId() {
return modelId;
}
@Nullable
public TrainedModelType getModelType() {
return this.modelType;
}
public String getCreatedBy() {
return createdBy;
}
public MlConfigVersion getVersion() {
return version;
}
public String getDescription() {
return description;
}
public Instant getCreateTime() {
return createTime;
}
public List<String> getTags() {
return tags;
}
public Map<String, Object> getMetadata() {
return metadata;
}
public Map<String, String> getDefaultFieldMap() {
return defaultFieldMap;
}
@Nullable
public InferenceConfig getInferenceConfig() {
return inferenceConfig;
}
@Nullable
public BytesReference getCompressedDefinition() throws IOException {
if (definition == null) {
return null;
}
return definition.getCompressedDefinition();
}
public BytesReference getCompressedDefinitionIfSet() {
if (definition == null) {
return null;
}
return definition.getCompressedDefinitionIfSet();
}
public ModelPackageConfig getModelPackageConfig() {
return modelPackageConfig;
}
public void clearCompressed() {
definition.compressedRepresentation = null;
}
public TrainedModelConfig ensureParsedDefinition(NamedXContentRegistry xContentRegistry) throws IOException {
if (definition == null) {
return null;
}
definition.ensureParsedDefinition(xContentRegistry);
return this;
}
public TrainedModelConfig ensureParsedDefinitionUnsafe(NamedXContentRegistry xContentRegistry) throws IOException {
if (definition == null) {
return null;
}
definition.ensureParsedDefinitionUnsafe(xContentRegistry);
return this;
}
@Nullable
public TrainedModelDefinition getModelDefinition() {
if (definition == null) {
return null;
}
return definition.parsedDefinition;
}
@Nullable
public TrainedModelLocation getLocation() {
return location;
}
public TrainedModelInput getInput() {
return input;
}
public static Builder builder() {
return new Builder();
}
public long getModelSize() {
return modelSize;
}
public long getEstimatedOperations() {
return estimatedOperations;
}
// TODO if we ever support anything other than "basic" and platinum, we need to adjust our feature tracking logic
// and we need to adjust our license checks to validate more than "is basic" or not
public License.OperationMode getLicenseLevel() {
return licenseLevel;
}
public boolean isAllocateOnly() {
return Optional.ofNullable(inferenceConfig).map(InferenceConfig::isAllocateOnly).orElse(false);
}
public void setFullDefinition(boolean fullDefinition) {
this.fullDefinition = fullDefinition;
}
public long getPerDeploymentMemoryBytes() {
return metadata != null && metadata.containsKey(PER_DEPLOYMENT_MEMORY_BYTES.getPreferredName())
? ((Number) metadata.get(PER_DEPLOYMENT_MEMORY_BYTES.getPreferredName())).longValue()
: 0L;
}
public long getPerAllocationMemoryBytes() {
return metadata != null && metadata.containsKey(PER_ALLOCATION_MEMORY_BYTES.getPreferredName())
? ((Number) metadata.get(PER_ALLOCATION_MEMORY_BYTES.getPreferredName())).longValue()
: 0L;
}
public String getPlatformArchitecture() {
return platformArchitecture;
}
public TrainedModelPrefixStrings getPrefixStrings() {
return prefixStrings;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(modelId);
out.writeString(createdBy);
MlConfigVersion.writeVersion(version, out);
out.writeOptionalString(description);
out.writeInstant(createTime);
out.writeOptionalWriteable(definition);
out.writeStringCollection(tags);
out.writeGenericMap(metadata);
input.writeTo(out);
out.writeVLong(modelSize);
out.writeVLong(estimatedOperations);
out.writeString(licenseLevel.description());
if (defaultFieldMap != null) {
out.writeBoolean(true);
out.writeMap(defaultFieldMap, StreamOutput::writeString);
} else {
out.writeBoolean(false);
}
out.writeOptionalNamedWriteable(inferenceConfig);
out.writeOptionalEnum(modelType);
out.writeOptionalNamedWriteable(location);
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
out.writeOptionalWriteable(modelPackageConfig);
out.writeOptionalBoolean(fullDefinition);
}
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_11_X)) {
out.writeOptionalString(platformArchitecture);
}
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) {
out.writeOptionalWriteable(prefixStrings);
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(MODEL_ID.getPreferredName(), modelId);
if (modelType != null) {
builder.field(MODEL_TYPE.getPreferredName(), modelType.toString());
}
if (modelPackageConfig != null) {
builder.field(MODEL_PACKAGE.getPreferredName(), modelPackageConfig);
}
if (platformArchitecture != null) {
builder.field(PLATFORM_ARCHITECTURE.getPreferredName(), platformArchitecture);
}
// If the model is to be exported for future import to another cluster, these fields are irrelevant.
if (params.paramAsBoolean(EXCLUDE_GENERATED, false) == false) {
builder.field(CREATED_BY.getPreferredName(), createdBy);
builder.field(VERSION.getPreferredName(), version.toString());
builder.timestampFieldsFromUnixEpochMillis(
CREATE_TIME.getPreferredName(),
CREATE_TIME.getPreferredName() + "_string",
createTime.toEpochMilli()
);
builder.humanReadableField(MODEL_SIZE_BYTES.getPreferredName(), MODEL_SIZE_HUMAN, ByteSizeValue.ofBytes(modelSize));
builder.field(ESTIMATED_OPERATIONS.getPreferredName(), estimatedOperations);
builder.field(LICENSE_LEVEL.getPreferredName(), licenseLevel.description());
}
if (description != null) {
builder.field(DESCRIPTION.getPreferredName(), description);
}
// We don't store the definition in the same document as the configuration
if ((params.paramAsBoolean(ToXContentParams.FOR_INTERNAL_STORAGE, false) == false) && definition != null) {
if (params.paramAsBoolean(DECOMPRESS_DEFINITION, false)) {
builder.field(DEFINITION.getPreferredName(), definition);
} else {
builder.field(COMPRESSED_DEFINITION.getPreferredName(), definition.getBase64CompressedDefinition());
}
}
builder.field(TAGS.getPreferredName(), tags);
if (metadata != null) {
builder.field(METADATA.getPreferredName(), metadata);
}
if (params.paramAsBoolean(ToXContentParams.FOR_INTERNAL_STORAGE, false)) {
builder.field(InferenceIndexConstants.DOC_TYPE.getPreferredName(), NAME);
}
builder.field(INPUT.getPreferredName(), input);
if (defaultFieldMap != null && defaultFieldMap.isEmpty() == false) {
builder.field(DEFAULT_FIELD_MAP.getPreferredName(), defaultFieldMap);
}
if (inferenceConfig != null) {
writeNamedObject(builder, params, INFERENCE_CONFIG.getPreferredName(), inferenceConfig);
}
if (location != null) {
writeNamedObject(builder, params, LOCATION.getPreferredName(), location);
}
if (prefixStrings != null) {
builder.field(PREFIX_STRINGS.getPreferredName(), prefixStrings);
}
if (params.paramAsBoolean(DEFINITION_STATUS, false) && fullDefinition != null) {
builder.field("fully_defined", fullDefinition);
}
builder.endObject();
return builder;
}
@Override
public String toString() {
return Strings.toString(this);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
TrainedModelConfig that = (TrainedModelConfig) o;
return Objects.equals(modelId, that.modelId)
&& Objects.equals(modelType, that.modelType)
&& Objects.equals(modelPackageConfig, that.modelPackageConfig)
&& Objects.equals(createdBy, that.createdBy)
&& Objects.equals(version, that.version)
&& Objects.equals(description, that.description)
&& Objects.equals(createTime, that.createTime)
&& Objects.equals(definition, that.definition)
&& Objects.equals(tags, that.tags)
&& Objects.equals(input, that.input)
&& Objects.equals(modelSize, that.modelSize)
&& Objects.equals(estimatedOperations, that.estimatedOperations)
&& Objects.equals(licenseLevel, that.licenseLevel)
&& Objects.equals(defaultFieldMap, that.defaultFieldMap)
&& Objects.equals(inferenceConfig, that.inferenceConfig)
&& Objects.equals(metadata, that.metadata)
&& Objects.equals(location, that.location)
&& Objects.equals(platformArchitecture, that.platformArchitecture)
&& Objects.equals(prefixStrings, that.prefixStrings);
}
@Override
public int hashCode() {
return Objects.hash(
modelId,
modelType,
modelPackageConfig,
createdBy,
version,
createTime,
definition,
description,
tags,
metadata,
modelSize,
estimatedOperations,
input,
licenseLevel,
inferenceConfig,
defaultFieldMap,
location,
platformArchitecture,
prefixStrings
);
}
public static | TrainedModelConfig |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/dynamic/support/StandardReflectionParameterNameDiscoverer.java | {
"start": 386,
"end": 1422
} | class ____ implements ParameterNameDiscoverer {
@Override
public String[] getParameterNames(Method method) {
Parameter[] parameters = method.getParameters();
String[] parameterNames = new String[parameters.length];
for (int i = 0; i < parameters.length; i++) {
Parameter param = parameters[i];
if (!param.isNamePresent()) {
return null;
}
parameterNames[i] = param.getName();
}
return parameterNames;
}
@Override
public String[] getParameterNames(Constructor<?> ctor) {
Parameter[] parameters = ctor.getParameters();
String[] parameterNames = new String[parameters.length];
for (int i = 0; i < parameters.length; i++) {
Parameter param = parameters[i];
if (!param.isNamePresent()) {
return null;
}
parameterNames[i] = param.getName();
}
return parameterNames;
}
}
| StandardReflectionParameterNameDiscoverer |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/MaintNotificationsConfig.java | {
"start": 9172,
"end": 9675
} | class ____ {
/**
* Determines the endpoint type based on connection characteristics.
*
* @param socketAddress the remote socket address of the connection
* @param sslEnabled whether TLS/SSL is enabled for the connection
* @return the {@link EndpointType} type to request, or null if no specific type is needed
*/
public abstract EndpointType getEndpointType(SocketAddress socketAddress, boolean sslEnabled);
}
}
| EndpointTypeSource |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java | {
"start": 6235,
"end": 7514
} | interface ____ only for Counter (CTR) mode. Generally the Encryptor
* or Decryptor calculates the IV and maintain encryption context internally.
* For example a {@link javax.crypto.Cipher} will maintain its encryption
* context internally when we do encryption/decryption using the
* Cipher#update interface.
* <p>
* Encryption/Decryption is not always on the entire file. For example,
* in Hadoop, a node may only decrypt a portion of a file (i.e. a split).
* In these situations, the counter is derived from the file position.
* <p>
* The IV can be calculated by combining the initial IV and the counter with
* a lossless operation (concatenation, addition, or XOR).
* See http://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Counter_
* .28CTR.29
*
* @param initIV initial IV
* @param counter counter for input stream position
* @param IV the IV for input stream position
*/
public abstract void calculateIV(byte[] initIV, long counter, byte[] IV);
/**
* Generate a number of secure, random bytes suitable for cryptographic use.
* This method needs to be thread-safe.
*
* @param bytes byte array to populate with random data
*/
public abstract void generateSecureRandom(byte[] bytes);
}
| is |
java | elastic__elasticsearch | x-pack/plugin/sql/qa/server/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/SqlSecurityTestCase.java | {
"start": 2179,
"end": 2569
} | class ____ extends ESRestTestCase {
public String elasticsearchAddress() {
// CLI only supports a single node at a time so we just give it one.
return getTestRestCluster().split(",")[0];
}
/**
* Actions taken by this test.
* <p>
* For methods that take {@code user} a {@code null} user means "use the admin".
*/
protected | SqlSecurityTestCase |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcConstants.java | {
"start": 1004,
"end": 2668
} | class ____ {
private RpcConstants() {
// Hidden Constructor
}
public static final int AUTHORIZATION_FAILED_CALL_ID = -1;
public static final int INVALID_CALL_ID = -2;
public static final int CONNECTION_CONTEXT_CALL_ID = -3;
public static final int PING_CALL_ID = -4;
public static final byte[] DUMMY_CLIENT_ID = new byte[0];
public static final int INVALID_RETRY_COUNT = -1;
/**
* The Rpc-connection header is as follows
* +----------------------------------+
* | "hrpc" 4 bytes |
* +----------------------------------+
* | Version (1 byte) |
* +----------------------------------+
* | Service Class (1 byte) |
* +----------------------------------+
* | AuthProtocol (1 byte) |
* +----------------------------------+
*/
/**
* The first four bytes of Hadoop RPC connections
*/
public static final ByteBuffer HEADER =
ByteBuffer.wrap("hrpc".getBytes(StandardCharsets.UTF_8));
public static final int HEADER_LEN_AFTER_HRPC_PART = 3; // 3 bytes that follow
// 1 : Introduce ping and server does not throw away RPCs
// 3 : Introduce the protocol into the RPC connection header
// 4 : Introduced SASL security layer
// 5 : Introduced use of {@link ArrayPrimitiveWritable$Internal}
// in ObjectWritable to efficiently transmit arrays of primitives
// 6 : Made RPC Request header explicit
// 7 : Changed Ipc Connection Header to use Protocol buffers
// 8 : SASL server always sends a final response
// 9 : Changes to protocol for HADOOP-8990
public static final byte CURRENT_VERSION = 9;
}
| RpcConstants |
java | mockito__mockito | mockito-core/src/main/java/org/mockito/AdditionalAnswers.java | {
"start": 1250,
"end": 1662
} | interface ____ provide a neater way to write custom answers
* that either return a value or are void (see answer interfaces in org.mockito.stubbing).
*
* <p>See factory methods for more information : {@link #returnsFirstArg}, {@link #returnsSecondArg},
* {@link #returnsLastArg}, {@link #returnsArgAt}, {@link #answer} and {@link #answerVoid}
*
* @since 1.9.5
*/
@SuppressWarnings("unchecked")
public final | to |
java | spring-projects__spring-framework | spring-web/src/test/java/org/springframework/http/server/reactive/EchoHandlerIntegrationTests.java | {
"start": 1198,
"end": 2078
} | class ____ extends AbstractHttpHandlerIntegrationTests {
private static final int REQUEST_SIZE = 4096 * 3;
private final Random rnd = new Random();
@Override
protected EchoHandler createHttpHandler() {
return new EchoHandler();
}
@ParameterizedHttpServerTest
public void echo(HttpServer httpServer) throws Exception {
startServer(httpServer);
RestTemplate restTemplate = new RestTemplate();
byte[] body = randomBytes();
RequestEntity<byte[]> request = RequestEntity.post(URI.create("http://localhost:" + port)).body(body);
ResponseEntity<byte[]> response = restTemplate.exchange(request, byte[].class);
assertThat(response.getBody()).isEqualTo(body);
}
private byte[] randomBytes() {
byte[] buffer = new byte[REQUEST_SIZE];
rnd.nextBytes(buffer);
return buffer;
}
/**
* @author Arjen Poutsma
*/
public static | EchoHandlerIntegrationTests |
java | elastic__elasticsearch | x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EnrichIT.java | {
"start": 4412,
"end": 20494
} | class ____ extends Plugin {
@Override
public List<Setting<?>> getSettings() {
return List.of(IGNORE_DESERIALIZATION_ERRORS_SETTING);
}
}
@Override
protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
return Settings.builder()
.put(super.nodeSettings(nodeOrdinal, otherSettings))
.put(XPackSettings.SECURITY_ENABLED.getKey(), false)
.put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "128mb")
/*
* Force standard settings for the request breaker or we may not break at all.
* Without this we can randomly decide to use the `noop` breaker for request
* and it won't break.....
*/
.put(
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(),
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING.getDefault(Settings.EMPTY)
)
.put(
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING.getKey(),
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING.getDefault(Settings.EMPTY)
)
.put(ExchangeService.INACTIVE_SINKS_INTERVAL_SETTING, TimeValue.timeValueMillis(between(3000, 4000)))
.put(BlockFactory.LOCAL_BREAKER_OVER_RESERVED_SIZE_SETTING, ByteSizeValue.ofBytes(between(0, 256)))
.put(BlockFactory.LOCAL_BREAKER_OVER_RESERVED_MAX_SIZE_SETTING, ByteSizeValue.ofBytes(between(0, 1024)))
// allow reading pages from network can trip the circuit breaker
.put(IGNORE_DESERIALIZATION_ERRORS_SETTING.getKey(), true)
.build();
}
@Override
public EsqlQueryResponse run(EsqlQueryRequest request) {
final Client client;
if (randomBoolean()) {
client = client(randomFrom(clusterService().state().nodes().getCoordinatingOnlyNodes().values()).getName());
} else {
client = client();
}
if (request.profile() == false && randomBoolean()) {
request.profile(true);
}
if (randomBoolean()) {
setRequestCircuitBreakerLimit(ByteSizeValue.ofBytes(between(256, 4096)));
try {
return client.execute(EsqlQueryAction.INSTANCE, request).actionGet(2, TimeUnit.MINUTES);
} catch (Exception e) {
logger.info("request failed", e);
EsqlTestUtils.assertEsqlFailure(e);
ensureBlocksReleased();
} finally {
setRequestCircuitBreakerLimit(null);
}
}
return client.execute(EsqlQueryAction.INSTANCE, request).actionGet(30, TimeUnit.SECONDS);
}
static EnrichPolicy policy = new EnrichPolicy("match", null, List.of("songs"), "song_id", List.of("title", "artist", "length"));
@Before
public void setupEnrichPolicies() {
client().admin()
.indices()
.prepareCreate("songs")
.setMapping("song_id", "type=keyword", "title", "type=keyword", "artist", "type=keyword", "length", "type=double")
.get();
record Song(String id, String title, String artist, double length) {
}
var songs = List.of(
new Song("s1", "Hotel California", "Eagles", 7.12),
new Song("s2", "In The End", "Linkin Park", 3.36),
new Song("s3", "Numb", "Linkin Park", 3.05),
new Song("s4", "The Sound Of Silence", "Disturbed", 4.08)
);
for (var s : songs) {
client().prepareIndex("songs").setSource("song_id", s.id, "title", s.title, "artist", s.artist, "length", s.length).get();
}
client().admin().indices().prepareRefresh("songs").get();
client().execute(PutEnrichPolicyAction.INSTANCE, new PutEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, "songs", policy))
.actionGet();
client().execute(ExecuteEnrichPolicyAction.INSTANCE, new ExecuteEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, "songs"))
.actionGet();
assertAcked(client().admin().indices().prepareDelete("songs"));
}
@After
public void cleanEnrichPolicies() {
cluster().wipe(Set.of());
client().execute(DeleteEnrichPolicyAction.INSTANCE, new DeleteEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, "songs"));
}
@Before
public void setupMainIndex() {
var localListens = List.of(
new Listen(1, "s3", 1.5),
new Listen(2, "s2", 2.0),
new Listen(3, "s1", 0.5),
new Listen(4, "s3", 1.0),
new Listen(5, "s1", 2.5),
new Listen(6, "s1", 0.25),
new Listen(7, "s2", 3.0)
);
client().admin()
.indices()
.prepareCreate("listens")
.setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1))
.setMapping("timestamp", "type=long", "song_id", "type=keyword", "duration", "type=double")
.get();
for (Listen listen : localListens) {
client().prepareIndex("listens")
.setSource("timestamp", listen.timestamp, "song_id", listen.songId, "duration", listen.duration)
.get();
}
client().admin().indices().prepareRefresh("listens").get();
}
@Before
public void ensureAtLeastOneCoordinatingNodeOnly() {
if (clusterService().state().nodes().getCoordinatingOnlyNodes().isEmpty()) {
internalCluster().startCoordinatingOnlyNode(Settings.EMPTY);
}
}
record Listen(long timestamp, String songId, double duration) {
}
private static String enrichSongCommand() {
return EsqlTestUtils.randomEnrichCommand(
"songs",
randomFrom(Enrich.Mode.COORDINATOR, Enrich.Mode.ANY),
policy.getMatchField(),
policy.getEnrichFields()
);
}
public void testSumDurationByArtist() {
Function<EsqlQueryResponse, Map<String, Double>> extractStats = resp -> {
List<ColumnInfoImpl> columns = resp.columns();
assertThat(columns, hasSize(2));
assertThat(columns.get(0).name(), equalTo("sum(duration)"));
assertThat(columns.get(0).type(), equalTo(DataType.DOUBLE));
assertThat(columns.get(1).name(), equalTo("artist"));
assertThat(columns.get(1).type(), equalTo(DataType.KEYWORD));
Iterator<Iterator<Object>> rows = resp.values();
Map<String, Double> actualValues = new HashMap<>();
while (rows.hasNext()) {
Iterator<Object> row = rows.next();
Object v = row.next();
Object k = row.next();
actualValues.put((String) k, (Double) v);
}
return actualValues;
};
var statsCommands = List.of(
enrichSongCommand() + " | STATS sum(duration) by artist",
"STATS duration = sum(duration) by song_id | " + enrichSongCommand() + " | STATS sum(duration) by artist"
);
for (String statsCommand : statsCommands) {
try (var resp = run("from listens* | " + statsCommand)) {
assertThat(extractStats.apply(resp), equalTo(Map.of("Eagles", 3.25, "Linkin Park", 7.5)));
}
}
}
public void testAvgDurationByArtist() {
Function<EsqlQueryResponse, Map<String, Double>> extractStats = resp -> {
List<ColumnInfoImpl> columns = resp.columns();
assertThat(columns, hasSize(2));
assertThat(columns.get(0).name(), equalTo("avg(duration)"));
assertThat(columns.get(0).type(), equalTo(DataType.DOUBLE));
assertThat(columns.get(1).name(), equalTo("artist"));
assertThat(columns.get(1).type(), equalTo(DataType.KEYWORD));
Iterator<Iterator<Object>> rows = resp.values();
Map<String, Double> actualValues = new HashMap<>();
while (rows.hasNext()) {
Iterator<Object> row = rows.next();
Object v = row.next();
Object k = row.next();
actualValues.put((String) k, (Double) v);
}
return actualValues;
};
try (var resp = run("from listens* | " + enrichSongCommand() + " | STATS avg(duration) by artist")) {
Map<String, Double> stats = extractStats.apply(resp);
assertThat(stats.keySet(), containsInAnyOrder("Eagles", "Linkin Park"));
assertThat(stats.get("Eagles"), closeTo(1.08333, 0.1));
assertThat(stats.get("Linkin Park"), closeTo(1.875, 0.1));
}
}
public void testListeningRatio() {
Function<EsqlQueryResponse, Map<String, Double>> extractStats = resp -> {
List<ColumnInfoImpl> columns = resp.columns();
assertThat(columns, hasSize(2));
assertThat(columns.get(0).name(), equalTo("ratio"));
assertThat(columns.get(0).type(), equalTo(DataType.DOUBLE));
assertThat(columns.get(1).name(), equalTo("artist"));
assertThat(columns.get(1).type(), equalTo(DataType.KEYWORD));
Iterator<Iterator<Object>> rows = resp.values();
Map<String, Double> actualValues = new HashMap<>();
while (rows.hasNext()) {
Iterator<Object> row = rows.next();
Object v = row.next();
Object k = row.next();
actualValues.put((String) k, (Double) v);
}
return actualValues;
};
var statsCommand = "STATS d = sum(duration), l = sum(length) by artist | EVAL ratio=d /l | KEEP ratio, artist";
try (var resp = run("from listens* | " + enrichSongCommand() + "|" + statsCommand)) {
Map<String, Double> stats = extractStats.apply(resp);
assertThat(stats.keySet(), containsInAnyOrder("Eagles", "Linkin Park"));
assertThat(stats.get("Eagles"), closeTo(0.1521, 0.05));
assertThat(stats.get("Linkin Park"), closeTo(0.585, 0.05));
}
}
public void testFilterAfterEnrich() {
try (var resp = run("from listens* | " + enrichSongCommand() + " | WHERE length < 3.2 | limit 10 | KEEP artist,title")) {
Iterator<Object> row = resp.values().next();
assertThat(row.next(), equalTo("Linkin Park"));
assertThat(row.next(), equalTo("Numb"));
}
}
public void testTopN() {
try (var resp = run("from listens* | sort timestamp DESC | limit 1 |" + enrichSongCommand() + " | KEEP timestamp, artist")) {
Iterator<Object> row = resp.values().next();
assertThat(row.next(), equalTo(7L));
assertThat(row.next(), equalTo("Linkin Park"));
}
try (var resp = run("from listens* | " + enrichSongCommand() + " | sort timestamp DESC | limit 1 | KEEP timestamp, artist")) {
Iterator<Object> row = resp.values().next();
assertThat(row.next(), equalTo(7L));
assertThat(row.next(), equalTo("Linkin Park"));
}
}
public void testProfile() {
EsqlQueryRequest request = syncEsqlQueryRequest(
"from listens* | sort timestamp DESC | limit 1 | " + enrichSongCommand() + " | KEEP timestamp, artist"
).pragmas(randomPragmas()).profile(true);
try (var resp = run(request)) {
Iterator<Object> row = resp.values().next();
assertThat(row.next(), equalTo(7L));
assertThat(row.next(), equalTo("Linkin Park"));
EsqlQueryResponse.Profile profile = resp.profile();
assertNotNull(profile);
List<DriverProfile> drivers = profile.drivers();
assertThat(drivers.size(), greaterThanOrEqualTo(2));
List<OperatorStatus> enrichOperators = drivers.stream()
.flatMap(d -> d.operators().stream())
.filter(status -> status.operator().startsWith("EnrichOperator"))
.toList();
assertThat(enrichOperators, not(emptyList()));
}
}
/**
* Some enrich queries that could fail without the PushDownEnrich rule.
*/
public void testForPushDownEnrichRule() {
{
String query = String.format(Locale.ROOT, """
FROM listens*
| eval x = TO_STR(song_id)
| SORT x
| %s
| SORT song_id
| LIMIT 5
| STATS listens = count(*) BY title
| SORT listens DESC
| KEEP title, listens
""", enrichSongCommand());
try (EsqlQueryResponse resp = run(query)) {
assertThat(EsqlTestUtils.getValuesList(resp), equalTo(List.of(List.of("Hotel California", 3L), List.of("In The End", 2L))));
}
}
{
String query = String.format(Locale.ROOT, """
FROM listens*
| eval x = TO_STR(song_id)
| SORT x
| KEEP x, song_id
| %s
| SORT song_id
| KEEP title, song_id
| LIMIT 1
""", enrichSongCommand());
try (EsqlQueryResponse resp = run(query)) {
assertThat(EsqlTestUtils.getValuesList(resp), equalTo(List.of(List.of("Hotel California", "s1"))));
}
}
}
/**
* To enable enrich lookup using ordinals
*/
public void testManyDocuments() {
int numDocs = between(200, 2000);
var artists = Map.of("s1", "Eagles", "s2", "Linkin Park", "s3", "Linkin Park", "s4", "Disturbed");
client().admin()
.indices()
.prepareCreate("many_docs")
.setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1))
.setMapping("song_id", "type=keyword")
.get();
Map<String, Long> songs = new HashMap<>();
for (int i = 0; i < numDocs; i++) {
String song = randomFrom(artists.keySet());
client().prepareIndex("many_docs").setSource("song_id", song).get();
songs.merge(song, 1L, Long::sum);
}
client().admin().indices().prepareRefresh("many_docs").get();
try (EsqlQueryResponse resp = run("FROM many_docs | ENRICH songs | STATS count(*) BY artist")) {
List<List<Object>> values = EsqlTestUtils.getValuesList(resp);
Map<String, Long> actual = new HashMap<>();
for (List<Object> value : values) {
actual.merge((String) value.get(1), (Long) value.get(0), Long::sum);
}
Map<String, Long> expected = new HashMap<>();
for (Map.Entry<String, Long> e : songs.entrySet()) {
expected.merge(artists.get(e.getKey()), e.getValue(), Long::sum);
}
assertThat(actual, equalTo(expected));
}
}
public void testRejection() {
for (var ts : internalCluster().getInstances(TransportService.class)) {
((MockTransportService) ts).addRequestHandlingBehavior(EnrichLookupService.LOOKUP_ACTION_NAME, (h, r, channel, t) -> {
EsRejectedExecutionException ex = new EsRejectedExecutionException("test", false);
channel.sendResponse(new RemoteTransportException("test", ex));
});
}
try {
String query = "FROM listen* | " + enrichSongCommand();
Exception error = expectThrows(Exception.class, () -> run(query).close());
assertThat(ExceptionsHelper.status(error), equalTo(RestStatus.TOO_MANY_REQUESTS));
} finally {
for (var ts : internalCluster().getInstances(TransportService.class)) {
((MockTransportService) ts).clearAllRules();
}
}
}
public static | InternalTransportSettingPlugin |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/tools/picocli/CommandLine.java | {
"start": 178057,
"end": 212281
} | class ____ superclass have a @Command annotation
if (cls.isAnnotationPresent(Command.class)) {
final Command cmd = cls.getAnnotation(Command.class);
if (DEFAULT_COMMAND_NAME.equals(commandName)) {
commandName = cmd.name();
}
separator = (separator == null) ? cmd.separator() : separator;
abbreviateSynopsis = (abbreviateSynopsis == null) ? cmd.abbreviateSynopsis() : abbreviateSynopsis;
sortOptions = (sortOptions == null) ? cmd.sortOptions() : sortOptions;
requiredOptionMarker =
(requiredOptionMarker == null) ? cmd.requiredOptionMarker() : requiredOptionMarker;
showDefaultValues = (showDefaultValues == null) ? cmd.showDefaultValues() : showDefaultValues;
customSynopsis = empty(customSynopsis) ? cmd.customSynopsis() : customSynopsis;
description = empty(description) ? cmd.description() : description;
header = empty(header) ? cmd.header() : header;
footer = empty(footer) ? cmd.footer() : footer;
headerHeading = empty(headerHeading) ? cmd.headerHeading() : headerHeading;
synopsisHeading = empty(synopsisHeading) || "Usage: ".equals(synopsisHeading)
? cmd.synopsisHeading()
: synopsisHeading;
descriptionHeading = empty(descriptionHeading) ? cmd.descriptionHeading() : descriptionHeading;
parameterListHeading =
empty(parameterListHeading) ? cmd.parameterListHeading() : parameterListHeading;
optionListHeading = empty(optionListHeading) ? cmd.optionListHeading() : optionListHeading;
commandListHeading = empty(commandListHeading) || "Commands:%n".equals(commandListHeading)
? cmd.commandListHeading()
: commandListHeading;
footerHeading = empty(footerHeading) ? cmd.footerHeading() : footerHeading;
}
cls = cls.getSuperclass();
}
sortOptions = (sortOptions == null) ? true : sortOptions;
abbreviateSynopsis = (abbreviateSynopsis == null) ? false : abbreviateSynopsis;
requiredOptionMarker = (requiredOptionMarker == null) ? ' ' : requiredOptionMarker;
showDefaultValues = (showDefaultValues == null) ? false : showDefaultValues;
synopsisHeading = (synopsisHeading == null) ? "Usage: " : synopsisHeading;
commandListHeading = (commandListHeading == null) ? "Commands:%n" : commandListHeading;
separator = (separator == null) ? DEFAULT_SEPARATOR : separator;
parameterLabelRenderer = createDefaultParamLabelRenderer(); // uses help separator
Collections.sort(operands, new PositionalParametersSorter());
positionalParametersFields = Collections.unmodifiableList(operands);
optionFields = Collections.unmodifiableList(options);
}
/** Registers all specified subcommands with this Help.
* @param commands maps the command names to the associated CommandLine object
* @return this Help instance (for method chaining)
* @see CommandLine#getSubcommands()
*/
public Help addAllSubcommands(final Map<String, CommandLine> commands) {
if (commands != null) {
for (final Map.Entry<String, CommandLine> entry : commands.entrySet()) {
addSubcommand(entry.getKey(), entry.getValue().getCommand());
}
}
return this;
}
/** Registers the specified subcommand with this Help.
* @param commandName the name of the subcommand to display in the usage message
* @param command the annotated object to get more information from
* @return this Help instance (for method chaining)
*/
public Help addSubcommand(final String commandName, final Object command) {
commands.put(commandName, new Help(command));
return this;
}
/** Returns a synopsis for the command without reserving space for the synopsis heading.
* @return a synopsis
* @see #abbreviatedSynopsis()
* @see #detailedSynopsis(Comparator, boolean)
* @deprecated use {@link #synopsis(int)} instead
*/
@Deprecated
public String synopsis() {
return synopsis(0);
}
/**
* Returns a synopsis for the command, reserving the specified space for the synopsis heading.
* @param synopsisHeadingLength the length of the synopsis heading that will be displayed on the same line
* @return a synopsis
* @see #abbreviatedSynopsis()
* @see #detailedSynopsis(Comparator, boolean)
* @see #synopsisHeading
*/
public String synopsis(final int synopsisHeadingLength) {
if (!empty(customSynopsis)) {
return customSynopsis();
}
return abbreviateSynopsis
? abbreviatedSynopsis()
: detailedSynopsis(synopsisHeadingLength, createShortOptionArityAndNameComparator(), true);
}
/** Generates a generic synopsis like {@code <command name> [OPTIONS] [PARAM1 [PARAM2]...]}, omitting parts
* that don't apply to the command (e.g., does not show [OPTIONS] if the command has no options).
* @return a generic synopsis */
public String abbreviatedSynopsis() {
    final StringBuilder tail = new StringBuilder();
    if (!optionFields.isEmpty()) { // only mention [OPTIONS] when the command actually has options
        tail.append(" [OPTIONS]");
    }
    // (an end-of-options "--" separator is implied and intentionally not rendered)
    for (final Field positionalParam : positionalParametersFields) {
        final Parameters parameters = positionalParam.getAnnotation(Parameters.class);
        if (parameters.hidden()) {
            continue; // hidden positionals are omitted from the synopsis
        }
        tail.append(' ');
        tail.append(parameterLabelRenderer.renderParameterLabel(
                positionalParam, ansi(), colorScheme.parameterStyles));
    }
    return colorScheme.commandText(commandName).toString()
            + tail
            + System.getProperty("line.separator");
}
/** Generates a detailed synopsis message showing all options and parameters. Follows the unix convention of
* showing optional options and parameters in square brackets ({@code [ ]}).
* @param optionSort comparator to sort options or {@code null} if options should not be sorted
* @param clusterBooleanOptions {@code true} if boolean short options should be clustered into a single string
* @return a detailed synopsis
* @deprecated use {@link #detailedSynopsis(int, Comparator, boolean)} instead. */
@Deprecated
public String detailedSynopsis(final Comparator<Field> optionSort, final boolean clusterBooleanOptions) {
    // Legacy overload: delegates with zero reserved heading width.
    return detailedSynopsis(0, optionSort, clusterBooleanOptions);
}
/** Generates a detailed synopsis message showing all options and parameters. Follows the unix convention of
* showing optional options and parameters in square brackets ({@code [ ]}).
* @param synopsisHeadingLength the length of the synopsis heading that will be displayed on the same line
* @param optionSort comparator to sort options or {@code null} if options should not be sorted
* @param clusterBooleanOptions {@code true} if boolean short options should be clustered into a single string
* @return a detailed synopsis */
public String detailedSynopsis(
        final int synopsisHeadingLength,
        final Comparator<Field> optionSort,
        final boolean clusterBooleanOptions) {
    Text optionText = ansi().new Text(0);
    final List<Field> fields = new ArrayList<>(optionFields); // iterate in declaration order
    if (optionSort != null) {
        Collections.sort(fields, optionSort); // iterate in specified sort order
    }
    if (clusterBooleanOptions) { // cluster all short boolean options into a single string
        final List<Field> booleanOptions = new ArrayList<>();
        final StringBuilder clusteredRequired = new StringBuilder("-");
        final StringBuilder clusteredOptional = new StringBuilder("-");
        for (final Field field : fields) {
            if (field.getType() == boolean.class || field.getType() == Boolean.class) {
                final Option option = field.getAnnotation(Option.class);
                // only single-character short names like "-x" can be clustered (e.g. "-xvf")
                final String shortestName = ShortestFirst.sort(option.names())[0];
                if (shortestName.length() == 2 && shortestName.startsWith("-")) {
                    booleanOptions.add(field);
                    if (option.required()) {
                        clusteredRequired.append(shortestName.substring(1));
                    } else {
                        clusteredOptional.append(shortestName.substring(1));
                    }
                }
            }
        }
        fields.removeAll(booleanOptions); // clustered options are not rendered individually below
        if (clusteredRequired.length() > 1) { // initial length was 1
            optionText = optionText.append(" ").append(colorScheme.optionText(clusteredRequired.toString()));
        }
        if (clusteredOptional.length() > 1) { // initial length was 1
            optionText = optionText
                    .append(" [")
                    .append(colorScheme.optionText(clusteredOptional.toString()))
                    .append("]");
        }
    }
    // Remaining options: required options are shown bare, optional ones in [ ];
    // multi-value options get a "..." repetition marker.
    for (final Field field : fields) {
        final Option option = field.getAnnotation(Option.class);
        if (!option.hidden()) {
            if (option.required()) {
                optionText =
                        appendOptionSynopsis(optionText, field, ShortestFirst.sort(option.names())[0], " ", "");
                if (isMultiValue(field)) {
                    // required multi-value: one mandatory occurrence plus optional repeats
                    optionText = appendOptionSynopsis(
                            optionText, field, ShortestFirst.sort(option.names())[0], " [", "]...");
                }
            } else {
                optionText = appendOptionSynopsis(
                        optionText, field, ShortestFirst.sort(option.names())[0], " [", "]");
                if (isMultiValue(field)) {
                    optionText = optionText.append("...");
                }
            }
        }
    }
    // Append labels for all non-hidden positional parameters, in declaration order.
    for (final Field positionalParam : positionalParametersFields) {
        if (!positionalParam.getAnnotation(Parameters.class).hidden()) {
            optionText = optionText.append(" ");
            final Text label = parameterLabelRenderer.renderParameterLabel(
                    positionalParam, colorScheme.ansi(), colorScheme.parameterStyles);
            optionText = optionText.append(label);
        }
    }
    // Fix for #142: first line of synopsis overshoots max. characters
    final int firstColumnLength = commandName.length() + synopsisHeadingLength;
    // synopsis heading ("Usage: ") may be on the same line, so adjust column width
    final TextTable textTable = new TextTable(ansi(), firstColumnLength, usageHelpWidth - firstColumnLength);
    textTable.indentWrappedLines =
            1; // don't worry about first line: options (2nd column) always start with a space
    // right-adjust the command name by length of synopsis heading
    final Text PADDING = Ansi.OFF.new Text(stringOf('X', synopsisHeadingLength));
    textTable.addRowValues(PADDING.append(colorScheme.commandText(commandName)), optionText);
    return textTable.toString().substring(synopsisHeadingLength); // cut off leading synopsis heading spaces
}
// Appends one option (with its rendered parameter label) to the synopsis text,
// wrapped in the given prefix/suffix (e.g. " [" / "]" for optional options).
private Text appendOptionSynopsis(
        final Text optionText,
        final Field field,
        final String optionName,
        final String prefix,
        final String suffix) {
    final Text paramLabel = parameterLabelRenderer.renderParameterLabel(
            field, colorScheme.ansi(), colorScheme.optionParamStyles);
    Text result = optionText.append(prefix);
    result = result.append(colorScheme.optionText(optionName));
    result = result.append(paramLabel);
    return result.append(suffix);
}
/** Returns the number of characters the synopsis heading will take on the same line as the synopsis.
* @return the number of characters the synopsis heading will take on the same line as the synopsis.
* @see #detailedSynopsis(int, Comparator, boolean)
*/
public int synopsisHeadingLength() {
    // Strip ANSI markup, then split on any line-break style (\r\n, \n, \r) or the literal
    // "%n" format specifier; limit -1 keeps trailing empty segments, so a heading that
    // ends with a newline contributes 0 characters to the synopsis line.
    final String[] lines = Ansi.OFF.new Text(synopsisHeading).toString().split("\\r?\\n|\\r|%n", -1);
    return lines[lines.length - 1].length();
}
/**
* <p>Returns a description of the {@linkplain Option options} supported by the application.
* This implementation {@linkplain #createShortOptionNameComparator() sorts options alphabetically}, and shows
* only the {@linkplain Option#hidden() non-hidden} options in a {@linkplain TextTable tabular format}
* using the {@linkplain #createDefaultOptionRenderer() default renderer} and {@linkplain Layout default layout}.</p>
* @return the fully formatted option list
* @see #optionList(Layout, Comparator, IParamLabelRenderer)
*/
public String optionList() {
    // Default (sortOptions unset or true): alphabetic order; null comparator = declaration order.
    Comparator<Field> sortOrder = null;
    if (sortOptions == null || sortOptions.booleanValue()) {
        sortOrder = createShortOptionNameComparator();
    }
    return optionList(createDefaultLayout(), sortOrder, parameterLabelRenderer);
}
/** Sorts all {@code Options} with the specified {@code comparator} (if the comparator is non-{@code null}),
* then {@linkplain Layout#addOption(Field, CommandLine.Help.IParamLabelRenderer) adds} all non-hidden options to the
* specified TextTable and returns the result of TextTable.toString().
* @param layout responsible for rendering the option list
* @param optionSort determines in what order {@code Options} should be listed. Declared order if {@code null}
* @param valueLabelRenderer used for options with a parameter
* @return the fully formatted option list
*/
public String optionList(
        final Layout layout, final Comparator<Field> optionSort, final IParamLabelRenderer valueLabelRenderer) {
    // Work on a copy so the declaration-order master list is never reordered.
    final List<Field> sorted = new ArrayList<>(optionFields);
    if (optionSort != null) {
        Collections.sort(sorted, optionSort);
    }
    layout.addOptions(sorted, valueLabelRenderer);
    return layout.toString();
}
/**
* Returns the section of the usage help message that lists the parameters with their descriptions.
* @return the section of the usage help message that lists the parameters
*/
public String parameterList() {
    // Uses the default layout and the label renderer configured on this Help instance.
    return parameterList(createDefaultLayout(), parameterLabelRenderer);
}
/**
* Returns the section of the usage help message that lists the parameters with their descriptions.
* @param layout the layout to use
* @param paramLabelRenderer for rendering parameter names
* @return the section of the usage help message that lists the parameters
*/
public String parameterList(final Layout layout, final IParamLabelRenderer paramLabelRenderer) {
    // Rendering of each positional parameter field is delegated to the layout.
    layout.addPositionalParameters(positionalParametersFields, paramLabelRenderer);
    return layout.toString();
}
/** Formats the given heading text, removes a single trailing line separator (so the
 * following section can continue on the same line), and re-appends the trailing spaces
 * of the original value, which the table-based rendering in
 * {@link #join(Ansi, String[], StringBuilder, Object...)} strips.
 * <p>Rewritten to look up {@code line.separator} once and to replace the nested
 * ternary with a plain conditional.</p>
 * @param ansi whether the result should contain ANSI escape codes or not
 * @param values the heading format string
 * @param params arguments referenced by format specifiers in {@code values}
 * @return the formatted heading */
private static String heading(final Ansi ansi, final String values, final Object... params) {
    String result = join(ansi, new String[] {values}, new StringBuilder(), params).toString();
    final String lineSeparator = System.getProperty("line.separator");
    if (result.endsWith(lineSeparator)) { // drop exactly one trailing line separator
        result = result.substring(0, result.length() - lineSeparator.length());
    }
    // restore trailing spaces that were present in the original heading value
    return result + new String(spaces(countTrailingSpaces(values)));
}
/** Returns a new char array of the given length filled entirely with space characters. */
private static char[] spaces(final int length) {
    final char[] result = new char[length];
    Arrays.fill(result, ' ');
    return result;
}
/** Counts the consecutive space characters at the end of the given string (0 for null). */
private static int countTrailingSpaces(final String str) {
    if (str == null) {
        return 0;
    }
    int lastNonSpace = str.length() - 1;
    while (lastNonSpace >= 0 && str.charAt(lastNonSpace) == ' ') {
        lastNonSpace--;
    }
    return str.length() - 1 - lastNonSpace;
}
/** Formats each of the specified values and appends it to the specified StringBuilder.
* @param ansi whether the result should contain ANSI escape codes or not
* @param values the values to format and append to the StringBuilder
* @param sb the StringBuilder to collect the formatted strings
* @param params the parameters to pass to the format method when formatting each value
* @return the specified StringBuilder */
public static StringBuilder join(
        final Ansi ansi, final String[] values, final StringBuilder sb, final Object... params) {
    if (values != null) {
        // Route every formatted value through a TextTable so long lines are wrapped at usageHelpWidth.
        final TextTable table = new TextTable(ansi, usageHelpWidth);
        table.indentWrappedLines = 0;
        for (final String summaryLine : values) {
            // format() applies params, then each embedded line break becomes its own table row
            final Text[] lines = ansi.new Text(format(summaryLine, params)).splitLines();
            for (final Text line : lines) {
                table.addRowValues(line);
            }
        }
        table.toString(sb); // append the rendered rows to the caller's StringBuilder
    }
    return sb;
}
/** Null-safe wrapper around {@link String#format(String, Object...)}: null yields "". */
private static String format(final String formatString, final Object... params) {
    if (formatString == null) {
        return "";
    }
    return String.format(formatString, params);
}
/** Returns command custom synopsis as a string. A custom synopsis can be zero or more lines, and can be
* specified declaratively with the {@link Command#customSynopsis()} annotation attribute or programmatically
* by setting the Help instance's {@link Help#customSynopsis} field.
* @param params Arguments referenced by the format specifiers in the synopsis strings
* @return the custom synopsis lines combined into a single String (which may be empty)
*/
public String customSynopsis(final Object... params) {
    // Joins all custom synopsis lines, wrapped at usageHelpWidth.
    return join(ansi(), customSynopsis, new StringBuilder(), params).toString();
}
/** Returns command description text as a string. Description text can be zero or more lines, and can be specified
* declaratively with the {@link Command#description()} annotation attribute or programmatically by
* setting the Help instance's {@link Help#description} field.
* @param params Arguments referenced by the format specifiers in the description strings
* @return the description lines combined into a single String (which may be empty)
*/
public String description(final Object... params) {
    // Joins all description lines, wrapped at usageHelpWidth.
    return join(ansi(), description, new StringBuilder(), params).toString();
}
/** Returns the command header text as a string. Header text can be zero or more lines, and can be specified
* declaratively with the {@link Command#header()} annotation attribute or programmatically by
* setting the Help instance's {@link Help#header} field.
* @param params Arguments referenced by the format specifiers in the header strings
* @return the header lines combined into a single String (which may be empty)
*/
public String header(final Object... params) {
    // Joins all header lines, wrapped at usageHelpWidth.
    return join(ansi(), header, new StringBuilder(), params).toString();
}
/** Returns command footer text as a string. Footer text can be zero or more lines, and can be specified
* declaratively with the {@link Command#footer()} annotation attribute or programmatically by
* setting the Help instance's {@link Help#footer} field.
* @param params Arguments referenced by the format specifiers in the footer strings
* @return the footer lines combined into a single String (which may be empty)
*/
public String footer(final Object... params) {
    // Joins all footer lines, wrapped at usageHelpWidth.
    return join(ansi(), footer, new StringBuilder(), params).toString();
}
/** Returns the text displayed before the header text; the result of {@code String.format(headerHeading, params)}.
* @param params the parameters to use to format the header heading
* @return the formatted header heading */
public String headerHeading(final Object... params) {
    // Unconditional: the header heading is rendered even when empty.
    return heading(ansi(), headerHeading, params);
}
/** Returns the text displayed before the synopsis text; the result of {@code String.format(synopsisHeading, params)}.
* @param params the parameters to use to format the synopsis heading
* @return the formatted synopsis heading */
public String synopsisHeading(final Object... params) {
    // Unconditional: the synopsis heading is always rendered.
    return heading(ansi(), synopsisHeading, params);
}
/** Returns the text displayed before the description text; an empty string if there is no description,
* otherwise the result of {@code String.format(descriptionHeading, params)}.
* @param params the parameters to use to format the description heading
* @return the formatted description heading */
public String descriptionHeading(final Object... params) {
    if (empty(descriptionHeading)) {
        return ""; // suppress the heading when there is no description heading text
    }
    return heading(ansi(), descriptionHeading, params);
}
/** Returns the text displayed before the positional parameter list; an empty string if there are no positional
* parameters, otherwise the result of {@code String.format(parameterListHeading, params)}.
* @param params the parameters to use to format the parameter list heading
* @return the formatted parameter list heading */
public String parameterListHeading(final Object... params) {
    if (positionalParametersFields.isEmpty()) {
        return ""; // no heading when the command has no positional parameters
    }
    return heading(ansi(), parameterListHeading, params);
}
/** Returns the text displayed before the option list; an empty string if there are no options,
* otherwise the result of {@code String.format(optionListHeading, params)}.
* @param params the parameters to use to format the option list heading
* @return the formatted option list heading */
public String optionListHeading(final Object... params) {
    if (optionFields.isEmpty()) {
        return ""; // no heading when the command has no options
    }
    return heading(ansi(), optionListHeading, params);
}
/** Returns the text displayed before the command list; an empty string if there are no commands,
* otherwise the result of {@code String.format(commandListHeading, params)}.
* @param params the parameters to use to format the command list heading
* @return the formatted command list heading */
public String commandListHeading(final Object... params) {
    if (commands.isEmpty()) {
        return ""; // no heading when no subcommands were registered
    }
    return heading(ansi(), commandListHeading, params);
}
/** Returns the text displayed before the footer text; the result of {@code String.format(footerHeading, params)}.
* @param params the parameters to use to format the footer heading
* @return the formatted footer heading */
public String footerHeading(final Object... params) {
    // Unconditional: the footer heading is rendered even when the footer is empty.
    return heading(ansi(), footerHeading, params);
}
/** Returns a 2-column list with command names and the first line of their header or (if absent) description.
* @return a usage help section describing the added commands */
public String commandList() {
    if (commands.isEmpty()) {
        return "";
    }
    // First column is sized to the longest command name (+2 padding); second column wraps.
    final int commandLength = maxLength(commands.keySet());
    final Help.TextTable textTable = new Help.TextTable(
            ansi(),
            new Help.Column(commandLength + 2, 2, Help.Column.Overflow.SPAN),
            new Help.Column(usageHelpWidth - (commandLength + 2), 2, Help.Column.Overflow.WRAP));
    for (final Map.Entry<String, Help> entry : commands.entrySet()) {
        final Help command = entry.getValue();
        // Summary column: first header line if present, else first description line, else blank.
        String summary = "";
        if (command.header != null && command.header.length > 0) {
            summary = command.header[0];
        } else if (command.description != null && command.description.length > 0) {
            summary = command.description[0];
        }
        textTable.addRowValues(colorScheme.commandText(entry.getKey()), ansi().new Text(summary));
    }
    return textTable.toString();
}
/** Returns the length of the longest string in the given collection (0 if it is empty).
 * <p>Previous implementation copied the collection and sorted it (longest first) just to
 * read the first element — O(n log n) plus an {@code IndexOutOfBoundsException} on empty
 * input. A single O(n) scan is simpler, faster, and safe for empty collections.</p>
 * @param any the strings to measure
 * @return the maximum string length, or 0 when the collection is empty */
private static int maxLength(final Collection<String> any) {
    int max = 0;
    for (final String s : any) {
        max = Math.max(max, s.length());
    }
    return max;
}
/** Joins names[offset .. offset+length) with the given separator; null input yields "". */
private static String join(final String[] names, final int offset, final int length, final String separator) {
    if (names == null) {
        return "";
    }
    final StringBuilder joined = new StringBuilder();
    for (int i = offset; i < offset + length; i++) {
        if (i > offset) { // separator goes between elements, not before the first
            joined.append(separator);
        }
        joined.append(names[i]);
    }
    return joined.toString();
}
/** Returns a string consisting of the given character repeated {@code length} times. */
private static String stringOf(final char chr, final int length) {
    final char[] buff = new char[length];
    Arrays.fill(buff, chr);
    return new String(buff);
}
/** Returns a {@code Layout} instance configured with the user preferences captured in this Help instance.
* @return a Layout */
public Layout createDefaultLayout() {
    // Combines this Help's color scheme with the default table, option and parameter renderers.
    return new Layout(
            colorScheme,
            new TextTable(colorScheme.ansi()),
            createDefaultOptionRenderer(),
            createDefaultParameterRenderer());
}
/** Returns a new default OptionRenderer which converts {@link Option Options} to five columns of text to match
* the default {@linkplain TextTable TextTable} column layout. The first row of values looks like this:
* <ol>
* <li>the required option marker</li>
* <li>2-character short option name (or empty string if no short option exists)</li>
* <li>comma separator (only if both short option and long option exist, empty string otherwise)</li>
* <li>comma-separated string with long option name(s)</li>
* <li>first element of the {@link Option#description()} array</li>
* </ol>
* <p>Following this, there will be one row for each of the remaining elements of the {@link
* Option#description()} array, and these rows look like {@code {"", "", "", "", option.description()[i]}}.</p>
* <p>If configured, this option renderer adds an additional row to display the default field value.</p>
* @return a new default OptionRenderer
*/
public IOptionRenderer createDefaultOptionRenderer() {
    final DefaultOptionRenderer renderer = new DefaultOptionRenderer();
    renderer.requiredMarker = String.valueOf(requiredOptionMarker);
    // Give the renderer access to the command object only when default values should be shown.
    final boolean showDefaults = showDefaultValues != null && showDefaultValues.booleanValue();
    if (showDefaults) {
        renderer.command = this.command;
    }
    return renderer;
}
/** Returns a new minimal OptionRenderer which converts {@link Option Options} to a single row with two columns
* of text: an option name and a description. If multiple names or descriptions exist, the first value is used.
* @return a new minimal OptionRenderer */
public static IOptionRenderer createMinimalOptionRenderer() {
    // Stateless renderer: a fresh instance per call, no Help configuration applied.
    return new MinimalOptionRenderer();
}
/** Returns a new default ParameterRenderer which converts {@link Parameters Parameters} to four columns of
* text to match the default {@linkplain TextTable TextTable} column layout. The first row of values looks like this:
* <ol>
* <li>empty string </li>
* <li>empty string </li>
* <li>parameter(s) label as rendered by the {@link IParamLabelRenderer}</li>
* <li>first element of the {@link Parameters#description()} array</li>
* </ol>
* <p>Following this, there will be one row for each of the remaining elements of the {@link
* Parameters#description()} array, and these rows look like {@code {"", "", "", param.description()[i]}}.</p>
* <p>If configured, this parameter renderer adds an additional row to display the default field value.</p>
* @return a new default ParameterRenderer
*/
public IParameterRenderer createDefaultParameterRenderer() {
    // Only the required-marker is carried over from this Help's configuration.
    final DefaultParameterRenderer renderer = new DefaultParameterRenderer();
    renderer.requiredMarker = String.valueOf(requiredOptionMarker);
    return renderer;
}
/** Returns a new minimal ParameterRenderer which converts {@link Parameters Parameters} to a single row with
 * two columns of text: the parameter(s) label and a description. If multiple descriptions exist, the first value is used.
* @return a new minimal ParameterRenderer */
public static IParameterRenderer createMinimalParameterRenderer() {
    // Stateless renderer: a fresh instance per call, no Help configuration applied.
    return new MinimalParameterRenderer();
}
/** Returns a value renderer that returns the {@code paramLabel} if defined or the field name otherwise.
* @return a new minimal ParamLabelRenderer */
public static IParamLabelRenderer createMinimalParamLabelRenderer() {
    return new IParamLabelRenderer() {
        @Override
        public Text renderParameterLabel(final Field field, final Ansi ansi, final List<IStyle> styles) {
            // paramLabel when defined, otherwise the field name (see DefaultParamLabelRenderer)
            return ansi.apply(DefaultParamLabelRenderer.renderParameterName(field), styles);
        }

        @Override
        public String separator() {
            return ""; // minimal renderer puts no separator between option and parameter
        }
    };
}
/** Returns a new default value renderer that separates option parameters from their {@linkplain Option
* options} with the specified separator string, surrounds optional parameters with {@code '['} and {@code ']'}
* characters and uses ellipses ("...") to indicate that any number of a parameter are allowed.
* @return a new default ParamLabelRenderer
*/
public IParamLabelRenderer createDefaultParamLabelRenderer() {
    // Uses this Help's configured separator between option name and parameter label.
    return new DefaultParamLabelRenderer(separator);
}
/** Sorts Fields annotated with {@code Option} by their option name in case-insensitive alphabetic order. If an
* Option has multiple names, the shortest name is used for the sorting. Help options follow non-help options.
* @return a comparator that sorts fields by their option name in case-insensitive alphabetic order */
public static Comparator<Field> createShortOptionNameComparator() {
    // Sorts by shortest option name, case-insensitive; help options sort after regular ones.
    return new SortByShortestOptionNameAlphabetically();
}
/** Sorts Fields annotated with {@code Option} by their option {@linkplain Range#max max arity} first, by
* {@linkplain Range#min min arity} next, and by {@linkplain #createShortOptionNameComparator() option name} last.
* @return a comparator that sorts fields by arity first, then their option name */
public static Comparator<Field> createShortOptionArityAndNameComparator() {
    // Sorts by max arity, then min arity, then shortest option name.
    return new SortByOptionArityAndNameAlphabetically();
}
/** Sorts short strings before longer strings.
* @return a comparator that sorts short strings before longer strings */
public static Comparator<String> shortestFirst() {
    // Orders strings by increasing length.
    return new ShortestFirst();
}
/** Returns whether ANSI escape codes are enabled or not.
* @return whether ANSI escape codes are enabled or not
*/
public Ansi ansi() {
    return colorScheme.ansi; // the Ansi mode captured in this Help's color scheme
}
/** When customizing online help for {@link Option Option} details, a custom {@code IOptionRenderer} can be
* used to create textual representation of an Option in a tabular format: one or more rows, each containing
* one or more columns. The {@link Layout Layout} is responsible for placing these text values in the
* {@link TextTable TextTable}. */
public | and |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/web/client/StatusHandler.java | {
"start": 1718,
"end": 6384
} | class ____ {
private final ResponsePredicate predicate;
private final RestClient.ResponseSpec.ErrorHandler handler;
private StatusHandler(ResponsePredicate predicate, RestClient.ResponseSpec.ErrorHandler handler) {
this.predicate = predicate;
this.handler = handler;
}
/**
* Test whether the response has any errors.
*/
public boolean test(ClientHttpResponse response) throws IOException {
return this.predicate.test(response);
}
/**
* Handle the error in the given response.
* <p>This method is only called when {@link #test(ClientHttpResponse)}
* has returned {@code true}.
ß */
public void handle(HttpRequest request, ClientHttpResponse response) throws IOException {
this.handler.handle(request, response);
}
/**
* Create a StatusHandler from a RestClient {@link RestClient.ResponseSpec.ErrorHandler}.
*/
public static StatusHandler of(
Predicate<HttpStatusCode> predicate, RestClient.ResponseSpec.ErrorHandler errorHandler) {
Assert.notNull(predicate, "Predicate must not be null");
Assert.notNull(errorHandler, "ErrorHandler must not be null");
return new StatusHandler(response -> predicate.test(response.getStatusCode()), errorHandler);
}
/**
* Create a StatusHandler from a {@link ResponseErrorHandler}.
*/
public static StatusHandler fromErrorHandler(ResponseErrorHandler errorHandler) {
Assert.notNull(errorHandler, "ResponseErrorHandler must not be null");
return new StatusHandler(errorHandler::hasError,
(request, response) -> errorHandler.handleError(request.getURI(), request.getMethod(), response));
}
/**
* Create a StatusHandler for default error response handling.
*/
public static StatusHandler createDefaultStatusHandler(List<HttpMessageConverter<?>> converters) {
return new StatusHandler(response -> response.getStatusCode().isError(),
(request, response) -> {
throw createException(response, converters);
});
}
/**
* Create a {@link RestClientResponseException} of the appropriate
* subtype depending on the response status code.
* @param response the response
* @param converters the converters to use to decode the body
* @return the created exception
* @throws IOException in case of a response failure (e.g. to obtain the status)
* @since 7.0
*/
public static RestClientResponseException createException(
ClientHttpResponse response, List<HttpMessageConverter<?>> converters) throws IOException {
HttpStatusCode statusCode = response.getStatusCode();
String statusText = response.getStatusText();
HttpHeaders headers = response.getHeaders();
byte[] body = RestClientUtils.getBody(response);
Charset charset = RestClientUtils.getCharset(response);
String message = getErrorMessage(statusCode.value(), statusText, body, charset);
RestClientResponseException ex;
if (statusCode.is4xxClientError()) {
ex = HttpClientErrorException.create(message, statusCode, statusText, headers, body, charset);
}
else if (statusCode.is5xxServerError()) {
ex = HttpServerErrorException.create(message, statusCode, statusText, headers, body, charset);
}
else {
ex = new UnknownHttpStatusCodeException(message, statusCode.value(), statusText, headers, body, charset);
}
if (!CollectionUtils.isEmpty(converters)) {
ex.setBodyConvertFunction(initBodyConvertFunction(response, body, converters));
}
return ex;
}
private static String getErrorMessage(
int rawStatusCode, String statusText, byte @Nullable [] responseBody, @Nullable Charset charset) {
String preface = rawStatusCode + " " + statusText + ": ";
if (ObjectUtils.isEmpty(responseBody)) {
return preface + "[no body]";
}
charset = (charset != null ? charset : StandardCharsets.UTF_8);
String bodyText = new String(responseBody, charset);
bodyText = LogFormatUtils.formatValue(bodyText, -1, true);
return preface + bodyText;
}
@SuppressWarnings("NullAway")
private static Function<ResolvableType, ? extends @Nullable Object> initBodyConvertFunction(
ClientHttpResponse response, byte[] body, List<HttpMessageConverter<?>> messageConverters) {
return resolvableType -> {
try {
HttpMessageConverterExtractor<?> extractor =
new HttpMessageConverterExtractor<>(resolvableType.getType(), messageConverters);
return extractor.extractData(new ClientHttpResponseDecorator(response) {
@Override
public InputStream getBody() {
return new ByteArrayInputStream(body);
}
});
}
catch (IOException ex) {
throw new RestClientException("Error while extracting response for type [" + resolvableType + "]", ex);
}
};
}
@FunctionalInterface
private | StatusHandler |
java | netty__netty | codec-base/src/main/java/io/netty/handler/codec/DefaultHeaders.java | {
"start": 37890,
"end": 38486
} | class ____ implements Iterator<Entry<K, V>> {
private HeaderEntry<K, V> current = head;
@Override
public boolean hasNext() {
return current.after != head;
}
@Override
public Entry<K, V> next() {
current = current.after;
if (current == head) {
throw new NoSuchElementException();
}
return current;
}
@Override
public void remove() {
throw new UnsupportedOperationException("read only");
}
}
private final | HeaderIterator |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/state/metrics/MetricsTrackingMapState.java | {
"start": 15619,
"end": 21904
} | class ____ extends StateMetricBase {
private static final String MAP_STATE_GET_LATENCY = "mapStateGetLatency";
private static final String MAP_STATE_PUT_LATENCY = "mapStatePutLatency";
private static final String MAP_STATE_PUT_ALL_LATENCY = "mapStatePutAllLatency";
private static final String MAP_STATE_REMOVE_LATENCY = "mapStateRemoveLatency";
private static final String MAP_STATE_CONTAINS_LATENCY = "mapStateContainsLatency";
private static final String MAP_STATE_ENTRIES_INIT_LATENCY = "mapStateEntriesInitLatency";
private static final String MAP_STATE_KEYS_INIT_LATENCY = "mapStateKeysInitLatency";
private static final String MAP_STATE_VALUES_INIT_LATENCY = "mapStateValuesInitLatency";
private static final String MAP_STATE_ITERATOR_INIT_LATENCY = "mapStateIteratorInitLatency";
private static final String MAP_STATE_IS_EMPTY_LATENCY = "mapStateIsEmptyLatency";
private static final String MAP_STATE_ITERATOR_HAS_NEXT_LATENCY =
"mapStateIteratorHasNextLatency";
private static final String MAP_STATE_ITERATOR_NEXT_LATENCY = "mapStateIteratorNextLatency";
private static final String MAP_STATE_ITERATOR_REMOVE_LATENCY =
"mapStateIteratorRemoveLatency";
private static final String MAP_STATE_GET_KEY_SIZE = "mapStateGetKeySize";
private static final String MAP_STATE_GET_VALUE_SIZE = "mapStateGetValueSize";
private static final String MAP_STATE_PUT_KEY_SIZE = "mapStatePutKeySize";
private static final String MAP_STATE_PUT_VALUE_SIZE = "mapStatePutValueSize";
private static final String MAP_STATE_ITERATOR_KEY_SIZE = "mapStateIteratorKeySize";
private static final String MAP_STATE_ITERATOR_VALUE_SIZE = "mapStateIteratorValueSize";
private static final String MAP_STATE_REMOVE_KEY_SIZE = "mapStateRemoveKeySize";
private static final String MAP_STATE_CONTAINS_KEY_SIZE = "mapStateContainsKeySize";
private static final String MAP_STATE_IS_EMPTY_KEY_SIZE = "mapStateIsEmptyKeySize";
private int getCount = 0;
private int iteratorRemoveCount = 0;
private int putCount = 0;
private int putAllCount = 0;
private int removeCount = 0;
private int containsCount = 0;
private int entriesInitCount = 0;
private int keysInitCount = 0;
private int valuesInitCount = 0;
private int isEmptyCount = 0;
private int iteratorInitCount = 0;
private int iteratorHasNextCount = 0;
private int iteratorNextCount = 0;
private MapStateMetrics(
String stateName,
MetricGroup metricGroup,
int sampleInterval,
int historySize,
boolean stateNameAsVariable) {
super(stateName, metricGroup, sampleInterval, historySize, stateNameAsVariable);
}
int getGetCount() {
return getCount;
}
int getIteratorRemoveCount() {
return iteratorRemoveCount;
}
int getPutCount() {
return putCount;
}
int getPutAllCount() {
return putAllCount;
}
int getRemoveCount() {
return removeCount;
}
int getContainsCount() {
return containsCount;
}
int getEntriesInitCount() {
return entriesInitCount;
}
int getKeysInitCount() {
return keysInitCount;
}
int getValuesInitCount() {
return valuesInitCount;
}
int getIsEmptyCount() {
return isEmptyCount;
}
int getIteratorInitCount() {
return iteratorInitCount;
}
int getIteratorHasNextCount() {
return iteratorHasNextCount;
}
@VisibleForTesting
void resetIteratorHasNextCount() {
iteratorHasNextCount = 0;
}
int getIteratorNextCount() {
return iteratorNextCount;
}
private boolean trackMetricsOnGet() {
getCount = loopUpdateCounter(getCount);
return getCount == 1;
}
private boolean trackMetricsOnPut() {
putCount = loopUpdateCounter(putCount);
return putCount == 1;
}
private boolean trackMetricsOnPutAll() {
putAllCount = loopUpdateCounter(putAllCount);
return putAllCount == 1;
}
private boolean trackMetricsOnRemove() {
removeCount = loopUpdateCounter(removeCount);
return removeCount == 1;
}
private boolean trackMetricsOnContains() {
containsCount = loopUpdateCounter(containsCount);
return containsCount == 1;
}
private boolean trackMetricsOnEntriesInit() {
entriesInitCount = loopUpdateCounter(entriesInitCount);
return entriesInitCount == 1;
}
private boolean trackMetricsOnKeysInit() {
keysInitCount = loopUpdateCounter(keysInitCount);
return keysInitCount == 1;
}
private boolean trackMetricsOnValuesInit() {
valuesInitCount = loopUpdateCounter(valuesInitCount);
return valuesInitCount == 1;
}
private boolean trackMetricsOnIteratorInit() {
iteratorInitCount = loopUpdateCounter(iteratorInitCount);
return iteratorInitCount == 1;
}
private boolean trackMetricsOnIsEmpty() {
isEmptyCount = loopUpdateCounter(isEmptyCount);
return isEmptyCount == 1;
}
private boolean trackMetricsOnIteratorHasNext() {
iteratorHasNextCount = loopUpdateCounter(iteratorHasNextCount);
return iteratorHasNextCount == 1;
}
private boolean trackMetricsOnIteratorNext() {
iteratorNextCount = loopUpdateCounter(iteratorNextCount);
return iteratorNextCount == 1;
}
private boolean trackMetricsOnIteratorRemove() {
iteratorRemoveCount = loopUpdateCounter(iteratorRemoveCount);
return iteratorRemoveCount == 1;
}
}
}
| MapStateMetrics |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/http/codec/json/AbstractJackson2Encoder.java | {
"start": 2600,
"end": 3029
} | class ____ support methods for Jackson 2.x encoding. For non-streaming use
* cases, {@link Flux} elements are collected into a {@link List} before serialization for
* performance reasons.
*
* @author Sebastien Deleuze
* @author Arjen Poutsma
* @since 5.0
* @deprecated since 7.0 in favor of {@link AbstractJacksonEncoder}
*/
@Deprecated(since = "7.0", forRemoval = true)
@SuppressWarnings("removal")
public abstract | providing |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java | {
"start": 61126,
"end": 61811
} | class
____ t = clazz.getGenericSuperclass();
Type parameter = getParameterTypeFromGenericType(baseClass, typeHierarchy, t, pos);
if (parameter != null) {
return parameter;
}
throw new InvalidTypesException(
"The types of the interface "
+ baseClass.getName()
+ " could not be inferred. "
+ "Support for synthetic interfaces, lambdas, and generic or raw types is limited at this point");
}
private static Type getParameterTypeFromGenericType(
Class<?> baseClass, List<Type> typeHierarchy, Type t, int pos) {
// base | Type |
java | apache__dubbo | dubbo-rpc/dubbo-rpc-api/src/test/java/org/apache/dubbo/rpc/TimeoutCountDownTest.java | {
"start": 949,
"end": 1800
} | class ____ {
@Test
void testTimeoutCountDown() throws InterruptedException {
TimeoutCountDown timeoutCountDown = TimeoutCountDown.newCountDown(5, TimeUnit.SECONDS);
Assertions.assertEquals(5 * 1000, timeoutCountDown.getTimeoutInMilli());
Assertions.assertFalse(timeoutCountDown.isExpired());
Assertions.assertTrue(timeoutCountDown.timeRemaining(TimeUnit.SECONDS) > 0);
Assertions.assertTrue(timeoutCountDown.elapsedMillis() < TimeUnit.MILLISECONDS.convert(5, TimeUnit.SECONDS));
Thread.sleep(6 * 1000);
Assertions.assertTrue(timeoutCountDown.isExpired());
Assertions.assertTrue(timeoutCountDown.timeRemaining(TimeUnit.SECONDS) <= 0);
Assertions.assertTrue(timeoutCountDown.elapsedMillis() > TimeUnit.MILLISECONDS.convert(5, TimeUnit.SECONDS));
}
}
| TimeoutCountDownTest |
java | apache__dubbo | dubbo-common/src/main/java/org/apache/dubbo/config/annotation/ProvidedBy.java | {
"start": 1221,
"end": 1339
} | interface ____ {
* String sayHello(String name);
* }
*
* @Component("annotatedConsumer")
* public | GreetingService |
java | micronaut-projects__micronaut-core | http-server-tck/src/main/java/io/micronaut/http/server/tck/tests/filter/RequestFilterCompletionStageFutureProceedTest.java | {
"start": 2673,
"end": 3238
} | class ____ {
//tag::methods[]
@RequestFilter
CompletionStage<@Nullable HttpResponse<?>> filter(@NonNull HttpRequest<?> request) {
if (request.getHeaders().contains("X-FOOBAR")) {
// proceed
return CompletableFuture.completedFuture(null);
} else {
return CompletableFuture.completedFuture(HttpResponse.unauthorized());
}
}
}
//end::methods[]
@Requires(property = "spec.name", value = SPEC_NAME)
@Controller("/foobar")
static | FooBarFilter |
java | apache__flink | flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/window/grouping/HeapWindowsGrouping.java | {
"start": 3121,
"end": 3753
} | class ____ implements RowIterator<BinaryRowData> {
private final Iterator<BinaryRowData> iterator;
private BinaryRowData next;
BufferIterator(Iterator<BinaryRowData> iterator) {
this.iterator = iterator;
}
@Override
public boolean advanceNext() {
if (iterator.hasNext()) {
next = iterator.next();
return true;
} else {
next = null;
return false;
}
}
@Override
public BinaryRowData getRow() {
return next;
}
}
}
| BufferIterator |
java | apache__flink | flink-table/flink-table-runtime/src/test/java/org/apache/flink/table/runtime/operators/join/Int2HashJoinOperatorTest.java | {
"start": 1135,
"end": 10253
} | class ____ extends Int2HashJoinOperatorTestBase {
// ---------------------- build first inner join -----------------------------------------
@Test
protected void testBuildFirstHashInnerJoin() throws Exception {
int numKeys = 100;
int buildValsPerKey = 3;
int probeValsPerKey = 10;
MutableObjectIterator<BinaryRowData> buildInput =
new UniformBinaryRowGenerator(numKeys, buildValsPerKey, false);
MutableObjectIterator<BinaryRowData> probeInput =
new UniformBinaryRowGenerator(numKeys, probeValsPerKey, true);
buildJoin(
buildInput,
probeInput,
false,
false,
true,
numKeys * buildValsPerKey * probeValsPerKey,
numKeys,
165);
}
// ---------------------- build first left out join -----------------------------------------
@Test
protected void testBuildFirstHashLeftOutJoin() throws Exception {
int numKeys1 = 9;
int numKeys2 = 10;
int buildValsPerKey = 3;
int probeValsPerKey = 10;
MutableObjectIterator<BinaryRowData> buildInput =
new UniformBinaryRowGenerator(numKeys1, buildValsPerKey, true);
MutableObjectIterator<BinaryRowData> probeInput =
new UniformBinaryRowGenerator(numKeys2, probeValsPerKey, true);
buildJoin(
buildInput,
probeInput,
true,
false,
true,
numKeys1 * buildValsPerKey * probeValsPerKey,
numKeys1,
165);
}
// ---------------------- build first right out join -----------------------------------------
@Test
protected void testBuildFirstHashRightOutJoin() throws Exception {
int numKeys1 = 9;
int numKeys2 = 10;
int buildValsPerKey = 3;
int probeValsPerKey = 10;
MutableObjectIterator<BinaryRowData> buildInput =
new UniformBinaryRowGenerator(numKeys1, buildValsPerKey, true);
MutableObjectIterator<BinaryRowData> probeInput =
new UniformBinaryRowGenerator(numKeys2, probeValsPerKey, true);
buildJoin(buildInput, probeInput, false, true, true, 280, numKeys2, -1);
}
// ---------------------- build first full out join -----------------------------------------
@Test
protected void testBuildFirstHashFullOutJoin() throws Exception {
int numKeys1 = 9;
int numKeys2 = 10;
int buildValsPerKey = 3;
int probeValsPerKey = 10;
MutableObjectIterator<BinaryRowData> buildInput =
new UniformBinaryRowGenerator(numKeys1, buildValsPerKey, true);
MutableObjectIterator<BinaryRowData> probeInput =
new UniformBinaryRowGenerator(numKeys2, probeValsPerKey, true);
buildJoin(buildInput, probeInput, true, true, true, 280, numKeys2, -1);
}
// ---------------------- build second inner join -----------------------------------------
@Test
protected void testBuildSecondHashInnerJoin() throws Exception {
int numKeys = 100;
int buildValsPerKey = 3;
int probeValsPerKey = 10;
MutableObjectIterator<BinaryRowData> buildInput =
new UniformBinaryRowGenerator(numKeys, buildValsPerKey, false);
MutableObjectIterator<BinaryRowData> probeInput =
new UniformBinaryRowGenerator(numKeys, probeValsPerKey, true);
buildJoin(
buildInput,
probeInput,
false,
false,
false,
numKeys * buildValsPerKey * probeValsPerKey,
numKeys,
165);
}
// ---------------------- build second left out join -----------------------------------------
@Test
protected void testBuildSecondHashLeftOutJoin() throws Exception {
int numKeys1 = 10;
int numKeys2 = 9;
int buildValsPerKey = 3;
int probeValsPerKey = 10;
MutableObjectIterator<BinaryRowData> buildInput =
new UniformBinaryRowGenerator(numKeys1, buildValsPerKey, true);
MutableObjectIterator<BinaryRowData> probeInput =
new UniformBinaryRowGenerator(numKeys2, probeValsPerKey, true);
buildJoin(
buildInput,
probeInput,
true,
false,
false,
numKeys2 * buildValsPerKey * probeValsPerKey,
numKeys2,
165);
}
// ---------------------- build second right out join -----------------------------------------
@Test
protected void testBuildSecondHashRightOutJoin() throws Exception {
int numKeys1 = 9;
int numKeys2 = 10;
int buildValsPerKey = 3;
int probeValsPerKey = 10;
MutableObjectIterator<BinaryRowData> buildInput =
new UniformBinaryRowGenerator(numKeys1, buildValsPerKey, true);
MutableObjectIterator<BinaryRowData> probeInput =
new UniformBinaryRowGenerator(numKeys2, probeValsPerKey, true);
buildJoin(
buildInput,
probeInput,
false,
true,
false,
numKeys1 * buildValsPerKey * probeValsPerKey,
numKeys2,
-1);
}
// ---------------------- build second full out join -----------------------------------------
@Test
protected void testBuildSecondHashFullOutJoin() throws Exception {
int numKeys1 = 9;
int numKeys2 = 10;
int buildValsPerKey = 3;
int probeValsPerKey = 10;
MutableObjectIterator<BinaryRowData> buildInput =
new UniformBinaryRowGenerator(numKeys1, buildValsPerKey, true);
MutableObjectIterator<BinaryRowData> probeInput =
new UniformBinaryRowGenerator(numKeys2, probeValsPerKey, true);
buildJoin(buildInput, probeInput, true, true, false, 280, numKeys2, -1);
}
@Test
protected void testSemiJoin() throws Exception {
int numKeys1 = 9;
int numKeys2 = 10;
int buildValsPerKey = 3;
int probeValsPerKey = 10;
MutableObjectIterator<BinaryRowData> buildInput =
new UniformBinaryRowGenerator(numKeys1, buildValsPerKey, true);
MutableObjectIterator<BinaryRowData> probeInput =
new UniformBinaryRowGenerator(numKeys2, probeValsPerKey, true);
Object operator =
newOperator(33 * 32 * 1024, FlinkJoinType.SEMI, HashJoinType.SEMI, false, false);
joinAndAssert(operator, buildInput, probeInput, 90, 9, 45, true);
}
@Test
protected void testAntiJoin() throws Exception {
int numKeys1 = 9;
int numKeys2 = 10;
int buildValsPerKey = 3;
int probeValsPerKey = 10;
MutableObjectIterator<BinaryRowData> buildInput =
new UniformBinaryRowGenerator(numKeys1, buildValsPerKey, true);
MutableObjectIterator<BinaryRowData> probeInput =
new UniformBinaryRowGenerator(numKeys2, probeValsPerKey, true);
Object operator =
newOperator(33 * 32 * 1024, FlinkJoinType.ANTI, HashJoinType.ANTI, false, false);
joinAndAssert(operator, buildInput, probeInput, 10, 1, 45, true);
}
@Test
protected void testBuildLeftSemiJoin() throws Exception {
int numKeys1 = 10;
int numKeys2 = 9;
int buildValsPerKey = 10;
int probeValsPerKey = 3;
MutableObjectIterator<BinaryRowData> buildInput =
new UniformBinaryRowGenerator(numKeys1, buildValsPerKey, true);
MutableObjectIterator<BinaryRowData> probeInput =
new UniformBinaryRowGenerator(numKeys2, probeValsPerKey, true);
Object operator =
newOperator(
33 * 32 * 1024,
FlinkJoinType.SEMI,
HashJoinType.BUILD_LEFT_SEMI,
true,
false);
joinAndAssert(operator, buildInput, probeInput, 90, 9, 45, true);
}
@Test
protected void testBuildLeftAntiJoin() throws Exception {
int numKeys1 = 10;
int numKeys2 = 9;
int buildValsPerKey = 10;
int probeValsPerKey = 3;
MutableObjectIterator<BinaryRowData> buildInput =
new UniformBinaryRowGenerator(numKeys1, buildValsPerKey, true);
MutableObjectIterator<BinaryRowData> probeInput =
new UniformBinaryRowGenerator(numKeys2, probeValsPerKey, true);
Object operator =
newOperator(
33 * 32 * 1024,
FlinkJoinType.ANTI,
HashJoinType.BUILD_LEFT_ANTI,
true,
false);
joinAndAssert(operator, buildInput, probeInput, 10, 1, 45, true);
}
}
| Int2HashJoinOperatorTest |
java | apache__camel | components/camel-jetty/src/test/java/org/apache/camel/component/jetty/MainHttpsRouteTest.java | {
"start": 1728,
"end": 4648
} | class ____ extends BaseJettyTest {
public static final String NULL_VALUE_MARKER = CamelTestSupport.class.getCanonicalName();
protected final Properties originalValues = new Properties();
@Override
public void doPostSetup() throws Exception {
URL trustStoreUrl = this.getClass().getClassLoader().getResource("jsse/localhost.p12");
setSystemProp("javax.net.ssl.trustStore", trustStoreUrl.toURI().getPath());
setSystemProp("javax.net.ssl.trustStorePassword", "changeit");
setSystemProp("javax.net.ssl.trustStoreType", "PKCS12");
}
@Override
public void doPostTearDown() throws Exception {
restoreSystemProperties();
}
@Test
public void testHelloEndpoint() throws Exception {
Main main = new Main();
main.configure().sslConfig().setEnabled(true);
main.configure().sslConfig().setKeyStore(
this.getClass().getClassLoader().getResource("jsse/localhost.p12").toString());
main.configure().sslConfig().setKeystorePassword("changeit");
main.configure().sslConfig().setClientAuthentication(ClientAuthentication.WANT.name());
main.addProperty("camel.component.jetty.useglobalsslcontextparameters", "true");
main.configure().addRoutesBuilder(new RouteBuilder() {
public void configure() {
Processor proc = exchange -> exchange.getMessage().setBody("<b>Hello World</b>");
from("jetty:https://localhost:" + port1 + "/hello").process(proc);
}
});
main.start();
try {
ByteArrayOutputStream os = new ByteArrayOutputStream();
URL url = new URL("https://localhost:" + port1 + "/hello");
HttpsURLConnection connection = (HttpsURLConnection) url.openConnection();
SSLContext ssl = SSLContext.getInstance("TLSv1.3");
ssl.init(null, null, null);
connection.setSSLSocketFactory(ssl.getSocketFactory());
InputStream is = connection.getInputStream();
is.transferTo(os);
String data = new String(os.toByteArray());
assertEquals("<b>Hello World</b>", data);
} finally {
main.stop();
}
}
protected void setSystemProp(String key, String value) {
String originalValue = System.setProperty(key, value);
originalValues.put(key, originalValue != null ? originalValue : NULL_VALUE_MARKER);
}
protected void restoreSystemProperties() {
for (Map.Entry<Object, Object> entry : originalValues.entrySet()) {
Object key = entry.getKey();
Object value = entry.getValue();
if (NULL_VALUE_MARKER.equals(value)) {
System.clearProperty((String) key);
} else {
System.setProperty((String) key, (String) value);
}
}
}
}
| MainHttpsRouteTest |
java | hibernate__hibernate-orm | tooling/hibernate-ant/src/main/java/org/hibernate/tool/hbm2ddl/SchemaExport.java | {
"start": 17688,
"end": 18216
} | class ____ implements TargetDescriptor {
private final EnumSet<TargetType> targetTypes;
private final ScriptTargetOutput scriptTarget;
public TargetDescriptorImpl(
EnumSet<TargetType> targetTypes,
ScriptTargetOutput scriptTarget) {
this.targetTypes = targetTypes;
this.scriptTarget = scriptTarget;
}
@Override
public EnumSet<TargetType> getTargetTypes() {
return targetTypes;
}
@Override
public ScriptTargetOutput getScriptTargetOutput() {
return scriptTarget;
}
}
}
| TargetDescriptorImpl |
java | elastic__elasticsearch | x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndex.java | {
"start": 5777,
"end": 14310
} | class ____ {
public final DocumentVersion version;
public final Supplier<SamlServiceProviderDocument> document;
public DocumentSupplier(DocumentVersion version, Supplier<SamlServiceProviderDocument> document) {
this.version = version;
this.document = CachedSupplier.wrap(document);
}
public SamlServiceProviderDocument getDocument() {
return document.get();
}
}
public SamlServiceProviderIndex(Client client, ClusterService clusterService) {
this.client = new OriginSettingClient(client, ClientHelper.IDP_ORIGIN);
this.clusterService = clusterService;
this.clusterStateListener = this::clusterChanged;
clusterService.addListener(clusterStateListener);
}
private void clusterChanged(ClusterChangedEvent clusterChangedEvent) {
final ClusterState state = clusterChangedEvent.state();
checkForAliasStateChange(state);
}
private void checkForAliasStateChange(ClusterState state) {
final IndexAbstraction aliasInfo = state.getMetadata().getProject().getIndicesLookup().get(ALIAS_NAME);
final boolean previousState = aliasExists;
this.aliasExists = aliasInfo != null;
if (aliasExists != previousState) {
logChangedAliasState(aliasInfo);
}
}
Index getIndex(ClusterState state) {
final ProjectMetadata project = state.getMetadata().getProject();
final SortedMap<String, IndexAbstraction> indicesLookup = project.getIndicesLookup();
IndexAbstraction indexAbstraction = indicesLookup.get(ALIAS_NAME);
if (indexAbstraction == null) {
indexAbstraction = indicesLookup.get(INDEX_NAME);
}
if (indexAbstraction == null) {
return null;
} else {
return indexAbstraction.getWriteIndex();
}
}
@Override
public void close() {
logger.debug("Closing ... removing cluster state listener");
clusterService.removeListener(clusterStateListener);
}
private void logChangedAliasState(IndexAbstraction aliasInfo) {
if (aliasInfo == null) {
logger.warn("service provider index/alias [{}] no longer exists", ALIAS_NAME);
} else if (aliasInfo.getType() != IndexAbstraction.Type.ALIAS) {
logger.warn("service provider index [{}] does not exist as an alias, but it should be", ALIAS_NAME);
} else if (aliasInfo.getIndices().size() != 1) {
logger.warn(
"service provider alias [{}] refers to multiple indices [{}] - this is unexpected and is likely to cause problems",
ALIAS_NAME,
Strings.collectionToCommaDelimitedString(aliasInfo.getIndices())
);
} else {
logger.info("service provider alias [{}] refers to [{}]", ALIAS_NAME, aliasInfo.getIndices().get(0));
}
}
public void deleteDocument(DocumentVersion version, WriteRequest.RefreshPolicy refreshPolicy, ActionListener<DeleteResponse> listener) {
final DeleteRequest request = new DeleteRequest(aliasExists ? ALIAS_NAME : INDEX_NAME).id(version.id)
.setIfSeqNo(version.seqNo)
.setIfPrimaryTerm(version.primaryTerm)
.setRefreshPolicy(refreshPolicy);
client.delete(request, listener.delegateFailureAndWrap((l, response) -> {
logger.debug("Deleted service provider document [{}] ({})", version.id, response.getResult());
l.onResponse(response);
}));
}
public void writeDocument(
SamlServiceProviderDocument document,
DocWriteRequest.OpType opType,
WriteRequest.RefreshPolicy refreshPolicy,
ActionListener<DocWriteResponse> listener
) {
final ValidationException exception = document.validate();
if (exception != null) {
listener.onFailure(exception);
return;
}
try (
ByteArrayOutputStream out = new ByteArrayOutputStream();
XContentBuilder xContentBuilder = new XContentBuilder(XContentType.JSON.xContent(), out)
) {
document.toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS);
// Due to the lack of "alias templates" (at the current time), we cannot write to the alias if it doesn't exist yet
// - that would cause the alias to be created as a concrete index, which is not what we want.
// So, until we know that the alias exists we have to write to the expected index name instead.
final IndexRequest request = new IndexRequest(aliasExists ? ALIAS_NAME : INDEX_NAME).opType(opType)
.source(xContentBuilder)
.id(document.docId)
.setRefreshPolicy(refreshPolicy);
client.index(request, listener.delegateFailureAndWrap((l, response) -> {
logger.debug(
"Wrote service provider [{}][{}] as document [{}] ({})",
document.name,
document.entityId,
response.getId(),
response.getResult()
);
l.onResponse(response);
}));
} catch (IOException e) {
listener.onFailure(e);
}
}
public void readDocument(String documentId, ActionListener<DocumentSupplier> listener) {
final GetRequest request = new GetRequest(ALIAS_NAME, documentId);
client.get(request, listener.delegateFailureAndWrap((l, response) -> {
if (response.isExists()) {
l.onResponse(
new DocumentSupplier(new DocumentVersion(response), () -> toDocument(documentId, response.getSourceAsBytesRef()))
);
} else {
l.onResponse(null);
}
}));
}
public void findByEntityId(String entityId, ActionListener<Set<DocumentSupplier>> listener) {
final QueryBuilder query = QueryBuilders.termQuery(SamlServiceProviderDocument.Fields.ENTITY_ID.getPreferredName(), entityId);
findDocuments(query, listener);
}
public void findAll(ActionListener<Set<DocumentSupplier>> listener) {
final QueryBuilder query = QueryBuilders.matchAllQuery();
findDocuments(query, listener);
}
public void refresh(ActionListener<Void> listener) {
client.admin()
.indices()
.refresh(new RefreshRequest(ALIAS_NAME), listener.delegateFailureAndWrap((l, response) -> l.onResponse(null)));
}
private void findDocuments(QueryBuilder query, ActionListener<Set<DocumentSupplier>> listener) {
logger.trace("Searching [{}] for [{}]", ALIAS_NAME, query);
final SearchRequest request = client.prepareSearch(ALIAS_NAME)
.setQuery(query)
.setSize(1000)
.setFetchSource(true)
.seqNoAndPrimaryTerm(true)
.request();
client.search(request, ActionListener.wrap(response -> {
if (logger.isTraceEnabled()) {
logger.trace("Search hits: [{}] [{}]", response.getHits().getTotalHits(), Arrays.toString(response.getHits().getHits()));
}
final Set<DocumentSupplier> docs = Stream.of(response.getHits().getHits())
.map(hit -> new DocumentSupplier(new DocumentVersion(hit), () -> toDocument(hit.getId(), hit.getSourceRef())))
.collect(Collectors.toUnmodifiableSet());
listener.onResponse(docs);
}, ex -> {
if (ex instanceof IndexNotFoundException) {
listener.onResponse(Set.of());
} else {
listener.onFailure(ex);
}
}));
}
private static SamlServiceProviderDocument toDocument(String documentId, BytesReference source) {
try (
XContentParser parser = XContentHelper.createParserNotCompressed(
LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG,
source,
XContentType.JSON
)
) {
return SamlServiceProviderDocument.fromXContent(documentId, parser);
} catch (IOException e) {
throw new UncheckedIOException("failed to parse document [" + documentId + "]", e);
}
}
@Override
public String toString() {
return getClass().getSimpleName() + "{alias=" + ALIAS_NAME + " [" + (aliasExists ? "exists" : "not-found") + "]}";
}
}
| DocumentSupplier |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/AbstractHeapMergingState.java | {
"start": 3561,
"end": 3904
} | class ____ implements StateTransformationFunction<SV, SV> {
@Override
public SV apply(SV targetState, SV merged) throws Exception {
if (targetState != null) {
return mergeState(targetState, merged);
} else {
return merged;
}
}
}
}
| MergeTransformation |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/NodeAttributesProvider.java | {
"start": 944,
"end": 1033
} | class ____ will be responsible for fetching the node attributes.
*
*/
public abstract | which |
java | apache__maven | compat/maven-model-builder/src/main/java/org/apache/maven/model/plugin/DefaultReportConfigurationExpander.java | {
"start": 1471,
"end": 2352
} | class ____ implements ReportConfigurationExpander {
@Override
public void expandPluginConfiguration(Model model, ModelBuildingRequest request, ModelProblemCollector problems) {
Reporting reporting = model.getReporting();
if (reporting != null) {
for (ReportPlugin reportPlugin : reporting.getPlugins()) {
Xpp3Dom parentDom = (Xpp3Dom) reportPlugin.getConfiguration();
if (parentDom != null) {
for (ReportSet execution : reportPlugin.getReportSets()) {
Xpp3Dom childDom = (Xpp3Dom) execution.getConfiguration();
childDom = Xpp3Dom.mergeXpp3Dom(childDom, new Xpp3Dom(parentDom));
execution.setConfiguration(childDom);
}
}
}
}
}
}
| DefaultReportConfigurationExpander |
java | google__guice | core/test/com/google/inject/ProvisionExceptionTest.java | {
"start": 14628,
"end": 14765
} | class ____ {
@Inject
public F() {
throw new ProvisionException("User Exception", new RuntimeException());
}
}
static | F |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/array/source/Scientist.java | {
"start": 200,
"end": 1033
} | class ____ {
//CHECKSTYLE:OFF
public String[] publicPublications;
public String[] publicPublicationYears;
//CHECKSTYLE:ON
private String name;
private String[] publications;
private String[] publicationYears;
public Scientist(String name) {
this.name = name;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String[] getPublications() {
return publications;
}
public void setPublications(String[] publications) {
this.publications = publications;
}
public String[] getPublicationYears() {
return publicationYears;
}
public void setPublicationYears(String[] publicationYears) {
this.publicationYears = publicationYears;
}
}
| Scientist |
java | eclipse-vertx__vert.x | vertx-core/src/main/java/io/vertx/core/net/PemTrustOptions.java | {
"start": 2199,
"end": 6140
} | class ____ implements TrustOptions, Cloneable {
private KeyStoreHelper helper;
private ArrayList<String> certPaths;
private ArrayList<Buffer> certValues;
/**
* Default constructor
*/
public PemTrustOptions() {
super();
this.certPaths = new ArrayList<>();
this.certValues = new ArrayList<>();
}
/**
* Copy constructor
*
* @param other the options to copy
*/
public PemTrustOptions(PemTrustOptions other) {
super();
this.certPaths = new ArrayList<>(other.getCertPaths());
this.certValues = new ArrayList<>(other.getCertValues());
}
/**
* Create options from JSON
*
* @param json the JSON
*/
public PemTrustOptions(JsonObject json) {
this();
PemTrustOptionsConverter.fromJson(json, this);
}
/**
* Convert to JSON
*
* @return the JSON
*/
public JsonObject toJson() {
JsonObject json = new JsonObject();
PemTrustOptionsConverter.toJson(this, json);
return json;
}
/**
* @return the certificate paths used to locate certificates
*/
public List<String> getCertPaths() {
return certPaths;
}
/**
* Add a certificate path
*
* @param certPath the path to add
* @return a reference to this, so the API can be used fluently
* @throws NullPointerException
*/
public PemTrustOptions addCertPath(String certPath) throws NullPointerException {
Objects.requireNonNull(certPath, "No null certificate accepted");
Arguments.require(!certPath.isEmpty(), "No empty certificate path accepted");
certPaths.add(certPath);
return this;
}
/**
*
* @return the certificate values
*/
public List<Buffer> getCertValues() {
return certValues;
}
/**
* Add a certificate value
*
* @param certValue the value to add
* @return a reference to this, so the API can be used fluently
* @throws NullPointerException
*/
public PemTrustOptions addCertValue(Buffer certValue) throws NullPointerException {
Objects.requireNonNull(certValue, "No null certificate accepted");
certValues.add(certValue);
return this;
}
/**
* Load and return a Java keystore.
*
* @param vertx the vertx instance
* @return the {@code KeyStore}
*/
public KeyStore loadKeyStore(Vertx vertx) throws Exception {
KeyStoreHelper helper = getHelper(vertx);
return helper != null ? helper.store() : null;
}
@Override
public TrustManagerFactory getTrustManagerFactory(Vertx vertx) throws Exception {
KeyStoreHelper helper = getHelper(vertx);
return helper != null ? helper.getTrustMgrFactory((VertxInternal) vertx) : null;
}
@Override
public Function<String, TrustManager[]> trustManagerMapper(Vertx vertx) throws Exception {
KeyStoreHelper helper = getHelper(vertx);
return helper != null ? helper::getTrustMgr : null;
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
if (obj != null && obj.getClass() == getClass()) {
PemTrustOptions that = (PemTrustOptions) obj;
return Objects.equals(certPaths, that.certPaths) && Objects.equals(certValues, that.certValues);
}
return false;
}
@Override
public int hashCode() {
int hashCode = Objects.hashCode(certPaths);
hashCode = 31 * hashCode + Objects.hashCode(certValues);
return hashCode;
}
@Override
public PemTrustOptions copy() {
return new PemTrustOptions(this);
}
public KeyStoreHelper getHelper(Vertx vertx) throws Exception {
if (helper == null) {
Stream<Buffer> certValues = certPaths.
stream().
map(path -> ((VertxInternal)vertx).fileResolver().resolve(path).getAbsolutePath()).
map(vertx.fileSystem()::readFileBlocking);
certValues = Stream.concat(certValues, this.certValues.stream());
helper = new KeyStoreHelper(KeyStoreHelper.loadCA(certValues), null, null);
}
return helper;
}
}
| PemTrustOptions |
java | netty__netty | codec-base/src/main/java/io/netty/handler/codec/MessageAggregationException.java | {
"start": 786,
"end": 1248
} | class ____ extends IllegalStateException {
private static final long serialVersionUID = -1995826182950310255L;
public MessageAggregationException() { }
public MessageAggregationException(String s) {
super(s);
}
public MessageAggregationException(String message, Throwable cause) {
super(message, cause);
}
public MessageAggregationException(Throwable cause) {
super(cause);
}
}
| MessageAggregationException |
java | google__guice | core/test/com/google/inject/BinderTest.java | {
"start": 14625,
"end": 17680
} | class ____ be constructed directly.
*/
public void testUntargettedBinding() {
Injector injector =
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
bind(HasProvidedBy1.class);
bind(HasImplementedBy1.class);
bind(HasProvidedBy2.class);
bind(HasImplementedBy2.class);
bind(JustAClass.class);
}
});
assertNotNull(injector.getInstance(HasProvidedBy1.class));
assertNotNull(injector.getInstance(HasImplementedBy1.class));
assertNotSame(HasProvidedBy2.class, injector.getInstance(HasProvidedBy2.class).getClass());
assertNotSame(
injector.getInstance(HasProvidedBy2.class), injector.getInstance(HasProvidedBy2.class));
assertSame(
ExtendsHasImplementedBy2.class, injector.getInstance(HasImplementedBy2.class).getClass());
assertSame(JustAClass.class, injector.getInstance(JustAClass.class).getClass());
}
public void testPartialInjectorGetInstance() {
Injector injector = Guice.createInjector();
try {
injector.getInstance(MissingParameter.class);
fail();
} catch (ConfigurationException expected) {
assertContains(
expected.getMessage(),
"No injectable constructor for type BinderTest$NoInjectConstructor.",
"at BinderTest$MissingParameter.<init>",
"for 1st parameter noInjectConstructor");
}
}
public void testUserReportedError() {
final Message message = new Message(getClass(), "Whoops!");
try {
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
addError(message);
}
});
fail();
} catch (CreationException expected) {
assertSame(message, Iterables.getOnlyElement(expected.getErrorMessages()));
}
}
public void testUserReportedErrorsAreAlsoLogged() {
try {
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
addError(new Message("Whoops!", new IllegalArgumentException()));
}
});
fail();
} catch (CreationException expected) {
}
LogRecord logRecord = Iterables.getOnlyElement(this.logRecords);
assertContains(
logRecord.getMessage(),
"An exception was caught and reported. Message: java.lang.IllegalArgumentException");
}
public void testBindingToProvider() {
try {
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
bind(new TypeLiteral<Provider<String>>() {}).toInstance(Providers.of("A"));
}
});
fail();
} catch (CreationException expected) {
assertContains(
expected.getMessage(),
"Binding to Provider is not allowed.",
"at BinderTest$28.configure");
}
}
static | should |
java | apache__kafka | storage/src/main/java/org/apache/kafka/storage/internals/log/OffsetIndex.java | {
"start": 2677,
"end": 10055
} | class ____ extends AbstractIndex {
private static final Logger log = LoggerFactory.getLogger(OffsetIndex.class);
private static final int ENTRY_SIZE = 8;
/* the last offset in the index */
private volatile long lastOffset;
public OffsetIndex(File file, long baseOffset) throws IOException {
this(file, baseOffset, -1);
}
public OffsetIndex(File file, long baseOffset, int maxIndexSize) throws IOException {
this(file, baseOffset, maxIndexSize, true);
}
public OffsetIndex(File file, long baseOffset, int maxIndexSize, boolean writable) throws IOException {
super(file, baseOffset, maxIndexSize, writable);
lastOffset = lastEntry().offset();
log.debug("Loaded index file {} with maxEntries = {}, maxIndexSize = {}, entries = {}, lastOffset = {}, file position = {}",
file.getAbsolutePath(), maxEntries(), maxIndexSize, entries(), lastOffset, mmap().position());
}
@Override
public void sanityCheck() {
if (entries() != 0 && lastOffset < baseOffset())
throw new CorruptIndexException("Corrupt index found, index file " + file().getAbsolutePath() + " has non-zero size " +
"but the last offset is " + lastOffset + " which is less than the base offset " + baseOffset());
if (length() % entrySize() != 0)
throw new CorruptIndexException("Index file " + file().getAbsolutePath() + " is corrupt, found " + length() +
" bytes which is neither positive nor a multiple of " + ENTRY_SIZE);
}
/**
* Find the largest offset less than or equal to the given targetOffset
* and return a pair holding this offset and its corresponding physical file position.
*
* @param targetOffset The offset to look up.
* @return The offset found and the corresponding file position for this offset
* If the target offset is smaller than the least entry in the index (or the index is empty),
* the pair (baseOffset, 0) is returned.
*/
public OffsetPosition lookup(long targetOffset) {
return inRemapReadLock(() -> {
ByteBuffer idx = mmap().duplicate();
int slot = largestLowerBoundSlotFor(idx, targetOffset, IndexSearchType.KEY);
if (slot == -1)
return new OffsetPosition(baseOffset(), 0);
else
return parseEntry(idx, slot);
});
}
/**
* Get the nth offset mapping from the index
* @param n The entry number in the index
* @return The offset/position pair at that entry
*/
public OffsetPosition entry(int n) {
return inRemapReadLock(() -> {
if (n >= entries())
throw new IllegalArgumentException("Attempt to fetch the " + n + "th entry from index " +
file().getAbsolutePath() + ", which has size " + entries());
return parseEntry(mmap(), n);
});
}
/**
* Find an upper bound offset for the given fetch starting position and size. This is an offset which
* is guaranteed to be outside the fetched range, but note that it will not generally be the smallest
* such offset.
*/
public Optional<OffsetPosition> fetchUpperBoundOffset(OffsetPosition fetchOffset, int fetchSize) {
return inRemapReadLock(() -> {
ByteBuffer idx = mmap().duplicate();
int slot = smallestUpperBoundSlotFor(idx, fetchOffset.position() + fetchSize, IndexSearchType.VALUE);
if (slot == -1)
return Optional.empty();
else
return Optional.of(parseEntry(idx, slot));
});
}
/**
* Append an entry for the given offset/location pair to the index. This entry must have a larger offset than all subsequent entries.
* @throws IndexOffsetOverflowException if the offset causes index offset to overflow
* @throws InvalidOffsetException if provided offset is not larger than the last offset
*/
public void append(long offset, int position) {
inLock(() -> {
if (isFull())
throw new IllegalArgumentException("Attempt to append to a full index (size = " + entries() + ").");
if (entries() == 0 || offset > lastOffset) {
log.trace("Adding index entry {} => {} to {}", offset, position, file().getAbsolutePath());
mmap().putInt(relativeOffset(offset));
mmap().putInt(position);
incrementEntries();
lastOffset = offset;
if (entries() * ENTRY_SIZE != mmap().position())
throw new IllegalStateException(entries() + " entries but file position in index is " + mmap().position());
} else
throw new InvalidOffsetException("Attempt to append an offset " + offset + " to position " + entries() +
" no larger than the last offset appended (" + lastOffset + ") to " + file().getAbsolutePath());
});
}
@Override
public void truncateTo(long offset) {
inLock(() -> {
ByteBuffer idx = mmap().duplicate();
int slot = largestLowerBoundSlotFor(idx, offset, IndexSearchType.KEY);
/* There are 3 cases for choosing the new size
* 1) if there is no entry in the index <= the offset, delete everything
* 2) if there is an entry for this exact offset, delete it and everything larger than it
* 3) if there is no entry for this offset, delete everything larger than the next smallest
*/
int newEntries;
if (slot < 0)
newEntries = 0;
else if (relativeOffset(idx, slot) == offset - baseOffset())
newEntries = slot;
else
newEntries = slot + 1;
truncateToEntries(newEntries);
});
}
public long lastOffset() {
return lastOffset;
}
@Override
public void truncate() {
truncateToEntries(0);
}
@Override
protected int entrySize() {
return ENTRY_SIZE;
}
@Override
protected OffsetPosition parseEntry(ByteBuffer buffer, int n) {
return new OffsetPosition(baseOffset() + relativeOffset(buffer, n), physical(buffer, n));
}
private int relativeOffset(ByteBuffer buffer, int n) {
return buffer.getInt(n * ENTRY_SIZE);
}
private int physical(ByteBuffer buffer, int n) {
return buffer.getInt(n * ENTRY_SIZE + 4);
}
/**
* Truncates index to a known number of entries.
*/
private void truncateToEntries(int entries) {
inLock(() -> {
super.truncateToEntries0(entries);
this.lastOffset = lastEntry().offset();
log.debug("Truncated index {} to {} entries; position is now {} and last offset is now {}",
file().getAbsolutePath(), entries, mmap().position(), lastOffset);
});
}
/**
* The last entry in the index
*/
private OffsetPosition lastEntry() {
return inRemapReadLock(() -> {
int entries = entries();
if (entries == 0)
return new OffsetPosition(baseOffset(), 0);
else
return parseEntry(mmap(), entries - 1);
});
}
}
| OffsetIndex |
java | elastic__elasticsearch | x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersAction.java | {
"start": 1974,
"end": 7936
} | class ____ extends HandledTransportAction<GetUsersRequest, GetUsersResponse> {
private final Settings settings;
private final NativeUsersStore usersStore;
private final ReservedRealm reservedRealm;
private final Authentication.RealmRef nativeRealmRef;
private final ProfileService profileService;
@Inject
public TransportGetUsersAction(
Settings settings,
ActionFilters actionFilters,
NativeUsersStore usersStore,
TransportService transportService,
ReservedRealm reservedRealm,
Realms realms,
ProfileService profileService
) {
super(GetUsersAction.NAME, transportService, actionFilters, GetUsersRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE);
this.settings = settings;
this.usersStore = usersStore;
this.reservedRealm = reservedRealm;
this.nativeRealmRef = realms.getNativeRealmRef();
this.profileService = profileService;
}
@Override
protected void doExecute(Task task, final GetUsersRequest request, final ActionListener<GetUsersResponse> listener) {
final String[] requestedUsers = request.usernames();
final boolean specificUsersRequested = requestedUsers != null && requestedUsers.length > 0;
final List<String> usersToSearchFor = new ArrayList<>();
final List<User> users = new ArrayList<>();
final List<String> realmLookup = new ArrayList<>();
if (specificUsersRequested) {
for (String username : requestedUsers) {
if (ClientReservedRealm.isReserved(username, settings)) {
realmLookup.add(username);
} else {
usersToSearchFor.add(username);
}
}
}
final ActionListener<Collection<Collection<User>>> sendingListener = ActionListener.wrap((userLists) -> {
users.addAll(userLists.stream().flatMap(Collection::stream).filter(Objects::nonNull).toList());
if (request.isWithProfileUid()) {
resolveProfileUids(
users,
ActionListener.wrap(
profileUidLookup -> listener.onResponse(new GetUsersResponse(users, profileUidLookup)),
listener::onFailure
)
);
} else {
listener.onResponse(new GetUsersResponse(users));
}
}, listener::onFailure);
final GroupedActionListener<Collection<User>> groupListener = new GroupedActionListener<>(2, sendingListener);
// We have two sources for the users object, the reservedRealm and the usersStore, we query both at the same time with a
// GroupedActionListener
if (realmLookup.isEmpty()) {
if (specificUsersRequested == false) {
// we get all users from the realm
reservedRealm.users(groupListener);
} else {
groupListener.onResponse(Collections.emptyList());// pass an empty list to inform the group listener
// - no real lookups necessary
}
} else {
// nested group listener action here - for each of the users we got and fetch it concurrently - once we are done we notify
// the "global" group listener.
GroupedActionListener<User> realmGroupListener = new GroupedActionListener<>(realmLookup.size(), groupListener);
for (String user : realmLookup) {
reservedRealm.lookupUser(user, realmGroupListener);
}
}
// user store lookups
if (specificUsersRequested && usersToSearchFor.isEmpty()) {
groupListener.onResponse(Collections.emptyList()); // no users requested notify
} else {
// go and get all users from the users store and pass it directly on to the group listener
usersStore.getUsers(usersToSearchFor.toArray(new String[usersToSearchFor.size()]), groupListener);
}
}
private void resolveProfileUids(List<User> users, ActionListener<Map<String, String>> listener) {
final List<Subject> subjects = users.stream().map(user -> {
if (user instanceof AnonymousUser) {
return new Subject(user, Authentication.RealmRef.newAnonymousRealmRef(Node.NODE_NAME_SETTING.get(settings)));
} else if (ClientReservedRealm.isReserved(user.principal(), settings)) {
return new Subject(user, reservedRealm.realmRef());
} else {
return new Subject(user, nativeRealmRef);
}
}).toList();
profileService.searchProfilesForSubjects(subjects, ActionListener.wrap(resultsAndErrors -> {
if (resultsAndErrors == null) {
// profile index does not exist
listener.onResponse(null);
} else if (resultsAndErrors.errors().isEmpty()) {
assert users.size() == resultsAndErrors.results().size();
final Map<String, String> profileUidLookup = resultsAndErrors.results()
.stream()
.filter(t -> Objects.nonNull(t.v2()))
.map(t -> new Tuple<>(t.v1().getUser().principal(), t.v2().uid()))
.collect(Collectors.toUnmodifiableMap(Tuple::v1, Tuple::v2));
listener.onResponse(profileUidLookup);
} else {
final ElasticsearchStatusException exception = new ElasticsearchStatusException(
"failed to retrieve profile for users. please retry without fetching profile uid (with_profile_uid=false)",
RestStatus.INTERNAL_SERVER_ERROR
);
resultsAndErrors.errors().values().forEach(exception::addSuppressed);
listener.onFailure(exception);
}
}, listener::onFailure));
}
}
| TransportGetUsersAction |
java | apache__kafka | metadata/src/main/java/org/apache/kafka/image/publisher/MetadataPublisher.java | {
"start": 1502,
"end": 2740
} | interface ____ extends AutoCloseable {
/**
* Returns the name of this publisher.
*
* @return The publisher name.
*/
String name();
/**
* Handle a change in the current controller.
*
* @param newLeaderAndEpoch The new quorum leader and epoch. The new leader will be
* OptionalInt.empty if there is currently no active controller.
*/
default void onControllerChange(LeaderAndEpoch newLeaderAndEpoch) { }
/**
* Publish a new cluster metadata snapshot that we loaded.
*
* @param delta The delta between the previous state and the new one.
* @param newImage The complete new state.
* @param manifest A manifest which describes the contents of what was published.
* If we loaded a snapshot, this will be a SnapshotManifest.
* If we loaded a log delta, this will be a LogDeltaManifest.
*/
void onMetadataUpdate(
MetadataDelta delta,
MetadataImage newImage,
LoaderManifest manifest
);
/**
* Close this metadata publisher and free any associated resources.
*/
default void close() throws Exception { }
}
| MetadataPublisher |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/rest/messages/MessageParametersTest.java | {
"start": 2340,
"end": 2908
} | class ____ extends MessageParameters {
private final TestPathParameter pathParameter = new TestPathParameter();
private final TestQueryParameter queryParameter = new TestQueryParameter();
@Override
public Collection<MessagePathParameter<?>> getPathParameters() {
return Collections.singleton(pathParameter);
}
@Override
public Collection<MessageQueryParameter<?>> getQueryParameters() {
return Collections.singleton(queryParameter);
}
}
private static | TestMessageParameters |
java | apache__maven | impl/maven-core/src/test/java/org/apache/maven/lifecycle/internal/BuildListCalculatorTest.java | {
"start": 1305,
"end": 2472
} | class ____ {
@Test
void testCalculateProjectBuilds() throws Exception {
LifecycleTaskSegmentCalculator lifecycleTaskSegmentCalculator = getTaskSegmentCalculator();
BuildListCalculator buildListCalculator = new BuildListCalculator();
final MavenSession session = ProjectDependencyGraphStub.getMavenSession();
List<TaskSegment> taskSegments = lifecycleTaskSegmentCalculator.calculateTaskSegments(session);
final ProjectBuildList buildList = buildListCalculator.calculateProjectBuilds(session, taskSegments);
final ProjectBuildList segments = buildList.getByTaskSegment(taskSegments.get(0));
assertEquals(3, taskSegments.size(), "Stub data contains 3 segments");
assertEquals(6, segments.size(), "Stub data contains 6 items");
final ProjectSegment build = segments.get(0);
assertNotNull(build);
for (ProjectSegment segment : segments) {
assertSame(session, segment.getSession());
}
}
private static LifecycleTaskSegmentCalculator getTaskSegmentCalculator() {
return new LifecycleTaskSegmentCalculatorStub();
}
}
| BuildListCalculatorTest |
java | processing__processing4 | app/src/processing/app/Settings.java | {
"start": 1309,
"end": 7977
} | class ____ {
/**
* Copy of the defaults in case the user mangles a preference.
* It's necessary to keep a copy of the defaults around, because the user may
* have replaced a setting on their own. In the past, we used to load the
* defaults, then replace those with what was in the user's preferences file.
* Problem is, if something like a font entry in the user's file no longer
* parses properly, we need to be able to get back to a clean version of that
* setting so that we can recover.
*/
Map<String, String> defaults;
/** Table of attributes/values. */
Map<String, String> table = new HashMap<>();
/** Associated file for this settings data. */
File file;
public Settings(File file) throws IOException {
this.file = file;
if (file.exists()) {
load();
}
// clone the hash table
defaults = new HashMap<>(table);
}
public void load() {
load(file);
}
public void load(File additions) {
String[] lines = PApplet.loadStrings(additions);
if (lines != null) {
for (String line : lines) {
if ((line.length() == 0) ||
(line.charAt(0) == '#')) continue;
// this won't properly handle = signs being in the text
int equals = line.indexOf('=');
if (equals != -1) {
String key = line.substring(0, equals).trim();
String value = line.substring(equals + 1).trim();
table.put(key, value);
}
}
} else {
Messages.err(additions + " could not be read");
}
// check for platform-specific properties in the defaults
String platformExt = "." + Platform.getName();
int platformExtLength = platformExt.length();
for (String key : table.keySet()) {
if (key.endsWith(platformExt)) {
// this is a key specific to a particular platform
String actualKey = key.substring(0, key.length() - platformExtLength);
String value = get(key);
table.put(actualKey, value);
}
}
}
public void save() {
save(file); // save back to the original file
}
public void save(File outputFile) {
PrintWriter writer = PApplet.createWriter(outputFile);
String[] keyList = table.keySet().toArray(new String[0]);
// Sorting is really helpful for debugging, diffing, and finding keys
keyList = PApplet.sort(keyList);
for (String key : keyList) {
writer.println(key + "=" + table.get(key));
}
writer.flush();
writer.close();
}
public String get(String attribute) {
return table.get(attribute);
}
public String getDefault(String attribute) {
return defaults.get(attribute);
}
public void set(String attribute, String value) {
table.put(attribute, value);
}
public boolean getBoolean(String attribute) {
String value = get(attribute);
if (value == null) {
System.err.println("Boolean not found: " + attribute);
return false;
}
return Boolean.parseBoolean(value);
}
public void setBoolean(String attribute, boolean value) {
set(attribute, value ? "true" : "false");
}
public int getInteger(String attribute) {
String value = get(attribute);
if (value == null) {
System.err.println("Integer not found: " + attribute);
return 0;
}
return Integer.parseInt(value);
}
@SuppressWarnings("unused")
public void setInteger(String key, int value) {
set(key, String.valueOf(value));
}
/**
* Parse a color from a Settings file. Values are hexadecimal in either
* #RRGGBB (for opaque) or 0xAARRGGBB format (to include alpha).
*/
public Color getColor(String attribute) {
Color outgoing = null;
String s = get(attribute);
if (s != null) {
try {
if (s.startsWith("#")) {
// parse a 6-digit hex color
outgoing = new Color(Integer.parseInt(s.substring(1), 16));
} else if (s.startsWith("0x")) {
int v = Integer.parseInt(s.substring(2), 16);
outgoing = new Color(v, true);
}
} catch (Exception ignored) { }
}
if (outgoing == null) {
System.err.println("Could not parse color " + s + " for " + attribute);
}
return outgoing;
}
public void setColor(String attr, Color what) {
set(attr, "#" + PApplet.hex(what.getRGB() & 0xffffff, 6));
}
// identical version found in Preferences.java
public Font getFont(String attr) {
try {
boolean replace = false;
String value = get(attr);
if (value == null) {
// use the default font instead
value = getDefault(attr);
replace = true;
}
String[] pieces = PApplet.split(value, ',');
if (pieces.length != 3) {
value = getDefault(attr);
pieces = PApplet.split(value, ',');
replace = true;
}
String name = pieces[0];
int style = Font.PLAIN; // equals zero
if (pieces[1].contains("bold")) { //$NON-NLS-1$
style |= Font.BOLD;
}
if (pieces[1].contains("italic")) { //$NON-NLS-1$
style |= Font.ITALIC;
}
int size = PApplet.parseInt(pieces[2], 12);
size = Toolkit.zoom(size);
// replace bad font with the default from lib/preferences.txt
if (replace) {
set(attr, value);
}
if (!name.startsWith("processing.")) {
return new Font(name, style, size);
} else {
if (pieces[0].equals("processing.sans")) {
return Toolkit.getSansFont(size, style);
} else if (pieces[0].equals("processing.mono")) {
return Toolkit.getMonoFont(size, style);
}
}
} catch (Exception e) {
// Adding try/catch block because this may be where
// a lot of startup crashes are happening.
Messages.log("Error with font " + get(attr) + " for attribute " + attr);
}
return new Font("Dialog", Font.PLAIN, 12);
}
public String remove(String key) {
return table.remove(key);
}
/**
* The day of reckoning: save() a file if it has entries, or delete
* if the file exists but the table no longer has any entries.
*/
public void reckon() {
if (table.isEmpty()) {
if (file.exists() && !file.delete()) {
System.err.println("Could not remove empty " + file);
}
} else {
save();
}
}
public boolean isEmpty() {
return table.isEmpty();
}
public void print() {
String[] keys = table.keySet().toArray(new String[0]);
Arrays.sort(keys);
for (String key : keys) {
System.out.println(key + " = " + table.get(key));
}
}
public Map<String, String> getMap() {
return table;
}
} | Settings |
java | elastic__elasticsearch | x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundToTests.java | {
"start": 1190,
"end": 12538
} | class ____ extends AbstractScalarFunctionTestCase {
public RoundToTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
this.testCase = testCaseSupplier.get();
}
@ParametersFactory
public static Iterable<Object[]> parameters() {
List<TestCaseSupplier> suppliers = new ArrayList<>();
for (int p = 1; p < 20; p++) {
int points = p;
suppliers.add(
doubles(
"<double, " + points + " doubles>",
DataType.DOUBLE,
() -> randomDoubleBetween(-Double.MAX_VALUE, Double.MAX_VALUE, true),
IntStream.range(0, points).mapToObj(i -> DataType.DOUBLE).toList(),
() -> IntStream.range(0, points).mapToObj(i -> randomDoubleBetween(-Double.MAX_VALUE, Double.MAX_VALUE, true)).toList()
)
);
suppliers.add(
doubles(
"<double, " + points + " longs>",
DataType.DOUBLE,
() -> randomDoubleBetween(-Double.MAX_VALUE, Double.MAX_VALUE, true),
IntStream.range(0, points).mapToObj(i -> DataType.LONG).toList(),
() -> IntStream.range(0, points).mapToObj(i -> (double) randomLong()).toList()
)
);
suppliers.add(
doubles(
"<double, " + points + " ints>",
DataType.DOUBLE,
() -> randomDoubleBetween(-Double.MAX_VALUE, Double.MAX_VALUE, true),
IntStream.range(0, points).mapToObj(i -> DataType.INTEGER).toList(),
() -> IntStream.range(0, points).mapToObj(i -> (double) randomInt()).toList()
)
);
suppliers.add(
doubles(
"<long, " + points + " doubles>",
DataType.LONG,
ESTestCase::randomLong,
IntStream.range(0, points).mapToObj(i -> DataType.DOUBLE).toList(),
() -> IntStream.range(0, points).mapToObj(i -> randomDoubleBetween(-Double.MAX_VALUE, Double.MAX_VALUE, true)).toList()
)
);
suppliers.add(
doubles(
"<int, " + points + " doubles>",
DataType.INTEGER,
ESTestCase::randomInt,
IntStream.range(0, points).mapToObj(i -> DataType.DOUBLE).toList(),
() -> IntStream.range(0, points).mapToObj(i -> randomDoubleBetween(-Double.MAX_VALUE, Double.MAX_VALUE, true)).toList()
)
);
suppliers.add(
longs(
"<long, " + points + " longs>",
DataType.LONG,
ESTestCase::randomLong,
IntStream.range(0, points).mapToObj(i -> DataType.LONG).toList(),
() -> IntStream.range(0, points).mapToObj(i -> randomLong()).toList()
)
);
suppliers.add(
longs(
"<int, " + points + " longs>",
DataType.INTEGER,
ESTestCase::randomInt,
IntStream.range(0, points).mapToObj(i -> DataType.LONG).toList(),
() -> IntStream.range(0, points).mapToObj(i -> randomLong()).toList()
)
);
suppliers.add(
longs(
"<date, " + points + " dates>",
DataType.DATETIME,
ESTestCase::randomMillisUpToYear9999,
IntStream.range(0, points).mapToObj(i -> DataType.DATETIME).toList(),
() -> IntStream.range(0, points).mapToObj(i -> randomMillisUpToYear9999()).toList()
)
);
suppliers.add(
longs(
"<date_nanos, " + points + " date_nanos>",
DataType.DATE_NANOS,
() -> randomLongBetween(0, Long.MAX_VALUE),
IntStream.range(0, points).mapToObj(i -> DataType.DATE_NANOS).toList(),
() -> IntStream.range(0, points).mapToObj(i -> randomLongBetween(0, Long.MAX_VALUE)).toList()
)
);
suppliers.add(
longs(
"<long, " + points + " ints>",
DataType.LONG,
ESTestCase::randomLong,
IntStream.range(0, points).mapToObj(i -> DataType.INTEGER).toList(),
() -> IntStream.range(0, points).mapToObj(i -> (long) randomInt()).toList()
)
);
suppliers.add(
ints(
"<int, " + points + " ints>",
DataType.INTEGER,
ESTestCase::randomInt,
IntStream.range(0, points).mapToObj(i -> DataType.INTEGER).toList(),
() -> IntStream.range(0, points).mapToObj(i -> randomInt()).toList()
)
);
}
suppliers.add(supplier(1.0, 0.0, 0.0, 100.0));
suppliers.add(supplier(1.0, 1.0, 0.0, 1.0, 100.0));
suppliers.add(supplier(0.5, 0.0, 0.0, 1.0, 100.0));
suppliers.add(supplier(1.5, 1.0, 0.0, 1.0, 100.0));
suppliers.add(supplier(200, 100, 0.0, 1.0, 100.0));
return parameterSuppliersFromTypedDataWithDefaultChecks(
(int nullPosition, DataType nullValueDataType, TestCaseSupplier.TestCase original) -> {
if (nullValueDataType != DataType.NULL) {
return original.expectedType();
}
List<DataType> types = original.getData().stream().map(TestCaseSupplier.TypedData::type).collect(Collectors.toList());
types.set(nullPosition, DataType.NULL);
return expectedType(types);
},
(int nullPosition, TestCaseSupplier.TypedData nullData, Matcher<String> original) -> {
if (nullPosition == 0) {
return original;
}
return equalTo("LiteralsEvaluator[lit=null]");
},
randomizeBytesRefsOffset(suppliers)
);
}
private static TestCaseSupplier supplier(double f, double expected, double... points) {
StringBuilder name = new StringBuilder("round(");
name.append(f);
for (double p : points) {
name.append(", ").append(p);
}
name.append(") -> ").append(expected);
return supplier(
name.toString(),
DataType.DOUBLE,
() -> f,
IntStream.range(0, points.length).mapToObj(i -> DataType.DOUBLE).toList(),
() -> Arrays.stream(points).boxed().toList(),
(value, de) -> expected
);
}
private static TestCaseSupplier doubles(
String name,
DataType fieldType,
Supplier<Number> fieldSupplier,
List<DataType> pointsTypes,
Supplier<List<Double>> pointsSupplier
) {
return supplier(name, fieldType, fieldSupplier, pointsTypes, pointsSupplier, (f, p) -> {
double max = p.stream().mapToDouble(d -> d).min().getAsDouble();
for (double d : p) {
if (d > max && f.doubleValue() > d) {
max = d;
}
}
return max;
});
}
private static TestCaseSupplier longs(
String name,
DataType fieldType,
Supplier<Number> fieldSupplier,
List<DataType> pointsTypes,
Supplier<List<Long>> pointsSupplier
) {
return supplier(name, fieldType, fieldSupplier, pointsTypes, pointsSupplier, (f, p) -> {
long max = p.stream().mapToLong(l -> l).min().getAsLong();
for (long l : p) {
if (l > max && f.doubleValue() > l) {
max = l;
}
}
return max;
});
}
private static TestCaseSupplier ints(
String name,
DataType fieldType,
Supplier<Number> fieldSupplier,
List<DataType> pointsTypes,
Supplier<List<Integer>> pointsSupplier
) {
return supplier(name, fieldType, fieldSupplier, pointsTypes, pointsSupplier, (f, p) -> {
int max = p.stream().mapToInt(i -> i).min().getAsInt();
for (int l : p) {
if (l > max && f.doubleValue() > l) {
max = l;
}
}
return max;
});
}
private static <P> TestCaseSupplier supplier(
String name,
DataType fieldType,
Supplier<Number> fieldSupplier,
List<DataType> pointsTypes,
Supplier<List<P>> pointsSupplier,
BiFunction<Number, List<P>, Number> expected
) {
List<DataType> types = new ArrayList<>(pointsTypes.size() + 1);
types.add(fieldType);
types.addAll(pointsTypes);
return new TestCaseSupplier(name, types, () -> {
Number field = fieldSupplier.get();
List<P> points = pointsSupplier.get();
List<TestCaseSupplier.TypedData> params = new ArrayList<>(1 + points.size());
params.add(new TestCaseSupplier.TypedData(field, fieldType, "field"));
for (int i = 0; i < points.size(); i++) {
params.add(new TestCaseSupplier.TypedData(points.get(i), pointsTypes.get(i), "point" + i).forceLiteral());
}
DataType expectedType = expectedType(types);
String type = switch (expectedType) {
case DOUBLE -> "Double";
case INTEGER -> "Int";
case DATETIME, DATE_NANOS, LONG -> "Long";
default -> throw new UnsupportedOperationException();
};
Matcher<String> expectedEvaluatorName = startsWith("RoundTo" + type + specialization(points.size()) + "Evaluator");
return new TestCaseSupplier.TestCase(params, expectedEvaluatorName, expectedType, equalTo(expected.apply(field, points)));
});
}
private static String specialization(int pointsSize) {
if (pointsSize < 11) {
return Integer.toString(pointsSize);
}
return "BinarySearch";
}
private static DataType expectedType(List<DataType> types) {
if (types.stream().anyMatch(t -> t == DataType.DOUBLE)) {
return DataType.DOUBLE;
}
if (types.stream().anyMatch(t -> t == DataType.LONG)) {
return DataType.LONG;
}
if (types.stream().anyMatch(t -> t == DataType.INTEGER)) {
return DataType.INTEGER;
}
if (types.stream().anyMatch(t -> t == DataType.DATETIME)) {
return DataType.DATETIME;
}
if (types.stream().anyMatch(t -> t == DataType.DATE_NANOS)) {
return DataType.DATE_NANOS;
}
throw new UnsupportedOperationException("can't build expected types for " + types);
}
@Override
protected Expression build(Source source, List<Expression> args) {
return new RoundTo(source, args.getFirst(), args.subList(1, args.size()));
}
}
| RoundToTests |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ElementType.java | {
"start": 781,
"end": 2955
} | enum ____ {
BOOLEAN(0, "Boolean", BlockFactory::newBooleanBlockBuilder, BooleanBlock::readFrom),
INT(1, "Int", BlockFactory::newIntBlockBuilder, IntBlock::readFrom),
LONG(2, "Long", BlockFactory::newLongBlockBuilder, LongBlock::readFrom),
FLOAT(3, "Float", BlockFactory::newFloatBlockBuilder, FloatBlock::readFrom),
DOUBLE(4, "Double", BlockFactory::newDoubleBlockBuilder, DoubleBlock::readFrom),
/**
* Blocks containing only null values.
*/
NULL(5, "Null", (blockFactory, estimatedSize) -> new ConstantNullBlock.Builder(blockFactory), BlockStreamInput::readConstantNullBlock),
BYTES_REF(6, "BytesRef", BlockFactory::newBytesRefBlockBuilder, BytesRefBlock::readFrom),
/**
* Blocks that reference individual lucene documents.
*/
DOC(7, "Doc", DocBlock::newBlockBuilder, in -> { throw new UnsupportedOperationException("can't read doc blocks"); }),
/**
* Composite blocks which contain array of sub-blocks.
*/
COMPOSITE(
8,
"Composite",
(blockFactory, estimatedSize) -> { throw new UnsupportedOperationException("can't build composite blocks"); },
CompositeBlock::readFrom
),
/**
* Intermediate blocks which don't support retrieving elements.
*/
UNKNOWN(9, "Unknown", (blockFactory, estimatedSize) -> { throw new UnsupportedOperationException("can't build null blocks"); }, in -> {
throw new UnsupportedOperationException("can't read unknown blocks");
}),
/**
* Blocks that contain aggregate_metric_doubles.
*/
AGGREGATE_METRIC_DOUBLE(
10,
"AggregateMetricDouble",
BlockFactory::newAggregateMetricDoubleBlockBuilder,
AggregateMetricDoubleArrayBlock::readFrom
),
/**
* Blocks that contain exponential_histograms.
*/
EXPONENTIAL_HISTOGRAM(
11,
"ExponentialHistogram",
BlockFactory::newExponentialHistogramBlockBuilder,
ExponentialHistogramArrayBlock::readFrom
);
private static final TransportVersion ESQL_SERIALIZE_BLOCK_TYPE_CODE = TransportVersion.fromName("esql_serialize_block_type_code");
private | ElementType |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java | {
"start": 17352,
"end": 23478
} | interface ____)
if (isStriped) {
// striped block doesn't support seekToNewSource
in2.seek(0);
} else {
assertTrue(in2.seekToNewSource(0));
}
assertTrue(checkFile1(in2,expected));
// confirm all tokens cached in in3 are expired by now
List<LocatedBlock> lblocks3 = DFSTestUtil.getAllBlocks(in3);
for (LocatedBlock blk : lblocks3) {
assertTrue(isBlockTokenExpired(blk));
}
// verify fetchBlockByteRange() is able to re-fetch token transparently
assertTrue(checkFile2(in3,expected));
/*
* testing that after datanodes are restarted on the same ports, cached
* tokens should still work and there is no need to fetch new tokens from
* namenode. This test should run while namenode is down (to make sure no
* new tokens can be fetched from namenode).
*/
// restart datanodes on the same ports that they currently use
assertTrue(cluster.restartDataNodes(true));
cluster.waitActive();
assertEquals(numDataNodes, cluster.getDataNodes().size());
cluster.shutdownNameNode(0);
// confirm tokens cached in in1 are still valid
lblocks = DFSTestUtil.getAllBlocks(in1);
for (LocatedBlock blk : lblocks) {
assertFalse(isBlockTokenExpired(blk));
}
// verify blockSeekTo() still works (forced to use cached tokens)
in1.seek(0);
assertTrue(checkFile1(in1,expected));
// confirm tokens cached in in2 are still valid
lblocks2 = DFSTestUtil.getAllBlocks(in2);
for (LocatedBlock blk : lblocks2) {
assertFalse(isBlockTokenExpired(blk));
}
// verify blockSeekTo() still works (forced to use cached tokens)
if (isStriped) {
in2.seek(0);
} else {
in2.seekToNewSource(0);
}
assertTrue(checkFile1(in2,expected));
// confirm tokens cached in in3 are still valid
lblocks3 = DFSTestUtil.getAllBlocks(in3);
for (LocatedBlock blk : lblocks3) {
assertFalse(isBlockTokenExpired(blk));
}
// verify fetchBlockByteRange() still works (forced to use cached tokens)
assertTrue(checkFile2(in3,expected));
/*
* testing that when namenode is restarted, cached tokens should still
* work and there is no need to fetch new tokens from namenode. Like the
* previous test, this test should also run while namenode is down. The
* setup for this test depends on the previous test.
*/
// restart the namenode and then shut it down for test
cluster.restartNameNode(0);
cluster.shutdownNameNode(0);
// verify blockSeekTo() still works (forced to use cached tokens)
in1.seek(0);
assertTrue(checkFile1(in1,expected));
// verify again blockSeekTo() still works (forced to use cached tokens)
if (isStriped) {
in2.seek(0);
} else {
in2.seekToNewSource(0);
}
assertTrue(checkFile1(in2,expected));
// verify fetchBlockByteRange() still works (forced to use cached tokens)
assertTrue(checkFile2(in3,expected));
/*
* testing that after both namenode and datanodes got restarted (namenode
* first, followed by datanodes), DFSClient can't access DN without
* re-fetching tokens and is able to re-fetch tokens transparently. The
* setup of this test depends on the previous test.
*/
// restore the cluster and restart the datanodes for test
cluster.restartNameNode(0);
assertTrue(cluster.restartDataNodes(true));
cluster.waitActive();
assertEquals(numDataNodes, cluster.getDataNodes().size());
// shutdown namenode so that DFSClient can't get new tokens from namenode
cluster.shutdownNameNode(0);
// verify blockSeekTo() fails (cached tokens become invalid)
if (isStriped) {
try {
in1.seek(0);
assertFalse(checkFile1(in1, expected));
} catch (Exception ignored) {
}
} else {
in1.seek(0);
assertFalse(checkFile1(in1, expected));
}
// verify fetchBlockByteRange() fails (cached tokens become invalid)
if (isStriped) {
try {
assertFalse(checkFile2(in3, expected));
} catch (Exception ignored) {
}
} else {
assertFalse(checkFile2(in3, expected));
}
// restart the namenode to allow DFSClient to re-fetch tokens
cluster.restartNameNode(0);
// Reopen closed streams
in1 = fs.open(fileToRead);
in3 = fs.open(fileToRead);
// verify blockSeekTo() works again (by transparently re-fetching
// tokens from namenode)
in1.seek(0);
assertTrue(checkFile1(in1,expected));
if (isStriped) {
in2.seek(0);
} else {
in2.seekToNewSource(0);
}
assertTrue(checkFile1(in2,expected));
// verify fetchBlockByteRange() works again (by transparently
// re-fetching tokens from namenode)
assertTrue(checkFile2(in3,expected));
/*
* testing that when datanodes are restarted on different ports, DFSClient
* is able to re-fetch tokens transparently to connect to them
*/
// restart datanodes on newly assigned ports
assertTrue(cluster.restartDataNodes(false));
cluster.waitActive();
assertEquals(numDataNodes, cluster.getDataNodes().size());
// verify blockSeekTo() is able to re-fetch token transparently
in1.seek(0);
assertTrue(checkFile1(in1,expected));
// verify blockSeekTo() is able to re-fetch token transparently
if (isStriped) {
in2.seek(0);
} else {
in2.seekToNewSource(0);
}
assertTrue(checkFile1(in2,expected));
// verify fetchBlockByteRange() is able to re-fetch token transparently
assertTrue(checkFile2(in3,expected));
}
/**
* Integration testing of access token, involving NN, DN, and Balancer
*/
@Test
public void testEnd2End() throws Exception {
Configuration conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
new TestBalancer().integrationTest(conf);
}
protected boolean isBlockTokenExpired(LocatedBlock lb) throws IOException {
return SecurityTestUtil.isBlockTokenExpired(lb.getBlockToken());
}
}
| method |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/StatementSwitchToExpressionSwitchTest.java | {
"start": 88763,
"end": 89736
} | class ____ {
public int foo(Suit suit) {
before:
for (; ; ) {
switch (suit) {
case HEART:
break before;
case DIAMOND:
return 3;
case SPADE:
throw new RuntimeException();
default:
throw new NullPointerException();
}
}
return 0;
}
}
""")
.setArgs(
"-XepOpt:StatementSwitchToExpressionSwitch:EnableReturnSwitchConversion",
"-XepOpt:StatementSwitchToExpressionSwitch:EnableDirectConversion=false")
.doTest();
}
@Test
public void switchByEnum_returnYield_noError() {
// Does not attempt to convert "yield" expressions
helper
.addSourceLines(
"Test.java",
"""
| Test |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/jndi/JndiObjectFactoryBean.java | {
"start": 5281,
"end": 10486
} | interface ____ to be specified.
* @see #setProxyInterface
* @see #setLookupOnStartup
*/
public void setCache(boolean cache) {
this.cache = cache;
}
/**
* Set whether to expose the JNDI environment context for all access to the target
* object, i.e. for all method invocations on the exposed object reference.
* <p>Default is "false", i.e. to only expose the JNDI context for object lookup.
* Switch this flag to "true" in order to expose the JNDI environment (including
* the authorization context) for each method invocation, as needed by WebLogic
* for JNDI-obtained factories (for example, JDBC DataSource, JMS ConnectionFactory)
* with authorization requirements.
*/
public void setExposeAccessContext(boolean exposeAccessContext) {
this.exposeAccessContext = exposeAccessContext;
}
/**
* Specify a default object to fall back to if the JNDI lookup fails.
* Default is none.
* <p>This can be an arbitrary bean reference or literal value.
* It is typically used for literal values in scenarios where the JNDI environment
* might define specific config settings but those are not required to be present.
* <p>Note: This is only supported for lookup on startup.
* If specified together with {@link #setExpectedType}, the specified value
* needs to be either of that type or convertible to it.
* @see #setLookupOnStartup
* @see ConfigurableBeanFactory#getTypeConverter()
* @see SimpleTypeConverter
*/
public void setDefaultObject(Object defaultObject) {
this.defaultObject = defaultObject;
}
@Override
public void setBeanFactory(BeanFactory beanFactory) {
if (beanFactory instanceof ConfigurableBeanFactory cbf) {
// Just optional - for getting a specifically configured TypeConverter if needed.
// We'll simply fall back to a SimpleTypeConverter if no specific one available.
this.beanFactory = cbf;
}
}
@Override
public void setBeanClassLoader(ClassLoader classLoader) {
this.beanClassLoader = classLoader;
}
/**
* Look up the JNDI object and store it.
*/
@Override
public void afterPropertiesSet() throws IllegalArgumentException, NamingException {
super.afterPropertiesSet();
if (this.proxyInterfaces != null || !this.lookupOnStartup || !this.cache || this.exposeAccessContext) {
// We need to create a proxy for this...
if (this.defaultObject != null) {
throw new IllegalArgumentException(
"'defaultObject' is not supported in combination with 'proxyInterface'");
}
// We need a proxy and a JndiObjectTargetSource.
this.jndiObject = JndiObjectProxyFactory.createJndiObjectProxy(this);
}
else {
if (this.defaultObject != null && getExpectedType() != null &&
!getExpectedType().isInstance(this.defaultObject)) {
TypeConverter converter = (this.beanFactory != null ?
this.beanFactory.getTypeConverter() : new SimpleTypeConverter());
try {
this.defaultObject = converter.convertIfNecessary(this.defaultObject, getExpectedType());
}
catch (TypeMismatchException ex) {
throw new IllegalArgumentException("Default object [" + this.defaultObject + "] of type [" +
this.defaultObject.getClass().getName() + "] is not of expected type [" +
getExpectedType().getName() + "] and cannot be converted either", ex);
}
}
// Locate specified JNDI object.
this.jndiObject = lookupWithFallback();
}
}
/**
* Lookup variant that returns the specified "defaultObject"
* (if any) in case of lookup failure.
* @return the located object, or the "defaultObject" as fallback
* @throws NamingException in case of lookup failure without fallback
* @see #setDefaultObject
*/
protected Object lookupWithFallback() throws NamingException {
ClassLoader originalClassLoader = ClassUtils.overrideThreadContextClassLoader(this.beanClassLoader);
try {
return lookup();
}
catch (TypeMismatchNamingException ex) {
// Always let TypeMismatchNamingException through -
// we don't want to fall back to the defaultObject in this case.
throw ex;
}
catch (NamingException ex) {
if (this.defaultObject != null) {
if (logger.isTraceEnabled()) {
logger.trace("JNDI lookup failed - returning specified default object instead", ex);
}
else if (logger.isDebugEnabled()) {
logger.debug("JNDI lookup failed - returning specified default object instead: " + ex);
}
return this.defaultObject;
}
throw ex;
}
finally {
if (originalClassLoader != null) {
Thread.currentThread().setContextClassLoader(originalClassLoader);
}
}
}
/**
* Return the singleton JNDI object.
*/
@Override
public @Nullable Object getObject() {
return this.jndiObject;
}
@Override
public @Nullable Class<?> getObjectType() {
if (this.proxyInterfaces != null) {
if (this.proxyInterfaces.length == 1) {
return this.proxyInterfaces[0];
}
else if (this.proxyInterfaces.length > 1) {
return createCompositeInterface(this.proxyInterfaces);
}
}
if (this.jndiObject != null) {
return this.jndiObject.getClass();
}
else {
return getExpectedType();
}
}
@Override
public boolean isSingleton() {
return true;
}
/**
* Create a composite | needs |
java | alibaba__nacos | istio/src/main/java/com/alibaba/nacos/istio/common/IstioConfigProcessor.java | {
"start": 1436,
"end": 5398
} | class ____ {
private NacosXdsService nacosXdsService;
private NacosResourceManager resourceManager;
public static final String CONFIG_REASON = "config";
private static final String VIRTUAL_SERVICE = "VirtualService";
private static final String DESTINATION_RULE = "DestinationRule";
private static final String API_VERSION = "networking.istio.io/v1alpha3";
Yaml yaml = new Yaml();
public IstioConfigProcessor() {
NotifyCenter.registerSubscriber(new Subscriber() {
@Override
public void onEvent(Event event) {
if (event instanceof IstioConfigChangeEvent) {
IstioConfigChangeEvent istioConfigChangeEvent = (IstioConfigChangeEvent) event;
String content = istioConfigChangeEvent.content;
if (isContentValid(content) && tryParseContent(content)) {
PushRequest pushRequest = new PushRequest(content, true);
if (null == nacosXdsService) {
nacosXdsService = ApplicationUtils.getBean(NacosXdsService.class);
}
if (null == resourceManager) {
resourceManager = ApplicationUtils.getBean(NacosResourceManager.class);
}
pushRequest.addReason(CONFIG_REASON);
ResourceSnapshot snapshot = resourceManager.createResourceSnapshot();
pushRequest.setResourceSnapshot(snapshot);
nacosXdsService.handleConfigEvent(pushRequest);
}
}
}
@Override
public Class<? extends Event> subscribeType() {
return IstioConfigChangeEvent.class;
}
});
}
public boolean isContentValid(String content) {
if (content == null || content.trim().isEmpty()) {
Loggers.MAIN.warn("Configuration content is null or empty.");
return false;
}
Map<String, Object> obj;
try {
obj = yaml.load(content);
} catch (Exception e) {
Loggers.MAIN.error("Invalid YAML content.", e);
return false;
}
String apiVersion = obj.containsKey("apiVersion") ? (String) obj.get("apiVersion") : "";
String kind = obj.containsKey("kind") ? (String) obj.get("kind") : "";
return API_VERSION.equals(apiVersion) && (VIRTUAL_SERVICE.equals(kind)
|| DESTINATION_RULE.equals(kind)) && obj.containsKey("metadata") && obj.containsKey("spec");
}
public boolean tryParseContent(String content) {
if (content == null || content.trim().isEmpty()) {
Loggers.MAIN.warn("Configuration content is null or empty.");
return false;
}
try {
Map<String, Object> obj = yaml.load(content);
String kind = (String) obj.get("kind");
if (VIRTUAL_SERVICE.equals(kind)) {
VirtualService virtualService = yaml.loadAs(content, VirtualService.class);
Loggers.MAIN.info("Configuration Content was successfully parsed as VirtualService.");
} else if (DESTINATION_RULE.equals(kind)) {
DestinationRule destinationRule = yaml.loadAs(content, DestinationRule.class);
Loggers.MAIN.info("Configuration Content was successfully parsed as DestinationRule.");
} else {
Loggers.MAIN.warn("Unknown Config : Unknown 'kind' field in content: {}", kind);
return false;
}
return true;
} catch (Exception e) {
Loggers.MAIN.error("Error parsing configuration content: {}", e.getMessage(), e);
return false;
}
}
}
| IstioConfigProcessor |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_2122/Issue2122Method2TypeConversionMapper.java | {
"start": 949,
"end": 1159
} | class ____ {
Integer value;
public Integer getValue() {
return value;
}
public void setValue(Integer value) {
this.value = value;
}
}
}
| Target |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/MapPartitionIteratorTest.java | {
"start": 1296,
"end": 13498
} | class ____ {
private static final String RECORD = "TEST";
private static final int RECORD_NUMBER = 3;
@Test
void testInitialize() throws ExecutionException, InterruptedException {
CompletableFuture<Object> result = new CompletableFuture<>();
MapPartitionIterator<String> iterator =
new MapPartitionIterator<>(stringIterator -> result.complete(null));
result.get();
assertThat(result).isCompleted();
iterator.close();
}
@Test
void testAddRecord() throws ExecutionException, InterruptedException {
CompletableFuture<List<String>> result = new CompletableFuture<>();
CompletableFuture<Object> udfFinishTrigger = new CompletableFuture<>();
MapPartitionIterator<String> iterator =
new MapPartitionIterator<>(
inputIterator -> {
List<String> strings = new ArrayList<>();
for (int index = 0; index < RECORD_NUMBER; ++index) {
strings.add(inputIterator.next());
}
result.complete(strings);
try {
udfFinishTrigger.get();
} catch (InterruptedException | ExecutionException e) {
ExceptionUtils.rethrow(e);
}
});
// 1.Test addRecord() when the cache is empty in the MapPartitionIterator.
addRecordToIterator(RECORD_NUMBER, iterator);
List<String> results = result.get();
assertThat(results.size()).isEqualTo(RECORD_NUMBER);
assertThat(results.get(0)).isEqualTo(RECORD);
assertThat(results.get(1)).isEqualTo(RECORD);
assertThat(results.get(2)).isEqualTo(RECORD);
// 2.Test addRecord() when the cache is full in the MapPartitionIterator.
addRecordToIterator(DEFAULT_MAX_CACHE_NUM, iterator);
CompletableFuture<Object> mockedTaskThread1 = new CompletableFuture<>();
CompletableFuture<List<String>> addRecordFinishIdentifier1 = new CompletableFuture<>();
mockedTaskThread1.thenRunAsync(
() -> {
iterator.addRecord(RECORD);
addRecordFinishIdentifier1.complete(null);
});
mockedTaskThread1.complete(null);
assertThat(addRecordFinishIdentifier1).isNotCompleted();
iterator.next();
addRecordFinishIdentifier1.get();
assertThat(addRecordFinishIdentifier1).isCompleted();
// 2.Test addRecord() when the udf is finished in the MapPartitionIterator.
CompletableFuture<Object> mockedTaskThread2 = new CompletableFuture<>();
CompletableFuture<List<String>> addRecordFinishIdentifier2 = new CompletableFuture<>();
mockedTaskThread2.thenRunAsync(
() -> {
iterator.addRecord(RECORD);
addRecordFinishIdentifier2.complete(null);
});
mockedTaskThread2.complete(null);
assertThat(addRecordFinishIdentifier2).isNotCompleted();
udfFinishTrigger.complete(null);
addRecordFinishIdentifier2.get();
assertThat(addRecordFinishIdentifier2).isCompleted();
assertThat(udfFinishTrigger).isCompleted();
iterator.close();
}
@Test
void testHasNext() throws ExecutionException, InterruptedException {
CompletableFuture<Object> udfTrigger = new CompletableFuture<>();
CompletableFuture<Object> udfReadIteratorFinishIdentifier = new CompletableFuture<>();
CompletableFuture<Object> udfFinishTrigger = new CompletableFuture<>();
MapPartitionIterator<String> iterator =
new MapPartitionIterator<>(
inputIterator -> {
try {
udfTrigger.get();
} catch (InterruptedException | ExecutionException e) {
ExceptionUtils.rethrow(e);
}
for (int index = 0; index < RECORD_NUMBER; ++index) {
inputIterator.next();
}
udfReadIteratorFinishIdentifier.complete(null);
try {
udfFinishTrigger.get();
} catch (InterruptedException | ExecutionException e) {
ExceptionUtils.rethrow(e);
}
});
// 1.Test hasNext() when the cache is not empty in the MapPartitionIterator.
addRecordToIterator(RECORD_NUMBER, iterator);
assertThat(iterator.hasNext()).isTrue();
// 2.Test hasNext() when the cache is empty in the MapPartitionIterator.
udfTrigger.complete(null);
udfReadIteratorFinishIdentifier.get();
assertThat(udfReadIteratorFinishIdentifier).isCompleted();
CompletableFuture<Object> mockedUDFThread1 = new CompletableFuture<>();
CompletableFuture<Boolean> hasNextFinishIdentifier1 = new CompletableFuture<>();
mockedUDFThread1.thenRunAsync(
() -> {
boolean hasNext = iterator.hasNext();
hasNextFinishIdentifier1.complete(hasNext);
});
mockedUDFThread1.complete(null);
assertThat(hasNextFinishIdentifier1).isNotCompleted();
iterator.addRecord(RECORD);
hasNextFinishIdentifier1.get();
assertThat(hasNextFinishIdentifier1).isCompletedWithValue(true);
iterator.next();
// 2.Test hasNext() when the MapPartitionIterator is closed.
CompletableFuture<Object> mockedUDFThread2 = new CompletableFuture<>();
CompletableFuture<Boolean> hasNextFinishIdentifier2 = new CompletableFuture<>();
mockedUDFThread2.thenRunAsync(
() -> {
boolean hasNext = iterator.hasNext();
hasNextFinishIdentifier2.complete(hasNext);
udfFinishTrigger.complete(null);
});
mockedUDFThread2.complete(null);
assertThat(hasNextFinishIdentifier2).isNotCompleted();
iterator.close();
assertThat(hasNextFinishIdentifier2).isCompletedWithValue(false);
assertThat(udfFinishTrigger).isCompleted();
}
@Test
void testNext() throws ExecutionException, InterruptedException {
CompletableFuture<List<String>> result = new CompletableFuture<>();
CompletableFuture<Object> udfFinishTrigger = new CompletableFuture<>();
MapPartitionIterator<String> iterator =
new MapPartitionIterator<>(
inputIterator -> {
List<String> strings = new ArrayList<>();
for (int index = 0; index < RECORD_NUMBER; ++index) {
strings.add(inputIterator.next());
}
result.complete(strings);
try {
udfFinishTrigger.get();
} catch (InterruptedException | ExecutionException e) {
ExceptionUtils.rethrow(e);
}
});
// 1.Test next() when the cache is not empty in the MapPartitionIterator.
addRecordToIterator(RECORD_NUMBER, iterator);
List<String> results = result.get();
assertThat(results.size()).isEqualTo(RECORD_NUMBER);
assertThat(results.get(0)).isEqualTo(RECORD);
assertThat(results.get(1)).isEqualTo(RECORD);
assertThat(results.get(2)).isEqualTo(RECORD);
// 2.Test next() when the cache is empty in the MapPartitionIterator.
CompletableFuture<Object> mockedUDFThread1 = new CompletableFuture<>();
CompletableFuture<String> nextFinishIdentifier1 = new CompletableFuture<>();
mockedUDFThread1.thenRunAsync(
() -> {
String next = iterator.next();
nextFinishIdentifier1.complete(next);
});
mockedUDFThread1.complete(null);
assertThat(nextFinishIdentifier1).isNotCompleted();
iterator.addRecord(RECORD);
nextFinishIdentifier1.get();
assertThat(nextFinishIdentifier1).isCompletedWithValue(RECORD);
// 2.Test next() when the MapPartitionIterator is closed.
CompletableFuture<Object> mockedUDFThread2 = new CompletableFuture<>();
CompletableFuture<String> nextFinishIdentifier2 = new CompletableFuture<>();
mockedUDFThread2.thenRunAsync(
() -> {
String next = iterator.next();
nextFinishIdentifier2.complete(next);
udfFinishTrigger.complete(null);
});
mockedUDFThread2.complete(null);
assertThat(nextFinishIdentifier2).isNotCompleted();
iterator.close();
assertThat(nextFinishIdentifier2).isCompletedWithValue(null);
assertThat(udfFinishTrigger).isCompleted();
}
@Test
void testClose() throws ExecutionException, InterruptedException {
// 1.Test close() when the cache is not empty in the MapPartitionIterator.
CompletableFuture<?> udfFinishTrigger1 = new CompletableFuture<>();
MapPartitionIterator<String> iterator1 =
new MapPartitionIterator<>(
ignored -> {
try {
udfFinishTrigger1.get();
} catch (InterruptedException | ExecutionException e) {
ExceptionUtils.rethrow(e);
}
});
iterator1.addRecord(RECORD);
CompletableFuture<Object> mockedTaskThread1 = new CompletableFuture<>();
CompletableFuture<Object> iteratorCloseIdentifier1 = new CompletableFuture<>();
mockedTaskThread1.thenRunAsync(
() -> {
iterator1.close();
iteratorCloseIdentifier1.complete(null);
});
mockedTaskThread1.complete(null);
assertThat(iteratorCloseIdentifier1).isNotCompleted();
udfFinishTrigger1.complete(null);
iteratorCloseIdentifier1.get();
assertThat(iteratorCloseIdentifier1).isCompleted();
// 2.Test close() when the cache is empty in the MapPartitionIterator.
CompletableFuture<?> udfFinishTrigger2 = new CompletableFuture<>();
MapPartitionIterator<String> iterator2 =
new MapPartitionIterator<>(
ignored -> {
try {
udfFinishTrigger2.get();
} catch (InterruptedException | ExecutionException e) {
ExceptionUtils.rethrow(e);
}
});
CompletableFuture<Object> mockedTaskThread2 = new CompletableFuture<>();
CompletableFuture<Object> iteratorCloseIdentifier2 = new CompletableFuture<>();
mockedTaskThread1.thenRunAsync(
() -> {
iterator2.close();
iteratorCloseIdentifier2.complete(null);
});
mockedTaskThread2.complete(null);
assertThat(iteratorCloseIdentifier2).isNotCompleted();
udfFinishTrigger2.complete(null);
iteratorCloseIdentifier2.get();
assertThat(iteratorCloseIdentifier2).isCompleted();
// 2.Test close() when the udf is finished in the MapPartitionIterator.
MapPartitionIterator<String> iterator3 = new MapPartitionIterator<>(ignored -> {});
iterator3.close();
}
private void addRecordToIterator(int cacheNumber, MapPartitionIterator<String> iterator) {
for (int index = 0; index < cacheNumber; ++index) {
iterator.addRecord(RECORD);
}
}
}
| MapPartitionIteratorTest |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/synonyms/GetSynonymsSetsAction.java | {
"start": 1107,
"end": 1378
} | class ____ extends AbstractSynonymsPagedResultAction.Request {
public Request(StreamInput in) throws IOException {
super(in);
}
public Request(int from, int size) {
super(from, size);
}
}
public static | Request |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/pool/adapter/DruidDataSourceC3P0AdapterTest.java | {
"start": 895,
"end": 2013
} | class ____ extends PoolTestCase {
private MockDriver driver;
private DruidDataSourceC3P0Adapter dataSource;
protected void setUp() throws Exception {
super.setUp();
assertEquals(0, DruidDataSourceStatManager.getInstance().getDataSourceList().size());
driver = new MockDriver();
dataSource = new DruidDataSourceC3P0Adapter();
dataSource.setJdbcUrl("jdbc:mock:xxx");
dataSource.setDriver(driver);
dataSource.setInitialPoolSize(1);
dataSource.setMaxPoolSize(2);
dataSource.setMinPoolSize(1);
dataSource.setMaxIdleTime(300); // 300 / 10
dataSource.setIdleConnectionTestPeriod(180); // 180 / 10
dataSource.setTestConnectionOnCheckout(false);
dataSource.setPreferredTestQuery("SELECT 1");
dataSource.setFilters("stat");
}
protected void tearDown() throws Exception {
dataSource.close();
super.tearDown();
}
public void test_basic() throws Exception {
Connection conn = dataSource.getConnection();
conn.close();
}
}
| DruidDataSourceC3P0AdapterTest |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/functions/casting/CastRuleProvider.java | {
"start": 1649,
"end": 13648
} | class ____ {
/* ------- Singleton declaration ------- */
private static final CastRuleProvider INSTANCE = new CastRuleProvider();
static {
INSTANCE
// Highest precedence rule
.addRule(IdentityCastRule.INSTANCE)
// Numeric rules
.addRule(DecimalToDecimalCastRule.INSTANCE)
.addRule(NumericPrimitiveToDecimalCastRule.INSTANCE)
.addRule(DecimalToNumericPrimitiveCastRule.INSTANCE)
.addRule(NumericPrimitiveCastRule.INSTANCE)
// Boolean <-> numeric rules
.addRule(BooleanToNumericCastRule.INSTANCE)
.addRule(NumericToBooleanCastRule.INSTANCE)
// To string rules
.addRule(NumericToStringCastRule.INSTANCE)
.addRule(BooleanToStringCastRule.INSTANCE)
.addRule(BinaryToStringCastRule.INSTANCE)
.addRule(TimestampToStringCastRule.INSTANCE)
.addRule(TimeToStringCastRule.INSTANCE)
.addRule(DateToStringCastRule.INSTANCE)
.addRule(IntervalToStringCastRule.INSTANCE)
.addRule(ArrayToStringCastRule.INSTANCE)
.addRule(MapAndMultisetToStringCastRule.INSTANCE)
.addRule(StructuredToStringCastRule.INSTANCE)
.addRule(RowToStringCastRule.INSTANCE)
.addRule(RawToStringCastRule.INSTANCE)
// From string rules
.addRule(StringToBooleanCastRule.INSTANCE)
.addRule(StringToDecimalCastRule.INSTANCE)
.addRule(StringToNumericPrimitiveCastRule.INSTANCE)
.addRule(StringToDateCastRule.INSTANCE)
.addRule(StringToTimeCastRule.INSTANCE)
.addRule(StringToTimestampCastRule.INSTANCE)
.addRule(StringToBinaryCastRule.INSTANCE)
// Date/Time/Timestamp rules
.addRule(TimestampToTimestampCastRule.INSTANCE)
.addRule(TimestampToDateCastRule.INSTANCE)
.addRule(TimestampToTimeCastRule.INSTANCE)
.addRule(DateToTimestampCastRule.INSTANCE)
.addRule(TimeToTimestampCastRule.INSTANCE)
.addRule(NumericToTimestampCastRule.INSTANCE)
.addRule(TimestampToNumericCastRule.INSTANCE)
// To binary rules
.addRule(BinaryToBinaryCastRule.INSTANCE)
.addRule(RawToBinaryCastRule.INSTANCE)
// Collection rules
.addRule(ArrayToArrayCastRule.INSTANCE)
.addRule(MapToMapAndMultisetToMultisetCastRule.INSTANCE)
.addRule(RowToRowCastRule.INSTANCE)
// Variant rules
.addRule(VariantToStringCastRule.INSTANCE)
// Special rules
.addRule(CharVarCharTrimPadCastRule.INSTANCE)
.addRule(NullToStringCastRule.INSTANCE);
}
/* ------- Entrypoint ------- */
/**
* Resolve a {@link CastRule} for the provided input type and target type. Returns {@code null}
* if no rule can be resolved.
*/
public static @Nullable CastRule<?, ?> resolve(LogicalType inputType, LogicalType targetType) {
return INSTANCE.internalResolve(inputType, targetType);
}
/**
* Returns {@code true} if and only if a {@link CastRule} can be resolved for the provided input
* type and target type.
*/
public static boolean exists(LogicalType inputType, LogicalType targetType) {
return resolve(inputType, targetType) != null;
}
/**
* Resolves the rule and returns the result of {@link CastRule#canFail(LogicalType,
* LogicalType)}. Fails with {@link NullPointerException} if the rule cannot be resolved.
*/
public static boolean canFail(LogicalType inputType, LogicalType targetType) {
return Preconditions.checkNotNull(
resolve(inputType, targetType), "Cast rule cannot be resolved")
.canFail(inputType, targetType);
}
/**
* Create a {@link CastExecutor} for the provided input type and target type. Returns {@code
* null} if no rule can be resolved.
*
* @see CastRule#create(CastRule.Context, LogicalType, LogicalType)
*/
public static @Nullable CastExecutor<?, ?> create(
CastRule.Context context, LogicalType inputLogicalType, LogicalType targetLogicalType) {
CastRule<?, ?> rule = INSTANCE.internalResolve(inputLogicalType, targetLogicalType);
if (rule == null) {
return null;
}
return rule.create(context, inputLogicalType, targetLogicalType);
}
/**
* Create a {@link CastCodeBlock} for the provided input type and target type. Returns {@code
* null} if no rule can be resolved or the resolved rule is not instance of {@link
* CodeGeneratorCastRule}.
*
* @see CodeGeneratorCastRule#generateCodeBlock(CodeGeneratorCastRule.Context, String, String,
* LogicalType, LogicalType)
*/
@SuppressWarnings("rawtypes")
public static @Nullable CastCodeBlock generateCodeBlock(
CodeGeneratorCastRule.Context context,
String inputTerm,
String inputIsNullTerm,
LogicalType inputLogicalType,
LogicalType targetLogicalType) {
CastRule<?, ?> rule = INSTANCE.internalResolve(inputLogicalType, targetLogicalType);
if (!(rule instanceof CodeGeneratorCastRule)) {
return null;
}
return ((CodeGeneratorCastRule) rule)
.generateCodeBlock(
context, inputTerm, inputIsNullTerm, inputLogicalType, targetLogicalType);
}
/**
* Create a {@link CastExecutor} and execute the cast on the provided {@code value}. Fails with
* {@link IllegalArgumentException} if the rule cannot be resolved, or with an exception from
* the {@link CastExecutor} itself if the rule can fail.
*/
@SuppressWarnings("unchecked")
public static @Nullable Object cast(
CastRule.Context context,
LogicalType inputLogicalType,
LogicalType targetLogicalType,
Object value) {
CastExecutor<Object, Object> castExecutor =
(CastExecutor<Object, Object>)
CastRuleProvider.create(context, inputLogicalType, targetLogicalType);
if (castExecutor == null) {
throw new NullPointerException(
"Unsupported casting from " + inputLogicalType + " to " + targetLogicalType);
}
return castExecutor.cast(value);
}
/**
* This method wraps {@link #generateCodeBlock(CodeGeneratorCastRule.Context, String, String,
* LogicalType, LogicalType)}, but adding the assumption that the inputTerm is always non-null.
* Used by {@link CodeGeneratorCastRule}s which checks for nullability, rather than deferring
* the check to the rules.
*/
static CastCodeBlock generateAlwaysNonNullCodeBlock(
CodeGeneratorCastRule.Context context,
String inputTerm,
LogicalType inputLogicalType,
LogicalType targetLogicalType) {
if (inputLogicalType instanceof NullType) {
return generateCodeBlock(
context, inputTerm, "true", inputLogicalType, targetLogicalType);
}
return generateCodeBlock(
context, inputTerm, "false", inputLogicalType.copy(false), targetLogicalType);
}
/* ------ Implementation ------ */
// Map<Target family or root, Map<Input family or root, rule>>
private final Map<Object, Map<Object, CastRule<?, ?>>> rules = new HashMap<>();
private final List<CastRule<?, ?>> rulesWithCustomPredicate = new ArrayList<>();
private CastRuleProvider addRule(CastRule<?, ?> rule) {
CastRulePredicate predicate = rule.getPredicateDefinition();
for (LogicalType targetType : predicate.getTargetTypes()) {
final Map<Object, CastRule<?, ?>> map =
rules.computeIfAbsent(targetType, k -> new HashMap<>());
for (LogicalTypeRoot inputTypeRoot : predicate.getInputTypeRoots()) {
map.put(inputTypeRoot, rule);
}
for (LogicalTypeFamily inputTypeFamily : predicate.getInputTypeFamilies()) {
map.put(inputTypeFamily, rule);
}
}
for (LogicalTypeRoot targetTypeRoot : predicate.getTargetTypeRoots()) {
final Map<Object, CastRule<?, ?>> map =
rules.computeIfAbsent(targetTypeRoot, k -> new HashMap<>());
for (LogicalTypeRoot inputTypeRoot : predicate.getInputTypeRoots()) {
map.put(inputTypeRoot, rule);
}
for (LogicalTypeFamily inputTypeFamily : predicate.getInputTypeFamilies()) {
map.put(inputTypeFamily, rule);
}
}
for (LogicalTypeFamily targetTypeFamily : predicate.getTargetTypeFamilies()) {
final Map<Object, CastRule<?, ?>> map =
rules.computeIfAbsent(targetTypeFamily, k -> new HashMap<>());
for (LogicalTypeRoot inputTypeRoot : predicate.getInputTypeRoots()) {
map.put(inputTypeRoot, rule);
}
for (LogicalTypeFamily inputTypeFamily : predicate.getInputTypeFamilies()) {
map.put(inputTypeFamily, rule);
}
}
if (predicate.getCustomPredicate().isPresent()) {
rulesWithCustomPredicate.add(rule);
}
return this;
}
private CastRule<?, ?> internalResolve(LogicalType input, LogicalType target) {
LogicalType inputType = unwrapDistinct(input);
LogicalType targetType = unwrapDistinct(target);
final Iterator<Object> targetTypeRootFamilyIterator =
Stream.concat(
Stream.of(targetType),
Stream.<Object>concat(
Stream.of(targetType.getTypeRoot()),
targetType.getTypeRoot().getFamilies().stream()))
.iterator();
// Try lookup by target type root/type families
while (targetTypeRootFamilyIterator.hasNext()) {
final Object targetMapKey = targetTypeRootFamilyIterator.next();
final Map<Object, CastRule<?, ?>> inputTypeToCastRuleMap = rules.get(targetMapKey);
if (inputTypeToCastRuleMap == null) {
continue;
}
// Try lookup by input type root/type families
Optional<? extends CastRule<?, ?>> rule =
Stream.<Object>concat(
Stream.of(inputType.getTypeRoot()),
inputType.getTypeRoot().getFamilies().stream())
.map(inputTypeToCastRuleMap::get)
.filter(Objects::nonNull)
.findFirst();
if (rule.isPresent()) {
return rule.get();
}
}
// Try with the custom predicate rules
return rulesWithCustomPredicate.stream()
.filter(
r ->
r.getPredicateDefinition()
.getCustomPredicate()
.map(p -> p.test(inputType, targetType))
.orElse(false))
.findFirst()
.orElse(null);
}
private LogicalType unwrapDistinct(LogicalType logicalType) {
if (logicalType.is(LogicalTypeRoot.DISTINCT_TYPE)) {
return unwrapDistinct(((DistinctType) logicalType).getSourceType());
}
return logicalType;
}
}
| CastRuleProvider |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/jobgraph/JobVertex.java | {
"start": 1968,
"end": 4058
} | class ____ implements java.io.Serializable {
private static final long serialVersionUID = 1L;
private static final String DEFAULT_NAME = "(unnamed vertex)";
public static final int MAX_PARALLELISM_DEFAULT = -1;
// --------------------------------------------------------------------------------------------
// Members that define the structure / topology of the graph
// --------------------------------------------------------------------------------------------
/** The ID of the vertex. */
private final JobVertexID id;
/**
* The IDs of all operators contained in this vertex.
*
* <p>The ID pairs are stored depth-first post-order; for the forking chain below the ID's would
* be stored as [D, E, B, C, A].
*
* <pre>
* A - B - D
* \ \
* C E
* </pre>
*
* <p>This is the same order that operators are stored in the {@code StreamTask}.
*/
private final List<OperatorIDPair> operatorIDs;
/** Produced data sets, one per writer. */
private final Map<IntermediateDataSetID, IntermediateDataSet> results = new LinkedHashMap<>();
/** List of edges with incoming data. One per Reader. */
private final List<JobEdge> inputs = new ArrayList<>();
/** The list of factories for operator coordinators. */
private final List<SerializedValue<OperatorCoordinator.Provider>> operatorCoordinators =
new ArrayList<>();
/** Number of subtasks to split this task into at runtime. */
private int parallelism = ExecutionConfig.PARALLELISM_DEFAULT;
/** Maximum number of subtasks to split this task into a runtime. */
private int maxParallelism = MAX_PARALLELISM_DEFAULT;
/** The minimum resource of the vertex. */
private ResourceSpec minResources = ResourceSpec.DEFAULT;
/** The preferred resource of the vertex. */
private ResourceSpec preferredResources = ResourceSpec.DEFAULT;
/** Custom configuration passed to the assigned task at runtime. */
private Configuration configuration;
/** The | JobVertex |
java | elastic__elasticsearch | x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/EmailAttachmentParser.java | {
"start": 808,
"end": 896
} | interface ____<T extends EmailAttachmentParser.EmailAttachment> {
| EmailAttachmentParser |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/RestrictedApiCheckerTest.java | {
"start": 3657,
"end": 4080
} | class ____ extends RestrictedApiMethods {
@Allowlist
public Subclass(int restricted) {
super(restricted);
}
@Override
public int restrictedMethod() {
return 42;
}
}
public static void accept(Runnable r) {}
}
| Subclass |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/tuple/entity/EntityMetamodel.java | {
"start": 31452,
"end": 31804
} | class ____ be lazy (ie intercepted)
*/
public boolean isInstrumented() {
return bytecodeEnhancementMetadata.isEnhancedForLazyLoading();
}
public BytecodeEnhancementMetadata getBytecodeEnhancementMetadata() {
return bytecodeEnhancementMetadata;
}
public OnDeleteAction[] getPropertyOnDeleteActions() {
return propertyOnDeleteActions;
}
}
| can |
java | elastic__elasticsearch | x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java | {
"start": 9247,
"end": 152512
} | class ____ extends SecurityIntegTestCase {
private static final long DELETE_INTERVAL_MILLIS = 100L;
private static final int CRYPTO_THREAD_POOL_QUEUE_SIZE = 10;
private static final RoleDescriptor DEFAULT_API_KEY_ROLE_DESCRIPTOR = new RoleDescriptor(
"role",
new String[] { "monitor" },
null,
null
);
private static long deleteRetentionPeriodDays;
@BeforeClass
public static void randomDeleteRetentionPeriod() {
deleteRetentionPeriodDays = randomLongBetween(0, 7);
}
@Override
public Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
return Settings.builder()
.put(super.nodeSettings(nodeOrdinal, otherSettings))
.put(XPackSettings.API_KEY_SERVICE_ENABLED_SETTING.getKey(), true)
.put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), true)
.put(ApiKeyService.DELETE_INTERVAL.getKey(), TimeValue.timeValueMillis(DELETE_INTERVAL_MILLIS))
.put(ApiKeyService.DELETE_TIMEOUT.getKey(), TimeValue.timeValueSeconds(5L))
.put(ApiKeyService.DELETE_RETENTION_PERIOD.getKey(), TimeValue.timeValueDays(deleteRetentionPeriodDays))
.put("xpack.security.crypto.thread_pool.queue_size", CRYPTO_THREAD_POOL_QUEUE_SIZE)
.build();
}
@Override
protected boolean addMockHttpTransport() {
return false; // need real http
}
@Before
public void waitForSecurityIndexWritable() throws Exception {
createSecurityIndexWithWaitForActiveShards();
}
@After
public void wipeSecurityIndex() throws Exception {
// get the api key service and wait until api key expiration is not in progress!
awaitApiKeysRemoverCompletion();
deleteSecurityIndex();
}
@Override
public String configRoles() {
return super.configRoles() + """
no_api_key_role:
cluster: ["manage_token"]
read_security_role:
cluster: ["read_security"]
manage_api_key_role:
cluster: ["manage_api_key"]
manage_own_api_key_role:
cluster: ["manage_own_api_key"]
run_as_role:
run_as: ["user_with_manage_own_api_key_role"]
""";
}
@Override
public String configUsers() {
final String usersPasswdHashed = new String(getFastStoredHashAlgoForTests().hash(TEST_PASSWORD_SECURE_STRING));
return super.configUsers()
+ "user_with_no_api_key_role:"
+ usersPasswdHashed
+ "\n"
+ "user_with_read_security_role:"
+ usersPasswdHashed
+ "\n"
+ "user_with_manage_api_key_role:"
+ usersPasswdHashed
+ "\n"
+ "user_with_manage_own_api_key_role:"
+ usersPasswdHashed
+ "\n";
}
@Override
public String configUsersRoles() {
return super.configUsersRoles() + """
no_api_key_role:user_with_no_api_key_role
read_security_role:user_with_read_security_role
manage_api_key_role:user_with_manage_api_key_role
manage_own_api_key_role:user_with_manage_own_api_key_role
""";
}
private void awaitApiKeysRemoverCompletion() throws Exception {
for (ApiKeyService apiKeyService : internalCluster().getInstances(ApiKeyService.class)) {
assertBusy(() -> assertFalse(apiKeyService.isExpirationInProgress()));
}
}
private Client authorizedClient() {
return client().filterWithHeader(
Collections.singletonMap("Authorization", basicAuthHeaderValue(ES_TEST_ROOT_USER, TEST_PASSWORD_SECURE_STRING))
);
}
public void testCreateApiKey() throws Exception {
// Get an instant without nanoseconds as the expiration has millisecond precision
final Instant start = Instant.ofEpochMilli(Instant.now().toEpochMilli());
final RoleDescriptor descriptor = new RoleDescriptor("role", new String[] { "monitor" }, null, null);
Client client = authorizedClient();
final CreateApiKeyResponse response = new CreateApiKeyRequestBuilder(client).setName("test key")
.setExpiration(TimeValue.timeValueHours(TimeUnit.DAYS.toHours(7L)))
.setRoleDescriptors(Collections.singletonList(descriptor))
.setMetadata(ApiKeyTests.randomMetadata())
.setRefreshPolicy(randomFrom(IMMEDIATE, WAIT_UNTIL))
.get();
assertEquals("test key", response.getName());
assertNotNull(response.getId());
assertNotNull(response.getKey());
Instant expiration = response.getExpiration();
// Expiration has millisecond precision
final long daysBetween = ChronoUnit.DAYS.between(start, expiration);
assertThat(daysBetween, is(7L));
assertThat(getApiKeyDocument(response.getId()).get("type"), equalTo("rest"));
assertThat(getApiKeyInfo(client(), response.getId(), randomBoolean(), randomBoolean()).getType(), is(ApiKey.Type.REST));
// create simple api key
final CreateApiKeyResponse simple = new CreateApiKeyRequestBuilder(client).setName("simple")
.setRefreshPolicy(randomFrom(IMMEDIATE, WAIT_UNTIL))
.get();
assertEquals("simple", simple.getName());
assertNotNull(simple.getId());
assertNotNull(simple.getKey());
assertThat(simple.getId(), not(containsString(new String(simple.getKey().getChars()))));
assertNull(simple.getExpiration());
// Assert that we can authenticate with the API KEY
final Map<String, Object> authResponse = authenticateWithApiKey(response.getId(), response.getKey());
assertThat(authResponse.get(User.Fields.USERNAME.getPreferredName()), equalTo(ES_TEST_ROOT_USER));
// use the first ApiKey for an unauthorized action
final Map<String, String> authorizationHeaders = Collections.singletonMap(
"Authorization",
"ApiKey " + getBase64EncodedApiKeyValue(response.getId(), response.getKey())
);
ElasticsearchSecurityException e = expectThrows(
ElasticsearchSecurityException.class,
() -> client().filterWithHeader(authorizationHeaders)
.admin()
.cluster()
.prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT)
.setPersistentSettings(Settings.builder().put(IPFilter.IP_FILTER_ENABLED_SETTING.getKey(), true))
.get()
);
assertThat(e.getMessage(), containsString("unauthorized"));
assertThat(e.status(), is(RestStatus.FORBIDDEN));
}
public void testMultipleApiKeysCanHaveSameName() {
String keyName = randomAlphaOfLength(5);
int noOfApiKeys = randomIntBetween(2, 5);
List<CreateApiKeyResponse> responses = new ArrayList<>();
for (int i = 0; i < noOfApiKeys; i++) {
final RoleDescriptor descriptor = new RoleDescriptor("role", new String[] { "monitor" }, null, null);
Client client = authorizedClient();
final CreateApiKeyResponse response = new CreateApiKeyRequestBuilder(client).setName(keyName)
.setExpiration(null)
.setRoleDescriptors(Collections.singletonList(descriptor))
.setMetadata(ApiKeyTests.randomMetadata())
.setRefreshPolicy(NONE)
.get();
assertNotNull(response.getId());
assertNotNull(response.getKey());
responses.add(response);
}
assertThat(responses.size(), is(noOfApiKeys));
for (int i = 0; i < noOfApiKeys; i++) {
assertThat(responses.get(i).getName(), is(keyName));
}
}
public void testCreateApiKeyWithoutNameWillFail() {
Client client = authorizedClient();
final ActionRequestValidationException e = expectThrows(
ActionRequestValidationException.class,
() -> new CreateApiKeyRequestBuilder(client).setRefreshPolicy(randomFrom(NONE, WAIT_UNTIL, IMMEDIATE)).get()
);
assertThat(e.getMessage(), containsString("api key name is required"));
}
public void testInvalidateApiKeysForRealm() throws InterruptedException, ExecutionException {
int noOfApiKeys = randomIntBetween(3, 5);
List<CreateApiKeyResponse> responses = createApiKeys(noOfApiKeys, null).v1();
Client client = authorizedClient();
PlainActionFuture<InvalidateApiKeyResponse> listener = new PlainActionFuture<>();
client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingRealmName("file"), listener);
InvalidateApiKeyResponse invalidateResponse = listener.get();
verifyInvalidateResponse(noOfApiKeys, responses, invalidateResponse);
}
public void testInvalidateApiKeysForUser() throws Exception {
int noOfApiKeys = randomIntBetween(3, 5);
List<CreateApiKeyResponse> responses = createApiKeys(noOfApiKeys, null).v1();
Client client = authorizedClient();
PlainActionFuture<InvalidateApiKeyResponse> listener = new PlainActionFuture<>();
client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingUserName(ES_TEST_ROOT_USER), listener);
InvalidateApiKeyResponse invalidateResponse = listener.get();
verifyInvalidateResponse(noOfApiKeys, responses, invalidateResponse);
}
public void testInvalidateApiKeysForRealmAndUser() throws InterruptedException, ExecutionException {
List<CreateApiKeyResponse> responses = createApiKeys(1, null).v1();
Client client = authorizedClient();
PlainActionFuture<InvalidateApiKeyResponse> listener = new PlainActionFuture<>();
client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingRealmAndUserName("file", ES_TEST_ROOT_USER), listener);
InvalidateApiKeyResponse invalidateResponse = listener.get();
verifyInvalidateResponse(1, responses, invalidateResponse);
}
public void testInvalidateApiKeysForApiKeyId() throws InterruptedException, ExecutionException {
List<CreateApiKeyResponse> responses = createApiKeys(1, null).v1();
Client client = authorizedClient();
PlainActionFuture<InvalidateApiKeyResponse> listener = new PlainActionFuture<>();
client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyId(responses.get(0).getId(), false), listener);
InvalidateApiKeyResponse invalidateResponse = listener.get();
verifyInvalidateResponse(1, responses, invalidateResponse);
}
public void testInvalidateApiKeysForApiKeyName() throws InterruptedException, ExecutionException {
List<CreateApiKeyResponse> responses = createApiKeys(1, null).v1();
Client client = authorizedClient();
PlainActionFuture<InvalidateApiKeyResponse> listener = new PlainActionFuture<>();
client.execute(
InvalidateApiKeyAction.INSTANCE,
InvalidateApiKeyRequest.usingApiKeyName(responses.get(0).getName(), false),
listener
);
InvalidateApiKeyResponse invalidateResponse = listener.get();
verifyInvalidateResponse(1, responses, invalidateResponse);
}
public void testInvalidateApiKeyWillClearApiKeyCache() throws IOException, ExecutionException, InterruptedException {
final List<ApiKeyService> services = Arrays.stream(internalCluster().getNodeNames())
.map(n -> internalCluster().getInstance(ApiKeyService.class, n))
.toList();
// Create two API keys and authenticate with them
Tuple<String, String> apiKey1 = createApiKeyAndAuthenticateWithIt();
Tuple<String, String> apiKey2 = createApiKeyAndAuthenticateWithIt();
// Find out which nodes handled the above authentication requests
final ApiKeyService serviceForDoc1 = services.stream()
.filter(s -> s.getDocCache().get(apiKey1.v1()) != null)
.findFirst()
.orElseThrow();
final ApiKeyService serviceForDoc2 = services.stream()
.filter(s -> s.getDocCache().get(apiKey2.v1()) != null)
.findFirst()
.orElseThrow();
assertNotNull(serviceForDoc1.getFromCache(apiKey1.v1()));
assertNotNull(serviceForDoc2.getFromCache(apiKey2.v1()));
final boolean sameServiceNode = serviceForDoc1 == serviceForDoc2;
if (sameServiceNode) {
assertEquals(2, serviceForDoc1.getDocCache().count());
} else {
assertEquals(1, serviceForDoc1.getDocCache().count());
assertEquals(1, serviceForDoc2.getDocCache().count());
}
// Invalidate the first key
Client client = authorizedClient();
PlainActionFuture<InvalidateApiKeyResponse> listener = new PlainActionFuture<>();
client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyId(apiKey1.v1(), false), listener);
InvalidateApiKeyResponse invalidateResponse = listener.get();
assertThat(invalidateResponse.getInvalidatedApiKeys().size(), equalTo(1));
// The cache entry should be gone for the first key
if (sameServiceNode) {
assertEquals(1, serviceForDoc1.getDocCache().count());
assertNull(serviceForDoc1.getDocCache().get(apiKey1.v1()));
assertNotNull(serviceForDoc1.getDocCache().get(apiKey2.v1()));
} else {
assertEquals(0, serviceForDoc1.getDocCache().count());
assertEquals(1, serviceForDoc2.getDocCache().count());
}
// Authentication with the first key should fail
ResponseException e = expectThrows(
ResponseException.class,
() -> authenticateWithApiKey(apiKey1.v1(), new SecureString(apiKey1.v2().toCharArray()))
);
assertThat(e.getMessage(), containsString("security_exception"));
assertThat(e.getResponse().getStatusLine().getStatusCode(), is(RestStatus.UNAUTHORIZED.getStatus()));
}
public void testDynamicDeletionInterval() throws Exception {
try {
// Set retention period to be 1 ms, and delete interval to be 1 hour
long deleteIntervalMs = 3600_000;
Settings.Builder builder = Settings.builder();
builder.put(ApiKeyService.DELETE_RETENTION_PERIOD.getKey(), TimeValue.timeValueMillis(1));
builder.put(ApiKeyService.DELETE_INTERVAL.getKey(), TimeValue.timeValueMillis(deleteIntervalMs));
updateClusterSettings(builder);
// Create API keys to test
List<CreateApiKeyResponse> responses = createApiKeys(3, null).v1();
String[] apiKeyIds = responses.stream().map(CreateApiKeyResponse::getId).toArray(String[]::new);
// Invalidate one API key to trigger run of inactive remover (will run once and then after DELETE_INTERVAL)
PlainActionFuture<InvalidateApiKeyResponse> listener = new PlainActionFuture<>();
Client client = authorizedClient();
client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyId(apiKeyIds[0], false), listener);
verifyInvalidateResponse(1, Collections.singletonList(responses.get(0)), listener.get());
awaitApiKeysRemoverCompletion();
refreshSecurityIndex();
// Get API keys to make sure remover didn't remove any yet
assertThat(getAllApiKeyInfo(client, false).size(), equalTo(3));
// Invalidate another key
listener = new PlainActionFuture<>();
client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyId(apiKeyIds[1], false), listener);
verifyInvalidateResponse(1, Collections.singletonList(responses.get(1)), listener.get());
awaitApiKeysRemoverCompletion();
refreshSecurityIndex();
// Get API keys to make sure remover didn't remove any yet (shouldn't be removed because of the long DELETE_INTERVAL)
assertThat(getAllApiKeyInfo(client, false).size(), equalTo(3));
// Update DELETE_INTERVAL to every 0 ms
builder = Settings.builder();
deleteIntervalMs = 0;
builder.put(ApiKeyService.DELETE_INTERVAL.getKey(), TimeValue.timeValueMillis(deleteIntervalMs));
updateClusterSettings(builder);
// Invalidate another key
listener = new PlainActionFuture<>();
client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyId(apiKeyIds[2], false), listener);
verifyInvalidateResponse(1, Collections.singletonList(responses.get(2)), listener.get());
awaitApiKeysRemoverCompletion();
refreshSecurityIndex();
// Make sure all keys except the last invalidated one are deleted
// There is a (tiny) risk that the remover runs after the invalidation and therefore deletes the key that was just
// invalidated, so 0 or 1 keys can be returned from the get api
assertThat(getAllApiKeyInfo(client, false).size(), in(Set.of(0, 1)));
} finally {
final Settings.Builder builder = Settings.builder();
builder.putNull(ApiKeyService.DELETE_INTERVAL.getKey());
builder.putNull(ApiKeyService.DELETE_RETENTION_PERIOD.getKey());
updateClusterSettings(builder);
}
}
private void verifyInvalidateResponse(
int noOfApiKeys,
List<CreateApiKeyResponse> responses,
InvalidateApiKeyResponse invalidateResponse
) {
assertThat(invalidateResponse.getInvalidatedApiKeys().size(), equalTo(noOfApiKeys));
assertThat(
invalidateResponse.getInvalidatedApiKeys(),
containsInAnyOrder(responses.stream().map(CreateApiKeyResponse::getId).toArray(String[]::new))
);
assertThat(invalidateResponse.getPreviouslyInvalidatedApiKeys().size(), equalTo(0));
assertThat(invalidateResponse.getErrors().size(), equalTo(0));
}
public void testApiKeyRemover() throws Exception {
final String namePrefix = randomAlphaOfLength(10);
try {
if (deleteRetentionPeriodDays == 0) {
doTestInvalidKeysImmediatelyDeletedByRemover(namePrefix);
// Change the setting dynamically and test the other behaviour
deleteRetentionPeriodDays = randomIntBetween(1, 7);
setRetentionPeriod(false);
doTestDeletionBehaviorWhenKeysBecomeInvalidBeforeAndAfterRetentionPeriod("not-" + namePrefix);
} else {
doTestDeletionBehaviorWhenKeysBecomeInvalidBeforeAndAfterRetentionPeriod(namePrefix);
// Change the setting dynamically and test the other behaviour
deleteRetentionPeriodDays = 0;
setRetentionPeriod(false);
doTestInvalidKeysImmediatelyDeletedByRemover("not-" + namePrefix);
}
} finally {
setRetentionPeriod(true);
}
}
private void setRetentionPeriod(boolean clear) {
final Settings.Builder builder = Settings.builder();
if (clear) {
builder.putNull(ApiKeyService.DELETE_RETENTION_PERIOD.getKey());
} else {
builder.put(ApiKeyService.DELETE_RETENTION_PERIOD.getKey(), TimeValue.timeValueDays(deleteRetentionPeriodDays));
}
updateClusterSettings(builder);
}
private void doTestInvalidKeysImmediatelyDeletedByRemover(String namePrefix) throws Exception {
assertThat(deleteRetentionPeriodDays, equalTo(0L));
Client client = waitForInactiveApiKeysRemoverTriggerReadyAndGetClient().filterWithHeader(
Collections.singletonMap("Authorization", basicAuthHeaderValue(ES_TEST_ROOT_USER, TEST_PASSWORD_SECURE_STRING))
);
// Create a very short-lived key (1ms expiration)
createApiKeys(1, TimeValue.timeValueMillis(1));
// Create keys that will not expire during this test
final CreateApiKeyResponse nonExpiringKey = createApiKeys(1, namePrefix, TimeValue.timeValueDays(1)).v1().get(0);
List<CreateApiKeyResponse> createdApiKeys = createApiKeys(2, namePrefix, randomBoolean() ? TimeValue.timeValueDays(1) : null).v1();
PlainActionFuture<InvalidateApiKeyResponse> listener = new PlainActionFuture<>();
client.execute(
InvalidateApiKeyAction.INSTANCE,
InvalidateApiKeyRequest.usingApiKeyId(createdApiKeys.get(0).getId(), false),
listener
);
InvalidateApiKeyResponse invalidateResponse = listener.get();
assertThat(invalidateResponse.getInvalidatedApiKeys().size(), equalTo(1));
assertThat(invalidateResponse.getPreviouslyInvalidatedApiKeys().size(), equalTo(0));
assertThat(invalidateResponse.getErrors().size(), equalTo(0));
awaitApiKeysRemoverCompletion();
refreshSecurityIndex();
PlainActionFuture<GetApiKeyResponse> getApiKeyResponseListener = new PlainActionFuture<>();
client.execute(
GetApiKeyAction.INSTANCE,
GetApiKeyRequest.builder().apiKeyName(namePrefix + "*").build(),
getApiKeyResponseListener
);
// The first API key with 1ms expiration should already be deleted
Set<String> expectedKeyIds = Sets.newHashSet(nonExpiringKey.getId(), createdApiKeys.get(0).getId(), createdApiKeys.get(1).getId());
boolean apiKeyInvalidatedButNotYetDeletedByExpiredApiKeysRemover = false;
for (ApiKey apiKey : getApiKeyResponseListener.get()
.getApiKeyInfoList()
.stream()
.map(GetApiKeyResponse.Item::apiKeyInfo)
.toList()) {
assertThat(apiKey.getId(), is(in(expectedKeyIds)));
if (apiKey.getId().equals(nonExpiringKey.getId())) {
assertThat(apiKey.isInvalidated(), is(false));
assertThat(apiKey.getExpiration(), notNullValue());
} else if (apiKey.getId().equals(createdApiKeys.get(0).getId())) {
// has been invalidated but not yet deleted by InactiveApiKeysRemover
assertThat(apiKey.isInvalidated(), is(true));
apiKeyInvalidatedButNotYetDeletedByExpiredApiKeysRemover = true;
} else if (apiKey.getId().equals(createdApiKeys.get(1).getId())) {
// active api key
assertThat(apiKey.isInvalidated(), is(false));
}
}
assertThat(
getApiKeyResponseListener.get().getApiKeyInfoList().size(),
is((apiKeyInvalidatedButNotYetDeletedByExpiredApiKeysRemover) ? 3 : 2)
);
client = waitForInactiveApiKeysRemoverTriggerReadyAndGetClient().filterWithHeader(
Collections.singletonMap("Authorization", basicAuthHeaderValue(ES_TEST_ROOT_USER, TEST_PASSWORD_SECURE_STRING))
);
// invalidate API key to trigger remover
listener = new PlainActionFuture<>();
client.execute(
InvalidateApiKeyAction.INSTANCE,
InvalidateApiKeyRequest.usingApiKeyId(createdApiKeys.get(1).getId(), false),
listener
);
assertThat(listener.get().getInvalidatedApiKeys().size(), is(1));
awaitApiKeysRemoverCompletion();
refreshSecurityIndex();
// Verify that 1st invalidated API key is deleted whereas the next one may be or may not be as it depends on whether update was
// indexed before InactiveApiKeysRemover ran
getApiKeyResponseListener = new PlainActionFuture<>();
client.execute(
GetApiKeyAction.INSTANCE,
GetApiKeyRequest.builder().apiKeyName(namePrefix + "*").build(),
getApiKeyResponseListener
);
expectedKeyIds = Sets.newHashSet(nonExpiringKey.getId(), createdApiKeys.get(1).getId());
apiKeyInvalidatedButNotYetDeletedByExpiredApiKeysRemover = false;
for (ApiKey apiKey : getApiKeyResponseListener.get()
.getApiKeyInfoList()
.stream()
.map(GetApiKeyResponse.Item::apiKeyInfo)
.toList()) {
assertThat(apiKey.getId(), is(in(expectedKeyIds)));
if (apiKey.getId().equals(nonExpiringKey.getId())) {
assertThat(apiKey.isInvalidated(), is(false));
assertThat(apiKey.getExpiration(), notNullValue());
} else if (apiKey.getId().equals(createdApiKeys.get(1).getId())) {
// has been invalidated but not yet deleted by InactiveApiKeysRemover
assertThat(apiKey.isInvalidated(), is(true));
apiKeyInvalidatedButNotYetDeletedByExpiredApiKeysRemover = true;
}
}
assertThat(
getApiKeyResponseListener.get().getApiKeyInfoList().size(),
is((apiKeyInvalidatedButNotYetDeletedByExpiredApiKeysRemover) ? 2 : 1)
);
}
private void doTestDeletionBehaviorWhenKeysBecomeInvalidBeforeAndAfterRetentionPeriod(String namePrefix) throws Exception {
assertThat(deleteRetentionPeriodDays, greaterThan(0L));
Client client = waitForInactiveApiKeysRemoverTriggerReadyAndGetClient().filterWithHeader(
Collections.singletonMap("Authorization", basicAuthHeaderValue(ES_TEST_ROOT_USER, TEST_PASSWORD_SECURE_STRING))
);
int noOfKeys = 9;
List<CreateApiKeyResponse> createdApiKeys = createApiKeys(noOfKeys, namePrefix, null).v1();
Instant created = Instant.now();
PlainActionFuture<GetApiKeyResponse> getApiKeyResponseListener = new PlainActionFuture<>();
client.execute(
GetApiKeyAction.INSTANCE,
GetApiKeyRequest.builder().apiKeyName(namePrefix + "*").build(),
getApiKeyResponseListener
);
assertThat(getApiKeyResponseListener.get().getApiKeyInfoList().size(), is(noOfKeys));
// Expire the 1st key such that it cannot be deleted by the remover
// hack doc to modify the expiration time
Instant withinRetention = created.minus(deleteRetentionPeriodDays - 1, ChronoUnit.DAYS);
assertFalse(created.isBefore(withinRetention));
UpdateResponse expirationDateUpdatedResponse = client.prepareUpdate(SECURITY_MAIN_ALIAS, createdApiKeys.get(0).getId())
.setDoc("expiration_time", withinRetention.toEpochMilli())
.setRefreshPolicy(IMMEDIATE)
.get();
assertThat(expirationDateUpdatedResponse.getResult(), is(DocWriteResponse.Result.UPDATED));
// Expire the 2nd key such that it can be deleted by the remover
// hack doc to modify the expiration time
Instant outsideRetention = created.minus(deleteRetentionPeriodDays + 1, ChronoUnit.DAYS);
assertTrue(Instant.now().isAfter(outsideRetention));
expirationDateUpdatedResponse = client.prepareUpdate(SECURITY_MAIN_ALIAS, createdApiKeys.get(1).getId())
.setDoc("expiration_time", outsideRetention.toEpochMilli())
.setRefreshPolicy(IMMEDIATE)
.get();
assertThat(expirationDateUpdatedResponse.getResult(), is(DocWriteResponse.Result.UPDATED));
// Invalidate the 3rd key such that it cannot be deleted by the remover
UpdateResponse invalidateUpdateResponse = client.prepareUpdate(SECURITY_MAIN_ALIAS, createdApiKeys.get(2).getId())
.setDoc("invalidation_time", withinRetention.toEpochMilli(), "api_key_invalidated", true)
.setRefreshPolicy(IMMEDIATE)
.get();
assertThat(invalidateUpdateResponse.getResult(), is(DocWriteResponse.Result.UPDATED));
// Invalidate the 4th key such that it will be deleted by the remover
invalidateUpdateResponse = client.prepareUpdate(SECURITY_MAIN_ALIAS, createdApiKeys.get(3).getId())
.setDoc("invalidation_time", outsideRetention.toEpochMilli(), "api_key_invalidated", true)
.setRefreshPolicy(IMMEDIATE)
.get();
assertThat(invalidateUpdateResponse.getResult(), is(DocWriteResponse.Result.UPDATED));
// 5th key will be deleted because its expiration is outside of retention even though its invalidation time is not
UpdateResponse updateResponse = client.prepareUpdate(SECURITY_MAIN_ALIAS, createdApiKeys.get(4).getId())
.setDoc(
"expiration_time",
outsideRetention.toEpochMilli(),
"invalidation_time",
withinRetention.toEpochMilli(),
"api_key_invalidated",
true
)
.setRefreshPolicy(IMMEDIATE)
.get();
assertThat(updateResponse.getResult(), is(DocWriteResponse.Result.UPDATED));
// 6th key will be deleted because its invalidation time is outside of retention even though its expiration is not
updateResponse = client.prepareUpdate(SECURITY_MAIN_ALIAS, createdApiKeys.get(5).getId())
.setDoc(
"expiration_time",
withinRetention.toEpochMilli(),
"invalidation_time",
outsideRetention.toEpochMilli(),
"api_key_invalidated",
true
)
.setRefreshPolicy(IMMEDIATE)
.get();
assertThat(updateResponse.getResult(), is(DocWriteResponse.Result.UPDATED));
// 7th key will be deleted because it has old style invalidation (no invalidation time)
// It does not matter whether it has an expiration time or whether the expiration time is still within retention period
updateResponse = client.prepareUpdate(SECURITY_MAIN_ALIAS, createdApiKeys.get(6).getId())
.setDoc("api_key_invalidated", true, "expiration_time", randomBoolean() ? withinRetention.toEpochMilli() : null)
.setRefreshPolicy(IMMEDIATE)
.get();
assertThat(updateResponse.getResult(), is(DocWriteResponse.Result.UPDATED));
// Invalidate to trigger the remover
PlainActionFuture<InvalidateApiKeyResponse> listener = new PlainActionFuture<>();
client.execute(
InvalidateApiKeyAction.INSTANCE,
InvalidateApiKeyRequest.usingApiKeyId(createdApiKeys.get(7).getId(), false),
listener
);
assertThat(listener.get().getInvalidatedApiKeys().size(), is(1));
awaitApiKeysRemoverCompletion();
refreshSecurityIndex();
// Verify get API keys does not return api keys deleted by InactiveApiKeysRemover
getApiKeyResponseListener = new PlainActionFuture<>();
client.execute(
GetApiKeyAction.INSTANCE,
GetApiKeyRequest.builder().apiKeyName(namePrefix + "*").build(),
getApiKeyResponseListener
);
Set<String> expectedKeyIds = Sets.newHashSet(
createdApiKeys.get(0).getId(),
createdApiKeys.get(2).getId(),
createdApiKeys.get(7).getId(),
createdApiKeys.get(8).getId()
);
for (ApiKey apiKey : getApiKeyResponseListener.get()
.getApiKeyInfoList()
.stream()
.map(GetApiKeyResponse.Item::apiKeyInfo)
.toList()) {
assertThat(apiKey.getId(), is(in(expectedKeyIds)));
if (apiKey.getId().equals(createdApiKeys.get(0).getId())) {
// has been expired, not invalidated
assertTrue(apiKey.getExpiration().isBefore(Instant.now()));
assertThat(apiKey.isInvalidated(), is(false));
} else if (apiKey.getId().equals(createdApiKeys.get(2).getId())) {
// has been invalidated, not expired
assertThat(apiKey.getExpiration(), nullValue());
assertThat(apiKey.isInvalidated(), is(true));
} else if (apiKey.getId().equals(createdApiKeys.get(7).getId())) {
// has not been expired as no expiration, is invalidated but not yet deleted by InactiveApiKeysRemover
assertThat(apiKey.getExpiration(), is(nullValue()));
assertThat(apiKey.isInvalidated(), is(true));
} else if (apiKey.getId().equals(createdApiKeys.get(8).getId())) {
// has not been expired as no expiration, not invalidated
assertThat(apiKey.getExpiration(), is(nullValue()));
assertThat(apiKey.isInvalidated(), is(false));
} else {
fail("unexpected API key " + apiKey);
}
}
assertThat(getApiKeyResponseListener.get().getApiKeyInfoList().size(), is(4));
}
private Client waitForInactiveApiKeysRemoverTriggerReadyAndGetClient() throws Exception {
String[] nodeNames = internalCluster().getNodeNames();
// Default to first node in list of no remover run detected
String nodeWithMostRecentRun = nodeNames[0];
final long[] apiKeyRemoverLastTriggerTimestamp = new long[] { -1 };
for (String nodeName : nodeNames) {
ApiKeyService apiKeyService = internalCluster().getInstance(ApiKeyService.class, nodeName);
if (apiKeyService != null) {
if (apiKeyService.lastTimeWhenApiKeysRemoverWasTriggered() > apiKeyRemoverLastTriggerTimestamp[0]) {
nodeWithMostRecentRun = nodeName;
apiKeyRemoverLastTriggerTimestamp[0] = apiKeyService.lastTimeWhenApiKeysRemoverWasTriggered();
}
}
}
final ThreadPool threadPool = internalCluster().getInstance(ThreadPool.class, nodeWithMostRecentRun);
// If remover didn't run, no need to wait, just return a client
if (apiKeyRemoverLastTriggerTimestamp[0] == -1) {
return internalCluster().client(nodeWithMostRecentRun);
}
// If remover ran, wait until delete interval has passed to make sure next invalidate will trigger remover
assertBusy(
() -> assertThat(threadPool.relativeTimeInMillis() - apiKeyRemoverLastTriggerTimestamp[0], greaterThan(DELETE_INTERVAL_MILLIS))
);
return internalCluster().client(nodeWithMostRecentRun);
}
private void refreshSecurityIndex() throws Exception {
assertBusy(() -> {
final BroadcastResponse refreshResponse = indicesAdmin().prepareRefresh(SECURITY_MAIN_ALIAS).get();
assertThat(refreshResponse.getFailedShards(), is(0));
});
}
/**
 * Creates two API keys without an expiration, invalidates one of them (which also triggers the
 * inactive-api-keys remover), and verifies via the Get API that both keys are still returned —
 * one as valid and one as invalidated — i.e. the remover did not delete either of them.
 */
public void testActiveApiKeysWithNoExpirationNeverGetDeletedByRemover() throws Exception {
    // null expiration: keys never expire
    final Tuple<List<CreateApiKeyResponse>, List<Map<String, Object>>> tuple = createApiKeys(2, null);
    List<CreateApiKeyResponse> responses = tuple.v1();
    Client client = authorizedClient();
    PlainActionFuture<InvalidateApiKeyResponse> listener = new PlainActionFuture<>();
    // trigger expired keys remover
    client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyId(responses.get(1).getId(), false), listener);
    InvalidateApiKeyResponse invalidateResponse = listener.get();
    assertThat(invalidateResponse.getInvalidatedApiKeys().size(), equalTo(1));
    assertThat(invalidateResponse.getPreviouslyInvalidatedApiKeys().size(), equalTo(0));
    assertThat(invalidateResponse.getErrors().size(), equalTo(0));
    final boolean withLimitedBy = randomBoolean();
    PlainActionFuture<GetApiKeyResponse> getApiKeyResponseListener = new PlainActionFuture<>();
    client.execute(
        GetApiKeyAction.INSTANCE,
        GetApiKeyRequest.builder().realmName("file").withLimitedBy(withLimitedBy).build(),
        getApiKeyResponseListener
    );
    GetApiKeyResponse response = getApiKeyResponseListener.get();
    // Both keys must still exist: key 0 valid, key 1 invalidated (last two args)
    verifyApiKeyInfos(
        2,
        responses,
        tuple.v2(),
        List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR),
        withLimitedBy ? List.of(ES_TEST_ROOT_ROLE_DESCRIPTOR) : null,
        response.getApiKeyInfoList(),
        Collections.singleton(responses.get(0).getId()),
        Collections.singletonList(responses.get(1).getId())
    );
}
/**
 * Gets API keys filtered by realm name ("file"). Randomly invalidates the first key beforehand
 * and verifies the Get response reports it as invalidated while the remaining keys are valid.
 */
public void testGetApiKeysForRealm() throws InterruptedException, ExecutionException, IOException {
    int noOfApiKeys = randomIntBetween(3, 5);
    final Tuple<List<CreateApiKeyResponse>, List<Map<String, Object>>> tuple = createApiKeys(noOfApiKeys, null);
    List<CreateApiKeyResponse> responses = tuple.v1();
    Client client = authorizedClient();
    boolean invalidate = randomBoolean();
    List<String> invalidatedApiKeyIds = null;
    Set<String> expectedValidKeyIds = null;
    if (invalidate) {
        // Invalidate the first key; it should show up as invalidated in the Get response below
        PlainActionFuture<InvalidateApiKeyResponse> listener = new PlainActionFuture<>();
        client.execute(
            InvalidateApiKeyAction.INSTANCE,
            InvalidateApiKeyRequest.usingApiKeyId(responses.get(0).getId(), false),
            listener
        );
        InvalidateApiKeyResponse invalidateResponse = listener.get();
        invalidatedApiKeyIds = invalidateResponse.getInvalidatedApiKeys();
        // All keys except the invalidated first one remain valid
        expectedValidKeyIds = responses.stream()
            .filter(o -> o.getId().equals(responses.get(0).getId()) == false)
            .map(o -> o.getId())
            .collect(Collectors.toSet());
    } else {
        invalidatedApiKeyIds = Collections.emptyList();
        expectedValidKeyIds = responses.stream().map(o -> o.getId()).collect(Collectors.toSet());
    }
    final boolean withLimitedBy = randomBoolean();
    PlainActionFuture<GetApiKeyResponse> listener = new PlainActionFuture<>();
    client.execute(
        GetApiKeyAction.INSTANCE,
        GetApiKeyRequest.builder().realmName("file").withLimitedBy(withLimitedBy).build(),
        listener
    );
    GetApiKeyResponse response = listener.get();
    verifyApiKeyInfos(
        noOfApiKeys,
        responses,
        tuple.v2(),
        List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR),
        withLimitedBy ? List.of(ES_TEST_ROOT_ROLE_DESCRIPTOR) : null,
        response.getApiKeyInfoList(),
        expectedValidKeyIds,
        invalidatedApiKeyIds
    );
}
/**
 * Gets API keys filtered by owner user name and verifies all keys created for that user are
 * returned as valid (none invalidated).
 */
public void testGetApiKeysForUser() throws Exception {
    int noOfApiKeys = randomIntBetween(3, 5);
    final Tuple<List<CreateApiKeyResponse>, List<Map<String, Object>>> tuple = createApiKeys(noOfApiKeys, null);
    List<CreateApiKeyResponse> responses = tuple.v1();
    Client client = authorizedClient();
    final boolean withLimitedBy = randomBoolean();
    PlainActionFuture<GetApiKeyResponse> listener = new PlainActionFuture<>();
    client.execute(
        GetApiKeyAction.INSTANCE,
        GetApiKeyRequest.builder().userName(ES_TEST_ROOT_USER).withLimitedBy(withLimitedBy).build(),
        listener
    );
    GetApiKeyResponse response = listener.get();
    verifyApiKeyInfos(
        noOfApiKeys,
        responses,
        tuple.v2(),
        List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR),
        withLimitedBy ? List.of(ES_TEST_ROOT_ROLE_DESCRIPTOR) : null,
        response.getApiKeyInfoList(),
        responses.stream().map(o -> o.getId()).collect(Collectors.toSet()),
        null
    );
}
/**
 * Gets API keys filtered by both realm name and user name, verifying the single created key is
 * returned as valid.
 */
public void testGetApiKeysForRealmAndUser() throws InterruptedException, ExecutionException, IOException {
    final Tuple<List<CreateApiKeyResponse>, List<Map<String, Object>>> tuple = createApiKeys(1, null);
    List<CreateApiKeyResponse> responses = tuple.v1();
    Client client = authorizedClient();
    final boolean withLimitedBy = randomBoolean();
    PlainActionFuture<GetApiKeyResponse> listener = new PlainActionFuture<>();
    client.execute(
        GetApiKeyAction.INSTANCE,
        GetApiKeyRequest.builder().realmName("file").userName(ES_TEST_ROOT_USER).withLimitedBy(withLimitedBy).build(),
        listener
    );
    GetApiKeyResponse response = listener.get();
    verifyApiKeyInfos(
        1,
        responses,
        tuple.v2(),
        List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR),
        withLimitedBy ? List.of(ES_TEST_ROOT_ROLE_DESCRIPTOR) : null,
        response.getApiKeyInfoList(),
        Collections.singleton(responses.get(0).getId()),
        null
    );
}
/**
 * Gets a single API key by its id and verifies it is returned as valid.
 */
public void testGetApiKeysForApiKeyId() throws InterruptedException, ExecutionException, IOException {
    final Tuple<List<CreateApiKeyResponse>, List<Map<String, Object>>> tuple = createApiKeys(1, null);
    List<CreateApiKeyResponse> responses = tuple.v1();
    Client client = authorizedClient();
    final boolean withLimitedBy = randomBoolean();
    PlainActionFuture<GetApiKeyResponse> listener = new PlainActionFuture<>();
    client.execute(
        GetApiKeyAction.INSTANCE,
        GetApiKeyRequest.builder().apiKeyId(responses.get(0).getId()).withLimitedBy(withLimitedBy).build(),
        listener
    );
    GetApiKeyResponse response = listener.get();
    verifyApiKeyInfos(
        1,
        responses,
        tuple.v2(),
        List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR),
        withLimitedBy ? List.of(ES_TEST_ROOT_ROLE_DESCRIPTOR) : null,
        response.getApiKeyInfoList(),
        Collections.singleton(responses.get(0).getId()),
        null
    );
}
/**
 * Gets API keys by name, both by exact name and by wildcard patterns. Two batches of keys are
 * created (default "test-key-" prefix and "another-test-key-" prefix); the test then queries by
 * exact name, by each prefix pattern, by "*" (all keys), and by a non-matching pattern.
 */
public void testGetApiKeysForApiKeyName() throws InterruptedException, ExecutionException, IOException {
    final Map<String, String> headers = Collections.singletonMap(
        "Authorization",
        basicAuthHeaderValue(ES_TEST_ROOT_USER, TEST_PASSWORD_SECURE_STRING)
    );
    final int noOfApiKeys = randomIntBetween(1, 3);
    final Tuple<List<CreateApiKeyResponse>, List<Map<String, Object>>> tuple1 = createApiKeys(noOfApiKeys, null);
    final List<CreateApiKeyResponse> createApiKeyResponses1 = tuple1.v1();
    // Second batch with a distinct name prefix so prefix queries can distinguish the batches
    final Tuple<List<CreateApiKeyResponse>, List<Map<String, Object>>> tuple2 = createApiKeys(
        headers,
        noOfApiKeys,
        "another-test-key-",
        null,
        "monitor"
    );
    final List<CreateApiKeyResponse> createApiKeyResponses2 = tuple2.v1();
    Client client = client().filterWithHeader(headers);
    final boolean withLimitedBy = randomBoolean();
    PlainActionFuture<GetApiKeyResponse> listener = new PlainActionFuture<>();
    @SuppressWarnings("unchecked")
    List<CreateApiKeyResponse> responses = randomFrom(createApiKeyResponses1, createApiKeyResponses2);
    List<Map<String, Object>> metadatas = responses == createApiKeyResponses1 ? tuple1.v2() : tuple2.v2();
    // Query 1: exact name of a randomly-chosen key
    client.execute(
        GetApiKeyAction.INSTANCE,
        GetApiKeyRequest.builder().apiKeyName(responses.get(0).getName()).withLimitedBy(withLimitedBy).build(),
        listener
    );
    // role descriptors are the same between randomization
    verifyApiKeyInfos(
        1,
        responses,
        metadatas,
        List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR),
        withLimitedBy ? List.of(ES_TEST_ROOT_ROLE_DESCRIPTOR) : null,
        listener.get().getApiKeyInfoList(),
        Collections.singleton(responses.get(0).getId()),
        null
    );
    // Query 2: "test-key*" prefix matches only the first batch
    PlainActionFuture<GetApiKeyResponse> listener2 = new PlainActionFuture<>();
    client.execute(
        GetApiKeyAction.INSTANCE,
        GetApiKeyRequest.builder().apiKeyName("test-key*").withLimitedBy(withLimitedBy).build(),
        listener2
    );
    verifyApiKeyInfos(
        noOfApiKeys,
        createApiKeyResponses1,
        tuple1.v2(),
        List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR),
        withLimitedBy ? List.of(ES_TEST_ROOT_ROLE_DESCRIPTOR) : null,
        listener2.get().getApiKeyInfoList(),
        createApiKeyResponses1.stream().map(CreateApiKeyResponse::getId).collect(Collectors.toSet()),
        null
    );
    expectAttributesForApiKeys(
        createApiKeyResponses1.stream().map(CreateApiKeyResponse::getId).toList(),
        Map.of(ApiKeyAttribute.ASSIGNED_ROLE_DESCRIPTORS, List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR))
    );
    // Query 3: "*" matches both batches
    PlainActionFuture<GetApiKeyResponse> listener3 = new PlainActionFuture<>();
    client.execute(
        GetApiKeyAction.INSTANCE,
        GetApiKeyRequest.builder().apiKeyName("*").withLimitedBy(withLimitedBy).build(),
        listener3
    );
    responses = Stream.concat(createApiKeyResponses1.stream(), createApiKeyResponses2.stream()).collect(Collectors.toList());
    metadatas = Stream.concat(tuple1.v2().stream(), tuple2.v2().stream()).collect(Collectors.toList());
    verifyApiKeyInfos(
        2 * noOfApiKeys,
        responses,
        metadatas,
        List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR),
        withLimitedBy ? List.of(ES_TEST_ROOT_ROLE_DESCRIPTOR) : null,
        listener3.get().getApiKeyInfoList(),
        responses.stream().map(CreateApiKeyResponse::getId).collect(Collectors.toSet()),
        null
    );
    // Query 4: a pattern that matches nothing returns an empty result
    PlainActionFuture<GetApiKeyResponse> listener4 = new PlainActionFuture<>();
    client.execute(
        GetApiKeyAction.INSTANCE,
        GetApiKeyRequest.builder().apiKeyName("does-not-exist*").withLimitedBy(withLimitedBy).build(),
        listener4
    );
    verifyApiKeyInfos(
        0,
        Collections.emptyList(),
        null,
        List.of(),
        List.of(),
        listener4.get().getApiKeyInfoList(),
        Collections.emptySet(),
        null
    );
    // Query 5: "another-test-key*" prefix matches only the second batch
    PlainActionFuture<GetApiKeyResponse> listener5 = new PlainActionFuture<>();
    client.execute(
        GetApiKeyAction.INSTANCE,
        GetApiKeyRequest.builder().apiKeyName("another-test-key*").withLimitedBy(withLimitedBy).build(),
        listener5
    );
    verifyApiKeyInfos(
        noOfApiKeys,
        createApiKeyResponses2,
        tuple2.v2(),
        List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR),
        withLimitedBy ? List.of(ES_TEST_ROOT_ROLE_DESCRIPTOR) : null,
        listener5.get().getApiKeyInfoList(),
        createApiKeyResponses2.stream().map(CreateApiKeyResponse::getId).collect(Collectors.toSet()),
        null
    );
}
/**
 * Verifies that GetApiKeyRequest with ownedByAuthenticatedUser returns only the keys owned by
 * the authenticated user, not keys owned by other users (the superuser keys created first).
 */
public void testGetApiKeysOwnedByCurrentAuthenticatedUser() throws InterruptedException, ExecutionException {
    int noOfSuperuserApiKeys = randomIntBetween(3, 5);
    int noOfApiKeysForUserWithManageApiKeyRole = randomIntBetween(3, 5);
    // Created only so keys owned by a different user exist; they must NOT be returned below.
    // (The previously-bound local was unused, so the result is intentionally discarded.)
    createApiKeys(noOfSuperuserApiKeys, null);
    String userWithManageApiKeyRole = randomFrom("user_with_manage_api_key_role", "user_with_manage_own_api_key_role");
    final Tuple<List<CreateApiKeyResponse>, List<Map<String, Object>>> tuple = createApiKeys(
        userWithManageApiKeyRole,
        noOfApiKeysForUserWithManageApiKeyRole,
        null,
        "monitor"
    );
    List<CreateApiKeyResponse> userWithManageApiKeyRoleApiKeys = tuple.v1();
    final Client client = client().filterWithHeader(
        Collections.singletonMap("Authorization", basicAuthHeaderValue(userWithManageApiKeyRole, TEST_PASSWORD_SECURE_STRING))
    );
    final boolean withLimitedBy = randomBoolean();
    // Expected limited-by role descriptors depend on which user was randomly selected
    final List<RoleDescriptor> expectedLimitedByRoleDescriptors;
    if (withLimitedBy) {
        if (userWithManageApiKeyRole.equals("user_with_manage_api_key_role")) {
            expectedLimitedByRoleDescriptors = List.of(
                new RoleDescriptor("manage_api_key_role", new String[] { "manage_api_key" }, null, null)
            );
        } else {
            expectedLimitedByRoleDescriptors = List.of(
                new RoleDescriptor("manage_own_api_key_role", new String[] { "manage_own_api_key" }, null, null)
            );
        }
    } else {
        expectedLimitedByRoleDescriptors = null;
    }
    PlainActionFuture<GetApiKeyResponse> listener = new PlainActionFuture<>();
    client.execute(
        GetApiKeyAction.INSTANCE,
        GetApiKeyRequest.builder().ownedByAuthenticatedUser().withLimitedBy(withLimitedBy).build(),
        listener
    );
    verifyApiKeyInfos(
        userWithManageApiKeyRole,
        noOfApiKeysForUserWithManageApiKeyRole,
        userWithManageApiKeyRoleApiKeys,
        tuple.v2(),
        List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR),
        expectedLimitedByRoleDescriptors,
        listener.get().getApiKeyInfoList().stream().map(GetApiKeyResponse.Item::apiKeyInfo).toList(),
        userWithManageApiKeyRoleApiKeys.stream().map(o -> o.getId()).collect(Collectors.toSet()),
        null
    );
}
/**
 * When authenticating as a run-as user, ownedByAuthenticatedUser must resolve to the effective
 * (run-as target) user: only keys owned by "user_with_manage_own_api_key_role" are returned.
 */
public void testGetApiKeysOwnedByRunAsUserWhenOwnerIsTrue() throws ExecutionException, InterruptedException {
    createUserWithRunAsRole();
    int noOfSuperuserApiKeys = randomIntBetween(3, 5);
    int noOfApiKeysForUserWithManageApiKeyRole = randomIntBetween(3, 5);
    // Keys owned by the superuser; these must not appear in the owned-by results
    createApiKeys(noOfSuperuserApiKeys, null);
    // Keys created as "user_with_manage_own_api_key_role" via run-as by "user_with_run_as_role"
    final Tuple<List<CreateApiKeyResponse>, List<Map<String, Object>>> tuple = createApiKeys(
        "user_with_manage_own_api_key_role",
        "user_with_run_as_role",
        noOfApiKeysForUserWithManageApiKeyRole,
        null,
        "monitor"
    );
    List<CreateApiKeyResponse> userWithManageOwnApiKeyRoleApiKeys = tuple.v1();
    final boolean withLimitedBy = randomBoolean();
    PlainActionFuture<GetApiKeyResponse> listener = new PlainActionFuture<>();
    getClientForRunAsUser().execute(
        GetApiKeyAction.INSTANCE,
        GetApiKeyRequest.builder().ownedByAuthenticatedUser().withLimitedBy(withLimitedBy).build(),
        listener
    );
    verifyApiKeyInfos(
        "user_with_manage_own_api_key_role",
        noOfApiKeysForUserWithManageApiKeyRole,
        userWithManageOwnApiKeyRoleApiKeys,
        tuple.v2(),
        List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR),
        withLimitedBy
            ? List.of(new RoleDescriptor("manage_own_api_key_role", new String[] { "manage_own_api_key" }, null, null))
            : null,
        listener.get().getApiKeyInfoList().stream().map(GetApiKeyResponse.Item::apiKeyInfo).toList(),
        userWithManageOwnApiKeyRoleApiKeys.stream().map(o -> o.getId()).collect(Collectors.toSet()),
        null
    );
}
/**
 * Same run-as setup as above, but querying explicitly by the run-as target's realm and user
 * name instead of ownedByAuthenticatedUser; the target user's keys must be returned.
 */
public void testGetApiKeysOwnedByRunAsUserWhenRunAsUserInfoIsGiven() throws ExecutionException, InterruptedException {
    createUserWithRunAsRole();
    int noOfSuperuserApiKeys = randomIntBetween(3, 5);
    int noOfApiKeysForUserWithManageApiKeyRole = randomIntBetween(3, 5);
    // Keys owned by the superuser; these must not appear in the results
    createApiKeys(noOfSuperuserApiKeys, null);
    final Tuple<List<CreateApiKeyResponse>, List<Map<String, Object>>> tuple = createApiKeys(
        "user_with_manage_own_api_key_role",
        "user_with_run_as_role",
        noOfApiKeysForUserWithManageApiKeyRole,
        null,
        "monitor"
    );
    List<CreateApiKeyResponse> userWithManageOwnApiKeyRoleApiKeys = tuple.v1();
    final boolean withLimitedBy = randomBoolean();
    PlainActionFuture<GetApiKeyResponse> listener = new PlainActionFuture<>();
    // Explicit realm + user name of the run-as target user
    getClientForRunAsUser().execute(
        GetApiKeyAction.INSTANCE,
        GetApiKeyRequest.builder().realmName("file").userName("user_with_manage_own_api_key_role").withLimitedBy(withLimitedBy).build(),
        listener
    );
    verifyApiKeyInfos(
        "user_with_manage_own_api_key_role",
        noOfApiKeysForUserWithManageApiKeyRole,
        userWithManageOwnApiKeyRoleApiKeys,
        tuple.v2(),
        List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR),
        withLimitedBy
            ? List.of(new RoleDescriptor("manage_own_api_key_role", new String[] { "manage_own_api_key" }, null, null))
            : null,
        listener.get().getApiKeyInfoList().stream().map(GetApiKeyResponse.Item::apiKeyInfo).toList(),
        userWithManageOwnApiKeyRoleApiKeys.stream().map(o -> o.getId()).collect(Collectors.toSet()),
        null
    );
}
/**
 * With run-as authentication, querying by any realm/user combination that does not match the
 * run-as target (e.g. the authenticating user, or a wrong realm) must be rejected with an
 * authorization error.
 */
public void testGetApiKeysOwnedByRunAsUserWillNotWorkWhenAuthUserInfoIsGiven() throws ExecutionException, InterruptedException {
    createUserWithRunAsRole();
    int noOfSuperuserApiKeys = randomIntBetween(3, 5);
    int noOfApiKeysForUserWithManageApiKeyRole = randomIntBetween(3, 5);
    createApiKeys(noOfSuperuserApiKeys, null);
    final List<CreateApiKeyResponse> userWithManageOwnApiKeyRoleApiKeys = createApiKeys(
        "user_with_manage_own_api_key_role",
        "user_with_run_as_role",
        noOfApiKeysForUserWithManageApiKeyRole,
        null,
        "monitor"
    ).v1();
    PlainActionFuture<GetApiKeyResponse> listener = new PlainActionFuture<>();
    // Each pair differs from the valid ("file", "user_with_manage_own_api_key_role") combination
    @SuppressWarnings("unchecked")
    final Tuple<String, String> invalidRealmAndUserPair = randomFrom(
        new Tuple<>("file", "user_with_run_as_role"),
        new Tuple<>("index", "user_with_manage_own_api_key_role"),
        new Tuple<>("index", "user_with_run_as_role")
    );
    getClientForRunAsUser().execute(
        GetApiKeyAction.INSTANCE,
        GetApiKeyRequest.builder().realmName(invalidRealmAndUserPair.v1()).userName(invalidRealmAndUserPair.v2()).build(),
        listener
    );
    final ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, listener::actionGet);
    assertThat(
        e.getMessage(),
        containsString("unauthorized for user [user_with_run_as_role] run as [user_with_manage_own_api_key_role]")
    );
}
/**
 * Creates API keys for three different users and verifies that a user with sufficient
 * privileges ("read_security" or "manage_api_key") can retrieve all of them, with the correct
 * limited-by role descriptors per owning user when requested.
 */
public void testGetAllApiKeys() throws InterruptedException, ExecutionException {
    int noOfSuperuserApiKeys = randomIntBetween(3, 5);
    int noOfApiKeysForUserWithManageApiKeyRole = randomIntBetween(3, 5);
    int noOfApiKeysForUserWithManageOwnApiKeyRole = randomIntBetween(3, 7);
    final Tuple<List<CreateApiKeyResponse>, List<Map<String, Object>>> defaultUserTuple = createApiKeys(noOfSuperuserApiKeys, null);
    List<CreateApiKeyResponse> defaultUserCreatedKeys = defaultUserTuple.v1();
    final Tuple<List<CreateApiKeyResponse>, List<Map<String, Object>>> userWithManageTuple = createApiKeys(
        "user_with_manage_api_key_role",
        noOfApiKeysForUserWithManageApiKeyRole,
        null,
        "monitor"
    );
    List<CreateApiKeyResponse> userWithManageApiKeyRoleApiKeys = userWithManageTuple.v1();
    final Tuple<List<CreateApiKeyResponse>, List<Map<String, Object>>> userWithManageOwnTuple = createApiKeys(
        "user_with_manage_own_api_key_role",
        noOfApiKeysForUserWithManageOwnApiKeyRole,
        null,
        "monitor"
    );
    List<CreateApiKeyResponse> userWithManageOwnApiKeyRoleApiKeys = userWithManageOwnTuple.v1();
    // Either privilege ("read_security" or "manage_api_key") should allow retrieving all keys
    final Client client = client().filterWithHeader(
        Collections.singletonMap(
            "Authorization",
            basicAuthHeaderValue(
                randomFrom("user_with_read_security_role", "user_with_manage_api_key_role"),
                TEST_PASSWORD_SECURE_STRING
            )
        )
    );
    final boolean withLimitedBy = randomBoolean();
    int totalApiKeys = noOfSuperuserApiKeys + noOfApiKeysForUserWithManageApiKeyRole + noOfApiKeysForUserWithManageOwnApiKeyRole;
    // Flatten the three batches of keys and metadata in the same order for verification
    List<CreateApiKeyResponse> allApiKeys = new ArrayList<>();
    Stream.of(defaultUserCreatedKeys, userWithManageApiKeyRoleApiKeys, userWithManageOwnApiKeyRoleApiKeys).forEach(allApiKeys::addAll);
    final List<Map<String, Object>> metadatas = Stream.of(defaultUserTuple.v2(), userWithManageTuple.v2(), userWithManageOwnTuple.v2())
        .flatMap(List::stream)
        .collect(Collectors.toList());
    // Each owning user has different limited-by role descriptors; look them up per username
    final Function<String, List<RoleDescriptor>> expectedLimitedByRoleDescriptorsLookup = username -> {
        if (withLimitedBy) {
            return switch (username) {
                case ES_TEST_ROOT_USER -> List.of(ES_TEST_ROOT_ROLE_DESCRIPTOR);
                case "user_with_manage_api_key_role" -> List.of(
                    new RoleDescriptor("manage_api_key_role", new String[] { "manage_api_key" }, null, null)
                );
                case "user_with_manage_own_api_key_role" -> List.of(
                    new RoleDescriptor("manage_own_api_key_role", new String[] { "manage_own_api_key" }, null, null)
                );
                default -> throw new IllegalStateException("unknown username: " + username);
            };
        } else {
            return null;
        }
    };
    verifyApiKeyInfos(
        new String[] { ES_TEST_ROOT_USER, "user_with_manage_api_key_role", "user_with_manage_own_api_key_role" },
        totalApiKeys,
        allApiKeys,
        metadatas,
        List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR),
        expectedLimitedByRoleDescriptorsLookup,
        getAllApiKeyInfo(client, withLimitedBy),
        allApiKeys.stream().map(CreateApiKeyResponse::getId).collect(Collectors.toSet()),
        null
    );
}
/**
 * A user with no API-key privileges, or with only manage_own_api_key, must not be able to
 * retrieve ALL API keys: an unfiltered Get request is rejected with an authorization error.
 */
public void testGetAllApiKeysFailsForUserWithNoRoleOrRetrieveOwnApiKeyRole() throws InterruptedException, ExecutionException {
    int noOfSuperuserApiKeys = randomIntBetween(3, 5);
    int noOfApiKeysForUserWithManageApiKeyRole = randomIntBetween(3, 5);
    int noOfApiKeysForUserWithManageOwnApiKeyRole = randomIntBetween(3, 7);
    // The keys are created only so there is something that could be retrieved; the created
    // responses themselves are not needed (the previously-bound locals were unused).
    createApiKeys(noOfSuperuserApiKeys, null);
    createApiKeys("user_with_manage_api_key_role", noOfApiKeysForUserWithManageApiKeyRole, null, "monitor");
    createApiKeys("user_with_manage_own_api_key_role", noOfApiKeysForUserWithManageOwnApiKeyRole, null, "monitor");
    final String withUser = randomFrom("user_with_manage_own_api_key_role", "user_with_no_api_key_role");
    final Client client = client().filterWithHeader(
        Collections.singletonMap("Authorization", basicAuthHeaderValue(withUser, TEST_PASSWORD_SECURE_STRING))
    );
    PlainActionFuture<GetApiKeyResponse> listener = new PlainActionFuture<>();
    // Unfiltered request: asks for all keys, which neither user is authorized to see
    client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.builder().build(), listener);
    ElasticsearchSecurityException ese = expectThrows(ElasticsearchSecurityException.class, () -> listener.actionGet());
    assertErrorMessage(ese, "cluster:admin/xpack/security/api_key/get", withUser);
}
/**
 * InvalidateApiKeyRequest.forOwnedApiKeys() must invalidate exactly the keys owned by the
 * authenticated user, leaving the superuser's keys untouched.
 */
public void testInvalidateApiKeysOwnedByCurrentAuthenticatedUser() throws InterruptedException, ExecutionException {
    int noOfSuperuserApiKeys = randomIntBetween(3, 5);
    int noOfApiKeysForUserWithManageApiKeyRole = randomIntBetween(3, 5);
    // Created only so keys owned by a different user exist; they must NOT be invalidated below.
    // (The previously-bound local was unused, so the result is intentionally discarded.)
    createApiKeys(noOfSuperuserApiKeys, null);
    String userWithManageApiKeyRole = randomFrom("user_with_manage_api_key_role", "user_with_manage_own_api_key_role");
    List<CreateApiKeyResponse> userWithManageApiKeyRoleApiKeys = createApiKeys(
        userWithManageApiKeyRole,
        noOfApiKeysForUserWithManageApiKeyRole,
        null,
        "monitor"
    ).v1();
    final Client client = client().filterWithHeader(
        Collections.singletonMap("Authorization", basicAuthHeaderValue(userWithManageApiKeyRole, TEST_PASSWORD_SECURE_STRING))
    );
    PlainActionFuture<InvalidateApiKeyResponse> listener = new PlainActionFuture<>();
    client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.forOwnedApiKeys(), listener);
    InvalidateApiKeyResponse invalidateResponse = listener.get();
    verifyInvalidateResponse(noOfApiKeysForUserWithManageApiKeyRole, userWithManageApiKeyRoleApiKeys, invalidateResponse);
}
/**
 * With run-as authentication, forOwnedApiKeys() must invalidate the keys owned by the run-as
 * target user ("user_with_manage_own_api_key_role"), not the authenticating user's keys.
 */
public void testInvalidateApiKeysOwnedByRunAsUserWhenOwnerIsTrue() throws InterruptedException, ExecutionException {
    createUserWithRunAsRole();
    int noOfSuperuserApiKeys = randomIntBetween(3, 5);
    int noOfApiKeysForUserWithManageApiKeyRole = randomIntBetween(3, 5);
    // Superuser keys; must not be invalidated
    createApiKeys(noOfSuperuserApiKeys, null);
    List<CreateApiKeyResponse> userWithManageApiKeyRoleApiKeys = createApiKeys(
        "user_with_manage_own_api_key_role",
        "user_with_run_as_role",
        noOfApiKeysForUserWithManageApiKeyRole,
        null,
        "monitor"
    ).v1();
    PlainActionFuture<InvalidateApiKeyResponse> listener = new PlainActionFuture<>();
    getClientForRunAsUser().execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.forOwnedApiKeys(), listener);
    InvalidateApiKeyResponse invalidateResponse = listener.get();
    verifyInvalidateResponse(noOfApiKeysForUserWithManageApiKeyRole, userWithManageApiKeyRoleApiKeys, invalidateResponse);
}
/**
 * Same run-as setup, but invalidating by the run-as target's explicit realm and user name;
 * the target user's keys must be invalidated.
 */
public void testInvalidateApiKeysOwnedByRunAsUserWhenRunAsUserInfoIsGiven() throws InterruptedException, ExecutionException {
    createUserWithRunAsRole();
    int noOfSuperuserApiKeys = randomIntBetween(3, 5);
    int noOfApiKeysForUserWithManageApiKeyRole = randomIntBetween(3, 5);
    // Superuser keys; must not be invalidated
    createApiKeys(noOfSuperuserApiKeys, null);
    List<CreateApiKeyResponse> userWithManageApiKeyRoleApiKeys = createApiKeys(
        "user_with_manage_own_api_key_role",
        "user_with_run_as_role",
        noOfApiKeysForUserWithManageApiKeyRole,
        null,
        "monitor"
    ).v1();
    PlainActionFuture<InvalidateApiKeyResponse> listener = new PlainActionFuture<>();
    getClientForRunAsUser().execute(
        InvalidateApiKeyAction.INSTANCE,
        InvalidateApiKeyRequest.usingRealmAndUserName("file", "user_with_manage_own_api_key_role"),
        listener
    );
    InvalidateApiKeyResponse invalidateResponse = listener.get();
    verifyInvalidateResponse(noOfApiKeysForUserWithManageApiKeyRole, userWithManageApiKeyRoleApiKeys, invalidateResponse);
}
/**
 * With run-as authentication, invalidating by any realm/user combination other than the valid
 * run-as target pair must be rejected with an authorization error.
 */
public void testInvalidateApiKeysOwnedByRunAsUserWillNotWorkWhenAuthUserInfoIsGiven() throws InterruptedException, ExecutionException {
    createUserWithRunAsRole();
    int noOfSuperuserApiKeys = randomIntBetween(3, 5);
    int noOfApiKeysForUserWithManageApiKeyRole = randomIntBetween(3, 5);
    createApiKeys(noOfSuperuserApiKeys, null);
    List<CreateApiKeyResponse> userWithManageApiKeyRoleApiKeys = createApiKeys(
        "user_with_manage_own_api_key_role",
        "user_with_run_as_role",
        noOfApiKeysForUserWithManageApiKeyRole,
        null,
        "monitor"
    ).v1();
    PlainActionFuture<InvalidateApiKeyResponse> listener = new PlainActionFuture<>();
    // Each pair differs from the valid ("file", "user_with_manage_own_api_key_role") combination
    @SuppressWarnings("unchecked")
    final Tuple<String, String> invalidRealmAndUserPair = randomFrom(
        new Tuple<>("file", "user_with_run_as_role"),
        new Tuple<>("index", "user_with_manage_own_api_key_role"),
        new Tuple<>("index", "user_with_run_as_role")
    );
    getClientForRunAsUser().execute(
        InvalidateApiKeyAction.INSTANCE,
        InvalidateApiKeyRequest.usingRealmAndUserName(invalidRealmAndUserPair.v1(), invalidRealmAndUserPair.v2()),
        listener
    );
    final ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, listener::actionGet);
    assertThat(
        e.getMessage(),
        containsString("unauthorized for user [user_with_run_as_role] run as [user_with_manage_own_api_key_role]")
    );
}
/**
 * A request authenticated with an API key (no explicit role descriptors, so limited by the
 * owner's roles) can retrieve its own information, but cannot retrieve its own limited-by role
 * descriptors, nor any other key — even one created by the same owner.
 */
public void testApiKeyAuthorizationApiKeyMustBeAbleToRetrieveItsOwnInformationButNotAnyOtherKeysCreatedBySameOwner()
    throws InterruptedException, ExecutionException {
    // Two keys owned by the same user; we authenticate as key 0 below
    final Tuple<List<CreateApiKeyResponse>, List<Map<String, Object>>> tuple = createApiKeys(
        ES_TEST_ROOT_USER,
        2,
        null,
        (String[]) null
    );
    List<CreateApiKeyResponse> responses = tuple.v1();
    // Build the "id:key" credential for key 0 and authenticate with it
    final String base64ApiKeyKeyValue = Base64.getEncoder()
        .encodeToString((responses.get(0).getId() + ":" + responses.get(0).getKey().toString()).getBytes(StandardCharsets.UTF_8));
    Client client = client().filterWithHeader(Map.of("Authorization", "ApiKey " + base64ApiKeyKeyValue));
    PlainActionFuture<GetApiKeyResponse> listener = new PlainActionFuture<>();
    // Retrieving its own info succeeds regardless of the ownedByAuthenticatedUser flag
    client.execute(
        GetApiKeyAction.INSTANCE,
        GetApiKeyRequest.builder().apiKeyId(responses.get(0).getId()).ownedByAuthenticatedUser(randomBoolean()).build(),
        listener
    );
    GetApiKeyResponse response = listener.get();
    verifyApiKeyInfos(
        1,
        responses,
        tuple.v2(),
        List.of(new RoleDescriptor(DEFAULT_API_KEY_ROLE_DESCRIPTOR.getName(), Strings.EMPTY_ARRAY, null, null)),
        null,
        response.getApiKeyInfoList(),
        Collections.singleton(responses.get(0).getId()),
        null
    );
    // It cannot retrieve its own limited-by role descriptors
    final PlainActionFuture<GetApiKeyResponse> future2 = new PlainActionFuture<>();
    client.execute(
        GetApiKeyAction.INSTANCE,
        GetApiKeyRequest.builder().apiKeyId(responses.get(0).getId()).ownedByAuthenticatedUser(randomBoolean()).withLimitedBy().build(),
        future2
    );
    final ElasticsearchSecurityException e2 = expectThrows(ElasticsearchSecurityException.class, future2::actionGet);
    // NOTE(review): the id in the expected message is that of the AUTHENTICATING key
    // (responses.get(0)), not the requested key — presumably assertErrorMessage checks the
    // credential identity in the error text; confirm against its definition.
    assertErrorMessage(e2, "cluster:admin/xpack/security/api_key/get", ES_TEST_ROOT_USER, responses.get(0).getId());
    // for any other API key id, it must deny access
    final PlainActionFuture<GetApiKeyResponse> failureListener = new PlainActionFuture<>();
    client.execute(
        GetApiKeyAction.INSTANCE,
        GetApiKeyRequest.builder().apiKeyId(responses.get(1).getId()).ownedByAuthenticatedUser(randomBoolean()).build(),
        failureListener
    );
    ElasticsearchSecurityException ese = expectThrows(ElasticsearchSecurityException.class, () -> failureListener.actionGet());
    assertErrorMessage(ese, "cluster:admin/xpack/security/api_key/get", ES_TEST_ROOT_USER, responses.get(0).getId());
    // An unfiltered owned-by request is also denied
    final PlainActionFuture<GetApiKeyResponse> failureListener1 = new PlainActionFuture<>();
    client.execute(GetApiKeyAction.INSTANCE, GetApiKeyRequest.builder().ownedByAuthenticatedUser().build(), failureListener1);
    ese = expectThrows(ElasticsearchSecurityException.class, () -> failureListener1.actionGet());
    assertErrorMessage(ese, "cluster:admin/xpack/security/api_key/get", ES_TEST_ROOT_USER, responses.get(0).getId());
}
/**
 * Verifies who may view limited-by role descriptors: an API key with only manage_own_api_key
 * can view its own info but not its limited-by; an API key with manage_api_key can view both
 * its own limited-by and other keys' limited-by.
 */
public void testApiKeyViewLimitedBy() {
    // 1. An API key with manage_own_api_key
    final Tuple<List<CreateApiKeyResponse>, List<Map<String, Object>>> tuple1 = createApiKeys(
        ES_TEST_ROOT_USER,
        1,
        null,
        "manage_own_api_key"
    );
    final List<CreateApiKeyResponse> responses1 = tuple1.v1();
    final String apiKeyId1 = responses1.get(0).getId();
    // Client authenticated as key 1 (Base64 of "id:key")
    final Client client1 = client().filterWithHeader(
        Map.of(
            "Authorization",
            "ApiKey "
                + Base64.getEncoder()
                    .encodeToString((apiKeyId1 + ":" + responses1.get(0).getKey().toString()).getBytes(StandardCharsets.UTF_8))
        )
    );
    // Can view itself without limited-by
    verifyApiKeyInfos(
        ES_TEST_ROOT_USER,
        1,
        responses1,
        tuple1.v2(),
        List.of(new RoleDescriptor(DEFAULT_API_KEY_ROLE_DESCRIPTOR.getName(), new String[] { "manage_own_api_key" }, null, null)),
        null,
        List.of(getApiKeyInfo(client1, apiKeyId1, false, randomBoolean())),
        Collections.singleton(apiKeyId1),
        null
    );
    // Cannot view itself with limited-by
    final boolean useGetApiKey = randomBoolean();
    final var e2 = expectThrows(ElasticsearchSecurityException.class, () -> getApiKeyInfo(client1, apiKeyId1, true, useGetApiKey));
    // The denied action differs depending on whether the Get or Query API was exercised
    assertErrorMessage(e2, "cluster:admin/xpack/security/api_key/" + (useGetApiKey ? "get" : "query"), ES_TEST_ROOT_USER, apiKeyId1);
    // 2. An API key with manage_api_key can view its own limited-by or any other key's limited-by
    final Tuple<List<CreateApiKeyResponse>, List<Map<String, Object>>> tuple3 = createApiKeys(
        ES_TEST_ROOT_USER,
        1,
        null,
        "manage_api_key"
    );
    final List<CreateApiKeyResponse> responses3 = tuple3.v1();
    final String apiKeyId3 = responses3.get(0).getId();
    // Client authenticated as key 3
    final Client client3 = client().filterWithHeader(
        Map.of(
            "Authorization",
            "ApiKey "
                + Base64.getEncoder()
                    .encodeToString((apiKeyId3 + ":" + responses3.get(0).getKey().toString()).getBytes(StandardCharsets.UTF_8))
        )
    );
    // View its own limited-by
    verifyApiKeyInfos(
        ES_TEST_ROOT_USER,
        1,
        responses3,
        tuple3.v2(),
        List.of(new RoleDescriptor(DEFAULT_API_KEY_ROLE_DESCRIPTOR.getName(), new String[] { "manage_api_key" }, null, null)),
        List.of(ES_TEST_ROOT_ROLE_DESCRIPTOR),
        List.of(getApiKeyInfo(client3, apiKeyId3, true, randomBoolean())),
        Collections.singleton(apiKeyId3),
        null
    );
    // View other key's limited-by
    verifyApiKeyInfos(
        ES_TEST_ROOT_USER,
        1,
        responses1,
        tuple1.v2(),
        List.of(new RoleDescriptor(DEFAULT_API_KEY_ROLE_DESCRIPTOR.getName(), new String[] { "manage_own_api_key" }, null, null)),
        List.of(ES_TEST_ROOT_ROLE_DESCRIPTOR),
        List.of(getApiKeyInfo(client3, apiKeyId1, true, randomBoolean())),
        Collections.singleton(apiKeyId1),
        null
    );
}
/**
 * An API key document whose limited-by role descriptors hold the 7.x legacy superuser role must
 * be returned by the Get API with the descriptors transformed to the current superuser role,
 * even though the raw document still stores the legacy form.
 */
public void testLegacySuperuserLimitedByWillBeReturnedAsTransformed() throws Exception {
    final Tuple<CreateApiKeyResponse, Map<String, Object>> createdApiKey = createApiKey(TEST_USER_NAME, null);
    final var apiKeyId = createdApiKey.v1().getId();
    final ServiceWithNodeName serviceWithNodeName = getServiceWithNodeName();
    final Authentication authentication = Authentication.newRealmAuthentication(
        new User(TEST_USER_NAME, TEST_ROLE),
        new Authentication.RealmRef("file", "file", serviceWithNodeName.nodeName())
    );
    // Force set user role descriptors to 7.x legacy superuser role descriptors
    assertSingleUpdate(
        apiKeyId,
        updateApiKeys(
            serviceWithNodeName.service(),
            authentication,
            BulkUpdateApiKeyRequest.usingApiKeyIds(apiKeyId),
            Set.of(ApiKeyService.LEGACY_SUPERUSER_ROLE_DESCRIPTOR)
        )
    );
    // raw document has the legacy superuser role descriptor
    expectRoleDescriptorsForApiKey(
        "limited_by_role_descriptors",
        Set.of(ApiKeyService.LEGACY_SUPERUSER_ROLE_DESCRIPTOR),
        getApiKeyDocument(apiKeyId)
    );
    // ...but the API response presents the transformed, current superuser descriptor
    ApiKey apiKeyInfo = getApiKeyInfo(client(), apiKeyId, true, randomBoolean());
    assertThat(
        apiKeyInfo.getLimitedBy().roleDescriptorsList().iterator().next(),
        equalTo(Set.of(ReservedRolesStore.SUPERUSER_ROLE_DESCRIPTOR))
    );
}
/**
 * A request authenticated with an API key granted manage_own_api_key can invalidate itself by
 * id, but cannot invalidate a sibling key (same owner) nor use the blanket forOwnedApiKeys()
 * request.
 */
public void testApiKeyWithManageOwnPrivilegeIsAbleToInvalidateItselfButNotAnyOtherKeysCreatedBySameOwner() throws InterruptedException,
    ExecutionException {
    // Two keys owned by the same user; we authenticate as key 0
    List<CreateApiKeyResponse> responses = createApiKeys(ES_TEST_ROOT_USER, 2, null, "manage_own_api_key").v1();
    final String base64ApiKeyKeyValue = Base64.getEncoder()
        .encodeToString((responses.get(0).getId() + ":" + responses.get(0).getKey().toString()).getBytes(StandardCharsets.UTF_8));
    Client client = client().filterWithHeader(Map.of("Authorization", "ApiKey " + base64ApiKeyKeyValue));
    final PlainActionFuture<InvalidateApiKeyResponse> failureListener = new PlainActionFuture<>();
    // for any other API key id, it must deny access
    client.execute(
        InvalidateApiKeyAction.INSTANCE,
        InvalidateApiKeyRequest.usingApiKeyId(responses.get(1).getId(), randomBoolean()),
        failureListener
    );
    ElasticsearchSecurityException ese = expectThrows(ElasticsearchSecurityException.class, () -> failureListener.actionGet());
    // NOTE(review): the expected message references the authenticating key's id
    // (responses.get(0)), not the target key — presumably assertErrorMessage checks the
    // credential identity; confirm against its definition.
    assertErrorMessage(ese, "cluster:admin/xpack/security/api_key/invalidate", ES_TEST_ROOT_USER, responses.get(0).getId());
    // The blanket owned-keys invalidation is also denied
    final PlainActionFuture<InvalidateApiKeyResponse> failureListener1 = new PlainActionFuture<>();
    client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.forOwnedApiKeys(), failureListener1);
    ese = expectThrows(ElasticsearchSecurityException.class, () -> failureListener1.actionGet());
    assertErrorMessage(ese, "cluster:admin/xpack/security/api_key/invalidate", ES_TEST_ROOT_USER, responses.get(0).getId());
    // Invalidating itself by its own id succeeds
    PlainActionFuture<InvalidateApiKeyResponse> listener = new PlainActionFuture<>();
    client.execute(
        InvalidateApiKeyAction.INSTANCE,
        InvalidateApiKeyRequest.usingApiKeyId(responses.get(0).getId(), randomBoolean()),
        listener
    );
    InvalidateApiKeyResponse invalidateResponse = listener.get();
    assertThat(invalidateResponse.getInvalidatedApiKeys().size(), equalTo(1));
    assertThat(invalidateResponse.getInvalidatedApiKeys(), containsInAnyOrder(responses.get(0).getId()));
    assertThat(invalidateResponse.getPreviouslyInvalidatedApiKeys().size(), equalTo(0));
    assertThat(invalidateResponse.getErrors().size(), equalTo(0));
}
    /**
     * Tests creation of "derived" API keys, i.e. keys created while authenticated as an API key
     * (or as a client-credentials OAuth2 token obtained from one). Per the assertions below, such
     * creation is only accepted when an explicit role descriptor list granting no privileges is
     * supplied (key-100); requests with no descriptors, an empty descriptor list, or any
     * descriptor carrying a privilege are all rejected.
     */
    public void testDerivedKeys() throws ExecutionException, InterruptedException {
        Client client = authorizedClient();
        final CreateApiKeyResponse response = new CreateApiKeyRequestBuilder(client).setName("key-1")
            .setRoleDescriptors(
                Collections.singletonList(new RoleDescriptor("role", new String[] { "manage_api_key", "manage_token" }, null, null))
            )
            .setMetadata(ApiKeyTests.randomMetadata())
            .setRefreshPolicy(randomFrom(IMMEDIATE, WAIT_UNTIL, NONE))
            .get();
        assertEquals("key-1", response.getName());
        assertNotNull(response.getId());
        assertNotNull(response.getKey());
        // use the first ApiKey for authorized action
        final String base64ApiKeyKeyValue = Base64.getEncoder()
            .encodeToString((response.getId() + ":" + response.getKey().toString()).getBytes(StandardCharsets.UTF_8));
        // Authenticate either directly as the API key, or as an OAuth2 token created from it
        final Client clientKey1;
        if (randomBoolean()) {
            clientKey1 = client().filterWithHeader(Collections.singletonMap("Authorization", "ApiKey " + base64ApiKeyKeyValue));
        } else {
            final CreateTokenResponse createTokenResponse = new CreateTokenRequestBuilder(
                client().filterWithHeader(Collections.singletonMap("Authorization", "ApiKey " + base64ApiKeyKeyValue))
            ).setGrantType("client_credentials").get();
            clientKey1 = client().filterWithHeader(Map.of("Authorization", "Bearer " + createTokenResponse.getTokenString()));
        }
        final String expectedMessage = "creating derived api keys requires an explicit role descriptor that is empty";
        // key-2: no role descriptors supplied at all -> rejected
        final IllegalArgumentException e1 = expectThrows(
            IllegalArgumentException.class,
            () -> new CreateApiKeyRequestBuilder(clientKey1).setName("key-2")
                .setMetadata(ApiKeyTests.randomMetadata())
                .setRefreshPolicy(randomFrom(IMMEDIATE, WAIT_UNTIL, NONE))
                .get()
        );
        assertThat(e1.getMessage(), containsString(expectedMessage));
        // key-3: empty role descriptor list -> rejected
        final IllegalArgumentException e2 = expectThrows(
            IllegalArgumentException.class,
            () -> new CreateApiKeyRequestBuilder(clientKey1).setName("key-3")
                .setRoleDescriptors(Collections.emptyList())
                .setRefreshPolicy(randomFrom(IMMEDIATE, WAIT_UNTIL, NONE))
                .get()
        );
        assertThat(e2.getMessage(), containsString(expectedMessage));
        // key-4: descriptor that grants a privilege -> rejected
        final IllegalArgumentException e3 = expectThrows(
            IllegalArgumentException.class,
            () -> new CreateApiKeyRequestBuilder(clientKey1).setName("key-4")
                .setMetadata(ApiKeyTests.randomMetadata())
                .setRoleDescriptors(
                    Collections.singletonList(new RoleDescriptor("role", new String[] { "manage_own_api_key" }, null, null))
                )
                .setRefreshPolicy(randomFrom(IMMEDIATE, WAIT_UNTIL, NONE))
                .get()
        );
        assertThat(e3.getMessage(), containsString(expectedMessage));
        // key-5: mostly empty descriptors, but one privileged descriptor mixed in -> still rejected
        final List<RoleDescriptor> roleDescriptors = randomList(2, 10, () -> new RoleDescriptor("role", null, null, null));
        roleDescriptors.set(
            randomInt(roleDescriptors.size() - 1),
            new RoleDescriptor("role", new String[] { "manage_own_api_key" }, null, null)
        );
        final IllegalArgumentException e4 = expectThrows(
            IllegalArgumentException.class,
            () -> new CreateApiKeyRequestBuilder(clientKey1).setName("key-5")
                .setMetadata(ApiKeyTests.randomMetadata())
                .setRoleDescriptors(roleDescriptors)
                .setRefreshPolicy(randomFrom(IMMEDIATE, WAIT_UNTIL, NONE))
                .get()
        );
        assertThat(e4.getMessage(), containsString(expectedMessage));
        // key-100: a single explicit descriptor granting nothing -> accepted
        final CreateApiKeyResponse key100Response = new CreateApiKeyRequestBuilder(clientKey1).setName("key-100")
            .setMetadata(ApiKeyTests.randomMetadata())
            .setRoleDescriptors(Collections.singletonList(new RoleDescriptor("role", null, null, null)))
            .setRefreshPolicy(randomFrom(IMMEDIATE, WAIT_UNTIL))
            .get();
        assertEquals("key-100", key100Response.getName());
        assertNotNull(key100Response.getId());
        assertNotNull(key100Response.getKey());
        // Derive keys have empty limited-by role descriptors
        final PlainActionFuture<GetApiKeyResponse> future = new PlainActionFuture<>();
        client.execute(
            GetApiKeyAction.INSTANCE,
            GetApiKeyRequest.builder().apiKeyId(key100Response.getId()).withLimitedBy().build(),
            future
        );
        assertThat(future.actionGet().getApiKeyInfoList().size(), equalTo(1));
        RoleDescriptorsIntersection limitedBy = future.actionGet().getApiKeyInfoList().get(0).apiKeyInfo().getLimitedBy();
        assertThat(limitedBy.roleDescriptorsList().size(), equalTo(1));
        assertThat(limitedBy.roleDescriptorsList().iterator().next(), emptyIterable());
        // Check at the end to allow sometime for the operation to happen. Since an erroneous creation is
        // asynchronous so that the document is not available immediately.
        assertApiKeyNotCreated(client, "key-2");
        assertApiKeyNotCreated(client, "key-3");
        assertApiKeyNotCreated(client, "key-4");
        assertApiKeyNotCreated(client, "key-5");
    }
public void testApiKeyRunAsAnotherUserCanCreateApiKey() {
final RoleDescriptor descriptor = new RoleDescriptor("role", Strings.EMPTY_ARRAY, null, new String[] { ES_TEST_ROOT_USER });
Client client = client().filterWithHeader(
Map.of("Authorization", basicAuthHeaderValue(ES_TEST_ROOT_USER, TEST_PASSWORD_SECURE_STRING))
);
final CreateApiKeyResponse response1 = new CreateApiKeyRequestBuilder(client).setName("run-as-key")
.setRoleDescriptors(List.of(descriptor))
.setMetadata(ApiKeyTests.randomMetadata())
.setRefreshPolicy(randomFrom(IMMEDIATE, WAIT_UNTIL))
.get();
final String base64ApiKeyKeyValue = Base64.getEncoder()
.encodeToString((response1.getId() + ":" + response1.getKey()).getBytes(StandardCharsets.UTF_8));
final CreateApiKeyResponse response2 = new CreateApiKeyRequestBuilder(
client().filterWithHeader(
Map.of("Authorization", "ApiKey " + base64ApiKeyKeyValue, "es-security-runas-user", ES_TEST_ROOT_USER)
)
).setName("create-by run-as user")
.setRoleDescriptors(List.of(new RoleDescriptor("a", new String[] { "all" }, null, null)))
.setRefreshPolicy(randomFrom(IMMEDIATE, WAIT_UNTIL))
.get();
final GetApiKeyResponse getApiKeyResponse = client.execute(
GetApiKeyAction.INSTANCE,
GetApiKeyRequest.builder().apiKeyId(response2.getId()).ownedByAuthenticatedUser(true).build()
).actionGet();
assertThat(getApiKeyResponse.getApiKeyInfoList(), iterableWithSize(1));
ApiKey apiKeyInfo = getApiKeyResponse.getApiKeyInfoList().get(0).apiKeyInfo();
assertThat(apiKeyInfo.getId(), equalTo(response2.getId()));
assertThat(apiKeyInfo.getUsername(), equalTo(ES_TEST_ROOT_USER));
assertThat(apiKeyInfo.getRealm(), equalTo("file"));
}
public void testCreationAndAuthenticationReturns429WhenThreadPoolIsSaturated() throws Exception {
final String nodeName = randomFrom(internalCluster().getNodeNames());
final Settings settings = internalCluster().getInstance(Settings.class, nodeName);
final int allocatedProcessors = EsExecutors.allocatedProcessors(settings);
final ThreadPool threadPool = internalCluster().getInstance(ThreadPool.class, nodeName);
final ApiKeyService apiKeyService = internalCluster().getInstance(ApiKeyService.class, nodeName);
final RoleDescriptor descriptor = new RoleDescriptor("auth_only", new String[] {}, null, null);
final Client client = authorizedClient();
final CreateApiKeyResponse createApiKeyResponse = new CreateApiKeyRequestBuilder(client).setName("auth only key")
.setRoleDescriptors(Collections.singletonList(descriptor))
.setMetadata(ApiKeyTests.randomMetadata())
.setRefreshPolicy(randomFrom(IMMEDIATE, WAIT_UNTIL, NONE))
.get();
assertNotNull(createApiKeyResponse.getId());
assertNotNull(createApiKeyResponse.getKey());
// Clear the auth cache to force recompute the expensive hash which requires the crypto thread pool
apiKeyService.getApiKeyAuthCache().invalidateAll();
final List<NodeInfo> nodeInfos = clusterAdmin().prepareNodesInfo()
.get()
.getNodes()
.stream()
.filter(nodeInfo -> nodeInfo.getNode().getName().equals(nodeName))
.collect(Collectors.toList());
assertEquals(1, nodeInfos.size());
final ExecutorService executorService = threadPool.executor(SECURITY_CRYPTO_THREAD_POOL_NAME);
final int numberOfThreads = (allocatedProcessors + 1) / 2;
final CountDownLatch blockingLatch = new CountDownLatch(1);
final CountDownLatch readyLatch = new CountDownLatch(numberOfThreads);
for (int i = 0; i < numberOfThreads; i++) {
executorService.submit(() -> {
readyLatch.countDown();
try {
blockingLatch.await();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
});
}
// Make sure above tasks are running
readyLatch.await();
// Then fill the whole queue for the crypto thread pool
Future<?> lastTaskFuture = null;
int i = 0;
try {
for (i = 0; i < CRYPTO_THREAD_POOL_QUEUE_SIZE; i++) {
lastTaskFuture = executorService.submit(() -> {});
}
} catch (EsRejectedExecutionException e) {
logger.info("Attempted to push {} tasks but only pushed {}", CRYPTO_THREAD_POOL_QUEUE_SIZE, i + 1);
}
try (RestClient restClient = createRestClient(nodeInfos, null, "http")) {
final String base64ApiKeyKeyValue = Base64.getEncoder()
.encodeToString(
(createApiKeyResponse.getId() + ":" + createApiKeyResponse.getKey().toString()).getBytes(StandardCharsets.UTF_8)
);
final Request authRequest = new Request("GET", "_security/_authenticate");
authRequest.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", "ApiKey " + base64ApiKeyKeyValue).build());
final ResponseException e1 = expectThrows(ResponseException.class, () -> restClient.performRequest(authRequest));
assertThat(e1.getMessage(), containsString("429 Too Many Requests"));
assertThat(e1.getResponse().getStatusLine().getStatusCode(), is(429));
final Request createApiKeyRequest = new Request("POST", "_security/api_key");
createApiKeyRequest.setJsonEntity("{\"name\":\"key\"}");
createApiKeyRequest.setOptions(
createApiKeyRequest.getOptions()
.toBuilder()
.addHeader("Authorization", basicAuthHeaderValue(ES_TEST_ROOT_USER, TEST_PASSWORD_SECURE_STRING))
);
final ResponseException e2 = expectThrows(ResponseException.class, () -> restClient.performRequest(createApiKeyRequest));
assertThat(e2.getMessage(), containsString("429 Too Many Requests"));
assertThat(e2.getResponse().getStatusLine().getStatusCode(), is(429));
} finally {
blockingLatch.countDown();
if (lastTaskFuture != null) {
lastTaskFuture.get();
}
}
}
    /**
     * The clear-security-cache API (cache name "api_key") must evict entries from the per-node
     * ApiKeyService caches: targeting specific key doc ids evicts only those doc-cache entries and
     * leaves the role-descriptors-bytes cache intact, while an empty keys array clears both caches
     * completely on every node.
     */
    public void testCacheInvalidationViaApiCalls() throws Exception {
        final List<ApiKeyService> services = Arrays.stream(internalCluster().getNodeNames())
            .map(n -> internalCluster().getInstance(ApiKeyService.class, n))
            .collect(Collectors.toList());
        // Create two API keys and authenticate with them
        String docId1 = createApiKeyAndAuthenticateWithIt().v1();
        String docId2 = createApiKeyAndAuthenticateWithIt().v1();
        // Find out which nodes handled the above authentication requests
        final ApiKeyService serviceForDoc1 = services.stream().filter(s -> s.getDocCache().get(docId1) != null).findFirst().orElseThrow();
        final ApiKeyService serviceForDoc2 = services.stream().filter(s -> s.getDocCache().get(docId2) != null).findFirst().orElseThrow();
        assertNotNull(serviceForDoc1.getFromCache(docId1));
        assertNotNull(serviceForDoc2.getFromCache(docId2));
        // The two authentications may or may not have landed on the same node
        final boolean sameServiceNode = serviceForDoc1 == serviceForDoc2;
        if (sameServiceNode) {
            assertEquals(2, serviceForDoc1.getDocCache().count());
            assertEquals(2, serviceForDoc1.getRoleDescriptorsBytesCache().count());
        } else {
            assertEquals(1, serviceForDoc1.getDocCache().count());
            assertEquals(2, serviceForDoc1.getRoleDescriptorsBytesCache().count());
            assertEquals(1, serviceForDoc2.getDocCache().count());
            assertEquals(2, serviceForDoc2.getRoleDescriptorsBytesCache().count());
        }
        // Invalidate cache for only the first key
        ClearSecurityCacheRequest clearSecurityCacheRequest = new ClearSecurityCacheRequest();
        clearSecurityCacheRequest.cacheName("api_key");
        clearSecurityCacheRequest.keys(docId1);
        ClearSecurityCacheResponse clearSecurityCacheResponse = client().execute(
            ClearSecurityCacheAction.INSTANCE,
            clearSecurityCacheRequest
        ).get();
        assertFalse(clearSecurityCacheResponse.hasFailures());
        assertBusy(() -> {
            // A cache miss surfaces as an NPE from getFromCache; the test relies on that to detect eviction
            expectThrows(NullPointerException.class, () -> serviceForDoc1.getFromCache(docId1));
            if (sameServiceNode) {
                assertEquals(1, serviceForDoc1.getDocCache().count());
                assertNotNull(serviceForDoc1.getFromCache(docId2));
            } else {
                assertEquals(0, serviceForDoc1.getDocCache().count());
                assertEquals(1, serviceForDoc2.getDocCache().count());
                assertNotNull(serviceForDoc2.getFromCache(docId2));
            }
            // Role descriptors are not invalidated when invalidation is for specific API keys
            assertEquals(2, serviceForDoc1.getRoleDescriptorsBytesCache().count());
            assertEquals(2, serviceForDoc2.getRoleDescriptorsBytesCache().count());
        });
        // Invalidate all cache entries by setting keys to an empty array
        clearSecurityCacheRequest.keys(new String[0]);
        clearSecurityCacheResponse = client().execute(ClearSecurityCacheAction.INSTANCE, clearSecurityCacheRequest).get();
        assertFalse(clearSecurityCacheResponse.hasFailures());
        assertBusy(() -> {
            assertEquals(0, serviceForDoc1.getDocCache().count());
            assertEquals(0, serviceForDoc1.getRoleDescriptorsBytesCache().count());
            if (sameServiceNode) {
                expectThrows(NullPointerException.class, () -> serviceForDoc1.getFromCache(docId2));
            } else {
                expectThrows(NullPointerException.class, () -> serviceForDoc2.getFromCache(docId2));
                assertEquals(0, serviceForDoc2.getDocCache().count());
                assertEquals(0, serviceForDoc2.getRoleDescriptorsBytesCache().count());
            }
        });
    }
public void testSecurityIndexStateChangeWillInvalidateApiKeyCaches() throws Exception {
final List<ApiKeyService> services = Arrays.stream(internalCluster().getNodeNames())
.map(n -> internalCluster().getInstance(ApiKeyService.class, n))
.collect(Collectors.toList());
String docId = createApiKeyAndAuthenticateWithIt().v1();
// The API key is cached by one of the node that the above request hits, find out which one
final ApiKeyService apiKeyService = services.stream().filter(s -> s.getDocCache().count() > 0).findFirst().orElseThrow();
assertNotNull(apiKeyService.getFromCache(docId));
assertEquals(1, apiKeyService.getDocCache().count());
assertEquals(2, apiKeyService.getRoleDescriptorsBytesCache().count());
// Close security index to trigger invalidation
final CloseIndexResponse closeIndexResponse = indicesAdmin().close(new CloseIndexRequest(INTERNAL_SECURITY_MAIN_INDEX_7)).get();
assertTrue(closeIndexResponse.isAcknowledged());
assertBusy(() -> {
expectThrows(NullPointerException.class, () -> apiKeyService.getFromCache(docId));
assertEquals(0, apiKeyService.getDocCache().count());
assertEquals(0, apiKeyService.getRoleDescriptorsBytesCache().count());
});
}
    /**
     * Updating a single API key replaces its assigned role descriptors and metadata, refreshes its
     * limited-by role descriptors from the owner's current roles, and reports a noop
     * ({@code isUpdated() == false}) only when neither descriptors nor metadata actually change.
     * The key must still authenticate afterwards, and its effective privileges reflect the update.
     */
    public void testUpdateApiKeysForSingleKey() throws Exception {
        final Tuple<CreateApiKeyResponse, Map<String, Object>> createdApiKey = createApiKey(TEST_USER_NAME, null);
        final var apiKeyId = createdApiKey.v1().getId();
        final Map<String, Object> oldMetadata = createdApiKey.v2();
        final var newRoleDescriptors = randomRoleDescriptors();
        final boolean nullRoleDescriptors = newRoleDescriptors == null;
        // Role descriptor corresponding to SecuritySettingsSource.TEST_ROLE_YML
        final var expectedLimitedByRoleDescriptors = Set.of(
            new RoleDescriptor(
                TEST_ROLE,
                new String[] { "ALL" },
                new RoleDescriptor.IndicesPrivileges[] {
                    RoleDescriptor.IndicesPrivileges.builder().indices("*").allowRestrictedIndices(true).privileges("ALL").build() },
                null
            )
        );
        final var request = new UpdateApiKeyRequest(apiKeyId, newRoleDescriptors, ApiKeyTests.randomMetadata(), null);
        final UpdateApiKeyResponse response = updateSingleApiKeyMaybeUsingBulkAction(TEST_USER_NAME, request);
        assertNotNull(response);
        // In this test, non-null roleDescriptors always result in an update since they either update the role name, or associated
        // privileges. As such null descriptors (plus matching or null metadata) is the only way we can get a noop here
        final boolean metadataChanged = request.getMetadata() != null && false == request.getMetadata().equals(oldMetadata);
        final boolean isUpdated = nullRoleDescriptors == false || metadataChanged;
        assertEquals(isUpdated, response.isUpdated());
        // Test authenticate works with updated API key
        final var authResponse = authenticateWithApiKey(apiKeyId, createdApiKey.v1().getKey());
        assertThat(authResponse.get(User.Fields.USERNAME.getPreferredName()), equalTo(TEST_USER_NAME));
        // Document updated as expected
        final Map<String, Object> expectedCreator = new HashMap<>();
        expectedCreator.put("principal", TEST_USER_NAME);
        expectedCreator.put("full_name", null);
        expectedCreator.put("email", null);
        expectedCreator.put("metadata", Map.of());
        expectedCreator.put("realm_type", "file");
        expectedCreator.put("realm", "file");
        final var expectedMetadata = request.getMetadata() != null ? request.getMetadata() : createdApiKey.v2();
        final var expectedRoleDescriptors = nullRoleDescriptors ? List.of(DEFAULT_API_KEY_ROLE_DESCRIPTOR) : newRoleDescriptors;
        doTestApiKeyHasExpectedAttributes(
            apiKeyId,
            Map.of(
                ApiKeyAttribute.CREATOR,
                expectedCreator,
                ApiKeyAttribute.METADATA,
                expectedMetadata == null ? Map.of() : expectedMetadata,
                ApiKeyAttribute.ASSIGNED_ROLE_DESCRIPTORS,
                expectedRoleDescriptors,
                ApiKeyAttribute.LIMITED_BY_ROLE_DESCRIPTORS,
                expectedLimitedByRoleDescriptors
            )
        );
        // Check if update resulted in API key role going from `monitor` to `all` cluster privilege and assert that action that requires
        // `all` is authorized or denied accordingly
        final boolean hasAllClusterPrivilege = expectedRoleDescriptors.stream()
            .filter(rd -> Arrays.asList(rd.getClusterPrivileges()).contains("all"))
            .toList()
            .isEmpty() == false;
        final var authorizationHeaders = Collections.singletonMap(
            "Authorization",
            "ApiKey " + getBase64EncodedApiKeyValue(createdApiKey.v1().getId(), createdApiKey.v1().getKey())
        );
        if (hasAllClusterPrivilege) {
            createUserWithRunAsRole(authorizationHeaders);
        } else {
            ExecutionException e = expectThrows(ExecutionException.class, () -> createUserWithRunAsRole(authorizationHeaders));
            assertThat(e.getMessage(), containsString("unauthorized"));
            assertThat(e.getCause(), instanceOf(ElasticsearchSecurityException.class));
        }
    }
    /**
     * Bulk update across multiple API keys: a first update applies new role descriptors and
     * metadata to every key; repeating the identical update reports every key as a noop (with
     * per-id errors for unknown ids); and updates against invalidated keys are reported in the
     * per-id error details while the remaining keys still succeed.
     */
    public void testBulkUpdateApiKeysForMultipleKeys() throws ExecutionException, InterruptedException, IOException {
        final Tuple<List<CreateApiKeyResponse>, List<Map<String, Object>>> apiKeys = createApiKeys(
            TEST_USER_NAME,
            randomIntBetween(3, 5),
            null
        );
        final List<String> apiKeyIds = apiKeys.v1().stream().map(CreateApiKeyResponse::getId).toList();
        final List<RoleDescriptor> newRoleDescriptors = randomValueOtherThan(null, this::randomRoleDescriptors);
        final Map<String, Object> newMetadata = randomValueOtherThan(null, ApiKeyTests::randomMetadata);
        BulkUpdateApiKeyResponse response = executeBulkUpdateApiKey(
            TEST_USER_NAME,
            new BulkUpdateApiKeyRequest(apiKeyIds, newRoleDescriptors, newMetadata, ApiKeyTests.randomFutureExpirationTime())
        );
        assertNotNull(response);
        assertThat(response.getErrorDetails(), anEmptyMap());
        // Every key must be accounted for, either as updated or as a noop
        final List<String> allIds = Stream.concat(response.getUpdated().stream(), response.getNoops().stream()).toList();
        assertThat(allIds, containsInAnyOrder(apiKeyIds.toArray()));
        // Role descriptor corresponding to SecuritySettingsSource.TEST_ROLE_YML
        final var expectedLimitedByRoleDescriptors = Set.of(
            new RoleDescriptor(
                TEST_ROLE,
                new String[] { "ALL" },
                new RoleDescriptor.IndicesPrivileges[] {
                    RoleDescriptor.IndicesPrivileges.builder().indices("*").allowRestrictedIndices(true).privileges("ALL").build() },
                null
            )
        );
        for (String apiKeyId : apiKeyIds) {
            doTestApiKeyHasExpectedAttributes(
                apiKeyId,
                Map.of(
                    ApiKeyAttribute.METADATA,
                    newMetadata,
                    ApiKeyAttribute.ASSIGNED_ROLE_DESCRIPTORS,
                    newRoleDescriptors,
                    ApiKeyAttribute.LIMITED_BY_ROLE_DESCRIPTORS,
                    expectedLimitedByRoleDescriptors
                )
            );
        }
        // Check that bulk update works when there are no actual updates
        final List<String> newIds = new ArrayList<>(apiKeyIds);
        // include not found ID to force error
        final List<String> notFoundIds = randomList(
            1,
            5,
            () -> randomValueOtherThanMany(apiKeyIds::contains, () -> randomAlphaOfLength(10))
        );
        newIds.addAll(notFoundIds);
        final BulkUpdateApiKeyRequest request = new BulkUpdateApiKeyRequest(shuffledList(newIds), newRoleDescriptors, newMetadata, null);
        response = executeBulkUpdateApiKey(TEST_USER_NAME, request);
        assertNotNull(response);
        // Identical payload: all existing keys are noops, unknown ids show up as errors
        assertThat(response.getUpdated(), empty());
        assertEquals(apiKeyIds.size(), response.getNoops().size());
        assertThat(response.getNoops(), containsInAnyOrder(apiKeyIds.toArray()));
        assertThat(response.getErrorDetails().keySet(), containsInAnyOrder(notFoundIds.toArray()));
        for (String apiKeyId : apiKeyIds) {
            doTestApiKeyHasExpectedAttributes(
                apiKeyId,
                Map.of(
                    ApiKeyAttribute.METADATA,
                    newMetadata,
                    ApiKeyAttribute.ASSIGNED_ROLE_DESCRIPTORS,
                    newRoleDescriptors,
                    ApiKeyAttribute.LIMITED_BY_ROLE_DESCRIPTORS,
                    expectedLimitedByRoleDescriptors
                )
            );
        }
        // Check that bulk update works when some or all updates result in errors
        final List<String> invalidatedIds = randomNonEmptySubsetOf(apiKeyIds);
        getSecurityClient().invalidateApiKeys(invalidatedIds.toArray(new String[0]));
        final List<String> expectedSuccessfulIds = apiKeyIds.stream().filter(i -> invalidatedIds.contains(i) == false).toList();
        final BulkUpdateApiKeyRequest requestWithSomeErrors = new BulkUpdateApiKeyRequest(
            shuffledList(apiKeyIds),
            randomValueOtherThan(null, this::randomRoleDescriptors),
            randomValueOtherThan(null, ApiKeyTests::randomMetadata),
            ApiKeyTests.randomFutureExpirationTime()
        );
        response = executeBulkUpdateApiKey(TEST_USER_NAME, requestWithSomeErrors);
        final List<String> allSuccessfulIds = Stream.concat(response.getUpdated().stream(), response.getNoops().stream()).toList();
        assertThat(allSuccessfulIds, containsInAnyOrder(expectedSuccessfulIds.toArray()));
        assertThat(response.getErrorDetails().keySet(), containsInAnyOrder(invalidatedIds.toArray()));
    }
public void testBulkUpdateApiKeysWithDuplicates() throws ExecutionException, InterruptedException {
final Tuple<List<CreateApiKeyResponse>, List<Map<String, Object>>> apiKeys = createApiKeys(
TEST_USER_NAME,
randomIntBetween(3, 5),
null
);
final List<String> apiKeyIds = apiKeys.v1().stream().map(CreateApiKeyResponse::getId).toList();
final List<RoleDescriptor> newRoleDescriptors = randomValueOtherThan(null, this::randomRoleDescriptors);
final Map<String, Object> newMetadata = randomValueOtherThan(null, ApiKeyTests::randomMetadata);
final List<String> idsWithDuplicates = shuffledList(Stream.concat(apiKeyIds.stream(), apiKeyIds.stream()).toList());
assertEquals(idsWithDuplicates.size(), apiKeyIds.size() * 2);
BulkUpdateApiKeyResponse response = executeBulkUpdateApiKey(
TEST_USER_NAME,
new BulkUpdateApiKeyRequest(idsWithDuplicates, newRoleDescriptors, newMetadata, ApiKeyTests.randomFutureExpirationTime())
);
assertNotNull(response);
assertThat(response.getErrorDetails(), anEmptyMap());
final List<String> allIds = Stream.concat(response.getUpdated().stream(), response.getNoops().stream()).toList();
assertThat(allIds, containsInAnyOrder(apiKeyIds.toArray()));
// Check not found errors reported for all unique IDs
final List<String> notFoundIds = randomList(
1,
5,
() -> randomValueOtherThanMany(apiKeyIds::contains, () -> randomAlphaOfLength(10))
);
final List<String> notFoundIdsWithDuplicates = shuffledList(Stream.concat(notFoundIds.stream(), notFoundIds.stream()).toList());
response = executeBulkUpdateApiKey(
TEST_USER_NAME,
new BulkUpdateApiKeyRequest(
notFoundIdsWithDuplicates,
newRoleDescriptors,
newMetadata,
ApiKeyTests.randomFutureExpirationTime()
)
);
assertNotNull(response);
assertThat(response.getErrorDetails().keySet(), containsInAnyOrder(notFoundIds.toArray()));
assertThat(response.getUpdated(), empty());
assertThat(response.getNoops(), empty());
}
    /**
     * API keys created under different generations of a user's role carry different limited-by
     * role descriptors; a single bulk update (with no explicit changes) must refresh ALL of them
     * to the user's current role, reporting every key as updated.
     */
    public void testBulkUpdateApiKeysWithDifferentLimitedByRoleDescriptorsForSameUser() throws ExecutionException, InterruptedException,
        IOException {
        // Create separate native realm user and role for user role change test
        final var nativeRealmUser = randomAlphaOfLengthBetween(5, 10);
        final var nativeRealmRole = randomAlphaOfLengthBetween(5, 10);
        createNativeRealmUser(
            nativeRealmUser,
            nativeRealmRole,
            new String(HASHER.hash(TEST_PASSWORD_SECURE_STRING)),
            Collections.singletonMap("Authorization", basicAuthHeaderValue(TEST_USER_NAME, TEST_PASSWORD_SECURE_STRING))
        );
        final List<String> firstGenerationClusterPrivileges = new ArrayList<>(randomSubsetOf(ClusterPrivilegeResolver.names()));
        // At a minimum include privilege to manage own API key to ensure no 403
        firstGenerationClusterPrivileges.add(randomFrom("manage_api_key", "manage_own_api_key"));
        final RoleDescriptor firstGenerationRoleDescriptor = putRoleWithClusterPrivileges(
            nativeRealmRole,
            firstGenerationClusterPrivileges.toArray(new String[0])
        );
        // First generation of keys, limited by the role as it stands now
        final Tuple<List<CreateApiKeyResponse>, List<Map<String, Object>>> firstGenerationApiKeys = createApiKeys(
            Collections.singletonMap("Authorization", basicAuthHeaderValue(nativeRealmUser, TEST_PASSWORD_SECURE_STRING)),
            randomIntBetween(1, 5),
            null,
            "all"
        );
        final List<String> firstGenerationApiKeyIds = firstGenerationApiKeys.v1().stream().map(CreateApiKeyResponse::getId).toList();
        expectAttributesForApiKeys(
            firstGenerationApiKeyIds,
            Map.of(ApiKeyAttribute.LIMITED_BY_ROLE_DESCRIPTORS, Set.of(firstGenerationRoleDescriptor))
        );
        // Update user's permissions and create new API keys for the user. The new API keys will have different limited-by role descriptors
        final List<String> secondGenerationClusterPrivileges = randomValueOtherThan(firstGenerationClusterPrivileges, () -> {
            final List<String> privs = new ArrayList<>(randomSubsetOf(ClusterPrivilegeResolver.names()));
            // At a minimum include privilege to manage own API key to ensure no 403
            privs.add(randomFrom("manage_api_key", "manage_own_api_key"));
            return privs;
        });
        final RoleDescriptor secondGenerationRoleDescriptor = putRoleWithClusterPrivileges(
            nativeRealmRole,
            secondGenerationClusterPrivileges.toArray(new String[0])
        );
        final Tuple<List<CreateApiKeyResponse>, List<Map<String, Object>>> secondGenerationApiKeys = createApiKeys(
            Collections.singletonMap("Authorization", basicAuthHeaderValue(nativeRealmUser, TEST_PASSWORD_SECURE_STRING)),
            randomIntBetween(1, 5),
            null,
            "all"
        );
        final List<String> secondGenerationApiKeyIds = secondGenerationApiKeys.v1().stream().map(CreateApiKeyResponse::getId).toList();
        expectAttributesForApiKeys(
            secondGenerationApiKeyIds,
            Map.of(ApiKeyAttribute.LIMITED_BY_ROLE_DESCRIPTORS, Set.of(secondGenerationRoleDescriptor))
        );
        // Update user role then bulk update all API keys. This should result in new limited-by role descriptors for all API keys
        final List<String> allIds = Stream.concat(firstGenerationApiKeyIds.stream(), secondGenerationApiKeyIds.stream()).toList();
        final List<String> finalClusterPrivileges = randomValueOtherThanMany(
            p -> firstGenerationClusterPrivileges.equals(p) || secondGenerationClusterPrivileges.equals(p),
            () -> {
                final List<String> privs = new ArrayList<>(randomSubsetOf(ClusterPrivilegeResolver.names()));
                // At a minimum include privilege to manage own API key to ensure no 403
                privs.add(randomFrom("manage_api_key", "manage_own_api_key"));
                return privs;
            }
        );
        final RoleDescriptor finalRoleDescriptor = putRoleWithClusterPrivileges(
            nativeRealmRole,
            finalClusterPrivileges.toArray(new String[0])
        );
        final var response = executeBulkUpdateApiKey(
            nativeRealmUser,
            BulkUpdateApiKeyRequest.usingApiKeyIds(allIds.toArray(String[]::new))
        );
        assertThat(response.getErrorDetails(), anEmptyMap());
        // No noops: every key's limited-by descriptors changed, so every key is reported as updated
        assertThat(response.getNoops(), empty());
        assertThat(response.getUpdated(), containsInAnyOrder(allIds.toArray()));
        expectAttributesForApiKeys(allIds, Map.of(ApiKeyAttribute.LIMITED_BY_ROLE_DESCRIPTORS, Set.of(finalRoleDescriptor)));
    }
    /**
     * Updating an API key (without explicit changes) must automatically bring the stored snapshot
     * of the owner up to date: new limited-by role descriptors after the owner's role changes, and
     * new creator fields (full name, email, metadata, role names) after the user itself changes.
     */
    public void testUpdateApiKeysAutoUpdatesUserFields() throws Exception {
        // Create separate native realm user and role for user role change test
        final var nativeRealmUser = randomAlphaOfLengthBetween(5, 10);
        final var nativeRealmRole = randomAlphaOfLengthBetween(5, 10);
        createNativeRealmUser(
            nativeRealmUser,
            nativeRealmRole,
            new String(HASHER.hash(TEST_PASSWORD_SECURE_STRING)),
            Collections.singletonMap("Authorization", basicAuthHeaderValue(TEST_USER_NAME, TEST_PASSWORD_SECURE_STRING))
        );
        final List<String> clusterPrivileges = new ArrayList<>(randomSubsetOf(ClusterPrivilegeResolver.names()));
        // At a minimum include privilege to manage own API key to ensure no 403
        clusterPrivileges.add(randomFrom("manage_api_key", "manage_own_api_key"));
        final RoleDescriptor roleDescriptorBeforeUpdate = putRoleWithClusterPrivileges(
            nativeRealmRole,
            clusterPrivileges.toArray(new String[0])
        );
        // Create api key
        final CreateApiKeyResponse createdApiKey = createApiKeys(
            Collections.singletonMap("Authorization", basicAuthHeaderValue(nativeRealmUser, TEST_PASSWORD_SECURE_STRING)),
            1,
            null,
            "all"
        ).v1().get(0);
        final String apiKeyId = createdApiKey.getId();
        doTestApiKeyHasExpectedAttributes(
            apiKeyId,
            Map.of(ApiKeyAttribute.LIMITED_BY_ROLE_DESCRIPTORS, Set.of(roleDescriptorBeforeUpdate))
        );
        final List<String> newClusterPrivileges = randomValueOtherThan(clusterPrivileges, () -> {
            final List<String> privs = new ArrayList<>(randomSubsetOf(ClusterPrivilegeResolver.names()));
            // At a minimum include privilege to manage own API key to ensure no 403
            privs.add(randomFrom("manage_api_key", "manage_own_api_key"));
            return privs;
        });
        // Update user role
        final RoleDescriptor roleDescriptorAfterUpdate = putRoleWithClusterPrivileges(
            nativeRealmRole,
            newClusterPrivileges.toArray(new String[0])
        );
        // A bare update request (no explicit changes) still refreshes the limited-by descriptors
        UpdateApiKeyResponse response = updateSingleApiKeyMaybeUsingBulkAction(
            nativeRealmUser,
            UpdateApiKeyRequest.usingApiKeyId(apiKeyId)
        );
        assertNotNull(response);
        assertTrue(response.isUpdated());
        doTestApiKeyHasExpectedAttributes(apiKeyId, Map.of(ApiKeyAttribute.LIMITED_BY_ROLE_DESCRIPTORS, Set.of(roleDescriptorAfterUpdate)));
        // Update user role name only
        final RoleDescriptor roleDescriptorWithNewName = putRoleWithClusterPrivileges(
            randomValueOtherThan(nativeRealmRole, () -> randomAlphaOfLength(10)),
            // Keep old privileges
            newClusterPrivileges.toArray(new String[0])
        );
        final User updatedUser = AuthenticationTestHelper.userWithRandomMetadataAndDetails(
            nativeRealmUser,
            roleDescriptorWithNewName.getName()
        );
        updateUser(updatedUser);
        // Update API key
        response = updateSingleApiKeyMaybeUsingBulkAction(nativeRealmUser, UpdateApiKeyRequest.usingApiKeyId(apiKeyId));
        assertNotNull(response);
        assertTrue(response.isUpdated());
        // Creator snapshot must now mirror the updated native-realm user
        final Map<String, Object> expectedCreator = new HashMap<>();
        expectedCreator.put("principal", updatedUser.principal());
        expectedCreator.put("full_name", updatedUser.fullName());
        expectedCreator.put("email", updatedUser.email());
        expectedCreator.put("metadata", updatedUser.metadata());
        expectedCreator.put("realm_type", "native");
        expectedCreator.put("realm", "index");
        doTestApiKeyHasExpectedAttributes(
            apiKeyId,
            Map.of(ApiKeyAttribute.CREATOR, expectedCreator, ApiKeyAttribute.LIMITED_BY_ROLE_DESCRIPTORS, Set.of(roleDescriptorWithNewName))
        );
    }
    /**
     * Update requests must report "not found" for: a nonexistent key id, another user's key, and a
     * key owned by a user with the same username but from a different realm. Updating one's own
     * key succeeds.
     */
    public void testUpdateApiKeysNotFoundScenarios() throws Exception {
        final Tuple<CreateApiKeyResponse, Map<String, Object>> createdApiKey = createApiKey(TEST_USER_NAME, null);
        final var apiKeyId = createdApiKey.v1().getId();
        final var expectedRoleDescriptor = new RoleDescriptor(randomAlphaOfLength(10), new String[] { "all" }, null, null);
        final var request = new UpdateApiKeyRequest(
            apiKeyId,
            List.of(expectedRoleDescriptor),
            ApiKeyTests.randomMetadata(),
            ApiKeyTests.randomFutureExpirationTime()
        );
        // Validate can update own API key
        final UpdateApiKeyResponse response = updateSingleApiKeyMaybeUsingBulkAction(TEST_USER_NAME, request);
        assertNotNull(response);
        assertTrue(response.isUpdated());
        // Test not found exception on non-existent API key
        final var otherApiKeyId = randomValueOtherThan(apiKeyId, () -> randomAlphaOfLength(20));
        doTestUpdateApiKeysNotFound(
            new UpdateApiKeyRequest(
                otherApiKeyId,
                request.getRoleDescriptors(),
                request.getMetadata(),
                ApiKeyTests.randomFutureExpirationTime()
            )
        );
        // Test not found exception on other user's API key
        final Tuple<CreateApiKeyResponse, Map<String, Object>> otherUsersApiKey = createApiKey("user_with_manage_api_key_role", null);
        doTestUpdateApiKeysNotFound(
            new UpdateApiKeyRequest(
                otherUsersApiKey.v1().getId(),
                request.getRoleDescriptors(),
                request.getMetadata(),
                ApiKeyTests.randomFutureExpirationTime()
            )
        );
        // Test not found exception on API key of user with the same username but from a different realm
        // Create native realm user with same username but different password to allow us to create an API key for _that_ user
        // instead of file realm one
        final var passwordSecureString = new SecureString("x-pack-test-other-password".toCharArray());
        createNativeRealmUser(
            TEST_USER_NAME,
            TEST_ROLE,
            new String(HASHER.hash(passwordSecureString)),
            Collections.singletonMap("Authorization", basicAuthHeaderValue(TEST_USER_NAME, TEST_PASSWORD_SECURE_STRING))
        );
        final CreateApiKeyResponse apiKeyForNativeRealmUser = createApiKeys(
            Collections.singletonMap("Authorization", basicAuthHeaderValue(TEST_USER_NAME, passwordSecureString)),
            1,
            null,
            "all"
        ).v1().get(0);
        doTestUpdateApiKeysNotFound(
            new UpdateApiKeyRequest(
                apiKeyForNativeRealmUser.getId(),
                request.getRoleDescriptors(),
                request.getMetadata(),
                ApiKeyTests.randomFutureExpirationTime()
            )
        );
    }
/**
 * Verifies invalid update scenarios: a request authenticated <em>with</em> an API key may not
 * update API keys (only the owner user can), and updating an invalidated or expired key fails
 * with an {@link IllegalArgumentException} naming the key.
 */
public void testInvalidUpdateApiKeysScenarios() throws ExecutionException, InterruptedException {
    final List<String> apiKeyPrivileges = new ArrayList<>(randomSubsetOf(ClusterPrivilegeResolver.names()));
    // At a minimum include privilege to manage own API key to ensure no 403
    apiKeyPrivileges.add(randomFrom("manage_api_key", "manage_own_api_key"));
    final CreateApiKeyResponse createdApiKey = createApiKeys(TEST_USER_NAME, 1, null, apiKeyPrivileges.toArray(new String[0])).v1()
        .get(0);
    final var apiKeyId = createdApiKey.getId();
    final var roleDescriptor = new RoleDescriptor(randomAlphaOfLength(10), new String[] { "manage_own_api_key" }, null, null);
    final var request = new UpdateApiKeyRequest(
        apiKeyId,
        List.of(roleDescriptor),
        ApiKeyTests.randomMetadata(),
        ApiKeyTests.randomFutureExpirationTime()
    );
    // Attempt the update while authenticated with the API key itself — must be rejected
    final PlainActionFuture<UpdateApiKeyResponse> updateListener = new PlainActionFuture<>();
    client().filterWithHeader(
        Collections.singletonMap(
            "Authorization",
            "ApiKey " + getBase64EncodedApiKeyValue(createdApiKey.getId(), createdApiKey.getKey())
        )
    ).execute(UpdateApiKeyAction.INSTANCE, request, updateListener);
    final var apiKeysNotAllowedEx = expectThrows(ExecutionException.class, updateListener::get);
    assertThat(apiKeysNotAllowedEx.getCause(), instanceOf(IllegalArgumentException.class));
    assertThat(
        apiKeysNotAllowedEx.getMessage(),
        containsString("authentication via API key not supported: only the owner user can update an API key")
    );
    // Randomly invalidate the key, expire it, or both; the later assertion branches on `invalidated`
    final boolean invalidated = randomBoolean();
    if (invalidated) {
        final PlainActionFuture<InvalidateApiKeyResponse> listener = new PlainActionFuture<>();
        client().execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingRealmName("file"), listener);
        final var invalidateResponse = listener.get();
        assertThat(invalidateResponse.getErrors(), empty());
        assertThat(invalidateResponse.getInvalidatedApiKeys(), contains(apiKeyId));
    }
    if (invalidated == false || randomBoolean()) {
        // Force-expire the key by writing a past expiration time directly into the security index
        final var dayBefore = Instant.now().minus(1L, ChronoUnit.DAYS);
        assertTrue(Instant.now().isAfter(dayBefore));
        final var expirationDateUpdatedResponse = client().prepareUpdate(SECURITY_MAIN_ALIAS, apiKeyId)
            .setDoc("expiration_time", dayBefore.toEpochMilli())
            .setRefreshPolicy(IMMEDIATE)
            .get();
        assertThat(expirationDateUpdatedResponse.getResult(), is(DocWriteResponse.Result.UPDATED));
    }
    final var ex = expectThrowsWithUnwrappedExecutionException(
        IllegalArgumentException.class,
        () -> updateSingleApiKeyMaybeUsingBulkAction(TEST_USER_NAME, request)
    );
    // Invalidation takes precedence over expiration in the error message
    if (invalidated) {
        assertThat(ex.getMessage(), containsString("cannot update invalidated API key [" + apiKeyId + "]"));
    } else {
        assertThat(ex.getMessage(), containsString("cannot update expired API key [" + apiKeyId + "]"));
    }
}
/**
 * Verifies that when the creator realm is part of a security domain, an update authenticated via
 * <em>any</em> realm of that domain succeeds, and the key's stored creator information is
 * rewritten with the authenticating realm plus the serialized realm-domain definition.
 */
public void testUpdateApiKeysAccountsForSecurityDomains() throws Exception {
    final Tuple<CreateApiKeyResponse, Map<String, Object>> createdApiKey = createApiKey(TEST_USER_NAME, null);
    final var apiKeyId = createdApiKey.v1().getId();
    final ServiceWithNodeName serviceWithNodeName = getServiceWithNodeName();
    final RealmConfig.RealmIdentifier creatorRealmOnCreatedApiKey = new RealmConfig.RealmIdentifier(FileRealmSettings.TYPE, "file");
    final RealmConfig.RealmIdentifier otherRealmInDomain = AuthenticationTestHelper.randomRealmIdentifier(true);
    final var realmDomain = new RealmDomain(
        ESTestCase.randomAlphaOfLengthBetween(3, 8),
        Set.of(creatorRealmOnCreatedApiKey, otherRealmInDomain)
    );
    // Update should work for any of the realms within the domain
    final var authenticatingRealm = randomFrom(creatorRealmOnCreatedApiKey, otherRealmInDomain);
    final var authentication = randomValueOtherThanMany(
        Authentication::isApiKey,
        () -> AuthenticationTestHelper.builder()
            .user(new User(TEST_USER_NAME, TEST_ROLE))
            .realmRef(
                new Authentication.RealmRef(
                    authenticatingRealm.getName(),
                    authenticatingRealm.getType(),
                    serviceWithNodeName.nodeName(),
                    realmDomain
                )
            )
            .build()
    );
    final BulkUpdateApiKeyResponse response = updateApiKeys(
        serviceWithNodeName.service(),
        authentication,
        BulkUpdateApiKeyRequest.usingApiKeyIds(apiKeyId),
        Set.of()
    );
    assertSingleUpdate(apiKeyId, response);
    // Build the creator map we expect to find in the raw API key document
    final Map<String, Object> expectedCreator = new HashMap<>();
    expectedCreator.put("principal", TEST_USER_NAME);
    expectedCreator.put("full_name", null);
    expectedCreator.put("email", null);
    expectedCreator.put("metadata", Map.of());
    expectedCreator.put("realm_type", authenticatingRealm.getType());
    expectedCreator.put("realm", authenticatingRealm.getName());
    // The realm domain is stored as a nested object; round-trip it through XContent to compare as a map
    final XContentBuilder builder = realmDomain.toXContent(XContentFactory.jsonBuilder(), null);
    expectedCreator.put("realm_domain", XContentHelper.convertToMap(BytesReference.bytes(builder), false, XContentType.JSON).v2());
    expectCreatorForApiKey(expectedCreator, getApiKeyDocument(apiKeyId));
}
/**
 * Verifies noop detection for API key updates: re-sending an identical request, an empty request,
 * or re-ordered role descriptors must report {@code isUpdated() == false} (and must not clear the
 * doc cache), while changed role descriptors, changed metadata, or changed creator information
 * must report an actual update.
 */
public void testUpdateApiKeysNoopScenarios() throws Exception {
    final Tuple<CreateApiKeyResponse, Map<String, Object>> createdApiKey = createApiKey(TEST_USER_NAME, null);
    final var apiKeyId = createdApiKey.v1().getId();
    final var initialRequest = new UpdateApiKeyRequest(
        apiKeyId,
        List.of(new RoleDescriptor(randomAlphaOfLength(10), new String[] { "all" }, null, null)),
        // Ensure not `null` to set metadata since we use the initialRequest further down in the test to ensure that
        // metadata updates are non-noops
        randomValueOtherThanMany(Objects::isNull, ApiKeyTests::randomMetadata),
        null // Expiration is relative current time, so must be null to cause noop
    );
    UpdateApiKeyResponse response = updateSingleApiKeyMaybeUsingBulkAction(TEST_USER_NAME, initialRequest);
    assertNotNull(response);
    // First update is not noop, because role descriptors changed and possibly metadata
    assertTrue(response.isUpdated());
    // Update with same request is a noop and does not clear cache
    authenticateWithApiKey(apiKeyId, createdApiKey.v1().getKey());
    // Find the node whose ApiKeyService doc cache now holds this key's document
    final var serviceWithNameForDoc1 = Arrays.stream(internalCluster().getNodeNames())
        .map(n -> internalCluster().getInstance(ApiKeyService.class, n))
        .filter(s -> s.getDocCache().get(apiKeyId) != null)
        .findFirst()
        .orElseThrow();
    final int count = serviceWithNameForDoc1.getDocCache().count();
    response = updateSingleApiKeyMaybeUsingBulkAction(TEST_USER_NAME, initialRequest);
    assertNotNull(response);
    assertFalse(response.isUpdated());
    // A noop update must leave the doc cache untouched
    assertEquals(count, serviceWithNameForDoc1.getDocCache().count());
    // Update with empty request is a noop
    response = updateSingleApiKeyMaybeUsingBulkAction(TEST_USER_NAME, UpdateApiKeyRequest.usingApiKeyId(apiKeyId));
    assertNotNull(response);
    assertFalse(response.isUpdated());
    // Update with different role descriptors is not a noop
    final List<RoleDescriptor> newRoleDescriptors = List.of(
        randomValueOtherThanMany(
            rd -> RoleDescriptorRequestValidator.validate(rd) != null || initialRequest.getRoleDescriptors().contains(rd),
            () -> RoleDescriptorTestHelper.builder().build()
        ),
        randomValueOtherThanMany(
            rd -> RoleDescriptorRequestValidator.validate(rd) != null || initialRequest.getRoleDescriptors().contains(rd),
            () -> RoleDescriptorTestHelper.builder().build()
        )
    );
    response = updateSingleApiKeyMaybeUsingBulkAction(
        TEST_USER_NAME,
        new UpdateApiKeyRequest(apiKeyId, newRoleDescriptors, null, null)
    );
    assertNotNull(response);
    assertTrue(response.isUpdated());
    // Update with re-ordered role descriptors is a noop
    response = updateSingleApiKeyMaybeUsingBulkAction(
        TEST_USER_NAME,
        new UpdateApiKeyRequest(apiKeyId, List.of(newRoleDescriptors.get(1), newRoleDescriptors.get(0)), null, null)
    );
    assertNotNull(response);
    assertFalse(response.isUpdated());
    // Update with different metadata is not a noop
    response = updateSingleApiKeyMaybeUsingBulkAction(
        TEST_USER_NAME,
        new UpdateApiKeyRequest(
            apiKeyId,
            null,
            randomValueOtherThanMany(md -> md == null || md.equals(initialRequest.getMetadata()), ApiKeyTests::randomMetadata),
            null
        )
    );
    assertNotNull(response);
    assertTrue(response.isUpdated());
    // Update with different creator info is not a noop
    // First, ensure that the user role descriptors alone do *not* cause an update, so we can test that we correctly perform the noop
    // check when we update creator info
    final ServiceWithNodeName serviceWithNodeName = getServiceWithNodeName();
    // Role descriptor corresponding to SecuritySettingsSource.TEST_ROLE_YML, i.e., should not result in update
    final Set<RoleDescriptor> oldUserRoleDescriptors = Set.of(
        new RoleDescriptor(
            TEST_ROLE,
            new String[] { "ALL" },
            new RoleDescriptor.IndicesPrivileges[] {
                RoleDescriptor.IndicesPrivileges.builder().indices("*").allowRestrictedIndices(true).privileges("ALL").build() },
            null
        )
    );
    assertSingleNoop(
        apiKeyId,
        updateApiKeys(
            serviceWithNodeName.service(),
            Authentication.newRealmAuthentication(
                new User(TEST_USER_NAME, TEST_ROLE),
                new Authentication.RealmRef("file", "file", serviceWithNodeName.nodeName())
            ),
            BulkUpdateApiKeyRequest.usingApiKeyIds(apiKeyId),
            oldUserRoleDescriptors
        )
    );
    // Now authenticate with changed user details and/or a changed realm ref, which must register as an update
    final User updatedUser = AuthenticationTestHelper.userWithRandomMetadataAndDetails(TEST_USER_NAME, TEST_ROLE);
    final RealmConfig.RealmIdentifier creatorRealmOnCreatedApiKey = new RealmConfig.RealmIdentifier(FileRealmSettings.TYPE, "file");
    final boolean noUserChanges = updatedUser.equals(new User(TEST_USER_NAME, TEST_ROLE));
    final Authentication.RealmRef realmRef;
    // If the randomized user carries no changes, force a realm-ref change so the update is guaranteed non-noop
    if (randomBoolean() || noUserChanges) {
        final RealmConfig.RealmIdentifier otherRealmInDomain = AuthenticationTestHelper.randomRealmIdentifier(true);
        final var realmDomain = new RealmDomain(
            ESTestCase.randomAlphaOfLengthBetween(3, 8),
            Set.of(creatorRealmOnCreatedApiKey, otherRealmInDomain)
        );
        // Using other realm from domain should result in update
        realmRef = new Authentication.RealmRef(
            otherRealmInDomain.getName(),
            otherRealmInDomain.getType(),
            serviceWithNodeName.nodeName(),
            realmDomain
        );
    } else {
        realmRef = new Authentication.RealmRef(
            creatorRealmOnCreatedApiKey.getName(),
            creatorRealmOnCreatedApiKey.getType(),
            serviceWithNodeName.nodeName()
        );
    }
    final var authentication = randomValueOtherThanMany(
        Authentication::isApiKey,
        () -> AuthenticationTestHelper.builder().user(updatedUser).realmRef(realmRef).build()
    );
    assertSingleUpdate(
        apiKeyId,
        updateApiKeys(
            serviceWithNodeName.service(),
            authentication,
            BulkUpdateApiKeyRequest.usingApiKeyIds(apiKeyId),
            oldUserRoleDescriptors
        )
    );
}
/**
 * Verifies that a key whose stored limited-by role descriptors are the 7.x legacy superuser
 * descriptor is auto-upgraded to the current 8.x superuser descriptor on the first update
 * (a real update), after which a repeat update with the same descriptors is a noop.
 */
public void testUpdateApiKeysAutoUpdatesLegacySuperuserRoleDescriptor() throws Exception {
    final Tuple<CreateApiKeyResponse, Map<String, Object>> createdApiKey = createApiKey(TEST_USER_NAME, null);
    final var apiKeyId = createdApiKey.v1().getId();
    final ServiceWithNodeName serviceWithNodeName = getServiceWithNodeName();
    final Authentication authentication = Authentication.newRealmAuthentication(
        new User(TEST_USER_NAME, TEST_ROLE),
        new Authentication.RealmRef("file", "file", serviceWithNodeName.nodeName())
    );
    final Set<RoleDescriptor> legacySuperuserRoleDescriptor = Set.of(ApiKeyService.LEGACY_SUPERUSER_ROLE_DESCRIPTOR);
    // Force set user role descriptors to 7.x legacy superuser role descriptors
    assertSingleUpdate(
        apiKeyId,
        updateApiKeys(
            serviceWithNodeName.service(),
            authentication,
            BulkUpdateApiKeyRequest.usingApiKeyIds(apiKeyId),
            legacySuperuserRoleDescriptor
        )
    );
    // raw document has the legacy superuser role descriptor
    expectRoleDescriptorsForApiKey("limited_by_role_descriptors", legacySuperuserRoleDescriptor, getApiKeyDocument(apiKeyId));
    final Set<RoleDescriptor> currentSuperuserRoleDescriptors = ApiKeyService.removeUserRoleDescriptorDescriptions(
        Set.of(ReservedRolesStore.SUPERUSER_ROLE_DESCRIPTOR)
    );
    // The first request is not a noop because we are auto-updating the legacy role descriptors to 8.x role descriptors
    assertSingleUpdate(
        apiKeyId,
        updateApiKeys(
            serviceWithNodeName.service(),
            authentication,
            BulkUpdateApiKeyRequest.usingApiKeyIds(apiKeyId),
            currentSuperuserRoleDescriptors
        )
    );
    doTestApiKeyHasExpectedAttributes(apiKeyId, Map.of(ApiKeyAttribute.LIMITED_BY_ROLE_DESCRIPTORS, currentSuperuserRoleDescriptors));
    // Second update is noop because role descriptors were auto-updated by the previous request
    assertSingleNoop(
        apiKeyId,
        updateApiKeys(
            serviceWithNodeName.service(),
            authentication,
            BulkUpdateApiKeyRequest.usingApiKeyIds(apiKeyId),
            currentSuperuserRoleDescriptors
        )
    );
}
/**
 * Verifies that updating an API key evicts only that key's entry from the per-node
 * {@code ApiKeyService} doc cache, leaving other keys' doc-cache entries and the auth cache
 * counts on all involved nodes unchanged.
 */
public void testUpdateApiKeysClearsApiKeyDocCache() throws Exception {
    final List<ServiceWithNodeName> services = Arrays.stream(internalCluster().getNodeNames())
        .map(n -> new ServiceWithNodeName(internalCluster().getInstance(ApiKeyService.class, n), n))
        .toList();
    // Create two API keys and authenticate with them
    final var apiKey1 = createApiKeyAndAuthenticateWithIt();
    final var apiKey2 = createApiKeyAndAuthenticateWithIt();
    // Find out which nodes handled the above authentication requests
    final var serviceWithNameForDoc1 = services.stream()
        .filter(s -> s.service().getDocCache().get(apiKey1.v1()) != null)
        .findFirst()
        .orElseThrow();
    final var serviceWithNameForDoc2 = services.stream()
        .filter(s -> s.service().getDocCache().get(apiKey2.v1()) != null)
        .findFirst()
        .orElseThrow();
    final var serviceForDoc1 = serviceWithNameForDoc1.service();
    final var serviceForDoc2 = serviceWithNameForDoc2.service();
    assertNotNull(serviceForDoc1.getFromCache(apiKey1.v1()));
    assertNotNull(serviceForDoc2.getFromCache(apiKey2.v1()));
    // The two authentications may have landed on the same node; expected counts differ accordingly
    final boolean sameServiceNode = serviceWithNameForDoc1 == serviceWithNameForDoc2;
    if (sameServiceNode) {
        assertEquals(2, serviceForDoc1.getDocCache().count());
    } else {
        assertEquals(1, serviceForDoc1.getDocCache().count());
        assertEquals(1, serviceForDoc2.getDocCache().count());
    }
    // Snapshot auth-cache counts so we can assert they are unaffected by the update
    final int serviceForDoc1AuthCacheCount = serviceForDoc1.getApiKeyAuthCache().count();
    final int serviceForDoc2AuthCacheCount = serviceForDoc2.getApiKeyAuthCache().count();
    // Update the first key
    final UpdateApiKeyResponse response = updateSingleApiKeyMaybeUsingBulkAction(
        ES_TEST_ROOT_USER,
        new UpdateApiKeyRequest(
            apiKey1.v1(),
            List.of(),
            // Set metadata to ensure update
            Map.of(randomAlphaOfLength(5), randomAlphaOfLength(10)),
            ApiKeyTests.randomFutureExpirationTime()
        )
    );
    assertNotNull(response);
    assertTrue(response.isUpdated());
    // The doc cache entry should be gone for the first key
    if (sameServiceNode) {
        assertEquals(1, serviceForDoc1.getDocCache().count());
        assertNull(serviceForDoc1.getDocCache().get(apiKey1.v1()));
        assertNotNull(serviceForDoc1.getDocCache().get(apiKey2.v1()));
    } else {
        assertEquals(0, serviceForDoc1.getDocCache().count());
        assertEquals(1, serviceForDoc2.getDocCache().count());
    }
    // Auth cache has not been affected
    assertEquals(serviceForDoc1AuthCacheCount, serviceForDoc1.getApiKeyAuthCache().count());
    assertEquals(serviceForDoc2AuthCacheCount, serviceForDoc2.getApiKeyAuthCache().count());
}
/**
 * Verifies that creating a cross-cluster API key with a certificate identity persists that
 * identity in the raw API key document alongside the {@code cross_cluster} type.
 */
public void testCreateCrossClusterApiKeyWithCertificateIdentity() throws Exception {
    final String certificateIdentity = "CN=remote-cluster-cert";
    final String keyName = randomAlphaOfLengthBetween(3, 8);
    final CrossClusterApiKeyRoleDescriptorBuilder roleBuilder = CrossClusterApiKeyRoleDescriptorBuilder.parse("""
        {
          "search": [ {"names": ["logs"]} ]
        }""");
    final var request = new CreateCrossClusterApiKeyRequest(
        keyName,
        roleBuilder,
        null,
        null,
        new CertificateIdentity(certificateIdentity)
    );
    request.setRefreshPolicy(randomFrom(IMMEDIATE, WAIT_UNTIL));
    final PlainActionFuture<CreateApiKeyResponse> future = new PlainActionFuture<>();
    client().execute(CreateCrossClusterApiKeyAction.INSTANCE, request, future);
    final CreateApiKeyResponse response = future.actionGet();
    assertEquals(keyName, response.getName());
    assertNotNull(response.getId());
    assertNotNull(response.getKey());
    // Inspect the raw security-index document to confirm the identity was stored
    final Map<String, Object> apiKeyDoc = getApiKeyDocument(response.getId());
    assertThat(apiKeyDoc.get("certificate_identity"), equalTo(certificateIdentity));
    assertThat(apiKeyDoc.get("type"), equalTo("cross_cluster"));
}
/**
 * Verifies that a cross-cluster API key created without a certificate identity stores no
 * {@code certificate_identity} field in its raw document, while the {@code cross_cluster}
 * type is still recorded.
 */
public void testCreateCrossClusterApiKeyWithoutCertificateIdentity() throws Exception {
    final String name = randomAlphaOfLengthBetween(3, 8);
    final var createRequest = CreateCrossClusterApiKeyRequest.withNameAndAccess(name, """
        {
          "search": [ {"names": ["logs"]} ]
        }""");
    createRequest.setRefreshPolicy(randomFrom(IMMEDIATE, WAIT_UNTIL));
    // Execute the create action and block on the listener for the response
    final PlainActionFuture<CreateApiKeyResponse> listener = new PlainActionFuture<>();
    client().execute(CreateCrossClusterApiKeyAction.INSTANCE, createRequest, listener);
    final CreateApiKeyResponse created = listener.actionGet();
    assertEquals(name, created.getName());
    assertNotNull(created.getId());
    assertNotNull(created.getKey());
    // The raw document must carry the cross_cluster type but no certificate identity at all
    final Map<String, Object> rawDoc = getApiKeyDocument(created.getId());
    assertThat(rawDoc.containsKey("certificate_identity"), is(false));
    assertThat(rawDoc.get("type"), equalTo("cross_cluster"));
}
/**
 * Verifies that an update to a cross-cluster API key can replace its certificate identity:
 * the raw document reflects the new identity after the update and keeps the
 * {@code cross_cluster} type.
 */
public void testUpdateCrossClusterApiKeyWithCertificateIdentity() throws Exception {
    // Create a cross-cluster API key first
    final String keyName = randomAlphaOfLengthBetween(3, 8);
    final CrossClusterApiKeyRoleDescriptorBuilder roleBuilder = CrossClusterApiKeyRoleDescriptorBuilder.parse("""
        {
          "search": [ {"names": ["logs"]} ]
        }""");
    final var createRequest = new CreateCrossClusterApiKeyRequest(
        keyName,
        roleBuilder,
        null,
        null,
        new CertificateIdentity("CN=original-cert")
    );
    createRequest.setRefreshPolicy(IMMEDIATE);
    final PlainActionFuture<CreateApiKeyResponse> createFuture = new PlainActionFuture<>();
    client().execute(CreateCrossClusterApiKeyAction.INSTANCE, createRequest, createFuture);
    final CreateApiKeyResponse createdApiKey = createFuture.actionGet();
    final var apiKeyId = createdApiKey.getId();
    // Verify original certificate identity is set
    Map<String, Object> apiKeyDoc = getApiKeyDocument(apiKeyId);
    assertThat(apiKeyDoc.get("certificate_identity"), equalTo("CN=original-cert"));
    assertThat(apiKeyDoc.get("type"), equalTo("cross_cluster"));
    // Now test updating the certificate identity using UpdateCrossClusterApiKeyRequest
    final var newCertIdentity = "CN=updated-cert";
    final UpdateCrossClusterApiKeyRequest updateRequest = new UpdateCrossClusterApiKeyRequest(
        apiKeyId,
        null,
        null,
        null,
        new CertificateIdentity(newCertIdentity)
    );
    final PlainActionFuture<UpdateApiKeyResponse> updateFuture = new PlainActionFuture<>();
    client().execute(UpdateCrossClusterApiKeyAction.INSTANCE, updateRequest, updateFuture);
    final UpdateApiKeyResponse response = updateFuture.actionGet();
    assertNotNull(response);
    assertTrue(response.isUpdated());
    // Re-read the raw document to confirm the identity was replaced
    apiKeyDoc = getApiKeyDocument(apiKeyId);
    assertThat(apiKeyDoc.get("certificate_identity"), equalTo(newCertIdentity));
    assertThat(apiKeyDoc.get("type"), equalTo("cross_cluster"));
}
/**
 * Verifies that updating a cross-cluster API key with {@code new CertificateIdentity(null)}
 * removes the {@code certificate_identity} field from the raw document entirely.
 */
public void testUpdateCrossClusterApiKeyClearCertificateIdentity() throws Exception {
    // Create a cross-cluster API key with certificate identity
    final String keyName = randomAlphaOfLengthBetween(3, 8);
    final CrossClusterApiKeyRoleDescriptorBuilder roleBuilder = CrossClusterApiKeyRoleDescriptorBuilder.parse("""
        {
          "search": [ {"names": ["logs"]} ]
        }""");
    final var createRequest = new CreateCrossClusterApiKeyRequest(
        keyName,
        roleBuilder,
        null,
        null,
        new CertificateIdentity("CN=to-be-cleared")
    );
    createRequest.setRefreshPolicy(IMMEDIATE);
    final PlainActionFuture<CreateApiKeyResponse> createFuture = new PlainActionFuture<>();
    client().execute(CreateCrossClusterApiKeyAction.INSTANCE, createRequest, createFuture);
    final CreateApiKeyResponse createdApiKey = createFuture.actionGet();
    final var apiKeyId = createdApiKey.getId();
    // A CertificateIdentity wrapping null signals "clear the field" (vs. a null argument, which preserves it)
    final UpdateCrossClusterApiKeyRequest updateRequest = new UpdateCrossClusterApiKeyRequest(
        apiKeyId,
        null,
        null,
        null,
        new CertificateIdentity(null)
    );
    final PlainActionFuture<UpdateApiKeyResponse> updateFuture = new PlainActionFuture<>();
    client().execute(UpdateCrossClusterApiKeyAction.INSTANCE, updateRequest, updateFuture);
    final UpdateApiKeyResponse response = updateFuture.actionGet();
    assertNotNull(response);
    assertTrue(response.isUpdated());
    // The field must be absent, not merely null
    final Map<String, Object> apiKeyDoc = getApiKeyDocument(apiKeyId);
    assertThat(apiKeyDoc.containsKey("certificate_identity"), is(false));
}
/**
 * Verifies that an update which omits the certificate identity (passes {@code null} for the
 * {@code CertificateIdentity} argument) leaves the previously stored identity untouched.
 */
public void testUpdateCrossClusterApiKeyPreserveCertificateIdentity() throws Exception {
    // Create a cross-cluster API key with certificate identity
    final String keyName = randomAlphaOfLengthBetween(3, 8);
    final CrossClusterApiKeyRoleDescriptorBuilder roleBuilder = CrossClusterApiKeyRoleDescriptorBuilder.parse("""
        {
          "search": [ {"names": ["logs"]} ]
        }""");
    final var createRequest = new CreateCrossClusterApiKeyRequest(
        keyName,
        roleBuilder,
        null,
        null,
        new CertificateIdentity("CN=preserve-me")
    );
    createRequest.setRefreshPolicy(IMMEDIATE);
    final PlainActionFuture<CreateApiKeyResponse> createFuture = new PlainActionFuture<>();
    client().execute(CreateCrossClusterApiKeyAction.INSTANCE, createRequest, createFuture);
    final CreateApiKeyResponse createdApiKey = createFuture.actionGet();
    final var apiKeyId = createdApiKey.getId();
    // Update without specifying certificate identity (should preserve existing)
    final UpdateCrossClusterApiKeyRequest updateRequest = new UpdateCrossClusterApiKeyRequest(
        apiKeyId,
        null,
        Map.of("updated", "true"),
        null,
        null
    );
    final PlainActionFuture<UpdateApiKeyResponse> updateFuture = new PlainActionFuture<>();
    client().execute(UpdateCrossClusterApiKeyAction.INSTANCE, updateRequest, updateFuture);
    final UpdateApiKeyResponse response = updateFuture.actionGet();
    assertNotNull(response);
    assertTrue(response.isUpdated());
    // Verify the certificate identity was preserved
    final Map<String, Object> apiKeyDoc = getApiKeyDocument(apiKeyId);
    assertThat(apiKeyDoc.get("certificate_identity"), equalTo("CN=preserve-me"));
}
/**
 * Produces a randomized role-descriptor list for API key requests: a single "all" descriptor,
 * a pair including a randomly built remote-capable descriptor, {@code null}, or a copy of the
 * default test descriptor that differs only by name.
 */
private List<RoleDescriptor> randomRoleDescriptors() {
    // Draw the variant first so the RNG consumption order is stable
    final int variant = randomIntBetween(0, 3);
    if (variant == 0) {
        return List.of(new RoleDescriptor(randomAlphaOfLength(10), new String[] { "all" }, null, null));
    }
    if (variant == 1) {
        return List.of(
            new RoleDescriptor(randomAlphaOfLength(10), new String[] { "all" }, null, null),
            randomValueOtherThanMany(
                rd -> RoleDescriptorRequestValidator.validate(rd) != null,
                () -> RoleDescriptorTestHelper.builder().allowRemoteIndices(true).allowRemoteClusters(true).build()
            )
        );
    }
    if (variant == 2) {
        return null;
    }
    if (variant == 3) {
        // vary default role descriptor assigned to created API keys by name only
        return List.of(
            new RoleDescriptor(
                randomValueOtherThan(DEFAULT_API_KEY_ROLE_DESCRIPTOR.getName(), () -> randomAlphaOfLength(10)),
                DEFAULT_API_KEY_ROLE_DESCRIPTOR.getClusterPrivileges(),
                DEFAULT_API_KEY_ROLE_DESCRIPTOR.getIndicesPrivileges(),
                DEFAULT_API_KEY_ROLE_DESCRIPTOR.getRunAs()
            )
        );
    }
    throw new IllegalStateException("unexpected case no");
}
/**
 * Asserts that applying the given update request as {@code TEST_USER_NAME} fails with a
 * {@link ResourceNotFoundException} whose message names the requested key ID.
 */
private void doTestUpdateApiKeysNotFound(final UpdateApiKeyRequest request) {
    final ResourceNotFoundException notFound = expectThrowsWithUnwrappedExecutionException(
        ResourceNotFoundException.class,
        () -> updateSingleApiKeyMaybeUsingBulkAction(TEST_USER_NAME, request)
    );
    assertThat(notFound.getMessage(), containsString("no API key owned by requesting user found for ID [" + request.getId() + "]"));
}
private | ApiKeyIntegTests |
java | apache__hadoop | hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Pre21JobHistoryConstants.java | {
"start": 1736,
"end": 2140
} | enum ____ some of the values commonly used by history log events.
* since values in history can only be strings - Values.name() is used in
* most places in history file.
*
* Note: "SUCCEEDED" is actually not a pre-0.21 value, but it might appear
* in jhist logs when the event is an unsuccessful job completion, yet, the
* overall job status is "SUCCEEDED".
*/
public static | contains |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/component/file/MarkerFileExclusiveReadLockStrategyUnlockTest.java | {
"start": 1193,
"end": 2491
} | class ____ extends ContextTestSupport {
@Override
@BeforeEach
public void setUp() throws Exception {
super.setUp();
testDirectory("input-a", true);
testDirectory("input-b", true);
}
@Test
public void testUnlocking() throws Exception {
NotifyBuilder notify = new NotifyBuilder(context).whenDone(1).create();
Files.write(testFile("input-a/file1.dat"), "File-1".getBytes());
Files.write(testFile("input-b/file2.dat"), "File-2".getBytes());
boolean done = notify.matches(5, TimeUnit.SECONDS);
assertTrue(done, "Route should be done processing 1 exchanges");
assertFileNotExists(testFile("input-a/file1.dat.camelLock"));
assertFileNotExists(testFile("input-b/file2.dat.camelLock"));
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from(fileUri("input-a?fileName=file1.dat&readLock=markerFile&initialDelay=0&delay=10"))
.pollEnrich(fileUri("input-b?fileName=file2.dat&readLock=markerFile&initialDelay=0&delay=10"))
.to("mock:result");
}
};
}
}
| MarkerFileExclusiveReadLockStrategyUnlockTest |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/operators/maybe/MaybeEmpty.java | {
"start": 865,
"end": 1273
} | class ____ extends Maybe<Object> implements ScalarSupplier<Object> {
public static final MaybeEmpty INSTANCE = new MaybeEmpty();
@Override
protected void subscribeActual(MaybeObserver<? super Object> observer) {
EmptyDisposable.complete(observer);
}
@Override
public Object get() {
return null; // nulls of ScalarCallable are considered empty sources
}
}
| MaybeEmpty |
java | quarkusio__quarkus | extensions/redis-client/runtime/src/main/java/io/quarkus/redis/datasource/string/GetExArgs.java | {
"start": 415,
"end": 4083
} | class ____ extends io.quarkus.redis.datasource.value.GetExArgs implements RedisCommandExtraArguments {
private long ex = -1;
private long exAt = -1;
private long px = -1;
private long pxAt = -1;
private boolean persist;
/**
* Set the specified expire time, in seconds.
*
* @param timeout expire time in seconds.
* @return the current {@code GetExArgs}
*/
public GetExArgs ex(long timeout) {
this.ex = timeout;
return this;
}
/**
* Sets the expiration.
*
* @param timeout expire time in seconds.
* @return the current {@code GetExArgs}
*/
public GetExArgs ex(Duration timeout) {
if (timeout == null) {
throw new IllegalArgumentException("`timeout` must not be `null`");
}
return ex(timeout.toMillis() / 1000);
}
/**
* Sets the expiration time
*
* @param timestamp the timestamp
* @return the current {@code GetExArgs}
*/
public GetExArgs exAt(long timestamp) {
this.exAt = timestamp;
return this;
}
/**
* Sets the expiration time
*
* @param timestamp the timestamp type: posix time in seconds.
* @return the current {@code GetExArgs}
*/
public GetExArgs exAt(Instant timestamp) {
if (timestamp == null) {
throw new IllegalArgumentException("`timestamp` must not be `null`");
}
exAt(timestamp.toEpochMilli() / 1000);
return this;
}
/**
* Set the specified expire time, in milliseconds.
*
* @param timeout expire time in milliseconds.
* @return the current {@code GetExArgs}
*/
public GetExArgs px(long timeout) {
this.px = timeout;
return this;
}
/**
* Set the specified expire time, in milliseconds.
*
* @param timeout expire time in milliseconds.
* @return the current {@code GetExArgs}
*/
public GetExArgs px(Duration timeout) {
if (timeout == null) {
throw new IllegalArgumentException("`timeout` must not be `null`");
}
return px(timeout.toMillis());
}
/**
* Set the specified Unix time at which the key will expire, in milliseconds.
*
* @param timestamp the timestamp
* @return the current {@code GetExArgs}
*/
public GetExArgs pxAt(long timestamp) {
this.pxAt = timestamp;
return this;
}
/**
* Set the specified Unix time at which the key will expire, in milliseconds.
*
* @param timestamp the timestamp
* @return the current {@code GetExArgs}
*/
public GetExArgs pxAt(Instant timestamp) {
if (timestamp == null) {
throw new IllegalArgumentException("`timestamp` must not be `null`");
}
return pxAt(timestamp.toEpochMilli());
}
/**
* Sets {@code PERSIST}
*
* @return the current {@code GetExArgs}
*/
public GetExArgs persist() {
this.persist = true;
return this;
}
public List<Object> toArgs() {
List<Object> args = new ArrayList<>();
if (ex >= 0) {
args.add("EX");
args.add(Long.toString(ex));
}
if (exAt >= 0) {
args.add("EXAT");
args.add(Long.toString(exAt));
}
if (px >= 0) {
args.add("PX");
args.add(Long.toString(px));
}
if (pxAt >= 0) {
args.add("PXAT");
args.add(Long.toString(pxAt));
}
if (persist) {
args.add("PERSIST");
}
return args;
}
}
| GetExArgs |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/resource/basic/resource/ScanResource.java | {
"start": 82,
"end": 269
} | class ____ implements ScanProxy {
public ScanSubresource doit() {
return new ScanSubresource();
}
public String get() {
return "hello world";
}
}
| ScanResource |
java | spring-projects__spring-framework | spring-orm/src/test/java/org/springframework/orm/jpa/hibernate/HibernateEntityManagerFactoryIntegrationTests.java | {
"start": 1327,
"end": 2999
} | class ____ extends AbstractContainerEntityManagerFactoryIntegrationTests {
@Override
protected String[] getConfigLocations() {
return new String[] {"/org/springframework/orm/jpa/hibernate/hibernate-manager.xml",
"/org/springframework/orm/jpa/memdb.xml", "/org/springframework/orm/jpa/inject.xml"};
}
@Test
void testCanCastNativeEntityManagerFactoryToHibernateEntityManagerFactoryImpl() {
EntityManagerFactoryInfo emfi = (EntityManagerFactoryInfo) entityManagerFactory;
assertThat(emfi.getNativeEntityManagerFactory()).isInstanceOf(SessionFactory.class);
}
@Test
void testCanCastSharedEntityManagerProxyToHibernateEntityManager() {
assertThat(((EntityManagerProxy) sharedEntityManager).getTargetEntityManager()).isInstanceOf(Session.class);
}
@Test
void testCanUnwrapAopProxy() {
EntityManager em = entityManagerFactory.createEntityManager();
EntityManager proxy = ProxyFactory.getProxy(EntityManager.class, new SingletonTargetSource(em));
assertThat(proxy.unwrap(Session.class)).isSameAs(em);
assertThat(proxy.getDelegate()).isSameAs(em.getDelegate());
}
@Test // SPR-16956
public void testReadOnly() {
assertThat(sharedEntityManager.unwrap(Session.class).getHibernateFlushMode()).isSameAs(FlushMode.AUTO);
assertThat(sharedEntityManager.unwrap(Session.class).isDefaultReadOnly()).isFalse();
endTransaction();
this.transactionDefinition.setReadOnly(true);
startNewTransaction();
assertThat(sharedEntityManager.unwrap(Session.class).getHibernateFlushMode()).isSameAs(FlushMode.MANUAL);
assertThat(sharedEntityManager.unwrap(Session.class).isDefaultReadOnly()).isTrue();
}
}
| HibernateEntityManagerFactoryIntegrationTests |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/NamedXContentObjectHelper.java | {
"start": 464,
"end": 1826
} | class ____ {
private NamedXContentObjectHelper() {}
public static XContentBuilder writeNamedObjects(
XContentBuilder builder,
ToXContent.Params params,
boolean useExplicitOrder,
String namedObjectsName,
List<? extends NamedXContentObject> namedObjects
) throws IOException {
if (useExplicitOrder) {
builder.startArray(namedObjectsName);
} else {
builder.startObject(namedObjectsName);
}
for (NamedXContentObject object : namedObjects) {
if (useExplicitOrder) {
builder.startObject();
}
builder.field(object.getName(), object, params);
if (useExplicitOrder) {
builder.endObject();
}
}
if (useExplicitOrder) {
builder.endArray();
} else {
builder.endObject();
}
return builder;
}
/**
 * Serializes a single named object as
 * {@code { "<namedObjectName>": { "<object name>": { ... } } }}.
 *
 * @return the given {@code builder}, for chaining
 */
public static XContentBuilder writeNamedObject(
    XContentBuilder builder,
    ToXContent.Params params,
    String namedObjectName,
    NamedXContentObject namedObject
) throws IOException {
    builder.startObject(namedObjectName);
    builder.field(namedObject.getName(), namedObject, params);
    return builder.endObject();
}
}
| NamedXContentObjectHelper |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/generatedkeys/identity/IdentityGeneratedKeysTest.java | {
"start": 970,
"end": 5788
} | class ____ {
@Test
public void testIdentityColumnGeneratedIds(SessionFactoryScope scope) {
	scope.inTransaction(
			session -> {
				MyEntity entity = new MyEntity( "test" );
				session.persist( entity );
				// Inside a transaction, the IDENTITY strategy executes the INSERT at
				// persist() time, so the generated id must be available immediately.
				assertThat( entity.getId() )
						.describedAs( "identity column did not force immediate insert" )
						.isNotNull();
				session.remove( entity );
			}
	);
}
@Test
public void testPersistOutsideTransaction(SessionFactoryScope scope) {
	scope.inSession(
			session -> {
				try {
					// Snapshot the insert count so we can prove that persist() alone
					// does not hit the database when no transaction is active.
					StatisticsImplementor statistics = scope.getSessionFactory().getStatistics();
					long initialInsertCount = statistics.getEntityInsertCount();
					MyEntity myEntity2 = new MyEntity( "test-persist" );
					session.persist( myEntity2 );
					// Outside a transaction the IDENTITY insert is delayed:
					// no row written, no id assigned yet.
					assertThat( statistics.getEntityInsertCount() )
							.describedAs( "persist on identity column not delayed" )
							.isEqualTo( initialInsertCount );
					assertThat( myEntity2.getId() ).isNull();
					// an explicit flush should cause execution of the delayed insertion
					session.flush();
					fail( "TransactionRequiredException required upon flush" );
					// NOTE(review): this try/fail/catch pattern could be modernized to
					// assertThatExceptionOfType(...).isThrownBy(session::flush) — needs an import change.
				}
				catch (TransactionRequiredException ex) {
					// expected: flushing without an active transaction must fail
				}
			}
	);
}
@Test
public void testPersistOutsideTransactionCascadedToNonInverseCollection(SessionFactoryScope scope) {
	// Snapshot taken before opening the session (unlike testPersistOutsideTransaction,
	// which snapshots inside the lambda) — equivalent here, as nothing is inserted in between.
	StatisticsImplementor statistics = scope.getSessionFactory().getStatistics();
	long initialInsertCount = statistics.getEntityInsertCount();
	scope.inSession(
			session -> {
				try {
					MyEntity myEntity = new MyEntity( "test-persist" );
					// Cascade target: a child added to the non-inverse collection.
					myEntity.getNonInverseChildren().add( new MyChild( "test-child-persist-non-inverse" ) );
					session.persist( myEntity );
					// Without a transaction the IDENTITY insert (and its cascade) is delayed:
					// no row written, no id assigned.
					assertThat( statistics.getEntityInsertCount() )
							.describedAs( "persist on identity column not delayed" )
							.isEqualTo( initialInsertCount );
					assertThat( myEntity.getId() ).isNull();
					session.flush();
					fail( "TransactionRequiredException required upon flush" );
				}
				catch (TransactionRequiredException ex) {
					// expected: flushing without an active transaction must fail
				}
			}
	);
}
@Test
public void testPersistOutsideTransactionCascadedToInverseCollection(SessionFactoryScope scope) {
	// Snapshot the insert count before opening the session to prove nothing is written.
	StatisticsImplementor statistics = scope.getSessionFactory().getStatistics();
	long initialInsertCount = statistics.getEntityInsertCount();
	scope.inSession(
			session -> {
				try {
					MyEntity myEntity2 = new MyEntity( "test-persist-2" );
					MyChild child = new MyChild( "test-child-persist-inverse" );
					// Wire both sides of the bidirectional (inverse) association before persisting.
					myEntity2.getInverseChildren().add( child );
					child.setInverseParent( myEntity2 );
					session.persist( myEntity2 );
					// Without a transaction the IDENTITY insert (and its cascade) is delayed:
					// no row written, no id assigned.
					assertThat( statistics.getEntityInsertCount() )
							.describedAs( "persist on identity column not delayed" )
							.isEqualTo( initialInsertCount );
					assertThat( myEntity2.getId() ).isNull();
					session.flush();
					fail( "TransactionRequiredException expected upon flush." );
				}
				catch (TransactionRequiredException ex) {
					// expected: flushing without an active transaction must fail
				}
			}
	);
}
@Test
public void testPersistOutsideTransactionCascadedToManyToOne(SessionFactoryScope scope) {
	// Snapshot the insert count before opening the session to prove nothing is written.
	StatisticsImplementor statistics = scope.getSessionFactory().getStatistics();
	long initialInsertCount = statistics.getEntityInsertCount();
	scope.inSession(
			session -> {
				try {
					MyEntity myEntity = new MyEntity( "test-persist" );
					// Cascade target: the many-to-one sibling reference set on the entity.
					myEntity.setSibling( new MySibling( "test-persist-sibling-out" ) );
					session.persist( myEntity );
					// Without a transaction the IDENTITY insert (and its cascade) is delayed:
					// no row written, no id assigned.
					assertThat( statistics.getEntityInsertCount() )
							.describedAs( "persist on identity column not delayed" )
							.isEqualTo( initialInsertCount );
					assertThat( myEntity.getId() ).isNull();
					session.flush();
					fail( "TransactionRequiredException expected upon flush." );
				}
				catch (TransactionRequiredException ex) {
					// expected: flushing without an active transaction must fail
				}
			}
	);
}
@Test
public void testPersistOutsideTransactionCascadedFromManyToOne(SessionFactoryScope scope) {
	// Snapshot the insert count before opening the session to prove nothing is written.
	StatisticsImplementor statistics = scope.getSessionFactory().getStatistics();
	long initialInsertCount = statistics.getEntityInsertCount();
	scope.inSession(
			session -> {
				try {
					MyEntity myEntity2 = new MyEntity( "test-persist-2" );
					MySibling sibling = new MySibling( "test-persist-sibling-in" );
					// Reverse direction of the previous test: the persist is issued on the
					// sibling and cascades from its many-to-one reference to the entity.
					sibling.setEntity( myEntity2 );
					session.persist( sibling );
					// Without a transaction the IDENTITY insert (and its cascade) is delayed:
					// no row written, no id assigned.
					assertThat( statistics.getEntityInsertCount() )
							.describedAs( "persist on identity column not delayed" )
							.isEqualTo( initialInsertCount );
					assertThat( myEntity2.getId() ).isNull();
					session.flush();
					fail( "TransactionRequiredException expected upon flush." );
				}
				catch (TransactionRequiredException ex) {
					// expected: flushing without an active transaction must fail
				}
			}
	);
}
}
| IdentityGeneratedKeysTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.